'\r\n body += json.dumps(doc, indent=4)\r\n body += '
'\r\n res = redisClient.set('Hello', 'World')\r\n if res == True:\r\n # Display MongoDB & Redis message.\r\n body += '
Redis
'\r\n body += 'Get Hello => '+redisClient.get('Hello').decode(\"utf-8\")\r\n return body\r\n\r\nif __name__ == \"__main__\":\r\n ENVIRONMENT_DEBUG = os.environ.get(\"FLASK_DEBUG\", True)\r\n ENVIRONMENT_PORT = os.environ.get(\"FLASK_PORT\", 5000)\r\n application.run(host='0.0.0.0', port=ENVIRONMENT_PORT, debug=ENVIRONMENT_DEBUG)","sub_path":"Alphabet_guessing_game/app/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"301679282","text":"#!/usr/bin/env python3\n\nimport csv\nfrom math import erf, sqrt\nfrom randomness import randnr\nfrom regression import linear_regression_least_squares, covariance, variance, mean, predict\nfrom decimal import *\n\nrand = randnr(3)\ngetcontext().prec = 3\ndef in_random_order(x):\n # \"Returns an iterator that presents the list x in a random order\"\n indices = [i for i, _ in enumerate(x)]\n # \"inside-out\" Fisher-Yates shuffle. Step through the list, and at\n # each point, exchange the current element with a random element\n # in the list (including itself)\n for i in range(len(indices)):\n j = (rand.randint() // 65536) % (i+1) # The lower bits of our random generator are correlated!\n indices[i], indices[j] = indices[j], indices[i]\n for i in indices:\n yield x[i]\n\n# Functions go here!\n\n\ndef step(v, d, step_size):\n return [v[i] + d[i] * step_size for i in range(len(v))]\n\n\ndef stochastic_minimize(f, df, x: list, y: list, theta0, alpha0=0.001, iterations=50):\n # Finds the parameters theta giving the minimum\n # The alpha parameter will reduce as we continue\n data = list(zip(x, y))\n min_theta, min_value = None, float('inf')\n alpha, theta = alpha0, theta0\n iterations_without_imporvement = 0\n iter = 0\n while iterations_without_imporvement < iterations:\n # iterate all 500 datas(move theta along gradient 500 times)\n iter += 1\n\n # full value\n value = 0\n for i in range(len(x)):\n x_i = x[i]\n y_i = y[i]\n value = value + f(x_i, y_i, theta)\n\n if value < min_value:\n iterations_without_imporvement = 0\n alpha = alpha0\n min_value, min_theta = value, theta\n\n if value >= min_value:\n iterations_without_imporvement += 1\n theta = min_theta\n alpha *= .9\n\n for x_i, y_i in in_random_order(data):\n gradient = df(x_i, y_i, theta)\n for i in range(len(theta)):\n theta[i] -= alpha * gradient[i]\n if iter % 1000 == 0: print(theta)\n\n return min_theta\n\n\ndef loss(x_i, y_i, beta):\n sub_sum = sum(beta[i + 1] * x_i[i] for i 
in range(len(x_i)))\n return (beta[0] + sub_sum - y_i) ** 2\n\n\ndef dloss(x_i, y_i, beta):\n sub_sum = sum(beta[i+1] * x_i[i] for i in range(len(x_i)))\n ret = [2 * sub_sum - y_i]\n for i in range(len(x_i)):\n ret.append(2 * x_i[i] * sub_sum - y_i)\n return ret\n\n\ndef full_loss(f, x, y, beta):\n return sum(f(x_i, y_i, beta) for x_i, y_i in zip(x, y))\n\n\ndef R_2(f, x, y, beta):\n SSR_2 = sum((f(x_i, y_i, beta)) ** 2 for x_i, y_i in zip(x, y))\n mean_y = sum(y_i for y_i in y) / len(y)\n SST_2 = sum((y_i - mean_y) ** 2 for y_i in y)\n return 1 - SSR_2 / SST_2\n\n\nif __name__ == \"__main__\":\n # Here, we load the boston dataset\n boston = csv.reader(open('boston.csv')) # The boston housing dataset in csv format\n # First line contains the header, short info for each variable\n header = boston.__next__() # In python2, you might need boston.next() instead\n # Data will hold the 13 data variables, target is what we are trying to predict\n data, target = [], []\n for row in boston:\n # All but the last are the data points\n data.append([float(r) for r in row[:-1]])\n # The last is the median house value we are trying to predict\n target.append(float(row[-1]))\n # Now, use the dataset with your regression functions to answer the exercise questions\n '''\n print(\"Names of the columns\")\n print(header)\n print(\"First row of data ->variable to predict\")\n print(data[0], \" -> \", target[0])\n'''\n\n # The alpha parameter must be tuned low so that we don't jump too far\n # take the starting parameters as 0 for beta0 then the intercepts from the individual fits\n start = [0.011592,-0.041112,-0.066247,0.212368,6.314068,-33.892962,8.915683,0.041870,0.680705,0.069403,0.007788,-1.576933,0.014066,-0.147525]\n output = stochastic_minimize(loss, dloss, data, target, start, 1e-6)\n # Also need to calculate the full R^2!\n #print(R_2(loss, data, target, output))\n\n # Example of writing out the results.txt file\n fout = open('results.txt', 'w')\n for param in output:\n 
fout.write('%f\\n' % (param))# One line per variable\n fout.write('R squared %f' % (R_2(loss, data, target, output)))\n fout.close()\n\n","sub_path":"05-logistic/dae_multiple.py","file_name":"dae_multiple.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"478539934","text":"\nimport webapp2\nimport jinja2\nimport os\nfrom models import Sporocilo\nfrom google.appengine.api import users\nfrom google.appengine.api import urlfetch\nimport json\n\n\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), \"template\")\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=False)\n\n\nclass BaseHandler(webapp2.RequestHandler):\n\n def write(self, *a, **kw):\n return self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n def render(self, template, **kw):\n return self.write(self.render_str(template, **kw))\n\n def render_template(self, view_filename, params=None):\n if not params:\n params = {}\n template = jinja_env.get_template(view_filename)\n return self.response.out.write(template.render(params))\n\n\nclass MainHandler(BaseHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n logiran = True\n logout_url = users.create_logout_url(\"/\")\n params = {\"logiran\": logiran, \"logout_url\": logout_url, \"user\": user}\n else:\n logiran = False\n login_url = users.create_login_url(\"/\")\n params = {\"logiran\": logiran, \"login_url\": login_url, \"user\": user}\n return self.render_template(\"index.html\", params)\n\n\nclass RezultatHandler(BaseHandler):\n def post(self):\n rezultat = self.request.get(\"vnos\")\n rezultat1 = self.request.get(\"datum\")\n sporocilo = Sporocilo(vnos=rezultat, datum=rezultat1)\n sporocilo.put()\n return self.write(rezultat + \" do \" + rezultat1)\n\n\nclass SeznamSporocilHandler(BaseHandler):\n def get(self):\n seznam = Sporocilo.query().fetch()\n params = {\"seznam\": seznam}\n return self.render_template(\"seznam_sporocil.html\", params=params)\n\n\napp = webapp2.WSGIApplication({\n webapp2.Route('/', MainHandler),\n webapp2.Route('/rezultat', RezultatHandler),\n webapp2.Route('/seznam-opravil', SeznamSporocilHandler),\n\n}, 
debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"205995603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 10 13:29:04 2018\n\n@author: a118905\n\"\"\"\n\n#Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n#loading the dataset, dependant and independant variable\ndataset = pd.read_csv('Churn_Modelling.csv')\n\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Encoding categorical (independant) data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n\nonehotencoder = OneHotEncoder(categorical_features= [1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\n#Spliting dataset training and test\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Feature Scaling \nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n\n","sub_path":"ANN/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"617216475","text":"#step #1\n#Change random number\nimport random\ndef change_number(start, end):\n if end > start:\n return(random.randrange(start, end)), start, end\n else:\n print('...The end of the range is less than the beginning')\n return change_number(*input_num())\n\n\n#step #2\n#verified\ndef verified(input_number, sel_number, start, end):\n if start > input_number or input_number > end:\n print('...Nope, this out of range from', start, 'to', end)\n elif input_number > sel_number:\n print('...Nope, this number is too large!')\n return False\n elif input_number < sel_number:\n print('...Nope, this number is too small!')\n return False\n elif input_number == sel_number:\n #congratilation()\n return True \n \n#Welcome banner\ndef welcome_banner(s):\n if s:\n s = 'Welcome to'\n else:\n s = 'You are played in'\n c = 'the game \"Guess Number\"!'\n for i in range(2):\n print('*' * len(s + c))\n print(s, c)\n for i in range(2):\n print('*' * len(s + c))\n\ndef input_num(): \n try:\n start, end = input('Please, choose range: ').split(' ')\n #if end > start: \n # return int(start), int(end)\n #else:\n # print('End range smalled')\n return int(start), int(end)\n except ValueError as e:\n error = str(e)\n if error == 'too many values to unpack (expected 2)':\n print('...Please, input two numbers')\n return input_num()\n elif error.find('invalid literal for int() with base 10:') != -1:\n print('...Please, input only numbers')\n return input_num()\n elif error.find('empty range for randrange') != -1:\n print('...The end of the range is less than the beginning')\n return input_num()\n\n#welcome to game!\nwelcome_banner(1)\n#change number\nsel_number, start, end = change_number(*input_num())\nprint('...So, you choose range from', start, 'to', end)\n\ndef body(sel_number, start, end):\n try:\n s = verified(int(input('Guess, what?: ')), sel_number, start, end)\n except ValueError as e:\n error = str(e)\n if error == 'too many values to unpack':\n 
print('...Please, input one number')\n elif error.find('invalid literal for int() with base 10:') != -1:\n print('...Please, input only one number')\n s = False\n while not s:\n try:\n inp = int(input('Come on, repeat!: '))\n s = verified(inp, sel_number, start, end)\n except ValueError as e:\n error = str(e)\n if error == 'too many values to unpack':\n print('...Please, input one number')\n elif error.find('invalid literal for int() with base 10:') != -1:\n print('...Please, input only one number')\n s = False\n else:\n print('Yes! You win!')\n #good bye banner\n welcome_banner(0)\n \ntry:\n body(sel_number, start, end) \nexcept ValueError as e:\n error = str(e)\n if error == 'too many values to unpack':\n print('...Please, input one number')\n elif error.find('invalid literal for int() with base 10:') != -1:\n print('...Please, input only one number')\n try:\n body(sel_number, start, end)\n except:\n print('Sorry. Critical error, you input bad value')","sub_path":"python3/Course/guess_number_the_game.py","file_name":"guess_number_the_game.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"364206631","text":"class Othello(object):\n\n def __init__(self):\n super().__init__()\n self.maps = [['#'] * 8 for _ in range(8)]\n self.maps[3][3] = 'W'\n self.maps[4][4] = 'W'\n self.maps[3][4] = 'B'\n self.maps[4][3] = 'B'\n self.d_list = ['U', 'D', 'R', 'L', 'RU', 'RD', 'LU', 'LD']\n\n def set_d(self, color):\n flg = 'W' if color == 'B' else 'B'\n for d in self.d_list:\n yield flg, d\n\n def reverse(self, d, x, y, flg, color):\n c = 0\n while True:\n c += 1\n x, y = self.move(d, x, y)\n result = self.check_stone(x, y, flg)\n if result == 'fail':\n return\n elif result == 'next':\n continue\n elif result == 'decision':\n return c\n\n def check_stone(self, x, y, flg):\n if x < 0 or 7 < x or y < 0 or 7 < y:\n return 'fail'\n elif self.maps[y][x] == '#':\n return 'fail'\n elif self.maps[y][x] == flg:\n return 'next'\n else:\n return 'decision'\n\n def change_color(self, d, x, y, c, color):\n self.maps[y][x] = color\n for _ in range(c):\n x, y = self.move(d, x, y)\n self.maps[y][x] = color\n\n def move(self, d, x, y):\n if d == 'U':\n y -= 1\n elif d == 'D':\n y += 1\n elif d == 'R':\n x += 1\n elif d == 'L':\n x -= 1\n elif d == 'RU':\n x += 1\n y -= 1\n elif d == 'RD':\n x += 1\n y += 1\n elif d == 'LU':\n x -= 1\n y -= 1\n elif d == 'LD':\n x -= 1\n y += 1\n return x, y\n\n def display_result(self):\n B = W = 0\n for line in self.maps:\n for s in line:\n if s == 'B':\n B += 1\n elif s == 'W':\n W += 1\n if W < B:\n print('{0:0>2}-{1:0>2} The black won!'.format(B, W))\n elif B < W:\n print('{0:0>2}-{1:0>2} The white won!'.format(B, W))\n else:\n print('{0:0>2}-{1:0>2} Draw!'.format(B, W))\n\n\ndef f(x, y, color):\n for flg, d in othello.set_d(color):\n res = othello.reverse(d, x, y, flg, color)\n if res:\n othello.change_color(d, x, y, res, color)\n\n\nothello = Othello()\nfor _ in range(int(input())):\n _color, _x, _y = input().split()\n _x, _y = map(int, [_x, _y])\n f(_x - 1, _y - 1, 
_color)\n\nothello.display_result()\n","sub_path":"takumiy/A/A003_2.py","file_name":"A003_2.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"26055220","text":"#!/usr/bin/python3\n# coding: utf-8\n\"\"\"Classe DeputadoEstadual \"\"\"\n\nfrom Politico import Politico\n\n\nclass DeputadoEstadual(Politico):\n \"\"\" Classe Deputado Estadual \"\"\"\n\n def __init__(self, nome, partido, estado):\n \"\"\" Construtor da classe DeputadoEstadual \"\"\"\n Politico.__init__(self)\n self.set_nome(nome)\n self.set_salario(10000)\n self.set_partido(partido)\n self.set_estado(estado)\n self.set_funcao(\"propor as leis estaduais de interesse da população \")\n\n def apresentacao(self):\n super(DeputadoEstadual, self).apresentacao()\n print ('sou deputado estadual')\n print ('Minha função é ' + self.get_funcao())\n print ('Fui eleito por ' + self.get_estado())\n print ('============================')\n","sub_path":"POO/arquivos-livro/Projetos/Políticos/Python3/DeputadoEstadual.py","file_name":"DeputadoEstadual.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"481805415","text":"\"\"\"\nThis module implements all Humbug methods related to generating reports and publishing them to\nBugout knowledge bases.\n\"\"\"\nimport atexit\nimport concurrent.futures\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nimport os\nimport pkg_resources\nimport sys\nimport time\nimport traceback\nfrom typing import List, Optional\nimport uuid\n\nfrom bugout.app import Bugout\n\nfrom .consent import HumbugConsent\nfrom .system_information import (\n SystemInformation,\n generate as generate_system_information,\n)\n\n\n@dataclass\nclass Report:\n title: str\n content: str\n tags: List[str] = field(default_factory=list)\n\n\nclass Modes(Enum):\n DEFAULT = 0\n SYNCHRONOUS = 1\n\n\nclass Reporter:\n def __init__(\n self,\n name: str,\n consent: HumbugConsent,\n client_id: Optional[str] = None,\n session_id: Optional[str] = None,\n system_information: Optional[SystemInformation] = None,\n bugout_token: Optional[str] = None,\n bugout_journal_id: Optional[str] = None,\n timeout_seconds: int = 10,\n mode: Modes = Modes.DEFAULT,\n ):\n self.name = name\n self.consent = consent\n self.client_id = client_id\n if session_id is not None:\n self.session_id = session_id\n else:\n self.session_id = str(uuid.uuid4())\n if system_information is None:\n system_information = generate_system_information()\n self.system_information = system_information\n self.bugout = Bugout()\n self.bugout_token = bugout_token\n self.bugout_journal_id = bugout_journal_id\n self.timeout_seconds = timeout_seconds\n\n self.report_futures: List[concurrent.futures.Future] = []\n atexit.register(self.wait)\n\n self.executor: Optional[concurrent.futures.Executor] = None\n if mode == Modes.DEFAULT:\n self.executor = concurrent.futures.ThreadPoolExecutor(\n max_workers=1, thread_name_prefix=\"humbug_reporter\"\n )\n\n self.is_excepthook_set = False\n\n def wait(self) -> None:\n concurrent.futures.wait(\n self.report_futures, 
timeout=float(self.timeout_seconds)\n )\n if self.executor is not None:\n self.executor.shutdown()\n\n def system_tags(self) -> List[str]:\n tags = [\n \"humbug\",\n \"source:{}\".format(self.name),\n \"os:{}\".format(self.system_information.os),\n \"arch:{}\".format(self.system_information.machine),\n \"python:{}\".format(self.system_information.python_version_major),\n \"python:{}.{}\".format(\n self.system_information.python_version_major,\n self.system_information.python_version_minor,\n ),\n \"python:{}\".format(self.system_information.python_version),\n \"session:{}\".format(self.session_id),\n ]\n if self.client_id is not None:\n tags.append(\"client:{}\".format(self.client_id))\n\n return tags\n\n def publish(self, report: Report, wait: bool = False) -> None:\n if not self.consent.check():\n return\n if self.bugout_token is None or self.bugout_journal_id is None:\n return\n\n try:\n report.tags = list(set(report.tags))\n if wait or self.executor is None:\n self.bugout.create_entry(\n token=self.bugout_token,\n journal_id=self.bugout_journal_id,\n title=report.title,\n content=report.content,\n tags=report.tags,\n timeout=self.timeout_seconds,\n )\n else:\n report_future = self.executor.submit(\n self.bugout.create_entry,\n token=self.bugout_token,\n journal_id=self.bugout_journal_id,\n title=report.title,\n content=report.content,\n tags=report.tags,\n timeout=self.timeout_seconds,\n )\n self.report_futures.append(report_future)\n except:\n pass\n\n def custom_report(\n self,\n title: str,\n content: str,\n tags: Optional[List[str]] = None,\n publish: bool = True,\n wait: bool = False,\n ) -> Report:\n \"\"\"\n Generates (and optionally publishes) a custom report in which the title, tags, and content\n are defined by the caller of this method.\n \"\"\"\n if tags is None:\n tags = []\n report = Report(title=title, content=content, tags=tags)\n if publish:\n self.publish(report, wait=wait)\n return report\n\n def system_report(\n self, tags: 
Optional[List[str]] = None, publish: bool = True, wait: bool = False\n ) -> Report:\n title = \"{}: System information\".format(self.name)\n content = \"\"\"### User timestamp\n```\n{user_time}\n```\n\n### OS\n```\n{os}\n```\n\nRelease: `{os_release}`\n\n### Processor\n```\n{machine}\n```\n\n### Python\n```\n{python_version}\n```\"\"\".format(\n user_time=int(time.time()),\n os=self.system_information.os,\n os_release=self.system_information.os_release,\n machine=self.system_information.machine,\n python_version=self.system_information.python_version,\n )\n report = Report(title=title, content=content, tags=self.system_tags())\n if tags is not None:\n report.tags.extend(tags)\n report.tags.append(\"type:system\")\n\n if publish:\n self.publish(report, wait=wait)\n\n return report\n\n def error_report(\n self,\n error: Exception,\n tags: Optional[List[str]] = None,\n publish: bool = True,\n wait: bool = False,\n ) -> Report:\n title = \"{} - {}\".format(self.name, type(error).__name__)\n error_content = \"\"\"### User timestamp\n```\n{user_time}\n```\n\n### Exception summary\n```\n{error_summary}\n```\n\n### Traceback\n```\n{error_traceback}\n```\"\"\".format(\n user_time=int(time.time()),\n error_summary=repr(error),\n error_traceback=\"\".join(\n traceback.format_exception(\n etype=type(error),\n value=error,\n tb=error.__traceback__,\n )\n ),\n )\n if tags is None:\n tags = []\n tags.append(\"type:error\")\n tags.extend(self.system_tags())\n\n report = Report(title=title, content=error_content, tags=tags)\n\n if publish:\n self.publish(report, wait=wait)\n\n return report\n\n def env_report(\n self,\n title: Optional[str] = None,\n tags: Optional[List[str]] = None,\n publish: bool = True,\n wait: bool = False,\n ) -> Report:\n \"\"\"\n Creates and optionally publishes a report containing the environment variables defined in\n the current process.\n \"\"\"\n if title is None:\n title = \"Environment variables\"\n if tags is None:\n tags = []\n 
tags.append(\"type:env\")\n\n env_vars = [\"{}={}\".format(key, value) for key, value in os.environ.items()]\n content = \"```\\n{}\\n```\".format(\"\\n\".join(env_vars))\n\n report = Report(title=title, content=content, tags=tags)\n if publish:\n self.publish(report, wait=wait)\n return report\n\n def packages_report(\n self,\n title: Optional[str] = None,\n tags: Optional[List[str]] = None,\n publish: bool = True,\n wait: bool = False,\n ) -> Report:\n \"\"\"\n Creates and optionally publishes a report containing the packages (and versions of those\n packages) available in the current Python process.\n \"\"\"\n if title is None:\n title = \"Available packages\"\n if tags is None:\n tags = []\n tags.append(\"type:dependencies\")\n\n available_packages = [\n str(package_info) for package_info in pkg_resources.working_set\n ]\n content = \"```\\n{}\\n```\".format(\"\\n\".join(available_packages))\n report = Report(title, content, tags)\n if publish:\n self.publish(report, wait=wait)\n return report\n\n def compound_report(\n self,\n reports: List[Report],\n title: Optional[str] = None,\n tags: Optional[List[str]] = None,\n publish: bool = True,\n wait: bool = False,\n ) -> Report:\n if tags is None:\n tags = []\n for component in reports:\n tags.extend(component.tags)\n\n if title is None:\n title = \"Composite report\"\n\n content = \"\\n\\n- - -\\n\\n\".join(component.content for component in reports)\n\n report = Report(title=title, content=content, tags=tags)\n if publish:\n self.publish(report, wait=wait)\n return report\n\n def setup_excepthook(self, tags: Optional[List[str]] = None, publish: bool = True):\n \"\"\"\n Adds error_report with python Exceptions.\n Only one excepthook will be added to stack, no matter how many\n times you call this method.\n\n Docs: https://docs.python.org/3/library/sys.html#sys.excepthook\n \"\"\"\n if not self.is_excepthook_set:\n original_excepthook = sys.excepthook\n\n def _hook(exception_type, exception_instance, traceback):\n 
self.error_report(error=exception_instance, tags=tags, publish=publish)\n original_excepthook(exception_type, exception_instance, traceback)\n\n sys.excepthook = _hook\n\n self.is_excepthook_set = True\n\n def setup_notebook_excepthook(self, tags: Optional[List[str]] = None):\n \"\"\"\n Excepthook for ipython, works with jupiter notebook.\n \"\"\"\n ipython_shell = get_ipython() # type: ignore\n old_showtraceback = ipython_shell.showtraceback\n\n def showtraceback(*args, **kwargs):\n _, exc_instance, _ = sys.exc_info()\n self.error_report(exc_instance, tags=tags, publish=True)\n old_showtraceback(*args, **kwargs)\n\n ipython_shell.showtraceback = showtraceback\n self.setup_excepthook(publish=True, tags=tags)\n","sub_path":"python/humbug/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":10297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"376830562","text":"#\n# Copyright 2016 Dohop hf.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTests for the inclusion logic\n\"\"\"\n\nimport os\n\nfrom logstash_notifier import get_value_from_input\nfrom .compat import TestCase\n\n\nclass TestIncludeParser(TestCase):\n \"\"\"\n Tests the parsing of the include options\n \"\"\"\n def test_key_val_parsing(self):\n # Test parsing of keyval strings\n self.assertEqual(\n get_value_from_input('fruits=\"pear,kiwi,banana\"'),\n {'fruits': '\"pear,kiwi,banana\"'}\n )\n self.assertEqual(\n get_value_from_input('berries='),\n {'berries': ''}\n )\n self.assertEqual(\n get_value_from_input('pythagoras=a2+b2=c2'),\n {'pythagoras': 'a2+b2=c2'}\n )\n\n def test_environ_extraction(self):\n # Test inclusion of variables from the environ\n os.environ['vegetables'] = '\"carrot,peas,green beans\"'\n os.environ['smellythings'] = ''\n self.assertEqual(\n get_value_from_input('vegetables'),\n {'vegetables': '\"carrot,peas,green beans\"'}\n )\n self.assertEqual(\n get_value_from_input('smellythings'),\n {'smellythings': ''}\n )\n\n def test_combination(self):\n # Test having both environment vars and arbitrary keyvals\n os.environ['bears'] = 'polar,brown,black'\n os.environ['notbears'] = 'unicorn,griffin,sphinx,otter'\n command_line = ['bears', 'notbears', 'e=mc2', 'v=iR', 'qwertyuiop']\n expected = {\n 'bears': 'polar,brown,black',\n 'notbears': 'unicorn,griffin,sphinx,otter',\n 'e': 'mc2',\n 'v': 'iR',\n }\n 
result = {}\n for variable in command_line:\n result.update(get_value_from_input(variable))\n\n self.assertDictEqual(result, expected)\n","sub_path":"tests/test_include.py","file_name":"test_include.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"318711696","text":"\"\"\"\nUtility for computing ideal sonar readings\n\"\"\"\n# Updated to use the new Soar's geometry classes\nfrom soar.sim.world import *\nfrom soar.robot.pioneer import PioneerRobot\nfrom soar.sim.geometry import Pose\nfrom lib601 import sonarDist\n\nSONAR_MAX = 1.5\n\n######################################################################\n### Compute ideal readings\n######################################################################\n\ndef compute_ideal_readings(world_path, xMin, xMax, y, numStates, numObs):\n \"\"\"\n @param world_path: string naming file to read the world description from\n @param xMin: minimum x coordinate for center of robot\n @param xMax: maximum x coordinate for center of robot\n @param y: constant y coordinate for center of robot\n @param numStates: number of discrete states into which to divide\n the range of x coordinates\n @param numObs: number of discrete observations into which to\n divide the range of good sonar observations, between 0 and C{goodSonarRange}\n @returns: list of C{numStates} values, each of which is between 0\n and C{numObs-1}, which lists the ideal discretized sonar reading\n that the robot would receive if it were at the midpoint of each of\n the x bins.\n \"\"\"\n\n xStep = (xMax - xMin) / float(numStates)\n readings = []\n # Start in the middle of the first box\n x = xMin + (xStep / 2.0)\n world_namespace = {}\n exec(open(world_path, 'r').read(), world_namespace)\n world = world_namespace['world'] # Grab the world object\n for ix in range(numStates):\n # left-hand sonar reading assuming we're heading to the right\n sensor_pose = sonarDist.sonarPoses[0].x, sonarDist.sonarPoses[0].y, sonarDist.sonarPoses[0].theta\n readings.append(discrete_sonar(ideal_sonar_reading(Pose(x, y, 0), sensor_pose, world), numObs))\n x += xStep\n return readings\n \ndef ideal_sonar_reading(robot_pose, sensor_pose, world):\n \"\"\"\n @param robot_pose: C{util.Pose} representing pose of robot in world\n 
@param sensor_pose: c{util.Pose} representing pose of sonar sensor\n with respect to the robot\n @param world: C{soarWorld.SoarWorld} representing obstacles in the world\n @returns: length of ideal sonar reading; if the distance is\n longer than C{sonarDist.sonarMax} or there is no hit at all, then\n C{sonarDist.sonarMax} is returned. \n \"\"\"\n # Translate and turn by the robot's pose, then rotate about its center\n origin = robot_pose.transform(sensor_pose).rotate(robot_pose.point(), robot_pose.t)\n sonar_ray = Ray(origin, length=SONAR_MAX, dummy=True)\n # Find all collisions that don't take place with a robot\n collisions = world.find_all_collisions(sonar_ray, eps=1e-3, condition=lambda obj: not isinstance(obj, PioneerRobot))\n if collisions:\n distances = [origin.distance(p) for _, p in collisions]\n distances.sort()\n return distances[0]\n else:\n return SONAR_MAX\n\ndef discrete_sonar(d, numBins, sonarMax = None):\n \"\"\"\n @param d: value of a sonar reading\n @param numBins: number of bins into which to divide the interval\n between 0 and C{sonardist.sonarMax}\n @returns: number of the bin into which this sonar reading should\n fall; any reading greater than or equal to c{sonarDist.sonarMax}\n is put into bin C{numBins - 1}.\n \"\"\"\n if not sonarMax:\n sonarMax = SONAR_MAX\n binSize = sonarMax / numBins\n return int(d / binSize)\n\ndef inv_discrete_sonar(id, numBins, sonarMax = None):\n if not sonarMax:\n sonarMax = sonarDist.sonarMax\n binSize = sonarMax / numBins\n return id * binSize\n\n# Old name, defined here in case somebody depends on it...\ndiscreteSonarValue = discreteSonar = discrete_sonar\n","sub_path":"src/idealReadings.py","file_name":"idealReadings.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"274970159","text":"# import references\nimport copy\n\n\ndef isdict(x):\n return isinstance(x, dict)\ndef istuple(x):\n return isinstance(x, tuple)\ndef isiterable(x):\n return getattr(x, '__iter__', False)\n# def isgensonref(x):\n# return isinstance(x, ScopedReference)\ndef isgensonevaluable(x):\n return getattr(x, '__genson_eval__', False)\n\n\n\n\ndef resolve(x, context = []):\n if isgensonevaluable(x):\n return resolve(x.__genson_eval__(context), context)\n elif isdict(x):\n # build a new copy of the dict\n return_dict = {}\n\n # push down object context stack\n context.append(return_dict)\n\n for k,v in x.items():\n val = resolve(v, context)\n\n # check if we need to do a splat\n if istuple(k):\n if istuple(val):\n if len(k) is not len(val):\n raise Exception(\"Invalid splat\")\n\n for (splat_key,splat_val) in zip(k,val):\n return_dict[splat_key] = resolve(splat_val, context)\n else:\n for splat_key in k:\n return_dict[splat_key] = resolve(val, context)\n else:\n return_dict[k] = val\n\n # pop object context stack\n context.pop()\n\n return return_dict\n # elif isgensonref(x):\n # val = resolve_scoped_reference(copy.deepcopy(x), copy.copy(context))\n # return resolve( val, context )\n\n elif istuple(x):\n return_list = []\n for v in x:\n return_list.append(resolve(v, context))\n return tuple(return_list)\n elif isiterable(x):\n return_list = []\n for v in x:\n return_list.append(resolve(v, context))\n return return_list\n else:\n return x\n","sub_path":"genson/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"375627920","text":"import shutil\nfrom pathlib import Path\n\nimport pytest\nimport requests\n\nfrom pandas_profiling.controller import console\n\n\n@pytest.fixture(scope=\"module\")\ndef data_dir(tmpdir_factory):\n data_path = Path(str(tmpdir_factory.mktemp(\"test_console\")))\n file_name = data_path / \"rows.csv\"\n if not file_name.exists():\n data = requests.get(\n \"https://data.nasa.gov/api/views/gh4g-9sfh/rows.csv?accessType=DOWNLOAD\"\n )\n file_name.write_bytes(data.content)\n yield data_path\n shutil.rmtree(str(data_path))\n\n\ndef test_console_multiprocessing(data_dir):\n report = data_dir / \"test_samples.html\"\n console.main([\"-s\", \"--pool_size\", \"0\", str(data_dir / \"rows.csv\"), str(report)])\n assert report.exists(), \"Report should exist\"\n\n\ndef test_console_single_core(data_dir):\n report = data_dir / \"test_single_core.html\"\n console.main([\"-s\", \"--pool_size\", \"1\", str(data_dir / \"rows.csv\"), str(report)])\n assert report.exists(), \"Report should exist\"\n","sub_path":"tests/unit/test_console.py","file_name":"test_console.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"346067258","text":"import operator\n\nfrom ..utils import Sentinel\n\n\nclass IndexCallable:\n \"\"\"Provide getitem syntax for functions\n\n >>> def inc(x):\n ... return x + 1\n\n >>> I = IndexCallable(inc)\n >>> I[3]\n 4\n\n Vendored from dask\n \"\"\"\n\n __slots__ = (\"fn\",)\n\n def __init__(self, fn):\n self.fn = fn\n\n def __getitem__(self, key):\n return self.fn(key)\n\n\nclass IndexersMixin:\n \"\"\"\n Provides slicable attributes keys_indexes, items_indexer, values_indexer.\n\n Must be mixed in with a class that defines methods:\n\n * ``_item_by_index``\n * ``_keys_slice``\n * ``_items_slice``\n \"\"\"\n\n __slots__ = (\n \"keys_indexer\",\n \"items_indexer\",\n \"values_indexer\",\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.keys_indexer = IndexCallable(self._keys_indexer)\n self.items_indexer = IndexCallable(self._items_indexer)\n self.values_indexer = IndexCallable(self._values_indexer)\n\n # There is some code reptition here, but let's live with it rather than add\n # yet more depth to the call stack....\n\n def _keys_indexer(self, index_or_slice):\n if isinstance(index_or_slice, int):\n if index_or_slice < 0:\n index_or_slice = -1 - index_or_slice\n direction = -1\n else:\n direction = 1\n key, _value = self._item_by_index(index_or_slice, direction)\n return key\n elif isinstance(index_or_slice, slice):\n start, stop, direction = slice_to_interval(index_or_slice)\n return list(self._keys_slice(start, stop, direction))\n else:\n raise TypeError(\n f\"{index_or_slice} must be an int or slice, not {type(index_or_slice)}\"\n )\n\n def _items_indexer(self, index_or_slice):\n if isinstance(index_or_slice, int):\n if index_or_slice < 0:\n index_or_slice = -1 - index_or_slice\n direction = -1\n else:\n direction = 1\n return self._item_by_index(index_or_slice, direction)\n elif isinstance(index_or_slice, slice):\n start, stop, direction = slice_to_interval(index_or_slice)\n return 
list(self._items_slice(start, stop, direction))\n else:\n raise TypeError(\n f\"{index_or_slice} must be an int or slice, not {type(index_or_slice)}\"\n )\n\n def _values_indexer(self, index_or_slice):\n if isinstance(index_or_slice, int):\n if index_or_slice < 0:\n index_or_slice = -1 - index_or_slice\n direction = -1\n else:\n direction = 1\n _key, value = self._item_by_index(index_or_slice, direction)\n return value\n elif isinstance(index_or_slice, slice):\n start, stop, direction = slice_to_interval(index_or_slice)\n return [value for _key, value in self._items_slice(start, stop, direction)]\n else:\n raise TypeError(\n f\"{index_or_slice} must be an int or slice, not {type(index_or_slice)}\"\n )\n\n\ndef slice_to_interval(slice_):\n \"\"\"\n Convert slice object to (start, stop, direction).\n \"\"\"\n start = slice_.start or 0 # Handles case where slice_.start is None.\n step = slice_.step or 1 # Handles case where slice_.step is None.\n if step == 1:\n if start < 0:\n raise ValueError(\n \"Tree sequence slices with start < 0 must have step=-1. \"\n f\"Use for example [{slice_.start}:{slice_.stop}:-1]\"\n \"(This is a limitation of slicing on Tree sequences \"\n \"that does not apply to Python sequences in general.)\"\n )\n if (slice_.stop is not None) and (slice_.stop < start):\n raise ValueError(\n \"Tree sequence slices with step=1 must have stop >= start. \"\n \"(This is a limitation of slicing on Tree sequences \"\n \"that does not apply to Python sequences in general.)\"\n )\n start_ = start\n stop_ = slice_.stop\n direction = 1\n elif step == -1:\n if start >= 0:\n raise ValueError(\n \"Tree sequence slices with start >= 0 must have step=1. 
\"\n \"(This is a limitation of slicing on Tree sequences \"\n \"that does not apply to Python sequences in general.)\"\n )\n if slice_.stop is not None:\n if slice_.stop > start:\n raise ValueError(\n \"Tree sequence slices with step=-1 must have stop <= start.\"\n )\n stop_ = 1 - slice_.stop\n else:\n stop_ = slice_.stop\n start_ = 1 - start\n direction = -1\n else:\n raise ValueError(\n \"Only step of 1 or -1 is supported in a Tree sequence slice. \"\n f\"Step {slice_.step} is disallowed.\"\n )\n assert start_ >= 0\n assert (stop_ is None) or (stop_ >= start_)\n return start_, stop_, direction\n\n\nUNCHANGED = Sentinel(\"UNCHANGED\")\n\n\ndef tree_repr(tree, sample):\n sample_reprs = list(map(repr, sample))\n out = f\"<{type(tree).__name__} {{\"\n # Always show at least one.\n if sample_reprs:\n out += sample_reprs[0]\n # And then show as many more as we can fit on one line.\n counter = 1\n for sample_repr in sample_reprs[1:]:\n if len(out) + len(sample_repr) > 60: # character count\n break\n out += \", \" + sample_repr\n counter += 1\n approx_len = operator.length_hint(tree) # cheaper to compute than len(tree)\n # Are there more in the tree that what we displayed above?\n if approx_len > counter:\n out += f\", ...}} ~{approx_len} entries>\"\n else:\n out += \"}>\"\n return out\n","sub_path":"tiled/trees/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"103496269","text":"'''\nThis module contains the functions needed for parsing the necessary information\nfrom the ADF output file for calculating the Raman peak intensities based on\nthe Polarizability Tensor, centers of vibration, atom positions, bonds,\nand Polarizability Gradient.\n'''\n\nimport math\nimport re\nimport numpy as np\n\n# Finds positions for the atoms\ndef find_atoms(filename):\n\n '''\n This function finds the atoms and their x, y, z coordinates.\n '''\n\n atom_data = np.array([])\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'ATOMS' in line:\n atoms_num = num\n break\n for num, line in enumerate(my_file, 1):\n if 'END' in line:\n end_num = num + atoms_num\n break\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if num in range(atoms_num+1, end_num):\n full_atom = np.array(\n re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)).astype(np.float)\n atom_data = np.append(atom_data, full_atom, axis=0)\n my_file.close()\n\n atom_data = np.reshape(atom_data, (len(atom_data)/len(full_atom),\n len(full_atom)))\n atom_data = np.delete(atom_data, 0, 1)\n\n return atom_data\n\ndef find_bonds(filename, atom_data):\n '''\n Finds the bonds between atoms for plotting purposes.\n '''\n\n raw_bond_data = np.array([], dtype=int)\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'GUIBONDS' in line:\n atoms_num = num\n break\n for num, line in enumerate(my_file, 1):\n if 'END' in line:\n end_num = num + atoms_num\n break\n my_file.close()\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if num in range(atoms_num+1, end_num):\n full_bond = np.array(re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)).astype(np.float)\n raw_bond_data = np.append(raw_bond_data, full_bond, axis=0)\n my_file.close()\n raw_bond_data = np.reshape(raw_bond_data, (len(raw_bond_data)/\n len(full_bond),\n len(full_bond)))\n raw_bond_data = np.delete(raw_bond_data, 0, 
1)\n\n bond_data_x = np.array([])\n bond_data_y = np.array([])\n bond_data_z = np.array([])\n\n bond_data_x = np.column_stack(\n (atom_data[raw_bond_data[range(raw_bond_data.shape[0]),\n [0]].astype(int)-1, [0]],\n atom_data[raw_bond_data[range(raw_bond_data.shape[0]),\n [1]].astype(int)-1, [0]]))\n bond_data_y = np.column_stack(\n (atom_data[raw_bond_data[range(raw_bond_data.shape[0]),\n [0]].astype(int)-1, [1]],\n atom_data[raw_bond_data[range(raw_bond_data.shape[0]),\n [1]].astype(int)-1, [1]]))\n bond_data_z = np.column_stack(\n (atom_data[raw_bond_data[range(raw_bond_data.shape[0]),\n [0]].astype(int)-1, [2]],\n atom_data[raw_bond_data[range(raw_bond_data.shape[0]),\n [1]].astype(int)-1, [2]]))\n\n\n return bond_data_x, bond_data_y, bond_data_z\n\ndef find_frequencies(filename):\n '''\n Finds the frequencies from the output file.\n '''\n raw_freq_data = np.array([])\n with open(filename) as my_file:\n freq_len = 0\n not_found = True\n for line in my_file:\n if 'X,X' in line:\n if not_found:\n not_found = False\n freq_len += 1\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'cm-1' in line:\n # print line\n freq_num = num+2\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if num in range(freq_num, freq_num+freq_len):\n full_freq = np.array(\n re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)).astype(np.float)\n raw_freq_data = np.append(raw_freq_data, full_freq, axis=0)\n raw_freq_data = np.reshape(raw_freq_data,\n (len(raw_freq_data)/len(full_freq),\n len(full_freq)))\n frequencies = raw_freq_data[range(freq_len), [0]]\n return frequencies\n\ndef find_alpha_tensor(filename):\n '''\n Find the Polarizability Tensor alpha.\n '''\n\n raw_alpha_data = np.array([])\n\n frequencies = find_frequencies(filename)\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'Polarizability Derivatives' in line:\n # print line\n alpha_num = num\n with open(filename) as my_file:\n for num, line 
in enumerate(my_file, 1):\n if num in range(alpha_num+5, alpha_num+len(frequencies)+5):\n full_alpha = np.array(\n re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)).astype(np.float)\n raw_alpha_data = np.append(raw_alpha_data, full_alpha, axis=0)\n\n raw_alpha_data = np.reshape(raw_alpha_data,\n (len(raw_alpha_data)/len(full_alpha),\n len(full_alpha)))\n raw_alpha_data = np.delete(raw_alpha_data, 0, 1)\n alpha_tensor = np.zeros((len(raw_alpha_data), 3, 3))\n for num, row in enumerate(raw_alpha_data):\n alpha_tensor[num][0][0] = row[0]\n alpha_tensor[num][0][1] = row[1]\n alpha_tensor[num][0][2] = row[4]\n alpha_tensor[num][1][0] = row[1]\n alpha_tensor[num][1][1] = row[2]\n alpha_tensor[num][1][2] = row[3]\n alpha_tensor[num][2][0] = row[4]\n alpha_tensor[num][2][1] = row[3]\n alpha_tensor[num][2][2] = row[5]\n\n return alpha_tensor\n\ndef find_a_tensor(filename):\n '''\n Finds the Gradient Polarizability Tensor denoted 'A'\n '''\n\n a_tensor = np.array([])\n raw_a_data = np.array([])\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'X,X' in line:\n freq_x_num = num\n break\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'Z,Z' in line:\n freq_z_num = num\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if num in range(freq_x_num, freq_z_num+1):\n full_a = np.array(re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)).astype(np.float)\n full_a = full_a[range(len(full_a)-3, len(full_a))]\n raw_a_data = np.append(raw_a_data, full_a, axis=0)\n\n raw_a_data = np.reshape(raw_a_data, (len(raw_a_data)/len(full_a),\n len(full_a)))\n\n a_tensor = raw_a_data.reshape(len(raw_a_data)/9, 3, 3, 3)\n\n return a_tensor\n\ndef find_displacement_indicies(filename):\n '''\n Finds the indicies for the displacement funciton.\n '''\n\n atoms_num = []\n end_num = []\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if 'Vibrations and Normal Modes' in line:\n 
vibration_start = num\n if 'ATOMS' in line:\n atoms_num.append(num)\n if 'END' in line:\n end_num.append(num)\n\n no_of_atoms = end_num[0] - atoms_num[0] - 1\n\n last_atom = vibration_start + 8 + no_of_atoms\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if num == last_atom:\n break\n\n last_atom_name = re.findall(r\"[-+]?\\S*\\.\\S+|\\S+\", line)[0]\n last_atoms = np.array([])\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if last_atom_name in line:\n last_atoms = np.append(last_atoms, num).astype(np.int)\n\n last_atoms = np.array([atom for atom in last_atoms if atom >= last_atom])\n first_atoms = last_atoms - no_of_atoms + 1\n\n indecies = np.hstack([range(first_atoms[i], last_atoms[i]+1) for i in\n range(len(first_atoms))])\n\n return indecies, no_of_atoms\n\ndef find_displacements(filename):\n '''\n Finds the magnitudes of displacement for each vibrational mode.\n '''\n\n indecies, no_of_atoms = find_displacement_indicies(filename)\n\n frequencies = find_frequencies(filename)\n\n temp_displacements = np.array([])\n displacements = np.zeros([len(frequencies), no_of_atoms, 3])\n\n with open(filename) as my_file:\n for num, line in enumerate(my_file, 1):\n if num in indecies:\n full_displacements = np.array(\n re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)).astype(np.float)\n temp_displacements = np.append(\n temp_displacements,\n full_displacements[range(len(full_displacements)-9,\n len(full_displacements))],\n axis=0)\n\n temp_displacements = np.reshape(temp_displacements, (frequencies.shape[0]/3,\n no_of_atoms, 9))\n for array in range(len(temp_displacements)):\n displacements[3*array, :, :] = np.column_stack(\n temp_displacements[array, :, range(3)].reshape(3, no_of_atoms))\n displacements[3*array+1, :, :] = np.column_stack(\n temp_displacements[array, :, range(3, 6)].reshape(3, no_of_atoms))\n displacements[3*array+2, :, :] = np.column_stack(\n temp_displacements[array, :, range(6, 9)].reshape(3, 
no_of_atoms))\n\n return displacements\n\ndef find_centers(filename, atom_data):\n '''\n Finds centers of vibration for each vibrational frequency\n '''\n\n displacements = find_displacements(filename)\n frequencies = find_frequencies(filename)\n\n # no_of_frequencies = len(frequencies)\n no_of_atoms = len(atom_data)\n\n amplitude = np.zeros(no_of_atoms)\n center_of_vibration = np.zeros(len(frequencies)*3)\n\n for freq in range(len(frequencies)):\n vibration = np.zeros(3)\n for atom_num, atom in enumerate(displacements[freq, :, :]):\n amplitude[atom_num] = math.sqrt(\n (atom[0])**2 + (atom[1])**2 + (atom[2])**2)\n total_amplitude = sum(amplitude)\n\n for atom_num in range(no_of_atoms):\n vibration += atom_data[atom_num]*amplitude[atom_num]/total_amplitude\n center_of_vibration[range(freq*3, freq*3+3)] = vibration\n\n center_of_vibration = center_of_vibration.reshape(\n len(center_of_vibration)/3, 3)\n\n return center_of_vibration\n","sub_path":"src/parse_data.py","file_name":"parse_data.py","file_ext":"py","file_size_in_byte":10738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"631425552","text":"def triangulo(x):\r\n\tfor i in range(1, x + 1):\r\n\t\tfor e in range(1,i):\r\n\t\t\tprint(\"*\", end='')\r\n\t\tprint(\"*\")\r\nreintentar = True\r\nwhile reintentar:\r\n\ttriangulo(int(input(\"ingrese el numero de lineas que desee \\n\")))\r\n\tx = int(input(\"1: reintentar. \\n 2: salir. \\n\"))\t\t\t\r\n\tif x == 1:\r\n\t\treintentar = True\r\n\telse: reintentar = False\t\t\t\t","sub_path":"practicas python/triangulo.py","file_name":"triangulo.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"539386095","text":"# Christina Ou\n# AndrewID: cou\n# term project\n# previous files and versions are in backups folder\n# Used Panda3D and Blender\n# Blender to create the models which are borrowed from turbosquid\n# and to position the models to simulate moving\n\n# Using Panda3D's libraries to enact the 3D model of my aquarium\n\n# Aquarium\n\n# Panda imports to enable what I'm doing\nfrom direct.showbase.ShowBase import ShowBase\nfrom panda3d.core import TextNode, NodePath, LightAttrib\nfrom direct.actor.Actor import Actor\nfrom direct.gui.OnscreenText import OnscreenText\nimport sys\nfrom direct.interval.IntervalGlobal import *\n\nimport random\n\n# FYI: some of the uncommented code is for my own testing purposes\n\n\nclass fishTank(ShowBase):\n\n def __init__(self):\n # Initialize the ShowBase class from which we inherit, which will\n # create a window and set up everything we need for rendering into it.\n ShowBase.__init__(self) # borrowed from Panda3D's showbase class\n\n base.setBackgroundColor(0, 0, 0) # set background black\n # This code puts the standard title and instruction text on screen\n # format borrowed\n self.title = OnscreenText(text=\"\",\n fg=(1, 1, 1, 1), parent=base.a2dBottomRight,\n align=TextNode.ARight, pos=(-0.1, 0.1),\n shadow=(0, 0, 0, .5), scale=.08)\n\n # Set up key input, borrowed\n self.accept('escape', sys.exit)\n\n # set initial camera position\n # base.disableMouse() # Disable mouse-based camera-control\n # x, y, z is actually i, k, j Position the camera\n base.trackball.node().setPos(0, 150, -20) # starting position of camera\n base.trackball.node().setHpr(0,0,0)\n\n # positioning is x, -y, z, where x is i, -y is k, z is j\n self.marker = self.loader.loadModel(\"models/marker_01\")\n self.marker.reparentTo(self.render)\n self.marker.setPos(0, 0, 30)\n\n # positioning is x, -y, z, where x is i, -y is k, z is j\n self.marker3 = self.loader.loadModel(\"models/marker_01\")\n self.marker3.reparentTo(self.render)\n 
self.marker3.setPos(0, 0, 0)\n\n self.wood = self.loader.loadModel(\"models/wood_0\")\n woodTex = loader.loadTexture(\"tex/L1.jpg\")\n self.wood.setTexture(woodTex)\n self.wood.reparentTo(self.render)\n self.wood.setScale(20,20,20)\n self.wood.setPos(5, 2, 0)\n\n # call method to create tank model\n self.createTank()\n # get the dimensions (scale, width, height, length of present tank)\n tankDims = self.getTankDims()\n\n # keep track of all the fish\n self.fishList = []\n # types of fish; there are 4 right now\n\n # fish1 contains models of fish rightbend, leftbend, and centered\n # also stores the image jpg of the fish for its texture\n fish1 = [\"models/fish1bend-1\", \"models/fish1bend2-1\", \n \"models/fish1front_04\", 'tex/TropicalFish01.jpg']\n # pos1 = (-22, 35, 30)\n pos1 = (0, 0, 15)\n self.fishOne = Fish(fish1, pos1, tankDims) # instantiated in fish class\n\n # repeat for a total of 4 fish\n fish2 = [\"models/tang_right_00\", \"models/tang_left_01\", \n \"models/tang_02\", \"tex/TropicalFish02.jpg\"]\n # pos2 = (22, 35, 30)\n pos2 = (0, 0, 15)\n self.fishTwo = Fish(fish2, pos2, tankDims)\n # 22 35 30 is where top left forward corner is\n\n fish3 = [\"models/nemo_right_2\", \"models/nemo_left_2\", \n \"models/nemo_front_2\", \"tex/TropicalFish12.jpg\"]\n # pos3 = (0, 0, 4)\n pos3 = (0, 0, 15)\n self.fishThree = Fish(fish3, pos3, tankDims)\n\n fish4 = [\"models/yellow_right_0\", \"models/yellow_left_0\", \n \"models/yellow_front_0\", \"tex/TropicalFish05.jpg\"]\n # pos4 = (22, -35, 30)\n pos4 = (0, 0, 15)\n self.fishFour = Fish(fish4, pos4, tankDims)\n\n self.fishList = ([self.fishOne] + [self.fishTwo] + \n [self.fishThree] + [self.fishFour])\n\n # move/run the movement\n for fish in self.fishList:\n fish.move()\n # self.fishThree.move()\n # self.fishTwo.move()\n # self.fishOne.move()\n\n\n def createTank(self):\n # loader.loadModel borrowed from Panda3D's first program example\n # models from Blender\n self.sides = 
self.loader.loadModel(\"models/tanksides_03\")\n # set texture from image\n sidesTex = loader.loadTexture(\"tex/blue.png\")\n self.sides.setTexture(sidesTex)\n # attach the model to render so it appears in window\n self.sides.reparentTo(self.render)\n\n self.tankLength, self.tankWidth, self.tankHeight = 3, 2, 2\n self.tankScale = 15\n\n # Apply scale and position transforms on the model.\n # make the model 15x larger\n self.sides.setScale(self.tankScale, self.tankScale, self.tankScale)\n self.sides.setPos(0, 0, 0) # set the position\n\n # do same for bottom of tank, bottom is sand color \n self.bottom = self.loader.loadModel(\"models/tankbott_05\")\n bottomTex = loader.loadTexture(\"tex/sand.jpg\")\n self.bottom.setTexture(bottomTex)\n self.bottom.reparentTo(self.render)\n self.bottom.setScale(self.tankScale, self.tankScale, self.tankScale)\n self.bottom.setPos(0,0,0)\n\n def getTankDims(self):\n return self.tankScale, self.tankLength, self.tankWidth, self.tankHeight\n\n# fish class for each instance of the fish\n# need to specify what type, which position\n# everythingFish contains all the methods necessary for a Fish\n# contains more than 1 fish needs, so it's the superclass of Fish\nclass EverythingFish(fishTank):\n\n def __init__(self, fishModel, position, tankDims):\n # I have 3 fish models for each one instance.\n # This is due to me creating an animation out of frames of the fish\n # To make them \"move\", I have a fish with 3 positions: tail straight,\n # tail bending right, and tail bending left\n\n # use loader to load the model of the fish\n # don't need panda's \"actors\" because I'm not utilizig Blender's\n # animation technique\n self.fishR = loader.loadModel(fishModel[0])\n # syntax for loading the fish from Panda3d website\n fishTex = loader.loadTexture(fishModel[3])\n self.fishR.setTexture(fishTex)\n self.fishR.reparentTo(render)\n\n # create alternate fish the left tail position (from fish perspective)\n self.fishL = 
loader.loadModel(fishModel[1])\n self.fishL.setTexture(fishTex)\n self.fishL.reparentTo(render)\n \n self.fishFront = loader.loadModel(fishModel[2])\n self.fishFront.setTexture(fishTex) # use same previous fishTex\n self.fishFront.reparentTo(render)\n\n # start with just one model\n self.fishR.hide()\n self.fishFront.hide()\n\n # initialize how far the fish moves each time\n self.xPosition = position[0]\n self.yPosition = position[1]\n self.zPosition = position[2]\n\n self.fishL.setPos((self.xPosition, self.yPosition, self.zPosition))\n # starting fish image set to the positions\n\n # initialize initial direction\n # change will see which direction the fish is going next time\n self.yChange = -1 # start by going forward\n self.xChange = 0\n self.zChange = 0\n\n # initialize pitch and heading move each time the fish turns\n self.pitchChange = 0\n self.headingChange = 0\n\n # set a variable so not turning every sequence call\n self.fishTurn = 0\n\n self.tankScale, self.tankLength, self.tankWidth, self.tankHeight = tankDims\n self.hitBound = False\n\n # every time I move the tail, I also move the fish. I take in how much the \n # z and x are changing and increment the positions of the fish's z and x\n\n # When I \"move\" the fish, I am essentially looking at a different snapshot \n # of the fish that is set to a new position so it gives the illusion of \n # \"moving forward\". To accomplish this, I need to hide the other images \n # of the fish, and set the new fish's position to the \"moving forward\" one\n # Then I show that fish, and it has moved forward. 
Voila!\n def moveTailLeft(self):\n # increment the distance of the fish\n self.checkBounds()\n self.yPosition += self.yChange\n self.xPosition += self.xChange\n self.zPosition += self.zChange\n\n self.fishR.hide()\n self.fishFront.hide()\n # fish is moving in x,z,y space\n self.fishL.setPos((self.xPosition, self.yPosition, self.zPosition))\n self.fishL.show()\n\n def moveTailRight(self):\n self.checkBounds()\n self.yPosition += self.yChange\n self.xPosition += self.xChange\n self.zPosition += self.zChange\n\n self.fishL.hide()\n self.fishFront.hide()\n self.fishR.setPos((self.xPosition, self.yPosition, self.zPosition))\n self.fishR.show()\n\n def moveTailCenter(self):\n self.checkBounds()\n self.yPosition += self.yChange\n self.xPosition += self.xChange\n self.zPosition += self.zChange\n \n self.fishL.hide()\n self.fishR.hide()\n self.fishFront.setPos((self.xPosition, self.yPosition, self.zPosition))\n self.fishFront.show()\n\n # turning the fish 45 degrees each time. need to make sure to change\n # how much x and z changes according to the direction the fish is facing\n def leftRightFish(self, numm):\n self.fishTurn += 1\n # self.fishTurn sets how far the fish moves before turning\n if self.fishTurn < 3: \n return\n else:\n self.fishTurn = 0\n # random which direction\n if numm == 0:\n self.pitchChange = (self.pitchChange+45)%360 # turn left\n elif numm == 1:\n self.pitchChange = (360 + self.pitchChange - 45)%360 # turn right\n # coordinate changes for which way the fish is facing\n # Using the x and z axis\n if self.pitchChange == 0:\n self.xChange, self.yChange = 0, -1\n elif self.pitchChange == 45:\n self.xChange, self.yChange = +1, -1\n elif self.pitchChange == 90:\n self.xChange, self.yChange = +1, 0\n elif self.pitchChange == 135:\n self.xChange, self.yChange = +1, +1\n elif self.pitchChange == 180:\n self.xChange, self.yChange = 0, +1\n elif self.pitchChange == 225:\n self.xChange, self.yChange = -1, +1\n elif self.pitchChange == 270:\n self.xChange, 
self.yChange = -1, 0\n elif self.pitchChange == 315:\n self.xChange, self.yChange = -1, -1\n self.fishL.setHpr((self.pitchChange, self.headingChange, 0))\n self.fishR.setHpr((self.pitchChange, self.headingChange, 0))\n self.fishFront.setHpr((self.pitchChange, self.headingChange, 0))\n\n # appends these movements for a moveStraight movement\n # eventually also create a turn method that modifies/adds on to the \n # sequence\n # This is the crux of moving the fish. Credits to Panda3D library for \n # helping me figure out how to loop through functions through its Sequence\n # method. In this sequence, I loop through moving the tail from center to\n # left back to center to right. Then I turn the fish. In between each, I \n # have the program wait for a period of time so that we can see each frame.\n def moveStraight(self, seq):\n seq.append(Wait(.15))\n seq.append(Func(self.moveTailCenter))\n seq.append(Wait(.15))\n seq.append(Func(self.moveTailLeft))\n seq.append(Wait(.15))\n seq.append(Func(self.moveTailCenter))\n seq.append(Wait(.15))\n seq.append(Func(self.moveTailRight))\n\n # move fish up or down\n # includes pointing nose of fish up or down, then \"y\" axis shift\n def upDownFish(self, numm):\n # restrict to 30 degree angle\n if numm == 0 and self.headingChange > -30: # moving up\n self.headingChange -= 30 # angle fish pointing up\n self.zChange += 1\n\n elif numm == 1 and self.headingChange < 30: # moving down\n self.headingChange += 30\n self.zChange -= 1\n\n else:\n self.headingChange = 0\n self.zChange = 0\n\n # set the new rotation for each of the models\n self.fishL.setHpr((self.pitchChange, self.headingChange, 0))\n self.fishR.setHpr((self.pitchChange, self.headingChange, 0))\n self.fishFront.setHpr((self.pitchChange, self.headingChange, 0))\n\n def checkBounds(self):\n zLimit = self.tankHeight*self.tankScale\n xLimit = self.tankScale*self.tankLength\n yLimit = self.tankScale*self.tankWidth\n zmargin = 5\n xymargin = 10\n # print(self.xPosition, 
self.yPosition, self.zPosition)\n if self.zPosition < zmargin - 1: # actual basically bottom is 4\n # print(\"z low\")\n self.upDownFish(0) # make fish move up\n return True\n if self.zPosition > zLimit - zmargin: # actual basically top is 30\n # print(\"z high\")\n self.upDownFish(1)\n return True\n\n if self.xPosition > xLimit - xymargin:\n if self.pitchChange < 90:\n self.leftRightFish(1)\n else: self.leftRightFish(0)\n return True\n\n if self.yPosition > yLimit - xymargin:\n # print(\"y high\")\n if self.pitchChange >= 180 or self.pitchChange == 0:\n self.leftRightFish(0)\n else: self.leftRightFish(1)\n return True\n\n if self.xPosition < -xLimit + xymargin:\n # print(\"x low\")\n if self.pitchChange < 270 and self.pitchChange != 0:\n self.leftRightFish(1)\n else: \n self.leftRightFish(0)\n return True\n\n if self.yPosition < -yLimit + zmargin:\n # print(\"y low\")\n if self.pitchChange < 180:\n self.leftRightFish(0)\n else: self.leftRightFish(1)\n return True\n return False\n\n def UDLRFish(self):\n if self.checkBounds() != True:\n numm = random.randint(0,2) # turnLR or UD 2/3 of the time\n if numm == 1 or numm == 2: # random L or R turn\n leftrightNum = random.randint(0,1)\n self.leftRightFish(leftrightNum)\n # otherwise move up or down\n elif numm == 0: # random U or D motion\n updownNum = random.randint(0, 1)\n self.upDownFish(updownNum)\n\n\n\nclass Fish(EverythingFish):\n\n def __init__(self, fishModel, position, tankDims):\n EverythingFish.__init__(self, fishModel, position, tankDims)\n\n # create a sequence, a loop of functions. 
call the superclasses' methods\n # to add to the sequence of these fish\n def move(self):\n seq = Sequence()\n self.moveStraight(seq)\n seq.append(Func(self.UDLRFish))\n seq.loop()\n\n\n\n\n\n# Now that our class is defined, we create an instance of it.\n# Doing so calls the __init__ method set up above\ntank = fishTank() # Create an instance of our class\ntank.run() # Run the simulation\n\n\n","sub_path":"backups/noActors.py","file_name":"noActors.py","file_ext":"py","file_size_in_byte":15249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"425996266","text":"import json\nimport urllib\n\nimport re\nfrom bs4 import BeautifulSoup\nfrom decimal import Decimal\n\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import session_with_proxy, html_to_markdown, \\\n check_ean13\n\n\nclass MegaMatute(Store):\n @classmethod\n def categories(cls):\n return [\n 'StorageDrive',\n 'ExternalStorageDrive',\n 'MemoryCard',\n 'UsbFlashDrive',\n 'SolidStateDrive',\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n category_paths = [\n ['C:/164/168/170/', 'StorageDrive'],\n ['C:/164/168/169/', 'ExternalStorageDrive'],\n ['C:/164/168/172/', 'SolidStateDrive'],\n ['C:/164/168/173/', 'UsbFlashDrive'],\n ['C:/8/116/118/', 'MemoryCard'],\n ]\n\n product_urls = []\n session = session_with_proxy(extra_args)\n\n for category_path, local_category in category_paths:\n if local_category != category:\n continue\n\n page = 1\n\n while True:\n category_url = \\\n 'http://www.megamamute.com.br/buscapagina?fq={}&PS=16&' \\\n 'sl=45e718bf-51b0-49c4-8882-725649af0594' \\\n '&cc=3&sm=0&PageNumber={}' \\\n ''.format(urllib.parse.quote(category_path), page)\n\n if page >= 10:\n raise Exception('Page overflow: ' + category_url)\n\n print(category_url)\n\n soup = BeautifulSoup(session.get(category_url).text,\n 'html.parser')\n\n containers = soup.findAll('div', 'x-product')\n\n if not containers:\n if page == 1:\n raise Exception('Empty category: ' + category_url)\n break\n\n for container in containers:\n product_url = container.find('h2').find('a')['href']\n product_urls.append(product_url)\n\n page += 1\n\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n session = session_with_proxy(extra_args)\n response = session.get(url)\n\n if response.url != url:\n return []\n\n page_source = response.text\n\n pricing_data = re.search(r'vtex.events.addData\\(([\\S\\s]+?)\\);',\n 
page_source).groups()[0]\n pricing_data = json.loads(pricing_data)\n\n skus_data = re.search(r'var skuJson_0 = ([\\S\\s]+?);',\n page_source).groups()[0]\n skus_data = json.loads(skus_data)\n name = '{} {}'.format(pricing_data['productBrandName'],\n pricing_data['productName'])\n normal_price = Decimal(pricing_data['productPriceTo'])\n\n soup = BeautifulSoup(page_source, 'html.parser')\n\n discount_container = soup.find('div', 'price_box-v1').fetchParents()[0]\n discount_container = discount_container.findAll('p', 'flag')\n if discount_container:\n discount_container = discount_container[-1]\n discount_value = re.search(r'(\\d+)', discount_container.text)\n discount_value = Decimal(discount_value.groups()[0])\n discount_factor = (Decimal(100) - discount_value) / Decimal(100)\n\n offer_price = normal_price * discount_factor\n offer_price = offer_price.quantize(Decimal('0.01'))\n else:\n offer_price = normal_price\n\n picture_urls = [tag['rel'][0].split('?')[0] for tag in\n soup.findAll('a', {'id': 'botaoZoom'})]\n\n description = ''\n panel_classes = ['blc_1', 'blc_2']\n\n for panel_class in panel_classes:\n panel = soup.find('div', panel_class)\n description += html_to_markdown(str(panel)) + '\\n\\n'\n\n products = []\n\n if 'productEans' in pricing_data:\n ean = pricing_data['productEans'][0]\n if len(ean) == 12:\n ean = '0' + ean\n if not check_ean13(ean):\n ean = None\n else:\n ean = None\n\n for sku_data in skus_data['skus']:\n sku = str(sku_data['sku'])\n stock = pricing_data['skuStocks'][sku]\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n normal_price,\n offer_price,\n 'BRL',\n sku=sku,\n ean=ean,\n description=description,\n picture_urls=picture_urls\n )\n products.append(p)\n\n return products\n","sub_path":"storescraper/stores/mega_mamute.py","file_name":"mega_mamute.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"463551384","text":"from room import Room\nfrom player import Player\n\n# # Declare all the rooms\n# outside = Room(\"Outside Cave Entrance\",\n# \"North of you, the cave mount beckons\", n_to='foyer')\n\n# foyer = Room(\"Foyer\", \"Dim light filters in from the south. Dusty passages run north and east.\",\n# s_to='outside', n_to='overlook', e_to='narrow')\n\n# overlook = Room(\"Grand Overlook\", \"A steep cliff appears before you, falling into the darkness. Ahead to the north, a light flickers in the distance, but there is no way across the chasm.\", s_to='foyer')\n\n# narrow = Room(\"Narrow Passage\", \"The narrow passage bends here from west to north. The smell of gold permeates the air.\",\n# w_to='foyer', n_to='treasure')\n\n# treasure = Room(\"Treasure Chamber\", \"You've found the long-lost treasure chamber! Sadly, it has already been completely emptied by earlier adventurers. The only exit is to the south.\", s_to='narrow')\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mouth beckons.\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"),\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\nplayer1 = Player(name='Jon', player_room=room['outside'])\n\nprint(f'Hello! You currently playing as {player1.name}.\\n')\nprint(\n f'Your adventure begins {player1.player_room}... {player1.player_room.description}\\n')\n\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n# Input parser\nselection = 't'\nwhile selection not in ['q']:\n selection = input(\n f'Location: {player1.player_room} | Type n, s, e, and w to move north, south, east, or west to move. Type q to quit.\\n')\n try:\n selection = str(selection)\n if selection == 'n':\n print(f'You try to move north.')\n player1.move('n')\n elif selection == 's':\n print(f'You try to move south.')\n player1.move('s')\n elif selection == 'e':\n print(f'You try to move east.')\n player1.move('e')\n elif selection == 'w':\n print(f'You try to move west.')\n player1.move('w')\n elif selection == 'q':\n print(\"So long, partner!\")\n else:\n print('Please type: n, s, e, w, or q!')\n except ValueError:\n print(\"Please enter one of the letters: n, s, e, w, or q\")","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"989542","text":"# python3\r\n# coding=\r\n\r\n\r\nfrom collections import Iterable\r\nimport warnings\r\nfrom time import sleep\r\nfrom tqdm import tqdm\r\nfrom result import Result\r\n# import rus_corpus_oop\r\n\r\n\r\n# functions = {'rus': rus_corpus_oop}\r\nfunctions = {}\r\n\r\n\r\nclass Query:\r\n def __init__(self, language):\r\n self.language = language\r\n self.__corpus = functions[self.language] \r\n # self.search.__func__.__doc__ = self.__corpus.__doc__\r\n \r\n self.results = list()\r\n self.unsuccessful = list()\r\n self.__warn = 'Nothing found for query \"%s\".\\n' \\\r\n 'Unsuccessful queries are available via Query.unsuccessful'\r\n self.__pbar_desc = 'Query \"%s\"'\r\n self.__type_except = 'Argument `query` must be of type or iterable, got <%s>'\r\n\r\n def search(self, query, sleep_time=1, sleep_each=5, *args, **kwargs):\r\n \"\"\"\r\n sleep_time: int: sleeping time in seconds\r\n sleep_each: int: sleep after each `sleep_each` request\r\n \r\n for more arguments see `params_container.Container`\r\n \r\n __________\r\n \r\n pbar bad behaviour if found < numResults\r\n pbar dies if interrupted\r\n verbose might be more stable\r\n we want to add param \"progress=['bar', 'verbose']\", dont we\r\n \"\"\"\r\n if sleep_each < 1:\r\n raise ValueError('Argument `sleep_each` must be >= 1')\r\n \r\n if isinstance(query, str):\r\n query = [query]\r\n \r\n if not isinstance(query, Iterable):\r\n raise TypeError(self.__type_except % type(query))\r\n \r\n for q in query:\r\n _r = Result(q)\r\n parser = self.__corpus.PageParser(query=q, *args, **kwargs)\r\n q_desc = self.__pbar_desc % q\r\n \r\n for t in tqdm(parser.extract(),\r\n total=kwargs['numResults'],\r\n unit='docs',\r\n desc=q_desc):\r\n _r.add(t)\r\n if _r.N % sleep_each == 0:\r\n sleep(sleep_time)\r\n \r\n self.results.append(_r)\r\n if _r.N == 0:\r\n warnings.warn(self.__warn % q)\r\n self.unsuccessful.append(q)\r\n \r\n return 
self.results\r\n","sub_path":"refactor2/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"550942733","text":"import sys\nimport glob\nimport os\nimport PIL.Image\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport twutils.plot as twplot\nimport twutils.pre as twpre\nfrom scipy import constants as C\n\nif len(sys.argv)<3:\n\tprint('Usage: python wigner.py slicing=slices real_file,imag_file [panels=a,b] [layout=1x2]')\n\tprint(' [dr=0.0,0.0] [color=viridis,jet] [roi=h0,h1,v0,v1/h0,h1,v0,v1]')\n\tprint('------------------Examples----------------')\n\tprint('Envelope: python wigner.py xyzt=0,0,0 e_real.dvdat,e_imag.dvdat')\n\tprint('Carrier resolved: python wigner.py xyzt=0,0,0 Ex.dvdat,1.0')\n\tprint('-------------------General Notes--------------------')\n\tprint('Extra spaces (e.g. around commas, semicolons, or equals) are not allowed.')\n\tprint('Displays two panels, the field in real space, and the field in Wigner phase space.')\n\tprint('Required arguments are positional, optional arguments are key=value pairs.')\n\tprint('Double values refer to the two plots.')\n\tprint('------------------Arguments-----------------------')\n\tprint('slicing: 4-character string, such as xyzt, where the first axis is the one to transform.')\n\tprint('slices: is a comma delimited list of 3 slice indices.')\n\tprint(' The real space plot only uses the last 2 slices.')\n\tprint('real_file: name of the file with the real part of the field')\n\tprint('imag_file: EITHER name of the file with the imaginary part of the field')\n\tprint(' or the carrier frequency if the data is real.')\n\tprint('layout: can be 1x2 or 2x1')\n\tprint('dr: 0.0 signals full range on linear scale, any other float is the number of decades spanned.')\n\tprint('color: viridis,magma,plasma,inferno,Spectral,bwr,seismic,prism,ocean,rainbow,jet,nipy_spectral')\n\tprint(' Color maps may be inverted by adding \"_r\" to the name')\n\tprint(' Note any Matplotlib color maps can be used.')\n\tprint('roi: select a subdomain to plot, otherwise the full domain is 
plotted.')\n\tprint('----------------------Animations----------------------')\n\tprint('Put a python range as one of the slices to generate animated GIF.')\n\tprint('For example, zxyt=0,0,2:5 would animate time slices 2,3,4.')\n\texit()\n\ndef WignerTransform(A,ds,eta0):\n\t# If A is in V/m, ds in secs, and eta0 in ohms, returns J/m^2.\n\t# Then, integration over consistent phase space units (dimensionless product) gives the actual fluence.\n\t# The frequency variable is assumed to be an angular frequency (e.g. rad/s)\n\tN = A.shape[0]\n\tM = np.int(N/2) + 1\n\tcorr = np.zeros((N,M)).astype(np.complex)\n\tAi = np.zeros(N*2-1).astype(np.complex)\n\tAi[::2] = A\n\tAi[1::2] = 0.5*(np.roll(Ai,1)+np.roll(Ai,-1))[1::2]\n\tfor j in range(M):\n\t\tcorr[:,j] = (np.conj(np.roll(Ai,j))*np.roll(Ai,-j))[::2]\n\twig = np.fft.hfft(corr,axis=1)*ds/(2*np.pi)\n\twig = np.fft.fftshift(wig,axes=1)\n\t# Fix the units\n\tdw = (2*np.pi/ds) / N\n\tfluence = 0.5*np.sum(np.abs(A)**2)*ds/eta0\n\twigfluence = np.sum(wig)*ds*dw\n\treturn wig*fluence/wigfluence\n\ndef cleanup(wildcarded_path):\n\tcleanstr = glob.glob(wildcarded_path)\n\tfor f in cleanstr:\n\t\tos.remove(f)\n\ndef ParseSlices(dims,ax_list,slice_str_list):\n\t'''Function to generate a list of slice tuples for the movie.\n\tdims = dimensions of all 4 axes\n\tax_list = slicing_spec as list of integer axis identifiers.\n\tslice_str_list = list of slice strings, can be indices or ranges.\n\tReturns slice_tuples,movie.'''\n\tslice_tuples = []\n\trange_tuples = []\n\tmovie = False\n\t# Construct list of range tuples\n\tsax = ax_list[4-len(slice_str_list):]\n\tfor saxi,slice_str in enumerate(slice_str_list):\n\t\trng = slice_str.split(':')\n\t\ttup = ()\n\t\tfor i,el in enumerate(rng):\n\t\t\tif el=='' and i==0:\n\t\t\t\tel = '0'\n\t\t\tif el=='' and i==1:\n\t\t\t\tel = str(dims[sax[saxi]])\n\t\t\tif el=='' and i==2:\n\t\t\t\tel = '1'\n\t\t\ttup += (int(el),)\n\t\trange_tuples.append(tup)\n\t# Determine the range of the movie 
frames\n\tframe_rng = range(1)\n\tfor rng in range_tuples:\n\t\tmovie = movie or len(rng)>1\n\t\tif len(rng)==2:\n\t\t\tframe_rng = range(rng[0],rng[1])\n\t\tif len(rng)==3:\n\t\t\tframe_rng = range(rng[0],rng[1],rng[2])\n\t# Construct list of slice tuples\n\tfor r in frame_rng:\n\t\ttup = ()\n\t\tfor rng in range_tuples:\n\t\t\tif len(rng)>1:\n\t\t\t\ttup += (r,)\n\t\t\telse:\n\t\t\t\ttup += rng\n\t\tslice_tuples.append(tup)\n\treturn slice_tuples,movie\n\n# normalization constants in mks\n\nn1 = 2.65e17*1e6\nsu = twpre.SimUnits(n1*1e-6)\nt1 = su.t1\nx1 = su.x1\nE1 = su.E1\nU1 = C.m_e*C.c*C.c\nN1 = n1*x1**3\neta0 = np.sqrt(C.mu_0/C.epsilon_0)\n\n# Matplotlib setup and default args\n\nmpl.rcParams.update({'text.usetex' : False , 'font.size' : 10})\ncolor = ['viridis','viridis']\nproportional = False\nif proportional:\n\tmy_aspect = 'equal'\nelse:\n\tmy_aspect = 'auto'\ndyn_range = [0.0,0.0]\nroi = [[],[]]\nask = 'yes'\nlayout = '1x2'\npanels = ''\n\n# Process command line arguments and setup plotter object\n\nslicing_spec = sys.argv[1].split('=')[0]\nprimitive_slices = (sys.argv[1].split('=')[1]).split(',')\nif len(primitive_slices)!=3:\n\traise ValueError('Need three slices.')\nreal_data_file = sys.argv[2].split(',')[0]\nimag_data_file = sys.argv[2].split(',')[1]\nfor keyval in sys.argv[3:]:\n\tkey = keyval.split('=')[0]\n\tval = keyval.split('=')[1]\n\tif key=='panels':\n\t\tpanels = val.split(',')\n\tif key=='layout':\n\t\tlayout = val\n\tif key=='dr':\n\t\tdyn_range = []\n\t\tdyn_range.append(float(val.split(',')[0]))\n\t\tdyn_range.append(float(val.split(',')[1]))\n\tif key=='color':\n\t\tcolor = val.split(',')\n\tif key=='roi':\n\t\tfor s in val.split('/')[0].split(','):\n\t\t\troi[0].append(int(s))\n\t\tfor s in val.split('/')[1].split(','):\n\t\t\troi[1].append(int(s))\n\nplotter_r = twplot.plotter(real_data_file,buffered=False)\ntry:\n\tcarrier = float(imag_data_file)\n\tplotter_i = 0.0\nexcept ValueError:\n\tcarrier = 0.0\n\tplotter_i = 
twplot.plotter(imag_data_file,buffered=False)\nplotter_r.display_info()\n\n# Set up animation slices\n\naxes = twplot.get_axis_info(slicing_spec)\ndims = plotter_r.dims4()\nslice_tuples,movie = ParseSlices(dims,axes,primitive_slices)\n\n# Check existing image files and clean\n\nimg_files = glob.glob('frame*.png')\nif len(img_files)>0 and ask=='yes':\n\tans = ''\n\twhile ans!='y' and ans!='n':\n\t\tans = input('Found some frame*.png files, OK to clean (y/n) ?')\n\tif ans=='n':\n\t\tprint('STOPPED. Please run script in a directory where there are no important files of the form frame*.png.')\n\t\texit(1)\n\nfor img_file in img_files:\n\tos.remove(img_file)\n\ndef form_envelope(real_field,carrier,dz,ax):\n\tk = 2*np.pi*np.fft.fftfreq(real_field.shape[ax],dz)\n\tdk = k[1]-k[0]\n\tkc = -k[int(real_field.shape[ax]/2)]\n\tcarrier_idx = int(real_field.shape[ax] * carrier / (2*kc))\n\tenv = np.fft.fft(real_field,axis=ax)\n\tif ax==0:\n\t\tenv[int(env.shape[0]/2):,...] = 0.0\n\telse:\n\t\tenv[...,int(env.shape[1]/2):] = 0.0\n\tenv = np.roll(env,-carrier_idx,axis=ax)\n\treturn 2*E1*np.fft.ifft(env,axis=ax)\n\ndef extract_plot_data(plotter_r,plotter_i,slice_now):\n\tif carrier!=0.0:\n\t\treal2d,dict2d = plotter_r.falsecolor2d(slicing_spec,slice_now[1:],dyn_range[0])\n\t\tabcissa,real1d,dict1d = plotter_r.lineout(slicing_spec,slice_now,dyn_range[1])\n\t\tenvelope2d = form_envelope(real2d,carrier,abcissa[1]-abcissa[0],1)\n\t\tenvelope1d = form_envelope(real1d,carrier,abcissa[1]-abcissa[0],0)\n\telse:\n\t\treal2d,dict2d = plotter_r.falsecolor2d(slicing_spec,slice_now[1:],dyn_range[0])\n\t\timag2d,dict2d = plotter_i.falsecolor2d(slicing_spec,slice_now[1:],dyn_range[0])\n\t\tabcissa,real1d,dict1d = plotter_r.lineout(slicing_spec,slice_now,dyn_range[1])\n\t\tabcissa,imag1d,dict1d = plotter_i.lineout(slicing_spec,slice_now,dyn_range[1])\n\t\tenvelope2d = E1*(real2d + 1j*imag2d)\n\t\tenvelope1d = E1*(real1d + 1j*imag1d)\n\tz_extent = list(dict1d['extent'][:2])\n\tdz = 
(z_extent[1]-z_extent[0]) / envelope1d.shape[0]\n\tk_extent = [-np.pi/dz,np.pi/dz]\n\twig_ext = z_extent + k_extent\n\treturn envelope2d,dict2d['extent'],WignerTransform(envelope1d,dz*x1/C.c,eta0),wig_ext\n\n\n# Determine the global color scale bounds for both plots\n# If a movie we have to do all the transforms first\n# We don't save the data, just do redundant transforms later\n\nglobal_min1 = 1e50\nglobal_max1 = -1e50\nglobal_min2 = 1e50\nglobal_max2 = -1e50\nfor slice_now in slice_tuples:\n\tenvelope2d,ext1,wigner,ext2=extract_plot_data(plotter_r,plotter_i,slice_now)\n\tlocal_min = np.min(np.abs(envelope2d))\n\tlocal_max = np.max(np.abs(envelope2d))\n\tif local_minglobal_max1:\n\t\tglobal_max1 = local_max\n\tlocal_min = np.min(wigner)\n\tlocal_max = np.max(wigner)\n\tif local_minglobal_max2:\n\t\tglobal_max2 = local_max\n\n# Make a movie or display single frame\n\nfor file_idx,slice_now in enumerate(slice_tuples):\n\n\tif layout=='1x2':\n\t\tplt.figure(file_idx,figsize=(8,3.5),dpi=150)\n\telse:\n\t\tplt.figure(file_idx,figsize=(3.5,7),dpi=150)\n\n\tenvelope2d,ext1,wigner,ext2=extract_plot_data(plotter_r,plotter_i,slice_now)\n\tif layout=='1x2':\n\t\tplt.subplot(121)\n\telse:\n\t\tplt.subplot(211)\n\tplt.imshow(np.abs(envelope2d)*1e-12*1e-2,\n\t\torigin='lower',\n\t\taspect=my_aspect,\n\t\textent=ext1,\n\t\tvmin=global_min1*1e-12*1e-2,\n\t\tvmax=global_max1*1e-12*1e-2,\n\t\tcmap=color[0])\n\tb = plt.colorbar()\n\tb.set_label(r'${\\cal E}$ (TV/cm)',size=12)\n\tif len(roi[0])==4:\n\t\tplt.xlim(roi[0][0],roi[0][1])\n\t\tplt.ylim(roi[0][2],roi[0][3])\n\telse:\n\t\troi[0] = ext1\n\tplt.xlabel(r'$\\omega_p(z/c - t)$',size=12)\n\tplt.ylabel(r'$\\omega_p\\rho/c$',size=12)\n\tif not panels=='':\n\t\tplt.text(roi[0][0],roi[0][3]+0.03*(roi[0][3]-roi[0][2]),'('+panels[0]+')')\n\n\tif 
layout=='1x2':\n\t\tplt.subplot(122)\n\telse:\n\t\tplt.subplot(212)\n\tplt.imshow(wigner.swapaxes(0,1)*1e-6*1e-4,\n\t\torigin='lower',\n\t\taspect=my_aspect,\n\t\textent=ext2,\n\t\tvmin=global_min2*1e-6*1e-4,\n\t\tvmax=global_max2*1e-6*1e-4,\n\t\tcmap=color[1])\n\tb = plt.colorbar()\n\tb.set_label(r'${\\cal N}$ (MJ/cm$^2$)',size=12)\n\tif len(roi[1])==4:\n\t\tplt.xlim(roi[1][0],roi[1][1])\n\t\tplt.ylim(roi[1][2],roi[1][3])\n\telse:\n\t\troi[1] = ext2\n\tplt.xlabel(r'$\\omega_p(z/c - t)$',size=12)\n\tplt.ylabel(r'$\\delta\\omega/\\omega_p$',size=12)\n\tif not panels=='':\n\t\tplt.text(roi[1][0],roi[1][3]+0.03*(roi[1][3]-roi[1][2]),'('+panels[1]+')')\n\n\tplt.tight_layout()\n\n\tif movie:\n\t\timg_file = 'frame{:03d}.png'.format(file_idx)\n\t\tprint('saving',img_file,'...')\n\t\tplt.savefig(img_file)\n\t\tplt.close()\n\nif movie:\n\tprint('Consolidating into movie file...')\n\timages = []\n\tframeRateHz = 5\n\timg_files = sorted(glob.glob('frame*.png'))\n\tfor f in img_files:\n\t\timages.append(PIL.Image.open(f))\n\timages[0].save('mov.gif',save_all=True,append_images=images[1:],duration=int(1000/frameRateHz),loop=0)\n\tcleanup('frame*.png')\n\tprint('Done.')\nelse:\n\tplt.show()\n","sub_path":"tools/extras/wigner.py","file_name":"wigner.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"335086297","text":"#coding: utf-8\r\n\r\nfrom apiclient.discovery import build\r\nimport requests\r\nimport json\r\n\r\n\r\nAPI_KEY=\"\"\r\nTAG_TRAILLER=\" movie trailer\"\r\n\r\ndef pesquisarVideo(nomeFilme):\r\n youtube = build('youtube', 'v3', developerKey=API_KEY)\r\n req = youtube.search().list(q=nomeFilme + TAG_TRAILLER, part='snippet', type='video', maxResults=1)\r\n resp = req.execute()\r\n if(len(resp['items'])) > 0:\r\n return resp['items'][0]['id']\r\n else:\r\n return ''\r\n","sub_path":"flask_server/service/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"520900211","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_boston\nfrom sklearn.externals import joblib\n\nboston = load_boston()\nprint(boston.keys())\nprint(boston.data.shape)\n#print(boston.DESCR)\n\n#creamos el dataframe\nbos=pd.DataFrame(boston.data)\nprint(bos.head())\n\nbos.columns=boston.feature_names\nprint(bos.head())\n\n#Aquí es donde están los precios\nprint(boston.target[:5])\n\n#Los colocamos dentro del dataframe\nbos['PRICE']=boston.target\n\n#Para generar X quitamos la columna del precio\nX= bos.drop(\"PRICE\",axis=1)\nY=bos['PRICE']\n\n#Creamos el modelo\nlm= LinearRegression()\nprint(lm)\n\n#Creamos las muestras de entrenamiento y pruebas\nX_train, X_test,Y_train, Y_test = train_test_split(X,Y, test_size=0.25\n , random_state=2\n )\nprint(X_train.shape)\nprint(Y_train.shape)\nprint(X_test.shape)\nprint(Y_test.shape)\n\n#Entrenamos el Modelo\nlm.fit(X_train,Y_train)\n\nscore=lm.score(X_test,Y_test)\nprint(\"Score Modelo:\",score)\n\n\n#Guardar el modelo para usarlo más adelante\nlocalizacion_modelo=\"./modelos/modelo_regresion_linear_boston.pkl\"\njoblib.dump(lm,localizacion_modelo)\n\n#recuperar el modelo guardado anteriormente\nlm=joblib.load(localizacion_modelo)\nscore=lm.score(X_test,Y_test)\nprint(\"Score guardado:\",score)\n\n\n\nplt.scatter(lm.predict(X_train),lm.predict(X_train)- Y_train, c=\"b\",s=40, alpha=0.5)\nplt.scatter(lm.predict(X_test),lm.predict(X_test)- Y_test, c=\"g\",s=40,)\nplt.hlines(y=0, xmax=50, xmin=0)\nplt.title(\"Diagrama de dispersión de entrenamiento (azul), y pruebas (verde)\")\nplt.show()","sub_path":"04_01_01_algortimos_regresion_linear.py","file_name":"04_01_01_algortimos_regresion_linear.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"179018216","text":"from collections import deque\n\nfrom math import inf\n\n\ndef preprocess(arr: list) -> deque:\n minimum = deque()\n minimum.append(None)\n\n last = inf\n\n for i in reversed(range(len(arr))):\n e = arr[i]\n\n if e < last:\n last = e\n minimum.append(i)\n\n if e == arr[0]:\n break\n\n return minimum\n\n\ndef longest_sub_array(arr: list):\n minimum = preprocess(arr)\n\n smaller = minimum.pop()\n\n max_size = 1\n max_start = 0\n\n for i in range(len(arr)):\n while smaller is not None and arr[i] > arr[smaller]:\n if smaller - i + 1 > max_size:\n max_size = smaller - i + 1\n max_start = i\n\n smaller = minimum.pop()\n\n if smaller is None:\n break\n\n return [] if max_size == 1 else arr[max_start:max_start + max_size]\n\n\nif __name__ == '__main__':\n arr = [-5, -1, 7, 5, 1, -2]\n\n print(longest_sub_array(arr))\n print(preprocess(arr))\n","sub_path":"dynamicProgramming/SubArray.py","file_name":"SubArray.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"528683685","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTokenize string input.\nCreated on Wed Sep 18 13:18:07 2019\n\n@author: eliphat\n\"\"\"\nimport collections\nimport lcommons as commons\n\n\nwhitespaces = {' ', '\\t', '\\r', '\\n'}\nend_tokens = collections.defaultdict(list)\nfor op in commons.ops:\n end_tokens[op[0]].append(op)\nfor op in commons.spec:\n end_tokens[op[0]].append(op)\n\n\ndef put_word(tokens, string):\n if len(string) > 0:\n if string in commons.keywords:\n tokens.append(('keyword', string))\n elif string in commons.ops:\n tokens.append(('op', string))\n elif string in commons.spec:\n tokens.append(('special', string))\n else:\n tokens.append(('token', string))\n\n\ndef put_chlist(tokens, chlist):\n put_word(tokens, ''.join(chlist))\n chlist.clear()\n\n\ndef tokenize(s):\n tokens = []\n p = []\n i = 0\n while i < len(s):\n ch = s[i]\n i += 1\n if ch in whitespaces:\n put_chlist(tokens, p)\n elif ch in end_tokens:\n put_chlist(tokens, p)\n possibilities = end_tokens[ch]\n ac = False\n for end_tk in possibilities:\n if len(end_tk) == 1 or s[i - 1: i - 1 + len(end_tk)] == end_tk:\n ac = True\n put_word(tokens, end_tk)\n i = i - 1 + len(end_tk)\n if not ac:\n raise Exception(\"Requires \" + str(possibilities)\n + \"at '%s'\" % ch)\n else:\n p.append(ch)\n if p:\n put_chlist(tokens, p)\n put_word(tokens, '&EOF')\n return tokens\n","sub_path":"ltokenizer.py","file_name":"ltokenizer.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"334089918","text":"class Solution:\n def coinChange(self, coins: List[int], amount: int,memo = None) -> int:\n \n \n tab = [0] + [float('inf')] * amount\n \n for coin in coins:\n for x in range(coin,amount+1):\n tab[x] = min(tab[x],tab[x-coin] + 1)\n return tab[amount] if tab[amount] != float('inf') else -1","sub_path":"leetcode/322_coin_change.py","file_name":"322_coin_change.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"615665311","text":"import pygame as pg\nimport basic\nfrom settings import *\nimport json\nimport math\nimport random\nimport HUD\nimport importlib.util\n\nimport os\nfrom language_manager import language_text as get_text\nimport language_manager\nvec=pg.math.Vector2\n\ndef give_named_item_code(name,game):\n i=0\n while True:\n with open(items_file) as file:\n data=json.load(file)\n i+=1\n item=Item(game,0,0,i)\n item.kill()\n if item.name==name:\n break\n \n return item\nclass Player(pg.sprite.Sprite):\n\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.heroes\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game \n self.image=pg.Surface((TILESIZE,TILESIZE))\n self.image.fill(BLACK)\n self.current_frame=0\n self.last_update=0\n self.can_climb=False\n self.rect = self.image.get_rect()\n self.rect.center=(x,y)\n self.vel=vec(0,0)\n self.pos=vec(x,y)\n \n self.acc = vec(0, 0)\n\n def get_keys(self):\n keys = pg.key.get_pressed()\n def collide_with_platforms(self): \n hits = pg.sprite.spritecollide(self, self.game.platforms, False)\n if hits: \n if hits[0].is_slippery: \n if self.vel.x>0:\n self.vel.x+=0.5\n elif self.vel.x<0:\n self.vel.x-=0.5 \n if self.vel.y > 0 and self.pos.y 0:\n self.pos.x = hits[0].rect.left - self.rect.width\n if self.vel.x < 0:\n self.pos.x = hits[0].rect.right\n self.vel.x = 0\n self.rect.x = self.pos.x\n\n if dir == 'y':\n \n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.y > 0:\n self.pos.y = hits[0].rect.top - self.rect.height\n if hits[0].is_slippery:\n \n if self.vel.x>0:\n self.vel.x+=0.5\n elif self.vel.x<0:\n self.vel.x-=0.5\n \n\n if self.vel.y < 0:\n self.pos.y = hits[0].rect.bottom\n \n self.vel.y = 0\n self.rect.y = self.pos.y\n \n \n \n \n def jump(self):\n \n self.rect.y+=1\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n \n self.vel.y-=16 \n self.rect.y-=1\n\n self.rect.y+=1\n hits = pg.sprite.spritecollide(self, 
self.game.platforms, False)\n if hits:\n \n self.vel.y-=16 \n self.rect.y-=1\n\n \n def update(self):\n self.get_keys()\n self.acc = vec(0, GRAVITY)\n keys = pg.key.get_pressed()\n \n if keys[pg.K_LEFT] or keys[pg.K_a]:\n self.acc.x = -PLAYER_ACC\n if keys[pg.K_RIGHT] or keys[pg.K_d]:\n self.acc.x = PLAYER_ACC \n\n \n\n self.acc.x += self.vel.x * PLAYER_FRICTION \n self.vel += self.acc\n self.pos += self.vel + 0.5 * self.acc \n self.rect.x = self.pos.x\n self.collide('x')\n self.rect.y = self.pos.y\n self.collide('y')\n \n\n \n\n\n\n \n\nclass Obstacle(pg.sprite.Sprite):\n def __init__(self, game, x, y,w,h,is_slippery=False):\n self.groups = game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect=pg.Rect(x,y,w,h)\n self.x=x\n self.y=y\n self.is_slippery=is_slippery\n self.rect.x=x\n self.rect.y=y\n \n\n\nclass Enemy(pg.sprite.Sprite):\n def __init__(self,game,x,y,type,level):\n \n\n #базовое\n self.groups=game.enemies,game.all_sprites\n pg.sprite.Sprite.__init__(self,self.groups)\n self.rot='right'\n self.image=pg.Surface((TILESIZE,TILESIZE))\n self.image.fill(LIGHTGREY)\n self.game=game\n \n self.type=type\n \n if self.type=='primitive':\n self.id=0\n elif self.type=='passive':\n self.id=1\n elif self.type=='ninja':\n self.id=2\n \n elif self.type=='jumper':\n self.id=3\n elif self.type=='canon':\n self.id=4\n self.image=pg.image.load(canon_image)\n elif self.type=='star':\n self.id=5\n self.image.set_colorkey(WHITE)\n self.left_image=pg.transform.flip(self.image,True,False)\n self.right_image=self.image\n \n self.rect = self.image.get_rect()\n self.rect.center=(x,y)\n self.vel=vec(0,0)\n self.pos=vec(x,y)\n self.acc = vec(0, 0)\n self.state='right'\n self.rot=vec(1,0)\n with open(enemies_file) as file:\n self.data=json.load(file)\n self.basic=self.data[self.id]\n #характеристика моба\n \n\n self.level=level\n self.range=self.basic['range']\n self.agro_radius=self.basic['agro_radius']\n self.type=self.basic['type']\n 
self.speed=self.basic['speed']\n self.exp=self.basic['exp']\n #прирост\n self.inc_p_armor=self.basic['inc_p_armor']\n self.inc_m_armor=self.basic['inc_m_armor']\n self.inc_health=self.basic['inc_health']\n #Здоровье\n self.max_health=self.basic['Basic health']+self.level*self.inc_health\n self.health=self.max_health\n #Броня\n self.p_armor=self.basic[\"Basic p_armor\"]+self.level*self.inc_p_armor\n self.m_armor=self.basic[\"Basic m_armor\"]+self.level*self.inc_m_armor\n #атака\n self.damage=int(self.basic['damage']+self.level*self.basic[\"mod\"])\n self.type_of_damage=self.basic['type_of_damage']\n self.last_attack=pg.time.get_ticks()\n self.attack_speed=self.basic[\"attack speed\"]*1000\n self.exp=self.exp*self.level\n\n #логи\n self.creation_time=pg.time.get_ticks()\n self.list_of_attacks=[]\n \n\n\n def jump(self):\n self.rect.y+=1\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n self.vel.y-=16\n self.rect.y-=1\n def collide(self, dir):\n if dir == 'x':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.x > 0:\n self.pos.x = hits[0].rect.left - self.rect.width\n if self.vel.x < 0:\n self.pos.x = hits[0].rect.right\n self.vel.x = 0\n self.rect.x = self.pos.x\n if dir == 'y':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.y > 0:\n self.pos.y = hits[0].rect.top - self.rect.height\n if self.vel.y < 0:\n self.pos.y = hits[0].rect.bottom\n self.vel.y = 0\n self.rect.y = self.pos.y\n def draw_health_bar(self):\n\n if self.health > 0.6*self.max_health:\n\n col = GREEN\n\n elif self.health > 0.3*self.max_health :\n\n col = YELLOW\n\n else:\n\n col = RED\n\n width = int(self.rect.width * (self.health / self.max_health))\n\n self.health_bar = pg.Rect(0, 0, self.rect.width, 7)\n\n if self.health < self.max_health:\n\n pg.draw.rect(self.image, col, self.health_bar)\n\n def detect_enemy(self):\n self.detected=[]\n for hero in self.game.heroes:\n if 
math.fabs(hero.pos.x-self.pos.x)= self.attack_speed:\n self.last_attack = now \n if self.state=='right':\n self.list_of_attacks.append(Attack(self.game,vec(self.pos.x,self.pos.y),vec(1,0),self.damage,self.type_of_damage,'punch','heroes',w=self.rect.width,h=self.rect.height))\n \n elif self.state=='left':\n self.list_of_attacks.append( Attack(self.game,self.pos,vec(self.pos.x,self.pos.y),self.damage,self.type_of_damage,'punch','heroes',w=self.rect.width,h=self.rect.height))\n \n \n def can_attack_range(self):\n self.detected=[]\n for hero in self.game.heroes:\n if math.fabs(hero.pos.x-self.pos.x)=self.rect.top:\n \n self.jump()\n def dodge_melee(self,chance): #шанс увернутся в процентах\n for bullet in self.game.attacks:\n if bullet.type_of_attack=='punch':\n if bullet.danger=='enemies' or bullet.danger=='all':\n i=random.randint(1,100)\n image=self.image\n if i=self.rect.top: \n if self.rot=='right':\n self.vel.x+=100\n else:\n self.vel.x-=100\n \n \n \n\n \n def move(self,dir):\n if dir=='right':\n self.acc.x = -PLAYER_ACC*self.speed\n self.rot='right'\n \n elif dir=='left':\n \n self.acc.x = PLAYER_ACC*self.speed\n self.rot='left'\n def move_to_enemy(self):\n for enemy in self.detected:\n if enemy.pos.x-self.pos.x>0: #cлева\n self.move('left')\n self.state='left'\n \n else:\n self.move(\"right\")\n self.state='right'\n def following_jump(self):\n if self.vel.y==0 and self.vel.x==0:\n \n self.jump()\n for enemy in self.detected:\n if enemy.jumping==True:\n self.jump()\n def random_jumping(self):\n self.jump()\n def canon_shoot(self):\n for enemy in self.detected:\n if enemy.pos.x-self.pos.x>0: #cлева\n \n self.state='right'\n else:\n self.state='left'\n now = pg.time.get_ticks()\n if now - self.last_attack >= self.attack_speed:\n self.last_attack = now\n for detected in self.detected:\n 
self.list_of_attacks.append(Attack(self.game,vec(self.rect.centerx,self.rect.centery),vec(self.rect.centerx-detected.rect.centerx,self.rect.centery-detected.rect.centery).normalize(),self.damage,self.type_of_damage,'canon_ball','heroes'))\n \n \n \n def star_shoot(self):\n speed=10\n now = pg.time.get_ticks()\n if now - self.last_attack >= self.attack_speed:\n self.last_attack = now\n \n Attack(self.game,vec(self.pos.x,self.pos.y),vec(-1,0),self.damage,self.type_of_damage,'mini_missle','heroes')\n Attack(self.game,vec(self.pos.x,self.pos.y),vec(1,0),self.damage,self.type_of_damage,'mini_missle','heroes')\n Attack(self.game,vec(self.pos.x,self.pos.y),vec(0,-1),self.damage,self.type_of_damage,'mini_missle','heroes')\n Attack(self.game,vec(self.pos.x,self.pos.y),vec(0,1),self.damage,self.type_of_damage,'mini_missle','heroes')\n def ninja_AI(self):\n if self.detect_enemy():\n if self.can_attack():\n self.attack_enemy()\n \n self.dodge_melee(20)\n if self.can_attack()!=True:\n self.move_to_enemy()\n \n self.following_jump()\n else:\n self.dodge_range()\n def canon_AI(self):\n if self.detect_enemy():\n if self.can_attack_range():\n self.canon_shoot()\n def star_AI(self):\n if self.detect_enemy():\n self.star_shoot()\n \n def primitive_AI(self):\n if self.detect_enemy():\n if self.can_attack():\n self.attack_enemy()\n if self.can_attack()!=True:\n self.move_to_enemy() \n else:\n pass\n def passive_AI(self): \n if self.detect_enemy():\n if self.can_attack():\n self.attack_enemy()\n if self.can_attack()!=True:\n self.move_to_enemy() \n else:\n self.dodge_range()\n def jumper_AI(self):\n if self.detect_enemy():\n if self.can_attack():\n self.attack_enemy()\n if self.can_attack()!=True:\n self.move_to_enemy() \n self.random_jumping()\n else:\n pass\n def AI(self):\n if self.type=='primitive':\n self.primitive_AI()\n elif self.type=='passive':\n self.passive_AI()\n elif self.type=='ninja':\n self.ninja_AI()\n elif self.type=='jumper':\n self.jumper_AI()\n elif 
self.type=='canon':\n self.canon_AI()\n elif self.type=='star':\n self.star_AI()\n \n def necrologue(self):\n if self.health>0:\n live=True\n else:\n live=False\n dealed_damage=0\n accuracy=0\n hits=0\n i=0\n lasthit=False \n for attack in self.list_of_attacks:\n i+=1\n try:\n if attack.attacked:\n dealed_damage+=attack.total_damage \n hits+=1\n except:\n pass\n try:\n lasthit=attack.lasthit\n except:\n pass\n try:\n accuracy=round((hits/i),2)\n except:\n accuracy=0\n \n\n dictionary={\"type\":self.type,\"level\":self.level,\"time survived\":int((pg.time.get_ticks()-self.creation_time)/1000),\n \"dealed damage\":dealed_damage,\"accuracy\":accuracy,\"last hit\":lasthit,\"survived\":live}\n self.game.necrologue_list.append(dictionary)\n \n \n def update(self):\n if self.speed!=0:\n self.acc = vec(0, GRAVITY)\n if self.state==\"right\":\n self.image=self.right_image\n else:\n self.image=self.left_image\n self.AI()\n self.acc.x += self.vel.x * PLAYER_FRICTION \n self.vel += self.acc\n self.pos += self.vel + 0.5 * self.acc \n self.rect.x = self.pos.x\n self.collide('x')\n self.rect.y = self.pos.y\n self.collide('y')\n self.draw_health_bar()\n \n if self.health<=0:\n \n self.necrologue()\n self.kill()\n \n \n for hero in self.game.heroes:\n if self.exp>0:\n\n \n delta=hero.level-self.level\n exp=int(self.exp/len(self.game.heroes))\n hero.exp+=exp+1\n self.exp-=exp+1\n Floating_number(self.game,self.pos.x,self.pos.y,\"+\"+str(exp)+\" exp\",\"expirence\")\n \n \n \nclass Actor(pg.sprite.Sprite):\n def __init__(self,game,x,y,plot_name,sub_stat=0):\n self.game=game\n \n self.groups=game.all_sprites,game.actors\n \n pg.sprite.Sprite.__init__(self,self.groups)\n self.state='right'\n self.start=vec(x,y)\n self.plot=\"plot_\"+plot_name+\".py\"\n self.image=pg.Surface((TILESIZE,TILESIZE))\n self.image.fill(BLACK)\n self.game=game\n self.type=type\n self.active=False\n self.spawn_time=pg.time.get_ticks()\n self.rect = self.image.get_rect()\n self.rect.center=(x,y)\n 
self.vel=vec(0,0)\n self.pos=vec(x,y)\n self.acc = vec(0, 0)\n self.detect_trigger=1\n self.load_trigger=1\n self.execute_trigger=1\n self.condition=\"staying\"\n self.begin_time=pg.time.get_ticks()\n self.shoot_rot=vec(-1,0) \n \n \n\n \n\n self.load_plot()\n spec = importlib.util.spec_from_file_location(self.plot, PLOT_FOLDER+self.plot)\n plot = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(plot)\n self.activation_radius=plot.radius\n self.living_time=plot.life_time\n self.call_key=plot.call_key\n try:\n self.shots=plot.shots\n except:\n pass\n self.name=plot.name\n self.icon=pg.image.load(plot.icon_path)\n try:\n self.initiator=plot.initiator\n except:\n self.initiator=False\n self.image=self.icon\n self.timer=5000\n self.est_timer=pg.time.get_ticks()\n def load_plot(self):\n\n \n \n spec = importlib.util.spec_from_file_location(self.plot, PLOT_FOLDER+self.plot)\n plot = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(plot)\n \n \n def show_words_on_second(self,string,timer,time_pos,begin_time):\n now=pg.time.get_ticks()\n \n if pg.time.get_ticks()>=begin_time+time_pos*1000 and pg.time.get_ticks()<=begin_time+timer*1000+time_pos*1000:\n Floating_number(self.game,self.game.player_1.pos.x-100,self.game.player_1.pos.y+400,string,\"words\",timer)\n Floating_number(self.game,self.game.player_1.pos.x-420,self.game.player_1.pos.y+470,self.name,\"words\",timer)\n HUD.draw_player_icon(self.game.screen,self.icon,vec(0,0))\n \n \n \n\n def activate(self):\n if self.detect_trigger>0:\n for hero in self.game.heroes:\n if math.fabs(hero.pos.x-self.pos.x)=self.living_time*1000:\n \n \n self.kill()\n \n except:\n pass\n def bug_fix_1(self):\n if pg.time.get_ticks()-self.spawn_time-7000<=self.timer:\n \n self.pos=self.start\n def collide(self, dir):\n if dir == 'x':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.x > 0:\n self.pos.x = hits[0].rect.left - self.rect.width\n if self.vel.x < 0:\n self.pos.x = 
hits[0].rect.right\n self.vel.x = 0\n self.rect.x = self.pos.x\n\n if dir == 'y':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.y > 0:\n self.pos.y = hits[0].rect.top - self.rect.height\n if self.vel.y < 0:\n self.pos.y = hits[0].rect.bottom\n self.vel.y = 0\n self.rect.y = self.pos.y\n \n if self.vel.y==0:\n self.state='standing'\n def move(self,dir,speed):\n if dir=='right':\n self.vel.x=speed\n \n \n self.rot='right'\n \n self.shoot_rot=vec(-1,0) \n elif dir=='left':\n self.vel.x=-speed\n self.rot='left'\n self.shoot_rot=vec(1,0) \n elif dir=='up':\n self.vel.y=-speed*self.dir.x\n \n elif dir=='down':\n self.vel.y=-speed*self.dir.x\n \n\n def update(self):\n self.bug_fix_1()\n self.die_after_ending()\n self.activate()\n for hero in self.game.heroes:\n if math.fabs(hero.pos.x-self.pos.x)= self.death_time:\n self.kill()\n def set_image(self):\n \n if self.type_of_attack=='punch':\n self.image = pg.Surface([self.width,self.height], pg.SRCALPHA,32)\n \n\n elif self.type_of_attack=='arrow':\n self.image=pg.Surface([self.width,self.height])\n self.image.fill(GREEN)\n \n elif self.type_of_attack=='magic_missle':\n self.image=pg.Surface([self.width,self.height])\n self.image.fill(GREEN)\n elif self.type_of_attack=='bullet':\n self.image=pg.Surface(self.width,self.height)\n self.image.fill(GREEN)\n \n \n elif self.type_of_attack=='grenade':\n self.image=pg.Surface([self.width,self.height])\n self.image.fill(GREEN)\n \n elif self.type_of_attack=='canon_ball':\n self.image=pg.Surface([self.width,self.height])\n self.image.fill(GREEN)\n elif self.type_of_attack=='mini_missle':\n self.image=pg.Surface([self.width,self.height])\n self.image.fill(GREEN)\n elif self.type_of_attack=='meteor':\n self.image=pg.Surface([self.width,self.height])\n self.image.fill(GREEN)\n \n \n def size(self):\n if self.type_of_attack=='punch':\n \n speed_k=0\n width=1.2*self.start_w\n height=self.start_h\n self.death_time=30\n distance='melee'\n 
self.penetration=True\n self.dir=self.dir.reflect(vec(1,0))\n \n if self.dir.x>=-1.01 and self.dir.x<=0.99:\n self.pos.x-=self.start_w\n \n\n elif self.type_of_attack=='arrow':\n speed_k=1\n width=50\n height=10\n self.death_time=5000\n distance='ranged'\n \n elif self.type_of_attack=='magic_missle':\n speed_k=0.75\n width=20\n height=15\n self.death_time=4000\n distance='ranged'\n elif self.type_of_attack=='bullet':\n \n speed_k=1.5\n width=10\n height=10\n self.death_time=3000\n distance='ranged'\n elif self.type_of_attack=='grenade':\n \n speed_k=0.125\n width=15\n height=15\n self.death_time=4000\n distance='ranged'\n self.is_falling=True\n elif self.type_of_attack=='canon_ball':\n self.gravity=True\n width=30\n height=30\n self.death_time=6000\n speed_k=0.5\n distance='ranged'\n elif self.type_of_attack=='mini_missle':\n \n width=5\n height=5\n self.death_time=2000\n speed_k=1.2\n distance='ranged'\n elif self.type_of_attack=='meteor':\n width=32\n height=32\n self.death_time=1000\n speed_k=1\n distance='ranged'\n self.penetration=True\n try:\n if self.penetration==True:\n self.penetration=True\n except:\n self.penetration=False\n try:\n if self.is_falling==True:\n self.is_falling=True\n except:\n self.is_falling=False\n self.width=width\n self.height=height\n self.speed=speed_k*self.speed_constant\n self.distance=distance\n\n def start_timer(self):\n if pg.time.get_ticks()-self.creation>=300:\n return True\n else:\n return False\n\n def move(self):\n if self.distance!='melee':\n self.vel.x=-self.speed*self.dir.x\n self.vel.y=-self.speed*self.dir.y\n \n\n def gravity(self):\n self.acc=vec(0,GRAVITY/2)\n def collide_with_wall(self):\n \n if self.penetration==False:\n if self.start_timer():\n hits= pg.sprite.groupcollide(self.game.attacks,self.game.walls,True,False) \n if hits:\n self.collided=True\n \n \n \n def strike(self,group):\n hits=pg.sprite.spritecollide(self,group,False,False)\n if hits:\n for hit in hits:\n \n \n if self.type_of_damage=='physical':\n 
if self.damage-hit.p_armor<=0:\n damage=1\n else:\n damage=self.damage-hit.p_armor\n elif self.type_of_damage=='magical':\n if self.damage-hit.m_armor<=0:\n damage=1\n else:\n damage=self.damage-hit.m_armor\n elif self.type_of_damage=='clear':\n damage=self.damage\n \n hit.health-=damage\n if self.isStunning:\n hit.isStunned=True\n Floating_number(self.game,hit.pos.x,hit.pos.y,\"-\"+str(damage),self.type_of_damage)\n self.attacked=True\n self.total_damage=damage\n if hit.health<=0:\n self.lasthit=True\n self.kill()\n \n def update(self):\n\n \n \n if self.is_falling:\n self.gravity()\n if self.danger=='heroes' or self.danger=='all':\n self.strike(self.game.heroes)\n \n if self.danger=='enemies' or self.danger=='all':\n self.strike(self.game.enemies)\n \n self.move()\n self.death_timer()\n \n \n self.vel += self.acc\n self.pos += self.vel + 0.5 * self.acc\n self.rect.x=self.pos.x\n self.rect.y=self.pos.y\n self.collide_with_wall()\n\nclass Trap(pg.sprite.Sprite):\n def __init__(self,game,x,y,w,h,type):\n self.groups=game.all_sprites\n pg.sprite.Sprite.__init__(self,self.groups)\n self.image= pg.Surface([w,h], pg.SRCALPHA,32)\n self.game=game\n self.rot=0\n self.rot_speed=60\n self.last_attack=pg.time.get_ticks()\n self.type=type\n self.frame=0\n \n self.last_animation=pg.time.get_ticks()\n if type=='usual':\n self.damage=20\n self.reload=1\n self.type_of_damage=\"physical\"\n if type==\"saw\":\n self.damage=15\n self.image=pg.image.load(saw_image)\n self.image.set_colorkey(BLACK)\n self.orig=self.image\n self.reload=2\n self.type_of_damage=\"clear\"\n\n self.x=x\n self.y=y\n self.rect=self.image.get_rect()\n self.rect.x=x\n self.rect.y=y\n \n def attack_trap(self,group):\n hits=pg.sprite.spritecollide(self,group,False,False)\n \n if hits:\n for hit in hits:\n if self.damage-hit.p_armor<=0:\n damage=1\n else:\n damage=self.damage-hit.p_armor\n #self.game.DJ.play_effect('slash')\n hit.health-=damage\n 
Floating_number(self.game,hit.pos.x,hit.pos.y,\"-\"+str(damage)+\"hp\",self.type_of_damage)\n\n \n self.last_attack=pg.time.get_ticks()\n def attack_saw(self,group):\n hits=pg.sprite.spritecollide(self,group,False,False)\n \n if hits:\n for hit in hits:\n if pg.sprite.collide_mask(self,hit): \n damage=self.damage\n # self.game.DJ.play_effect('slash')\n hit.health-=damage\n Floating_number(self.game,hit.pos.x,hit.pos.y,\"-\"+str(damage)+\"hp\",self.type_of_damage)\n self.last_attack=pg.time.get_ticks()\n \n\n \n \n \n \n \n def update(self):\n \n if self.type=='saw':\n if pg.time.get_ticks()-self.last_attack>=self.reload*1000:\n \n self.attack_saw(self.game.enemies)\n self.attack_saw(self.game.heroes)\n self.rect.centerx=self.x\n self.rect.centery=self.y\n self.rot = (self.rot + self.rot_speed * self.game.dt) % 360\n self.image = pg.transform.rotate(self.orig,self.rot)\n\n else:\n \n if pg.time.get_ticks()-self.last_attack>=self.reload*1000:\n self.attack_trap(self.game.heroes)\n self.attack_trap(self.game.enemies)\n \n \n\n\n\n\nclass Ladder(pg.sprite.Sprite):\n def __init__(self, game, x, y,w,h):\n self.groups = game.ladders\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect=pg.Rect(x,y,w,h)\n self.x=x\n self.y=y\n self.rect.x=x\n self.rect.y=y\nclass Chest(pg.sprite.Sprite):\n def __init__(self, game, x, y,w,h,rarity):\n \n self.groups = game.chests,game.all_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect=pg.Rect(x,y,w,h)\n self.pos=vec(x,y)\n self.rect.x=self.pos.x\n self.rect.y=self.pos.y\n self.rarity=rarity\n self.num_of_items=6\n self.image = pg.Surface([w,h], pg.SRCALPHA,32)\n self.image = self.image.convert_alpha()\n self.items=[]\n self.fill_with_items()\n \n def rand_item(self,type):\n while True:\n with open(items_file) as file:\n data=json.load(file)\n i=random.randint(0,len(data)-1)\n item=Item(self.game,self.rect.x,self.rect.y,i)\n item.kill()\n if item.rarity==type:\n break\n return 
item\n \n def rand_selected(self,rarity,part):\n while True:\n with open(items_file) as file:\n data=json.load(file)\n i=random.randint(0,len(data)-1)\n item=Item(self.game,self.rect.x,self.rect.y,i)\n item.kill()\n if item.rarity==rarity and item.type==part:\n break\n \n return item\n \n\n def use(self):\n print(\"This is \"+self.rarity.upper()+\" chest!\")\n if len(self.items)>0:\n safe=[]\n print(\"It contains:\")\n i=1\n \n for item in self.items:\n \n print(str(i)+\") \"+item.name)\n \n i+=1\n safe.append(item)\n a=input(\"What you want to do? 1)take one item 2)take all 3)close \")\n try:\n if a.strip()=='1':\n if len(self.game.player_1.inventory)= self.death_timer:\n self.kill()\n # for hero in self.game.heroes:\n # hero.cutscene_up.clear()\n # hero.cutscene_down.clear()\n def get_text(self,name):\n with open(languages_folder+language_manager.get_language()+\"\\\\words_\"+name+\".txt\") as file:\n self.string=file.read()\n def update(self):\n \n self.float_up()\n self.die()\n self.rect.x=self.pos.x\n self.rect.y=self.pos.y\n\n\nclass SpriteList:\n\n \n\n def __init__(self, filename):\n\n self.spritelist = pg.image.load(filename).convert()\n\n\n\n def get_image(self, x, y, width, height):\n\n \n\n image = pg.Surface((width, height))\n\n image.blit(self.spritelist, (0, 0), (x, y, width, height))\n\n \n\n return image\n\n\n\n\nclass Spawn(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites,game.spawns,\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect=pg.Rect(x,y,1,1)\n self.pos=vec(x,y)\n self.rect.x=self.pos.x\n self.rect.y=self.pos.y\n self.it=0\n self.image = pg.Surface([1,1], pg.SRCALPHA,32)\n self.image = self.image.convert_alpha()\n self.last_spawn=pg.time.get_ticks()\n self.to_load=[]\n \n def load_global_pack(self,pack):\n \n self.global_pack=pack\n def load_local_pack(self):\n self.to_load=[]\n for enemy in self.global_pack:\n \n 
self.to_load.append({\"type\":enemy[\"type\"],\"level\":enemy[\"level\"],\"amount\":(enemy[\"amount\"]/len(self.game.spawns))})\n\n\n def spawn(self):\n for enemy in self.to_load:\n while enemy[\"amount\"]>0:\n enemy[\"amount\"]-=1\n spread=random.randint(-20,20)\n pos=vec(self.pos.x+spread,self.pos.y)\n type=enemy[\"type\"]\n level=enemy[\"level\"]\n archive=[pos,type,level]\n self.game.player_1.spawn_enemy(archive)\n \n def spawn_after(self,seconds):\n if pg.time.get_ticks()-self.last_spawn>=seconds*1000:\n self.spawn()\n self.last_spawn=pg.time.get_ticks()\n \n def update(self):\n self.rect.x=self.pos.x\n self.rect.y=self.pos.y\n if self.it==0:\n self.spawn()\n \n self.it-=1\n self.spawn_after(5)\n \n\nclass Platform(pg.sprite.Sprite):\n def __init__(self, game, x, y,w,h,is_slippery=False):\n self.groups = game.platforms\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.rect=pg.Rect(x,y,w,h)\n self.is_slippery=is_slippery\n self.rect.x=x\n self.rect.y=y\n\n \n\n \n \n","sub_path":"Project/Project/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":52027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"70133758","text":"#!/usr/bin/python3\n\nfrom __future__ import division\nfrom __future__ import print_function, unicode_literals\nfrom random import randint\nfrom os import urandom\nimport math as m\nimport string\nimport sys\n\n# \n# Generate binnary randons.\n#\n\ndef genbyte(length):\n return urandom(length)\n\n#\n# Generate rangdom numbers.\n#\n\ndef genran(length):\n num_list = []\n for i in range(length):\n num_list.append(randint(0,300))\n return num_list\n\n#\n# Expand number by continuous fractions.\n#\n\ndef genkey(N):\n while True:\n yield N//1\n f = N - (N//1)\n if f < 0.0001:\n break\n N = 1/f\n\n#\n# Special function to xor two strings.\n#\n\ndef xor_strings(s, t):\n \"\"\"xor two strings together\"\"\"\n if isinstance(s, str):\n # Text strings contain single characters\n return b\"\".join(chr(ord(a) ^ ord(b)) for a, b in zip(s, t))\n else:\n # Python 3 bytes objects contain integer values in the range 0-255\n return bytes([a ^ b for a, b in zip(s, t)])\n\ndef main():\n\n # Define a key.\n\n key_seed = 23.1\n garb_length = 100\n jump = 2\n\n # Create the key.\n\n clear_message = \"This is a cool message which cannot be seen !!\"\n\n # Generate the key of \n\n key = list(genkey(m.sqrt(key_seed)))\n\n # Check the size of the key in relation to the message.\n\n if (len(clear_message) > jump**len(key)):\n sys.exit(\"ERROR: You need a better seed\")\n\n # Select the key due to jump.\n\n zkey = []\n\n for i in range(0,len(key),jump):\n zkey.append(key[i])\n\n # Separate the size of the key == message.\n\n zkey_ms = zkey[0:len(clear_message)]\n\n # Convert byte arrays.\n\n clear_message_byte = clear_message.encode('utf8')\n crypt_message_byte = ' '.join(map(str,zkey_ms)).encode('utf8')\n\n print (\"+ Clear message: \", clear_message_byte, \"\\n\")\n print (\"+ The Key: \", crypt_message_byte,\"\\n\")\n\n crypt_message_safe = xor_strings(clear_message_byte, crypt_message_byte)\n\n print (\"+ Crypt message (core): \", 
crypt_message_safe,\"\\n\")\n\n crypt_message_safe = crypt_message_safe + genbyte(garb_length)\n\n print (\"+ Crypt message (core + backward trash): \", crypt_message_safe,\"\\n\")\n\n crypt_message_safe = genbyte(garb_length) + crypt_message_safe \n\n print (\"+ Crypt message (backward trash + trashed message): \", crypt_message_safe,\"\\n\")\n\n file_out = open(\"test.dat\",\"w\")\n for i in range(len(crypt_message_safe)):\n file_out.write(str(crypt_message_safe[i])) # dieharder -a -f test.dat\n file_out.close()\n\n # Append trash to message.\n\n crypt_message_safe = xor_strings(crypt_message_safe[garb_length:garb_length + len(clear_message)], crypt_message_byte)\n\n print (\"+ Decrypted message: \", crypt_message_safe.decode('utf8'),\"\\n\")\n\nif __name__== \"__main__\":\n main()\n\n","sub_path":"crypy.py","file_name":"crypy.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"553221885","text":"#!/usr/bin/env python3\n\n# -*- coding: UTF-8 -*-\n\nimport sys, codecs\n\nsys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())\n\n# use this and then prefix all function names with corpus.\n#import corpus\n# or use this\nfrom corpus import loadTextFromFile, tokenize, getTokenCounts, prettyPrintFrequencyProfile, relativizeTokenCounts\n\nfrom math import log\n\n\nmytext = loadTextFromFile(\"pg873.txt\")\n\n# tokenize mytext and return list of tokens\ntokens = tokenize(mytext)\n\n# count tokens\nmydict = getTokenCounts(tokens)\nrelativizeTokenCounts(mydict)\n\n# pretty-print tokens and frequencies\n#prettyPrintFrequencyProfile(mydict, sortbyfrq=True, myreverse=True)\n\nmytext = loadTextFromFile(\"sports-bbc.txt\")\nmysportsdict = getTokenCounts(tokenize(mytext))\nrelativizeTokenCounts(mysportsdict)\n\nunknowntext = \"\"\"Yesterday we scored ten goals in the last 45 minutest of the game.\"\"\"\n\n\n\"\"\"\nWimbledon 2013: Andy Murray's victory could boost British tennis\nComments (149)\nAll too often sporting moments and achievements are given a misplaced historical significance. Not on Sunday.\nAndy Murray made genuine history on Wimbledon's Centre Court by winning the men's singles in straight sets against Serb Novak Djokovic to become the first British male champion since Fred Perry in 1936.\n Murray got to the top not because of the Lawn Tennis Association and this country's development programmes but in spite of them\nComparisons of this nature are always pretty futile, but as achievements go it ranks alongside England's World Cup triumph in 1966, England's Rugby World Cup victory in 2003 and Sir Bradley Wiggins ending Britain's long wait for a winner of the Tour de France.\nBut in some ways, Murray's achievement surpasses all those. For Wimbledon to host the greatest tennis tournament in the world for so many years without a winner or even a serious challenger has been a serious embarrassment. 
\n\"\"\"\n\n\n\"\"\"\nThe young fisherman likes pomegranates.\n\"\"\"\n\n\nukntokens = tokenize(unknowntext)\nhpgprob = 0.0\nbbcprob = 0.0\nfor token in ukntokens:\n hpgprob += log(mydict.get(token, 0.000000000001))\n bbcprob += log(mysportsdict.get(token, 0.000000000001))\nif hpgprob > bbcprob:\n print(\"This is probably related to the House of Pomeg.\")\nelse:\n print(\"This is probably related to BBC Sports articles\")\nprint(\"hpgprob:\", hpgprob)\nprint(\"bbcprob:\", bbcprob)\n","sub_path":"src/Week3/complete-fp.py","file_name":"complete-fp.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"77227774","text":"\"\"\"\n@version: python3.7\n@author: ‘mengyuantan‘\n@contact: tanmy1016@126.com\n@desc: Consider the SNR of genetic algorithm\n\"\"\"\nimport sys, getopt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.animation as animation\nimport tkinter.messagebox\nimport time\n\nis_debugging = False\nis_saving = False\nshow_detail = True\n\nROOM_SIZE = np.array([10, 10])\nDNA_SIZE = ROOM_SIZE[0] * ROOM_SIZE[1] # DNA length\nMIN_EFF_AREA = 0.88\nif is_debugging:\n N_GENERATIONS = 50\n POP_SIZE = 5 # population size\nelse:\n POP_SIZE = 100 # population size\n N_GENERATIONS = 3000\nCROSS_RATE = 0.8 # mating probability (DNA crossover)\nplt.figure(figsize=(12, 6)) # set the figure size\n\n# set parameters\ntethaHalf = 60\nm = np.int(- np.log(2) / np.log(np.cos(np.deg2rad(tethaHalf))))\nI0 = 0.73\nnLed = 60\nPt = 0.02 # W\nPt *= nLed * nLed\ngamma = 0.53 # A/W\ndimX, dimY, dimZ, REC_HEIGHT = 5, 5, 3, 0.85\nngx, ngy = dimX * 10, dimY * 10\nht, hr = dimZ, REC_HEIGHT\nhtr = ht - hr\nx = np.linspace(0 + 0.05, dimX - 0.05, ngx)\ny = np.linspace(0 + 0.05, dimY - 0.05, ngy)\nxr, yr = np.meshgrid(x, y)\n\n# load data\nnoise_value_data, Hn_value_data = np.array([]), np.array([])\n\nfor i in range(ROOM_SIZE[0]):\n for j in range(ROOM_SIZE[1]):\n noise_value_data = np.append(noise_value_data,\n np.load(r'noise_value_data/noise_value_%s.npy' % (str(i) + str(j))))\n Hn_value_data = np.append(Hn_value_data,\n np.load(r'Hn_value_data/Hn_value_%s.npy' % (str(i) + str(j))))\nnoise_value_data = noise_value_data.reshape(ROOM_SIZE[0], ROOM_SIZE[1], 50, 50)\nHn_value_data = Hn_value_data.reshape(ROOM_SIZE[0], ROOM_SIZE[1], 50, 50)\n\nid_num = np.load('log.npy')\n# id_num = 0\nroom_id = str(id_num).zfill(3)\nroom = np.load('room_data/%s.npy' % room_id)\n# room = np.ones((10, 10))\n#\n# room[:1, :3] = 0\n# room[-3:, :3] = 0\n\nroom_area = len(np.where(room == 1)[0])\nled_num = np.int((room_area / 25) - 1e-3) + 
1\nrepeat_arr = np.ones(10, dtype=np.int) * 5\nroom_mut = np.repeat(room, repeat_arr, axis=0)\nroom_mut = np.repeat(room_mut, repeat_arr, axis=1)\nroom_xx, room_yy = np.where(room == 0)[0] / 2 + 0.25, np.where(room == 0)[1] / 2 + 0.25\n\n\ndef plotting(DNA, gen, saving_pic, is_ending):\n plt.cla()\n DNA = DNA.reshape(-1, ROOM_SIZE[0], ROOM_SIZE[1])[0]\n xt, yt = [], []\n x, y = np.array([]), np.array([])\n S, N = np.zeros((ngx, ngy)) + 1e-9, np.zeros((ngx, ngy)) + 1e-9\n indexes = np.where(DNA == 1)\n led = len(indexes[0])\n for j in range(led):\n xt.append(indexes[0][j])\n yt.append(indexes[1][j])\n x = np.append(x, indexes[0][j] / 2 + 0.25)\n y = np.append(y, indexes[1][j] / 2 + 0.25)\n\n for k in range(len(xt)):\n S += (gamma ** 2) * ((Pt * Hn_value_data[xt[k]][yt[k]]) ** 2)\n N += noise_value_data[xt[k]][yt[k]]\n SNR = 10 * np.log10(S / N) * room_mut\n effect_zone = cal_effect_zone(SNR)\n\n # ax1 = plt.subplot(212)\n # ax1.cla()\n # max_value_idx = np.argmax(value_container)\n # show_max = 'value = %s' % str(round(value_container[max_value_idx][0], 3))\n # ax1.plot(max_value_idx, value_container[max_value_idx], 'rs')\n # ax1.annotate(show_max,\n # xytext=(max_value_idx * 0.8, value_container[max_value_idx]),\n # xy=(max_value_idx, value_container[max_value_idx]))\n # if is_ending:\n # ax1.plot(range(len(value_container[:max_value_idx + 1])), value_container[:max_value_idx + 1], 'k')\n # else:\n # ax1.plot(range(len(value_container)), value_container, 'k')\n # ax1.set_xlabel('Generations')\n # ax1.set_ylabel('Effect Area (%)')\n\n # plt.subplot(221)\n # fig = plt.gcf()\n # ax = fig.add_subplot(2, 2, 1, projection='3d')\n # ax.plot_surface(xr, yr, SNR.T, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))\n # ax.set_xlabel('X (m)')\n # ax.set_ylabel('Y (m)')\n # ax.set_zlabel('SNR (dB)')\n # ax.set_title('Generations : %d ' % gen)\n\n plt.subplot(121)\n plt.cla()\n levels = np.hstack((np.linspace(np.min(SNR[SNR != 0]), 13.6 - (np.max(SNR) - 13.6) / 4, 4),\n 
np.linspace(13.6, np.max(SNR), 5))) if np.max(SNR) > 13.6 else np.linspace(0, np.max(SNR), 9)\n plt.contourf(xr, yr, SNR.T, levels=levels, alpha=.75)\n C = plt.contour(xr, yr, SNR.T, levels=levels, colors='black', linewidths=1)\n plt.clabel(C, fmt='%.1f', inline=True, fontsize=8)\n plt.xlabel('X (m)')\n plt.ylabel('Y (m)')\n plt.title('SNR (dB) Effect Area: {0} %'.format(round(round(effect_zone, 4) * 100, 2)))\n\n plt.subplot(122)\n plt.scatter(x, y)\n plt.scatter(room_xx, room_yy, s=[1200], marker='s', c='gray')\n plt.xlim(0, 5)\n plt.ylim(0, 5)\n plt.xlabel('X (m)')\n plt.ylabel('Y (m)')\n # plt.title('Generations : %d ' % gen)\n plt.title('room model')\n\n # if saving_pic:\n # plt.savefig('room_result_SNR_contourline/%s_fig.jpg' % room_id)\n plt.grid()\n plt.pause(0.1)\n\n\ndef cal_effect_zone(arr): return len(arr[arr >= 13.6]) / (room_area * 25)\n\n\ndef LED_fun(cur, tar): return np.abs(cur - tar)\n\n\ndef get_common(loc): return np.argsort(np.bincount(loc))[::-1][:led_num]\n\n\ndef F(source): # source shape [-1, 100]\n source = source.reshape(-1, ROOM_SIZE[0], ROOM_SIZE[1])\n value, value_orig, led_gap = np.array([]), np.array([]), np.array([])\n for i in range(source.shape[0]):\n indexes = np.where(source[i] == 1)\n xt, yt = [], []\n led = len(indexes[0])\n for j in range(led):\n xt.append(indexes[0][j])\n yt.append(indexes[1][j])\n S, N = np.zeros((ngx, ngy)) + 1e-9, np.zeros((ngx, ngy)) + 1e-9\n\n for k in range(len(xt)):\n S += (gamma ** 2) * ((Pt * Hn_value_data[xt[k]][yt[k]]) ** 2)\n N += noise_value_data[xt[k]][yt[k]]\n\n SNR = 10 * np.log10(S / N) * room_mut\n try:\n SNR_min, SNR_max, SNR_avg = np.min(SNR[SNR != 0]), np.max(SNR[SNR != 0]), np.mean(SNR[SNR != 0])\n except:\n SNR_min, SNR_max, SNR_avg = 1e-9, 1e-9, 1e-9\n\n led_gap = np.append(led_gap, LED_fun(cur=led, tar=led_num))\n value_orig = np.append(value_orig, cal_effect_zone(SNR))\n value = value_orig\n value[led_gap != 0] = 0\n\n return value, (SNR_min, SNR_max, SNR_avg) # to find the maximum 
of this function\n\n\n# find non-zero fitness for selection\ndef get_fitness(pred): return pred + 1e-9 # + 1e-3 - np.min(pred)\n\n\n# def get_fitness(pred): return ((pred - np.min(pred)) + 1e-3) / ((np.max(pred) - np.min(pred)) + 1e-3)\n\ndef select(pop, fitness): # nature selection wrt pop's fitness\n idx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,\n p=fitness / fitness.sum())\n return pop[idx]\n\n\ndef crossover(parent, pop): # mating process (genes crossover)\n if np.random.rand() < CROSS_RATE:\n i_ = np.random.randint(0, POP_SIZE, size=1) # select another individual from pop\n cross_points = np.random.randint(0, 2, size=DNA_SIZE).astype(np.bool) # choose crossover points\n parent[cross_points] = pop[i_, cross_points] # mating and produce one child\n return parent\n\n\ndef mutate(child, rate):\n for point in range(DNA_SIZE):\n if np.random.rand() < rate:\n child[point] = 1 if child[point] == 0 else 0\n return child\n\n\ndef run(pre_pop):\n start_time = time.time()\n global value_container\n value_container = [0]\n DNA_saver = None\n MUTATION_RATE = 0.02 # mutation probability\n Flag = False\n\n # Start initialization\n if pre_pop is None:\n pop = np.array([])\n for _ in range(POP_SIZE):\n each = np.zeros((1, DNA_SIZE))\n each[0][np.random.choice(DNA_SIZE, led_num, replace=False)] = 1\n pop = np.append(pop, each)\n pop = pop.reshape(POP_SIZE, DNA_SIZE)\n else:\n pop = pre_pop\n\n pop_and = np.tile(room.reshape((1, DNA_SIZE)), (POP_SIZE, 1))\n\n plt.ion()\n\n for count in range(N_GENERATIONS):\n pop = pop * pop_and\n F_values, _ = F(pop)\n\n # GA part (evolution)\n fitness = get_fitness(F_values)\n if count % 50 == 0:\n most_fitted_DNA = pop[np.argmax(fitness), :].reshape(1, DNA_SIZE)\n value, detail = F(most_fitted_DNA)\n if show_detail:\n if value > np.max(value_container):\n DNA_saver = [most_fitted_DNA, count]\n # print('Generations: %d value = %f position: %s' %\n # (count, value, np.where(most_fitted_DNA.reshape(1, DNA_SIZE) == 
1)[1]))\n value_container.append(value)\n # plotting(most_fitted_DNA, count, saving_pic=False, is_ending=False)\n elif count % 500 == 0:\n print('Generation: %d' % count)\n if np.max(value_container) > MIN_EFF_AREA:\n # np.save('log.npy', np.int(id_num + 1))\n break\n\n if count == N_GENERATIONS - 1:\n # np.save('log.npy', np.int(id_num + 1))\n if DNA_saver and np.max(value_container) > MIN_EFF_AREA:\n # np.save('room_result_SNR_contourline/%s_out.npy' % room_id, DNA_saver[0])\n # plotting(DNA_saver[0], DNA_saver[1], saving_pic=True, is_ending=True)\n print('{0} Finish.'.format(room_id))\n else:\n # np.save('room_result_SNR_contourline/%s_out_error.npy' % room_id, np.zeros((1, DNA_SIZE)))\n print('********************************************Failed.********************************************')\n # plotting(most_fitted_DNA, N_GENERATIONS, saving_pic=True, is_ending=False)\n break\n\n # crossover and mutate\n pop = select(pop, fitness) # select the parent\n pop_copy = pop.copy()\n for parent in pop:\n child = crossover(parent, pop_copy)\n child = mutate(child, MUTATION_RATE)\n parent[:] = child # parent is replaced by its child\n\n end_time = time.time()\n use_time = round(end_time - start_time, 3)\n print(\"Program running time: {}s\".format(use_time))\n time_saver = np.load(\"time_saver.npy\")\n time_saver = np.append(time_saver, use_time)\n np.save(\"time_saver.npy\", time_saver)\n # plt.ioff()\n # plt.show()\n\n\nif __name__ == '__main__':\n run(pre_pop=None)\n","sub_path":"GA_each_shape_SNR.py","file_name":"GA_each_shape_SNR.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"46266200","text":"from . import household as household\nfrom django.contrib.auth.decorators import login_required\nfrom ..models.household_models import Household\nfrom ..forms.household_forms import HouseholdForm\n\n\ndef save_form_old(request, form):\n form_saved = False\n if form.is_valid() and form.has_changed():\n form_saved = False\n fas_object = form.save(commit=False)\n fas_object.household = household.get(request.session['household'])\n fas_object.save()\n form_saved = True\n return form_saved\n\n\ndef save_form_with_no_has_change(request, form):\n form_saved = False\n if form.is_valid():\n form_saved = False\n fas_object = form.save(commit=False)\n fas_object.household = household.get(request.session['household'])\n fas_object.save()\n form_saved = True\n return form_saved\n\n\ndef save_forms(request, forms):\n form_saved = False\n for form in forms:\n if form.is_valid() and form.has_changed():\n form_saved = False\n fas_object = form.save(commit=False)\n fas_object.household = household.get(request.session['household'])\n fas_object.save()\n form_saved = True # TODO: add proper check to verify if all forms are saved\n return form_saved\n\n\ndef save_formset(forms, model, household_id, **kwargs): #Pass key-word args for filter\n \"\"\"add, update and delete models using formset\"\"\"\n if forms.is_valid():\n active_ids = []\n for form in forms:\n try:\n form_id = form.data[form.prefix+'-id']\n except KeyError:\n form_id = None\n if form.is_valid() and form.has_changed():\n record = form.save(commit=False)\n if form_id:\n record.id = int(form_id)\n record.household = get_object_or_none(Household, household_id)\n record.save()\n active_ids.append(record.id)\n else:\n if form_id:\n active_ids.append(int(form_id))\n all_ids = list(model.objects.filter(household=household_id, **kwargs).values_list('id',flat=True))\n model.objects.filter(id__in=[ x for x in all_ids if x not in active_ids]).delete()\n return True\n return False\n\n\ndef 
save_form(form, household_id):\n \"\"\"add or update model using modelForm\"\"\"\n if form.is_valid():\n if form.has_changed():\n record = form.save(commit=False)\n try:\n if not form.prefix == None:\n form_id = form.data[form.prefix+'-id']\n else:\n form_id = None\n except KeyError:\n form_id = None\n if form_id:\n record.id = int(form_id)\n record.household = get_object_or_none(Household, household_id)\n record.save()\n return True\n return True\n return False\n\n\ndef get_object_or_none(model, household_id, **kwargs):\n \"\"\"get model object or return None\"\"\"\n try:\n if model == Household:\n model_object = model.objects.get(pk=household_id, **kwargs)\n else:\n model_object = model.objects.get(household=household_id, **kwargs)\n except model.DoesNotExist:\n model_object = None\n return model_object\n\n\ndef is_empty(field):\n if field is None or field == '':\n return True\n else:\n return False\n\ndef get_search_form():\n return HouseholdForm()","sub_path":"fas_questionnaire_site/fas_questionnaire/views/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"175976383","text":"import torch\nimport math\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport numpy as np\nfrom torch.optim.optimizer import Optimizer\n\n\n# decoder_optim = torch.optim.Adam(model.decoder.parameters(), amsgrad=True)\n# decoder_optim = torch.optim.Adam(model.decoder.parameters())\n# decoder_optim = torch.optim.SGD(model.encoder.parameters(), lr=args.lr)\n\n\ndef get_optimizer(params, optimizer, lr):\n if optimizer == 'amsgrad': return torch.optim.Adam(params, lr=lr, amsgrad=True)\n elif optimizer == 'adam': return torch.optim.Adam(params, lr=lr)\n elif optimizer == 'adamw': return AdamW(params, lr=lr, weight_decay=1e-5)\n elif optimizer == 'sgd' or optimizer is None: return torch.optim.SGD(params, lr=lr)\n\n\ndef get_scheduler(args, optim, train_data=None):\n # if None, do the original annealing from lr = 20.\n if args.scheduler == 'cosine_anneal':\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, T_max=20000, eta_min=0, last_epoch=-1)\n elif args.scheduler == 'cosine_restart':\n scheduler = CosineLRWithRestarts(optim, args.batch_size, args.epochs, restart_period=5, t_mult=1.2)\n elif args.scheduler == 'lro':\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode='min')\n elif args.scheduler == 'multi_step':\n max_epochs = args.epochs * train_data.size(0) / args.bptt\n mstones = list(range(0, max_epochs, max_epochs / 10))\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=mstones, gamma=0.1)\n return scheduler\n\n\n\"\"\"\ndef get_scheduler(optim, args, train_size = None):\n if args.scheduler == 'cosine_anneal':\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, T_max=20000, eta_min=0, last_epoch=-1)\n elif args.scheduler == 'lro':\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode='min')\n elif args.scheduler == 'multi_step':\n if train_size is None:\n ValueError(\"Need to provide train data as argument\")\n 
max_epochs = args.epochs * train_size / args.bptt\n mstones = list(range(0, max_epochs, max_epochs / 10))\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=mstones, gamma=0.1)\n return scheduler\n\"\"\"\n\ndef sgd_update(pretrained, params, lr):\n if pretrained is not None:\n for name, p in params:\n if name is not 'encoder.weight':\n p.data.add_(-lr, p.grad.data)\n else:\n for p in params:\n p.data.add_(-lr, p.grad.data)\n\n\n\"\"\"Optimizer for Actor Critic Training\"\"\"\nclass Optim(object):\n def __init__(self, params, lr, is_pre,\n grad_clip, new_lr=0.0, weight_decay=0.):\n self.optimizer = torch.optim.Adam(params, lr=lr, betas=(\n 0.9, 0.98), eps=1e-09, weight_decay=weight_decay)\n self.grad_clip = grad_clip\n self.params = params\n if is_pre:\n self.step = self.pre_step\n else:\n assert new_lr != 0.0\n\n self.n_current_steps = 0\n self.new_lr = new_lr\n self.step = self.train_step\n\n def train_step(self):\n self.optimizer.step()\n\n self.n_current_steps += 1\n if self.n_current_steps == 1e6:\n self.update_learning_rate()\n\n def pre_step(self):\n self.optimizer.step()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def clip_grad_norm(self):\n torch.nn.utils.clip_grad_norm(self.params, self.grad_clip)\n\n def update_learning_rate(self):\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.new_lr\n\n\nclass Policy_optim(Optim):\n def __init__(self, params, lr, grad_clip, new_lr):\n super().__init__(params, lr, False, grad_clip, new_lr)\n\n def train_step(self, reward):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n p.grad = p.grad.mul(reward)\n\n self.optimizer.step()\n\n self.n_current_steps += 1\n if self.n_current_steps == 1e6:\n self.update_learning_rate()\n\n\n# Non-centered RMSprop update with shared statistics (without momentum)\nclass SharedRMSprop(torch.optim.RMSprop):\n def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, 
weight_decay=0):\n super(SharedRMSprop, self).__init__(params, lr=lr, alpha=alpha, eps=eps, weight_decay=weight_decay, momentum=0, centered=False)\n\n # State initialisation (must be done before step, else will not be shared between threads)\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'] = p.data.new().resize_(1).zero_()\n state['square_avg'] = p.data.new().resize_as_(p.data).zero_()\n\n def share_memory(self):\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'].share_memory_()\n state['square_avg'].share_memory_()\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n square_avg = state['square_avg']\n alpha = group['alpha']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n # g = αg + (1 - α)Δθ^2\n square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)\n # θ ← θ - ηΔθ/√(g + ε)\n avg = square_avg.sqrt().add_(group['eps'])\n p.data.addcdiv_(-group['lr'], grad, avg)\n\n return loss\n\n \nclass SharedAdam(torch.optim.Adam):\n \"\"\"Implements Adam algorithm with shared states.\n \"\"\"\n\n def __init__(self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0):\n super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)\n\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'] = torch.zeros(1)\n state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()\n state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()\n\n def share_memory(self):\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'].share_memory_()\n state['exp_avg'].share_memory_()\n state['exp_avg_sq'].share_memory_()\n\n def 
step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step'].item()\n bias_correction2 = 1 - beta2 ** state['step'].item()\n step_size = group['lr'] * math.sqrt(\n bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss\n\n\n\n\n\n\n\n# ------------------ Adam With Warm Restarts ----------------------- #\nclass AdamW(Optimizer):\n \"\"\"Implements Adam algorithm.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, amsgrad=False):\n if not 0.0 <= betas[0] < 1.0:\n raise 
ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, amsgrad=amsgrad)\n # super(AdamW, self).__init__(params, defaults)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. 
of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n if group['weight_decay'] != 0:\n decayed_weights = torch.mul(p.data, group['weight_decay'])\n p.data.addcdiv_(-step_size, exp_avg, denom)\n p.data.sub_(decayed_weights)\n else:\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss\n\n\nclass CosineLRWithRestarts(_LRScheduler):\n \"\"\"Decays learning rate with cosine annealing, normalizes weight decay\n hyperparameter value, implements restarts.\n https://arxiv.org/abs/1711.05101\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n batch_size: minibatch size\n epoch_size: training samples per epoch\n restart_period: epoch count in the first restart period\n t_mult: multiplication factor by which the next restart period will extend/shrink\n\n\n Example:\n >>> scheduler = CosineLRWithRestarts(optimizer, 32, 1024, restart_period=5, t_mult=1.2)\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> ...\n >>> optimizer.zero_grad()\n >>> loss.backward()\n >>> optimizer.step()\n >>> scheduler.batch_step()\n >>> validate(...)\n \"\"\"\n\n def __init__(self, optimizer, batch_size, epoch_size, restart_period=100,\n t_mult=2, last_epoch=-1, eta_threshold=1000, verbose=False):\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n if last_epoch == -1:\n for group in optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n else:\n for i, group in enumerate(optimizer.param_groups):\n if 'initial_lr' not in group:\n raise KeyError(\"param 'initial_lr' is not specified \"\n \"in param_groups[{}] when resuming an\"\n \" optimizer\".format(i))\n self.base_lrs = list(map(lambda group: 
group['initial_lr'],\n optimizer.param_groups))\n\n self.last_epoch = last_epoch\n self.batch_size = batch_size\n self.iteration = 0\n self.epoch_size = epoch_size\n self.eta_threshold = eta_threshold\n self.t_mult = t_mult\n self.verbose = verbose\n self.base_weight_decays = list(map(lambda group: group['weight_decay'],\n optimizer.param_groups))\n self.restart_period = restart_period\n self.restarts = 0\n self.t_epoch = -1\n self.batch_increments = []\n self._set_batch_increment()\n\n def _schedule_eta(self):\n \"\"\"\n Threshold value could be adjusted to shrink eta_min and eta_max values.\n \"\"\"\n eta_min = 0\n eta_max = 1\n if self.restarts <= self.eta_threshold:\n return eta_min, eta_max\n else:\n d = self.restarts - self.eta_threshold\n k = d * 0.09\n return (eta_min + k, eta_max - k)\n\n def get_lr(self, t_cur):\n eta_min, eta_max = self._schedule_eta()\n\n eta_t = (eta_min + 0.5 * (eta_max - eta_min)\n * (1. + math.cos(math.pi *\n (t_cur / self.restart_period))))\n\n weight_decay_norm_multi = math.sqrt(self.batch_size /\n (self.epoch_size *\n self.restart_period))\n lrs = [base_lr * eta_t for base_lr in self.base_lrs]\n weight_decays = [base_weight_decay * eta_t * weight_decay_norm_multi\n for base_weight_decay in self.base_weight_decays]\n\n if self.t_epoch % self.restart_period < self.t_epoch:\n if self.verbose:\n print(\"Restart at epoch {}\".format(self.last_epoch))\n self.restart_period *= self.t_mult\n self.restarts += 1\n self.t_epoch = 0\n\n return zip(lrs, weight_decays)\n\n def _set_batch_increment(self):\n d, r = divmod(self.epoch_size, self.batch_size)\n batches_in_epoch = d + 2 if r > 0 else d + 1\n self.iteration = 0\n self.batch_increments = list(np.linspace(0, 1, batches_in_epoch))\n\n def step(self):\n self.last_epoch += 1\n self.t_epoch += 1\n self._set_batch_increment()\n self.batch_step()\n\n def batch_step(self):\n try:\n t_cur = self.t_epoch + self.batch_increments[self.iteration]\n self.iteration += 1\n except (IndexError):\n raise 
RuntimeError(\"Epoch size and batch size used in the \"\n \"training loop and while initializing \"\n \"scheduler should be the same.\")\n\n for param_group, (lr, weight_decay) in zip(self.optimizer.param_groups,\n self.get_lr(t_cur)):\n param_group['lr'] = lr\n param_group['weight_decay'] = weight_decay\n\n\n'''A wrapper class for optimizer '''\n\n\nclass ScheduledOptim():\n '''A simple wrapper class for learning rate scheduling'''\n\n def __init__(self, optimizer, d_model, n_warmup_steps):\n self._optimizer = optimizer\n self.n_warmup_steps = n_warmup_steps\n self.n_current_steps = 0\n self.init_lr = np.power(d_model, -0.5)\n\n def step_and_update_lr(self):\n \"Step with the inner optimizer\"\n self._update_learning_rate()\n self._optimizer.step()\n\n def zero_grad(self):\n \"Zero out the gradients by the inner optimizer\"\n self._optimizer.zero_grad()\n\n def _get_lr_scale(self):\n return np.min([\n np.power(self.n_current_steps, -0.5),\n np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])\n\n def _update_learning_rate(self):\n ''' Learning rate scheduling per step '''\n\n self.n_current_steps += 1\n lr = self.init_lr * self._get_lr_scale()\n\n for param_group in self._optimizer.param_groups:\n param_group['lr'] = lr","sub_path":"models/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":18118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"561417631","text":"from django.shortcuts import get_object_or_404, render, redirect\r\nfrom django.http import HttpResponse, JsonResponse\r\nimport os\r\nimport datetime\r\nimport json\r\nfrom rest_framework import status\r\n\r\nfrom tools._models.MasterContacts import *\r\nfrom tools._models.Vendors import (VendorContact, )\r\nfrom tools._models.Contact_Customer import *\r\nfrom userprofile.models import (Profile, Broker, )\r\nfrom tools._forms.MasterContactForms import *\r\nfrom tools.serializers import *\r\n\r\ndef get_New_Contact_Number():\r\n last_object = MasterContact.objects.all().order_by('id').last()\r\n if not last_object:\r\n return 10001\r\n return last_object.Contact_Number + 1\r\n\r\ndef get_New_Vendor_Contact_Number():\r\n last_object = VendorContact.objects.all().order_by('id').last()\r\n if not last_object:\r\n return 10001\r\n return last_object.Vendor_Contact_Number + 1\r\n\r\ndef index(request):\r\n if request.user.is_superuser != True:\r\n try:\r\n profile = Profile.objects.get(user_id=request.user.id)\r\n items = MasterContact.objects.exclude(Status=Status.Inactive.value).filter(Broker_id=profile.Broker_id)\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n else:\r\n items = MasterContact.objects.exclude(Status=Status.Inactive.value)\r\n\r\n return render(request, \"tools/contact/index_partial.html\", \r\n {\r\n 'items': items,\r\n })\r\n\r\ndef add_contact(request):\r\n if request.user.is_superuser == True:\r\n return redirect('/tools/contact/')\r\n\r\n try:\r\n profile = Profile.objects.get(user_id=request.user.id)\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n\r\n if request.method == \"POST\": \r\n mform = MasterContactForm(request.POST)\r\n if mform.is_valid():\r\n new_model = mform.save(commit=False) \r\n new_model.Contact_Number = get_New_Contact_Number()\r\n new_model.Status = Status.Active.value\r\n new_model.User_id = request.user.id\r\n 
new_model.Broker_id = profile.Broker_id\r\n new_model.save()\r\n company = request.POST.getlist('Company')\r\n #company_data = json.loads(company)\r\n list_contact_models = []\r\n for item in company:\r\n new_contact = Contact_Customer()\r\n new_contact.Customer_id = int(item)\r\n new_contact.MasterContact_id = new_model.id\r\n list_contact_models.append(new_contact)\r\n if len(list_contact_models) > 0:\r\n # Call bulk_create to create records in a single call\r\n Contact_Customer.objects.bulk_create(list_contact_models)\r\n return redirect('/tools/contact/')\r\n else:\r\n print(mform.errors)\r\n else:\r\n mform = MasterContactForm(Broker_id=profile.Broker_id,instance=MasterContact()) \r\n\r\n return render(request, 'tools/contact/change_contact_partial.html', {\r\n 'form': mform,\r\n 'form_name': mform.__class__.__name__,\r\n 'is_change': False,\r\n })\r\n\r\ndef edit_contact(request, id): \r\n if request.user.is_superuser == True:\r\n return redirect('/tools/contact/')\r\n try:\r\n profile = Profile.objects.get(user_id=request.user.id)\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n try:\r\n if request.method == \"POST\": \r\n obj_model = MasterContact.objects.get(id=id)\r\n fac_ID = obj_model.Contact_Number\r\n status = obj_model.Status\r\n user_id = obj_model.User_id\r\n broker_id = obj_model.Broker_id\r\n mform = MasterContactForm(request.POST or None, instance=obj_model)\r\n if mform.is_valid():\r\n obj_model.Contact_Number = fac_ID\r\n obj_model.Status = status\r\n obj_model.User_id = user_id\r\n obj_model.Broker_id = broker_id\r\n obj_model.save()\r\n recommendations = Contact_Customer.objects.filter(MasterContact_id=obj_model.id)\r\n if recommendations.exists():\r\n recommendations.delete()\r\n company = request.POST.getlist('Company')\r\n #company_data = json.loads(company)\r\n list_contact_models = []\r\n for item in company:\r\n new_contact = Contact_Customer()\r\n new_contact.Customer_id = int(item)\r\n 
new_contact.MasterContact_id = obj_model.id\r\n list_contact_models.append(new_contact)\r\n if len(list_contact_models) > 0:\r\n # Call bulk_create to create records in a single call\r\n Contact_Customer.objects.bulk_create(list_contact_models)\r\n return redirect('/tools/contact/')\r\n else:\r\n print(mform.errors)\r\n else:\r\n obj_model = MasterContact.objects.get(id=id)\r\n mform = MasterContactForm(Broker_id=profile.Broker_id,instance=obj_model)\r\n mylist = []\r\n contact_cust_list = Contact_Customer.objects.filter(MasterContact_id = obj_model.id)\r\n for item in contact_cust_list:\r\n mylist.append(item.Customer.id)\r\n mform.fields['Company'].initial = mylist\r\n print(mylist)\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n\r\n return render(request, 'tools/contact/change_contact_partial.html', {\r\n 'form': mform,\r\n 'form_name': mform.__class__.__name__,\r\n 'is_change': True,\r\n })\r\n\r\ndef delete_contact(request):\r\n if request.user.is_superuser == True:\r\n return redirect('/tools/contact/')\r\n\r\n try:\r\n id = int(request.GET['id'])\r\n item = MasterContact.objects.get(id=id)\r\n item.Status = Status.Inactive.value\r\n item.save()\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n\r\n json = \"{\\\"success\\\": true, \\\"id\\\":\\\"\"+ str(id) +\"\\\"}\"\r\n return HttpResponse(json)\r\n\r\ndef add_contact_with_json(request):\r\n if request.method == \"POST\": \r\n try:\r\n profile = Profile.objects.get(user_id=request.user.id)\r\n \r\n mform = MasterContactForm(request.POST)\r\n if mform.is_valid():\r\n new_model = mform.save(commit=False) \r\n new_model.Contact_Number = get_New_Contact_Number()\r\n new_model.Status = Status.Active.value\r\n new_model.User_id = request.user.id\r\n new_model.Broker_id = profile.Broker_id\r\n new_model.save()\r\n company = request.POST.getlist('Company')\r\n print(company)\r\n list_contact_models = []\r\n for item in company:\r\n new_contact = 
Contact_Customer()\r\n new_contact.Customer_id = int(item)\r\n new_contact.MasterContact_id = new_model.id\r\n list_contact_models.append(new_contact)\r\n if len(list_contact_models) > 0:\r\n # Call bulk_create to create records in a single call\r\n Contact_Customer.objects.bulk_create(list_contact_models)\r\n # Return json result\r\n vendor_number = get_New_Vendor_Contact_Number()\r\n json_item = MasterContactSerializer(new_model)\r\n obj_json = {\r\n \"success\": True,\r\n \"Vendor_Number\": vendor_number,\r\n \"Master_Contact\": json_item.data\r\n }\r\n return JsonResponse(obj_json, status=status.HTTP_201_CREATED)\r\n except Exception as error:\r\n print('Caught this error: ' + repr(error))\r\n pass \r\n\r\n obj_json = {\r\n \"success\": False,\r\n \"Vendor_Number\": \"\",\r\n \"Master_Contact\": None\r\n }\r\n return JsonResponse(obj_json, status=status.HTTP_201_CREATED)\r\n\r\ndef get_model_using_current_contact(request):\r\n obj_id=request.GET.get('id')\r\n item = MasterContact.objects.get(id=obj_id)\r\n\r\n facilitycontact_set = item.facilitycontact_set.all()\r\n vendorcontact_set = item.vendorcontact_set.all() \r\n gen_mastercontact = item.gen_mastercontact.all()\r\n certification_set = item.certification_set.all()\r\n Order_Order_Contact = item.Order_Order_Contact.all()\r\n Order_Billing_Contact = item.Order_Billing_Contact.all()\r\n\r\n\r\n return render(request, 'tools/contact/list_of_item_using_contact.html', \r\n {\r\n 'item': item,\r\n 'facilitycontact_set': facilitycontact_set,\r\n 'vendorcontact_set': vendorcontact_set,\r\n 'gen_mastercontact': gen_mastercontact,\r\n 'certification_set': certification_set,\r\n 'Order_Order_Contact': Order_Order_Contact,\r\n 'Order_Billing_Contact': Order_Billing_Contact,\r\n })\r\n","sub_path":"tools/_views/contact_views.py","file_name":"contact_views.py","file_ext":"py","file_size_in_byte":9194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"622543277","text":"import spacy\nfrom spacy.matcher import PhraseMatcher\nfrom TweetBase import TweetBase\nimport HardQuery\nfrom spacy.symbols import *\nimport re\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom random import randint\n\nspacyNLP = spacy.load(\"en_core_web_sm\")\nsynonyms = {\"television\":[\"tv\"],\"picture\":[\"motion picture\",\"movie\",\"film\",\"feature film\"],\"film\":[\"movie\",\"feature film\"],\"movie\":[\"motion picture\",\"film\"]}\n\nclass AwardParser(object):\n def __init__(self,year,filepath = None):\n if filepath == None:\n self.datab = TweetBase(\"gg\" + str(year) + \".json\",year)\n else:\n self.datab = TweetBase(filepath,year)\n print(\"Starting cull\",year)\n self.datab.cullTwitterPunctuation()\n #self.awardFilter = self.datab.anyStringList([\"award for Best \"])\n self.actualAwards = []\n self.year = year\n self.movienames = []\n self.winners = {}\n self.HardQueries = {}\n\n def PopulateHardQueries(self):\n if len(self.HardQueries) == 0:\n self.HardQueries = self.datab.PopulateHardQueries() \n \n def HostFinder(self):\n firstcull = self.datab.anyStringList([\" is hosting\"])\n secondcull = self.datab.anyStringList([\" are hosting\"])\n \n multihost = len(secondcull) > len(firstcull)\n\n if multihost: firstcull = secondcull\n\n docs = []\n hostVote = {}\n\n for tweet in firstcull:\n docs.append(spacyNLP(tweet))\n\n for doc in docs:\n for ent in doc.ents:\n if ent.label_ == \"PERSON\":\n if len(ent.text.split(\" \")) == 1:\n continue\n if ent.lower_ in hostVote:\n hostVote[ent.lower_] += 1\n else:\n hostVote[ent.lower_] = 1\n \n if multihost:\n hVs = sorted(hostVote,key = hostVote.get)\n return [hVs[-1],hVs[-2]]\n\n return max(hostVote,key=hostVote.get)\n\n def WinnerFinder(self, award, nomType):\n self.PopulateHardQueries()\n\n firstcull = list(filter(lambda tweet: any(kw in tweet for kw in [\"wins\",\"goes to\",award + \"-\"]),self.HardQueries[award]))\n docs = []\n winVote = {}\n if nomType == 
\"PERSON\":\n for tweet in firstcull:\n docs.append(spacyNLP(tweet))\n \n for doc in docs:\n for ent in doc.ents:\n if ent.label_ == \"PERSON\":\n if ent.lower_ in winVote:\n winVote[ent.lower_] += 1\n else:\n winVote[ent.lower_] = 1\n else:\n for tweet in firstcull:\n titlefind = list(re.finditer(r\"([\\\"])(?:(?=(\\\\?))\\2.)*?\\1\", tweet))\n if len(titlefind) > 0:\n if titlefind[0][0] in winVote:\n winVote[titlefind[0][0]] += 1\n else:\n winVote[titlefind[0][0]] = 1 \n\n\n \n if len(winVote) == 0:\n return \"\"\n return max(winVote, key=winVote.get)\n\n def acceptActualAwards(self, actualList):\n self.actualAwards = actualList\n\n def FindAllWinners(self):\n print(\"finding winners\",self.year)\n allWinners = {}\n self.PopulateHardQueries()\n \n for aA in self.actualAwards:\n nomType = \"MEDIA\"\n if \"act\" in aA or \"direct\" in aA or \"screenp\" in aA or \"award\" in aA:\n nomType = \"PERSON\"\n \n if nomType == \"PERSON\":\n win1 = self.TheyWonAwardParser(aA,nomType)\n if not win1 or win1 == \"who\":\n win1 = self.WinnerFinder(aA,nomType)\n if win1 == '':\n win1 = self.CongratulationsParser(aA,nomType)\n if win1 == None or win1 == False:\n win1 = ''\n allWinners[aA] = win1\n else:\n win1 = self.WinnerFinder(aA,nomType)\n if win1 == '':\n win1 = self.TheyWonAwardParser(aA,nomType)\n if win1 == False:\n win1 = self.CongratulationsParser(aA,nomType)\n if win1 == None:\n win1 = ''\n win1 = win1.replace(\"\\\"\",\"\").lower()\n allWinners[aA] = win1\n\n self.winners = allWinners\n return allWinners\n\n def FindAllNominees(self):\n print(\"finding nominees\",self.year)\n allNominees = {}\n for aA in self.actualAwards:\n nomType = \"MEDIA\"\n \n if \"act\" in aA or \"direct\" in aA or \"screenp\" in aA or \"cec\" in aA:\n nomType = \"PERSON\"\n \n nN = self.newNominees(aA)\n if nN != []:\n allNominees[aA] = nN\n continue\n\n mergedNominees = self.NominatedForParser(aA,nomType)\n if aA in self.winners and self.winners[aA] not in [False,None,'']:\n 
mergedNominees.update(self.BeatParser([self.winners[aA]],nomType))\n\n \n if len(mergedNominees) > 9:\n allNominees[aA] = sorted(mergedNominees,key=mergedNominees.get)[-9:]\n else:\n allNominees[aA] = list(mergedNominees)\n \n return allNominees\n \n\n \"\"\"def NomineeFinder(self, award, nomType):\n firstcull = self.datab.filterStringList([award, \"\\\"\"])\n docs = []\n nomVote = {}\n\n for tweet in firstcull:\n if nomType == \"PERSON\":\n doc = spacyNLP(tweet)\n for ent in doc.ents:\n if ent.label_ == \"PERSON\":\n print(ent)\n if ent.lower_ in nomVote:\n nomVote[ent.lower_] += 1\n else:\n nomVote[ent.lower_] = 1\n elif nomType == \"TITLE\":\n titlefind = re.finditer(r\"([\\\"])(?:(?=(\\\\?))\\2.)*?\\1\", tweet)\n\n for match in titlefind:\n print(match[0])\n tfind = match[0].replace(\"\\\"\",\"\").replace(\"\\'\",\"\").lower()\n if tfind in nomVote:\n nomVote[tfind] += 1\n else:\n nomVote[tfind] = 1\"\"\"\n\n \"\"\"def PresenterFinder(self, award, winner):\n winTime = self.datab.earliestMention([award,winner,\"won\"])\n if winTime == None:\n return []\n if self.year == 2020:\n winnerTime = datetime.strptime(winTime,\"%Y-%m-%dT%H:%M:%S\")\n End = winnerTime + timedelta(minutes=5)\n Start = winnerTime - timedelta(minutes=5)\n tenMinuteList = self.datab.timeFrameFilter(str(Start).replace(\" \",\"T\"),str(End).replace(\" \",\"T\"))\n else:\n End = winTime + 300000\n Start = winTime - 300000\n tenMinuteList = self.datab.timeFrameFilter(Start,End)\n \n\n kwlist = [\" announc\",\" present\",\" on stage\",\" read\"]\n PresenterFilter = filter(lambda tweet: any(kw in tweet for kw in kwlist), tenMinuteList)\n pVote = {}\n\n for presentertweet in list(PresenterFilter):\n doc = spacyNLP(presentertweet)\n for ent in doc.ents:\n if ent.label_ == \"PERSON\" and \"olden\" not in ent.text and ent.lower_ not in winner:\n if ent.lower_ in pVote:\n pVote[ent.lower_]+=1\n else:\n pVote[ent.lower_]=1\n \n a = sorted(pVote,key=pVote.get)\n\n if len(pVote) == 0:\n return []\n elif 
len(pVote) == 1 or pVote[a[-2]] < pVote[a[-1]]/2:\n return [a[-1]]\n else:\n return [a[-1],a[-2]]\"\"\"\n\n \"\"\"\"def NameVariantFinder(self,person):\n variants = [person]\n personarray = person.split(\" \")\n variants.append(personarray[0])\n variants.append(personarray[1])\n variants.append(personarray[0] + personarray[1])\n\n return variants\"\"\"\n\n def TheyWonAwardParser(self,filters, nomType):\n filterb = [\"wins\",\"won\"]\n if nomType == \"MEDIA\":\n filterb.append(\"\\\"\")\n firstcull = list(filter(lambda tweet: all(kw in tweet for kw in filterb),self.HardQueries[filters]))\n winVote = {}\n docs = []\n\n for tweet in firstcull:\n mentionedMovies = list(re.finditer(r\"([\\\"'])(?:(?=(\\\\?))\\2.)*?\\1\", tweet))\n for mMovie in mentionedMovies:\n if len(mMovie[0]) > 30:\n mentionedMovies.remove(mMovie)\n \n if len(mentionedMovies) == 0:\n continue\n\n subj = \"\"\n pred = \"\"\n ignore = False\n doc = spacyNLP(tweet)\n\n for word in doc:\n if word.text == \".\":\n break\n if nomType == \"MEDIA\" and word.text == \"\\\"\":\n subj = mentionedMovies[0][0]\n if word.dep_ in [\"nsubj\",\"nsubjpass\"] and word.head.text in [\"wins\",\"won\"] and nomType == \"PERSON\":\n if word.ent_iob_ == \"I\":\n subj = next(x for x in doc.ents if word.text in x.text).lower_\n else:\n subj = word.text.lower()\n elif word.dep_ == \"dobj\" and word.head.text in [\"wins\",\"won\"]:\n pred = word.text\n\n \n if not ignore and subj != \"\" and pred != \"\":\n if subj in winVote:\n winVote[subj]+=1\n else:\n winVote[subj]=1\n \n if len(winVote) == 0:\n return False\n\n return max(winVote,key=winVote.get)\n\n def CongratulationsParser(self, filters, nomType):\n filterb = [[\"congratulations\",\"congrats\"],[\"for winning best\",\"for winning the award\",\"for best\"]]\n if nomType == \"MEDIA\":\n filterb.append(\"\\\"\")\n firstcull = list(filter(lambda tweet: all(kw in tweet for kw in filterb[0]) and all(kw in tweet for kw in filterb[1]),self.HardQueries[filters]))\n winVote = 
{}\n \n\n for tweet in firstcull:\n mentionedMovies = list(re.finditer(r\"([\\\"'])(?:(?=(\\\\?))\\2.)*?\\1\", tweet))\n if mentionedMovies == [] and nomType == \"MEDIA\":\n continue\n obj = \"\"\n doc = spacyNLP(tweet)\n\n for word in doc:\n if word.text == \".\":\n break\n if nomType==\"MEDIA\" and word.text==\"\\\"\":\n obj = mentionedMovies[0][0]\n break\n if word.text in [\"congratulations\", \"congrats\", \"Congratulations\", \"Congrats\"] and nomType==\"PERSON\":\n idx = word.i\n obj = next((ent for ent in doc.ents if ent.start > idx and ent.label_ == \"PERSON\"),False)\n if obj:\n obj = obj.text\n break\n \n \"\"\"if obj:\n print(tweet)\n print(obj)\"\"\"\n if obj != \"\" and obj in winVote:\n winVote[obj]+=1\n else:\n winVote[obj]=1\n\n if len(winVote) == 0:\n return None\n \n return max(winVote, key=winVote.get)\n\n \"\"\"def AwardGroupParse(self,award):\n award1 = award.lower()\n for filtere in [\".\",\", i\",\",\",\" a \",\" an \", \" or \",\"!\",\":\",\";\",\"\\'\",\"\\\"\",\"\\n\"]:\n award1 = award1.replace(filtere,\" \")\n for filterd in [\" in \", \" by \", \" - \"]:\n award1 = award1.replace(filterd,\"|\")\n\n award1.replace(\"tv\",\"television\")\n\n split1 = award1.split(\"|\")\n split2 = []\n for segment in split1:\n split2.append(segment.split(\" \"))\n \n return split2\"\"\"\n\n def NominatedForParser(self,award,nomType):\n self.PopulateHardQueries()\n\n firstcull = self.HardQueries[award]\n if nomType == \"MEDIA\":\n firstcull = list(filter(lambda tweet: all(kw in tweet for kw in [\"\\\"\",\" nomin\"]),firstcull))\n else:\n firstcull = list(filter(lambda tweet: all(kw in tweet for kw in [\" nomin\"]),firstcull))\n nominees = {}\n if nomType == \"MEDIA\":\n for tweet in firstcull:\n mentionedMovies = list(re.finditer(r\"([\\\"'])(?:(?=(\\\\?))\\2.)*?\\1\", tweet))\n for mMovie in mentionedMovies:\n if len(mMovie[0]) > 30:\n continue\n if mMovie[0] not in self.movienames:\n self.movienames.append(mMovie[0])\n a = 
mMovie[0].lower().replace(\"\\\"\",\"\").replace(\"\\'\",\"\")\n if a in nominees:\n nominees[a]+=1\n else:\n nominees[a]=1\n\n else:\n for tweet in firstcull:\n doc = spacyNLP(tweet)\n for ent in doc.ents:\n if ent.label_ == \"PERSON\" and \"olden\" not in ent.lower_:\n if ent.lower_ in nominees:\n nominees[ent.lower_]+=1\n else:\n nominees[ent.lower_] = 1\n\n return nominees\n \n def BeatParser(self,winner,nomtype):\n filters = [winner,[\" beat\",\" rob\",\" stole\",\" steal\"],(\" de \",\" en \",\" y \",\" lo \", \" la \", \" el \", \" los \")]\n firstcull = self.datab.ANDorFILTER(filters,True)\n \n nominees = {}\n for tweet in firstcull:\n doc = spacyNLP(tweet)\n if nomtype == \"PERSON\":\n for ent in doc.ents:\n if ent.label_ == \"PERSON\" and ent.lower_ not in winner and \"olden\" not in ent.lower_:\n if ent.lower_ in nominees:\n nominees[ent.lower_]+=1\n else:\n nominees[ent.lower_]=1\n else:\n for word in doc:\n if word.dep_ == \"dobj\" and word.head.text in [\"beat\",\"beats\",\"rob\",\"robbed\",\"stole\",\"steal\",\"steals\"] and word.text[0].isupper():\n a = self.MovieNameFinder(word.text).replace(\"\\\"\",\"\").replace(\"\\'\",\"\").lower()\n if a in nominees:\n nominees[a]+=1\n else:\n nominees[a]=1\n \n return nominees\n\n def MovieNameFinder(self, word):\n for mName in self.movienames:\n if word in mName:\n return mName\n \n filters = [[\"\\\"\"],word]\n firstcull = self.datab.ANDorFILTER(filters)\n\n vote = {}\n for tweet in firstcull:\n mentionedMovies = list(re.finditer(r\"([\\\"'])(?:(?=(\\\\?))\\2.)*?\\1\", tweet))\n for mMovie in mentionedMovies:\n if word in mMovie[0] and len(mMovie[0]) < 30:\n self.movienames.append(mMovie[0])\n if mMovie[0] in vote:\n vote[mMovie[0]]+=1\n else:\n vote[mMovie[0]] = 1\n\n if vote != {}:\n return max(vote,key=vote.get)\n return word\n \n def firstNameFinder(self, lastName):\n filters = [lastName]\n firstcull = self.datab.ANDorFILTER(filters,True)\n\n for tweet in firstcull:\n doc = spacyNLP(tweet)\n for ent in 
doc.ents:\n if ent.label_ == \"PERSON\" and lastName in ent.text and lastName!=ent.text:\n return ent.text\n\n def AllPresentersFinder(self):\n print(\"finding presenters\",self.year)\n presenters = {}\n for award in self.actualAwards:\n presenters[award] = self.PresenterFinder2(award)\n \n return presenters\n \n\n def getAllPresenters(self):\n stopWords = ('hate', 'love','hope','good','bad',' en ',' los ',' de ',' la ', ' un ', 'drink', 'should', 'could', 'would', ' i ', \n ' im ', ' i\\'m ', '?', '!!')\n filters = [['present', 'announc'],stopWords]\n \n firstcull = self.datab.ANDorFILTER(filters,True)\n\n presenters = {}\n fullName = re.compile(r\"^([A-Z][a-z]+ (?:[A-Z][a-z]+)*)$\")\n\n for tweet in firstcull:\n doc = spacyNLP(tweet)\n\n for ent in doc.ents:\n if ent.label_ == \"PERSON\" and fullName.match(ent.text) and ent.text in presenters:\n presenters[ent.text]+=1\n elif ent.label_ == \"PERSON\" and fullName.match(ent.text) and not ent.text in presenters and not \"olden\" in ent.text and not \"present\" in ent.lower_ and not \"win\" in ent.lower_:\n presenters[ent.text]=1\n\n '''\n finalPresenters = {}\n self.PopulateHardQueries()\n \n for name in presenters:\n for award, awardTweets in self.HardQueries.items():\n for tweet in awardTweets:\n if \"present\" in tweet and \"win\" not in tweet:\n if any(name in tweet.lower() for name in name.split()) and award in finalPresenters:\n if any(name in finalPresenters[award] for name in name.split()):\n continue\n else:\n finalPresenters[award].append(name)\n break\n elif any(name in tweet.lower() for name in name.split()) and not award in finalPresenters:\n finalPresenters[award] = [name]\n '''\n finalPresenters = {}\n self.PopulateHardQueries()\n presenters = set(presenters)\n culledPresenters = set()\n\n for presenter in presenters:\n if not 'win' in presenter.lower() and not 'olden' in presenter.lower() and not 'best' in presenter.lower() and not presenter.lower() in self.winners.values():\n 
culledPresenters.add(presenter)\n\n for award, awardTweets in self.HardQueries.items():\n for tweet in awardTweets:\n for name in culledPresenters:\n if name in tweet and 'present' in tweet.lower() and not 'win' in tweet.lower() and award in finalPresenters:\n if not name.lower() in finalPresenters[award]:\n finalPresenters[award].append(name.lower())\n elif name in tweet and 'present' in tweet.lower() and not 'win' in tweet.lower() and not award in finalPresenters:\n finalPresenters[award] = [name.lower()]\n\n for award in self.HardQueries:\n if not award in finalPresenters:\n finalPresenters[award] = self.PresenterFinder2(award)\n\n return finalPresenters\n\n def PresenterFinder2(self, award):\n self.PopulateHardQueries()\n firstcull = self.HardQueries[award]\n\n firstcull = list(filter(lambda tweet: any(kw in tweet for kw in [\"present\",\"announc\"]),firstcull))\n \n docs = []\n hostVote = {}\n\n for tweet in firstcull:\n docs.append(spacyNLP(tweet))\n\n for doc in docs:\n for ent in doc.ents:\n if ent.label_ == \"PERSON\" and \"olden\" not in ent.text:\n if ent.lower_ in hostVote:\n hostVote[ent.lower_] += 1\n else:\n hostVote[ent.lower_] = 1\n\n hVs = sorted(hostVote,key = hostVote.get)\n\n if len(hVs) < 5:\n return hVs\n if len(hVs) == 0:\n return []\n return hVs[-5:]\n\n def DrinkingGames(self):\n firstcull = self.datab.ANDorFILTER([[\"drinking game\", \"drink!\", \"take a drink\", \"drink every\", \"drink when\"]])\n firstcull = list(set(firstcull))\n\n games = []\n\n for tweet in firstcull:\n ltweet = tweet.lower().replace(\"\\n\",\" | \")\n if any(kw in tweet for kw in [\"|\",\"rules\",\" \"]):\n continue\n if \"drinking game:\" in ltweet:\n games.append(tweet[ltweet.index(\"game:\")+5:])\n continue\n for st in [\"when \",\" if \",\" every \"]:\n if st in ltweet:\n games.append(tweet[ltweet.index(st):])\n continue\n rns = []\n i = 0\n while i < 10:\n rand = randint(0,len(games)-1)\n if games[rand] not in rns:\n rns.append(games[rand])\n i+=1\n\n return 
rns\n\n \"\"\"def awardparseOpen(self):\n awards = []\n\n firstcull = self.datab.ANDorFILTER([re.compile(\"^Best [A-Z]\"),re.compile(\" \\n$\"),\"\\n\\n\"])\n if \"-\" in firstcull[0]:\n return firstcull\n for string in firstcull:\n awards.append(string[0:string.index(\"\\n\\n\")])\n \n firstcull = self.datab.ANDorFILTER([re.compile(\"^Best [A-Z]\"),re.compile(\"[a-z] $\"),[\" : \"]])\n for string in firstcull:\n awards.append(string[0:string.index(\": \")])\n secondcull = self.datab.ANDorFILTER([re.compile(\"[a-z] - Best [A-Z]\"),re.compile(\"[a-z][a-z]$\")])\n for string in secondcull:\n awards.append(string[string.index(\" - \"):]) \n\n return awards\n\n def awardParseRegex2(self):\n #regexParse = self.datab.getRegexFullMatchOnly(re.compile(\"Best(([\\s][A-Z][a-z,\\-]{2,})| in a| in an| by an| or| \\-| for)+([\\s][A-Z][a-z]{2,})\"))\n regexParse = self.datab.getRegexFullMatchOnly(re.compile(\"^BEST [A-Z\\-, ]+\"))\n awardDict = {}\n\n stopwords = [\"joke\",\"carpet\",\"dress\",\"olden\",\"look\",\"moment\",\"award\",\"the\",\"--\",\"hosts\",\"ever\",\"speech\"]\n for item in regexParse:\n add = True\n if any(kw in item.lower() for kw in stopwords) or len(item.split(\" \")) < 4:\n add = False\n\n dash = item.split(\" - \")\n if len(dash) > 2:\n item = \" - \".join([dash[0],dash[1]])\n if len(dash) == 2 and len(dash[1].split(\" \")) > 3:\n add = False\n \n if any(kw in item[-5:] for kw in [\",\",\"-\"]):\n add = False\n\n if add:\n if item in awardDict:\n awardDict[item]+=1\n else:\n awardDict[item]=1\n\n print(list(awardDict))\n\n def awardParseRegex3(self):\n #regexParse = self.datab.getRegexFullMatchOnly(re.compile(\"\\n\\nBest .*(\\n\\n|\\nW)\"))\n regexParse = self.datab.getRegexFullMatchOnly(re.compile(\"(^|\\n)Best( [A-Z][a-z]+| -| [a-z][a-z]?)+ [A-Z][a-z]+\"))\n\n awardDict = {}\n\n stopwords = [\"joke\",\"carpet\",\"dress\",\"olden\",\"look\",\"moment\",\"award\",\"the\",\"--\",\"hosts\",\"ever\",\"speech\",\"win\", \"is\",\"should\"]\n for item in 
regexParse:\n add = True\n if any(kw in item.lower() for kw in stopwords) or len(item.split(\" \")) < 4:\n add = False\n\n dash = item.split(\" - \")\n if len(dash) > 2:\n item = \" - \".join([dash[0],dash[1]])\n if len(dash) == 2 and len(dash[1].split(\" \")) > 3:\n add = False\n \n \n if any(kw in item[-5:] for kw in [\",\",\"-\"]):\n add = False\n\n if add:\n if item in awardDict:\n awardDict[item]+=1\n else:\n awardDict[item]=1\n\n print(list(awardDict))\"\"\"\n\n def awardParseRegexFinal(self):\n regexParse = self.datab.getRegexFullMatchOnly(re.compile(\"(- |^|:|: |,|, |'|\\n)(BEST|best|Best) [A-Za-z, \\-]+(: | : |\\n|$|[\\s]*\\n|[\\s]*$|')\"))\n awardDict = {}\n \n stopwords = HardQuery.stopwords\n for item in regexParse:\n add = True\n if any(kw in item.lower() for kw in stopwords):\n add = False\n\n dash = item.split(\" - \")\n if len(dash) > 2:\n item = \" - \".join([dash[0],dash[1]])\n if len(dash) == 2 and len(dash[1].split(\" \")) > 3:\n add = False\n \n \n if any(kw in item[-5:] for kw in [\",\",\"-\",\"or\"]):\n add = False\n item = item.lower().replace(\"tv\",\"television\").replace(\":\",\"\").replace(\"\\n\",\"\").replace(\"'\",\"\").replace(\",\",\"\")\n item = item[item.index(\"best\"):]\n\n while item[-1] == ' ':\n item = item[:-1]\n\n if len(item.split(\" \")) < 4:\n add = False\n \n if add:\n if item in awardDict:\n awardDict[item]+=1\n else:\n awardDict[item]=1\n \n #print(sorted(awardDict))\n return [x for x in awardDict if awardDict[x] > 1] \n\n\n \"\"\"def awardParseRegex(self):\n print(\"parsing awards\",self.year)\n regexParse = self.datab.getRegexFullMatchOnly(re.compile(\"Best(([\\s][A-Z\\-][a-z,]{2,})| in a| in an| by an| or| \\-| for)+\"))\n tangent = [\"on Picture\",\"Series\"]\n antiTangent = [\"Should\", \"t Ac\", \"t Su\",\" a F\",\"t Fi\",\"olden\"]\n duoTangent = [(\",\",\", L\"),(\"for\",\"for Te\")]\n if(int(self.year) <= 2015):\n tangent.append(\"Film\")\n \n r2 = list(filter(lambda match: any([kw in match for kw in 
tangent]),regexParse))\n r3 = list(filter(lambda cull: \n len(cull.split(\" \")) > 3 and len(cull.split(\"\\n\")) == 1 and len(cull.split(\"-\")) < 3 and not any([kw in cull for kw in antiTangent]) and\n cull[-1] not in [\"r\",\"-\",\"n\"] and not any([kw[0] in cull and not kw[1] in cull for kw in duoTangent]),r2))\n\n vote = {}\n for string in r3:\n if string.replace(\"- \",\"\") in vote:\n vote[string] = vote[string.replace(\"- \",\"\")] + 1\n del vote[string.replace(\"- \",\"\")]\n elif string in vote:\n vote[string] += 1\n else:\n vote[string] = 1\n\n return [x for x in vote if vote[x] > 1]\"\"\"\n\n\n def WeinsteinMachine(self):\n firstcull = self.datab.ANDorFILTER([[\"Weinstein\",\"Harvey Weinstein\",\"Harvey\",\"Weinzstein\",\"Wienstien\",\"Weinstien\"]])\n context = {}\n mentionTimes = len(firstcull)\n for tweet in firstcull:\n doc = spacyNLP(tweet)\n idx = 0\n for word in doc:\n if word.text in [\"Weinstein\",\"Harvey\"]:\n idx = word.i\n break\n start = max(idx - 3,0)\n end = min(idx + 3, len(list(doc)))\n for word in doc[start:end+1]: \n if word.tag_ in [\"NNP\",\"NNP\",\"NNPS\",\"NNS\",\"NN\",\"VBD\",\"VBG\",\"VBP\",\"VBZ\"] and word.text not in [\"Weinstein\",\"Harvey\",\"Golden\",\"Globes\"] and len(word.text) > 3:\n if word.text in context:\n context[word.text]+=1\n else:\n context[word.text]=1\n \n \n chunky = sorted(context,key=context.get)\n dictma = {'unique-mentions':mentionTimes,'most-associated-terms':chunky[-7:]}\n return dictma\n\n def newNominees(self,award):\n self.PopulateHardQueries()\n\n query = self.HardQueries[award]\n filledQ = list(filter(lambda tweet: \"nominees:\" in tweet and not any(kw in tweet for kw in [\"present\",\"won\",\"win\",\"not\",\"n't\",\"goes\",\"?\"]),query))\n\n for string in filledQ:\n spd = string[string.index(\"nominees:\")+9:]\n a = spd.replace(\"and\", \"\").split(\",\")\n if len(a) > 1:\n return a\n \n return []\n\n def Top5BestDressed(self):\n firstcull = 
self.datab.ANDorFILTER([[\"beautiful\",\"gorgeous\",\"ravishing\",\"best dressed\"]],False)\n\n docs = []\n hostVote = {}\n if len(firstcull) > 500:\n firstcull = firstcull[0:500]\n\n for tweet in firstcull:\n docs.append(spacyNLP(tweet))\n\n for doc in docs:\n for ent in doc.ents:\n if ent.label_ == \"PERSON\" and \"olden\" not in ent.text:\n if ent.lower_ in hostVote:\n hostVote[ent.lower_] += 1\n else:\n hostVote[ent.lower_] = 1\n\n hVs = sorted(hostVote,key = hostVote.get)\n return hVs[-5:]\n","sub_path":"AwardParse.py","file_name":"AwardParse.py","file_ext":"py","file_size_in_byte":28391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"145681951","text":"# %% this idea is from https://www.kaggle.com/c/porto-seguro-safe-driver-prediction/discussion/44629\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\nbase_path = '/home/tom/mywork/some_test/porto_seguro/input/'\n\nmodel_name = 'porto_seguro_dae004'\ndata_path = '/home/tom/data/porto_seguro_dae'\nmodel_path = os.path.join(data_path, model_name)\nif not os.path.exists(model_path):\n os.makedirs(model_path)\n\n# %% The last cell preprocess data. In this cell, let data go into one model\n# in dae003, try hyperopt\nimport os\nimport pandas as pd\nimport numpy as np\nimport hyperopt as hp\nfrom hyperopt import fmin, rand, tpe, hp, pyll, base, space_eval, Trials\nfrom hyperopt.pyll import Apply, as_apply, dfs, scope\nfrom hyperopt.mongoexp import MongoTrials\nfrom PPMoney.core.utils import proc_run\nfrom PPMoney.core.model.space import space_lgb_binary, space_update, sample_int\nfrom PPMoney.core.data import HDFDataSet\n\nsuggest = rand.suggest\n\ndata_root = model_path\nmodel_root = os.path.join(data_root, \"test_model\")#\"/tmp/test_model\"\n\n\nextra_param = {\n \"train_file\": os.path.join(data_path, 'mjahrer_1st_train.dataset'),\n \"model_root\": model_root,\n \"n_fold\": 5,\n \"summary_step\": 10,\n \"lgb_param\": {\n # 'objective' use default PPMoney/core/model/space.py\n # 'boosting_type' use default PPMoney/core/model/space.py\n # 'learning_rate' use default PPMoney/core/model/space.py\n 'max_bin': 2 ** sample_int(\"i_max_bins\", 5, 9) - 1,\n 'num_leaves': 2 ** sample_int(\"v_num_leaves\", 4, 8) - 1,\n 'min_data_in_leaf': 2 ** sample_int(\"idx_min_data_in_leaf\", 5, 11),\n # 'feature_fraction' use default PPMoney/core/model/space.py\n 'bagging_freq': sample_int(\"bagging_frq\", 1, 10),\n # 'bagging_fraction' use default PPMoney/core/model/space.py\n\n \"num_threads\": 80,\n \"verbose\": -1,\n \"lambda_l1\": 2 ** sample_int(\"v_lambda_l1\", 0, 4) - 1,\n \"lambda_l2\": 2 ** sample_int(\"v_lambda_l2\", 0, 4) - 1,\n }\n}\n\n# 
from sklearn.model_selection import StratifiedKFold\n# skf = StratifiedKFold(n_splits=extra_param['n_fold'], random_state=0)\n# train_index, valid_index = next(skf.split(X_0, y_0))\n\n@proc_run\ndef lgb_run(param):\n import pprint\n import numpy as np\n pprint.pprint(param)\n from sklearn.datasets import load_svmlight_file as load_svm\n from PPMoney.core.model import BinaryLGB\n from PPMoney.core.model.metrics import ModelMetric\n\n file_tr = param[\"train_file\"]\n model_root = param[\"model_root\"]\n n_fold = param[\"n_fold\"]\n\n from PPMoney.core.data import HDFDataSet\n dataset_load = HDFDataSet(file_tr, chunk_size=2048)\n\n X, y = dataset_load['feature'], dataset_load['label']\n label = y == 1\n print(f\"X.shape, label.shape: {X.shape, label.shape}\")\n\n from sklearn.model_selection import StratifiedKFold\n skf = StratifiedKFold(n_splits=n_fold, random_state=0)\n train_index, valid_index = next(skf.split(X, label))\n\n X_tr, X_v = X[train_index], X[valid_index]\n label_tr, label_v = label[train_index], label[valid_index]\n\n # X_tr = X[:-n_val]\n # label_tr = label[:-n_val]\n # X_v = X[-n_val:]\n # label_v = label[-n_val:]\n\n import os\n if not os.path.isdir(model_root):\n os.mkdir(model_root)\n\n v_metric = ModelMetric(feature_t=X_v, label_t=label_v, name=\"VA\", metrics=[\"AUC\"], minimize=False)\n tr_metric = ModelMetric(feature_t=X_tr, label_t=label_tr, name=\"TR\", metrics=[\"auc\"], minimize=False)\n\n model = BinaryLGB(\n model_root=model_root,\n model_metric=[v_metric, tr_metric], #可以定义多个metric,其中第一个的作为模型选择的基准\n model_name=None # for random model name\n )\n return model.fit(param, X_tr, label_tr)\n\n# 先试试单线程\n# fmin(lgb_run, space_lgb, algo=suggest, max_evals=5)\n\n\n# %%\n\nspace_lgb = space_update(space_lgb_binary, extra_param, model_key=\"lgb_bin\", n_round=2000)\n\n# mongo trials可以进行并行训练,可以同时跑多组参数\n# from hyperopt.mongoexp import MongoTrials\n# trials_lgb = MongoTrials('mongo://$addr/lgb_worker/jobs', exp_key=f'test_model')\ntrials_lgb = 
Trials()\n\nfrom multiprocessing.pool import ThreadPool\n\nwith ThreadPool(4) as pool:\n #提交给线程池运行,这样可以同时运行多个fmin\n res = pool.apply_async(fmin, args=(lgb_run, space_lgb), kwds={\"trials\": trials_lgb, \"algo\": suggest, \"max_evals\": 20})\n res.get()\n\n# %%\n\nfrom hyperopt.plotting import main_plot_vars, main_plot_history, main_plot_histogram\nimport matplotlib.pylab as plt\n\nfor sp, trls in zip([space_lgb], [trials_lgb]):\n domain = base.Domain(lgb_run, sp)\n # plt.figure(figsize=(20, 40))\n # main_plot_vars(trls, bandit=domain, colorize_best=30, columns=1)\n plt.figure(figsize=(20, 5))\n # plt.ylim((-0.003, 0.003))\n main_plot_history(trls, bandit=domain)\n\n# NOTE: 对trials_lgb的性能的评判应该是VA的第一个metric指标的负数,至少在这个例子里是这样\n\ntrials_lgb.trials # 所有模型的参数和结果的字典组成的一个list\ntrials_lgb.results # 返回所有实验的结果\ntrials_lgb.miscs # 返回所有实验的参数\ntrials_lgb.vals # 返回所有实验的跟space更新有关的参数\n\ntrials_lgb.trials[0]['misc']['vals']\ntmp1 = space_lgb['lgb_param']['bagging_fraction']\ntmp2 = 2 ** sample_int(\"v_lambda_l1\", 0, 2) - 1\n5**-1.6 # 0.07614615754863513\n\ntype(tmp2), dir(tmp2)\n\n# NOTE: 试验space到实际数值的映射, 可能和Apply没关系\nhp_assignment = {k:v[0] for k,v in trials_lgb.trials[0]['misc']['vals'].items()}\nhp_assignment = {k:v[0] for k,v in trials_lgb.vals.items()} # 这句和上面那句等价,这句更简洁\nspace_eval(space_lgb, hp_assignment)\n\n\ntrials_lgb.trials[0]['result'] # {'loss': -0.8737864077669903, 'status': 'ok'}\n# 返回k个最好的模型的参数dict组成的一个list\ntrials_lgb.topk_trials(k=2)\n# return_score=True就返回2个list组成的tuple\ntrials_lgb.topk_trials(2, return_score=True, ordered=True)\ntype(trials_lgb.topk_trials(2, return_score=True, ordered=True)[0][0]) # 这个类型就是个dict\n# Trials().trial_attachments的作用是,根据trial的参数字典解析出相应的model路径\ntrials_lgb.trial_attachments(trials_lgb.topk_trials(2, return_score=True, ordered=True)[0][0])[\"model\"].decode()\n# %%\n\n#返回topk的模型\nselect_models = lambda trials, k: [(trials.trial_attachments(t)[\"model\"].decode(), c) for t, c in zip(*trials.topk_trials(k, return_score=True, 
ordered=True))]\nfor sub_model_path, sub_model_score in select_models(trials_lgb, 3):\n print(-sub_model_score, sub_model_path)\n\nbest_auc = -trials_lgb.topk_trials(1, return_score=True, ordered=True)[1][0]\nbest_space = trials_lgb.topk_trials(1, return_score=True, ordered=True)[0][0]['misc']['vals']\nbest_hyperparam = space_eval(space_lgb, hp_assignment = {k:v[0] for k,v in best_space.items()})\nbest_model_path = select_models(trials_lgb, 1)[0][0]\n\nimport json\nwith open(os.path.join(model_path, 'best_model.json'), 'w') as f:\n json.dump({'best_auc': best_auc, 'best_hyperparam': best_hyperparam, 'best_model_path': best_model_path},\n f, ensure_ascii=False, indent=2, separators=(',', ': '))\n\n# %% reload data and the best model to predict testset\nfrom PPMoney.core.data import HDFDataSet\ndataset_load = HDFDataSet(os.path.join(data_path, 'mjahrer_1st_test.dataset'), chunk_size=2048)\n\nX_test = dataset_load['feature']\nprint(f'Shape of X_test: {X_test.shape}')\n\nbest_model_path = os.path.join(model_root, 'e1422032-dcff-11e7-966f-0cc47a64aaf0/model/00890.model')\n\nimport lightgbm as lgb\nmodel = lgb.Booster(model_file=best_model_path)\ny_test = model.predict(X_test)\n\nX_test_raw = pd.read_csv(base_path+'test.csv')\nsub = X_test_raw['id'].to_frame()\nsub['target'] = 0\nsub['target'] = y_test\nsub.to_csv(os.path.join(model_path, 'test_'+model_name+'.csv.gz'), index=False, float_format='%.5f', compression='gzip')\n# test_porto_seguro_dae004.csv.gz, eval: 0.286, PublicLB: 0.28090, PrivateLB: 0.28720\n","sub_path":"code/script_reproduce_kaggle_1st_dae004.py","file_name":"script_reproduce_kaggle_1st_dae004.py","file_ext":"py","file_size_in_byte":7949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"301110636","text":"# Create a faster factorial lookup dictionary\r\nfactorials = {'0':1,'1':1,'2':2,'3':6,'4':24,'5':120,'6':720,'7':5040,'8':40320,'9':362880}\r\n\r\ndigit_factorials = []\r\n\r\n# Check up to 3 million as 9! = 362880 which means that after 3 million or so\r\n# the number gets bigger than the maximum possible sum of the factorial of its\r\n# digits. (ie: 9!*7 which would correpond to almost 10 million is only\r\n# ~ 2.5 million)\r\nfor num in range(3,30000000):\r\n total = 0\r\n for digit in str(num):\r\n total += factorials[digit]\r\n if total == num:\r\n digit_factorials.append(num)\r\n \r\nprint(digit_factorials)","sub_path":"#34.py","file_name":"#34.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"108458605","text":"from django import template\nfrom django.db.models.query import EmptyQuerySet\nfrom django.shortcuts import render\nfrom django.http import HttpResponse # This takes http requests\nfrom . import forms\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm\nfrom django.contrib.sessions.models import Session\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import ListView, DetailView, View, CreateView, UpdateView, DeleteView\nfrom django.utils import timezone\nfrom .models import Categoria, Producto, Carrito, ProductoAgregado\nfrom store.forms import NuevoProductoForm\nfrom django.db.models import Q, F\nimport pprint\nimport json\nimport random\n\n\n# Create your views here.\ndef index(request):\n productos = Producto.objects.all()\n productos_ordenados = productos.order_by('-fecha_creacion')\n\n index = 0\n tres_productos = []\n while index < 3:\n un_producto = productos_ordenados[index]\n tres_productos.append(un_producto)\n index +=1\n\n index = 3\n siete_productos = []\n while index < 10:\n un_producto = productos_ordenados[index]\n siete_productos.append(un_producto)\n index +=1\n\n print(productos)\n dictionary = {\n 'tres_productos': tres_productos,\n 'siete_productos': siete_productos,\n }\n return render(request, 'index.html', context=dictionary)\n\ndef acerca_de(request):\n dictionary={}\n return render(request, 'acerca_de.html', context=dictionary)\n\ndef sign_up_form(request):\n form = 
forms.SignUpForm() # class defined in forms.py\n dictionary = {\"form\": form}\n \n if request.method == \"POST\":\n form = forms.SignUpForm(request.POST) # creating a variable that receives the POST\n if form.is_valid(): \n password = form.clean_password2()\n user = form.save(commit=False)\n user.save()\n grupo_estandar = Group.objects.get(name='Estandar')\n user.groups.add(grupo_estandar)\n \n form.save()\n\n return redirect('resultado_registro/')\n else:\n print(\"Invalid form request\")\n error = form.errors\n print(error)\n dictionary = {\n 'error': error\n } \n else:\n form = forms.SignUpForm()\n \n dictionary = {\n 'form': form\n } \n return render(request, \"registro.html\", context=dictionary)\n\ndef resultado_registro(request):\n dictionary = {}\n return render(request, \"resultado_registro.html\", context=dictionary)\n\ndef login_form(request):\n username = 'not logged in'\n user = request.user\n form = AuthenticationForm()\n dictionary = {\n 'form': form\n }\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n if username and password:\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n request.session['username'] = username\n login(request, user)\n print('form and session started')\n return redirect('resultado_login/')\n else:\n error = form.errors\n print(error)\n dictionary = {\n 'error': error\n }\n else:\n form = AuthenticationForm()\n\n dictionary = {\n 'object_list': user,\n 'form': form,\n }\n \n return render(request, \"login.html\", context=dictionary)\n\ndef resultado_login(request):\n dictionary = {}\n return render(request, \"resultado_login.html\", context=dictionary)\n\n@login_required(login_url='/login/')\ndef sign_out(request): # my logout view\n request.session.clear()\n logout(request)\n print(\"All sessions closed\")\n return render(request, 
\"logout.html\")\n\nclass VistaProducto(DetailView):\n model = Producto\n template_name = \"producto.html\"\n\nclass VistaResumenCompra(LoginRequiredMixin, View):\n login_url = '/login/'\n redirect_field_name = 'redirect_to'\n def get(self, *args, **kwargs):\n try:\n compra = Carrito.objects.get(usuario=self.request.user, ya_pedido=False)\n contexto= {\n 'objeto': compra\n }\n return render(self.request, 'resumen_compra.html', context=contexto)\n except ObjectDoesNotExist:\n messages.error(self.request, 'No tiene un carrito todavía')\n return redirect('/')\n\n@login_required(login_url= '/login/')\ndef agregar_al_carrito(request, pk):\n producto = get_object_or_404(Producto, pk=pk)\n producto_agregado, creado = ProductoAgregado.objects.get_or_create(\n producto = producto,\n usuario = request.user,\n ya_agregado = False\n )\n\n producto_existente_carrito = Carrito.objects.filter(usuario=request.user, ya_pedido=False)\n\n if producto_existente_carrito.exists():\n agrega_producto = producto_existente_carrito[0]\n\n if agrega_producto.productos.filter(producto__pk=producto.pk).exists():\n producto_agregado.cantidad += 1\n producto_agregado.save()\n messages.info(request, \"Agregada/s unidad/es\")\n return redirect(\"store:resumen_compra\")\n else:\n agrega_producto.productos.add(producto_agregado)\n messages.info(request, 'Producto agregado al carrito')\n return redirect(\"store:resumen_compra\")\n else:\n fecha_pedido = timezone.now()\n agregado_al_pedido = Carrito.objects.create(usuario=request.user, fecha=fecha_pedido)\n agregado_al_pedido.productos.add(producto_agregado)\n messages.info(request, \"Producto agregado al carrito\")\n return redirect('store:resumen_compra')\n\n\n@login_required\ndef quitar_del_carrito(request, pk):\n producto = get_object_or_404(Producto, pk=pk )\n producto_existente = Carrito.objects.filter(usuario=request.user, ya_pedido=False)\n\n if producto_existente.exists():\n quita_producto = producto_existente[0]\n if 
quita_producto.productos.filter(producto__pk=producto.pk).exists():\n producto_en_lista = ProductoAgregado.objects.filter( producto=producto,\n usuario=request.user,\n ya_agregado=False\n )[0]\n producto_en_lista.delete()\n messages.info(request, \"Item \\\"\"+producto_en_lista.producto.titulo+\"\\\" retirado del carrito\")\n return redirect(\"store:resumen_compra\")\n else:\n messages.info(request, \"Este producto no está en su carrito\")\n return redirect(\"store:producto\", pk=pk)\n else:\n #add message doesnt have order\n messages.info(request, \"No tiene un carrito\")\n return redirect(\"store:producto\", pk = pk)\n\n@login_required\ndef eliminar_carrito(request):\n productos_del_usuario = Carrito.objects.filter(usuario=request.user, ya_pedido=False)\n\n if productos_del_usuario.exists():\n Carrito.objects.filter(usuario=request.user, ya_pedido=False).delete()\n ProductoAgregado.objects.filter(usuario=request.user, ya_agregado=False).delete()\n Carrito.objects.create(usuario=request.user)\n\n return redirect(\"store:carrito_eliminado\")\n else:\n #add message doesnt have order\n messages.info(request, \"No tiene un carrito\")\n return redirect(\"/\")\n\ndef carrito_eliminado(request):\n dictionary = {}\n return render(request, \"carrito_eliminado.html\", context=dictionary)\n\n@login_required\ndef reducir_cantidad_producto(request, pk):\n producto = get_object_or_404(Producto, pk=pk )\n producto_existente = Carrito.objects.filter(\n usuario = request.user, \n ya_pedido = False\n )\n if producto_existente.exists():\n quita_producto = producto_existente[0]\n if quita_producto.productos.filter(producto__pk=producto.pk).exists() :\n item = ProductoAgregado.objects.filter(\n producto = producto,\n usuario = request.user,\n ya_agregado = False\n )[0]\n if item.cantidad > 1:\n item.cantidad -= 1\n item.save()\n else:\n item.delete()\n messages.info(request, \"La cantidad fue modificada\")\n return redirect(\"store:resumen_compra\")\n else:\n messages.info(request, 
\"Este item no esta en su lista\")\n return redirect(\"store:resumen_compra\")\n else:\n #add message doesnt have order\n messages.info(request, \"No tiene un carrito\")\n return redirect(\"store:resumen_compra\")\n\nclass NuevoProductoView(LoginRequiredMixin, PermissionRequiredMixin, CreateView):\n login_url = '/login/'\n redirect_field_name = 'redirect_to'\n permission_required = 'store.can_add_productos'\n \n form_class = NuevoProductoForm\n template_name = 'nuevo_producto.html'\n success_url = 'nuevo_producto_resultado'\n\ndef nuevo_producto_resultado(request):\n dictionary = {}\n return render(request, \"nuevo_producto_resultado.html\", context=dictionary)\n\nclass ResultadoBusqueda(ListView):\n model = Producto\n template_name = 'resultado_busqueda.html'\n\n def get_queryset(self):\n query = self.request.GET.get('q')\n if query:\n queryset = Producto.objects.filter(\n Q(titulo__icontains=query) | Q(categoria_base__descripcion__icontains=query) | Q(detalle__icontains=query)\n )\n print(queryset)\n else:\n queryset = Producto.objects.all()\n\n return queryset\n\n\nclass ResultadoBusquedaCategoria(ListView):\n model = Producto\n template_name = 'resultado_busqueda_categoria.html'\n\n def get_queryset(self):\n query = self.request.GET.get('q')\n print(query)\n if query:\n queryset = Producto.objects.filter(Q(categoria_base__descripcion__icontains=query))\n print(queryset)\n else:\n queryset = Producto.objects.all()\n\n return queryset\n\nclass EditarProductoView(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):\n login_url = '/login/'\n redirect_field_name = 'redirect_to'\n permission_required = 'store.can_add_productos'\n \n model = Producto\n template_name = 'actualizar_producto.html'\n success_url = 'producto_actualizado'\n fields=[\n 'titulo','categoria_base','detalle','precio','imagen'\n ]\n\ndef producto_actualizado(request):\n dictionary = {}\n return render(request, \"producto_actualizado.html\", context=dictionary)\n\n\nclass 
EliminarProductoView(LoginRequiredMixin, PermissionRequiredMixin, DeleteView):\n login_url = '/login/'\n redirect_field_name = 'redirect_to'\n permission_required = 'store.can_add_productos'\n \n model = Producto\n template_name = 'eliminar_producto.html'\n success_url = 'producto_eliminado'\n fields=[\n 'titulo','categoria_base','detalle','precio','imagen'\n ]\n\ndef producto_eliminado(request):\n dictionary = {}\n return render(request, \"producto_eliminado.html\", context=dictionary)","sub_path":"src/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"212132936","text":"# Compute the probability that ten randomly selected 15-mers from the ten 600-nucleotide \n# long strings in the Subtle Motif Problem capture at least one implanted 15-mer. (Allowable error: 0.000001)\n\nimport math\n\ndna_length = 600\nkmer_length = 15\nstrands_dna = 10\n\ndef probability_implanted_kmer_in_dna(dna_length, kmer_length, strands_dna):\n # First you compute p1 - probability of not capturing the implanted k-mer (15-mer) in one string.\n # Then you notice for the entire problem we have to deal with ten similar cases, i.e. you have to\n # multiply p1 * p2... *p10, where p1 = p2 = ... = p10. So you just compute p1 to the 10th power:\n # Then you just compute the 'opposite' probability, i.e. the probability that from ten 600-length \n # nucleotide string, we capture at least one implanted 15-mer! \n p = (dna_length - kmer_length) / ((dna_length - kmer_length) + 1)\n return 1 - math.pow(p, 10)\nprint(probability_implanted_kmer_in_dna(dna_length, kmer_length, strands_dna))\n","sub_path":"Python/Bioinformatics/FindingHIddenMessagesInDNA/probability_implanted_kmer_in_dna.py","file_name":"probability_implanted_kmer_in_dna.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"46473188","text":"from django.contrib import admin\nfrom django.urls import path\nfrom onlineapp.views import *\n\nurlpatterns = [\n path('test/', testview.test, name=\"test\"),\n path('login/', LoginController.as_view(), name=\"login\"),\n path('signup/', SignUpController.as_view(), name=\"signup\"),\n path('logout/', logout_user, name=\"logout\"),\n\n path('api/v1/colleges/', college_list, name=\"rest_colleges\"),\n path('api/v1/colleges//', college_list, name=\"rest_colleges\"),\n path('api/v1/colleges//students/', student_details.as_view(), name=\"rest_students\"),\n path('api/v1/colleges//students//', student_details.as_view(), name=\"rest_students\"),\n path('api-token-auth/', CustomAuthToken.as_view()),\n\n path('colleges/', CollegeView.as_view(), name=\"colleges_html\"),\n path('colleges//', CollegeView.as_view(), name=\"college_details\"),\n path('colleges/add', AddCollegeView.as_view(), name=\"add_college\"),\n path('colleges//edit', AddCollegeView.as_view(), name=\"edit_college\"),\n path('colleges//delete', DeleteCollegeView.as_view(), name=\"delete_college\"),\n path('colleges//addstudent', AddStudentView.as_view(), name=\"add_student\"),\n path('colleges//editstudent/', AddStudentView.as_view(), name=\"edit_student\"),\n path('colleges//deletestudent/', DeleteStudentView.as_view(), name=\"delete_student\"),\n\n]\n","sub_path":"classproject/onlineapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"454872460","text":"numOfList = int(input(\"Enter the number of lists you want to make: \"))\n\nmyList = []\n\ni = 1\ncount = 0\n\nfor list in range(numOfList):\n count = count + 3\n subList = []\n while(count >= i):\n subList.append(i)\n i = i + 1\n\n myList.append(subList)\n\nprint(myList)","sub_path":"adelphi_prob.py","file_name":"adelphi_prob.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"260289073","text":"import sys\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.mlab as mlab\r\nimport numpy as np\r\nimport random\r\nimport timeit\r\n\r\nelectionFile = sys.argv[1]\r\nnomineeList = sys.argv[2].split(\",\")\r\nnomineeColumnIndex = [0 for i in range(len(nomineeList))]\r\noutputFile = \"retrievedData.txt\"\r\nanswerFile = \"myAnswer.txt\"\r\n\r\nstateList = []\r\nnomineesVotes = [[] for i in range(len(nomineeList))]\r\ncolorList = ['r','b','y','c','m']\r\n\r\nmeanArray = [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]\r\nnumUSAGreater = 0\r\npercUSAGreater = 0\r\npLevel = 0\r\nelectionDataSize = 0\r\nrandMSESize = 10000\r\n\r\ndef retrieveData(filename,nominees):\r\n\tglobal electionDataSize\r\n\tnumLines = 0\r\n\twith open(filename) as f:\r\n\t\tfirstline = 1\r\n\t\tfor line in f:\r\n\t\t\tif 1 == firstline:\r\n\t\t\t\tlineArr = line.rstrip().split(\",\")\r\n\t\t\t\tfor i in range(len(nominees)):\r\n\t\t\t\t\tnomineeColumnIndex[i] = lineArr.index(nominees[i])\r\n\t\t\telse:\r\n\t\t\t\tlineArr = line.split(\",\")\r\n\t\t\t\tstateList.append(lineArr[0])\r\n\t\t\t\tfor i in range(len(nominees)):\r\n\t\t\t\t\tnomineesVotes[i].append(int(lineArr[nomineeColumnIndex[i]]))\r\n\t\t\t\tnumLines = numLines + 1\r\n\t\t\tfirstline = 0\r\n\telectionDataSize = numLines * len(nominees)\r\n\toutputList = []\r\n\tfor i in range(len(nominees)):\r\n\t\tfor vote in nomineesVotes[i]:\r\n\t\t\toutputList.append(vote)\r\n\treturn outputList\r\n\r\nvoteList = retrieveData(electionFile,nomineeList)\r\n\r\nwith open(outputFile,\"w\") as output:\r\n\toutput.write(str(voteList))\r\n\r\noutput.close()\r\n\r\ndef DispBarPlot():\r\n\tfirstNom = 0\r\n\tsecondNom = 0\r\n\tfor i in range(len(nomineeList)):\r\n\t\tif sum(nomineesVotes[i]) > sum(nomineesVotes[firstNom]):\r\n\t\t\tfirstNom = i\r\n\t\t\tsecondNom = firstNom\r\n\t\telif sum(nomineesVotes[i]) > sum(nomineesVotes[secondNom]):\r\n\t\t\tsecondNom = i\r\n\t\telif secondNom == firstNom:\r\n\t\t\tsecondNom = 
i\r\n\r\n\tn_groups = len(stateList)\r\n\r\n\tfirstVotes = nomineesVotes[firstNom]\r\n\r\n\tsecondVotes = nomineesVotes[secondNom]\r\n\r\n\tfig, ax = plt.subplots(figsize=(20,10))\r\n\tindex = np.arange(n_groups)\r\n\r\n\tbar_width = 0.3\r\n\topacity = 1\r\n\terror_config = {'ecolor': '0.3'}\r\n\r\n\trects2 = plt.bar(index + 1, secondVotes, bar_width,\r\n\t\t\t\t\t alpha=opacity,\r\n\t\t\t\t\t color='r',\r\n\t\t\t\t\t yerr=None,\r\n\t\t\t\t\t error_kw=error_config,\r\n\t\t\t\t\t label=nomineeList[secondNom])\r\n\r\n\trects1 = plt.bar(index+1+bar_width, firstVotes, bar_width,\r\n\t\t\t\t\t alpha=opacity,\r\n\t\t\t\t\t color='b',\r\n\t\t\t\t\t yerr=None,\r\n\t\t\t\t\t error_kw=error_config,\r\n\t\t\t\t\t label=nomineeList[firstNom])\r\n\r\n\tplt.xlabel('State')\r\n\tplt.ylabel('Votes')\r\n\tplt.title('Votes per State')\r\n\tplt.xticks(index + bar_width + 1, stateList,rotation =90)\r\n\tplt.legend()\r\n\tplt.tight_layout()\r\n\tplt.savefig(\"ComparativeVotes.pdf\")\r\n\r\ndef calculatepersantages():\r\n\tpersantageList=[0 for i in range(len(nomineeList))]\r\n\ttotalVotes = [0 for i in range(len(nomineeList))]\r\n\tallTotal=0\r\n\tfor i in range(len(nomineeList)):\r\n\t\tfor vote in nomineesVotes[i]:\r\n\t\t\ttotalVotes[i] = totalVotes[i] + vote\r\n\t\tallTotal = allTotal + totalVotes[i]\r\n\r\n\tfor i in range(len(nomineeList)):\r\n\t\tpersantageList[i] = (totalVotes[i] / allTotal) * 100\r\n\treturn persantageList\r\n\r\ndef compareVoteonBar():\r\n\tpercentages =calculatepersantages()\r\n\tn_groups = len(percentages)\r\n\r\n\tfig, ax = plt.subplots()\r\n\r\n\tindex = np.arange(n_groups)\r\n\tbar_width = 0.7\r\n\r\n\topacity = 1\r\n\terror_config = {'ecolor': '0.3'}\r\n\r\n\trects = plt.bar(index,percentages, bar_width,\r\n\t\t\t\t\t color = colorList[0],\r\n\t\t\t\t\t alpha=opacity,\r\n\t\t\t\t\t yerr=None,\r\n\t\t\t\t\t error_kw=error_config,\r\n\t\t\t\t\t label=nomineeList[0]\r\n\t\t\t\t\t )\r\n\r\n\tfor i in range(1,len(nomineeList)):\r\n\t\tplt.bar(0,0, 
bar_width,\r\n\t\tcolor = colorList[i%5],\r\n\t\talpha=opacity,\r\n\t\tyerr=None,\r\n\t\terror_kw=error_config,\r\n\t\tlabel=nomineeList[i])\r\n\r\n\t\trects[i].set_color(colorList[i])\r\n\r\n\tpercentagesFormatted = []\r\n\tfor percent in percentages:\r\n\t\tpercentagesFormatted.append(\"{0:.3f}\".format(percent))\r\n\r\n\tplt.xlabel('Nominees')\r\n\tplt.ylabel('vote percentages')\r\n\tplt.xticks(index + bar_width/2, percentagesFormatted)\r\n\tplt.legend()\r\n\tplt.tight_layout()\r\n\tplt.savefig(\"CompVotePercs.pdf\")\r\n\r\ndef obtainHistogram(numbers):\r\n\tdataLen = len(numbers)\r\n\thistogramNums = []\r\n\thistogramPerc = []\r\n\ttotalDigits = 0\r\n\tfor i in range(0,10):\r\n\t\thistogramNums.append(0)\r\n\t\thistogramPerc.append(0)\r\n\tfor num in numbers:\r\n\t\tdigitOnes = int(num % 10)\r\n\t\thistogramNums[digitOnes] = histogramNums[digitOnes] + 1\r\n\t\tdigitTens = int((num/10) % 10)\r\n\t\thistogramNums[digitTens] = histogramNums[digitTens] + 1\r\n\t\ttotalDigits = totalDigits + 2\r\n\tfor i in range(0,10):\r\n\t\thistogramPerc[i] = histogramNums[i] / totalDigits\r\n\treturn histogramPerc\r\n\r\ndef plotHistogram(histData, plotColor, outputPdfFile):\r\n\tplt.clf()\r\n\tdashedLine = plt.plot(meanArray)\r\n\tplt.setp(dashedLine, color = 'g',label='Mean',linestyle=\"dashed\")\r\n\tdigitLine = plt.plot(histData)\r\n\tplt.setp(digitLine, color=plotColor,label='Digit Dist.')\r\n\tplt.title('Histogram of least sign. 
digits')\r\n\tplt.ylabel('Distribution')\r\n\tplt.xlabel('Digits')\r\n\tplt.legend()\r\n\tplt.savefig(outputPdfFile)\r\n\r\ndef plotHistogramWithSample():\r\n\trand_10 = [];rand_50 = []; rand_100 = []; rand_1000 = []; rand_10000 = []\r\n\tfor i in range(0,10):\r\n\t\trand_10.append(random.choice(range(0,100)))\r\n\tfor i in range(0,50):\r\n\t\trand_50.append(random.choice(range(0,100)))\r\n\tfor i in range(0,100):\r\n\t\trand_100.append(random.choice(range(0,100)))\r\n\tfor i in range(0,1000):\r\n\t\trand_1000.append(random.choice(range(0,100)))\r\n\tfor i in range(0,10000):\r\n\t\trand_10000.append(random.choice(range(0,100)))\r\n\r\n\thist_1 = obtainHistogram(rand_10)\r\n\tplotHistogram(hist_1,'r',\"HistogramofSample1.pdf\")\r\n\r\n\thist_2 = obtainHistogram(rand_50)\r\n\tplotHistogram(hist_2,'b',\"HistogramofSample2.pdf\")\r\n\r\n\thist_3 = obtainHistogram(rand_100)\r\n\tplotHistogram(hist_3,'y',\"HistogramofSample3.pdf\")\r\n\r\n\thist_4 = obtainHistogram(rand_1000)\r\n\tplotHistogram(hist_4,'c',\"HistogramofSample4.pdf\")\r\n\r\n\thist_5 = obtainHistogram(rand_10000)\r\n\tplotHistogram(hist_5,'m',\"HistogramofSample5.pdf\")\r\n\r\ndef calculateMSE(list1,list2) :\r\n\tcalcualatedVal = 0\r\n\tfor i in range(0,len(list1)):\r\n\t\tcalcualatedVal = calcualatedVal + (list2[i] - list1[i])**2\r\n\treturn calcualatedVal\r\n\r\ndef compareMSEs(usaElectionMSE):\r\n\tglobal numUSAGreater\r\n\tglobal percUSAGreater\r\n\tglobal pLevel\r\n\tfor i in range(randMSESize):\r\n\t\trand_Election = []\r\n\t\tfor i in range(0,electionDataSize):\r\n\t\t\trand_Election.append(random.choice(range(0,100)))\r\n\t\thistPercentRand = obtainHistogram(rand_Election)\r\n\t\tmseRand = calculateMSE(histPercentRand, meanArray)\r\n\t\tif usaElectionMSE > mseRand:\r\n\t\t\tnumUSAGreater = numUSAGreater + 1\r\n\tpercUSAGreater = (numUSAGreater / randMSESize) * 100\r\n\tpLevel = numUSAGreater / randMSESize\r\n\r\nDispBarPlot()\r\ncompareVoteonBar()\r\n\r\nhistPercent = 
obtainHistogram(voteList)\r\nplotHistogram(histPercent, 'r', \"Histogram.pdf\")\r\nplotHistogramWithSample()\r\n\r\nusaMSE = calculateMSE(histPercent, meanArray)\r\ncompareMSEs(usaMSE)\r\n\r\nprint(\"MSE value of 2012 USA election is : \" + str(usaMSE))\r\nprint(\"The number of MSE of random samples which are larger than or equal to USA election MSE is : \" + str(randMSESize-numUSAGreater))\r\nprint(\"The number of MSE of random samples which are smaller than USA election MSE is : \" + str(numUSAGreater))\r\nprint(\"2012 USA election rejection level p is : \" + str(pLevel))\r\nif percUSAGreater <= 95:\r\n\tprint(\"Finding: We reject the null hypothesis at the p = \" + str(pLevel) + \" level\")\r\nelse:\r\n\tprint(\"Finding: There is no statistical evidence to reject null\")\r\n\r\nwith open(answerFile,\"w\") as answer:\r\n\tanswer.writelines(\"MSE value of 2012 USA election is : \" + str(usaMSE) + \"\\n\")\r\n\tanswer.writelines(\"The number of MSE of random samples which are larger than or equal to USA election MSE is : \" + str(randMSESize-numUSAGreater)+ \"\\n\")\r\n\tanswer.writelines(\"The number of MSE of random samples which are smaller than USA election MSE is : \" + str(numUSAGreater)+ \"\\n\")\r\n\tanswer.writelines(\"2012 USA election rejection level p is : \" + str(pLevel) + \"\\n\")\r\n\tif percUSAGreater <= 95:\r\n\t\tanswer.writelines(\"Finding: We reject the null hypothesis at the p = \" + str(pLevel) + \" level\" + \"\\n\")\r\n\telse:\r\n\t\tanswer.writelines(\"Finding: There is no statistical evidence to reject null\")\r\nanswer.close()\r\n","sub_path":"DataVisulation.py","file_name":"DataVisulation.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"450234901","text":"import cv2\nimport numpy as np\nimport time\n\ndef getLines(image_name):\n ## Convert from RGB to gray\n img = cv2.imread(image_name)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n ## Convert to binary image\n th, threshed = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)\n \n \n ## Calculate rotated image\n pts = cv2.findNonZero(threshed)\n ret = cv2.minAreaRect(pts)\n \n (cx,cy), (w,h), ang = ret\n if w>h:\n w,h = h,w\n ang += 90\n \n M = cv2.getRotationMatrix2D((cx,cy), 0, 1.0)\n rotated = threshed\n rotated = cv2.warpAffine(threshed, M, (img.shape[1], img.shape[0]))\n #cv2.imshow(\"\", rotated)\n #cv2.waitKey()\n \n ## Draw upper and lower lines for each text line\n hist = cv2.reduce(rotated,1, cv2.REDUCE_AVG).reshape(-1)\n \n th = 10\n H,W = img.shape[:2]\n uppers = [y-5 for y in range(H-1) if hist[y]<=th and hist[y+1]>th]\n lowers = [y+5 for y in range(H-1) if hist[y]>th and hist[y+1]<=th]\n print(len(uppers))\n print(len(lowers))\n \n rotated = cv2.cvtColor(rotated, cv2.COLOR_GRAY2BGR)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n \n minDistance = 5\n count = 0\n croppedImages = []\n for i in range(len(uppers)):\n print(uppers[i])\n print(lowers[i])\n tooClose = False\n if abs(uppers[i] - lowers[i]) < minDistance:\n tooClose = True\n print(\"too close\")\n if not tooClose:\n croppedImages.append(rotated[uppers[i]:lowers[i],:])\n #cv2.line(rotated, (0,uppers[i]), (W, uppers[i]), (255,0,0), 1)\n #cv2.line(rotated, (0,lowers[i]), (W, lowers[i]), (0,255,0), 1)\n \n return np.array(croppedImages)\n #cv2.imwrite(\"result.png\", croppedImages[0])\n\ngetLines(\"index.jpg\")","sub_path":"backend/line_detect3.py","file_name":"line_detect3.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"297571947","text":"import numpy as np\r\n\r\ndef ppp(p_value):\r\n \"\"\" Use atsterisks to denote p-values. \"\"\"\r\n if p_value <= 0.01:\r\n return '^{***}'\r\n elif p_value <= 0.05:\r\n return '^{**}'\r\n elif p_value <= 0.1:\r\n return '^{*}'\r\n else:\r\n return ''\r\n\r\n\r\ndef ppf(slope, intercept, p_slope, p_intercept):\r\n \"\"\" Pretty format for regression equations. \"\"\"\r\n\r\n if (np.abs(slope) < 1e-1) | (np.abs(slope) > 10):\r\n fslope = '{:.2e}'.format(slope)\r\n else:\r\n fslope = '%.2f' % slope\r\n fslope += ppp(p_slope)\r\n\r\n if (np.abs(intercept) < 1e-1) | (np.abs(intercept) > 10):\r\n fintercept = '{:.2e}'.format(np.abs(intercept))\r\n else:\r\n fintercept = '%.2f' % (np.abs(intercept))\r\n if intercept > 0:\r\n fintercept = ' + ' + fintercept\r\n else:\r\n fintercept = ' - ' + fintercept\r\n fintercept += ppp(p_intercept)\r\n\r\n return r'$y = ' + fslope + r' \\times x' + fintercept + r'$'\r\n","sub_path":"tools/format_text.py","file_name":"format_text.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"629235114","text":"import tkinter\n\nroot = tkinter.Tk()\nlabel1 = tkinter.Label(root,text=\"label1\")\nlabel2 = tkinter.Label(root, text=\"label2\")\n\ntombol1 = tkinter.Button(root,text=\"tombol1\")\ntombol2 = tkinter.Button(root, text=\"tombol2\")\n#method positioning\nlabel1.pack()\nlabel2.pack()\ntombol1.pack()\ntombol2.pack()\n\n#method menampilkan GUI\nroot.mainloop()\n","sub_path":"oop/tkinter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"76876145","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\n\n# 使用RandomForestRegressor填补缺失的年龄属性\ndef set_miss_ages(df):\n # 把已有的数值型特征取出来丢进RandomForestRegressor中\n age_df = df[['age','pclass','embarked','sex']]\n\n # 乘客分成已知年龄和未知年龄两部分\n known_age = age_df[age_df.age.notnull()].values\n unknown_age = age_df[age_df.age.isnull()].values\n\n # y即目标年龄\n y = known_age[:, 0]\n\n # X即特征属性值\n X = known_age[:, 1:]\n\n # 建立训练模型并训练\n rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)\n rfr.fit(X, y)\n\n # 用得到的模型进行未知年龄结果预测\n predictedAges = rfr.predict(unknown_age[:, 1::])\n\n # 用得到的预测结果填补原缺失数据\n df.loc[(df.age.isnull()), 'age'] = predictedAges\n\n return df\n\ntitanic = pd.read_csv(\"Titanic\\DataSet\\\\train\\\\titanicOut3.csv\")\n\ntitanic = set_miss_ages(titanic)\n\n# 将Embarked,Sex,Pclass转换成为one-hot编码\ndummies_embarked = pd.get_dummies(titanic['embarked'], prefix='embarked')\ndummies_sex = pd.get_dummies(titanic['sex'], prefix='sex')\ndummies_pclass = pd.get_dummies(titanic['pclass'], prefix= 'pclass')\n\ndf = pd.concat([titanic, dummies_embarked, dummies_sex, dummies_pclass], axis=1)\ndf.drop(['pclass','sex','embarked'], axis=1, inplace=True)\n\nprint(df)\ndf.to_csv(\"Titanic\\DataSet\\\\train\\\\titanicOut4.csv\", index=False)\n\n# 导入测试集\ntitanic_test = pd.read_csv(\"Titanic\\DataSet\\\\test\\\\testOut.csv\")\n\n# 填补缺失的年龄\ntitanic_test = set_miss_ages(titanic_test)\n\n# 将Embarked,Sex,Pclass转换成为one-hot编码\ndummies_embarked = pd.get_dummies(titanic_test['embarked'], prefix='embarked')\ndummies_sex = pd.get_dummies(titanic_test['sex'], prefix='sex')\ndummies_pclass = pd.get_dummies(titanic_test['pclass'], prefix= 'pclass')\n\ndf2 = pd.concat([titanic_test, dummies_embarked, dummies_sex, dummies_pclass], axis=1)\ndf2.drop(['pclass','sex','embarked'], axis=1, inplace=True)\n\nprint(df2)\ndf2.to_csv(\"Titanic\\DataSet\\\\test\\\\testOut2.csv\", 
index=False)","sub_path":"Titanic/DataProcessing/FittingMissingAgeData.py","file_name":"FittingMissingAgeData.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"393949613","text":"#!/usr/bin/env python\n\n\"\"\"\n cifar10.py\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport sys\nimport json\nimport argparse\nimport numpy as np\nfrom time import time\nfrom PIL import Image\nfrom datetime import datetime\nfrom collections import OrderedDict\n\nfrom basenet import BaseNet\nfrom basenet.hp_schedule import HPSchedule\nfrom basenet.helpers import to_numpy, set_seeds\nfrom basenet.vision import transforms as btransforms\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\ntorch.backends.cudnn.benchmark = True\n\nfrom torchvision import transforms, datasets\n\nimport dlib\n\n# --\n# Helpers\n\ndef dlib_find_max_global(f, bounds, **kwargs):\n varnames = f.__code__.co_varnames[:f.__code__.co_argcount]\n bound1_, bound2_ = [], []\n for varname in varnames:\n bound1_.append(bounds[varname][0])\n bound2_.append(bounds[varname][1])\n \n return dlib.find_max_global(f, bound1_, bound2_, **kwargs)\n\n\n# --\n# CLI\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--weight-decay', type=float, default=5e-4)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--batch-size', type=int, default=128)\n parser.add_argument('--seed', type=int, default=789)\n parser.add_argument('--download', action=\"store_true\")\n return parser.parse_args()\n\n# --\n# Model definition\n# Derived from models in `https://github.com/kuangliu/pytorch-cifar`\n\nclass PreActBlock(nn.Module):\n \n def __init__(self, in_channels, out_channels, stride=1):\n super(PreActBlock, self).__init__()\n \n self.bn1 = nn.BatchNorm2d(in_channels)\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, 
padding=1, bias=False)\n \n if stride != 1 or in_channels != out_channels:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)\n )\n \n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n return out + shortcut\n\n\nclass ResNet18(BaseNet):\n def __init__(self, num_blocks=[2, 2, 2, 2], num_classes=10):\n super(ResNet18, self).__init__(loss_fn=F.cross_entropy)\n \n self.in_channels = 64\n \n self.prep = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU()\n )\n \n self.layers = nn.Sequential(\n self._make_layer(64, 64, num_blocks[0], stride=1),\n self._make_layer(64, 128, num_blocks[1], stride=2),\n self._make_layer(128, 256, num_blocks[2], stride=2),\n self._make_layer(256, 256, num_blocks[3], stride=2),\n )\n \n self.classifier = nn.Linear(512, num_classes)\n \n def _make_layer(self, in_channels, out_channels, num_blocks, stride):\n \n strides = [stride] + [1] * (num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(PreActBlock(in_channels=in_channels, out_channels=out_channels, stride=stride))\n in_channels = out_channels\n \n return nn.Sequential(*layers)\n \n def forward(self, x):\n x = self.prep(x)#.half())\n \n x = self.layers(x)\n \n x_avg = F.adaptive_avg_pool2d(x, (1, 1))\n x_avg = x_avg.view(x_avg.size(0), -1)\n \n x_max = F.adaptive_max_pool2d(x, (1, 1))\n x_max = x_max.view(x_max.size(0), -1)\n \n x = torch.cat([x_avg, x_max], dim=-1)\n \n x = self.classifier(x)\n \n return x\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n \n set_seeds(args.seed)\n \n # --\n # IO\n \n print('cifar_opt.py: making dataloaders...', file=sys.stderr)\n \n transform_train = transforms.Compose([\n btransforms.ReflectionPadding(margin=(4, 4)),\n transforms.RandomCrop(32),\n 
transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n btransforms.NormalizeDataset(dataset='cifar10'),\n ])\n \n transform_test = transforms.Compose([\n transforms.ToTensor(),\n btransforms.NormalizeDataset(dataset='cifar10'),\n ])\n \n try:\n trainset = datasets.CIFAR10(root='./data', train=True, download=args.download, transform=transform_train)\n testset = datasets.CIFAR10(root='./data', train=False, download=args.download, transform=transform_test)\n except:\n raise Exception('cifar10.py: error loading data -- try rerunning w/ `--download` flag')\n \n trainloader = torch.utils.data.DataLoader(\n trainset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=16,\n pin_memory=True,\n )\n \n testloader = torch.utils.data.DataLoader(\n testset,\n batch_size=512,\n shuffle=False,\n num_workers=16,\n pin_memory=True,\n )\n \n dataloaders = {\n \"train\" : trainloader,\n \"test\" : testloader,\n }\n \n def run_one(break1, break2, val1, val2):\n \n # try:\n # set_seeds(args.seed) # Might have bad side effects\n \n if (break1 >= break2):\n return float(-1)\n \n timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n params = OrderedDict([\n (\"timestamp\", timestamp),\n (\"break1\", break1),\n (\"break2\", break2),\n (\"val1\", 10 ** val1),\n (\"val2\", 10 ** val2),\n (\"momentum\", args.momentum),\n (\"weight_decay\", args.weight_decay),\n ])\n \n model = ResNet18().cuda()#.half()\n \n lr_scheduler = HPSchedule.piecewise_linear(\n breaks=[0, break1, break2, args.epochs],\n vals=[0, 10 ** val1, 10 ** val2, 0]\n )\n \n model.init_optimizer(\n opt=torch.optim.SGD,\n params=model.parameters(),\n hp_scheduler={\"lr\" : lr_scheduler},\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n nesterov=True,\n )\n \n t = time()\n for epoch in range(args.epochs):\n train = model.train_epoch(dataloaders, mode='train')\n test = model.eval_epoch(dataloaders, mode='test')\n \n res = OrderedDict([\n (\"params\", params),\n (\"epoch\", int(epoch)),\n (\"lr\", 
model.hp['lr']),\n (\"test_acc\", float(test['acc'])),\n (\"time\", time() - t),\n ])\n print(json.dumps(res))\n sys.stdout.flush()\n \n return float(test['acc'])\n # except:\n # return float(-1)\n \n print('cifar_opt.py: start', file=sys.stderr)\n best_args, best_score = dlib_find_max_global(run_one, bounds={\n \"break1\" : (0, args.epochs),\n \"break2\" : (0, args.epochs),\n \"val1\" : (-3, 0),\n \"val2\" : (-3, 0),\n }, num_function_calls=100, solver_epsilon=0.001)\n \n print(best_args, file=sys.stderr)\n print(best_score, file=sys.stderr)\n print('cifar_opt.py: done', file=sys.stderr)\n","sub_path":"examples/dev/cifar_opt/cifar_opt.py","file_name":"cifar_opt.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"189006505","text":"import serial\nfrom matplotlib import pyplot as plt\nfrom drawnow import drawnow, figure\n\n\ndef draw_fig():\n plt.imshow(frame, interpolation='nearest', cmap=\"Greys_r\")\n\nw, h = 320, 200\nframe = [[0 for x in range(w)] for y in range(h)]\nfigure(figsize=(4, 3))\n\nwhile True:\n ser = serial.Serial('COM6', 921600, timeout=1)\n try:\n ser.write(chr(2).encode(encoding='UTF-8'))\n print(ser.read(1))\n print(ser.read(1))\n\n ser.write(chr(3).encode(encoding='UTF-8'))\n frame_data = ser.read(2**16);\n ser.close()\n print('line read')\n\n vpix = 0\n hline = 0\n i = 0\n\n while True:\n if hline < 200:\n if vpix < 320:\n #print(i)\n i += 1\n #print(int(frame_data[i]))\n frame[hline][vpix] = int(frame_data[i])\n vpix += 1\n else:\n vpix = 0\n hline += 1\n else:\n break\n\n drawnow(draw_fig)\n\n except:\n ser.close()\n print('Exception')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"71282317","text":"\"\"\"\nDemonstrates robust grasping policies with GQ-CNNS\nAuthor: Jeff Mahler\n\"\"\"\nimport argparse\nimport logging\nimport IPython\nimport numpy as np\nimport os\nimport sys\nimport time\n\nfrom autolab_core import RigidTransform, YamlConfig\nfrom perception import RgbdImage, RgbdSensorFactory\n\nfrom gqcnn import CrossEntropyAntipodalGraspingPolicy, RgbdImageState\nfrom gqcnn import Visualizer as vis\n\nif __name__ == '__main__':\n # set up logger\n logging.getLogger().setLevel(logging.DEBUG)\n\n # parse args\n parser = argparse.ArgumentParser(description='Capture a set of test images from the Kinect2')\n parser.add_argument('--config_filename', type=str, default='cfg/examples/policy.yaml', help='path to configuration file to use')\n args = parser.parse_args()\n config_filename = args.config_filename\n\n # read config\n config = YamlConfig(config_filename)\n sensor_type = config['sensor']['type']\n sensor_frame = config['sensor']['frame']\n inpaint_rescale_factor = config['inpaint_rescale_factor']\n policy_config = config['policy']\n\n # read camera calib\n tf_filename = '%s_to_world.tf' %(sensor_frame)\n T_camera_world = RigidTransform.load(os.path.join(config['calib_dir'], sensor_frame, tf_filename))\n\n # setup sensor\n sensor = RgbdSensorFactory.sensor(sensor_type, config['sensor'])\n sensor.start()\n camera_intr = sensor.ir_intrinsics\n\n # read images\n color_im, depth_im, _ = sensor.frames()\n color_im = color_im.inpaint(rescale_factor=inpaint_rescale_factor)\n depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)\n rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)\n state = RgbdImageState(rgbd_im, camera_intr)\n\n # init policy\n policy = CrossEntropyAntipodalGraspingPolicy(policy_config)\n policy_start = time.time()\n action = policy(state)\n logging.info('Planning took %.3f sec' %(time.time() - policy_start))\n\n # vis final grasp\n if policy_config['vis']['final_grasp']:\n 
vis.figure(size=(10,10))\n vis.subplot(1,2,1)\n vis.imshow(rgbd_im.color)\n vis.grasp(action.grasp, scale=1.5, show_center=False, show_axis=True)\n vis.title('Planned grasp on color (Q=%.3f)' %(action.q_value))\n vis.subplot(1,2,2)\n vis.imshow(rgbd_im.depth)\n vis.grasp(action.grasp, scale=1.5, show_center=False, show_axis=True)\n vis.title('Planned grasp on depth (Q=%.3f)' %(action.q_value))\n vis.show()\n\n","sub_path":"examples/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"386446521","text":"\"\"\"Problem: https://www.hackerrank.com/challenges/python-lists/problem\"\"\"\n\nif __name__ == '__main__':\n N = int(input())\n l = []\n \n for i in range(N):\n # Read in command, split by whitespace\n s = input().split()\n # Command is first in list, remainder are args\n command = s[0]\n arguments = [int(arg) for arg in s[1:]]\n \n # Print is not an attribute, call it separately\n if command == 'print':\n print(l)\n # Otherwise add attribute and args to list\n else: \n getattr(l, command)(*arguments)\n ","sub_path":"python/basic_data_types/list_commands.py","file_name":"list_commands.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"72520660","text":"\n# This will be useful to convert letter to ints\nfrom string import ascii_lowercase, ascii_uppercase\n\n# For mean/dev scaling. Important for PCA, and good practice in general\nfrom sklearn.preprocessing import StandardScaler\n\nimport os\n\nfrom collections import OrderedDict\n\nimport pandas as pd\n\nimport json\n\ndebug = False\n\ndef Scale(df, y='TARGET'):\n\n if debug:\n print(\"Doing StandardScaler\")\n\n# print(\"\\nInside Scale(): # of entries missing in data: \\n{0}\".format(df.isnull().sum()))\n scaler = StandardScaler()\n\n skcol = []\n for c in df.filter(regex='SK_ID*').columns:\n skcol.append(c)\n\n # Don't scale target column!!\n keepTarget = False\n if y in df.columns:\n print(\"mean of target column before handling: {0}\".format(df[y].mean()))\n keepTarget = True\n skcol.append(y)\n\n targetdf = df[skcol].copy()\n\n df = df.drop(columns=skcol)\n xcol = df.columns\n\n scaleddf = scaler.fit_transform(df)\n scaleddf = pd.DataFrame(scaleddf, columns = xcol)\n scaleddf['SK_ID_CURR'] = targetdf['SK_ID_CURR'].values \n\n scaleddf = scaleddf.merge( targetdf, on='SK_ID_CURR' )\n if debug:\n print(\"\\nInside Scale(): # rows in targetdf: {0}\".format(targetdf.shape))\n print(\"\\nInside Scale(): # rows in df: {0}\".format(df.shape))\n print(\"\\nInside Scale(): # rows in scaleddf: {0}\".format(scaleddf.shape))\n\n return scaleddf\n\n\ndef makeNameTokens(df, col, name_dict):\n\n numbervalue = name_dict.values()[-1] + 1\n for i,row in df.iterrows():\n name_cidx = df.columns.get_loc(col)\n name_str = str(row[name_cidx])\n\n if name_str not in name_dict.keys():\n if debug:\n print(\"Adding pair {0}:{1} to name dictionary\".format(name_str, numbervalue))\n name_dict[name_str] = numbervalue\n numbervalue += 1\n\n return name_dict\n\n\ndef tokenizeNames(df,col,namedict):\n\n newcol = df[col].replace(namedict, regex=True)\n return newcol\n\n\ndef preprocess(df, buildDictionaries):\n\n # test and xval df's are smaller, will run out of 
rows for high skipranges\n if df.shape[0] == 0:\n return df\n\n if debug:\n print(\"\\npreprocess (beginning): Number of entries missing in data: \\n{0}\".format(df.isnull().sum()))\n\n # Let's start with an ultrasimple na replace\n newdf = df.fillna(0)\n if debug:\n dtypeCount_x =[newdf.iloc[:,i].apply(type).value_counts() for i in range(newdf.shape[1])]\n print(dtypeCount_x)\n\n stringcols = []\n for c in newdf.columns[newdf.dtypes=='object']:\n stringcols.append(c)\n\n\n dictpath = \"dictionaries\"\n if buildDictionaries:\n for c in stringcols:\n \n # Name of dictionary file\n dictname = dictpath+\"/\"+c+\".txt\"\n \n if os.path.exists(dictname):\n with open(dictname) as f:\n c_dict = json.load(f)\n else:\n c_dict = {}\n c_dict[0] = 0\n \n if debug:\n print(\"Building dictionary for column {0} with example values:\\n{1}\".format(c, newdf[c].head()))\n \n c_dict = makeNameTokens(newdf, c, c_dict) # append dictionaries for each column\n \n # write the dictionary to file\n with open(dictname, 'w') as file:\n file.write(json.dumps(c_dict))\n\n # Now replace string columns with tokens\n for c in stringcols:\n # Name of dictionary file\n dictname = dictpath+\"/\"+c+\".txt\"\n\n if os.path.exists(dictname):\n with open(dictname) as f:\n c_dict = json.load(f)\n\n newdf = newdf.replace({c: c_dict})\n\n if debug:\n print(\"After tokenizing\")\n dtypeCount_x =[newdf.iloc[:,i].apply(type).value_counts() for i in range(newdf.shape[1])]\n print(dtypeCount_x)\n\n # Scale variables after strings have been converted\n newdf = Scale(newdf)\n\n if debug:\n print(\"\\npreprocess (end): Number of entries missing in data: \\n{0}\".format(newdf.isnull().sum()))\n\n return newdf\n","sub_path":"HCDRpreprocessing.py","file_name":"HCDRpreprocessing.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"104832329","text":"#================================================================= \n# AE2220-II: Computational Modelling \n# TriFEMLib: A number of classes to assist in implementing\n# finite-element methods on meshes of triangles\n#================================================================= \nimport math\nimport numpy as np\nimport matplotlib.tri as tri\nimport matplotlib.pyplot as plt\n\n\n#=================================================================\n# TriOMesh class definition.\n# TriOMesh objects create and hold data for meshes of triangles for\n# circular or airfoil-shaped domains with a single hole. \n# Either structured algebraic or Delaunay triangulation can be used.\n# To use, create an empty object, set the parameters below \n# and call \"loadMesh\"\n#=================================================================\nclass TriOMesh(object):\n\n #=========================================================\n # Public data\n #=========================================================\n x1 = 0.0; # Left boundary position\n x2 = 5.0; # Right boundary position\n y1 = 0. 
; # lower boundary position\n y2 = 2.0; # Upper boundary position\n dlx = 1.5; # Delamination x position\n dly = 0.5; # Delamination x position\n dlr = 0.4; # Delamination radius\n bnx = 20; # Background elements in x\n bny = 10; # Background elements in y\n permn = 1.0; # Nominal value of permittivity\n permd = 0.001; # Permittivity of delamination\n dx = (x2-x1)/float(bnx-1); # Mesh spacing in x\n dy = (y2-y1)/float(bny-1); # Mesh spacing in y\n minTriArea = (dx+dy)/10000.; # Minimum triangle size\n refine = 1; # Mesh refinement factor\n\n vertices = None; # Mesh Vertices as list\n vertArray = None; # Mesh Vertices as array\n elements = None; # Mesh Elements\n elemArray = None; # Mesh Elements as array\n nVert = None; # Number of vertArray\n nElem = None; # Number of elements\n dtri = None; # Delaunay triangulation\n mask = None; # Triangle mask\n leftVI = None; # Left boundary vertex list\n rightVI = None; # Right boundary vertex list\n lowerVI = None; # Lower boundary vertex list\n upperVI = None; # Upper boundary vertex list\n \n #=========================================================\n # Object constructor\n #=========================================================\n def __init__(self): \n created = 1;\n\n \n #=========================================================\n # Defines triangles removed from Delaunay triangulation\n #=========================================================\n def triMask(self,triangles):\n\n out = []\n self.elements = []\n for points in triangles:\n a,b,c = points\n va = self.vertices[a]\n vb = self.vertices[b]\n vc = self.vertices[c]\n x1 = float(va[0]); y1 = float(va[1]);\n x2 = float(vb[0]); y2 = float(vb[1]);\n x3 = float(vc[0]); y3 = float(vc[1]);\n Ae = 0.5*(x2*y3 + x1*y2 + x3*y1 - x3*y2 - x1*y3 - x2*y1);\n #if (Ae1.4):\n self.addDelam(5*n,10*n);\n \n self.nVert=len(self.vertices);\n self.vertArray = np.asarray(self.vertices);\n# self.smoothVert(2,0.01);\n\n # Use Delaunay triangulation and mask bnd-only elements\n 
self.dtri = tri.Triangulation(self.vertArray[:,0],self.vertArray[:,1]);\n self.mask = self.triMask(self.dtri.triangles)\n self.dtri.set_mask(self.mask);\n\n \n self.nElem=len(self.elements);\n self.elemArray=np.asarray(self.elements);\n\n\n #=========================================================\n # Adds a set of vertices to increase resolution near \n # the delamination\n #=========================================================\n def addDelam(self,dlnr,dlnt):\n\n rl = self.dlr*0.9\n dvr = rl/float(dlnr-2);\n\n # Add verticies\n for i in range(dlnr):\n vr = dvr*float(i+2);\n dti = 0.; \n if (i%2==0): dti = math.pi/float(dlnt);\n for j in range(dlnt):\n vt = dti + 2*math.pi*float(j)/float(dlnt);\n xv = self.dlx + vr*math.cos(vt);\n yv = self.dly + vr*math.sin(vt);\n self.vertices.append( (xv,yv) );\n\n\n #=========================================================\n # Returns the distribution of voltage across an electrode\n #=========================================================\n def getElectrodeDist(self,x,xe,le):\n return 0.5 + 0.5*math.cos((x-xe)*2.*math.pi/le);\n\n\n #=========================================================\n # Returns the local permittivity\n #=========================================================\n def getPerm(self,x,y):\n\n dx = x-self.dlx;\n dy = y-self.dly;\n dr = math.sqrt(dx*dx+dy*dy)\n if (self.dlx<1.4):\n perm=self.permn;\n elif (dr, Eric Ma, Charlie Harris\n# License: MIT\n# Project Website: https://github.com/a-r-j/graphein\n# Code Repository: https://github.com/a-r-j/graphein\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Callable, List, Optional\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom Bio.PDB.Polypeptide import three_to_one\nfrom biopandas.pdb import PandasPdb\n\nfrom graphein.protein.config import (\n DSSPConfig,\n GetContactsConfig,\n ProteinGraphConfig,\n)\nfrom graphein.protein.edges.distance import compute_distmat\nfrom graphein.protein.resi_atoms import 
BACKBONE_ATOMS\nfrom graphein.protein.utils import (\n filter_dataframe,\n get_protein_name_from_filename,\n three_to_one_with_mods,\n)\nfrom graphein.utils.utils import (\n annotate_edge_metadata,\n annotate_graph_metadata,\n annotate_node_metadata,\n compute_edges,\n)\n\n# from rdkit.Chem import MolFromPDBFile\n# from graphein.protein.visualisation import protein_graph_plot_3d\n\n\nlogging.basicConfig(level=\"DEBUG\")\nlog = logging.getLogger(__name__)\n\n\ndef read_pdb_to_dataframe(\n pdb_path: Optional[str] = None,\n pdb_code: Optional[str] = None,\n verbose: bool = False,\n granularity: str = \"CA\",\n) -> pd.DataFrame:\n \"\"\"\n Reads PDB file to PandasPDB object.\n\n Returns `atomic_df`, which is a dataframe enumerating all atoms and their cartesian coordinates in 3D space. Also\n contains associated metadata.\n\n :param pdb_path: path to PDB file. Defaults to None.\n :type pdb_path: str, optional\n :param pdb_code: 4-character PDB accession. Defaults to None.\n :type pdb_code: str, optional\n :param verbose: print dataframe?\n :type verbose: bool\n :param granularity: Specifies granularity of dataframe. 
See graphein.protein.config.ProteinGraphConfig for further\n details.\n :type granularity: str\n :returns: Pd.DataFrame containing protein structure\n :rtype: pd.DataFrame\n \"\"\"\n if pdb_code is None and pdb_path is None:\n raise NameError(\"One of pdb_code or pdb_path must be specified!\")\n\n atomic_df = (\n PandasPdb().read_pdb(pdb_path)\n if pdb_path is not None\n else PandasPdb().fetch_pdb(pdb_code)\n )\n\n # Assign Node IDs to dataframes\n atomic_df.df[\"ATOM\"][\"node_id\"] = (\n atomic_df.df[\"ATOM\"][\"chain_id\"].apply(str)\n + \":\"\n + atomic_df.df[\"ATOM\"][\"residue_name\"]\n + \":\"\n + atomic_df.df[\"ATOM\"][\"residue_number\"].apply(str)\n )\n if granularity == \"atom\":\n atomic_df.df[\"ATOM\"][\"node_id\"] = (\n atomic_df.df[\"ATOM\"][\"node_id\"]\n + \":\"\n + atomic_df.df[\"ATOM\"][\"atom_name\"]\n )\n if verbose:\n print(atomic_df)\n return atomic_df\n\n\ndef deprotonate_structure(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Remove protons from PDB dataframe.\n\n :param df: Atomic dataframe.\n :type df: pd.DataFrame\n :returns: Atomic dataframe with all atom_name == \"H\" removed.\n :rtype: pd.DataFrame\n \"\"\"\n log.debug(\n \"Deprotonating protein. This removes H atoms from the pdb_df dataframe\"\n )\n # return df.loc[df[\"atom_name\"] != \"H\"].reset_index(drop=True)\n return filter_dataframe(\n df, by_column=\"atom_name\", list_of_values=[\"H\"], boolean=False\n )\n\n\ndef convert_structure_to_centroids(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Overwrite existing (x, y, z) coordinates with centroids of the amino acids.\n\n :param df: Pandas Dataframe config protein structure to convert into a dataframe of centroid positions\n :type df: pd.DataFrame\n :return: pd.DataFrame with atoms/residues positiions converted into centroid positions\n :rtype: pd.DataFrame\n \"\"\"\n log.debug(\n \"Converting dataframe to centroids. 
This averages XYZ coords of the atoms in a residue\"\n )\n\n centroids = calculate_centroid_positions(df)\n df = df.loc[df[\"atom_name\"] == \"CA\"].reset_index(drop=True)\n df[\"x_coord\"] = centroids[\"x_coord\"]\n df[\"y_coord\"] = centroids[\"y_coord\"]\n df[\"z_coord\"] = centroids[\"z_coord\"]\n\n return df\n\n\ndef subset_structure_to_atom_type(\n df: pd.DataFrame, granularity: str\n) -> pd.DataFrame:\n \"\"\"\n Return a subset of atomic dataframe that contains only certain atom names.\n\n :param df: Protein Structure dataframe to subset\n :type df: pd.DataFrame\n :returns: Subsetted protein structure dataframe\n :rtype: pd.DataFrame\n \"\"\"\n return filter_dataframe(\n df, by_column=\"atom_name\", list_of_values=[granularity], boolean=True\n )\n # return df.loc[df[\"atom_name\"] == granularity]\n\n\ndef remove_insertions(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n This function removes insertions from PDB dataframes\n\n :param df: Protein Structure dataframe to remove insertions from\n :type df: pd.DataFrame\n :return: Protein structure dataframe with insertions removed\n :rtype: pd.DataFrame\n \"\"\"\n \"\"\"Remove insertions from structure.\"\"\"\n # Remove alt_loc residues\n # Todo log.debug(f\"Detected X insertions\")\n # return df.loc[df[\"alt_loc\"].isin([\"\", \"A\"])]\n return filter_dataframe(\n df, by_column=\"alt_loc\", list_of_values=[\"\", \"A\"], boolean=True\n )\n\n\ndef filter_hetatms(\n df: pd.DataFrame, keep_hets: List[str]\n) -> List[pd.DataFrame]:\n \"\"\"Return hetatms of interest.\n\n :param df: Protein Structure dataframe to filter hetatoms from.\n :type df: pd.DataFrame\n :param keep_hets: List of hetero atom names to keep\n :returns: Protein structure dataframe with heteroatoms removed\n :rtype pd.DataFrame\n \"\"\"\n hetatms_to_keep = []\n for hetatm in keep_hets:\n hetatms_to_keep.append(df.loc[df[\"residue_name\"] == hetatm])\n return hetatms_to_keep\n\n\ndef compute_rgroup_dataframe(pdb_df: pd.DataFrame) -> pd.DataFrame:\n 
\"\"\"Return the atoms that are in R-groups and not the backbone chain.\n\n :param pdb_df: DataFrame to compute R group dataframe from\n :type pdb_df: pd.DataFrame\n :returns: Dataframe containing R-groups only (backbone atoms removed)\n :rtype: pd.DataFrame\n \"\"\"\n rgroup_df = filter_dataframe(pdb_df, \"atom_name\", BACKBONE_ATOMS, False)\n return rgroup_df\n\n\ndef process_dataframe(\n protein_df: pd.DataFrame,\n atom_df_processing_funcs: Optional[List[Callable]] = None,\n hetatom_df_processing_funcs: Optional[List[Callable]] = None,\n granularity: str = \"centroids\",\n chain_selection: str = \"all\",\n insertions: bool = False,\n deprotonate: bool = True,\n keep_hets: List[str] = [],\n verbose: bool = False,\n) -> pd.DataFrame:\n \"\"\"\n Process ATOM and HETATM dataframes to produce singular dataframe used for graph construction\n\n :param protein_df: Dataframe to process.\n Should be the object returned from `read_pdb_to_dataframe`.\n :type protein_df: pd.DataFrame\n :param atom_df_processing_funcs: List of functions to process dataframe. These must take in a dataframe and return a\n dataframe. Defaults to None.\n :type atom_df_processing_funcs: List[Callable], optional\n :param hetatom_df_processing_funcs: List of functions to process dataframe. These must take in a dataframe and return a dataframe\n :type hetatom_df_processing_funcs: List[Callable], optional\n :param granularity: The level of granualrity for the graph.\n This determines the node definition.\n Acceptable values include:\n - \"centroids\"\n - \"atoms\"\n - any of the atom_names in the PDB file (e.g. \"CA\", \"CB\", \"OG\", etc.)\n :type granularity: str\n :param insertions: Whether or not to keep insertions.\n :param insertions: bool\n :param deprotonate: Whether or not to remove hydrogen atoms (i.e. deprotonation).\n :type deprotonate: bool\n :param keep_hets: Hetatoms to keep. 
Defaults to an empty list.\n To keep a hetatom, pass it inside a list of hetatom names to keep.\n :type keep_hets: List[str]\n :param verbose: Verbosity level.\n :type verbose: bool\n :param chain_selection: Which protein chain to select. Defaults to \"all\". Eg can use \"ACF\"\n to select 3 chains (A, C & F :)\n :type chain_selection: str\n :return: A protein dataframe that can be consumed by\n other graph construction functions.\n :rtype: pd.DataFrame\n \"\"\"\n # TODO: Need to properly define what \"granularity\" is supposed to do.\n atoms = protein_df.df[\"ATOM\"]\n hetatms = protein_df.df[\"HETATM\"]\n\n # This block enables processing via a list of supplied functions operating on the atom and hetatom dataframes\n # If these are provided, the dataframe returned will be computed only from these and the default workflow\n # below this block will not execute.\n if atom_df_processing_funcs is not None:\n for func in atom_df_processing_funcs:\n atoms = func(atoms)\n if hetatom_df_processing_funcs is None:\n return atoms\n\n if hetatom_df_processing_funcs is not None:\n for func in hetatom_df_processing_funcs:\n hetatms = func(hetatms)\n return pd.concat([atoms, hetatms])\n\n # Deprotonate structure by removing H atoms\n if deprotonate:\n atoms = deprotonate_structure(atoms)\n\n # Restrict DF to desired granularity\n if granularity == \"centroids\":\n atoms = convert_structure_to_centroids(atoms)\n # elif granularity == \"atom\":\n # atoms = atoms\n else:\n atoms = subset_structure_to_atom_type(atoms, granularity)\n\n protein_df = atoms\n\n if len(keep_hets) > 0:\n hetatms_to_keep = filter_hetatms(atoms, keep_hets)\n protein_df = pd.concat([atoms, hetatms_to_keep])\n\n # Remove alt_loc residues\n if not insertions:\n protein_df = remove_insertions(protein_df)\n\n # perform chain selection\n protein_df = select_chains(\n protein_df, chain_selection=chain_selection, verbose=verbose\n )\n\n \"\"\"\n # Name nodes\n protein_df[\"node_id\"] = (\n 
protein_df[\"chain_id\"].apply(str)\n + \":\"\n + protein_df[\"residue_name\"]\n + \":\"\n + protein_df[\"residue_number\"].apply(str)\n )\n if granularity == \"atom\":\n protein_df[\"node_id\"] = (\n protein_df[\"node_id\"] + \":\" + protein_df[\"atom_name\"]\n )\n \"\"\"\n\n log.debug(f\"Detected {len(protein_df)} total nodes\")\n\n return protein_df\n\n\ndef assign_node_id_to_dataframe(\n protein_df: pd.DataFrame, granularity: str\n) -> pd.DataFrame:\n \"\"\"\n Assigns the node ID back to the pdb_df dataframe\n\n :param protein_df: Structure Dataframe\n :type protein_df: pd.DataFrame\n :param granularity: Granularity of graph. Atom-level, resiidue (e.g. CA) or centroids\n :type granularity: str\n :return: Returns dataframe with added node_ids\n :rtype: pd.DataFrame\n \"\"\"\n protein_df[\"node_id\"] = (\n protein_df[\"chain_id\"].apply(str)\n + \":\"\n + protein_df[\"residue_name\"]\n + \":\"\n + protein_df[\"residue_number\"].apply(str)\n )\n if granularity == \"atom\":\n protein_df[\"node_id\"] = (\n protein_df[\"node_id\"] + \":\" + protein_df[\"atom_name\"]\n )\n\n\ndef select_chains(\n protein_df: pd.DataFrame, chain_selection: str, verbose: bool = False\n) -> pd.DataFrame:\n \"\"\"\n Extracts relevant chains from protein_df\n\n :param protein_df: pandas dataframe of PDB subsetted to relevant atoms (CA, CB)\n :type protein_df: pd.DataFrame\n :param chain_selection: Specifies chains that should be extracted from the larger complexed structure\n :type chain_selection: str\n :param verbose: Print dataframe\n :type verbose: bool\n :return Protein structure dataframe containing only entries in the chain selection\n :rtype: pd.DataFrame\n \"\"\"\n if chain_selection != \"all\":\n protein_df = filter_dataframe(\n protein_df,\n by_column=\"chain_id\",\n list_of_values=list(chain_selection),\n boolean=True,\n )\n\n return protein_df\n\n\ndef initialise_graph_with_metadata(\n protein_df: pd.DataFrame,\n raw_pdb_df: pd.DataFrame,\n pdb_id: str,\n granularity: str,\n) 
-> nx.Graph:\n \"\"\"\n Initialises the nx Graph object with initial metadata\n\n :param protein_df: Processed Dataframe of protein structure\n :type protein_df: pd.DataFrame\n :param raw_pdb_df: Unprocessed dataframe of protein structure for comparison and traceability downstream\n :type raw_pdb_df: pd.DataFrame\n :param pdb_id: PDB Accession code\n :type pdb_id: str\n :param granularity: Granularity of the graph (eg \"atom\", \"CA\", \"CB\" etc or \"centroid\")\n :type granularity: str\n :return: Returns initial protein structure graph with metadata\n :rtype: nx.Graph\n \"\"\"\n G = nx.Graph(\n name=pdb_id,\n pdb_id=pdb_id,\n chain_ids=list(protein_df[\"chain_id\"].unique()),\n pdb_df=protein_df,\n raw_pdb_df=raw_pdb_df,\n rgroup_df=compute_rgroup_dataframe(raw_pdb_df),\n coords=np.asarray(protein_df[[\"x_coord\", \"y_coord\", \"z_coord\"]]),\n )\n\n # Create graph and assign intrinsic graph-level metadata\n G.graph[\"node_type\"] = granularity\n\n # Add Sequences to graph metadata\n for c in G.graph[\"chain_ids\"]:\n G.graph[f\"sequence_{c}\"] = (\n protein_df.loc[protein_df[\"chain_id\"] == c][\"residue_name\"]\n .apply(three_to_one_with_mods)\n .str.cat()\n )\n return G\n\n\ndef add_nodes_to_graph(\n G: nx.Graph,\n protein_df: Optional[pd.DataFrame] = None,\n verbose: bool = False,\n) -> nx.Graph:\n \"\"\"Add nodes into protein graph.\n\n :param G: nx.Graph with metadata to populate with nodes\n :type G: nx.Graph\n :protein_df: DataFrame of protein structure containing nodes & initial node metadata to add to the graph\n :type protein_df: pd.DataFrame, optional\n :param verbose: Controls verbosity of this step\n :type verbose: bool\n :returns: nx.Graph with nodes added\n :rtype: nx.Graph\n \"\"\"\n\n # If no protein dataframe is supplied, use the one stored in the Graph object\n if protein_df is None:\n protein_df = G.graph[\"pdb_df\"]\n # Assign intrinsic node attributes\n chain_id = protein_df[\"chain_id\"].apply(str)\n residue_name = 
protein_df[\"residue_name\"]\n residue_number = protein_df[\"residue_number\"] # .apply(str)\n coords = np.asarray(protein_df[[\"x_coord\", \"y_coord\", \"z_coord\"]])\n b_factor = protein_df[\"b_factor\"]\n atom_type = protein_df[\"atom_name\"]\n nodes = protein_df[\"node_id\"]\n element_symbol = protein_df[\"element_symbol\"]\n G.add_nodes_from(nodes)\n\n # Set intrinsic node attributes\n nx.set_node_attributes(G, dict(zip(nodes, chain_id)), \"chain_id\")\n nx.set_node_attributes(G, dict(zip(nodes, residue_name)), \"residue_name\")\n nx.set_node_attributes(\n G, dict(zip(nodes, residue_number)), \"residue_number\"\n )\n nx.set_node_attributes(G, dict(zip(nodes, atom_type)), \"atom_type\")\n nx.set_node_attributes(\n G, dict(zip(nodes, element_symbol)), \"element_symbol\"\n )\n nx.set_node_attributes(G, dict(zip(nodes, coords)), \"coords\")\n nx.set_node_attributes(G, dict(zip(nodes, b_factor)), \"b_factor\")\n\n # TODO: include charge, line_idx for traceability?\n if verbose:\n print(nx.info(G))\n print(G.nodes())\n\n return G\n\n\ndef calculate_centroid_positions(\n atoms: pd.DataFrame, verbose: bool = False\n) -> pd.DataFrame:\n \"\"\"\n Calculates position of sidechain centroids\n\n :param atoms: ATOM df of protein structure\n :type atoms: pd.DataFrame\n :param verbose: bool controlling verbosity\n :type verbose: bool\n :return: centroids (df)\n :rtype: pd.DataFrame\n \"\"\"\n centroids = (\n atoms.groupby(\"residue_number\")\n .mean()[[\"x_coord\", \"y_coord\", \"z_coord\"]]\n .reset_index()\n )\n if verbose:\n print(f\"Calculated {len(centroids)} centroid nodes\")\n log.debug(f\"Calculated {len(centroids)} centroid nodes\")\n return centroids\n\n\ndef compute_edges(\n G: nx.Graph,\n funcs: List[Callable],\n get_contacts_config: Optional[GetContactsConfig] = None,\n) -> nx.Graph:\n \"\"\"\n Computes edges for the protein structure graph. 
Will compute an pairwise distance matrix between nodes which is\n added to the graph metadata to facilitate some edge computations.\n\n :param G: nx.Graph with nodes to add edges to\n :type G: nx.Graph\n :param funcs: List of edge construction functions\n :type funcs: List[Callable]\n :param get_contacts_config: Config object for getcontacts if intramolecular edges are being used\n :type get_contacts_config: graphein.protein.config.GetContactsConfig\n :return: Graph with added edges\n :rtype: nx.Graph\n \"\"\"\n # This control flow prevents unnecessary computation of the distance matrices\n if \"config\" in G.graph:\n if G.graph[\"config\"].granularity == \"atom\":\n G.graph[\"atomic_dist_mat\"] = compute_distmat(G.graph[\"raw_pdb_df\"])\n else:\n G.graph[\"dist_mat\"] = compute_distmat(G.graph[\"pdb_df\"])\n\n for func in funcs:\n func(G)\n\n return G\n\n\ndef construct_graph(\n config: Optional[ProteinGraphConfig] = None,\n pdb_path: Optional[str] = None,\n pdb_code: Optional[str] = None,\n chain_selection: str = \"all\",\n df_processing_funcs: Optional[List[Callable]] = None,\n edge_construction_funcs: Optional[List[Callable]] = None,\n edge_annotation_funcs: Optional[List[Callable]] = None,\n node_annotation_funcs: Optional[List[Callable]] = None,\n graph_annotation_funcs: Optional[List[Callable]] = None,\n) -> nx.Graph:\n \"\"\"\n Constructs protein structure graph from a pdb_code or pdb_path. Users can provide a ProteinGraphConfig object.\n\n However, config parameters can be overridden by passing arguments directly to the function.\n\n :param config: ProteinGraphConfig object. 
If None, defaults to config in graphein.protein.config\n :type config: graphein.protein.config.ProteinGraphConfig, optional\n :param pdb_path: Path to pdb_file to build graph from\n :type pdb_path: str, optional\n :param pdb_code: 4-character PDB accession pdb_code to build graph from\n :type pdb_code: str, optional\n :param chain_selection: String of polypeptide chains to include in graph. E.g \"ABDF\" or \"all\"\n :type chain_selection: str, optional\n :param df_processing_funcs: List of dataframe processing functions\n :type df_processing_funcs: List[Callable], optional\n :param edge_construction_funcs: List of edge construction functions\n :type edge_construction_funcs: List[Callable], optional\n :param edge_annotation_funcs: List of edge annotation functions\n :type edge_annotation_funcs: List[Callable], optional\n :param node_annotation_funcs: List of node annotation functions\n :type node_annotation_funcs: List[Callable], optional\n :param graph_annotation_funcs: List of graph annotation function\n :type graph_annotation_funcs: List[Callable]\n :return: Protein Structure Graph\n :type: nx.Graph\n \"\"\"\n\n # If no config is provided, use default\n if config is None:\n config = ProteinGraphConfig()\n\n # Get name from pdb_file is no pdb_code is provided\n if pdb_path and (pdb_code is None):\n pdb_code = get_protein_name_from_filename(pdb_path)\n\n # If config params are provided, overwrite them\n config.protein_df_processing_functions = (\n df_processing_funcs\n if config.protein_df_processing_functions is None\n else config.protein_df_processing_functions\n )\n config.edge_construction_functions = (\n edge_construction_funcs\n if config.edge_construction_functions is None\n else config.edge_construction_functions\n )\n config.node_metadata_functions = (\n node_annotation_funcs\n if config.node_metadata_functions is None\n else config.node_metadata_functions\n )\n config.graph_metadata_functions = (\n graph_annotation_funcs\n if 
config.graph_metadata_functions is None\n else config.graph_metadata_functions\n )\n config.edge_metadata_functions = (\n edge_annotation_funcs\n if config.edge_metadata_functions is None\n else config.edge_metadata_functions\n )\n\n raw_df = read_pdb_to_dataframe(\n pdb_path,\n pdb_code,\n verbose=config.verbose,\n granularity=config.granularity,\n )\n protein_df = process_dataframe(\n raw_df, chain_selection=chain_selection, granularity=config.granularity\n )\n\n # Initialise graph with metadata\n g = initialise_graph_with_metadata(\n protein_df=protein_df,\n raw_pdb_df=raw_df.df[\"ATOM\"],\n pdb_id=pdb_code,\n granularity=config.granularity,\n )\n # Add nodes to graph\n g = add_nodes_to_graph(g)\n\n # Add config to graph\n g.graph[\"config\"] = config\n\n # Annotate additional node metadata\n if config.node_metadata_functions is not None:\n g = annotate_node_metadata(g, config.node_metadata_functions)\n\n # Compute graph edges\n g = compute_edges(\n g,\n funcs=config.edge_construction_functions,\n get_contacts_config=None,\n )\n\n # Annotate additional graph metadata\n if config.graph_metadata_functions is not None:\n g = annotate_graph_metadata(g, config.graph_metadata_functions)\n\n # Annotate additional edge metadata\n if config.edge_metadata_functions is not None:\n g = annotate_edge_metadata(g, config.edge_metadata_functions)\n\n return g\n\n\nif __name__ == \"__main__\":\n from functools import partial\n\n from graphein.protein.edges.distance import add_k_nn_edges\n from graphein.protein.features.sequence.sequence import molecular_weight\n\n configs = {\n \"granularity\": \"CA\",\n \"keep_hets\": False,\n \"insertions\": False,\n \"verbose\": False,\n \"get_contacts_config\": GetContactsConfig(),\n \"dssp_config\": DSSPConfig(),\n \"graph_metadata_functions\": [molecular_weight],\n }\n config = ProteinGraphConfig(**configs)\n config.edge_construction_functions = [\n partial(add_k_nn_edges, k=3, long_interaction_threshold=0)\n ]\n # Test High-level API\n g 
= construct_graph(\n config=config,\n pdb_path=\"../examples/pdbs/3eiy.pdb\",\n )\n\n \"\"\"\n # Test Low-level API\n raw_df = read_pdb_to_dataframe(\n pdb_path=\"../../examples/pdbs/3eiy.pdb\",\n verbose=config.verbose,\n )\n\n processed_pdb_df = process_dataframe(\n protein_df=raw_df,\n atom_df_processing_funcs=None,\n hetatom_df_processing_funcs=None,\n granularity=\"centroids\",\n chain_selection=\"all\",\n insertions=False,\n deprotonate=True,\n keep_hets=[],\n verbose=False,\n )\n\n g = initialise_graph_with_metadata(\n protein_df=processed_pdb_df,\n raw_pdb_df=raw_df.df[\"ATOM\"],\n pdb_id=\"3eiy\",\n granularity=config.granularity,\n )\n\n g = add_nodes_to_graph(g)\n\n g = annotate_node_metadata(g, [expasy_protein_scale, meiler_embedding])\n g = compute_edges(\n g,\n config.get_contacts_config,\n [\n add_delaunay_triangulation,\n peptide_bonds,\n salt_bridge,\n add_hydrogen_bond_interactions,\n ],\n )\n\n g = annotate_graph_metadata(\n g,\n [\n esm_sequence_embedding,\n biovec_sequence_embedding,\n molecular_weight,\n ],\n )\n\n print(nx.info(g))\n colors = nx.get_edge_attributes(g, \"color\").values()\n \"\"\"\n \"\"\"\n nx.draw(\n g,\n # pos = nx.circular_layout(g),\n edge_color=colors,\n with_labels=True,\n )\n plt.show()\n \"\"\"\n","sub_path":"graphein/protein/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":23613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"266963061","text":"from django.conf.urls import include, url\nfrom django.contrib.auth.decorators import login_required, permission_required\n\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', AlbumListView.as_view(), name='album_list_view'),\n url(r'^photos/$', PhotoListView.as_view(), name='photo_list_view'),\n url(r'^(?P[\\-\\d\\w]+)/$', AlbumDetailView.as_view(), name='album_detail_view'),\n # url(r'^photo/(?P[\\-\\d\\w]+)/(?P[\\-\\d\\w]+)$', PhotoDetailView.as_view(), name='photo_detail_view'),\n url(r'''^(?:(?P[^/]+))'''\n r'''(?:\\/(?P[^/]+))?$''',\n PhotoDetailView.as_view(), name='photo_detail_view'),\n url(r'^photo/(?P[\\-\\d\\w]+)$', PhotoDetailView.as_view(), name='photo_detail_view'),\n url(r'^photo/create/$', PhotoCreateView.as_view(), name='photo_create_view'),\n # url(r'^photo/create/$', AjaxPhotoUploadView.as_view(), name='photo_upload_view'),\n url(r'^photo/edit/(?P[\\-\\d\\w]+)$', PhotoUpdateView.as_view(), name='photo_edit_view'),\n url(r'^photo/delete/(?P[\\-\\d\\w]+)$', PhotoDeleteView.as_view(), name='photo_delete_view'),\n url(r'^album/create/$', AlbumCreateView.as_view(), name='album_create_view'),\n url(r'^album/edit/(?P[\\-\\d\\w]+)$', AlbumUpdateView.as_view(), name='album_edit_view'),\n url(r'^album/delete/(?P[\\-\\d\\w]+)$', AlbumDeleteView.as_view(), name='album_delete_view'),\n]\n\n ","sub_path":"gallery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"41489403","text":"from django.core import mail\nfrom selenium.webdriver.common.keys import Keys\nimport re\n\nfrom functional_tests.base import FunctionalTest\n\nTEST_EMAIL = '1091889012@qq.com'\nSUBJECT = 'Your login link for Superlists'\n\n\nclass LoginTest(FunctionalTest):\n def test_can_get_email_link_to_log_in(self):\n # 伊迪丝访问这个很棒的超级列表网站\n # 第一次注意到导航栏有登录区域\n # 看到要求输入电子邮件地址,她便输入了\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_name('email').send_keys(TEST_EMAIL)\n self.browser.find_element_by_name('email').send_keys(Keys.ENTER)\n\n # 出现一条消息,告诉她邮件已经发出\n self.wait_for(lambda: self.assertIn(\n 'Check your email',\n self.browser.find_element_by_tag_name('body').text\n ))\n\n # 她查看邮件,看到一条消息\n email = mail.outbox[0]\n self.assertIn(TEST_EMAIL, email.to)\n self.assertEqual(email.subject, SUBJECT)\n\n # 邮件中有个URL链接\n self.assertIn('Use this link to log in', email.body)\n url_search = re.search(r'http://.+/.+$', email.body)\n if not url_search:\n self.fail('Could not find url in email body:\\n'+email.body)\n url = url_search.group(0)\n self.assertIn(self.live_server_url, url)\n\n # 她点击了链接\n self.browser.get(url)\n\n # 她登录了\n self.wait_for(\n lambda: self.browser.find_element_by_link_text('Log out')\n )\n navbar=self.browser.find_element_by_css_selector('.navbar')\n self.assertIn(TEST_EMAIL, navbar.text)\n\n # 现在她要退出\n self.wait_for(\n lambda :self.browser.find_element_by_link_text('Log out').click()\n )\n # 她退出了\n self.wait_for(\n lambda :self.browser.find_element_by_name('email')\n )\n navbar=self.browser.find_element_by_css_selector('.navbar')\n self.assertNotIn(TEST_EMAIL,navbar.text)\n","sub_path":"functional_tests/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"398092929","text":"import sys\nimport os\n\n# Using https://stackoverflow.com/questions/51520/how-to-get-an-absolute-file-path-in-python\nutils_path = os.path.abspath(\"utils\")\n\n# Using https://askubuntu.com/questions/470982/how-to-add-a-python-module-to-syspath/471168\nsys.path.insert(0, utils_path)\n\nimport getROICOG as roicog\nfrom atlasUtility import queryAtlas\nimport nibabel as nib\nimport numpy as np\n\n\"\"\"\nThe folowing script goes through the AAL atlas index locations and finds\nthe COG for each of the region and check if it lies inside the region or not.\n\"\"\"\n\nAALROIindex = [\"2001\", \"2002\", \"2101\", \"2102\", \"2111\", \"2112\", \"2201\", \"2202\", \"2211\", \"2212\", \"2301\", \"2302\", \"2311\", \"2312\", \"2321\", \"2322\", \"2331\", \"2332\", \"2401\", \"2402\", \"2501\", \"2502\", \"2601\", \"2602\",\n \"2611\", \"2612\", \"2701\", \"2702\", \"3001\", \"3002\", \"4001\", \"4002\", \"4011\", \"4012\", \"4021\", \"4022\", \"4101\", \"4102\", \"4111\", \"4112\", \"4201\", \"4202\", \"5001\", \"5002\", \"5011\", \"5012\", \"5021\", \"5022\", \"5101\",\n \"5102\", \"5201\", \"5202\", \"5301\", \"5302\", \"5401\", \"5402\", \"6001\", \"6002\", \"6101\", \"6102\", \"6201\", \"6202\", \"6211\", \"6212\", \"6221\", \"6222\", \"6301\", \"6302\", \"6401\", \"6402\", \"7001\", \"7002\", \"7011\", \"7012\",\n \"7021\", \"7022\", \"7101\", \"7102\", \"8101\", \"8102\", \"8111\", \"8112\", \"8121\", \"8122\", \"8201\", \"8202\", \"8211\", \"8212\", \"8301\", \"8302\", \"9001\", \"9002\", \"9011\", \"9012\", \"9021\", \"9022\", \"9031\", \"9032\", \"9041\",\n \"9042\", \"9051\", \"9052\", \"9061\", \"9062\", \"9071\", \"9072\", \"9081\", \"9082\", \"9100\", \"9110\", \"9120\", \"9130\", \"9140\", \"9150\", \"9160\", \"9170\"]\n\n\natlas = '/home/varun/Projects/fmri/Autism-survey-connectivity-links-analysis/aalAtlas/AAL.nii'\n\nbrain = nib.load(atlas).get_data()\n\nobj = roicog.getROICOG(atlas)\n\n\ndef getNearestVoxel(brain_data, roi, 
COG):\n roi_mask = np.zeros(brain_data.shape)\n roi_mask[np.where(brain_data == roi)] = 1\n roiCoord = np.where(roi_mask == 1)\n\n peak_list = []\n dist = float(np.inf)\n for [x, y, z] in zip(roiCoord[0], roiCoord[1], roiCoord[2]):\n peak = [x, y, z]\n current_dist = abs(x - COG[0]) + abs(y - COG[1]) + abs(z - COG[2])\n if current_dist < dist:\n if len(peak_list) != 0:\n peak_list = []\n peak_list.append(peak)\n dist = current_dist\n elif current_dist == dist:\n peak_list.append(peak)\n dist = current_dist\n\n # The above 'For loop' might result in miltiple peak coordinates(peak list)\n # having same distance from COG Check which of the peak list has least\n # x coordinate i.e closest to midline (My heuristic) to select one peak\n\n x = float(np.inf)\n res = []\n for coordinates in peak_list:\n current_x = abs(coordinates[0])\n if current_x < x:\n res = []\n res.append(coordinates)\n elif current_x == x:\n res.append(coordinates)\n else:\n pass\n\n # Find the MNI coordinates of the peak coordinates\n MNI = []\n for res_peak in res:\n MNI.append(queryAtlas.XYZ2MNI2mm(res_peak))\n\n return MNI\n\n\nfor roi in AALROIindex:\n roi = int(roi)\n COG = obj.getCOG(roi)\n COG = [int(COG[0]), int(COG[1]), int(COG[2])]\n print('Index %s : %s' % (roi, COG))\n XYZ = queryAtlas.MNI2XYZ2mm(COG)\n if brain[XYZ[0], XYZ[1], XYZ[2]] != roi:\n print('COG Lies outside for ROI Index %s' % roi)\n newCOG = getNearestVoxel(brain, roi, XYZ)[0]\n print('Index %s : %s (Modified)' % (roi, newCOG))\n","sub_path":"utils/getAALCOG.py","file_name":"getAALCOG.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"555274808","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import JointState\nimport copy\nimport numpy as np\n\nclass JV_Estimator:\n def __init__(self):\n rospy.init_node('jv_estimator')\n # Subscriber to /joint_states\n rospy.Subscriber('joint_states', JointState, self.callbackJS, queue_size=1)\n # Publisher of /estimated_joint_states\n self.js_pub = rospy.Publisher('estimated_joint_states', JointState, queue_size=10)\n # Buffer storing previous joint positions\n self.previous_j_pos = None\n # Buffer storing previous joint velocities\n self.previous_j_vel = 0\n\n rospy.spin()\n\n def callbackJS(self,data):\n if self.previous_j_pos is None:\n self.previous_j_pos = copy.deepcopy(data.position)\n self.js_pub.publish(data)\n else:\n velocities = self.filtering_derivative(self.previous_j_pos,data.position,self.previous_j_vel,0.008,0.01)\n data.velocity = copy.deepcopy(velocities)\n self.js_pub.publish(data)\n self.previous_j_pos = copy.deepcopy(data.position)\n\n def filtering_derivative(self, q_t0, q_t1, previous_dq_dt,T_signal, T_filter):\n \"\"\"Short summary.\n\n An higher time contant for the filter gives a smoothter derivative with higher delay.\n\n Args:\n q_t0 (type): Description of parameter `q_t0`.\n q_t1 (type): Description of parameter `q_t1`.\n T_signal (type): Description of parameter `T_signal`.\n T_filter (type): Time constant of filter.\n\n Returns:\n type: Description of returned object.\n\n \"\"\"\n a = T_signal/(T_filter + T_signal)\n dq = np.subtract(np.array(q_t1),np.array(q_t0))\n dt = np.full(len(q_t0),T_signal)\n dq_dt = np.divide(dq,dt)\n\n filtered_dq_dt = (1-a)*np.array(previous_dq_dt) + a*dq_dt\n\n return filtered_dq_dt\n\n\n\n\nif __name__ == '__main__':\n try:\n jv_estimator = JV_Estimator()\n except rospy.ROSInterruptException:\n 
pass\n","sub_path":"ur_robot_server/scripts/joint_velocity_estimator.py","file_name":"joint_velocity_estimator.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"561959935","text":"from uvicorn.importer import import_from_string\nfrom uvicorn.main import Server\nfrom uvicorn.config import Config\nfrom uvicorn.supervisors import Multiprocess, StatReload\n\nfrom aioli.log import setup_logging\n\n\nclass UvicornServer(Server):\n def __init__(self, app_path, *args, **kwargs):\n super(UvicornServer, self).__init__(*args, **kwargs)\n self.app_path = app_path\n\n def run(self, *args, **kwargs):\n app = import_from_string(self.app_path)\n setup_logging(level=self.config.log_level.upper())\n app.load_units()\n\n super().run(*args, **kwargs)\n\n\ndef uvicorn_server(app_path, **kwargs):\n config = Config(app_path, **kwargs)\n setup_logging(level=config.log_level.upper())\n server = UvicornServer(app_path, config=config)\n\n if config.reload and not isinstance(app_path, str):\n config.logger_instance.warn(\n \"auto-reload only works when app is passed as an import string.\"\n )\n\n if isinstance(app_path, str) and (config.debug or config.reload):\n socket = config.bind_socket()\n supervisor = StatReload(config)\n supervisor.run(server.run, sockets=[socket])\n elif config.workers > 1:\n socket = config.bind_socket()\n supervisor = Multiprocess(config)\n supervisor.run(server.run, sockets=[socket])\n else:\n server.run()\n","sub_path":"aioli/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"535364578","text":"import nltk\nimport math \nimport operator\nimport numpy as np\nfrom pickle import dump, load\nfrom bs4 import BeautifulSoup\nfrom nltk.corpus import cess_esp\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import RegexpTokenizer,sent_tokenize\n\n'''\n+-----------------------------------------------------------------------+\n| |\n| This program obtains the context of one word |\n| and found the words similarity it. Using the |\n| Syntagmatic Relations of the words and |\n| Conditional Entropy |\n| |\n+-----------------------------------------------------------------------+\n'''\n\ndef cleaningText(text):\n soup = BeautifulSoup(text, 'lxml') \n text = soup.get_text()\n return text\n\ndef openText():\n f = open('../../Corpus/e961024.htm', encoding = 'utf-8')\n text = f.read()\n f.close()\n return text\n\ndef normalization(Sentences):\n sent=[]\n for s in Sentences:\n aux=tokens(s)\n sent.append(aux)\n return sent \n\ndef tokens(text):\n stop=stopwords.words('spanish')\n words = nltk.word_tokenize(text)\n words=[w.lower() for w in words if w.isalpha()]\n words=[w for w in words if w.lower() not in stop]\n words= \" \".join(words)\n return words\n\ndef tagSentences(Sentences,combined_tagger):\n Stag=[]\n for s in Sentences:\n aux=s.split(' ')\n s_tagged=combined_tagger.tag(aux)\n aux=cleanTagger(s_tagged)\n tag=' '.join(aux)\n Stag.append(tag)\n return Stag\n\ndef lemaSentences(Sentences,lemmas):\n sentencesL=[]\n for s in Sentences:\n aux=s.split(' ')\n newSentences=[]\n for word in aux:\n if word in lemmas:\n newSentences.append(lemmas[word])\n else:\n newSentences.append(word)\n sentencesL.append(' '.join(newSentences))\n return sentencesL\n\ndef getSentences(text):\n text=cleaningText(text)\n sentences=sent_tokenize(text)\n \n return sentences\n\ndef getVocabulary(text):\n stop=stopwords.words('spanish')\n t=cleaningText(text)\n t=nltk.word_tokenize(t)\n t=[w.lower() for w in t if w.isalpha()]\n t=[w for w in t if w.lower() not 
in stop]\n return t\n\ndef getProbability(word,sentences):\n suma=0\n for s in sentences:\n if(bool(s.count(word))):\n suma=suma+1\n return (suma+0.5) / ( len(sentences) + 1 )\n\ndef getProbability2(word1,word2,sentences):\n suma=0\n for s in sentences:\n if(bool(s.count(word1) and bool(s.count(word2)))):\n suma=suma+1\n return (suma + 0.25) / ( len(sentences) + 1 )\n\n'''\n-----------------------------------------------------------\n| | P1 p(w2=0) | P2 p(w2=1) |\n-----------------------------------------------------------\n| P3 p(W1=0) | P4 p(w1=0|w2=0) | P5 p(w1=0|w2=1) |\n-----------------------------------------------------------\n| P6 p(W1=1) | P7 p(w1=1|w2=0) | P8 p(w1=1|w2=1) |\n-----------------------------------------------------------\n\n'''\ndef getTableProbability( pWord1 , pWord2 , pW1AndpW2 ):\n p5 = pWord2 - pW1AndpW2\n p1 = 1 - pWord2\n p7 = pWord1 - pW1AndpW2\n p4 = p1 - p7\n p3 = p4 + p5\n table = [ p1 , pWord2 , p3 , p4 , p5 , pWord1 , p7 , pW1AndpW2 ]\n return table \n\ndef getEntropy( table ):\n x1 = table[3] * math.log2( table[3] / table[0] ) + table[6] * math.log2( table[6] / table[0] )\n x2 = table[4] * math.log2( table[4] / table[1] ) + table[7] * math.log2( table[7] / table[1] )\n H = -1 * ( x1 + x2 )\n return H \n\ndef lemmetization(tokens,lemmas):\n newTokens=[]\n for token in tokens:\n if token in lemmas:\n newTokens.append(lemmas[token])\n else:\n newTokens.append(token)\n return newTokens\n\ndef getGenerate():\n fopen=\"../../Corpus/generate.txt\"\n archivo= open(fopen,encoding='utf-8')\n lemmas={}\n for linea in archivo.readlines(): \n lemmas[linea.split(' ')[0].replace(\"#\",\"\")]=linea.split(' ')[-1][:-1]\n archivo.close()\n return lemmas\n\ndef generateTagger(): \n default_tagger=nltk.DefaultTagger('V')\n patterns=[ (r'.*o$', 'NMS'), # noun masculine singular\n (r'.*os$', 'NMP'), # noun masculine plural\n (r'.*a$', 'NFS'), # noun feminine singular\n (r'.*as$', 'NFP') # noun feminine plural\n ]\n 
regexp_tagger=nltk.RegexpTagger(patterns, backoff=default_tagger)\n cess_tagged_sents=cess_esp.tagged_sents()\n combined_tagger=nltk.UnigramTagger(cess_tagged_sents, backoff=regexp_tagger)\n \n return combined_tagger\n\ndef cleanTagger(s_tagged):\n list(s_tagged)\n vocabulary=[]\n for i in range(len(s_tagged)):\n vocabulary.append(s_tagged[i][0]+\" \"+s_tagged[i][1])\n \n return vocabulary\n\ndef pkl(f,info):\n output=open(f, 'wb')\n dump(info, output, -1)\n output.close()\n \ndef getPKL(fileName): \n with open(fileName,'rb') as f:\n return load(f)\n \nif __name__ == \"__main__\":\n #**********************************************************************************\n # Run the first time for generate the files .pkl\n #**********************************************************************************\n text=openText()\n #Vocabulario con lemmas\n print(\"get vocabulary\")\n lemmas = getGenerate()\n stop = stopwords.words('spanish')\n \n vocabulary= getVocabulary(text)\n vocabulary=lemmetization(vocabulary,lemmas)\n s_tagged=generateTagger(vocabulary)\n vocabulary=cleanTagger(s_tagged)\n vocabulary=sorted(set(vocabulary))\n pkl('vocabulary.pkl',vocabulary)\n \n sentences=getSentences(text)\n pkl('Sentences1.pkl',sentences)\n\n print(\"Normalization\")\n CleanSentences=normalization(sentences)\n pkl('Sentences2.pkl',CleanSentences)\n print(\"Lemmas\")\n lemmasSent=lemaSentences(CleanSentences,lemmas)\n pkl('Sentences3.pkl',lemmasSent)\n print(\"Tag\")\n print(lemmasSent)\n combined_tagger=generateTagger()\n tagSent=tagSentences(lemmasSent,combined_tagger)\n pkl('Sentences4.pkl',tagSent)\n\n #**********************************************************************************\n # Load the files .pkl\n #**********************************************************************************\n\n #vocabulary = getPKL('vocabulary.pkl')\n #sentences = getPKL('Sentences1.pkl')\n #CleanSentences = getPKL('Sentences2.pkl')\n #lemmasSent = getPKL('Sentences3.pkl')\n #tagSent = 
getPKL('Sentences4.pkl')\n\n #word1='grande aq0cs0'\n #word1='abastecer V'\n word1 = 'economía ncfs000'\n #word1='nacional aq0cs0'\n\n entropy = [ ]\n\n pWord1= getProbability( word1,tagSent )\n for termn in vocabulary:\n pWord2 = getProbability( termn,tagSent )\n pW1AndpW2 = getProbability2( word1 , termn , tagSent )\n table = getTableProbability( pWord1 , pWord2 , pW1AndpW2 ) \n H = getEntropy( table )\n entropy.append( (termn,H) )\n\n entropy = sorted(entropy,key=operator.itemgetter(1))\n\n fv=open('Entropy_'+ word1.split(' ')[0] +'.txt','w')\n for e in entropy:\n fv.write( '{:30}{:30}\\n'.format(e[0],e[1]) )\n\n","sub_path":"Practices/SyntagmaticRelations/Practice10/Practice10.py","file_name":"Practice10.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"212172694","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom luckycommon.account.model.account import AccountStatus\nfrom luckycommon.account.db import account as api_user\nfrom luckycommon.db import admin as admin_user\nfrom luckycommon.utils.decorator import JsonResponse\nfrom luckycommon.utils.exceptions import PermissionError\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef check_perm(url_array, perm, role):\n l = len(url_array)\n while l >= 1:\n url = '/'.join(url_array[0:l])\n if not url.endswith('/'):\n url += '/'\n k = admin_user.get_perm(url, perm)\n if k and k.min_role <= role:\n return True\n l -= 1\n return False\n\n\nclass UserMiddleware(object):\n\n \"\"\"get user_id and token from header\"\"\"\n\n def process_request(self, req):\n user_id, token = req.META.get(\n 'HTTP_X_AUTH_USER'), req.META.get('HTTP_X_AUTH_TOKEN')\n if not user_id:\n user_id, token = req.COOKIES.get('lucky_user_id'), req.COOKIES.get(\n 'lucky_user_token')\n if user_id and token:\n try:\n user_id = long(user_id)\n except ValueError:\n _LOGGER.error('user id format wrong!')\n req.user_id = None\n return\n if req.path.startswith('/api'):\n user = api_user.get_account(user_id)\n # check banned\n if not user or user.status == AccountStatus.BANNED.value:\n # raise PermissionError('forbidden')\n req.user_id = req.user = None\n return\n info = api_user.get_online_info(user_id, token)\n if info and info.deleted == 0:\n req.user_id = user_id\n for k in 'token', 'device_type', 'os_version', 'extend':\n v = getattr(info, k)\n setattr(user, k, v)\n req.user = user\n return\n else:\n _LOGGER.error(\n \"can't find user_id:%s, token:%s\", user_id, token)\n else:\n info = admin_user.get_online_info(user_id, token)\n if info and info.deleted == 0:\n req.user_id = user_id\n user = admin_user.get_user(user_id)\n if user.role > 0:\n url_array = req.path.split('/')\n if req.method == 'GET':\n need_perm = 1\n else:\n need_perm = 2\n if not check_perm(url_array, need_perm, 
user.role):\n return JsonResponse(dict(\n status=PermissionError.STATUS,\n msg=str('permission not enough')),\n status=PermissionError.HTTPCODE)\n else:\n req.user = user\n return\n else:\n return JsonResponse(dict(\n status=PermissionError.STATUS,\n msg=str(\"user is forbidden or not activited\")),\n status=PermissionError.HTTPCODE)\n\n req.user_id = req.user = None\n","sub_path":"luckyplatform/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"483534789","text":"# Find the contiguous subarray within an array (containing at least one number) which has the largest sum.\n# For example, given the array [−2,1,−3,4,−1,2,1,−5,4], the contiguous subarray [4,−1,2,1] has the largest sum = 6.\n\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def maxSubArray(self, nums):\n if not nums: return None\n l, g = nums[0], nums[0]\n for i in range(1, len(nums)):\n l = max(l+nums[i], nums[i])\n g = max(l, g)\n return g\n","sub_path":"LEETCODE/0053. Maximum Subarray.py","file_name":"0053. Maximum Subarray.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"179908309","text":"import pandas as pd\r\n\r\n\r\ndef merge_nearest_sample(\r\n left: pd.DataFrame,\r\n right: pd.DataFrame,\r\n dhid: str = \"bhid\",\r\n ifrom: str = \"from\",\r\n ito: str = \"to\",\r\n tolerance: float = 1.5,\r\n) -> pd.DataFrame:\r\n \"\"\"Merge right sample table with left by nearest downhole depth.\r\n\r\n Alex Trueman, 2019-08-01\r\n\r\n Useful for merging sample tables (tables with dhid, from, and to depth)\r\n where the from and to depths in the two table do not match. Rather than\r\n creating new records in the left table, the nearest record from the right\r\n table is assigned.This method can cause data loss and should be checked.\r\n\r\n Parameters\r\n ----------\r\n left : DataFrame to be updated, must have dhid, from, and to depths.\r\n right : DataFrame to merge, must have dhid, from, and to depths.\r\n dhid : Name of the hole identification column.\r\n ifrom : Name of the downhole sample from-depth column.\r\n ito : Name of the downhole sample to-depth column.\r\n tolerance : Tolerance parameter for finding nearest sample. Can be\r\n though of as a downhole distance within which a nearest sample\r\n must be found. 
This parameter should be tested for data loss.\r\n\r\n Return\r\n ------\r\n The left DataFrame is returned with the addition of columns from the\r\n right DataFrame.\r\n\r\n \"\"\"\r\n c_left = left.copy()\r\n c_right = right.copy()\r\n\r\n # Calculate sample mid-points.\r\n mid_samp = lambda df, ifrom, ito: df[ifrom] + (df[ito] - df[ifrom]) / 2\r\n c_left[\"mid_samp\"] = mid_samp(c_left, ifrom, ito)\r\n c_right[\"mid_samp\"] = mid_samp(c_right, ifrom, ito)\r\n\r\n # Prepare for merging.\r\n c_right.drop(columns=[ifrom, ito], inplace=True)\r\n c_right.sort_values(by=\"mid_samp\", inplace=True)\r\n c_left.sort_values(by=\"mid_samp\", inplace=True)\r\n\r\n # Merge by nearest midpoint and by hole id.\r\n df = (\r\n pd.merge_asof(\r\n c_left,\r\n c_right,\r\n on=\"mid_samp\",\r\n by=dhid,\r\n tolerance=tolerance,\r\n direction=\"nearest\",\r\n )\r\n .drop(columns=[\"mid_samp\"])\r\n .sort_values(by=[dhid, ifrom, ito])\r\n )\r\n\r\n return df\r\n","sub_path":"myutils/merge_nearest_sample.py","file_name":"merge_nearest_sample.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"616868188","text":"from __future__ import division\nimport numpy as np\nimport math\nimport collections\n\nimport xml.etree.ElementTree as ET\nfrom xml.dom import minidom\n\n\n\nclass Queue:\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0,item)\n\n def dequeue(self):\n return self.items.pop()\n\nclass info_gain:\n\tdef __init__(self, file_name, delimit = ',', names = True, skiprows=1, decision_column = -1):\n\t\tself.file_name = file_name\n\t\tself.delimit = delimit\n\t\tself.decision_column = decision_column #usually the last column which contains what you want to induce\n\n\t\tself.my_data = np.genfromtxt(self.file_name, delimiter = self.delimit, names = True, dtype=None) #Delimiter using numpy\n\t\tself.column_names = self.my_data.dtype.names #all column names\n\t\tself.classify_name = self.column_names[self.decision_column] #the column name to predict/classify\n\n\t\tself.q = Queue() #Queue Instance\n\t\tself.root = ET.Element('ROOT')\n\n\tdef process(self):\n\t\tself.root.set('table', self.my_data)\n\n\t\tself.q.enqueue(self.root)\n\t\tcounter = 0\n\n\t\twhile True :\n\t\t\tcurrent_tree_element = self.q.dequeue() #the current XML (sub)element\n\t\t\tcurrent_matrix = current_tree_element.get('table') #the current matrix to be looked at\n\t\t\tcurrent_tree_element.attrib.pop('table')\n\t\t\ttemp_classify_array = current_matrix[self.classify_name] # the last column usually which contains what we are trying to predict\n\t\t\tfirst_entropy = self.calculate_entropy(temp_classify_array) #the entropy\n\t\t\tnon_decision_column_names= tuple(list(current_matrix.dtype.names)[:-1])\n\n\t\t\tif first_entropy > 0:\t#If the entropy is not zero then run\n\t\t\t\tgains = []\n\n\t\t\t\t#determine which column has the highest gain?\n\t\t\t\tfor i in non_decision_column_names:\n\t\t\t\t\tgains.append(self.calculate_gain(current_matrix,i,first_entropy)) #append gains & 
rounding\n\n\t\t\t\t#this column index has the highest gain\n\t\t\t\tkey = gains.index(max(gains))\n\t\t\t\tkey_name = current_matrix.dtype.names[key]\n\n\t\t\t\tcurrent_tree_element.set('column_split', key_name)#adding attribute column split by column with highest gain\n\t\t\t\tcurrent_tree_element.set('gain',str(round(max(gains),4))) #gain tag\n\n\t\t\t\t#splits columns & enqueues new elements\n\t\t\t\tself.splitting_by_column(current_matrix, key_name, current_tree_element)\n\n\t\t\t#the entropy is zero\n\t\t\telse:\n\t\t\t\tcurrent_tree_element.set('Entropy',str(1.0)) #gain tag\n\t\t\t\tsolution = current_matrix[self.classify_name][0]\n\t\t\t\ttemp_element = ET.SubElement(current_tree_element,\"LEAF\")\n\t\t\t\ttemp_element.set('answer',str(solution))\n\n\n\t\t\t\n\t\t\tif self.q.isEmpty():\n\t\t\t\tbreak\n\n\t#calculate entropy\n\tdef calculate_entropy(self,table):\n\n\t\tself.frequency = collections.Counter(table)#frequency of yes and no's (Dictionary)\n\t\ttotal = sum(self.frequency.values())#Total yes and no's from dictionary\n\t\tself.entropy = 0\n\n\t\tfor keys in self.frequency: #loop through keys of frequency \n\t\t\tp_i = self.frequency[keys]/total #probability\n\t\t\tself.entropy+= -1 * p_i * math.log(p_i,2) #entropy\n\t\t\n\t\treturn self.entropy\n\n\t#info gained from splitting items \n\tdef calculate_gain(self,data_matrix,which_column,entropy_par_):#, column_number):\n\t\tfrequency = collections.Counter(data_matrix[which_column])\n\n\t\tlocal_total = sum(frequency.values())\n\t\tsets = zip(data_matrix[which_column], data_matrix[self.classify_name])\n\t\tcollect = collections.Counter(sets)\n\t\t\n\t\tlocal_info = 0 #initiating calculation for info_A\n\n\t\tfor keys in frequency:\n\t\t\tdenom = frequency[keys] #denominator or total\n\t\t\tinside_parenthesis = 0 \t #to be added to\n\n\t\t\tfor j in collect:\n\t\t\t\tif (j[0] == keys):\n\t\t\t\t\ttemp_p_i = collect[j]/denom\n\t\t\t\t\tinside_parenthesis+= -1 * temp_p_i * 
math.log(temp_p_i,2)\n\n\t\t\tlocal_info += denom/local_total * inside_parenthesis\n\n\t\treturn entropy_par_ - local_info\n\n\t#process the table and add vertices's for each splitting\n\tdef splitting_by_column(self, t_table, temp_name, parent):\n\t\ttemp_frequency = collections.Counter(t_table[temp_name]) #get frequency\n\n\t\t#Renaming Column Names by removing the column that will be removed\n\t\tnew_names = list(t_table.dtype.names)\n\t\tnew_names.remove(temp_name) #removing one column\n\n\t\tfor keys in temp_frequency:\t#creating sub_tables\n\t\t\tsub_table = t_table[np.where(t_table[temp_name] == keys)]\n\t\t\t\n\t\t\tsub_table = sub_table[new_names]\n\n\t\t\tnew_sub = ET.SubElement(parent,'BRANCH')\n\t\t\tnew_sub.set('value',str(keys))\n\t\t\tnew_sub.set('table', sub_table)\n\t\t\tself.q.enqueue(new_sub) #enqueuing new subelement to further calculate later\n\n\t#Write out XML Tree\n\tdef write_xml(self,new_file_name):\n\t\ts = ET.tostring(self.root) #convert XML to String\n\t\tf = open(new_file_name, 'w') #open file\n\t\tf.write(minidom.parseString(s).toprettyxml()) #write out to file\n\t\tf.close()\n\n\t#Bin specified rows into nominal data atributes\n\tdef binning(self):\n\t\t'Will bin specified rows into numeric atributes'\n\n\t#Run the trained tree on test data set.\n\tdef testing(self, test_data_file):\n\t\taccurate = 0\n\t\tincorrect = 0\n\t\tfor row in test_data_file:\n\t\t\t'Do nothing'\n\n\n\ndef main():\n\tbase = info_gain('iris_data.csv', delimit = ',', decision_column=4)\n\tbase.process()\n\tbase.write_xml('ID3.xml')\n\t#base.testing()\nmain()\n\n","sub_path":"ID3.py","file_name":"ID3.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"527921439","text":"\"\"\" This module contains utilities for plotting histgrams comparisons.\n\"\"\"\n\nimport math\nimport random\nimport sys\nif sys.version_info <= (2, 7):\n from enum import Enum\nimport ROOT\nfrom . import root_utils\nfrom .physics import MeasuredQuantity\n\nclass CompareHistograms:\n \"\"\" Compare histograms\n \"\"\"\n def __init__(self, name):\n self.name = name\n self.baseline_histogram = None\n self.histograms = None\n self.ratios = []\n self.fit_results = []\n self.opt_spectrum_baseline = \"\"\n self.opt_spectrum = \"\"\n self.opt_ratio = \"\"\n self.y_axis_ratio = \"Ratio\"\n self.do_spectra_plot = \"logy\"\n self.do_ratio_plot = \"lineary\"\n self.canvas_spectra = None\n self.canvas_ratio = None\n self.legend_spectra = None\n self.legend_ratio = None\n self.baseline_ratio = None\n self.marker_size = 1.2\n self.colors = [ROOT.kBlack, ROOT.kBlue + 2, ROOT.kRed + 2, ROOT.kGreen + 2, ROOT.kOrange + 2, ROOT.kAzure + 2, ROOT.kMagenta + 2, ROOT.kCyan + 2, ROOT.kPink + 1, ROOT.kTeal + 2, ROOT.kYellow + 2]\n self.markers = [ROOT.kOpenCircle, ROOT.kFullCircle, ROOT.kFullSquare, ROOT.kFullTriangleUp, ROOT.kFullTriangleDown, ROOT.kFullDiamond, ROOT.kFullStar, ROOT.kStar, ROOT.kFullCross, ROOT.kMultiply, ROOT.kPlus]\n self.lines = [1, 2, 9, 5, 7, 10, 4, 3, 6, 8]\n self.line_widths = [2] * 10\n self.fill_styles = [3001, 3002, 3003, 3004, 3005, 3006, 3007]\n self.main_histogram = None\n self.main_ratio_histogram = None\n self.max_spectrum = None\n self.min_spectrum = None\n self.max_ratio = None\n self.min_ratio = None\n self.results = None\n self.n_cols_leg_ratio = 1\n self.n_cols_leg_spectrum = 1\n self.x1_leg_ratio = 0.55\n self.x2_leg_ratio = 0.90\n self.y1_leg_ratio = 0.87\n self.x1_leg_spectrum = 0.55\n self.x2_leg_spectrum = 0.90\n self.y1_leg_spectrum = 0.87\n self.log_upper_space = 10 # this factor will be used to adjust the y axis in log scale\n self.log_lower_space = 2 # this factor will be used to adjust the y axis in log 
scale\n self.lin_upper_space = 0.9 # this factor will be used to adjust the y axis in linear scale\n self.lin_lower_space = 0.2 # this factor will be used to adjust the y axis in linear scale\n self.leg_text_size = 18\n self.leg_line_height = 0.06\n self.fixed_lower_ratio_bound = None\n\n self.baseline_for_ratio = None\n self.separate_baseline_uncertainty = False\n self.no_error_in_baseline = False\n self.ratio_relative_uncertainty = None\n self.ratio_relative_uncertainty_title = \"Rel. Unc.\"\n self.grid_y_ratio = True\n\n self.grid_y_spectrum = False\n\n self.fit_function = \"expo(0)+expo(2)\"\n self.do_spectrum_legend = True\n self.do_ratio_legend = True\n self.units = \"\"\n self.minimum_limit = 0\n\n def set_ratio_relative_uncertainty_from_histogram(self, hist):\n self.ratio_relative_uncertainty = hist.Clone(\"{0}_unc\".format(hist.GetName()))\n self.ratio_relative_uncertainty.SetTitle(self.ratio_relative_uncertainty_title)\n for ibin in range(0, self.ratio_relative_uncertainty.GetNbinsX() + 2):\n if self.ratio_relative_uncertainty.GetBinContent(ibin) != 0:\n self.ratio_relative_uncertainty.SetBinError(ibin, self.ratio_relative_uncertainty.GetBinError(ibin) / self.ratio_relative_uncertainty.GetBinContent(ibin))\n self.ratio_relative_uncertainty.SetBinContent(ibin, 1)\n else:\n self.ratio_relative_uncertainty.SetBinError(ibin, 0)\n self.ratio_relative_uncertainty.SetBinContent(ibin, 0)\n\n def add_stat(self, h):\n mean = MeasuredQuantity(h.GetMean(), h.GetMeanError(), self.units)\n sigma = MeasuredQuantity(h.GetStdDev(), h.GetStdDevError(), self.units)\n entry = self.legend_spectra.AddEntry(ROOT.nullptr, \"#mu = {}\".format(mean.to_string()), \"\")\n entry.SetTextSize(self.leg_text_size * 0.8)\n entry = self.legend_spectra.AddEntry(ROOT.nullptr, \"#sigma = {}\".format(sigma.to_string()), \"\")\n entry.SetTextSize(self.leg_text_size * 0.8)\n\n def prepare_spectra_canvas(self):\n if not self.canvas_spectra:\n print(\"Creating new canvas {0}\".format(self.name))\n 
self.canvas_spectra = ROOT.TCanvas(self.name, self.name)\n\n if self.do_spectrum_legend:\n if self.do_spectrum_legend == \"stat\":\n self.leg_line_height *= 2.5\n if self.legend_spectra:\n y1 = self.legend_spectra.GetY1() - self.leg_line_height * (len(self.histograms) + 1) / self.n_cols_leg_spectrum\n if y1 < 0.2: y1 = 0.2\n self.legend_spectra.SetY1(y1)\n else:\n y1 = self.y1_leg_spectrum - self.leg_line_height * (len(self.histograms) + 1) / self.n_cols_leg_spectrum\n if y1 < 0.2: y1 = 0.2\n self.legend_spectra = ROOT.TLegend(self.x1_leg_spectrum, y1, self.x2_leg_spectrum, self.y1_leg_spectrum)\n self.legend_spectra.SetName(\"{0}_legend\".format(self.canvas_spectra.GetName()))\n self.legend_spectra.SetNColumns(self.n_cols_leg_spectrum)\n self.legend_spectra.SetFillStyle(0)\n self.legend_spectra.SetBorderSize(0)\n self.legend_spectra.SetTextFont(43)\n self.legend_spectra.SetMargin(0.1)\n self.legend_spectra.SetTextSize(self.leg_text_size)\n\n if self.grid_y_spectrum:\n self.canvas_spectra.SetGridy()\n\n if \"hist\" in self.opt_spectrum_baseline:\n self.baseline_histogram.SetLineColor(self.colors[0])\n self.baseline_histogram.SetLineWidth(self.line_widths[0])\n self.baseline_histogram.SetLineStyle(self.lines[0])\n if self.do_spectrum_legend:\n self.legend_spectra.AddEntry(self.baseline_histogram, self.baseline_histogram.GetTitle(), \"l\")\n if self.do_spectrum_legend == \"stat\":\n self.add_stat(self.baseline_histogram)\n elif \"e2\" in self.opt_spectrum_baseline:\n self.baseline_histogram.SetLineColor(self.colors[0])\n self.baseline_histogram.SetFillColor(self.colors[0])\n self.baseline_histogram.SetLineWidth(1)\n self.baseline_histogram.SetLineStyle(self.lines[0])\n self.baseline_histogram.SetFillStyle(self.fill_styles[0])\n if self.do_spectrum_legend:\n self.legend_spectra.AddEntry(self.baseline_histogram, self.baseline_histogram.GetTitle(), \"f\")\n if self.do_spectrum_legend == \"stat\":\n self.add_stat(self.baseline_histogram)\n else:\n 
self.baseline_histogram.SetMarkerColor(self.colors[0])\n self.baseline_histogram.SetLineColor(self.colors[0])\n self.baseline_histogram.SetMarkerStyle(self.markers[0])\n self.baseline_histogram.SetMarkerSize(self.marker_size)\n if self.do_spectrum_legend:\n self.legend_spectra.AddEntry(self.baseline_histogram, self.baseline_histogram.GetTitle(), \"pe\")\n if self.do_spectrum_legend == \"stat\":\n self.add_stat(self.baseline_histogram)\n\n if isinstance(self.baseline_histogram, ROOT.TGraph):\n if not \"a\" in self.opt_spectrum_baseline or not \"A\" in self.opt_spectrum_baseline:\n self.opt_spectrum_baseline += \"A\"\n \n if len(self.baseline_histogram.GetListOfFunctions()) > 0:\n for obj in self.baseline_histogram.GetListOfFunctions():\n if isinstance(obj, ROOT.TF1):\n obj.SetLineColor(self.colors[0])\n obj.SetLineStyle(self.lines[0])\n obj.SetLineWidth(self.line_widths[0])\n\n print(\"Plotting histogram '{0}' with option '{1}'\".format(self.baseline_histogram.GetName(), self.opt_spectrum_baseline))\n self.canvas_spectra.cd()\n self.baseline_histogram.Draw(self.opt_spectrum_baseline)\n\n if \"frac\" in self.baseline_histogram.GetYaxis().GetTitle():\n self.canvas_spectra.SetLeftMargin(0.12)\n self.baseline_histogram.GetYaxis().SetTitleOffset(1.4)\n if \"same\" not in self.opt_spectrum:\n self.opt_spectrum += \"same\"\n\n if not self.main_histogram:\n if isinstance(self.baseline_histogram, ROOT.TH1):\n self.main_histogram = self.baseline_histogram\n elif isinstance(self.baseline_histogram, ROOT.TGraph):\n self.main_histogram = self.baseline_histogram.GetHistogram()\n else:\n print(\"Type of object '{}' not recognized!\".format(self.baseline_histogram))\n self.baseline_for_ratio = self.baseline_histogram.Clone(\"{0}_copy\".format(self.baseline_histogram.GetName()))\n\n minimum = root_utils.find_minimum(self.baseline_histogram, self.minimum_limit, \"hist\" not in self.opt_spectrum_baseline)\n if not minimum is None:\n if self.min_spectrum is None:\n self.min_spectrum = 
minimum\n else:\n self.min_spectrum = min(self.min_spectrum, minimum)\n maximum = root_utils.find_maximum(self.baseline_histogram, self.minimum_limit, \"hist\" not in self.opt_spectrum_baseline)\n if not maximum is None:\n if self.max_spectrum is None:\n self.max_spectrum = maximum\n else:\n self.max_spectrum = max(self.max_spectrum, maximum)\n\n def prepare_ratio_canvas(self):\n cname = \"{0}_Ratio\".format(self.name)\n if not self.canvas_ratio:\n self.canvas_ratio = ROOT.TCanvas(cname, cname)\n self.canvas_ratio.cd()\n\n n = len(self.histograms)\n if self.ratio_relative_uncertainty or self.separate_baseline_uncertainty:\n n += 1\n\n if self.do_ratio_legend:\n if self.legend_ratio:\n y1 = self.legend_ratio.GetY1() - self.leg_line_height * n / self.n_cols_leg_ratio\n if y1 < 0.2: y1 = 0.2\n self.legend_ratio.SetY1(y1)\n else:\n y1 = self.y1_leg_ratio - self.leg_line_height * n / self.n_cols_leg_ratio\n if y1 < 0.2: y1 = 0.2\n self.legend_ratio = ROOT.TLegend(self.x1_leg_ratio, y1, self.x2_leg_ratio, self.y1_leg_ratio)\n self.legend_ratio.SetName(\"{0}_legend\".format(self.canvas_ratio.GetName()))\n self.legend_ratio.SetNColumns(self.n_cols_leg_ratio)\n self.legend_ratio.SetFillStyle(0)\n self.legend_ratio.SetBorderSize(0)\n self.legend_ratio.SetTextFont(43)\n self.legend_ratio.SetTextSize(self.leg_text_size)\n\n if self.grid_y_ratio:\n self.canvas_ratio.SetGridy()\n\n if self.separate_baseline_uncertainty or self.no_error_in_baseline:\n for ibin in range(0, self.baseline_for_ratio.GetNbinsX() + 2):\n self.baseline_for_ratio.SetBinError(ibin, 0)\n\n if self.separate_baseline_uncertainty:\n self.set_ratio_relative_uncertainty_from_histogram(self.baseline_histogram)\n opt = \"e2\"\n if \"same\" in self.opt_ratio:\n opt += \"same\"\n h = self.ratio_relative_uncertainty.DrawCopy(opt)\n h.SetFillColor(self.colors[0])\n h.SetFillStyle(self.fill_styles[0])\n h.SetLineColor(self.colors[0])\n h.GetYaxis().SetTitle(self.y_axis_ratio)\n if self.do_ratio_legend: 
self.legend_ratio.AddEntry(h, h.GetTitle(), \"f\")\n self.results.append(h)\n if \"same\" not in self.opt_ratio:\n self.opt_ratio += \"same\"\n if not self.main_ratio_histogram:\n self.main_ratio_histogram = h\n minimum = root_utils.find_minimum(h, self.minimum_limit, True)\n if not minimum is None:\n if self.min_ratio is None:\n self.min_ratio = minimum\n else:\n self.min_ratio = min(self.min_ratio, minimum)\n maximum = root_utils.find_maximum(h, self.minimum_limit, True)\n if not maximum is None:\n if self.max_ratio is None:\n self.max_ratio = maximum\n else:\n self.max_ratio = max(self.max_ratio, maximum)\n\n def plot_histogram(self, color, marker, line, lwidth, h):\n minimum = root_utils.find_minimum(h, self.minimum_limit, not \"hist\" in self.opt_spectrum)\n if not minimum is None:\n if self.min_spectrum is None:\n self.min_spectrum = minimum\n else:\n self.min_spectrum = min(self.min_spectrum, minimum)\n maximum = root_utils.find_maximum(h, self.minimum_limit, not \"hist\" in self.opt_spectrum)\n if not maximum is None:\n if self.max_spectrum is None:\n self.max_spectrum = maximum\n else:\n self.max_spectrum = max(self.max_spectrum, maximum)\n\n print(\"Plotting histogram '{0}' with option '{1}'\".format(h.GetName(), self.opt_spectrum))\n self.canvas_spectra.cd()\n h.Draw(self.opt_spectrum)\n\n if \"hist\" in self.opt_spectrum:\n h.SetLineColor(color)\n h.SetLineWidth(lwidth)\n h.SetLineStyle(line)\n if self.do_spectrum_legend:\n self.legend_spectra.AddEntry(h, h.GetTitle(), \"l\")\n if self.do_spectrum_legend == \"stat\":\n self.add_stat(h)\n else:\n h.SetMarkerColor(color)\n h.SetLineColor(color)\n h.SetMarkerStyle(marker)\n h.SetMarkerSize(self.marker_size)\n if self.do_spectrum_legend:\n self.legend_spectra.AddEntry(h, h.GetTitle(), \"pe\")\n if self.do_spectrum_legend == \"stat\":\n self.add_stat(h)\n\n if len(h.GetListOfFunctions()) > 0:\n for obj in h.GetListOfFunctions():\n if isinstance(obj, ROOT.TF1):\n obj.SetLineColor(color)\n 
obj.SetLineStyle(line)\n obj.SetLineWidth(lwidth)\n\n def fit_and_make_consistent(self, h, templateH):\n fit_func = ROOT.TF1(\"{0}_fit\".format(h.GetName()), self.fit_function, h.GetXaxis().GetXmin(), h.GetXaxis().GetXmax())\n fit_func.SetParameter(0, 1)\n fit_func.SetParameter(1, -0.5)\n fit_func.SetParameter(2, 0)\n fit_func.SetParameter(3, 0)\n fit_func.SetParameter(0, 1. / fit_func.Eval(h.GetXaxis().GetBinCenter(1)))\n fit_func.SetParameter(2, 1)\n fit_func.SetParameter(3, -0.2)\n fit_func.SetParameter(2, 1. / fit_func.Eval(h.GetXaxis().GetBinCenter(int(h.GetNbinsX() / 2))))\n fitR = h.Fit(fit_func, \"NS\")\n fitOk = int(fitR)\n if not fitOk == 0: return None\n h_fit = root_utils.soft_clone(templateH, \"{0}_fith\".format(h.GetName()))\n for ibin in range(1, h_fit.GetNbinsX() + 1):\n valErr = fit_func.IntegralError(h_fit.GetXaxis().GetBinLowEdge(ibin), h_fit.GetXaxis().GetBinUpEdge(ibin)) / (h_fit.GetXaxis().GetBinUpEdge(ibin) - h_fit.GetXaxis().GetBinLowEdge(ibin))\n val = fit_func.Integral(h_fit.GetXaxis().GetBinLowEdge(ibin), h_fit.GetXaxis().GetBinUpEdge(ibin)) / (h_fit.GetXaxis().GetBinUpEdge(ibin) - h_fit.GetXaxis().GetBinLowEdge(ibin))\n print(\"integral = {0:.5f}, central = {1:.5f}\".format(val, fit_func.Eval((h_fit.GetXaxis().GetBinCenter(ibin)))))\n h_fit.SetBinContent(ibin, val)\n h_fit.SetBinError(ibin, valErr)\n self.fit_results.append(fit_func)\n self.fit_results.append(h_fit)\n fit_func.SetLineColor(h.GetLineColor())\n self.canvas_spectra.cd()\n fit_func.Draw(\"same\")\n return h_fit\n\n def rebin_and_make_consistent(self, h, templateH):\n return root_utils.rebin_1D(h, templateH.GetXaxis())\n\n def plot_ratio(self, color, marker, line, lwidth, h):\n compBinning = root_utils.AxisCompare.check_consistency(h.GetXaxis(), self.baseline_for_ratio.GetXaxis())\n print(\"Result of the binning comparison between {} and {} is: {}\".format(h.GetName(), self.baseline_for_ratio.GetName(), compBinning))\n if compBinning == root_utils.AxisCompare.Identical:\n 
hRatio = h.Clone(\"{0}_Ratio\".format(h.GetName()))\n elif compBinning == root_utils.AxisCompare.ContainsSameBinning or compBinning == root_utils.AxisCompare.IsContainedSameBinning or compBinning == root_utils.AxisCompare.OverlapsSameBinning:\n print(\"Trying to rebin histogram {0}\".format(h.GetName()))\n hRatio = self.rebin_and_make_consistent(h, self.baseline_for_ratio)\n if not hRatio:\n print(\"Rebin unsuccessfull!\")\n return\n elif compBinning == root_utils.AxisCompare.Contains or compBinning == root_utils.AxisCompare.IsContained or compBinning == root_utils.AxisCompare.Overlaps:\n print(\"Need to rebin.\")\n bins = \"[\"\n for x in h.GetXaxis().GetXbins(): bins += \"{}, \".format(x)\n bins = bins[:-2]\n bins += \"]\"\n print(\"Original binning: {}\".format(bins))\n bins = \"[\"\n for x in self.baseline_for_ratio.GetXaxis().GetXbins(): bins += \"{}, \".format(x)\n bins = bins[:-2]\n bins += \"]\"\n print(\"Final binning: {}\".format(bins))\n print(\"Trying to fit histogram {0} with function {1}\".format(h.GetName(), self.fit_function))\n hRatio = self.fit_and_make_consistent(h, self.baseline_for_ratio)\n if not hRatio:\n print(\"Fit unsuccessfull!\")\n return\n elif compBinning == root_utils.AxisCompare.NoOverlap:\n print(\"The two histograms {}, {} have no overlap. 
Unable to generate a ratio.\".format(h.GetName(), self.baseline_for_ratio.GetName()))\n return\n else:\n print(\"compare_histograms, plot_ration: Should not end up here!\")\n exit(1)\n\n hRatio.GetYaxis().SetTitle(self.y_axis_ratio)\n if not self.baseline_ratio:\n self.baseline_ratio = hRatio\n if \"hist\" in self.opt_ratio:\n hRatio.SetLineColor(color)\n hRatio.SetLineWidth(lwidth)\n hRatio.SetLineStyle(line)\n if self.do_ratio_legend: self.legend_ratio.AddEntry(hRatio, h.GetTitle(), \"l\")\n else:\n hRatio.SetMarkerColor(color)\n hRatio.SetLineColor(color)\n hRatio.SetMarkerStyle(marker)\n hRatio.SetLineStyle(1)\n hRatio.SetMarkerSize(self.marker_size)\n if self.do_ratio_legend: self.legend_ratio.AddEntry(hRatio, h.GetTitle(), \"pe\")\n self.ratios.append(hRatio)\n hRatio.SetTitle(\"{0} Ratio\".format(h.GetTitle()))\n hRatio.Divide(self.baseline_for_ratio)\n self.canvas_ratio.cd()\n hRatio.Draw(self.opt_ratio)\n if not self.main_ratio_histogram:\n self.main_ratio_histogram = hRatio\n minimum = root_utils.find_minimum(hRatio, self.minimum_limit, not \"hist\" in self.opt_ratio)\n if not minimum is None:\n if self.min_ratio is None:\n self.min_ratio = minimum\n else:\n self.min_ratio = min(self.min_ratio, minimum)\n maximum = root_utils.find_maximum(hRatio, self.minimum_limit, not \"hist\" in self.opt_ratio)\n if not maximum is None:\n if self.max_ratio is None:\n self.max_ratio = maximum\n else:\n self.max_ratio = max(self.max_ratio, maximum)\n\n if not \"same\" in self.opt_ratio:\n self.opt_ratio += \" same\"\n\n def compare_spectra(self, baseline, histos):\n while len(histos) + 1 > len(self.colors): self.colors += random.sample(self.colors[1:], len(self.colors) - 1)\n while len(histos) + 1 > len(self.markers): self.markers += random.sample(self.markers[1:], len(self.markers) - 1)\n while len(histos) + 1 > len(self.lines): self.lines += random.sample(self.lines[1:], len(self.lines) - 1)\n while len(histos) + 1 > len(self.line_widths): self.line_widths += 
random.sample(self.line_widths[1:], len(self.line_widths) - 1)\n while len(histos) + 1 > len(self.fill_styles): self.fill_styles += random.sample(self.fill_styles[1:], len(self.fill_styles) - 1)\n self.results = []\n print(\"compare_spectra: {0}\".format(self.name))\n self.baseline_histogram = baseline\n self.histograms = histos\n if not isinstance(baseline, ROOT.TH1) and self.do_ratio_plot:\n print(\"Ratio is only available for histograms. Option is disabled.\")\n self.do_ratio_plot = False\n print(\"Baseline: {0}\".format(self.baseline_histogram.GetName()))\n for s in self.histograms:\n print(s.GetName())\n\n if self.do_spectra_plot:\n self.prepare_spectra_canvas()\n\n if self.do_ratio_plot:\n self.prepare_ratio_canvas()\n\n for color, marker, line, linew, h in zip(self.colors[1:], self.markers[1:], self.lines[1:], self.line_widths[1:], self.histograms):\n if self.do_spectra_plot:\n self.plot_histogram(color, marker, line, linew, h)\n if self.do_ratio_plot:\n self.plot_ratio(color, marker, line, linew, h)\n self.adjust_y_limits()\n self.generate_results()\n if self.main_histogram not in self.histograms:\n self.results.append(self.main_histogram)\n return self.results\n\n def compare_uncertainties(self, baseline, histos):\n baseline_unc = root_utils.get_relative_uncertainty(baseline)\n histos_unc = [root_utils.get_relative_uncertainty(h) for h in histos]\n self.CompareSpectra(baseline_unc, histos_unc)\n self.results.append(baseline_unc)\n self.results.extend(histos_unc)\n return self.results\n\n def generate_results(self):\n self.results.extend(self.ratios)\n self.results.extend(self.fit_results)\n if self.canvas_spectra:\n self.results.append(self.canvas_spectra)\n if self.canvas_ratio:\n self.results.append(self.canvas_ratio)\n if self.legend_spectra:\n self.results.append(self.legend_spectra)\n if self.legend_ratio:\n self.results.append(self.legend_ratio)\n if self.ratio_relative_uncertainty:\n self.results.append(self.ratio_relative_uncertainty)\n\n def 
adjust_y_limits(self):\n if not self.max_ratio is None and not self.min_ratio is None and self.do_ratio_plot:\n print(\"Adjusting y limits for Ratio\")\n if self.min_ratio <= 0 and self.do_ratio_plot == \"logy\":\n print(\"{}: requested logy ratio, but minimum is <= 0. Switching to linear scale.\".format(self.name))\n self.do_ratio_plot = \"lineary\"\n if self.do_ratio_plot == \"logy\":\n max = self.max_ratio * self.log_upper_space\n min = self.min_ratio / self.log_lower_space\n self.canvas_ratio.SetLogy()\n else:\n max = self.max_ratio + (self.max_ratio - self.min_ratio) * self.lin_upper_space\n min = self.min_ratio - (self.max_ratio - self.min_ratio) * self.lin_lower_space\n if min < 0 and self.min_ratio > 0: min = 0\n self.main_ratio_histogram.GetYaxis().UnZoom()\n self.main_ratio_histogram.SetMinimum(min)\n self.main_ratio_histogram.SetMaximum(max)\n if self.do_ratio_legend:\n self.canvas_ratio.cd()\n self.legend_ratio.Draw()\n if not self.fixed_lower_ratio_bound is None:\n self.main_ratio_histogram.SetMinimum(self.fixed_lower_ratio_bound)\n\n if not self.max_spectrum is None and not self.min_spectrum is None and self.do_spectra_plot:\n print(\"Adjusting y limits for Spectrum\")\n if self.min_spectrum <= 0 and self.do_spectra_plot == \"logy\":\n print(\"{}: requested logy spectra, but minimum is <= 0. 
Switching to linear scale.\".format(self.name))\n self.do_spectra_plot = \"lineary\"\n if self.do_spectra_plot == \"logy\":\n max = self.max_spectrum * self.log_upper_space\n min = self.min_spectrum / self.log_lower_space\n self.canvas_spectra.SetLogy()\n else:\n max = self.max_spectrum + (self.max_spectrum - self.min_spectrum) * self.lin_upper_space\n min = self.min_spectrum - (self.max_spectrum - self.min_spectrum) * self.lin_lower_space\n if min < 0 and self.min_spectrum > 0: min = 0\n self.main_histogram.SetMinimum(min)\n self.main_histogram.SetMaximum(max)\n if self.do_spectrum_legend:\n self.canvas_spectra.cd()\n self.legend_spectra.Draw()\n","sub_path":"pyutils/compare_histograms.py","file_name":"compare_histograms.py","file_ext":"py","file_size_in_byte":25032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"628338335","text":"def ricBinaria(v, x):\r\n inizio = 0\r\n fine = len(v) - 1\r\n while inizio <= fine:\r\n print(\"x\")\r\n med = (inizio + fine) // 2\r\n if v[med] == x:\r\n return med\r\n if v[med] > x:\r\n fine = med - 1\r\n else:\r\n inizio = med + 1\r\n return -1\r\n\r\n#Versione alternativa \r\ndef ricBinaria2(v, x, inizio, fine):\r\n print(\"x\")\r\n if inizio > fine:\r\n return -1\r\n med = (inizio + fine) // 2\r\n if v[med] == x:\r\n return med\r\n if v[med] > x:\r\n return ricBinaria2(v, x, inizio, med-1)\r\n else:\r\n return ricBinaria2(v, x, med+1, fine)\r\n return -1\r\n\r\n#Verzione per potenza di 2\r\n#Assumiamo il n elementi una potenza di 2\r\n##_INCOMPLETO_##\r\n##def ricBinariaPotenza(v, x):\r\n## inizio = 0\r\n## fine = len(v) - 1\r\n## while inizio <= fine:\r\n## med = (inizio + fine) // 2\r\n## if v[med] == x:\r\n## return med\r\n## if v[med] > x:\r\n## fine = med - 1\r\n## else:\r\n## inizio = med + 1\r\n## return -1\r\n##_INCOMPLETO_##\r\n\r\nv = [1,5,6,11,13,16,18,24,30,36,44,49,62,123,157,255]\r\nx = int(input(\"\"))\r\nl = ricBinaria(v, x)\r\n\r\nprint(l)\r\n \r\n","sub_path":"ricBinaria.py","file_name":"ricBinaria.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"531531191","text":"class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass Solution(object):\n\tdef getIntersectionNode(self, headA, headB):\n\t\tnodeA, nodeB = headA, headB\n\t\twhile nodeA != nodeB:\n\t\t\tnodeA = nodeA.next if nodeA else nodeB\n\t\t\tnodeB = nodeB.next if nodeB else nodeA\n\n\t\treturn nodeA\n\n\tdef getIntersectionNode_diff(self, headA, headB):\n\t\tdef get_length(node):\n\t\t\tlength = 0\n\t\t\twhile node:\n\t\t\t\tnode = node.next\n\t\t\t\tlenght += 1\n\t\t\treturn length \n\n\t\tlen1 = get_length(headA)\n\t\tlen2 = get_length(headB)\n\n\t\tif len1 > len2:\n\t\t\tfor __ in range(len1 - len2):\n\t\t\t\theadA = headA.next\n\t\telse:\n\t\t\tfor __ in range(len2 - len1):\n\t\t\t\theadB = headB.next\n\t\twhile headA:\n\t\t\tif headA == headB:\n\t\t\t\treturn headA\n\t\t\theadA = headA.next\n\t\t\theadB = headB.next\n\t\t\t","sub_path":"Part4/Insersection_Of_Two_Linked_Lists.py","file_name":"Insersection_Of_Two_Linked_Lists.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"467543837","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.nn.init as init\r\n\r\n\r\nclass Conv2d_ReLU(nn.Module):\r\n \"\"\" Convolutional layer followed by ReLU activation\r\n\r\n Methods:\r\n forward: compute the output of the module given the input\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, kernel_size, stride):\r\n \"\"\" Parameters:\r\n in_channels: number of input channels\r\n out_channels: number of output channels\r\n kernel_size: kernel size\r\n stride: stride\r\n \"\"\"\r\n super(Conv2d_ReLU, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=in_channels,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size,\r\n stride=stride)\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n\r\n Args:\r\n x: input Tensor of shape [batch_size, in_channels, hi, wi]\r\n Returns:\r\n out_conv: output Tensor of shape [batch_size, out_channels, ho, wo]\r\n \"\"\"\r\n out_conv = self.conv(x) # Convolution\r\n out_conv = self.relu(out_conv) # Activation\r\n\r\n return out_conv\r\n\r\n\r\nclass Conv2d_BN_ReLU(nn.Module):\r\n \"\"\" Convolutional layer followed by Batch Normalization and ReLU activation\r\n\r\n Methods:\r\n forward: compute the output of the module given the input\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding=1, momentum=0.99, eps=0.001):\r\n \"\"\" Parameters:\r\n in_channels: number of input channels\r\n out_channels: number of output channels\r\n kernel_size: kernel size\r\n stride: stride\r\n padding: padding for the convolution (default = 1)\r\n momentum: momentum for the Batch Normalization (default = 0.99)\r\n eps: value for numerical stability of the Batch Normalization (default = 0.001)\r\n \"\"\"\r\n super(Conv2d_BN_ReLU, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,\r\n stride=stride, 
padding=padding)\r\n self.batchnorm = nn.BatchNorm2d(num_features=out_channels, momentum=momentum, eps=eps)\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n\r\n Args:\r\n x: input Tensor of shape [batch_size, in_channels, hi, wi]\r\n Returns:\r\n out_conv: output Tensor of shape [batch_size, out_channels, ho, wo]\r\n \"\"\"\r\n out_conv = self.conv(x)\r\n out_conv = self.relu(out_conv)\r\n out_conv = self.batchnorm(out_conv)\r\n\r\n return out_conv\r\n\r\n\r\ndef squash(x, dim=2):\r\n \"\"\" Computes the squash function of a Tensor along dimension dim\r\n\r\n Args:\r\n x: input tensor\r\n dim: dimension along which the squash must be performed (default = 2)\r\n\r\n Returns:\r\n A tensor squashed along dimension dim of the same shape of x \"\"\"\r\n norm = torch.norm(x, dim=dim, keepdim=True)\r\n return x * norm / (1 + norm ** 2)\r\n\r\n\r\ndef update_routing(votes, logits, iterations, bias):\r\n \"\"\" Dynamic routing algorithm (paper: Dynamic Routing Between Capsules, Sabour et al., 2017)\r\n\r\n Args:\r\n votes : hat{u_{j|i}} Tensor [bs, ci, co, no] / [bs, ci, co, no, ho, wo]\r\n logits : b_{ij} Tensor [bs, ci, co] / [bs, ci, co, ho, wo]\r\n iterations : Number of iterations of the algorithm\r\n bias : Bias term Tensor [co, no] / [co, no, 1, 1]\r\n\r\n Returns:\r\n activation : v_j Tensor [bs, co, no] / [bs, co, no, ho, wo]\r\n\r\n Other variables of the algorithm:\r\n votes_trans : equivalent to votes, but with shape [no, bs, ci, co] / [no, bs, ci, co, ho, wo]\r\n route : c_{ij} Tensor [bs, ci, co] / [bs, ci, co, ho, wo]\r\n preactivate_unrolled : c_{ij} * hat{u_{j|i}} Tensor [no, bs, ci, co] / [no, bs, ci, co, ho, wo]\r\n preactivate_trans : equivalent to preactivate_unrolled\r\n but with shape [bs, ci, co, no] / [bs, ci, co, no, ho, wo]\r\n preactivate : s_j Tensor [bs, co, no] / [bs, co, no, ho, wo]\r\n act_3d : equivalent to activation, but with shape [bs, 1, co, no] / [bs, 1, co, no, ho, wo]\r\n distances : 
\\hat{u_{j|i}} \\cdot v_j Tensor [bs, ci, co] / [bs, ci, co, ho, wo]\r\n\r\n Meaning of the dimensions:\r\n bs: batch size\r\n ci: number of input channels / number of input capsules\r\n co: number of output channels / number of output capsules\r\n ni: dimension of input capsules\r\n no: dimension of output capsules\r\n ho/wo: height/width of the output feature maps if the capsule layer is convolutional\r\n \"\"\"\r\n # Raise an error if the number of iterations is lower than 1\r\n if iterations < 1:\r\n raise ValueError('The number of iterations must be greater or equal than 1')\r\n\r\n # Perform different permutations depending on the number of dimensions of the vector (4 or 6)\r\n dimensions = len(votes.size())\r\n if dimensions == 4: # [bs, ci, co, no]\r\n votes_trans = votes.permute(3, 0, 1, 2).contiguous() # [no, bs, ci, co]\r\n\r\n else: # [bs, ci, co, no, ho, wo]\r\n votes_trans = votes.permute(3, 0, 1, 2, 4, 5).contiguous() # [no, bs, ci, co, ho, wo]\r\n\r\n for iteration in range(iterations):\r\n route = F.softmax(logits, dim=2)\r\n preactivate_unrolled = route * votes_trans\r\n if dimensions == 4:\r\n preactivate_trans = preactivate_unrolled.permute(1, 2, 3, 0).contiguous() # bs, ci, co, no\r\n else:\r\n preactivate_trans = preactivate_unrolled.permute(1, 2, 3, 0, 4, 5).contiguous() # bs, ci, co, no, ho, wo\r\n\r\n preactivate = preactivate_trans.sum(dim=1) + bias # bs, co, no, (ho, wo)\r\n activation = squash(preactivate, dim=2) # bs, co, no, (ho, wo)\r\n\r\n act_3d = activation.unsqueeze(1) # bs, 1, co, no, (ho,wo)\r\n distances = (votes * act_3d).sum(dim=3) # bs, ci, co, ho, wo\r\n logits = logits + distances\r\n\r\n return activation\r\n\r\n\r\ndef update_routing_6D_DeepCaps(votes, logits, iterations, bias): # differs from above\r\n \"\"\" Dynamic routing algorithm used in DeepCaps for the convolutional capsule layers\r\n\r\n Args:\r\n votes : hat{u_{j|i}} Tensor [bs, ci, co, no, ho, wo]\r\n logits : b_{ij} Tensor [bs, ci, co, ho, wo]\r\n iterations 
: Number of iterations of the algorithm\r\n bias : Bias term Tensor [bs, co, no, 1, 1]\r\n\r\n Returns:\r\n activation : v_j Tensor [bs, co, no, ho, wo]\r\n\r\n Meaning of the dimensions:\r\n bs: batch size\r\n ci: number of input channels / number of input capsules\r\n co: number of output channels / number of output capsules\r\n ni: dimension of input capsules\r\n no: dimension of output capsules\r\n ho/wo: height/width of the output feature maps if the capsule layer is convolutional\r\n \"\"\"\r\n # Raise an error if the number of iterations is lower than 1\r\n if iterations < 1:\r\n raise ValueError('The number of iterations must be greater or equal than 1')\r\n\r\n bs, ci, co, no, ho, wo = votes.size()\r\n\r\n # Perform different permutations depending on the number of dimensions of the vector (4 or 6)\r\n for iteration in range(iterations):\r\n logits_temp = logits.view(bs, ci, -1) # bs, ci, co*ho*wo\r\n route_temp = F.softmax(logits_temp, dim=2) # bs, ci, co*ho*wo\r\n route = route_temp.view(bs, ci, co, ho, wo) # bs, ci, co, ho, wo\r\n preactivate_unrolled = route.unsqueeze(3) * votes # bs, ci, co, no, ho, wo\r\n preactivate = preactivate_unrolled.sum(1) + bias # bs, co, no, ho, wo\r\n activation = squash(preactivate, dim=2) # bs, co, no, ho, wo\r\n act_3d = activation.unsqueeze(1) # bs, 1, co, no, ho, wo\r\n distances = (act_3d * votes).sum(3) # bs, ci, co, no, ho, wo --> bs, ci, co, ho, wo\r\n logits = logits + distances\r\n\r\n return activation\r\n\r\n\r\nclass ConvPixelToCapsules(nn.Module):\r\n \"\"\" Convolutional layer that transforms the traditional feature maps in capsules\r\n\r\n Methods:\r\n forward: compute the output of the layer given the input\r\n \"\"\"\r\n\r\n def __init__(self, ci, ni, co, no, kernel_size, stride, padding, iterations):\r\n \"\"\" Parameters:\r\n ci: number of input channels\r\n ni: dimension of input capsules (1 if the inputs are traditional feature maps)\r\n co: number of output channels\r\n no: dimension of output 
capsules\r\n kernel_size: dimension of the kernel (square kernel)\r\n stride : stride parameter (horizontal and vertical strides are equal)\r\n padding : padding applied to the input\r\n iterations: number of iterations of the dynamic routing algorithm\r\n \"\"\"\r\n super(ConvPixelToCapsules, self).__init__()\r\n\r\n self.ci = ci\r\n self.ni = ni\r\n self.co = co\r\n self.no = no\r\n self.iterations = iterations\r\n\r\n self.conv3d = nn.Conv2d(in_channels=ni,\r\n out_channels=co * no,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n bias=False)\r\n\r\n self.bias = torch.nn.Parameter(torch.zeros(co, no, 1, 1))\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n\r\n Args:\r\n x: input Tensor of shape [bs, ci, ni, hi, wi]\r\n Returns:\r\n activation: output Tensor of shape [bs, co, no, ho, wo]\r\n \"\"\"\r\n bs, ci, ni, hi, wi = x.size()\r\n\r\n # Reshape input and perform convolution to compute \\hat{u_{j|i}} = votes\r\n input_reshaped = x.view(bs * ci, ni, hi, wi)\r\n votes = self.conv3d(input_reshaped) # bs*ci, co*no, ho, wo\r\n _, _, ho, wo = votes.size()\r\n\r\n # Reshape votes, initialize logits and perform dynamic routing\r\n votes_reshaped = votes.view(bs, ci, self.co, self.no, ho, wo).contiguous()\r\n logits = votes_reshaped.new(bs, ci, self.co, ho, wo).zero_()\r\n\r\n activation = update_routing(votes_reshaped, logits, self.iterations, self.bias)\r\n\r\n return activation\r\n\r\n\r\nclass Capsules(nn.Module):\r\n \"\"\" Capsule layer\r\n\r\n Methods:\r\n forward: compute the output of the layer given the input\r\n \"\"\"\r\n\r\n def __init__(self, ci, ni, co, no, iterations):\r\n \"\"\" Parameters:\r\n ci: number of input channels\r\n ni: dimension of input capsules\r\n co: number of output channels\r\n no: dimension of output capsules\r\n iterations: number of iterations of the dynamic routing algorithm\r\n \"\"\"\r\n super(Capsules, self).__init__()\r\n\r\n self.weight = nn.Parameter(torch.randn(ci, ni, co * no))\r\n 
self.bias = nn.Parameter(torch.zeros(co, no))\r\n self.ci = ci\r\n self.co = co\r\n self.no = no\r\n self.ni = ni\r\n self.iterations = iterations\r\n\r\n init.kaiming_uniform_(self.weight)\r\n init.constant_(self.bias, 0.1)\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n\r\n Args:\r\n x: input Tensor of shape [bs, ci, ni]\r\n Returns:\r\n activation: output Tensor of shape [bs, co, no]\r\n \"\"\"\r\n bs = x.size(0)\r\n\r\n # Compute \\hat{u_{j|i}} = votes\r\n votes = (x.unsqueeze(3) * self.weight).sum(dim=2).view(-1, self.ci, self.co, self.no)\r\n\r\n # Initialize logits and perform dynamic routing\r\n logits = votes.new(bs, self.ci, self.co).zero_()\r\n activation = update_routing(votes, logits, self.iterations, self.bias)\r\n\r\n return activation\r\n\r\n\r\nclass Conv2DCaps(nn.Module): \r\n \"\"\" 2D Convolutional layer used in DeepCaps (no dynamic routing)\r\n\r\n Methods:\r\n forward: computes the output given the input\r\n \"\"\"\r\n def __init__(self, ci, ni, co, no, kernel_size, stride, padding):\r\n \"\"\" Parameters:\r\n ci: number of input channels\r\n ni: dimension of input capsules\r\n co: number of output channels\r\n no: dimension of output capsules\r\n kernel_size: kernel size\r\n stride: stride\r\n padding: padding\r\n \"\"\"\r\n super(Conv2DCaps, self).__init__()\r\n\r\n self.ci = ci # number of capsules in input layer\r\n self.ni = ni # atoms of capsules in input layer\r\n self.co = co # number of capsules in output layer\r\n self.no = no # atoms of capsules in output layer\r\n\r\n # input shape: bs, ci, ni, hi, wi\r\n\r\n self.conv = nn.Conv2d(in_channels=ci * ni,\r\n out_channels=co * no,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding)\r\n init.xavier_uniform_(self.conv.weight)\r\n init.zeros_(self.conv.bias)\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n\r\n Args:\r\n x: input Tensor of shape [batch_size, ci, ni, hi, wi]\r\n Returns:\r\n output: Tensor of shape [bathc_size, co, no, ho, wo]\r\n 
\"\"\"\r\n bs, ci, ni, hi, wi = x.size()\r\n input_reshaped = x.view(bs, ci * ni, hi, wi)\r\n\r\n output_reshaped = self.conv(input_reshaped) # bs, co*no, ho, wo\r\n _, _, ho, wo = output_reshaped.size()\r\n\r\n output = output_reshaped.view(bs, self.co, self.no, ho, wo) # bs, co, no, ho, wo\r\n\r\n output = squash(output, dim=2)\r\n\r\n return output\r\n\r\n\r\nclass Conv3DCaps(nn.Module):\r\n \"\"\" 3D convolutional capsule layer with dynamic routing\r\n\r\n Methods:\r\n forward: computes the output given the input\r\n \"\"\"\r\n def __init__(self, ci, ni, co, no, kernel_size, stride, padding, iterations):\r\n \"\"\" Parameters:\r\n ci: number of input channels\r\n ni: dimension of input capsules\r\n co: number of output channels\r\n no: dimension of output capsules\r\n kernel_size: kernel size\r\n stride: stride\r\n padding: padding\r\n iterations: number of iterations of the dynamic routing algorithm\r\n \"\"\"\r\n super(Conv3DCaps, self).__init__()\r\n self.ci = ci\r\n self.ni = ni\r\n self.co = co\r\n self.no = no\r\n\r\n self.conv = nn.Conv3d(in_channels=1,\r\n out_channels=co * no,\r\n kernel_size=(ni, kernel_size, kernel_size),\r\n stride=(ni, stride, stride),\r\n padding=(0, padding, padding))\r\n\r\n self.bias = nn.Parameter(torch.zeros(co, no, 1, 1))\r\n\r\n init.kaiming_uniform_(self.conv.weight)\r\n init.zeros_(self.conv.bias)\r\n init.constant_(self.bias, 0.1)\r\n\r\n self.iterations = iterations\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n\r\n Args:\r\n x: Tensor of shape [batch_size, ci, ni, hi, wi]\r\n Returns:\r\n activation: Tensor of shape [batch_size, co, no, ho, wo]\r\n \"\"\"\r\n bs, ci, ni, hi, wi = x.size()\r\n\r\n input_tensor_reshaped = x.view(bs, 1, ci * ni, hi, wi)\r\n\r\n conv = self.conv(input_tensor_reshaped) # bs, co*no, ci, ho, wo\r\n _, _, _, ho, wo = conv.size()\r\n\r\n votes = conv.permute(0, 2, 1, 3, 4).contiguous().view(bs, ci, self.co, self.no, ho, wo)\r\n\r\n logits = votes.new(bs, ci, self.co, ho, 
wo).zero_() # bs, ci, co, ho, wo\r\n\r\n activation = update_routing_6D_DeepCaps(votes, logits, self.iterations, self.bias)\r\n\r\n return activation # bs, co, no, ho, wo\r\n\r\n\r\nclass DeepCapsBlock(nn.Module):\r\n \"\"\" DeepCaps block used in the DeepCaps architecture\r\n\r\n Consists of three serial layers and one parallel layer\r\n Methods:\r\n forward: computes the output given the input\r\n \"\"\"\r\n def __init__(self, ci, ni, co, no, kernel_size, stride, padding, iterations):\r\n \"\"\"Parameters:\r\n ci: number of input channels,\r\n ni: dimension of input capsules\r\n co: number of output channels\r\n no: dimension of output capsules\r\n kernel_size: kernel size of the convolutions\r\n stride: stride of the first convolution\r\n padding: list of four elements, padding factors for the four convolutions\r\n iterations: number of iterations of the dynamic routing. If 1, no dynamic routing is performed\r\n \"\"\"\r\n super(DeepCapsBlock, self).__init__()\r\n\r\n self.l1 = Conv2DCaps(ci=ci, ni=ni, co=co, no=no, kernel_size=kernel_size, stride=stride, padding=padding[0])\r\n self.l2 = Conv2DCaps(ci=co, ni=no, co=co, no=no, kernel_size=kernel_size, stride=1, padding=padding[1])\r\n self.l3 = Conv2DCaps(ci=co, ni=no, co=co, no=no, kernel_size=kernel_size, stride=1, padding=padding[2])\r\n if iterations == 1:\r\n self.l_skip = Conv2DCaps(ci=co, ni=no, co=co, no=no, kernel_size=kernel_size, stride=1, padding=padding[2])\r\n else:\r\n self.l_skip = Conv3DCaps(ci=co, ni=no, co=co, no=no, kernel_size=kernel_size, stride=1, padding=padding[3],\r\n iterations=iterations)\r\n\r\n def forward(self, x):\r\n \"\"\" forward method\r\n Args:\r\n x: input Tensor of size [batch_size, ci, ni, hi, wi]\r\n Returns:\r\n x: output Tensor of size [batch_size, co, no, ho, wo]\r\n \"\"\"\r\n x = self.l1(x)\r\n x_skip = self.l_skip(x)\r\n x = self.l2(x)\r\n x = self.l3(x)\r\n x = x + x_skip\r\n\r\n return 
x\r\n","sub_path":"full_precision_layers.py","file_name":"full_precision_layers.py","file_ext":"py","file_size_in_byte":17976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"207724949","text":"# from __future__ import print_function\nfrom __future__ import division\nfrom itertools import combinations\nimport operator\nfrom pprint import pprint\n\ndef processTransactions(transactions,minsup,minconf):\n\t\"\"\"Processes transactions, returns printable data structures\n\t\n\tThe results will be printed on screen directly.\n\n\t\"\"\"\n\treturn _processTransactions(False,transactions,minsup,minconf)\t\n\ndef _processTransactions(DEBUG,transactions,minsup,minconf):\n\n\tDEBUG=DEBUG\n\n\ttransSize = len(transactions)\n\tdicts=[]\n\n\t\"\"\"Generate the itemsets with 1 item\"\"\"\n\td={}\n\tfor tran in transactions:\n\t\tfor itemSet in tran.values():\n\t\t\tfor item in itemSet:\n\t\t\t\tif frozenset([item]) in d:\n\t\t\t\t\td[frozenset([item])]+=1\n\t\t\t\telse:\n\t\t\t\t\td[frozenset([item])]=1\n\t# \n\t# pprint(d)\n\t\t\t\n\tif DEBUG:\n\t\tpprint(d)\n\t\tprint(\"============================\")\n\t\tprint(\"Above is all the items\")\n\t\tprint(\"============================\")\n\t\tnumItems = len(d.keys())\n\n\tdef delLowSupItems(d):\n\t\tfor dKey in d.keys():\n\t\t\tif d[dKey] < minsup * transSize:\n\t\t\t\tdel d[dKey]\n\t\tdicts.append(d)\n\n\tdelLowSupItems(d)\n\t# \n\t# pprint(dicts[0])\n\n\tif DEBUG:\n\t\tnumSurvive = len(dicts[0].keys())\n\t\tpprint(dicts[0])\n\t\tprint(\"============================\")\n\t\tprint(\"Above is the surviving ones\")\n\t\tprint(\"============================\")\n\t\tprint(\"With min_sup of \"+str(minsup)+\", we have \"+str(numItems)+\" items, of which \"+ str(numSurvive)+\" survived, the # of transactions is \"+str(transSize)+\".\")\n\n\n\tdef getOccurrence(subSet):\n\t\tcount=0\n\t\tfor tran in transactions:\n\t\t\tfor itemSet in tran.values():\n\t\t\t\tif subSet<=itemSet:\n\t\t\t\t\tcount+=1\n\t\treturn count\n\n\tdef inPreviousSet(setCandidate,previousSet,sizeMinusOne):\n\t\tfor subCandidate in set(combinations(setCandidate,sizeMinusOne+1)):\n\t\t\tif set(subCandidate) not in 
previousSet.keys():\n\t\t\t\treturn False\n\t\treturn True\n\n\t\"\"\"Compute supports\"\"\"\n\tsizeMinusOne=0\n\twhile len(d)!=0:\n\t\td={}\n\t\tsetList = dicts[sizeMinusOne].keys()\n\t\tfor setCandidate in set([k1|k2 for k1 in setList for k2 in setList if len(k1|k2)==sizeMinusOne+2]):\n\t\t\tif inPreviousSet(setCandidate,dicts[sizeMinusOne],sizeMinusOne):\n\t\t\t\tcandidateSup = getOccurrence(setCandidate)\n\t\t\t\tif candidateSup >= minsup * transSize:\n\t\t\t\t\t# print frozenset(setCandidate),candidateSup\n\t\t\t\t\td[frozenset(setCandidate)]=candidateSup\n\n\t\tsizeMinusOne+=1\n\t\tif len(d)!=0:\n\t\t\tdicts.append(d)\n\t# \n\t# print \"largest item set size :\", sizeMinusOne\n\n\t\"\"\"Compute rules\"\"\"\n\trules={}\n\tfor i in range(0,len(dicts)):\n\t\tdict_=dicts[i]\n\t\tfor largeset in dict_.keys():\n\t\t\tdividend=dict_[largeset]\n\t\t\tfor lSize in range(1,len(largeset)):\n\t\t\t\tfor lCandidate in set(combinations(largeset,lSize)):\n\t\t\t\t\tconf=dividend/dicts[lSize-1][frozenset(lCandidate)]\n\t\t\t\t\tif conf >= minconf:\n\t\t\t\t\t\trules[tuple([frozenset(lCandidate),largeset-frozenset(lCandidate)]+[dicts[lSize-1][frozenset(lCandidate)]])]=conf\n\t# \n\t# pprint(rules)\n\n\t\"\"\"Rearange and print the support dicts[] and rules{} data structures\"\"\"\n\tdSup={}\n\tfor dict_ in dicts:\n\t\tdSup.update(dict_)\n\tlargeSetToPrint = sorted(dSup.iteritems(), key=operator.itemgetter(1))\n\tprint (\"==Frequent itemsets (min_sup=\"+str(minsup*100)+\"%)\")\n\tfor line in reversed(largeSetToPrint):\n\t\tprint (str(list(line[0]))+\", \"+str(line[1]/transSize*100)+\"%\")\n\tprint ('')\n\trulesToPrint = sorted(rules.iteritems(), key=operator.itemgetter(1))\n\tprint (\"==High-confidence association rules (min_conf=\"+str(minconf*100)+\"%)\")\n\tfor line in reversed(rulesToPrint):\n\t\tleft=line[0][0]\n\t\tright=line[0][1]\n\t\truleStr=\"[\"\n\t\tfor i in range(0,len(left)):\n\t\t\tif i==len(left)-1:\n\t\t\t\truleStr=ruleStr+str(list(left)[i])+\"] => 
[\"\t\n\t\t\telse:\n\t\t\t\truleStr=ruleStr+str(list(left)[i])+\", \"\n\t\tfor i in range(0,len(right)):\n\t\t\tif i==len(right)-1:\n\t\t\t\truleStr=ruleStr+str(list(right)[i])+\"] (Conf: \"\t\n\t\t\telse:\n\t\t\t\truleStr=ruleStr+str(list(right)[i])+\", \"\n\t\truleStr=ruleStr+str(line[1]*100)+\"%, Supp: \"+str(line[0][2]/transSize*100)+\"%)\"\n\t\tprint (ruleStr)\n\n\tif DEBUG:\n\t\t# log = open(\"./log.txt\", \"w\")\n\t\t# print(d, file = log)\n\t\t# log.close()\n\t\tpass\n\n\n\nif __name__ == \"__main__\":\n\t_processTransactions(True,[{1:frozenset(['a','b'])}],0.3,0.3)\n\n\n\n\n","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"302463975","text":"#!/usr/bin/python -u\nimport sys\nimport os\nimport time\nfrom subprocess import call\n\nimport string\ndigs = string.digits + string.letters\n\ndef base36(x):\n if x == 0: return digs[0]\n digits = []\n while x:\n digits.append(digs[x % 36].upper())\n x /= 36\n digits.reverse()\n return ''.join(digits)\n\ndef get_binary(z):\n msg = 'Binary\\n'\n for i in range(int(z)-1):\n for j in range(i+1,int(z)):\n msg += ' xe' + base36(i+1) + base36(j+1) + '\\n'\n msg += 'END\\n'\n return msg\n\n\ndef main():\n if(len(sys.argv) != 3):\n return\n maxcut_time = []\n scip_time = []\n sssf = []\n mxc_out = 'mxc.out'\n scp_out = 'scp.out'\n dmp_fle = 'dump.lp'\n z = sys.argv[1]\n binary_vars = get_binary(int(z))\n exp_num = int(sys.argv[2])\n for i in range(exp_num):\n #run maxcut\n call([\"bin/maxcut\", \"-r\",z, \"-p\", \"1\", \"-o\", mxc_out])\n with open(mxc_out) as tmp:\n for line in tmp:\n if 'Search space frac' in line:\n frac = float(line.split(':')[-1])\n sssf.append(frac)\n elif 'Total time' in line:\n t = float(line.split(':')[-1][:-2])\n maxcut_time.append(t)\n print('Parcial Mog times: ', maxcut_time)\n time.sleep(1)\n break\n os.remove(mxc_out)\n\n #modify lp file\n with open(dmp_fle, 'a') as lp:\n #erase END marker\n lp.seek(0, os.SEEK_END)\n pos = lp.tell()\n lp.seek(pos-4, os.SEEK_SET)\n lp.truncate()\n lp.write(binary_vars)\n\n #run scip\n call([\"scip\", \"-f\", dmp_fle, \"-l\", scp_out, \"-q\"])\n with open(scp_out) as tmp:\n for line in tmp:\n if 'Solving Time' in line:\n t = float(line.split(':')[-1])\n scip_time.append(t)\n print('Parcial scip times: ', scip_time)\n time.sleep(1)\n break\n os.remove(scp_out)\n\n print('')\n print('Mog times: ', maxcut_time)\n sm = sum(maxcut_time)\n print('Mog total: ', sm)\n print('Mog avg: ', sm/exp_num)\n print('sssf avg: ', sum(sssf)/exp_num)\n print('')\n\n print('scip times: ', scip_time)\n ss = sum(scip_time)\n print('scip total: ', ss)\n print('scip avg: ', ss/exp_num)\n print('')\n\n 
print('Mog/scip', sm/ss)\n\n\nif __name__ == '__main__':\n main()","sub_path":"test_random.py","file_name":"test_random.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"532576297","text":"#\n# Codigo RC 1.0 - 03/06/2018\n# Lago Terra Alta, 3a visita em 04/06/2018\n# Wifi desabilitado no boot\n# Sem RTC externo\n# Sem Expansion Board (sem gravar no cartão de memoria)\n# Deepsleep de 10 minutos\n#\n\nflagDebug = False\nflagRun = True\nif flagDebug or flagRun:\n from machine import Timer\n chrono = Timer.Chrono()\n chrono.start()\n\n# Builtin modules\nfrom network import LoRa\nimport socket\nimport binascii\nimport struct\nimport time\nfrom machine import Pin, I2C, ADC, deepsleep\nimport machine\nfrom onewire import DS18X20, OneWire\nimport pycom\nimport os\n\npycom.heartbeat(False)\n\n# Additional modules\nimport bme280\nimport max44009\nimport cayenneLPP\nimport config\nimport myfuncs\n\n\nif flagDebug:\n lap1 = chrono.read_ms()\n print(\"Tempo de importacao dos modulos: {} ms\".format(lap1))\n\n\n#\n# Inicialização de LoRaWan com ABP\n#\n\nlora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915)\n\n# create an ABP authentication params\ndev_addr = struct.unpack(\">l\", binascii.unhexlify('260XXXX6'))[0]\nnwk_swkey = binascii.unhexlify('50BDE0FD219E1XXXXXXXXXXXXXXXXXXX')\napp_swkey = binascii.unhexlify('F9E4XXXXXXXXXXXXXXXXX3CBD3B830ED')\n\n# remove all the channels\nfor channel in range(0, 72):\n lora.remove_channel(channel)\n\nfor channel in range(0, 72):\n lora.add_channel(channel, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=3)\n\n# join a network using ABP (Activation By Personalization)\nlora.join(activation=LoRa.ABP, auth=(dev_addr, nwk_swkey, app_swkey))\n\n# create a LoRa socket\ns = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n\n# set the LoRaWAN data rate\ns.setsockopt(socket.SOL_LORA, socket.SO_DR, config.LORA_NODE_DR)\n\n# make the socket blocking\ns.setblocking(False)\n\nif flagDebug:\n lap2 = chrono.read_ms()\n print(\"Tempo apos inicializaco LoRA: {} ms\".format(lap2))\n\n\n#\n# Inicialização e leitura dos sensores\n#\nif flagRun:\n lap1 = chrono.read_ms()\n\ni2c = I2C(0, I2C.MASTER, pins=('G10', 
'G9'),baudrate=400000)\nconnectedI2C = i2c.scan()\nif flagDebug:\n print(\"Connected I2C devices: \" + str(connectedI2C))\n\now = OneWire(Pin('G8'))\ntemp = DS18X20(ow)\nconnectedOW = ow.scan()\nif flagDebug:\n print(\"Connected 1-Wire devices: \" + str(connectedOW))\n\nilum = [] # Lista com as luminosidades lidas com o MAX44009\nbmet = [] # Lista com as temperaturas lidas com o BME280\nbmeh = [] # Lista com as umiadades lidas com o BME280\nbmep = [] # Lista com as pressoes lidas com o BME280\nowTemp = [] # Lista com as temperaturas lidas com o OneWire\n\nlight_s = False # Flag para indicar se o MAX44009 esta conectado\nbme_s = False # Flag para indicar se o BME280 esta conectado\now_s = False # Flag para indicar se possui algum sensor 1-Wire conectado\nif len(connectedOW) > 0:\n ow_s = True\n\nconnected_i2c = False # Flag para indicar se possui algum sensor I2C conectado\nif len(connectedI2C) > 0:\n connected_i2c = True\n\nif connected_i2c:\n for device in connectedI2C:\n if device == 0x4A: # MAX44009 - 74\n light_sensor = max44009.MAX44009(i2c)\n light_s = True\n elif device == 0x76: # BME280 - 118\n bme = bme280.BME280(i2c=i2c, pressure_mode=bme280.OSAMPLE_8, iir=bme280.FILTER_8)\n bme_s = True\n else:\n if flagDebug:\n print(\"I2C nao reconhecido\") # Dispositivo nao cadastrado\n\nif ow_s:\n count = 0\n for sensors in connectedOW:\n temp.start_conversion(temp.roms[count])\n count += 1\n\nif light_s:\n # Le iluminancia em lux do MAX44009\n data = int(light_sensor.illuminance_lux)\n ilum.append(data)\nif bme_s:\n # Le valores BME280 com media para ter maior precisao :\n numreadings = 15\n samples_temperature = [0.0]*numreadings; mean_temperature = 0.0\n samples_pressure = [0.0]*numreadings; mean_pressure = 0.0\n samples_humidity = [0.0]*numreadings; mean_humidity = 0.0\n for i in range (numreadings):\n samples_temperature[i], samples_pressure[i], samples_humidity[i] = bme.values\n mean_temperature = sum(samples_temperature)/len(samples_temperature)\n 
mean_pressure = sum(samples_pressure)/len(samples_pressure)\n mean_humidity = sum(samples_humidity)/len(samples_humidity)\n t = mean_temperature\n p = mean_pressure/100 # Pa -> hectoPa\n h = mean_humidity\n bmet.append(t)\n bmep.append(p)\n bmeh.append(h)\n\nif ow_s:\n count = 0\n for sensor in connectedOW:\n tempOW = temp.read_temp_async(temp.roms[count])\n # tempOW_c = myfuncs.sensor_calibration(sensor, tempOW)\n # tempOW_clpp = str(tempOW_c)\n # owTemp.append(tempOW_clpp)\n # print(\"Sensor: \" +str(sensor)+\"| Temperatura: \"+str(tempOW) +\"| Temperatura Calibrada: \"+str(tempOW_c))\n owTemp.append(tempOW)\n count += 1\n\nif flagDebug:\n lap3 = chrono.read_ms()\n print(\"Tempo apos sensores: {} ms\".format(lap3))\n\nif flagRun:\n lap2 = chrono.read_ms()\n timeSensors=lap2-lap1\n\n#\n# Criando pacote Cayenne LPP\n#\n\nlpp = cayenneLPP.CayenneLPP(size = 100, sock = s)\n\nif bme_s:\n lpp.add_temperature(bmet[0])\n lpp.add_barometric_pressure(bmep[0])\n lpp.add_relative_humidity(bmeh[0])\nif light_s:\n lpp.add_luminosity(ilum[0])\n\nowChannel = 150\n\nif ow_s:\n for tempValue in owTemp:\n val = float(tempValue)\n lpp.add_temperature(value = val, channel = owChannel)\n owChannel += 1\n\nVBAT = myfuncs.get_batt_mV()\n\nlpp.add_generic(lpp_id=2, values = VBAT, channel = 13, data_size = 2, is_signed = False, precision = 1)\n\nif machine.wake_reason()[0] == machine.PWRON_WAKE:\n pycom.nvs_erase_all() # 1st time power on\n\npayloadcount = pycom.nvs_get('count')\nif payloadcount is not None:\n payloadcount += 1\n pycom.nvs_set('count', payloadcount)\nelse:\n payloadcount = 1\n pycom.nvs_set('count', 1) # Starts from 1\nif flagDebug:\n print(\"# pacote LoRaWan = {}\".format(payloadcount))\n\nlpp.add_luminosity( value = payloadcount, channel = 155) # Numero do Pacote enviado\n\nif flagDebug:\n print(\"Tamanho do pacote LPP = {} bytes\".format(lpp.get_size()))\n\nif flagDebug:\n lap4 = chrono.read_ms()\n print(\"Tempo antes do LPP send: {} ms\".format(lap4))\n\nif flagRun:\n 
lap3 = chrono.read_ms()\n totalAwake = lap3+210+3\n\nlpp.add_luminosity( value = timeSensors, channel = 158) # Tempo dos sensores (inicializacao + leitura)\nlpp.add_luminosity( value = totalAwake, channel = 159) # Tempo total acordado\nlpp.send(reset_payload = True) # Envio do pacote LoRaWan usando Cayenne LPP\n\nif flagDebug:\n lap5 = chrono.read_ms()\n print(\"Tempo apos LPP send: {} ms\".format(lap5))\n\ntime.sleep_ms(300) # pausa para garantir o envio do pacote\n\n#\n# Entrando em deepsleep\n#\n\nif flagDebug:\n print(\"Entrando em deepsleep por 600s = 10 minutos...\")\n\nmachine.deepsleep(600*1000) # deep sleep por 10 minutos\n","sub_path":"NodeABP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"52940141","text":"import argparse\nimport os\nimport pdb\nimport sys\n\nimport cv2\n\nimport constants\n\ndef make_video_recursive(directory, new_video=False):\n if directory[-1] != '/':\n directory += '/'\n files = [directory + f for f in os.listdir(directory)]\n interaction_data_dirs = [f for f in files if os.path.isdir(f)]\n all_frame_dirs = []\n for interaction_data_dir in interaction_data_dirs:\n interaction_files = [interaction_data_dir + '/' + f for f in os.listdir(interaction_data_dir)]\n frame_dirs = [f for f in interaction_files if os.path.isdir(f)]\n for frame_dir in frame_dirs:\n frame_dir_videos = [f for f in os.listdir(frame_dir) if f[-4:] == '.mp4']\n if new_video or len(frame_dir_videos) == 0:\n all_frame_dirs.append(frame_dir)\n print('Frame directories:', all_frame_dirs, len(all_frame_dirs))\n for frame_dir in all_frame_dirs:\n make_video(frame_dir)\n\ndef make_video(frame_dir, video_name=None):\n if frame_dir[-1] != '/':\n frame_dir += '/'\n frames = sorted([frame for frame in os.listdir(frame_dir) if frame.endswith(\".png\")],\n key=lambda f: int(f.split('.png')[0]))\n frame = cv2.imread(os.path.join(frame_dir, frames[-1]))\n height, width, layers = frame.shape\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n if video_name is None:\n video_name = frame_dir.split('/')[-2] + '.mp4'\n print('video:', frame_dir + video_name)\n out = cv2.VideoWriter(frame_dir + video_name, fourcc, 10.0, (width, height))\n for frame in frames:\n out.write(cv2.imread(os.path.join(frame_dir, frame)))\n out.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('directory', \n help=\"The frame directory if not using the recursive option. 
Otherwise, the directory of frame direcories.\")\n parser.add_argument('-v', '--video_name', help=\"The name of the video.\")\n parser.add_argument('-r', '--recursive', action='store_true', \n help=\"If True, make videos for all of the directories inside of the given directory.\")\n parser.add_argument('-n', '--new_video', action='store_true', \n help=\"If True, make a new video to overwrite the existing one. Otherwise, don't create a new video if one already exists.\")\n args = parser.parse_args(sys.argv[1:])\n if args.recursive:\n make_video_recursive(args.directory, args.new_video)\n else:\n make_video(args.directory, args.video_name)","sub_path":"video_maker.py","file_name":"video_maker.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"229544433","text":"'''\nquestion link: \n\nhttps://practice.geeksforgeeks.org/problems/count-possible-triangles-1587115620/1/?track=DS-Python-Sorting&batchId=273\n\n\n'''\nimport timeit\n\ndef findNumberOfTriangles(arr,n):\n arr.sort()\n c=0\n for i in range(n):\n for next in range(i+1,n):\n sum=arr[i]+arr[next]\n for j in range(next+1,n):\n if sum>arr[j]:\n c+=1\n return c\n\n\ndef findTriangles(arr):\n arr.sort()\n n=len(arr)\n c=0\n for i in range(n-1,0,-1):\n l=0\n r=i-1\n while larr[i]:\n c+=r-l\n r-=1\n else:\n l+=1\n return c\n \n\nif __name__=='__main__':\n \n start=timeit.timeit()\n arr=[ 7,8,3,4,6,9]\n n=len(arr)\n c=findNumberOfTriangles(arr,n)\n end=timeit.timeit()\n print(c,end-start)\n \n s=timeit.timeit()\n d=findTriangles(arr)\n e=timeit.timeit()\n print(d,e-s)","sub_path":"practice/possible_triangles.py","file_name":"possible_triangles.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"10889837","text":"import os\nimport shutil\nimport tempfile\n\nimport pytest\n\nfrom galaxy.tool_util.deps.mulled.mulled_update_singularity_containers import docker_to_singularity, get_list_from_file, singularity_container_test\nfrom galaxy.util import which\nfrom ..util import external_dependency_management\n\n\ndef test_get_list_from_file():\n test_dir = tempfile.mkdtemp()\n try:\n list_file = os.path.join(test_dir, 'list_file.txt')\n with open(list_file, 'w') as f:\n f.write('bbmap:36.84--0\\nbiobambam:2.0.42--0\\nconnor:0.5.1--py35_0\\ndiamond:0.8.26--0\\nedd:1.1.18--py27_0')\n assert get_list_from_file(list_file) == ['bbmap:36.84--0', 'biobambam:2.0.42--0', 'connor:0.5.1--py35_0', 'diamond:0.8.26--0', 'edd:1.1.18--py27_0']\n finally:\n shutil.rmtree(test_dir)\n\n\n@external_dependency_management\n@pytest.mark.skipif(not which('singularity'), reason=\"requires singularity but singularity not on PATH\")\ndef test_docker_to_singularity(tmp_path):\n tmp_dir = str(tmp_path)\n docker_to_singularity('abundancebin:1.0.1--0', 'singularity', tmp_dir, no_sudo=True)\n assert tmp_path.joinpath('abundancebin:1.0.1--0').exists()\n\n\n@external_dependency_management\n@pytest.mark.skipif(not which('singularity'), reason=\"requires singularity but singularity not on PATH\")\ndef test_singularity_container_test(tmp_path):\n test_dir = tempfile.mkdtemp()\n try:\n for n in ['pybigwig:0.1.11--py36_0', 'samtools:1.0--1', 'yasm:1.3.0--0']:\n docker_to_singularity(n, 'singularity', test_dir, no_sudo=True)\n results = singularity_container_test({'pybigwig:0.1.11--py36_0': {'imports': ['pyBigWig'], 'commands': ['python -c \"import pyBigWig; assert(pyBigWig.numpy == 1); assert(pyBigWig.remote == 1)\"'], 'import_lang': 'python -c'}, 'samtools:1.0--1': {'commands': ['samtools --help'], 'import_lang': 'python -c', 'container': 'samtools:1.0--1'}, 'yasm:1.3.0--0': {}}, 'singularity', test_dir)\n assert 'samtools:1.0--1' in results['passed']\n assert 
results['failed'][0]['imports'] == ['pyBigWig']\n assert 'yasm:1.3.0--0' in results['notest']\n finally:\n shutil.rmtree(test_dir)\n","sub_path":"test/unit/tool_util/mulled/test_mulled_update_singularity_containers.py","file_name":"test_mulled_update_singularity_containers.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"130879524","text":"\r\n#合并两个顺序表\r\n\r\ndef merge_list(list1, list2):\r\n length1 = len(list1)\r\n length2 = len(list2)\r\n out_list = []\r\n i = 0\r\n j = 0\r\n while i 1:\n req.add_header('Referer', referer)\n if data:\n req.add_header('Content-Length', len(data))\n response = urlopen(req, timeout=60)\n if response.info().get('Content-Encoding') == 'gzip':\n buf = StringIO( response.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\n f.close()\n else:\n data = response.read() \n if not NoCookie:\n # Cope with problematic timestamp values on RPi on OpenElec 4.2.1\n try:\n cj.save(cookiePath)\n except: pass\n response.close()\n return data\n\n \ndef postHtml(url, form_data={}, headers={}, compression=True):\n _user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 ' + \\\n '(KHTML, like Gecko) Chrome/13.0.782.99 Safari/535.1'\n req = urllib2.Request(url)\n if form_data:\n form_data = urllib.urlencode(form_data)\n req = urllib2.Request(url, form_data)\n req.add_header('User-Agent', _user_agent)\n for k, v in headers.items():\n req.add_header(k, v)\n if compression:\n req.add_header('Accept-Encoding', 'gzip')\n response = urllib2.urlopen(req)\n data = response.read()\n cj.save(cookiePath)\n response.close()\n return data\n\n \ndef getHtml2(url):\n req = Request(url)\n response = urlopen(req, timeout=60)\n data = response.read()\n response.close()\n return data \n\n \ndef getVideoLink(url, referer):\n req2 = Request(url, '', headers)\n req2.add_header('Referer', referer)\n url2 = urlopen(req2).geturl()\n return url2\n\n\ndef cleantext(text):\n text = text.replace('–','-')\n text = text.replace('&','&')\n text = text.replace('’','\\'')\n text = text.replace('‘','\\'')\n text = text.replace('…','...')\n text = text.replace('"','\"')\n text = text.replace(''','`')\n text = text.replace('&','&')\n text = text.replace('ñ','ñ')\n return text\n\n\ndef addDownLink(name, url, mode, iconimage, desc, stream=None, fav='add', fanart=None):\n if fav == 
'add': favtext = \"Add to\"\n elif fav == 'del': favtext = \"Remove from\"\n u = (sys.argv[0] +\n \"?url=\" + urllib.quote_plus(url) +\n \"&mode=\" + str(mode) +\n \"&name=\" + urllib.quote_plus(name))\n dwnld = (sys.argv[0] +\n \"?url=\" + urllib.quote_plus(url) +\n \"&mode=\" + str(mode) +\n \"&download=\" + str(1) +\n \"&name=\" + urllib.quote_plus(name))\n favorite = (sys.argv[0] +\n \"?url=\" + urllib.quote_plus(url) +\n \"&fav=\" + fav +\n \"&favmode=\" + str(mode) +\n \"&mode=\" + str('900') +\n \"&img=\" + urllib.quote_plus(iconimage) +\n \"&name=\" + urllib.quote_plus(name)) \n ok = True\n liz = xbmcgui.ListItem(name, iconImage=\"DefaultVideo.png\", thumbnailImage=iconimage)\n liz.setArt({'thumb': iconimage, 'icon': iconimage})\n if stream:\n liz.setProperty('IsPlayable', 'true')\n if len(desc) < 1:\n liz.setInfo(type=\"Video\", infoLabels={\"Title\": name})\n else:\n liz.setInfo(type=\"Video\", infoLabels={\"Title\": name, \"plot\": desc, \"plotoutline\": desc})\n if fanart:\n liz.setArt({'fanart': fanart})\n liz.addContextMenuItems([('[COLOR lime]Download Video[/COLOR]', 'xbmc.RunPlugin('+dwnld+')'),\n ('[COLOR lime]' + favtext + ' favorites[/COLOR]', 'xbmc.RunPlugin('+favorite+')')])\n ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=False)\n return ok\n \ndef playyt(url, name):\n iconimage = xbmc.getInfoImage(\"ListItem.Thumb\")\n liz=xbmcgui.ListItem(name, iconImage=\"DefaultFolder.png\", thumbnailImage=iconimage)\n liz.setInfo( type=\"Video\", infoLabels={ \"Title\": name } )\n xbmc.Player().play(url, liz, False)\n\ndef addDir(name, url, mode, iconimage, page=None, channel=None, section=None, keyword='', Folder=True, fanart=None):\n if url.startswith(\"plugin://\"):\n u = url\n else:\n u = (sys.argv[0] +\n \"?url=\" + urllib.quote_plus(url) +\n \"&mode=\" + str(mode) +\n \"&page=\" + str(page) +\n \"&channel=\" + str(channel) +\n \"§ion=\" + str(section) +\n \"&keyword=\" + urllib.quote_plus(keyword) +\n \"&name=\" 
+ urllib.quote_plus(name))\n ok = True\n liz = xbmcgui.ListItem(name, iconImage=\"DefaultFolder.png\", thumbnailImage=iconimage)\n liz.setArt({'thumb': iconimage, 'icon': iconimage})\n if not fanart:\n fanart = 'https://raw.githubusercontent.com/DutchMusic/DutchMusic/master/plugin.video.DutchMusic/fanart.JPG'\n liz.setArt({'fanart': fanart})\n liz.setInfo(type=\"Video\", infoLabels={\"Title\": name})\n ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=Folder)\n return ok\n \ndef _get_keyboard(default=\"\", heading=\"\", hidden=False):\n \"\"\" shows a keyboard and returns a value \"\"\"\n keyboard = xbmc.Keyboard(default, heading, hidden)\n keyboard.doModal()\n if keyboard.isConfirmed():\n return unicode(keyboard.getText(), \"utf-8\")\n return default\n","sub_path":"script.dokionderhoud/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"504037744","text":"'''\naccess the information of turn loading\n'''\n\nimport sqlite3\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\nclass TurnDB():\n \n def __init__(self):\n \n self.turn_conn = sqlite3.connect('turn.db')\n self.turn_curs = self.turn_conn.cursor()\n self.turn_curs.execute('''CREATE TABLE IF NOT EXISTS turn\n (turn_addr VARCHAR(15) PRIMARY KEY,\n loading REAL,\n history REAL,\n conn_state BOOLEAN,\n resolution INTEGER,\n time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL)''')\n\n def get_turn_info(self, addr):\n '''\n get the info of the turn server\n '''\n\n self.turn_curs.execute(\n '''SELECT * from turn where turn_addr = ?''', (addr,))\n\n # return None if doesn't exist\n return self.turn_curs.fetchone()\n \n def get_min_loading_turn_addr(self):\n '''\n get the info of the turn server which has the minimum loading\n '''\n\n #self.turn_curs.execute(\n # '''SELECT turn_addr, history, loading FROM turn WHERE loading = \n # (SELECT min(loading) FROM turn)''')\n\n self.turn_curs.execute(\n '''SELECT * FROM turn WHERE conn_state = 1 AND loading < 80 ORDER BY loading''')\n \n turn_info = self.turn_curs.fetchall()\n\n if turn_info:\n for i in range(0, len(turn_info)):\n data_time = datetime.strptime(turn_info[i][5], \"%Y-%m-%d %H:%M:%S\")\n if (data_time - data_time.now()) > timedelta(seconds = 185):\n self.update_turn_info(turn_info[i][0], turn_info[i][1], turn_info[i][2], 0)\n else:\n return turn_info[i][0]\n\n return \"0.0.0.0\"\n\n def update_turn_info(self, addr, loading, history, conn_state = 1, resolution = 1080):\n '''\n update data of the turn server\n '''\n\n self.delete_turn_info(addr)\n self.add_turn_info(addr, loading, history, conn_state, resolution)\n\n def add_turn_info(self, addr, loading, history = 0.0, conn_state = 1, resolution = 1080):\n '''\n add the turn server into the database\n '''\n\n ins = 'INSERT INTO turn\\\n (turn_addr, loading, history, conn_state, resolution) VALUES(?, ?, ?, ?, ?)'\n\n 
self.turn_curs.execute(ins, (addr, loading, history, conn_state, resolution))\n self.turn_conn.commit()\n\n def delete_turn_info_disconnect(self):\n '''\n delete the turn server if it doesn't send data over 3 time windows(9 mins)\n '''\n\n pass\n\n def delete_turn_info(self, addr):\n '''\n delete turn server\n '''\n\n self.turn_curs.execute(\n '''DELETE FROM turn where turn_addr = ?''', (addr,))\n self.turn_conn.commit()\n\n def get_connecting_turn_info(self):\n '''\n return a turn address or return False\n '''\n\n self.turn_curs.execute(\n '''SELECT * FROM turn WHERE conn_state = 1''')\n \n return self.turn_curs.fetchone()\n","sub_path":"controller_dresolution/turn_db_manipulation.py","file_name":"turn_db_manipulation.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"75011441","text":"from django.shortcuts import render, get_object_or_404, redirect, render_to_response\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db.models import Sum\nfrom django.db.models import Q\nfrom django.http import HttpResponse, Http404, JsonResponse\nfrom django.contrib import messages\nfrom django.core.cache import cache\nfrom django.views import View\nfrom django.views.generic import ListView\nfrom django.utils.decorators import method_decorator\n\nfrom datetime import date, timedelta\nimport json\n\nfrom .forms import ExpenseForm, SelectDateExpenseForm, SelectDateRangeExpenseForm\nfrom .models import Expense, Remark\nfrom decorators import login_required_message\n\n# Create your views here.\n\n\n\nclass AddExpense(View):\n form_class = ExpenseForm\n template_name = \"add_expense.html\"\n context = {\n 'form': form_class,\n 'title': \"Add expense\"\n }\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n last_10_expenses = Expense.objects.all(user=request.user).order_by(\n '-created_at', '-timestamp',\n )[:10]\n \n self.context['objects'] = last_10_expenses\n self.context['total'] = Expense.objects.amount_sum(user=request.user)\n return render(request, self.template_name, self.context)\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n amount = form.cleaned_data.get('amount')\n remark = form.cleaned_data.get('remark', '').title()\n timestamp = form.cleaned_data.get('timestamp')\n\n expense = Expense.objects.create(\n user = request.user,\n amount = amount,\n timestamp = timestamp\n )\n\n if remark:\n try:\n rem = Remark.objects.get(user=request.user, name=remark)\n except:\n rem = Remark.objects.create(user=request.user, name=remark)\n expense.remark = rem\n expense.save()\n \n\n return 
HttpResponse(status=200)\n else:\n return HttpResponse(status=400)\n\n\n\nclass UpdateExpense(View):\n form_class = ExpenseForm\n template_name = \"update_expense.html\"\n context = {\n \"title\": \"Update expense\"\n }\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_object(self, request, *args, **kwargs):\n id = int(kwargs['id'])\n instance = Expense.objects.all(user=request.user).filter(id=id).first()\n\n if not instance:\n raise Http404\n return instance\n\n def get(self, request, *args, **kwargs):\n instance = self.get_object(request, *args, **kwargs)\n\n # thirty_day = date.today() - timedelta(days=30)\n # if not instance.timestamp >= thirty_day:\n # return HttpResponse(\"
Too late! Cannot be changed now.
\", status=400)\n\n initial_data = {\n 'amount': instance.amount,\n 'remark': instance.remark,\n 'timestamp': instance.timestamp\n }\n form = self.form_class(initial=initial_data)\n self.context['form'] = form\n return render(request, self.template_name, self.context)\n\n\n def post(self, request, *args, **kwargs):\n instance = self.get_object(request, *args, **kwargs)\n\n form = self.form_class(request.POST)\n if form.is_valid():\n amount = form.cleaned_data.get('amount')\n remark = form.cleaned_data.get('remark', '').title()\n timestamp = form.cleaned_data.get('timestamp')\n\n instance.amount = amount\n instance.timestamp = timestamp\n rem = None\n if remark:\n try:\n rem = Remark.objects.get(user=request.user, name=remark)\n except:\n rem = Remark.objects.create(user=request.user, name=remark)\n \n instance.remark = rem\n instance.save()\n\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=400)\n\n\n\nclass ExpenseList(View):\n template_name = \"expense_list.html\"\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\n def get(self, request, *args, **kwargs):\n objects_list = Expense.objects.all(user=request.user)\n first_date = None\n\n if objects_list:\n first_date = objects_list.last().timestamp or None\n\n q = request.GET.get(\"q\")\n object_total = None\n if q:\n objects_list = objects_list.filter(\n Q(remark__name__icontains=q) |\n Q(amount__icontains=q)\n ).distinct()\n object_total = objects_list.aggregate(Sum('amount'))['amount__sum']\n\n order_field = request.GET.get(\"field\")\n if order_field:\n ordering = request.GET.get(\"order\", \"\")+order_field\n print(ordering)\n objects_list = objects_list.order_by(ordering)\n\n paginator = Paginator(objects_list, 15)\n\n page = request.GET.get('page')\n\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n objects = paginator.page(1)\n except EmptyPage:\n objects = 
paginator.page(paginator.num_pages)\n\n total = Expense.objects.amount_sum(user=request.user)\n\n context = {\n \"title\": \"Expenses\",\n \"objects\": objects,\n \"total\": total,\n \"first_date\": first_date,\n \"object_total\": object_total,\n }\n\n return render(request, self.template_name, context)\n\n\n# class DayWiseExpense(ListView):\n\n# template_name = \"day-expense.html\"\n# paginate_by = 30\n\n# @method_decorator(login_required)\n# def dispatch(self, *args, **kwargs):\n# return super().dispatch(*args, **kwargs)\n\n# def get_queryset(self, *args, **kwargs):\n# return Expense.objects.all(user=self.request.user).dates('timestamp', 'day').order_by('-timestamp')\n\n# def get_context_data(self, *args, **kwargs):\n# context = super().get_context_data(*args, **kwargs)\n# queryset = self.get_queryset()\n# days = queryset\n# data = []\n# for day in days:\n# sum_day = queryset.filter(timestamp=day).aggregate(Sum('amount'))['amount__sum']\n# data.append((day, sum_day))\n \n# context['data'] = data\n# context['title'] = 'Day Wise Expense'\n# return context\n\n\n\nclass DayWiseExpense(View):\n\n template_name = \"day-expense.html\"\n context = {\n 'title': 'Day Wise Expense',\n }\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n expense = Expense.objects.all(user=request.user)\n days = expense.dates('timestamp', 'day').order_by('-timestamp')\n data = []\n for day in days:\n sum_day = expense.filter(timestamp=day).aggregate(Sum('amount'))['amount__sum']\n data.append((day, sum_day))\n \n self.context['data'] = data\n return render(request, self.template_name, self.context)\n\n\n\nclass MonthWiseExpense(View):\n template_name = \"month-expense.html\"\n context = {\n 'title': 'Monthly Expense',\n }\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, 
request, *args, **kwargs):\n dates = Expense.objects.all(user=request.user).dates('timestamp', 'month')\n data = []\n for date in dates:\n amount = Expense.objects.this_month(\n user=request.user, year=date.year, month=date.month\n ).aggregate(Sum('amount'))['amount__sum']\n data.append((date, amount))\n self.context['data'] = data\n return render(request, self.template_name, self.context)\n\n\n\nclass DateSearch(View):\n date_form_class = SelectDateExpenseForm\n range_form_class = SelectDateRangeExpenseForm\n template_name = \"search.html\"\n context = {\n \"title\": \"Search\"\n }\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n self.context['date_form'] = self.date_form_class()\n self.context['range_form'] = self.range_form_class()\n\n if request.GET:\n date_form = self.date_form_class(request.GET)\n range_form = self.range_form_class(request.GET)\n\n objects = None\n\n if range_form.is_valid():\n remark = range_form.cleaned_data.get('remark')\n f_dt = range_form.cleaned_data.get('from_date')\n t_dt = range_form.cleaned_data.get('to_date')\n objects = Expense.objects.all(user=request.user).filter(timestamp__range=(f_dt, t_dt))\n if remark:\n objects = objects.filter(remark__name__icontains=remark)\n else:\n range_form = self.range_form_class()\n\n if date_form.is_valid():\n remark = date_form.cleaned_data.get('remark')\n dt = date_form.cleaned_data.get('date')\n objects = Expense.objects.all(user=request.user).filter(timestamp=dt)\n if remark:\n objects = objects.filter(remark__name__icontains=remark)\n else:\n date_form = self.date_form_class()\n\n if objects:\n object_total = objects.aggregate(Sum('amount'))['amount__sum']\n else:\n object_total = None\n\n \n self.context['date_form'] = date_form\n self.context['range_form'] = range_form\n self.context['objects'] = objects\n self.context['object_total'] = object_total\n\n return 
render(request, self.template_name, self.context)\n\n\n\nclass GoToExpense(View):\n \"\"\"\n provies expenses for particular day, month or year.\n \"\"\"\n template_name = 'goto.html'\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n day = kwargs.get('day')\n month = kwargs.get('month')\n year = kwargs.get('year')\n \n if day:\n objects = Expense.objects.this_day(user=request.user, year=year, month=month, day=day)\n elif month:\n objects = Expense.objects.this_month(user=request.user, year=year, month=month)\n elif year:\n objects = Expense.objects.this_year(user=request.user, year=year)\n\n goto_total = objects.aggregate(Sum('amount'))['amount__sum']\n\n paginator = Paginator(objects, 50)\n\n page = request.GET.get('page')\n\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n objects = paginator.page(1)\n except EmptyPage:\n objects = paginator.page(paginator.num_pages)\n\n context = {\n \"title\": \"Expenses\",\n \"objects\": objects,\n \"goto_total\": goto_total,\n }\n\n return render(request, self.template_name, context)\n\n\n\nclass GoToRemarkWiseExpense(View):\n \"\"\"\n provies expenses for particular day, month or year.\n \"\"\"\n template_name = 'remark-month-expense.html'\n\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n day = int(kwargs.get('day', 0))\n month = int(kwargs.get('month', 0))\n year = int(kwargs.get('year', 0))\n _day = None\n _month = None\n _year = None\n \n if day:\n objects = Expense.objects.this_day(user=request.user, year=year, month=month, day=day)\n _day = date(year, month, day)\n elif month:\n objects = Expense.objects.this_month(user=request.user, year=year, month=month)\n _month = date(year, month, 1)\n elif year:\n objects = 
Expense.objects.this_year(user=request.user, year=year)\n _year = date(year, 1, 1)\n\n objects = objects.select_related('remark')\n remarks = set()\n for instance in objects:\n remarks.add(instance.remark)\n\n remark_dict = {}\n for remark in remarks:\n remark_dict[remark] = objects.filter(remark=remark).aggregate(\n Sum('amount')\n )['amount__sum']\n\n remark_dict = sorted(remark_dict.items(), key=lambda x: x[1], reverse=True)\n\n total = objects.aggregate(Sum('amount'))['amount__sum']\n\n context = {\n \"title\": \"Expenses\",\n \"remarks\": remark_dict,\n \"total\": total,\n \"day\": _day,\n \"month\": _month,\n \"year\": _year,\n }\n\n return render(request, self.template_name, context)\n\n\n\nclass GetRemark(View):\n \"\"\"\n will be used to autocomplete the remarks\n \"\"\"\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n if request.is_ajax():\n q = request.GET.get('term', '')\n # remarks = Expense.objects.all(user=request.user).filter(remark__icontains=q).order_by(\n # ).values_list('remark', flat=True).distinct()\n remarks = Remark.objects.filter(user=request.user).filter(name__icontains=q).order_by(\n ).values_list('name', flat=True)\n results = []\n for remark in remarks:\n remark_json = {}\n remark_json['value'] = remark\n results.append(remark_json)\n data = json.dumps(results)\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n\n\nclass GetYear(View):\n \"\"\"\n return all the year in which expenses are registered.\n \"\"\"\n @method_decorator(login_required_message)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n cache_key = 'expense_year'\n cache_time = 15768000 # 182.5 days\n data = cache.get(cache_key)\n\n if not data:\n years = Expense.objects.all(user=request.user).dates('timestamp', 'year')\n result 
= []\n\n for y in years:\n result.append(y.year)\n data = json.dumps(result)\n\n cache.set(cache_key, data, cache_time)\n\n return HttpResponse(data, content_type='application/json')\n\n\n\nclass Error404(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"404.html\", {})\n\n\nclass Error500(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"500.html\", {})\n\n\nclass Error400(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"400.html\", {})\n\n\nclass Error403(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"403.html\", {})","sub_path":"expense/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"297874060","text":"from PIL import Image\n\nimages = []\n\nhand = input(\"Please input hand: \")\nhand_length = 0\n\nfolder = \"hai\"\nfilenames = []\nkakan_idx = []\nseperates = []\n\ntiles = []\nfor c in hand.split()[0]:\n if c.isdigit():\n tiles.append(c)\n hand_length += 1\n elif c.isalpha():\n tiles = [num+c for num in tiles]\n for tile in tiles:\n filenames.append(f\"{folder}/{tile}.png\")\n tiles = []\n\nseperates.append(hand_length-1)\n\nfor file in filenames:\n images.append(Image.open(file))\n\ni = 0\ntiles = []\ncall = \" \".join(hand.split()[1:])\nwhile i < len(call):\n c = call[i]\n if c == \"p\":\n from_whom = call[i+1]\n idx = 0\n if from_whom == \"l\":\n idx = 0\n elif from_whom == \"m\":\n idx = 1\n elif from_whom == \"r\":\n idx = 2\n i += 2\n\n name = call[i:i+2]\n name = f\"{folder}/{name}.png\"\n i += 2\n for j in range(0, 3):\n if j == idx:\n img = Image.open(name)\n rotated = Image.new(\"RGBA\", (img.size[1], img.size[1]))\n rotated.paste(img.rotate(90, expand=True), (0, img.size[1]-img.size[0]))\n images.append(rotated)\n else:\n img = Image.open(name)\n images.append(img)\n seperates.append(seperates[-1]+3)\n elif c == \"c\":\n from_whom = call[i+1]\n idx = 0\n if from_whom == \"l\":\n idx = 0\n elif from_whom == \"m\":\n idx = 1\n elif from_whom == \"r\":\n idx = 2\n i += 2\n\n names = [call[i], call[i+1], call[i+2]]\n names = [f\"{folder}/{name+call[i+3]}.png\" for name in names]\n i += 3\n for j in range(0, 3):\n if j == idx:\n img = Image.open(names[j])\n rotated = Image.new(\"RGBA\", (img.size[1], img.size[1]))\n rotated.paste(img.rotate(90, expand=True), (0, img.size[1]-img.size[0]))\n images.append(rotated)\n else:\n img = Image.open(names[j])\n images.append(img)\n seperates.append(seperates[-1]+3)\n elif c == \"k\":\n\n mode = call[i+1]\n i += 1\n\n idx = 0\n if mode != \"a\" or mode != \"k\":\n from_whom = call[i+1]\n if from_whom == \"l\":\n idx = 0\n elif from_whom == \"m\":\n idx = 1\n elif from_whom == \"r\":\n idx 
= 3\n if mode == \"k\":\n from_whom = call[i+1]\n if from_whom == \"l\":\n idx = 0\n elif from_whom == \"m\":\n idx = 1\n elif from_whom == \"r\":\n idx = 2\n if mode != \"a\":\n i += 2\n else:\n i += 1\n\n if mode == \"a\":\n name = call[i:i+2]\n name = f\"{folder}/{name}.png\"\n images.append(Image.open(f\"{folder}/b.png\"))\n images.append(Image.open(name))\n images.append(Image.open(name))\n images.append(Image.open(f\"{folder}/b.png\"))\n i += 1\n seperates.append(seperates[-1]+4)\n elif mode == \"k\":\n name = call[i:i+2]\n name = f\"{folder}/{name}.png\"\n for j in range(3):\n if j == idx:\n kakan_idx.append(len(images))\n\n img = Image.open(name)\n rotated = Image.new(\"RGBA\", (img.size[1], 2*img.size[0]))\n rotated.paste(img.rotate(90, expand=True), (0, 0))\n rotated.paste(img.rotate(90, expand=True), (0, img.size[0]))\n images.append(rotated)\n else:\n img = Image.open(name)\n images.append(img)\n i += 2\n seperates.append(seperates[-1]+3)\n elif mode == \"m\":\n name = call[i:i+2]\n name = f\"{folder}/{name}.png\"\n i += 2\n for j in range(4):\n if j == idx:\n img = Image.open(name)\n rotated = Image.new(\"RGBA\", (img.size[1], img.size[1]))\n rotated.paste(img.rotate(90, expand=True), (0, img.size[1]-img.size[0]))\n images.append(rotated)\n else:\n img = Image.open(name)\n images.append(img)\n seperates.append(seperates[-1]+4)\n\n i += 1\n\nwidths, heights = zip(*(img.size for img in images))\ntotal_width = sum(widths)\nif call != \"\":\n total_width += max(widths)*(len(seperates)-1)\n\nmax_height = max(heights)\n\nnew_image = Image.new(\"RGBA\", (total_width, max_height))\n\nx_offset = 0\ny_offset = max(heights)-max(widths) if len(kakan_idx) > 0 else 0\n\nseperates.pop()\nfor i, img in enumerate(images):\n if i in kakan_idx:\n new_image.paste(img, (x_offset, 0))\n else:\n new_image.paste(img, (x_offset, y_offset))\n\n if i in seperates:\n x_offset += max(widths) + img.size[0]\n else:\n x_offset += 
img.size[0]\n\nnew_image.save(f\"{hand}.png\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"568757110","text":"import logging\n#ip初始评分\nMAX_SCORE=50\n#日志默认配置\nLOG_LEVEL=logging.INFO #默认级别\nLOG_FMT='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'\nLOG_DATEFMT='%Y-%M-%d %H:%M:%S'\nLOG_FILENAME='log.log'\n\n#测试代理的ip超时时间\nTEST_TIMEOUT=15\n\n#MongoDB的数据的的URL\nMONGO_URL='mongodb://127.0.0.1:27017'\n#获取每个网页的间隔\nSLEEP_TIME=1\n\n# 配置代理爬虫列表\nPROXIES_SPIDERS = [\n 'core.proxy_spider.proxy_spider.IphaiSpider',\n 'core.proxy_spider.proxy_spider.XiciSpider',\n 'core.proxy_spider.proxy_spider.ProxylistplusSpider',\n]\n\n#爬虫的时间间隔\nRUN_SPIDERS_INTERVAL=12\n\n#用于配置检测代理ip的异步数量\nTEST_PROXIES_ASYNC_COUNT=10\n\n#检测代理ip的时间间隔\nRUN_PROXY_TEST_INTERVAL=2\n\n#获取最大IP的数量;值越下,可以信越高,随机性就越差\nPROXIES_MAX_COUNT=10","sub_path":"ip代理池/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"141195611","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.forms import ModelForm\nfrom models import Subscription\n\nclass SubscriptionForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(SubscriptionForm, self).__init__(*args, **kwargs)\n\n self.fields['user_name'].required = True\n self.fields['email'].required = True\n\n class Meta:\n model = Subscription\n fields = ['user_name', 'email', ]\n widjets = {\n 'user_name': forms.TextInput(attrs={'placeholder': 'Имя пользователя'}),\n 'email': forms.EmailInput(attrs={'placeholder': 'Имейл'}),\n }","sub_path":"src/apps/subscription/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"492533388","text":"from typing import List\nfrom fastapi import APIRouter\nfrom fastapi import Depends\nfrom fastapi import FastAPI\nfrom fastapi import HTTPException\nfrom fastapi import Response\nfrom fastapi import status\n\nfrom ..exseptions import EntityConflictError, EntiyDoesNotExistError\nfrom ..accounts.schemas import Account\nfrom ..accounts.auth import get_current_user\nfrom .schemas import ShopName, Shop\nfrom .service import ShopsServices\n\n\nrouter = APIRouter(prefix='/shops')\n\n\ndef initialize_app(app: FastAPI):\n app.include_router(router)\n\n\n@router.get('', response_model=List[Shop], tags=['Shops'])\ndef get_user_shops(\n current_account: Account = Depends(get_current_user),\n shop_service: ShopsServices = Depends(),\n):\n return shop_service.get_shops(current_account)\n\n\n@router.post(\n '',\n response_model=Shop,\n status_code=status.HTTP_201_CREATED,\n tags=['Shops'],\n)\ndef add_new_shop(\n new_shop: ShopName,\n current_account: Account = Depends(get_current_user),\n shop_service: ShopsServices = Depends(),\n):\n try:\n return shop_service.add_new_shop(new_shop, current_account)\n except EntityConflictError:\n raise HTTPException(status.HTTP_409_CONFLICT)\n\n\n@router.patch('/{shop_id}', response_model=Shop, tags=['Shops'])\ndef edit_shop(\n shop_id: int,\n shop: ShopName,\n current_account: Account = Depends(get_current_user),\n shop_service: ShopsServices = Depends(),\n):\n edit_shop = Shop(\n id=shop_id,\n name=shop.name,\n )\n try:\n return shop_service.edit_shop(edit_shop, current_account)\n except EntiyDoesNotExistError:\n raise HTTPException(status.HTTP_404_NOT_FOUND)\n\n\n@router.delete(\n '/{shop_id}',\n status_code=status.HTTP_204_NO_CONTENT,\n tags=['Shops'],\n)\ndef delete_shop(\n shop_id: int,\n current_account: Account = Depends(get_current_user),\n shop_service: ShopsServices = Depends(),\n):\n try:\n shop_service.delete_shop(shop_id, current_account)\n return Response()\n except EntiyDoesNotExistError:\n raise 
HTTPException(status.HTTP_404_NOT_FOUND)\n","sub_path":"src/ShoppingAPIService/shops/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"320954409","text":"from __future__ import annotations\nfrom enum import Enum\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/sharedUprofile_v20_0\"\n\n\nclass TypeProfileComponentType1(Enum):\n \"\"\"Specifies the component names.\n\n (i.e AccountInfo, AirPreference, TravelDocument etc)\n \"\"\"\n ACCOUNT_INFO = \"AccountInfo\"\n TRAVELER_INFO = \"TravelerInfo\"\n TRAVELER_IDENTITY_INFORMATION = \"TravelerIdentityInformation\"\n TRAVEL_DOCUMENT = \"TravelDocument\"\n ACCOUNTING_REFERENCE = \"AccountingReference\"\n POLICY_REFERENCE = \"PolicyReference\"\n LOYALTY_PROGRAM_ENROLLMENT = \"LoyaltyProgramEnrollment\"\n CONTRACT = \"Contract\"\n COMMISSION = \"Commission\"\n SERVICE_FEE = \"ServiceFee\"\n REMARK = \"Remark\"\n ALTERNATE_CONTACT = \"AlternateContact\"\n ALTERNATE_CONTACT_ADDRESS = \"AlternateContactAddress\"\n ALTERNATE_CONTACT_PHONE = \"AlternateContactPhone\"\n ALTERNATE_CONTACT_ELECTRONIC_ADDRESS = \"AlternateContactElectronicAddress\"\n COMMISSION_REFERENCE = \"CommissionReference\"\n ADDRESS = \"Address\"\n PHONE = \"Phone\"\n ELECTRONIC_ADDRESS = \"ElectronicAddress\"\n AIR_PREFERENCE = \"AirPreference\"\n VEHICLE_PREFERENCE = \"VehiclePreference\"\n HOTEL_PREFERENCE = \"HotelPreference\"\n RAIL_PREFERENCE = \"RailPreference\"\n PROFILE_PARENT_HISTORY = \"ProfileParentHistory\"\n FIELD_DATA = \"FieldData\"\n FIELD_GROUP_DATA = \"FieldGroupData\"\n ADVISORY = \"Advisory\"\n AGENCY_GROUP_INFO = \"AgencyGroupInfo\"\n AGENCY_INFO = \"AgencyInfo\"\n BRANCH_GROUP_INFO = \"BranchGroupInfo\"\n BRANCH_INFO = \"BranchInfo\"\n AGENT_INFO = \"AgentInfo\"\n TRAVELER_GROUP_INFO = \"TravelerGroupInfo\"\n PROFILE_STATUS = \"ProfileStatus\"\n PROFILE_LINK = \"ProfileLink\"\n OTHER_PREFERENCE = \"OtherPreference\"\n FORM_OF_PAYMENT = \"FormOfPayment\"\n EXTERNAL_IDENTIFIER = 
\"ExternalIdentifier\"\n","sub_path":"travelport/models/type_profile_component_type_1.py","file_name":"type_profile_component_type_1.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"100704461","text":"\nimport csv\nimport os\nfrom elasticsearch import Elasticsearch\n\n#Initalize Object#\nes = Elasticsearch(HOST=\"http://localhost\", PORT=9200)\n\n#Create an index in ES, ignore status code 400 (index already exists)\nes.indices.create(index = \"wrasslers\",ignore=400)\n\n#Build the csv indices using your csv data#\nwith open(os.path.expanduser(\"~/Projects/Elastic-Search/data/wwf.csv\")) as f:\n reader = csv.DictReader(f)\n for line in reader:\n #If uniaue ID in excel file ne _source unique ID\n es.index(index=\"wrasslers\", doc_type = \"world_champions\", body = line)\n\n \n\n#Match Query#\n#http://localhost:9200/wrasslers/_search?q=cena\nes.search(index=\"wrasslers\", body={\n \"query\": {\n \"multi_match\" : {\n \"query\" : \"kane\",\n \"fields\" : [\"Date\", \"Days\", \"Description\", \"ID\", \"Location\", \"Reign\", \"Show\", \"Wrestler^3\"]\n }\n }\n }\n)\n\n#Boost Ressults using certain field#\nes.search(index=\"wrasslers\", body={\n \"query\": {\n \"multi_match\" : {\n \"query\" : \"cena\",\n \"fields\" : [\"Date\", \"Days\", \"Description\", \"ID\", \"Location\", \"Reign\", \"Show\", \"Wrestler^3\"]\n }\n }\n }\n)\n \nes.search(index=\"wrasslers\", body={\n \"query\": {\n \"multi_match\" : {\n \"query\" : \"wes bailey\",\n \"fields\" : [\"Date\", \"Days\", \"Description^5\", \"ID\", \"Location\", \"Reign\", \"Show\", \"Wrestler\"]\n }\n }\n }\n)\n","sub_path":"code/examples/wrasslers_examples.py","file_name":"wrasslers_examples.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"571076718","text":"import datetime\nimport json\nimport os\n\nfrom log import logger, color\n\ndb_dir = \".db\"\nif not os.path.isdir(db_dir):\n os.mkdir(db_dir)\n\n\ndef load_db() -> dict:\n with open(localdb_file, 'r', encoding='utf-8') as fp:\n return json.load(fp)\n\n\ndef save_db(db):\n with open(localdb_file, 'w', encoding='utf-8') as fp:\n json.dump(db, fp, ensure_ascii=False, indent=4)\n\n\ndef update_db(callback):\n db = load_db()\n\n callback(db)\n\n save_db(db)\n\n return db\n\n\ndef load_db_for(accountName):\n db = load_db()\n\n accountDb = get_account_from_db(accountName, db)\n\n return accountDb\n\n\ndef save_db_for(accountName, accountDb):\n db = load_db()\n\n set_account_to_db(accountName, db, accountDb)\n\n save_db(db)\n\n\ndef update_db_for(accountName, callback):\n db = load_db()\n\n accountDb = get_account_from_db(accountName, db)\n callback(accountDb)\n\n save_db(db)\n\n return db\n\n\ndef get_account_from_db(accountName, db):\n if \"accounts\" not in db:\n db[\"accounts\"] = {}\n accounts = db[\"accounts\"]\n if accountName not in accounts:\n accounts[accountName] = {}\n account = accounts[accountName]\n return account\n\n\ndef set_account_to_db(accountName, db, accountDb):\n if \"accounts\" not in db:\n db[\"accounts\"] = {}\n accounts = db[\"accounts\"]\n\n accounts[accountName] = accountDb\n\n\ndef init_db():\n save_db({\n \"created_at\": datetime.datetime.now().timestamp(),\n \"created_at_str\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n })\n logger.info(color(\"bold_green\") + \"数据库已初始化完毕\")\n\n\nlocaldb_file = os.path.join(db_dir, 'db.json')\n# 如果未创建,则初始化\nif not os.path.isfile(localdb_file):\n init_db()\n# 检测数据库是否损坏,若损坏则重新初始化\ntry:\n load_db()\nexcept json.decoder.JSONDecodeError as e:\n logger.error(\"数据库似乎损坏了,具体出错情况如下,将重新初始化数据库文件\", exc_info=e)\n 
init_db()\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"116311539","text":"import math\nfrom datetime import datetime, timedelta\nfrom random import randint\nfrom itertools import combinations\nfrom typing import List\n\nfrom ..model import TaskState, TugState, ShipState, ChargeTypeList, Tug, Task, Ship, ChargeType\nfrom ..port import get_pier_latlng\nfrom ..settings import PENALTY, TUG_SPEED\nfrom .cutil import c_move_dis_to_time, c_count_dis\nfrom typing import Union\n\ndef count_move_dis(start, to):\n \"\"\"Calculate moving distance from a coordinate to a pier\n\n Args:\n start ((float, float)): latitide and longitude\n to (int): pier number\n\n Returns:\n (float): distance in km\n \"\"\"\n dest = get_pier_latlng(to)\n dis = count_dis(float(start[0]), float(start[1]), float(dest[0]), float(dest[1]))\n return dis\n\n\ndef move_dis_to_time(dis, velo=TUG_SPEED):\n \"\"\"Convert moving distance to time\n \"\"\"\n return timedelta(hours=c_move_dis_to_time(dis, velo))\n\n\ndef count_move_time(start, to, velo=TUG_SPEED):\n \"\"\"Calculate moving time from a coordinate to a pier\n\n Args:\n start ((float, float))\n to (int)\n\n Returns:\n (timedelta): moving time\n \"\"\"\n dis = count_move_dis(start, to)\n return move_dis_to_time(dis)\n\n\ndef count_dis(base_lat, base_lng, lat, lng):\n \"\"\"Calculate the Euclidean distance\n\n Args:\n base_lat (float)\n base_lng (float)\n lat (float)\n lng (float)\n\n Returns:\n (float): distance in km\n \"\"\"\n return c_count_dis(base_lng, base_lat, lng ,lat)\n\n\ndef get_oil_price(hp):\n \"\"\"Provide oil cost per km from horsepower\n\n Arg:\n hp (int): horsepower of tugs\n\n Return:\n (float): oil price ($/km)\n \"\"\"\n hp_price = {\n 1800: 134.1075498270909,\n 2400: 185.0696699493183,\n 3200: 257.887743955886,\n 3300: 267.3812284262817,\n 3400: 276.9616518343608,\n 3500: 286.6290141801232,\n 3600: 296.3833154635688,\n 4000: 336.2699099741844,\n 4200: 356.734840855592,\n 4400: 377.5475274877326,\n 4500: 388.0842792103279,\n 5200: 464.2758315236272,\n 6400: 
604.8009600994644,\n }\n return hp_price[hp]\n\n\ndef get_prices(req_types, tugs):\n \"\"\"Convert types of tugs to revenue per hour according to comparison \n between required types and dispatched tugs\n\n Args:\n req_types ([ChargeType]): a list of required types of a task\n tugs ([Tug]): a list of tugs assigned to a task\n\n Return:\n ([int]): a list of prices ($/hour)\n \"\"\"\n\n assert len(req_types) <= len(tugs)\n table = {117: 7395, 118: 10846, 119: 19720, 120: 22310, 121: 32000}\n req_types.sort()\n tugs.sort(key=lambda tug: tug.type)\n \n prices = []\n i = 0\n while i < len(req_types):\n prices.append(table[min(req_types[i], tugs[i].type)])\n i += 1\n while i < len(tugs):\n prices.append(table[tugs[i].type])\n i += 1\n return prices\n\n\ndef calculate_revenue(times: List[timedelta], req_types: List[ChargeType], tugs: List[Tug]) -> Union[float, list]:\n \"\"\"Calculate revenue for a dispatched task\n\n Args:\n times ([timedelta]): a list of timestamps when the tugs started moving\n req_types ([ChargeType]): a list of required types for a task\n tugs ([Tug]): a list of tugs assigned to a task\n sep (bool): to separate profit between tugs\n\n Return:\n (float): revenue or ([float]): list of revenue\n\n \"\"\"\n\n if not (len(times) and len(req_types) and len(tugs)):\n return 0\n \n assert len(times) == len(req_types) and len(times) == len(tugs), \"Lists length differ\"\n\n revenue = 0\n prices = get_prices(req_types, tugs)\n\n for time, price in zip(times, prices):\n cycles = math.ceil((time - timedelta(minutes=60)).seconds / 60 / 30) * 0.5 + 1 \\\n if time > timedelta(minutes=60) else 1\n revenue += price * cycles\n return revenue\n\n\n","sub_path":"nturesell/algo/utils/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"301985570","text":"#!/usr/bin/python3\n\nimport sys\nimport os\n\nif sys.platform != 'win32':\n print(\"This script only runs on Windows\")\n sys.exit(1)\n\n# just launch a command\nos.system('ipconfig')\n\n# open a command and read its output\nd = os.popen(r'dir ..\\DATA')\n\nfor entry in d:\n print(entry, end=' ')\n\n# backticks (``) equiv\nhostname = os.popen('hostname').read()[:-1]\n\nprint('Hostname is', hostname)\n\n\n","sub_path":"EXAMPLES/external_win.py","file_name":"external_win.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"460336436","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport subprocess\nimport readline\nimport pandas as pd\nimport numpy as np\nfrom plio.io.io_bae import read_gpf, save_gpf\nfrom appl_tools.surfacefit import run_pc_align, update_gpf\n\n## Create an argument parser\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description = \"\"\"Transform points in a Socet Set Ground Point File (GPF).\nThe transformed latitude, longitude, and height values from the Tie Points are then written to a new GPF with their sigmas set equal to 1 and the \"known\" flag \nchanged from \"1\" (Tie Point) to \"3\" (XYZ Control). Non-Tie Points from the original GPF are written to the new GPF with their \"known\" flags changed to \"1.\" \nTie Points from the original GPF that were not active (\"stat\" = 0) are copied \"as-is\" into the new GPF. The output GPF preserves the order of the ground points from the original GPF.\n\nIf it is desired to update all active points in the input GPF, use the '--all-points' flag. The modified points will still have their \"known\" flag set to \"3\" (XYZ Control) in the output GPF.\n\nThe Ames Stereo Pipeline program pc_align must be available in the user's path or somewhere else where Python can find it. \nMore information about the Ames Stereo Pipeline is available on the project's Git repository: https://github.com/NeoGeographyToolkit/StereoPipeline\"\"\")\n parser.add_argument(\"socet_gpf\",\n help = \"The name of the Socet Ground Point File to transform.\")\n parser.add_argument(\"transform_matrix\",\n help = \"\"\"Name of a pc_align-compatible transformation matrix to apply to the input GPF.\"\"\")\n parser.add_argument(\"tfm_socet_gpf\",\n help = \"\"\"Name to use for the output (transformed) ground point file. 
Must include \".gpf\" extension.\"\"\")\n parser.add_argument(\"--gxp\",\n action='store_true',\n help = \"Flag to indicate input GPF is in Socet GXP format. Output GPF will be in legacy Socet Set format.\")\n parser.add_argument(\"--all-points\",\n action='store_true',\n help = \"This flag will force updating of all active (stat = 1) points in the input GPF, not just tie points.\")\n parser.add_argument(\"--s_srs\",\n help = \"\"\"PROJ string describing the projected spatial reference system of the input GPF. If omitted, script assumes a geographic SRS with shape defined by --datum or --radius.\"\"\",\n nargs='?',\n type=str)\n refshape = parser.add_mutually_exclusive_group(required=True)\n refshape.add_argument(\"--datum\",\n nargs=1,\n choices=['D_MARS', 'D_MOON', 'MOLA', 'NAD27', 'NAD83', 'WGS72', 'WGS_1984'],\n help = \"\"\"Use this datum for heights in the input GPF.\"\"\")\n refshape.add_argument(\"--radii\",\n nargs=2,\n metavar=('semi-major-axis','semi-minor-axis'),\n type=float,\n help=\"\"\"Semi-major and semi-minor axes, expressed in meters, that define the ellipsoid that heights in the input GPF are referenced to.\"\"\")\n args = parser.parse_args()\n return args\n\ndef main(user_args):\n\n socet_gpf = user_args.socet_gpf\n tfm_socet_gpf = user_args.tfm_socet_gpf\n all_points = user_args.all_points\n transform_matrix = user_args.transform_matrix\n datum = user_args.datum\n radii = user_args.radii\n s_srs = user_args.s_srs\n gxp = user_args.gxp\n\n \n if os.path.splitext(tfm_socet_gpf)[1] != \".gpf\":\n print(\"\"\"USER ERROR: Output file name must use \".gpf\" extension\"\"\")\n sys.exit(1)\n\n # Read in the Socet ground point file using plio's read_gpf()\n if gxp:\n gpf_df = read_gpf(socet_gpf, gxp=True)\n # Modify DataFrame to resemble legacy Socet Set format\n # Rename \"use\" and \"point_type\" to their Socet Set equivalents\n gpf_df.rename(columns={'use':'stat', 'point_type':'known'}, inplace=True)\n if not s_srs:\n gpf_df.lat_Y_North = 
np.radians(gpf_df['lat_Y_North'])\n gpf_df.long_X_East = np.radians(((gpf_df['long_X_East'] + 180) % 360) - 180)\n else:\n gpf_df = read_gpf(socet_gpf)\n\n # Set the index of the GPF dataframe to be the point_id column\n gpf_df.set_index('point_id', drop=False, inplace=True)\n\n # If user passed \"--all-points\" option, copy *all active* points to new data frame\n # Otherwise, copy active tie points (point_type == 0) only\n # Note that DataFrame is named \"tp_df\" regardless of whether it includes only tiepoints or not\n if all_points:\n tp_df = gpf_df[(gpf_df.stat == 1)].copy()\n else:\n tp_df = gpf_df[(gpf_df.known == 0) & (gpf_df.stat == 1)].copy()\n\n if not s_srs:\n tp_df.lat_Y_North = np.degrees(tp_df.lat_Y_North)\n tp_df.long_X_East = ((360 + np.degrees(tp_df.long_X_East)) % 360)\n\n gpf_align_prefix = os.path.splitext(tfm_socet_gpf)[0]\n\n # Write out CSV (compatible with pc_align) containing lat/long/height of points to be updated\n socet_gpf_csv = ((os.path.splitext(socet_gpf)[0]) + '.csv')\n tp_df.to_csv(path_or_buf=socet_gpf_csv,\n header=False,\n index=False,\n columns=['lat_Y_North','long_X_East','ht'])\n\n # Build arguments list and apply transformation to selected points from GPF using pc_align\n # Set num-iterations = 0 and turn off max-displacement (-1) because only going to apply existing transform\n apply_tfm_args = [\"--initial-transform\",transform_matrix,\n \"--num-iterations\",\"0\",\n \"--max-displacement\",\"-1\",\n \"--save-transformed-source-points\",\n \"-o\", gpf_align_prefix ]\n \n ## Extend the list of arguments for pc_align to include the datum or radii as necessary\n if datum is not None:\n apply_tfm_args.extend([\"--datum\", str(datum[0])])\n elif radii is not None:\n apply_tfm_args.extend([\"--semi-major-axis\", str(radii[0]), \"--semi-minor-axis\", str(radii[1])])\n\n if s_srs:\n apply_tfm_args.extend([\"--csv-proj4\", str(s_srs)])\n apply_tfm_args.extend([\"--csv-format\", str('''2:easting 1:northing 
3:height_above_datum''')])\n\n # Extend the list to place point clouds at the end of the list of arguments for pc_align\n # Note that we're specifying the same file as the reference and source clouds because pc_align requires 2 files as input,\n # even if we're only applying a transform and not iterating\n apply_tfm_args.extend([socet_gpf_csv,socet_gpf_csv])\n\n # Apply transform from previous pc_align run to tie points CSV\n print(\"Calling pc_align with 0 iterations to apply transform from previous run to Tie Points from GPF\")\n try:\n run_align = run_pc_align(apply_tfm_args)\n except subprocess.CalledProcessError as e:\n print(e)\n sys.exit(1)\n\n # mergeTransformedGPFTies\n # Convert the transformed tie points from CSV to a pandas DataFrame\n t = np.genfromtxt((gpf_align_prefix + '-trans_source.csv'),delimiter=',',\n skip_header=3,dtype='unicode')\n id_list = tp_df['point_id'].tolist()\n tfm_index = pd.Index(id_list)\n tfm_tp_df = pd.DataFrame(t, index=tfm_index, columns=['lat_Y_North','long_X_East','ht'])\n tfm_tp_df = tfm_tp_df.apply(pd.to_numeric)\n\n # Update the original tiepoint DataFrame with the transformed lat/long/height values from pc_align\n print(\"Updating GPF coordinates\")\n tp_df.update(tfm_tp_df)\n\n # Convert long from 0-360 to +/-180 and convert lat/long to radians\n # Note: Even if gxp==True, plio only knows how to write legacy Socet Set-style GPFs,\n # so must convert to radians on output if not s_srs\n if not s_srs:\n tp_df.lat_Y_North = np.radians(tp_df['lat_Y_North'])\n tp_df.long_X_East = np.radians(((tp_df['long_X_East'] + 180) % 360) - 180)\n\n # Apply updates to the original GPF DataFrame, and save transformed GPF file\n update_gpf(gpf_df,tp_df,tfm_socet_gpf,all_pts=all_points)\n\n # Write list of pointIDs of the transformed tiepoints to a file\n # Included for legacy compatibility, not actually used for anything\n tp_df.to_csv(path_or_buf=((os.path.splitext(socet_gpf)[0]) + '.tiePointIds.txt'),\n sep=' ', header=False,\n 
index=False,\n columns=['point_id'])\n\n\nif __name__ == \"__main__\":\n sys.exit(main(parse_args()))\n","sub_path":"SurfaceFit/gpf_transform.py","file_name":"gpf_transform.py","file_ext":"py","file_size_in_byte":8619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"251592439","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\turl(\n\t\tregex=r\"^api/$\",\n\t\tview=views.ContactCreateReadView.as_view(),\n\t\tname=\"flavor_rest_api\"\n\t),\n\turl(\n\t\tregex=r\"^api/(?P[-\\w]+)/$\",\n\t\tview=views.ContactReadUpdateDeleteView.as_view(),\n\t\tname=\"flavor_rest_api\"\n\t)\n]","sub_path":"datapp/contacts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"344883094","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 8 20:57:43 2019\n\n@author: Roy\n\"\"\"\n\n## conslidate learning with own example. Simple currency converter\n\ncurrency_1 = float(input('Please input amount of to convert\\t >>>'))\nconversion_rate = float(input('Please input the conversion factor\\t >>>'))\ncurrency_2 = currency_1*conversion_rate\nprint(f'At conversion rate {conversion_rate} your {currency_1} will be worth {currency_2}')\n","sub_path":"Part4/VeryBasicCurrencyConverter.py","file_name":"VeryBasicCurrencyConverter.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"252134355","text":"#Quadrato con sopra un triangolo, una casetta\r\nimport turtle\r\nwn = turtle.Screen()\r\nporta = turtle.Turtle()\r\npen = turtle.Turtle()\r\nretro = turtle.Turtle()\r\npen.pensize(4)\r\nporta.pensize(4)\r\nretro.pensize(4)\r\npen.forward(80)\r\npen.left(90)\r\npen.forward(80)\r\npen.left(90)\r\npen.forward(80)\r\npen.left(90)\r\npen.forward(80)\r\npen.left(180)\r\npen.forward(80)\r\npen.right(30)\r\npen.forward(80)\r\npen.right(120)\r\npen.forward(80)\r\nporta.forward(30)\r\nporta.left(90)\r\nporta.forward(35)\r\nporta.right(90)\r\nporta.forward(20)\r\nporta.right(90)\r\nporta.forward(35)\r\nporta.left(90)\r\nporta.forward(30)\r\nretro.left(160)\r\nretro.forward(100)\r\nretro.right(70)\r\nretro.forward(80)\r\nretro.right(110)\r\nretro.forward(100)\r\nretro.right(180)\r\nretro.forward(100)\r\nretro.right(100)\r\nretro.forward(80)\r\nretro.right(80)\r\nretro.forward(100)\r\n\r\n\r\n","sub_path":"casetta.py","file_name":"casetta.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"43638867","text":"#! python3\n\n# cmp-rbvpid.py\n# Compare the comma delimited list of RBVPIDs provided to excel spreadsheet and column provided\n\n# Module initialization\nimport csv\nimport openpyxl\nimport os\nimport sys\n\n__author__ = 'rrehders'\n\n# create a function to only extract valid RBVPIDs\ndef is_rbvpid(var):\n try:\n tmp = int(var)\n return tmp\n except:\n return False\n\n\n# Validate the correct number of command line args\nif 2 < len(sys.argv) < 3:\n print('USAGE: cmp-rbvpid [file1.csv] [file2.xlsx] {col}')\n sys.exit()\n\n# Validate 1st command line arg is a file\nfname = sys.argv[1]\nif not os.path.isfile(fname):\n print('ERR: '+fname+' is not a file')\n sys.exit()\n\n# Validate 2nd command line arg is a file\nfname = sys.argv[2]\nif not os.path.isfile(fname):\n print('ERR: '+fname+' is not a file')\n sys.exit()\n\n# Set default column if not specified\nif len(sys.argv) <= 3:\n col = 1\nelse:\n col = int(sys.argv[3])\n\n# Read in the CSV and extract RBVPIDs\ninfile = open(sys.argv[1])\ninreader = csv.reader(infile)\nidlist = set([int(rbvpid[0]) for rbvpid in inreader if is_rbvpid(rbvpid[0])])\nprint(idlist)\n\n# load the target workbook\ntry:\n wb = openpyxl.load_workbook(sys.argv[2], data_only=True)\nexcept Exception as err:\n print('ERR: '+fname+' '+str(err))\ntabs = wb.get_sheet_names()\nws = wb.get_sheet_by_name(tabs[0])\n\n# Extract the Base RBVPIP list\ncolumn = ws.columns[col]\ninvlist = set([cell.value for cell in column if is_rbvpid(cell.value)])\n\n# Compare the lists\nexceptionlist = idlist - invlist\n\n# Explain the results\nprint('There are ', len(idlist), ' IDs in the test list')\nprint('Being compared to ', len(invlist), ' IDs in the origin list')\nif len(exceptionlist) == 0:\n print('All IDs are present')\nelse:\n print('The following ', len(exceptionlist), ' IDs were missed from the list')\n 
print(exceptionlist)\n","sub_path":"cmp-rbvpid.py","file_name":"cmp-rbvpid.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"470069702","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nfrom sklearn import mixture\nfrom scipy import linalg\nimport matplotlib as mpl\nfrom scipy import spatial\n\n#F = np.loadtxt(\"mapping\", unpack = True)\n#G = np.loadtxt(\"mapped\", unpack = True)\n\nX = np.loadtxt(\"samples2\", unpack=True)\n\ngmm = mixture.GMM(n_components=4, covariance_type='full')\ngmm.fit(X)\n\nX_test = np.linspace(-6, 4)\nY_test = 2*np.sin(X_test) -3\n#plt.scatter(X_test, Y_test)\n\nX_t = np.ndarray((50, 2))\n\n\n\nX_t[:, 0] = X_test\nX_t[:, 1] = Y_test\n\ncolor_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])\n\nfor i, (clf, title) in enumerate([(gmm, 'GMM')]):\n splot = plt.subplot(1, 1, i + 1)\n Y_ = clf.predict(X_t)\n for i, (mean, covar, color) in enumerate(zip(\n clf.means_, clf._get_covars(), color_iter)):\n v, w = linalg.eigh(covar)\n u = w[0] / linalg.norm(w[0])\n\n if not np.any(Y_ == i):\n continue\n plt.scatter(X_t[Y_ == i, 0], X_t[Y_ == i, 1], 2, color=color)\n\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan(u[1] / u[0])\n angle = 180 * angle / np.pi\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)\n ell.set_clip_box(splot.bbox)\n ell.set_alpha(0.5)\n splot.add_artist(ell)\n\n\n\n\n\n\n'''\n#F_tree = spatial.KDTree(F)\n#G_tree = spatial.KDTree(G)\n\n\nZ = []\n\nfor i in range(len(X_test)):\n point = np.array([X_test[i], Y_test[i]])\n #ind = find_nearest(F, value)\n distance, index = F_tree.query(point)\n Z.append(G[index])\n\n\n\nZ = np.array(Z)\nplt.scatter(Z[:, 0], Z[:, 1], color= 'k')\n\n\n'''\n\nplt.show()\n","sub_path":"LearnedMap.py","file_name":"LearnedMap.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"546122665","text":"try:\n import os\n import subprocess\n import shutil\n import stat\n import json\n import sys\n import platform\n import tarfile\n import hashlib\n\n PYTHON2 = 2\n PYTHON3 = 3\n\n pyversion = None\n installpath = \"/usr/bin\"\n\n # Detect OS and architecture\n if sys.version_info[0] < 3:\n pyversion = PYTHON2\n import urllib2 as urllib\n else:\n pyversion = PYTHON3\n import urllib.request as urllib\n\n if platform.machine() != \"x86_64\":\n if platform.machine() != \"\":\n print(\"System architecture unsupported, build from source instead\")\n exit()\n elif pyversion == PYTHON3:\n q = input(\n \"Unable to detect system architecture, are you using x86_64 (AMD64) [y/N]: \")\n if q.lower() != \"y\":\n print(\"System architecture unsupported, build from source instead\")\n exit(1)\n else:\n q = raw_input(\n \"Unable to detect system architecture, are you using x86_64 (AMD64) [y/N]: \")\n if q.lower() != \"y\":\n print(\"System architecture unsupported, build from source instead\")\n exit(1)\n\n if platform.system() != \"Linux\" and platform.system() != \"Darwin\":\n if platform.system() == \"Windows\":\n print(\n \"Windows is not supported by the Python installer, use pre-compiled binaries instead\")\n else:\n print(\"OS unsupported, build from source instead\")\n exit(1)\n\n if platform.system() == \"Darwin\":\n installpath = \"/usr/local/bin\"\n\n # Check for existance and write permissions\n if not os.path.isdir(installpath):\n print(\"Unable to locate \" + installpath)\n exit(1)\n if not os.path.isdir(\"/tmp\"):\n print(\"Unable to locate /tmp\")\n exit(1)\n\n if not os.access(installpath, os.W_OK):\n print(\"Insufficient permissions, please elevate\")\n exit(1)\n if not os.access(\"/tmp\", os.W_OK):\n print(\"Insufficient permissions, please elevate\")\n exit(1)\n\n # Get latest release\n print(\"Gathering system info ...\")\n if os.path.isfile(installpath + \"/kryer\"):\n if pyversion == PYTHON2:\n q = raw_input(\n \"Kryer is 
already installed, reinstall/update [Y/n]: \")\n else:\n q = str(\n input(\"Kryer is already installed, reinstall/update [Y/n]: \"))\n\n if q.lower() == \"n\":\n exit(0)\n\n print(\"Removing \" + installpath + \"/kryer...\")\n\n try:\n os.remove(installpath + \"/kryer\")\n except OSError:\n print(\"Insufficient permissions, please elevate\")\n exit(1)\n\n print(\"Removed \" + installpath + \"/kryer...\")\n\n print(\"Getting release information from https://api.github.com/repos/cfschilham/kryer/releases...\")\n response = urllib.urlopen(\n \"https://api.github.com/repos/cfschilham/kryer/releases\")\n releases = json.loads(response.read())\n\n for release in releases:\n print(\"Getting asset list from \" + release[\"assets_url\"] + \"...\")\n response = urllib.urlopen(release[\"assets_url\"])\n assets = json.loads(response.read())\n\n for asset in assets:\n if platform.system().lower() in asset[\"name\"] and \"tar.gz\" in asset[\"name\"]:\n print(\"Downloading \" + asset[\"browser_download_url\"] + \"...\")\n if(pyversion == PYTHON2):\n response = urllib.urlopen(asset[\"browser_download_url\"])\n rawfile = response.read()\n\n with open(\"/tmp/kryer.tar.gz\", \"wb\") as FILE:\n FILE.write(rawfile)\n\n else:\n urllib.urlretrieve(\n asset[\"browser_download_url\"], \"/tmp/kryer.tar.gz\")\n break\n\n if os.path.isfile(\"/tmp/kryer.tar.gz\"):\n break\n\n if not os.path.isfile(\"/tmp/kryer.tar.gz\"):\n print(\"There aren't any compatible pre-compiled binaries available for your system, build from source instead\")\n exit(1)\n\n print(\"Extracting \" + asset[\"name\"] + \"...\")\n filename = asset[\"name\"].replace(\".tar.gz\", \"\")\n for asset in assets:\n if asset[\"name\"] == filename + \".sha256\":\n print(\"Getting SHA256 checksum from \" +\n asset[\"browser_download_url\"] + \"...\")\n if pyversion == PYTHON2:\n response = urllib.urlopen(asset[\"browser_download_url\"])\n rawfile = response.read()\n\n with open(\"/tmp/kryer.sha256\", \"wb\") as FILE:\n 
FILE.write(rawfile)\n\n else:\n urllib.urlretrieve(\n asset[\"browser_download_url\"], \"/tmp/kryer.sha256\")\n break\n\n if os.path.isfile(\"/tmp/kryer.sha256\"):\n with open(\"/tmp/kryer.sha256\", \"r\") as checksum:\n checksum = checksum.read().split(\" \")[0]\n print(\"Verifying checksum \" + checksum + \"...\")\n\n checksumFILE = hashlib.sha256()\n with open(\"/tmp/kryer.tar.gz\", \"rb\") as FILE:\n for chunk in iter(lambda: FILE.read(4096), b\"\"):\n checksumFILE.update(chunk)\n\n if checksum.strip() != checksumFILE.hexdigest().strip():\n print(\"Integrity check failed, invalid checksum\")\n print(\"Calculated checksum: \" + checksumFILE.hexdigest().strip())\n print(\"Provided checksum: \" + checksum.strip())\n\n if os.path.isfile(\"/tmp/kryer\"):\n os.remove(\"/tmp/kryer\")\n if os.path.isdir(\"/tmp/kryer.sha256\"):\n shutil.rmtree(\"/tmp/kryer.sha256\")\n if os.path.isfile(\"/tmp/kryer.tar.gz\"):\n os.remove(\"/tmp/kryer.tar.gz\")\n exit(1)\n\n print(\"Integrity check successful\")\n else:\n print(\"Can't find checksum for file, skipping integrity check...\")\n\n print(\"Extracting \" + asset[\"name\"] + \"...\")\n os.mkdir(\"/tmp/kryer\")\n tar = tarfile.TarFile.open(\"/tmp/kryer.tar.gz\", \"r:gz\")\n tar.extractall(\"/tmp/kryer\")\n tar.close()\n\n print(\"Creating files in \" + installpath + \"...\")\n shutil.move(\"/tmp/kryer/\" + filename + \"/kryer\", installpath + \"/kryer\")\n\n shutil.rmtree(\"/tmp/kryer\")\n os.remove(\"/tmp/kryer.tar.gz\")\n os.remove(\"/tmp/kryer.sha256\")\n print(\"Installation successful, use kryer command to start\")\n\nexcept KeyboardInterrupt:\n print(\"\\nKeyboard interrupt detected\")\n print(\"Cleaning up...\")\n\n if os.path.isdir(\"/tmp/kryer\"):\n shutil.rmtree(\"/tmp/kryer\")\n if os.path.isfile(\"/tmp/kryer.tar.gz\"):\n os.remove(\"/tmp/kryer.tar.gz\")\n if os.path.isfile(\"/tmp/kryer.sha256\"):\n 
os.remove(\"/tmp/kryer.sha256\")\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":6724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"517850770","text":"import re\nfrom scrapy import log, FormRequest, Spider, Request\nfrom Presidency.items import Address\nfrom Presidency.pipelines import CSVHandler\nfrom scrapy import signals\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom string import punctuation\nimport time, datetime\nimport Presidency.settings\n\n__author__ = 'Shay Goldman, shay@goldman.io'\n\nclass Spidresident(Spider):\n name = 'Spidresident'\n log_name = name.upper()\n allowed_domains = Presidency.settings.DOMAINS\n csv_handler = CSVHandler()\n\n def __init__(self, **kwargs):\n super(Spidresident, self).__init__(**kwargs)\n dispatcher.connect(self.spider_quit, signals.spider_closed)\n\n def spider_quit(self):\n self.csv_handler.close_file_handle()\n self.csv_handler.set_running(False)\n\n def get_csv_handler(self):\n return self.csv_handler\n\n def start_requests(self):\n log.msg(self.log_name + \": Building initial requests\", level=log.DEBUG)\n for category_id, category_name in Presidency.settings.CATEGORIES.iteritems():\n for year in range(Presidency.settings.MIN_YEAR, Presidency.settings.MAX_YEAR + 1):\n altered_formdata = Presidency.settings.FORM_BASE_DATA\n altered_formdata[Presidency.settings.FORM_FIELD_YEAR_START] = str(year)\n altered_formdata[Presidency.settings.FORM_FIELD_YEAR_END] = str(year)\n altered_formdata[Presidency.settings.FORM_FIELD_CATEGORY] = str(category_id)\n\n log.msg(self.log_name + \": Yielding category \" + str(category_id) + \" for \" + str(year), level=log.DEBUG)\n yield FormRequest(\n Presidency.settings.SITE_URL,\n formdata=altered_formdata,\n callback=lambda g, category_id=category_id:\n self.request_addresses(g, category_id))\n\n log.msg(self.log_name + \": Initial requests built\", level=log.INFO)\n\n def extract_pid(self, link):\n pid_pattern = re.compile('=\\d+&')\n pid_match = pid_pattern.search(link)\n pid = link[pid_match.start() + 1:pid_match.end() - 1]\n return pid\n\n def extract_day(self, date_pre):\n day_pattern = 
re.compile('\\s\\d*,')\n day_match = day_pattern.search(date_pre)\n day = date_pre[day_match.start() + 1:day_match.end() - 1]\n if (int(day) < 10):\n day = '0' + (str(day))\n return day\n\n def extract_month(self, date_pre):\n month_pattern = re.compile('^[a-zA-z]*\\s')\n month_match = month_pattern.search(date_pre)\n month = date_pre[month_match.start():month_match.end() - 1]\n if (month in Presidency.settings.MONTH_MAP.keys()):\n month = Presidency.settings.MONTH_MAP[month]\n return month\n\n def extract_year(self, date_pre):\n year_pattern = re.compile('\\d+$')\n year_match = year_pattern.search(date_pre)\n year = date_pre[year_match.start():year_match.end()]\n return year\n\n def request_addresses(self, response, category_id):\n results_table_xpath = '//html/body/table/tr[2]/td/table/tr/td[2]/table/tr/table/tr'\n results = response.selector.xpath(results_table_xpath)\n for result_num in range(1, results.__len__() - 2): # Skip header and footer of results\n link_element = results[result_num].xpath('./td[4]//a')\n\n link = unicode(''.join(link_element.xpath('./@href').extract()).strip())\n title = unicode(''.join(link_element.xpath('./text()').extract()).strip())\n pid = self.extract_pid(link)\n\n date_pre = unicode(''.join(results[result_num].xpath('./td[1]/text()').extract()).strip())\n day = self.extract_day(date_pre)\n month = self.extract_month(date_pre)\n year = self.extract_year(date_pre)\n date = year + '-' + month + '-' + day\n\n president = unicode(''.join(results[result_num].xpath('./td[2]/text()').extract()).strip())\n\n yield Request(\n url=Presidency.settings.SITE_URL_FOLDER + link,\n callback=lambda g, title=title, pid=pid, date=date, president=president:\n self.parse_address(g, title, pid, date, president, category_id))\n log.msg(self.log_name + \": Yielded address \" + title, level=log.DEBUG)\n\n def parse_address(self, response, title, pid, date, president, category_id):\n address = Address()\n url = response.url.lower()\n timestamp = 
time.time()\n address[Presidency.settings.FIELD_COLLECTED_AT] = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')\n address[Presidency.settings.FIELD_PRESIDENT] = president\n address[Presidency.settings.FIELD_DATE] = date\n address[Presidency.settings.FIELD_TITLE] = title\n address[Presidency.settings.FIELD_PID] = str(pid)\n address[Presidency.settings.FIELD_CATEGORY] = Presidency.settings.CATEGORIES[category_id]\n address[Presidency.settings.FIELD_CATEGORY_ID] = str(category_id)\n\n toolbox_ver10_list = response.selector.css('.toolbox').css('.ver10::text').extract()\n\n for i in range(0, toolbox_ver10_list.__len__() - 1):\n if (toolbox_ver10_list[i] == u'Location:'):\n address[Presidency.settings.FIELD_LOCATION1] = toolbox_ver10_list[i + 1]\n address[Presidency.settings.FIELD_LOCATION2] = toolbox_ver10_list[i + 2]\n break\n\n address[Presidency.settings.FIELD_URL] = url\n\n raw_content = response.selector.css('.displaytext').xpath('.//text()').extract()\n str_raw_content = ''.join(raw_content)\n address[Presidency.settings.FIELD_CONTENT] = str_raw_content\n r = re.compile(r'[{}]'.format(punctuation))\n fixed_str_raw_content = r.sub(' ',str_raw_content)\n word_list = fixed_str_raw_content.split()\n\n address[Presidency.settings.FIELD_WORDS] = len(word_list)\n\n log.msg(self.log_name + \": Parsed address \" + title, level=log.DEBUG)\n yield address\n","sub_path":"Presidency/spiders/Spidresident.py","file_name":"Spidresident.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"100694324","text":"import os\nimport logging\nimport re\n\nimport numpy as np\nfrom typing import *\n\nfrom Bio.Blast import NCBIXML\nfrom Bio.Blast.Applications import NcbiblastpCommandline\nfrom Bio.Blast.Record import Alignment, HSP\nfrom Bio.Seq import Seq\nfrom Bio.SubsMat import MatrixInfo as matlist\nfrom tqdm import tqdm\n\nfrom sbsp_alg.feature_computation import add_gaps_to_nt_based_on_aa\nfrom sbsp_alg.phylogeny import k2p_distance, global_alignment_aa_with_gap\nfrom sbsp_container.genome_list import GenomeInfoList, GenomeInfo\nfrom sbsp_general import Environment\nfrom sbsp_general.blast import run_blast, convert_blast_output_to_csv, create_blast_database, run_blast_alignment\nfrom sbsp_general.general import get_value\nfrom sbsp_general.labels import Labels, Label, create_gene_key_from_label\nfrom sbsp_io.general import mkdir_p\nfrom sbsp_io.labels import read_labels_from_file\nfrom sbsp_io.sequences import read_fasta_into_hash\nfrom sbsp_options.sbsp import SBSPOptions\n\nlog = logging.getLogger(__name__)\n\nvalid_starts_pos = [\"ATG\", \"GTG\", \"TTG\"]\nvalid_starts_neg = [\"CAT\", \"CAC\", \"CAA\"]\n\nvalid_stops_pos = [\"TAA\", \"TGA\", \"TAG\"]\nvalid_stops_neg = [\"TTA\", \"TCA\", \"CTA\"]\n\n\ndef is_valid_start(codon, strand):\n if strand == \"+\":\n return codon in valid_starts_pos\n else:\n return codon in valid_starts_neg\n\n\ndef is_valid_stop(codon, strand):\n if strand == \"+\":\n return codon in valid_stops_pos\n else:\n return codon in valid_stops_neg\n\n\ndef get_orthologs_from_files_deprecated(env, pf_q_list, pf_t_list, pf_output, **kwargs):\n # type: (Environment, str, str, str, Dict[str, Any]) -> str\n\n clean = get_value(kwargs, \"clean\", False)\n\n # pf_q_list = data[\"pf-q-list\"]\n # pf_t_list = data[\"pf-t-list\"]\n\n pd_work = env[\"pd-work\"]\n\n mkdir_p(pd_work)\n\n # run blast\n fn_blast_out = \"blast.xml\"\n pf_blast_out = os.path.join(pd_work, fn_blast_out)\n\n run_blast(env, pf_q_list, pf_t_list, 
pf_blast_out, **kwargs)\n\n # convert blast output to csv\n convert_blast_output_to_csv(pf_blast_out, pf_output, select_best_alignment_per_qt_pair=True)\n\n if clean:\n try:\n os.remove(pf_blast_out)\n except OSError:\n pass\n\n return pf_output\n\n\ndef pack_fasta_header(label, gi, **kwargs):\n # type: (Label, GenomeInfo, Dict[str, Any]) -> str\n\n def create_tags_from_dict(**local_kwargs):\n # type: (Dict[str, Any]) -> str\n out = \"\"\n for k, v in local_kwargs.items():\n out += \";{}={}\".format(k, v)\n return out\n\n return \"{} accession={};genome={};left={};right={};strand={}\".format(\n label.seqname(),\n label.seqname(),\n gi.name,\n label.left() + 1,\n label.right() + 1,\n label.strand(),\n ) + create_tags_from_dict(**kwargs)\n\n\ndef unpack_fasta_header(header):\n # type: (str) -> Dict[str, Any]\n\n fields = {}\n\n def get_key_value_from_definition_line(key, l_defline):\n m = re.match(\".*(?:^|;)\" + str(key) + \"=([^;]*)\", l_defline)\n\n if m:\n return m.group(1)\n\n raise ValueError(\"Key \" + str(key) + \" not in definition line\")\n\n # remove first accession\n header = header.strip().split(maxsplit=1)[1]\n\n # get all keys in string\n keys = re.findall(r\"([^;=]+)=\", header)\n\n type_mapper = {\n \"left\": int,\n \"right\": int,\n \"gc\": float,\n \"offset\": int,\n \"upstream_left\": int,\n \"upstream_right\": int\n }\n\n # for each key, get value\n for k in keys:\n fields[k] = get_key_value_from_definition_line(k, header)\n\n if k in type_mapper.keys():\n fields[k] = type_mapper[k](fields[k])\n\n return fields\n\n\ndef select_representative_hsp(alignment, hsp_criteria):\n # type: (Alignment, str) -> Union[HSP, None]\n\n selected_hsp = None\n max_length = None\n for hsp in alignment.hsps:\n if max_length is None:\n selected_hsp = hsp\n max_length = hsp.align_length\n else:\n if hsp.align_length > max_length:\n selected_hsp = hsp\n max_length = hsp.align_length\n\n return selected_hsp\n\n\ndef map_aligned_aa_to_aligned_nt(q_aligned_seq_aa, 
original_q_nt, q_start_aa, q_end_aa, offset_nt=0):\n # type: (Seq, Seq, int, int) -> Seq\n if len(original_q_nt) < (q_end_aa - q_start_aa + 1) * 3:\n raise ValueError(\"Nucleotide sequence length less than aligned amino acid fragment: {} < {}\".format(\n len(original_q_nt), (q_end_aa - q_start_aa + 1) * 3\n ))\n\n output = Seq(\"\")\n pos_nt_no_gaps = offset_nt + q_start_aa * 3\n max_pos_nt_no_gaps = offset_nt + (q_end_aa + 1) * 3\n\n for pos_aa in range(min(200, len(q_aligned_seq_aa))):\n curr_aa = q_aligned_seq_aa[pos_aa]\n\n if curr_aa == \"-\":\n output += \"---\"\n else:\n output += original_q_nt[pos_nt_no_gaps:pos_nt_no_gaps + 3]\n pos_nt_no_gaps += 3\n\n if pos_nt_no_gaps >= max_pos_nt_no_gaps:\n break\n\n return output\n\n\ndef compute_distance_based_on_local_alignment(query_info, target_info, hsp, **kwargs):\n # type: (Dict[str, Any], Dict[str, Any], HSP, Dict[str, Any]) -> float\n\n original_q_nt = get_value(kwargs, \"original_q_nt\", required=True)\n original_t_nt = get_value(kwargs, \"original_t_nt\", required=True)\n original_q_nt_offset = get_value(kwargs, \"original_q_nt_offset\", default=0)\n original_t_nt_offset = get_value(kwargs, \"original_t_nt_offset\", default=0)\n\n # aligned fragments (aa)\n q_aligned_seq_aa = hsp.query\n t_aligned_seq_aa = hsp.sbjct\n\n # indices of where alignment starts in original sequences\n q_start, q_end = hsp.query_start - 1, hsp.query_end - 2 # -2 to make inclusive\n t_start, t_end = hsp.sbjct_start - 1, hsp.sbjct_end - 1 # -2 to make inclusive\n\n # aligned fragments (nt)\n try:\n q_aligned_seq_nt = map_aligned_aa_to_aligned_nt(q_aligned_seq_aa, original_q_nt, q_start, q_end, offset_nt=original_q_nt_offset)\n t_aligned_seq_nt = map_aligned_aa_to_aligned_nt(t_aligned_seq_aa, original_t_nt, t_start, t_end, offset_nt=original_t_nt_offset)\n except ValueError:\n return 100 # FIXME: the hell is going on\n\n # compute distance metric\n try:\n distance = k2p_distance(q_aligned_seq_nt, t_aligned_seq_nt)\n except 
ValueError:\n distance = 100\n\n return distance\n\n\ndef create_info_for_query_target_pair(query_info, target_info, hsp, **kwargs):\n # type: (Dict[str, Any], Dict[str, Any], HSP, Dict[str, Any]) -> Dict[str, Any]\n output = {\n \"evalue\": hsp.expect,\n }\n\n for k, v in kwargs.items():\n output[k] = v\n\n source_to_info = {\"q\": query_info, \"t\": target_info}\n for source in [\"q\", \"t\"]:\n\n for key in [\"genome\", \"accession\", \"left\", \"right\", \"strand\", \"upstream_left\", \"upstream_right\", \"upstream_strand\", \"offset\"]:\n output[\"{}-{}\".format(source, key)] = source_to_info[source][key]\n\n output[\"q-prot-pos-5prime-in-frag-msa\"] = query_info[\"offset\"] / 3\n output[\"q-nucl-pos-5prime-in-frag-msa\"] = query_info[\"offset\"]\n output[\"q-prot-position-of-5prime-in-msa-fragment-no-gaps\"] = query_info[\"offset\"] / 3\n output[\"q-nucl-position-of-5prime-in-msa-fragment-no-gaps\"] = query_info[\"offset\"]\n output[\"t-prot-pos-5prime-in-frag-msa\"] = target_info[\"offset\"] / 3\n output[\"t-nucl-pos-5prime-in-frag-msa\"] = target_info[\"offset\"]\n output[\"t-prot-position-of-5prime-in-msa-fragment-no-gaps\"] = target_info[\"offset\"] / 3\n output[\"t-nucl-position-of-5prime-in-msa-fragment-no-gaps\"] = target_info[\"offset\"]\n\n output[\"q-key\"] = \"{};{};{};{}\".format(\n query_info[\"accession\"], query_info[\"left\"], query_info[\"right\"], query_info[\"strand\"]\n )\n\n #output[\"q-prot-msa\"] = Seq(query_info[\"lorf_nt\"]).translate()._data\n #output[\"t-prot-msa\"] = Seq(target_info[\"lorf_nt\"]).translate()._data\n\n output[\"q-lorf_nt\"] = Seq(query_info[\"lorf_nt\"])._data\n output[\"t-lorf_nt\"] = Seq(target_info[\"lorf_nt\"])._data\n\n return output\n\n\ndef compute_distance_based_on_global_alignment_from_sequences(q_sequence, t_sequence, q_sequence_nt, t_sequence_nt,\n matrix):\n [q_align, t_align, _, _, _] = \\\n global_alignment_aa_with_gap(q_sequence, t_sequence, matrix)\n\n q_align_nt = 
add_gaps_to_nt_based_on_aa(q_sequence_nt, q_align)\n t_align_nt = add_gaps_to_nt_based_on_aa(t_sequence_nt, t_align)\n\n # count number of positions without gaps\n len_without_gaps = sum([1 for i in range(len(q_align)) if q_align[i] != \"-\" and t_align[i] != \"-\"])\n\n try:\n distance = k2p_distance(q_align_nt, t_align_nt)\n except ValueError:\n distance = 100\n\n return (distance, len(q_align), len_without_gaps)\n\n\ndef parse_filter_and_convert_to_csv(pf_blast_results, pf_output, **kwargs):\n # type: (str, str, Dict[str, Any]) -> None\n\n hsp_criteria = get_value(kwargs, \"hsp_criteria\", None)\n pf_q_original_nt = get_value(kwargs, \"pf_q_original_nt\", required=True)\n pf_t_original_nt = get_value(kwargs, \"pf_t_original_nt\", required=True)\n pf_q_original_aa = get_value(kwargs, \"pf_q_original_aa\", required=True)\n pf_t_original_aa = get_value(kwargs, \"pf_t_original_aa\", required=True)\n distance_min = get_value(kwargs, \"distance_min\", 0.001, default_if_none=True)\n distance_max = get_value(kwargs, \"distance_max\", 0.4, default_if_none=True)\n\n # open csv file for writing\n try:\n f_output = open(pf_output, \"w\")\n except OSError as e:\n log.warning(\"Could not open csv file for writing converted blast output: {}\".format(pf_output))\n raise e\n\n try:\n f_blast_results = open(pf_blast_results, \"r\")\n except OSError as e:\n log.warning(\"Could not open blast results file: {}\".format(pf_blast_results))\n raise e\n\n # read original nucleotide sequences (for computing distances)\n q_original_sequences_nt = read_fasta_into_hash(pf_q_original_nt)\n t_original_sequences_nt = read_fasta_into_hash(pf_t_original_nt)\n\n # read original sequences for trying out pairwise alignment ;)\n q_original_sequences_aa = read_fasta_into_hash(pf_q_original_aa)\n t_original_sequences_aa = read_fasta_into_hash(pf_t_original_aa)\n\n matrix = matlist.blosum62\n import sbsp_alg.phylogeny\n sbsp_alg.phylogeny.add_stop_codon_to_blosum(matrix)\n\n # open blast stream\n 
records = NCBIXML.parse(f_blast_results)\n header_written = False\n\n # for each blast query\n for r in records:\n\n query_info = unpack_fasta_header(r.query)\n num_selected_targets_for_query = 0\n\n # for each alignment to a target protein for the current query\n for alignment in r.alignments:\n\n if num_selected_targets_for_query >= 100:\n logger.debug(\"Stopping at 100 targets (from {}) for query\".format(len(r.alignments)))\n break\n\n hsp = select_representative_hsp(alignment, hsp_criteria)\n\n target_info = unpack_fasta_header(alignment.hit_id)\n\n original_q_nt = q_original_sequences_nt[r.query]\n original_t_nt = t_original_sequences_nt[alignment.hit_id]\n\n distance = compute_distance_based_on_local_alignment(query_info, target_info, hsp,\n original_q_nt=original_q_nt,\n original_t_nt=original_t_nt,\n **kwargs)\n\n original_q_aa = q_original_sequences_aa[r.query]\n original_t_aa = t_original_sequences_aa[alignment.hit_id]\n\n #global_distance, global_length, global_length_without_gaps = compute_distance_based_on_global_alignment_from_sequences(\n # original_q_aa, original_t_aa, original_q_nt, original_t_nt, matrix\n #)\n global_distance = global_length = global_length_without_gaps = 0\n\n # FIXME: thresholds should be from input configuration files\n if distance > distance_min and distance < distance_max:\n #if True:\n num_selected_targets_for_query += 1\n\n output_info = create_info_for_query_target_pair(\n query_info, target_info, hsp,\n distance_blast=distance,\n distance=distance,\n global_distance=global_distance,\n global_length=global_length,\n global_length_without_gaps=global_length_without_gaps,\n local_distance=distance,\n local_length=hsp.align_length,\n local_length_without_gaps=sum([\n 1 for i in range(len(hsp.query)) if hsp.query[i] != \"-\" and hsp.sbjct[i] != \"-\"\n ])\n )\n\n sorted_header = sorted(output_info.keys())\n\n # if header not yet written, write it\n if not header_written:\n 
f_output.write(\"{}\\n\".format(\",\".join(sorted_header)))\n header_written = True\n\n # write values in sorted order\n f_output.write(\"{}\\n\".format(\n \",\".join([str(output_info[x]) for x in sorted_header])\n ))\n\n f_output.close()\n\n\n# def convert_blast_output_to_csv(pf_blast_output, pf_csv, select_best_alignment_per_qt_pair=True, delimiter=\",\", **kwargs):\n# # type: (str, str, bool) -> None\n#\n# def get_lowest_evalue_of_hsps(alignment):\n# return np.min([hsp.expect for hsp in alignment.hsps])\n#\n# def set_alignment_if_lowest_evalue(data, genome_name, alignment):\n# # type: (dict, str, dict) -> None\n#\n# if genome_name not in data:\n# data[genome_name] = alignment\n# # otherwise, check if it has a lower E-value than what's already there\n# else:\n# evalue_existing = get_lowest_evalue_of_hsps(data[genome_name])\n# evalue_new = get_lowest_evalue_of_hsps(alignment)\n#\n# if evalue_new < evalue_existing:\n# data[genome_name] = alignment\n#\n# # open output file for writing\n# with open(pf_csv, \"w\") as f_csv:\n#\n# header = None\n#\n# import sbsp_io.blast\n# records = sbsp_io.blast.read_hits(pf_blast_output)\n#\n# for r in records:\n#\n# q_def = r.query # query definition line\n# q_genome = sbsp_general.general.get_genome_name_from_defition_line(q_def)\n# if select_best_alignment_per_qt_pair:\n# # Structure:\n# # Key : target genome name\n# # Value : alignment with the lowest evalue for that target\n# best_alignment_per_genome = {}\n#\n# # for each target genome, only select the best (lowest e-value) alignment\n# for alignment in r.alignments:\n# t_def = alignment.hit_id\n# t_genome = sbsp_general.general.get_genome_name_from_defition_line(t_def)\n#\n# # keep track of the best alignment for this genome\n# set_alignment_if_lowest_evalue(best_alignment_per_genome, t_genome, alignment)\n#\n# for t_genome in best_alignment_per_genome.keys():\n# t_def = best_alignment_per_genome[t_genome].hit_id\n# curr_alignment = best_alignment_per_genome[t_genome]\n# 
evalue = get_lowest_evalue_of_hsps(curr_alignment)\n#\n#\n# if not blast_on_query_database:\n# # now print\n# info = {\n# \"q-def\": q_def, \"t-def\" : t_def\n# }\n#\n# q_info = sbsp_general.general.expand_definition_line(q_def, key_prefix=\"q-\")\n# t_info = sbsp_general.general.expand_definition_line(t_def, key_prefix=\"t-\")\n# else:\n# info = {\n# \"q-def\": t_def, \"t-def\": q_def\n# }\n#\n# q_info = sbsp_general.general.expand_definition_line(q_def, key_prefix=\"t-\")\n# t_info = sbsp_general.general.expand_definition_line(t_def, key_prefix=\"q-\")\n#\n# def is_frame_shifted(tmp_info, key_prefix):\n# # type: (Dict[str, Any], str) -> bool\n# return (int(tmp_info[key_prefix+\"right\"]) - int(tmp_info[key_prefix+\"left\"]) + 1) % 3 != 0\n#\n# if is_frame_shifted(q_info, \"q-\") or is_frame_shifted(t_info, \"t-\"):\n# continue\n#\n#\n# info.update(q_info)\n# info.update(t_info)\n#\n# info[\"evalue\"] = evalue\n#\n# # if first line, write header\n# if header is None:\n# header = sorted(info.keys())\n# line = \"{}\".format(delimiter).join(header) + \"\\n\"\n# f_csv.write(line)\n# # if not first line, make sure header is consistent\n# else:\n# curr_header = sorted(info.keys())\n# if curr_header != header:\n# raise ValueError(\"CSV rows don't have the same columns\")\n#\n# # write data\n# line = \"{}\".format(delimiter).join([str(info[h]) for h in header]) + \"\\n\"\n# f_csv.write(line)\n\n\ndef append_sequences_to_file(sequences, f):\n # type: (Dict[str, Seq], IO[AnyStr]) -> None\n for header, sequence in sequences.items():\n f.write(\">{}\\n{}\\n\".format(\n header, sequence\n ))\n\n\ndef get_pf_sequences_for_genome(env, gi, **kwargs):\n # type: (Environment, GenomeInfo, Dict[str, Any]) -> str\n fn_sequences = get_value(kwargs, \"fn_sequences\", \"sequence.fasta\")\n return os.path.join(env['pd-data'], gi.name, fn_sequences)\n\n\ndef get_pf_labels_for_genome(env, gi, **kwargs):\n # type: (Environment, GenomeInfo, Dict[str, Any]) -> str\n fn_labels = 
get_value(kwargs, \"fn_labels\", \"ncbi.gff\")\n return os.path.join(env['pd-data'], gi.name, fn_labels)\n\n\ndef get_lorf(label, sequences):\n # type: (Label, Dict[str, Seq]) -> Seq\n\n if label.strand() == \"+\":\n\n curr_pos = label.left()\n pos_lorf = curr_pos\n\n while curr_pos >= 0:\n codon = sequences[label.seqname()][curr_pos:curr_pos + 3]._data\n if is_valid_start(codon, label.strand()):\n pos_lorf = curr_pos\n if is_valid_stop(codon, label.strand()):\n break\n\n curr_pos -= 3\n\n lorf_seq = sequences[label.seqname()][pos_lorf:label.right() + 1]\n\n else:\n\n curr_pos = label.right()\n pos_lorf = curr_pos\n seq_len = len(sequences[label.seqname()])\n\n while curr_pos < seq_len:\n codon = sequences[label.seqname()][curr_pos - 2:curr_pos + 1]._data\n if is_valid_start(codon, label.strand()):\n pos_lorf = curr_pos\n if is_valid_stop(codon, label.strand()):\n break\n\n curr_pos += 3\n\n lorf_seq = sequences[label.seqname()][label.left():pos_lorf + 1]\n\n return lorf_seq\n\n\ndef extract_labeled_sequence(label, sequences, **kwargs):\n # type: (Label, Dict[str, Seq], Dict[str, Any]) -> Seq\n reverse_complement = get_value(kwargs, \"reverse_complement\", False)\n lorf = get_value(kwargs, \"lorf\", False)\n\n if lorf:\n frag = get_lorf(label, sequences)\n else:\n frag = sequences[label.seqname()][label.left():label.right() + 1]\n\n if label.strand() == \"-\" and reverse_complement:\n frag = frag.reverse_complement()\n\n return frag\n\n # return \"{}:tag={};11;:gc={}:pos={};{};{}:cds={};{};{}:type={}:key={};{};{}\".format(\n # label.seqname(),\n # gi.name,\n # gc,\n # label.left() + 1,\n # label.right() + 1,\n # label.strand(),\n # label.left() + 1,\n # label.right() + 1,\n # label.strand(),\n # seq_type,\n # label.seqname(),\n # label.right() if label.strand() == \"+\" else label.left(),\n # label.strand()\n # )\n\n\ndef get_upstream_label_per_label(labels):\n # type: (Labels) -> Dict[str, Label]\n\n sorted_labels = [l for l in labels.sort_by(\"left\")]\n 
num_labels = len(sorted_labels)\n\n gene_key_to_upstream_label = dict() # type: Dict[str, Label]\n\n for i, label in enumerate(sorted_labels):\n\n prev_label = None\n\n if label.strand() == \"+\":\n if i > 0:\n prev_i = i - 1\n prev_label = sorted_labels[prev_i]\n else:\n if i < num_labels - 1:\n prev_i = i + 1\n prev_label = sorted_labels[prev_i]\n\n gene_key_to_upstream_label[create_gene_key_from_label(label)] = prev_label\n\n return gene_key_to_upstream_label\n\n\ndef extract_labeled_sequences(sequences, labels, **kwargs):\n # type: (Dict[str, Seq], Labels, Dict[str, Any]) -> Dict[str, Seq]\n\n func_fasta_header_creator = get_value(kwargs, \"func_fhc\", None)\n kwargs_fasta_header_creator = get_value(kwargs, \"kwargs_fhc\", None)\n\n dict_labeled_sequences = dict() # type: Dict[str, Seq]\n\n gene_key_to_upstream_label = get_upstream_label_per_label(labels)\n\n for i, label in enumerate(labels):\n labeled_sequence = extract_labeled_sequence(label, sequences, **kwargs)\n lorf_nt = extract_labeled_sequence(label, sequences, lorf=True, **kwargs)\n offset = len(lorf_nt) - (label.right() - label.left() + 1)\n\n upstream_label = gene_key_to_upstream_label[create_gene_key_from_label(label)]\n upstream_left = upstream_right = -1\n upstream_strand = \"\"\n\n if upstream_label is not None:\n upstream_left = upstream_label.left() + 1\n upstream_right = upstream_label.right() + 1\n upstream_strand = upstream_label.strand()\n\n fasta_header = str(i)\n if func_fasta_header_creator is not None:\n if kwargs_fasta_header_creator is not None:\n fasta_header = func_fasta_header_creator(label, offset=offset, lorf_nt=lorf_nt,\n upstream_left=upstream_left,\n upstream_right=upstream_right,\n upstream_strand=upstream_strand,\n **kwargs_fasta_header_creator)\n else:\n fasta_header = func_fasta_header_creator(label, offset=offset, lorf_nt=lorf_nt)\n\n dict_labeled_sequences[fasta_header] = labeled_sequence\n\n return dict_labeled_sequences\n\n\ndef 
extract_labeled_sequences_for_genome(env, gi, **kwargs):\n # type: (Environment, GenomeInfo, Dict[str, Any]) -> Dict[str, Seq]\n\n pf_sequences = get_pf_sequences_for_genome(env, gi)\n pf_labels = get_pf_labels_for_genome(env, gi, **kwargs)\n\n try:\n sequences = read_fasta_into_hash(pf_sequences)\n labels = read_labels_from_file(pf_labels, **kwargs)\n except IOError as e:\n log.warning(\"Could not read sequence/labels files for genome: {}\".format(gi.name))\n raise e\n\n return extract_labeled_sequences(sequences, labels, **kwargs)\n\n\ndef translate_sequences_to_aa(sequences_nt):\n # type: (Dict[str, Seq]) -> Dict[str, Seq]\n\n sequences_aa = dict()\n for k, v in sequences_nt.items():\n try:\n v_aa = v.translate()\n sequences_aa[k] = v_aa\n except ValueError:\n log.warning(\"Could not translate sequence:\\n{}\".format(v))\n\n return sequences_aa\n\n\ndef dict_intersection_by_key(dict_a, dict_b):\n # type: (Dict, Dict) -> None\n \"\"\"Removes elements from both dictionaries with keys not in both\"\"\"\n keys_a = set(dict_a.keys())\n keys_b = set(dict_b.keys())\n keys_intersection = keys_a.intersection(keys_b)\n\n keys_a_unique = keys_a.difference(keys_intersection)\n keys_b_unique = keys_b.difference(keys_intersection)\n\n def remove_keys_from_dict(d, keys):\n # type: (Dict, Iterable) -> None\n for k in keys:\n del d[k]\n\n remove_keys_from_dict(dict_a, keys_a_unique)\n remove_keys_from_dict(dict_b, keys_b_unique)\n\n\ndef extract_labeled_sequences_for_genomes(env, gil, pf_output, **kwargs):\n # type: (Environment, GenomeInfoList, str, Dict[str, Any]) -> str\n\n # open file for writing\n try:\n f_aa = open(pf_output, \"w\")\n\n for gi in tqdm(gil, total=len(gil)):\n func_fasta_header_creator = pack_fasta_header\n kwargs_fasta_header_creator = {\"gi\": gi}\n try:\n sequences_nt = extract_labeled_sequences_for_genome(\n env, gi,\n func_fhc=func_fasta_header_creator,\n kwargs_fhc=kwargs_fasta_header_creator,\n **kwargs\n )\n sequences_aa = 
translate_sequences_to_aa(sequences_nt)\n\n # only keep sequences that have been translated\n dict_intersection_by_key(sequences_nt, sequences_aa)\n\n append_sequences_to_file(sequences_aa, f_aa)\n except IOError:\n pass\n\n f_aa.close()\n except OSError:\n log.warning(\"Could not open file for writing sequences:\\n{}\".format(pf_output))\n\n return pf_output\n\n\ndef run_blast_on_sequence_file(env, pf_q_aa, pf_db, pf_blast_output, **kwargs):\n # type: (Environment, str, str, str, Dict[str, Any]) -> None\n run_blast_alignment(pf_q_aa, pf_db, pf_blast_output, use_diamond=True, **kwargs)\n\n\ndef get_orthologs_from_files(env, pf_q_list, pf_t_list, pf_output, **kwargs):\n # type: (Environment, str, str, str, Dict[str, Any]) -> str\n\n sbsp_options = get_value(kwargs, \"sbsp_options\", SBSPOptions(env)) # type: SBSPOptions\n\n fn_q_labels = get_value(kwargs, \"fn_q_labels\", \"ncbi.gff\")\n fn_t_labels = get_value(kwargs, \"fn_t_labels\", \"ncbi.gff\")\n\n q_gil = GenomeInfoList.init_from_file(pf_q_list)\n t_gil = GenomeInfoList.init_from_file(pf_t_list)\n\n pd_work = env[\"pd-work\"]\n\n # Extract data for blast run\n pf_q_aa = os.path.join(pd_work, \"q.faa\")\n pf_q_nt = os.path.join(pd_work, \"q.fnt\")\n pf_t_aa = os.path.join(pd_work, \"t.faa\")\n pf_t_nt = os.path.join(pd_work, \"t.fnt\")\n\n custom = {\n \"reverse_complement\": True,\n \"ignore_frameshifted\": True,\n \"ignore_partial\": True\n }\n\n extract_labeled_sequences_for_genomes(env, q_gil, pf_q_aa, fn_labels=fn_q_labels, **custom, **kwargs)\n extract_labeled_sequences_for_genomes(env, t_gil, pf_t_aa, fn_labels=fn_t_labels, **custom, **kwargs)\n\n pf_blast_db = os.path.join(pd_work, \"blast.db\")\n create_blast_database(pf_t_aa, pf_blast_db, seq_type=\"prot\", use_diamond=True) # FIXME: cleanup\n\n # Run blast\n pf_blast_results = os.path.join(pd_work, \"blast.xml\")\n run_blast_on_sequence_file(env, pf_q_aa, pf_blast_db, pf_blast_results, **kwargs)\n\n # Parse data, filter, and write to CSV\n 
parse_filter_and_convert_to_csv(pf_blast_results, pf_output,\n pf_q_original_nt=pf_q_nt,\n pf_t_original_nt=pf_t_nt,\n pf_q_original_aa=pf_q_aa,\n pf_t_original_aa=pf_t_aa,\n distance_min=sbsp_options.safe_get(\"filter-min-distance\"),\n distance_max=sbsp_options.safe_get(\"filter-max-distance\"),\n **kwargs)\n\n return pf_output\n","sub_path":"code/python/lib/sbsp_alg/ortholog_finder.py","file_name":"ortholog_finder.py","file_ext":"py","file_size_in_byte":27709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"262739809","text":"from django.conf.urls import patterns, url\n\n# application imoprts\nfrom .views import Home, BarView, PieView\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('finance.views',\n url(r'^$', Home.as_view(), name='home'),\n url(r'^bar/$', BarView.as_view(), name='bar'),\n url(r'^pie/$', PieView.as_view(), name='pie'),\n\n url(r'^balance/(?Pget|recalculate)/$', 'balance', name='balance'),\n\n)\n","sub_path":"finance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"317884572","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\nfrom collections import Counter\n\nclass MachineLearningCommonLib():\n \n def calc_classification_error(self, accuracy):\n return 1 - accuracy\n \n # Transform categorical data into binary features\n # ===============================================\n \n # In this assignment, we will implement binary decision trees (decision trees for binary features, \n # a specific case of categorical variables taking on two values, e.g., true/false). \n # Since all of our features are currently categorical features, we want to turn them into binary features.\n \n def unpack(self, dict_data):\n return pd.Series(dict_data)\n \n '''\n This function is to convert the categories belong to a row to columns for each category and then remove the column that has the categories\n '''\n def one_hot_encoding(self, df_data):\n categorical_variables = []\n for feature_name, feature_type in zip(df_data.columns.values, df_data.dtypes):\n if feature_type == object:\n categorical_variables.append(feature_name)\n \n for feature in categorical_variables:\n print('Processing feature: {} now'.format(feature))\n df_data_one_hot_encoded = df_data[feature].apply(lambda x: {x: 1})\n df_data_unpacked = df_data_one_hot_encoded.apply(lambda x: self.unpack(x)) \n \n # Change None's to 0's\n df_data_unpacked = df_data_unpacked[df_data_unpacked.columns.values].fillna(0)\n df_data = df_data.drop(feature, 1)\n \n df_data[df_data_unpacked.columns.values] = df_data_unpacked\n \n return df_data\n \n # For instance, the home_ownership feature represents the home ownership status of the loanee, which is either own, mortgage or rent. 
\n # For example, if a data point has the feature\n \n # {'home_ownership': 'RENT'}\n \n # we want to turn this into three features:\n \n # { \n # 'home_ownership = OWN' : 0, \n # 'home_ownership = MORTGAGE' : 0, \n # 'home_ownership = RENT' : 1\n # }\n \n # Decision tree implementation\n # ============================\n \n # In this section, we will implement binary decision trees from scratch. There are several steps involved in building a decision tree. \n # For that reason, we have split the entire assignment into several sections.\n \n # Function to count number of mistakes while predicting majority class\n # --------------------------------------------------------------------\n \n # Recall from the lecture that prediction at an intermediate node works by predicting the majority class for all data points that belong to this node. \n # Now, we will write a function that calculates the number of misclassified examples when predicting the majority class. \n # This will be used to help determine which feature is the best to split on at a given node of the tree.\n \n # Note: Keep in mind that in order to compute the number of mistakes for a majority classifier, we only need the label (y values) of the data points in the node.\n \n # Steps to follow:\n \n # - Step 1: Calculate the number of safe loans and risky loans.\n # - Step 2: Since we are assuming majority class prediction, all the data points that are not in the majority class are considered mistakes.\n # - Step 3: Return the number of mistakes.\n \n # 7. Now, let us write the function intermediate_node_num_mistakes which computes the number of misclassified examples of an intermediate node \n # given the set of labels (y values) of the data points contained in the node. 
Your code should be analogous to\n def intermediate_node_num_mistakes(self, labels_in_node):\n # Corner case: If labels_in_node is empty, return 0\n if len(labels_in_node) == 0:\n return 0 \n \n #labels_in_node = [-1, -1, 1, 1, 1]\n elem_count = Counter(labels_in_node)\n majority_classifier = max(elem_count.keys(), key=(lambda key: elem_count[key]))\n #print(\"Major classifier = %s.\" % majority_classifier)\n # Count the number of 1's (safe loans)\n safe_loans_num = elem_count['1'] \n # Count the number of -1's (risky loans)\n risky_loans_num = elem_count['-1'] \n # Return the number of mistakes that the majority classifier makes.\n #labels_in_node_arr = pd.DataFrame(data=labels_in_node)\n #mistake_labels = labels_in_node_arr[labels_in_node_arr[0] != majority_classifier]\n mistake_labels_size = 0\n for label in labels_in_node:\n if label != majority_classifier:\n mistake_labels_size += 1\n \n return mistake_labels_size\n \n # Function to pick best feature to split on\n # =========================================\n \n # The function best_splitting_feature takes 3 arguments:\n \n # 1.The data\n # 2.The features to consider for splits (a list of strings of column names to consider for splits)\n # 3.The name of the target/label column (string)\n \n # The function will loop through the list of possible features, and consider splitting on each of them. \n # It will calculate the classification error of each split and return the feature that had the smallest classification error when split on.\n \n # Recall that the classification error is defined as follows:\n \n #accuracy = #correctly classified examples / #total examples\n \n # 9. 
Follow these steps to implement best_splitting_feature:\n \n # - Step 1: Loop over each feature in the feature list\n # - Step 2: Within the loop, split the data into two groups: one group where all of the data has feature value 0 or False (we will call this the left split), and one group where all of the data has feature value 1 or True (we will call this the right split). Make sure the left split corresponds with 0 and the right split corresponds with 1 to ensure your implementation fits with our implementation of the tree building process.\n # - Step 3: Calculate the number of misclassified examples in both groups of data and use the above formula to compute theclassification error.\n # - Step 4: If the computed error is smaller than the best error found so far, store this feature and its error.\n \n # Note: Remember that since we are only dealing with binary features, we do not have to consider thresholds for real-valued features. \n # This makes the implementation of this function much easier.\n \n # Your code should be analogous to\n \n def best_splitting_feature(self, data, features, target):\n \n target_values = data[target]\n best_feature = None # Keep track of the best feature \n best_error = 10 # Keep track of the best error so far \n # Note: Since error is always <= 1, we should intialize it with something larger than 1.\n \n # Convert to float to make sure error gets computed correctly.\n num_data_points = float(len(data)) \n \n # Loop through each feature to consider splitting on that feature\n for feature in features:\n \n # The left split will have all data points where the feature value is 0\n left_split = data[data[feature] == 0]\n \n # The right split will have all data points where the feature value is 1\n right_split = data[data[feature] == 1]\n \n # Calculate the number of misclassified examples in the left split.\n # Remember that we implemented a function for this! 
(It was called intermediate_node_num_mistakes)\n left_mistakes = self.intermediate_node_num_mistakes(left_split[target]) \n \n # Calculate the number of misclassified examples in the right split.\n right_mistakes = self.intermediate_node_num_mistakes(right_split[target])\n \n # Compute the classification error of this split.\n # Error = (# of mistakes (left) + # of mistakes (right)) / (# of data points)\n error = (left_mistakes + right_mistakes) / num_data_points\n \n # If this is the best error we have found so far, store the feature as best_feature and the error as best_error\n if error < best_error:\n best_feature = feature\n best_error = error\n \n return best_feature # Return the best feature we found\n \n \n # Building the tree\n # =================\n \n # With the above functions implemented correctly, we are now ready to build our decision tree. \n # Each node in the decision tree is represented as a dictionary which contains the following keys and possible values:\n # { \n # 'is_leaf' : True/False.\n # 'prediction' : Prediction at the leaf node.\n # 'left' : (dictionary corresponding to the left tree).\n # 'right' : (dictionary corresponding to the right tree).\n # 'splitting_feature' : The feature that this node splits on.\n # }\n \n # 10. First, we will write a function that creates a leaf node given a set of target values. 
Your code should be analogous to\n def create_leaf(self, target_values): \n # Create a leaf node\n leaf = {\n 'splitting_feature' : None,\n 'left' : None,\n 'right' : None,\n 'is_leaf': True,\n 'prediction': 0\n } \n \n # Count the number of data points that are +1 and -1 in this node.\n num_ones = len(target_values[target_values == +1])\n num_minus_ones = len(target_values[target_values == -1]) \n \n # For the leaf node, set the prediction to be the majority class.\n # Store the predicted class (1 or -1) in leaf['prediction']\n if num_ones > num_minus_ones:\n leaf['prediction'] = 1 \n else:\n leaf['prediction'] = -1 \n \n # Return the leaf node\n return leaf \n \n # Early stopping methods for decision trees\n # =========================================\n\n # In this section, we will extend the binary tree implementation from the previous assignment in order to handle some early stopping conditions. \n # Recall the 3 early stopping methods that were discussed in lecture:\n\n # Reached a maximum depth. (set by parameter max_depth).\n # Reached a minimum node size. (set by parameter min_node_size).\n # Don't split if the gain in error reduction is too small. (set by parameter min_error_reduction).\n \n # Early stopping condition 1: Maximum depth\n # =========================================\n\n # Recall that we already implemented the maximum depth stopping condition in the previous assignment. In this assignment, we will experiment with this condition a bit more \n # and also write code to implement the 2nd and 3rd early stopping conditions. \n\n # Early stopping condition 2: Minimum node size\n # =============================================\n\n # The function reached_minimum_node_size takes 2 arguments:\n \n # 1. The data (from a node)\n # 2. The minimum number of data points that a node is allowed to split on, min_node_size.\n \n # 7. 
This function simply calculates whether the number of data points at a given node is less than or equal to the specified minimum node size. \n # This function will be used to detect this early stopping condition in the decision_tree_create function. Your code should be analogous to\n \n def reached_minimum_node_size(self, data, min_node_size):\n # Return True if the number of data points is less than or equal to the minimum node size.\n return True if len(data) <= min_node_size else False\n \n # Early stopping condition 3: Minimum gain in error reduction\n # ===========================================================\n \n # The function error_reduction takes 2 arguments:\n \n # 1. The error before a split, error_before_split.\n # 2. The error after a split, error_after_split.\n \n # 8. This function computes the gain in error reduction, i.e., the difference between the error before the split and that after the split. \n # This function will be used to detect this early stopping condition in the decision_tree_create function. Your code should be analogous to\n \n def error_reduction(self, error_before_split, error_after_split):\n # Return the error before the split minus the error after the split.\n return error_before_split - error_after_split\n \n # Implementing early stopping condition 2: minimum node size:\n # ===========================================================\n\n # - Step 1: Use the function reached_minimum_node_size that you implemented earlier to write an if condition to detect whether we have hit the base case, i.e., the node does not have enough data points and should be turned into a leaf. Don't forget to use the min_node_size argument.\n # - Step 2: Return a leaf. 
This line of code should be the same as the other (pre-implemented) stopping conditions.\n\n # Implementing early stopping condition 3: minimum error reduction:\n # =================================================================\n\n # Note: This has to come after finding the best splitting feature so we can calculate the error after splitting in order to calculate the error reduction. \n # Recall that classification error is defined as:\n\n # classification error = # mistakes / # total examples\n\n # - Step 1: Calculate the classification error before splitting.\n # - Step 2: Calculate the classification error after splitting. This requires calculating the number of mistakes in the left and right splits, and then dividing by the total number \n # of examples.\n # - Step 3: Use the function error_reduction to that you implemented earlier to write an if condition to detect whether the reduction in error is less than the \n # constant provided (min_error_reduction). Don't forget to use that argument.\n \n # 11. Now, we will provide a Python skeleton of the learning algorithm. Note that this code is not complete; it needs to be completed by you if you are using Python. 
\n # Otherwise, your code should be analogous to\n \n def build_binary_decision_tree(self, data, features, target, current_depth = 0, max_depth = 10):\n remaining_features = features[:] # Make a copy of the features.\n \n target_values = data[target]\n print(\"--------------------------------------------------------------------\")\n print(\"Subtree, depth = %s (%s data points).\" % (current_depth, len(target_values)))\n \n # Stopping condition 1\n # (Check if there are mistakes at current node.\n # Recall you wrote a function intermediate_node_num_mistakes to compute this.)\n if self.intermediate_node_num_mistakes(target_values) == 0: \n print(\"Stopping condition 1 reached.\") \n # If not mistakes at current node, make current node a leaf node\n return self.create_leaf(target_values)\n \n # Stopping condition 2 (check if there are remaining features to consider splitting on)\n if remaining_features == None: \n print(\"Stopping condition 2 reached.\") \n # If there are no remaining features to consider, make current node a leaf node\n return self.create_leaf(target_values) \n \n # Early stopping condition 1: Reached max depth limit.\n if current_depth >= max_depth:\n print(\"Reached maximum depth. Stopping for now.\")\n # If the max tree depth has been reached, make current node a leaf node\n return self.create_leaf(target_values)\n \n # Early stopping condition 2: Reached the minimum node size.\n # If the number of data points is less than or equal to the minimum size, return a leaf.\n if self.reached_minimum_node_size(target_values, 10):\n print(\"Early stopping condition 2 reached. Reached minimum node size.\")\n return self.create_leaf(target_values)\n \n # Find the best splitting feature (recall the function best_splitting_feature implemented above)\n splitting_feature = self.best_splitting_feature(data, features, target)\n \n # Split on the best feature that we found. 
\n left_split = data[data[splitting_feature] == 0]\n right_split = data[data[splitting_feature] == 1]\n \n # Early stopping condition 3: Minimum error reduction\n # Calculate the error before splitting (number of misclassified examples \n # divided by the total number of examples)\n error_before_split = self.intermediate_node_num_mistakes(target_values) / float(len(data))\n \n # Calculate the error after splitting (number of misclassified examples \n # in both groups divided by the total number of examples)\n left_mistakes = self.intermediate_node_num_mistakes(left_split[target]) \n right_mistakes = self.intermediate_node_num_mistakes(right_split[target]) \n error_after_split = (left_mistakes + right_mistakes) / float(len(data))\n \n # If the error reduction is LESS THAN OR EQUAL TO min_error_reduction, return a leaf. sample values: 0.0, 0.05, 0.1, and 0.14\n if self.error_reduction(error_before_split, error_after_split) <= 0.05:\n print(\"Early stopping condition 3 reached. Minimum error reduction.\")\n return self.create_leaf(target_values)\n \n remaining_features.remove(splitting_feature)\n print(\"Split on feature %s. 
(%s, %s)\" % (splitting_feature, len(left_split), len(right_split)))\n \n # Create a leaf node if the split is \"perfect\"\n if len(left_split) == len(data):\n print(\"Creating leaf node.\")\n return self.create_leaf(left_split[target])\n if len(right_split) == len(data):\n print(\"Creating leaf node.\")\n return self.create_leaf(right_split[target])\n \n # Repeat (recurse) on left and right subtrees\n left_tree = self.build_binary_decision_tree(left_split, remaining_features, target, current_depth + 1, max_depth) \n right_tree = self.build_binary_decision_tree(right_split, remaining_features, target, current_depth + 1, max_depth) \n \n return {'is_leaf' : False, \n 'prediction' : None,\n 'splitting_feature': splitting_feature,\n 'left' : left_tree, \n 'right' : right_tree}\n \n \n","sub_path":"python/coursera/ml_common/commonlib.py","file_name":"commonlib.py","file_ext":"py","file_size_in_byte":18859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"244212083","text":"'''\r\n@author: Bradley \r\n'''\r\nimport random\r\n\r\n\r\nimport tkinter as tk\r\n\r\n# Window?\r\ngame=tk.Tk()\r\ngame.title(\"Dice Game\")\r\ngame.geometry(\"600x400\")\r\n\r\nlabel = tk.Label(text = \"GAME\")\r\nlabel.grid(column=0,row=0)\r\n\r\nroll = tk.Button(game,text=\"Roll\")\r\nroll.grid(column=5,row=5)\r\n\r\n\r\n\r\n\r\n\r\n\r\ngame.mainloop()\r\n\r\n# Use to add more dice\r\n'''\r\n[-----]\\n[0 0]\\n[0 0]\\n[0 0]\\n[-----]\r\n'''\r\n\r\n\r\n#die = [\"[-----]\\n[ ]\\n[ 0 ]\\n[ ]\\n[-----]\",\"[-----]\\n[0 ]\\n[ ]\\n[ 0]\\n[-----]\",\"[-----]\\n[0 ]\\n[ 0 ]\\n[ 0]\\n[-----]\",\"[-----]\\n[0 0]\\n[ ]\\n[0 0]\\n[-----]\",\"[-----]\\n[0 0]\\n[ 0 ]\\n[0 0]\\n[-----]\",\"[-----]\\n[0 0]\\n[0 0]\\n[0 0]\\n[-----]\"]\r\n\r\n#run = True\r\n\r\n#while run:\r\n# num = random.randint(0,5)\r\n# print(die[num])\r\n# go = str(input(\"Type \\\"stop\\\" to stop rolling, otherwise hit enter \\n\"))\r\n# if go == \"stop\":\r\n# run = False\r\n\r\n\r\n\r\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"416931282","text":"# -*- coding: utf-8 -*-\nimport os\nfrom alchemydb import Base, engine\nfrom sqlalchemy import Column, Integer, String, Text, text, DateTime\nfrom sqlalchemy.sql.functions import current_timestamp\nfrom datetime import datetime\n\nclass Tasks(Base):\n __tablename__ = 'tasks'\n\n id = Column(\n Integer,\n primary_key=True,\n autoincrement=True\n )\n name = Column(String(256))\n text = Column(String(256))\n created_at = Column(\n DateTime,\n default=datetime.now(),\n nullable=False,\n server_default=current_timestamp()\n )\n updated_at = Column(\n DateTime,\n default=datetime.now(),\n nullable=False,\n onupdate=datetime.now()\n # server_default=text(\n # 'CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'\n # )\n )\n\nif __name__ == \"__main__\":\n Base.metadata.create_all(engine)","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"63491241","text":"import os \nfrom bs4 import BeautifulSoup\n\ndef loadStudentNamesAndAliases():\n # Load in student names and their aliases\n # Names and associated aliases are stored in a text file where each line is:\n # Anonymous Name, Student Name, Alias\n dAliases = dict()\n with open(\"Student Names and Aliases.txt\") as fh:\n fh.readline() # Disregard the header\n for line in fh:\n anonName, realName, alias = [i.strip() for i in line.split(\",\")]\n dAliases[anonName] = (realName, alias)\n return dAliases\n\n\n# Load student names\ndAliases = loadStudentNamesAndAliases()\n\n\n# Path to where the posts should be put\npathToPosts = os.path.join(\"..\", \"_posts\")\n\n\nfor anonName in dAliases:\n\trealName, alias = dAliases[anonName]\n\n\t# HTML Game Post (if it exists)\n\tpathToStudentDir = os.path.join(\"..\", \"games\", \"HTML\", anonName)\n\tif os.path.exists(pathToStudentDir):\n\t\tpathToPost = os.path.join(pathToPosts, \"2014-08-04-\"+alias+\"_HTML.html\")\n\t\twith open(pathToPost, \"w\") as fh:\n\t\t\t# yaml front matter\n\t\t\tfh.write(\"---\\n\")\n\t\t\tfh.write(\"layout: htmlGame\\n\")\n\t\t\tfh.write(\"title: {0}\\n\".format(alias))\n\t\t\tfh.write(\"tags: HTML\\n\")\n\t\t\tfh.write(\"studentName: {0}\\n\".format(alias))\n\t\t\tfh.write(\"screenshotURL: ../../../games/HTML/{0}/screenshot.png\\n\".format(anonName))\n\t\t\tfh.write(\"---\\n\")\n\n\t\t\t# yaml content\n\t\t\tfh.write(\"\")\n\n\n\t# JavaScript Game Post (if it exists)\n\tpathToStudentDir = os.path.join(\"..\", \"games\", \"JavaScript\", anonName)\n\tif os.path.exists(pathToStudentDir):\n\t\tpathToPost = os.path.join(pathToPosts, \"2014-08-04-\"+alias+\"_JavaScript.html\")\n\t\tpathToJSGame = os.path.join(\"..\", \"games\", \"JavaScript\", anonName, \"index.html\")\n\t\tsoup = BeautifulSoup(open(pathToJSGame))\n\t\tscriptString = str(soup.body.script)\n\t\twith open(pathToPost, \"w\") as fh:\n\t\t\t# yaml front matter\n\t\t\tfh.write(\"---\\n\")\n\t\t\tfh.write(\"layout: 
javascriptGame\\n\")\n\t\t\tfh.write(\"title: {0}\\n\".format(alias))\n\t\t\tfh.write(\"tags: JavaScript\\n\")\n\t\t\tfh.write(\"studentName: {0}\\n\".format(alias))\n\t\t\tfh.write(\"screenshotURL: ../../../games/JavaScript/{0}/screenshot.png\\n\".format(anonName))\n\t\t\tfh.write(\"---\\n\")\n\n\t\t\t# yaml content\n\t\t\tfh.write(scriptString)\n\n\t# Stencyl Game Post (if it exists)\n\tpathToStudentDir = os.path.join(\"..\", \"games\", \"stencyl\", anonName)\n\tif os.path.exists(pathToStudentDir):\n\t\tpathToPost = os.path.join(pathToPosts, \"2014-08-04-\"+alias+\"_Stencyl.html\")\n\n\t\t# Get game resolution and filename\n\t\tpathToResolution = os.path.join(pathToStudentDir, \"Game Info.txt\")\n\t\twidth, height = \"0\", \"0\"\n\t\twith open(pathToResolution, \"r\") as fh:\n\t\t\ttitle = fh.readline().split(\":\")[-1].strip()\n\t\t\twidth, height = fh.readline().split(\":\")[-1].split(\",\")\n\t\t\twidth, height = width.strip(), height.strip()\n\t\tswfFilename = title+\".swf\"\n\t\tscreenshotFilename = title+\".png\"\n\n\t\twith open(pathToPost, \"w\") as fh:\n\t\t\t# yaml front matter\n\t\t\tfh.write(\"---\\n\")\n\t\t\tfh.write(\"layout: stencylGame\\n\")\n\t\t\tfh.write(\"title: {0}\\n\".format(alias))\n\t\t\tfh.write(\"tags: stencyl\\n\")\n\t\t\tfh.write(\"studentName: {0}\\n\".format(alias))\n\t\t\tfh.write(\"screenshotURL: ../../../games/Stencyl/{0}/{1}\\n\".format(anonName, screenshotFilename))\n\t\t\tfh.write(\"---\\n\")\n\n\t\t\t# yaml content\n\t\t\tfh.write(\"\".format(width, height))\n\t\t\tfh.write(\"\")\n\t\t\tfh.write(\"\")","sub_path":"scripts/generateGamePosts.py","file_name":"generateGamePosts.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"343581786","text":"import math\nimport torch\nimport numpy as np\nfrom torch.distributions import Poisson\nfrom torch import nn\nfrom torch import autograd\n\nclass PoissonProcess(object):\n \"\"\"docstring for PoissonProcess\"\"\"\n\n def __init__(self, intensity_grid, log_intensity=None, observations=None):\n super(PoissonProcess, self).__init__()\n\n self.intensity_grid = intensity_grid\n if log_intensity is not None:\n self.log_intensity = nn.Parameter(log_intensity)\n else:\n self.log_intensity = nn.Parameter(torch.zeros(intensity_grid.size(0)))\n self.observations = observations\n self.in_dim = self.intensity_grid.shape[-1]\n self.n_pts = self.intensity_grid.shape[0]\n self.delta_x = torch.abs(self.intensity_grid[0, -1] - self.intensity_grid[1, -1])\n\n def simulate(self, intensity_grid=None, log_intensity=None):\n\n if intensity_grid is not None:\n self.update_grid(intensity_grid)\n if log_intensity is not None:\n self.update_val(log_intensity)\n\n sim_points = torch.tensor([])\n\n for pt in range(self.n_pts):\n ## draw points from poisson ##\n rate = torch.exp(self.log_intensity[pt]) * (self.delta_x ** self.in_dim)\n dist = Poisson(rate)\n n_draw = int(dist.sample().item())\n\n samples = torch.zeros(n_draw, self.in_dim)\n ## sample their locations ##\n for dim in range(self.in_dim):\n samples[:, dim] = self.delta_x * torch.rand(n_draw)\n samples[:, dim] += self.intensity_grid[pt, dim] + self.delta_x/2\n\n ## append to sim_points ##\n sim_points = torch.cat((sim_points, samples))\n\n return sim_points\n\n def update_grid(self, grid):\n self.intensity_grid = grid\n\n def update_val(self, val):\n self.log_intensity.data = val\n\n def update_obs(self, obs):\n self.observations = obs\n\n def compute_obs_distance(self, observations=None):\n n = observations.size(0)\n m = self.intensity_grid.size(0)\n d = observations.size(1)\n\n xx = observations.unsqueeze(1).expand(n, m, d)\n yy = self.intensity_grid.unsqueeze(0).expand(n, m, d)\n\n dist = 
torch.pow(xx - yy, 2).sum(2)\n return dist\n\n def likelihood(self, observations=None):\n\n if observations is not None:\n self.update_obs(observations)\n\n dist = Poisson(self.delta_x.pow(self.in_dim) * self.log_intensity.exp())\n\n if type(observations) is list:\n ## if we're storing multiple draws ##\n lh = 0\n for obs in observations:\n obs_dist = self.compute_obs_distance(obs)\n samples_from = obs_dist.min(1)[1]\n\n counts_per_bin = torch.zeros(self.intensity_grid.size(0))\n for smp in samples_from:\n counts_per_bin[smp] += 1\n\n lh += dist.log_prob(counts_per_bin).sum()\n return lh\n else:\n ## storing a single draw\n obs_dist = self.compute_obs_distance(observations)\n samples_from = obs_dist.min(1)[1]\n\n counts_per_bin = torch.zeros(self.intensity_grid.size(0))\n for smp in samples_from:\n counts_per_bin[smp] += 1\n\n lh = dist.log_prob(counts_per_bin).sum()\n return lh\n\n def compute_grad(self, observations=None):\n if observations is not None:\n self.update_obs(observations)\n lh = self.likelihood(observations)\n lh.backward()\n return self.log_intensity.grad\n\n def compute_hessian(self, observations=None):\n if observations is not None:\n self.update_obs(observations)\n lh = self.likelihood(observations)\n grad = autograd.grad(lh, self.log_intensity, create_graph=True)[0]\n grad = autograd.grad(grad.sum(), self.log_intensity)[0]\n return torch.diag(grad)\n","sub_path":"cox-process/poisson_process.py","file_name":"poisson_process.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"390693989","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\HDi\\Google Drive\\ProgramCodes\\PythonCodes\\gaeio\\src\\gui\\importhorizonfile.py\n# Compiled at: 2020-04-25 15:07:54\n# Size of source mod 2**32: 16534 bytes\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport numpy as np, sys, os\nsys.path.append(os.path.dirname(__file__)[:-4][:-4][:-6])\nfrom gaeio.src.basic.data import data as basic_data\nfrom gaeio.src.horizon.inputoutput import inputoutput as horizon_io\nfrom gaeio.src.horizon.analysis import analysis as horizon_ays\nfrom gaeio.src.vis.messager import messager as vis_msg\nQtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)\nQtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)\n\nclass importhorizonfile(object):\n horizondata = {}\n rootpath = ''\n iconpath = os.path.dirname(__file__)\n dialog = None\n filelist = []\n\n def setupGUI(self, ImportHorionFile):\n ImportHorionFile.setObjectName('ImportHorionFile')\n ImportHorionFile.setFixedSize(600, 320)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(os.path.join(self.iconpath, 'icons/copy.png')), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n ImportHorionFile.setWindowIcon(icon)\n self.lblfile = QtWidgets.QLabel(ImportHorionFile)\n self.lblfile.setObjectName('lblfile')\n self.lblfile.setGeometry(QtCore.QRect(10, 10, 110, 30))\n self.ldtfile = QtWidgets.QLineEdit(ImportHorionFile)\n self.ldtfile.setObjectName('ldtfile')\n self.ldtfile.setGeometry(QtCore.QRect(130, 10, 390, 30))\n self.btnfile = QtWidgets.QPushButton(ImportHorionFile)\n self.btnfile.setObjectName('btnfile')\n self.btnfile.setGeometry(QtCore.QRect(530, 10, 60, 30))\n self.lbltype = QtWidgets.QLabel(ImportHorionFile)\n self.lbltype.setObjectName('lbltype')\n self.lbltype.setGeometry(QtCore.QRect(30, 50, 100, 30))\n self.cbbtype = 
QtWidgets.QComboBox(ImportHorionFile)\n self.cbbtype.setObjectName('cbbtype')\n self.cbbtype.setGeometry(QtCore.QRect(130, 50, 460, 30))\n self.lblpara = QtWidgets.QLabel(ImportHorionFile)\n self.lblpara.setObjectName('lblpara')\n self.lblpara.setGeometry(QtCore.QRect(10, 100, 110, 30))\n self.lblinl = QtWidgets.QLabel(ImportHorionFile)\n self.lblinl.setObjectName('lblinl')\n self.lblinl.setGeometry(QtCore.QRect(20, 140, 100, 30))\n self.cbbinl = QtWidgets.QComboBox(ImportHorionFile)\n self.cbbinl.setObjectName('cbbinl')\n self.cbbinl.setGeometry(QtCore.QRect(130, 140, 60, 30))\n self.lblxl = QtWidgets.QLabel(ImportHorionFile)\n self.lblxl.setObjectName('lblxl')\n self.lblxl.setGeometry(QtCore.QRect(20, 180, 100, 30))\n self.cbbxl = QtWidgets.QComboBox(ImportHorionFile)\n self.cbbxl.setObjectName('cbbxl')\n self.cbbxl.setGeometry(QtCore.QRect(130, 180, 60, 30))\n self.lblz = QtWidgets.QLabel(ImportHorionFile)\n self.lblz.setObjectName('lbz')\n self.lblz.setGeometry(QtCore.QRect(20, 220, 100, 30))\n self.cbbz = QtWidgets.QComboBox(ImportHorionFile)\n self.cbbz.setObjectName('cbbz')\n self.cbbz.setGeometry(QtCore.QRect(130, 220, 60, 30))\n self.lblcomment = QtWidgets.QLabel(ImportHorionFile)\n self.lblcomment.setObjectName('lblcomment')\n self.lblcomment.setGeometry(QtCore.QRect(220, 140, 100, 30))\n self.cbbcomment = QtWidgets.QComboBox(ImportHorionFile)\n self.cbbcomment.setObjectName('cbbcomment')\n self.cbbcomment.setGeometry(QtCore.QRect(330, 140, 60, 30))\n self.lbldelimiter = QtWidgets.QLabel(ImportHorionFile)\n self.lbldelimiter.setObjectName('lbldelimiter')\n self.lbldelimiter.setGeometry(QtCore.QRect(220, 180, 100, 30))\n self.cbbdelimiter = QtWidgets.QComboBox(ImportHorionFile)\n self.cbbdelimiter.setObjectName('cbbdelimiter')\n self.cbbdelimiter.setGeometry(QtCore.QRect(330, 180, 60, 30))\n self.lblvalueundefined = QtWidgets.QLabel(ImportHorionFile)\n self.lblvalueundefined.setObjectName('lblvalueundefined')\n 
self.lblvalueundefined.setGeometry(QtCore.QRect(420, 140, 100, 30))\n self.ldtvalueundefined = QtWidgets.QLineEdit(ImportHorionFile)\n self.ldtvalueundefined.setObjectName('ldtvalueundefined')\n self.ldtvalueundefined.setGeometry(QtCore.QRect(530, 140, 60, 30))\n self.lblvaluefillup = QtWidgets.QLabel(ImportHorionFile)\n self.lblvaluefillup.setObjectName('lblvaluefillup')\n self.lblvaluefillup.setGeometry(QtCore.QRect(420, 180, 100, 30))\n self.ldtvaluefillup = QtWidgets.QLineEdit(ImportHorionFile)\n self.ldtvaluefillup.setObjectName('ldtvaluefillup')\n self.ldtvaluefillup.setGeometry(QtCore.QRect(530, 180, 60, 30))\n self.btnimport = QtWidgets.QPushButton(ImportHorionFile)\n self.btnimport.setObjectName('btnimport')\n self.btnimport.setGeometry(QtCore.QRect(220, 270, 160, 30))\n self.btnimport.setIcon(icon)\n self.msgbox = QtWidgets.QMessageBox(ImportHorionFile)\n self.msgbox.setObjectName('msgbox')\n _center_x = ImportHorionFile.geometry().center().x()\n _center_y = ImportHorionFile.geometry().center().y()\n self.msgbox.setGeometry(QtCore.QRect(_center_x - 150, _center_y - 50, 300, 100))\n self.retranslateGUI(ImportHorionFile)\n QtCore.QMetaObject.connectSlotsByName(ImportHorionFile)\n\n def retranslateGUI(self, ImportHorionFile):\n self.dialog = ImportHorionFile\n _translate = QtCore.QCoreApplication.translate\n ImportHorionFile.setWindowTitle(_translate('ImportHorionFile', 'Import Horizon from File'))\n self.lblfile.setText(_translate('ImportHorionFile', 'Select horizon files:'))\n self.lblfile.setAlignment(QtCore.Qt.AlignCenter)\n self.ldtfile.setText(_translate('ImportHorionFile', os.path.abspath(self.rootpath)))\n self.btnfile.setText(_translate('ImportHorionFile', 'Browse'))\n self.btnfile.clicked.connect(self.clickBtnFile)\n self.lbltype.setText(_translate('ImportHorionFile', '\\t Type:'))\n self.cbbtype.addItems(['Kingdom 3D interpretation lines (ASCII) (*.*)',\n 'Seisworks 3D interpretation (ASCII) (*.*)',\n 'Customized (ASCII) (*.*)'])\n 
self.cbbtype.currentIndexChanged.connect(self.changeCbbType)\n self.lblpara.setText(_translate('ImportHorionFile', 'Settings:'))\n self.lblinl.setText(_translate('ImportHorionFile', 'Inline column'))\n self.lblinl.setAlignment(QtCore.Qt.AlignRight)\n self.cbbinl.addItems([str(i + 1) for i in range(10)])\n self.cbbinl.setCurrentIndex(2)\n self.cbbinl.setEnabled(False)\n self.lblxl.setText(_translate('ImportHorionFile', 'Crossline column:'))\n self.lblxl.setAlignment(QtCore.Qt.AlignRight)\n self.cbbxl.addItems([str(i + 1) for i in range(10)])\n self.cbbxl.setCurrentIndex(3)\n self.cbbxl.setEnabled(False)\n self.lblz.setText(_translate('ImportHorionFile', 'Time/depth column:'))\n self.lblz.setAlignment(QtCore.Qt.AlignRight)\n self.cbbz.addItems([str(i + 1) for i in range(10)])\n self.cbbz.setCurrentIndex(4)\n self.cbbz.setEnabled(False)\n self.lblcomment.setText(_translate('ImportHorionFile', 'Header with '))\n self.lblcomment.setAlignment(QtCore.Qt.AlignRight)\n self.cbbcomment.addItems(['None', '#', '!'])\n self.cbbcomment.setCurrentIndex(0)\n self.cbbcomment.setEnabled(False)\n self.lbldelimiter.setText(_translate('ImportHorionFile', 'Delimiter: '))\n self.lbldelimiter.setAlignment(QtCore.Qt.AlignRight)\n self.cbbdelimiter.addItems(['Space', 'Comma'])\n self.cbbdelimiter.setCurrentIndex(0)\n self.cbbdelimiter.setEnabled(False)\n self.lblvalueundefined.setText(_translate('ImportHorionFile', 'Undefined value:'))\n self.lblvalueundefined.setAlignment(QtCore.Qt.AlignRight)\n self.ldtvalueundefined.setText(_translate('ImportHorionFile', '-999'))\n self.lblvaluefillup.setText(_translate('ImportHorionFile', 'Filling-up value:'))\n self.lblvaluefillup.setAlignment(QtCore.Qt.AlignRight)\n self.ldtvaluefillup.setText(_translate('ImportHorionFile', 'NaN'))\n self.btnimport.setText(_translate('ImportHorionFile', 'Import Horizon'))\n self.btnimport.clicked.connect(self.clickBtnImportHorionFile)\n\n def clickBtnFile(self):\n _dialog = QtWidgets.QFileDialog()\n _file = 
_dialog.getOpenFileNames(None, 'Select Horizon File(s)', (self.rootpath), filter='All files (*.*)')\n if len(_file[0]) > 0:\n self.filelist = _file[0]\n self.ldtfile.setText(str(_file[0]))\n\n def changeCbbType(self):\n if self.cbbtype.currentIndex() == 0:\n self.cbbinl.setCurrentIndex(2)\n self.cbbinl.setEnabled(False)\n self.cbbxl.setCurrentIndex(3)\n self.cbbxl.setEnabled(False)\n self.cbbz.setCurrentIndex(4)\n self.cbbz.setEnabled(False)\n self.cbbcomment.setCurrentIndex(0)\n self.cbbcomment.setEnabled(False)\n self.cbbdelimiter.setCurrentIndex(0)\n self.cbbdelimiter.setEnabled(False)\n else:\n if self.cbbtype.currentIndex() == 1:\n self.cbbinl.setCurrentIndex(0)\n self.cbbinl.setEnabled(False)\n self.cbbxl.setCurrentIndex(1)\n self.cbbxl.setEnabled(False)\n self.cbbz.setCurrentIndex(4)\n self.cbbz.setEnabled(False)\n self.cbbcomment.setCurrentIndex(0)\n self.cbbcomment.setEnabled(False)\n self.cbbdelimiter.setCurrentIndex(0)\n self.cbbdelimiter.setEnabled(False)\n if self.cbbtype.currentIndex() == 2:\n self.cbbinl.setCurrentIndex(4)\n self.cbbinl.setEnabled(True)\n self.cbbxl.setCurrentIndex(3)\n self.cbbxl.setEnabled(True)\n self.cbbz.setCurrentIndex(2)\n self.cbbz.setEnabled(True)\n self.cbbcomment.setCurrentIndex(1)\n self.cbbcomment.setEnabled(True)\n self.cbbdelimiter.setCurrentIndex(0)\n self.cbbdelimiter.setEnabled(True)\n\n def clickBtnImportHorionFile(self):\n self.refreshMsgBox()\n _nfile = len(self.filelist)\n if _nfile <= 0:\n vis_msg.print('ERROR in ImportHorizonFile: No file selected for import', type='error')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Import Horizon from File', 'No file selected for import')\n return\n _undefined_value = basic_data.str2float(self.ldtvalueundefined.text())\n if _undefined_value is False:\n vis_msg.print('ERROR in ImportHorizonFile: Non-float undefined value', type='error')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Import Horizon from File', 'Nom-float undefined value')\n return\n _fillup_value = 
basic_data.str2float(self.ldtvaluefillup.text())\n if _fillup_value is False:\n vis_msg.print('ERROR in ImportHorizonFile: Non-float filled-up value', type='error')\n QtWidgets.QMessageBox.critical(self.msgbox, 'Import Horizon from File', 'Nom-float filled-up value')\n return\n _pgsdlg = QtWidgets.QProgressDialog()\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(os.path.join(self.iconpath, 'icons/point.png')), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n _pgsdlg.setWindowIcon(icon)\n _pgsdlg.setWindowTitle('Import ' + str(_nfile) + ' Horizon files')\n _pgsdlg.setCancelButton(None)\n _pgsdlg.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)\n _pgsdlg.forceShow()\n _pgsdlg.setFixedWidth(400)\n _pgsdlg.setMaximum(_nfile)\n _horizondata = {}\n for i in range(_nfile):\n QtCore.QCoreApplication.instance().processEvents()\n _filename = self.filelist[i]\n print('ImportHorionFile: Import %d of %d horizon files: %s' % (i + 1, _nfile, _filename))\n _comment = None\n if self.cbbcomment.currentIndex() == 0:\n _comment = None\n if self.cbbcomment.currentIndex() == 1:\n _comment = '#'\n if self.cbbcomment.currentIndex() == 2:\n _comment = '!'\n _delimiter = None\n if self.cbbdelimiter.currentIndex() == 1:\n _delimiter = ','\n _filenamemain = os.path.splitext(os.path.basename(_filename))[0]\n _horizondata[_filenamemain] = horizon_io.readHorizonFromAscii(_filename, comment=_comment,\n delimiter=_delimiter,\n inlcol=(self.cbbinl.currentIndex()),\n xlcol=(self.cbbxl.currentIndex()),\n zcol=(self.cbbz.currentIndex()),\n filling_up_value=_fillup_value,\n undefined_value=_undefined_value)\n if 'Z' in _horizondata[_filenamemain]['HorizonData'].keys():\n _z = _horizondata[_filenamemain]['HorizonData']['Z']\n if np.min(_z[(~np.isnan(_z))]) >= 0:\n _horizondata[_filenamemain]['HorizonData']['Z'] *= -1.0\n _pgsdlg.setValue(i + 1)\n\n for key in _horizondata.keys():\n if key in self.horizondata.keys():\n if checkHorizonData(self.horizondata[key]):\n reply = 
QtWidgets.QMessageBox.question(self.msgbox, 'Import Horizon from File', key + ' already exists. Overwrite?', QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)\n if reply == QtWidgets.QMessageBox.No:\n return\n self.horizondata[key] = _horizondata[key]\n\n QtWidgets.QMessageBox.information(self.msgbox, 'Import Horizon from File', str(_nfile) + ' file(s) imported successfully')\n\n def refreshMsgBox(self):\n _center_x = self.dialog.geometry().center().x()\n _center_y = self.dialog.geometry().center().y()\n self.msgbox.setGeometry(QtCore.QRect(_center_x - 150, _center_y - 50, 300, 100))\n\n\ndef checkHorizonData(horizon):\n return horizon_ays.checkHorizon(horizon)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ImportHorionFile = QtWidgets.QWidget()\n gui = importhorizonfile()\n gui.setupGUI(ImportHorionFile)\n ImportHorionFile.show()\n sys.exit(app.exec_())","sub_path":"pycfiles/gaeio-1.0.tar/importhorizonfile.cpython-36.py","file_name":"importhorizonfile.cpython-36.py","file_ext":"py","file_size_in_byte":14565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"545965949","text":"import copy\n\nwith open('input') as f:\n cases = [list(line.rstrip()) for line in f.readlines()]\n\ndef getOccupiedCount(seatMatrix, char=\"#\"):\n return sum([i.count(char) for i in seatMatrix])\n\ndef countVisiblyAdjacent(seatMatrix, i, j):\n visiblyAdjacentSeats, rows, columns = 0, len(seatMatrix), len(seatMatrix[0])\n\n for row_factor in [-1, 0, 1]:\n for column_factor in [-1, 0, 1]:\n if( not (row_factor == column_factor == 0)):\n k, l = i + row_factor, j + column_factor\n\n while ( k >= 0 and l >= 0 and k < rows and l < columns):\n if(seatMatrix[k][l] == \"L\"):\n break\n if(seatMatrix[k][l] == \"#\"):\n visiblyAdjacentSeats += 1\n break\n k += row_factor\n l += column_factor\n\n return visiblyAdjacentSeats\n\ndef performTransformations(seatMatrix, findSymbol, replaceSymbol, isValidAdjacentCount):\n boardCopy = copy.deepcopy(seatMatrix)\n for i in range(0, len(seatMatrix)):\n for j in range(0, len(seatMatrix[i])):\n adjacentSeats = countVisiblyAdjacent(boardCopy, i, j)\n if(boardCopy[i][j] == findSymbol and isValidAdjacentCount(adjacentSeats)):\n seatMatrix[i][j] = replaceSymbol\n\n return getOccupiedCount(seatMatrix) if boardCopy == seatMatrix else 0\n\nwhile True:\n count = performTransformations(cases, 'L', '#', (lambda adjacentSeats: adjacentSeats == 0)) or performTransformations(cases, '#', 'L', (lambda adjacentSeats: adjacentSeats >= 5))\n if(count):\n print(count)\n exit()\n","sub_path":"2020/11/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"477895653","text":"import os\n\nfrom PySide import QtGui\nfrom PySide import QtCore\n\nfrom libs.central_widget import CentralWidget\nfrom libs.statusbar import Statusbar\n\n\nclass Window(QtGui.QMainWindow):\n tab_key_pressed = QtCore.Signal()\n\n def __init__(self, static_dir):\n self._static_dir = static_dir\n\n super(Window, self).__init__()\n\n self._statusbar = Statusbar(self)\n\n self.initUI()\n\n def initUI(self):\n \"\"\"Initialize UI.\"\"\"\n # Central widget\n central_widget = CentralWidget(self)\n self.setCentralWidget(central_widget)\n\n # Status bar\n self._statusbar.show_message(\"Ready\")\n\n # Window\n self.resize(int(os.getenv(\"WINDOW_WIDTH\")), int(os.getenv(\"WINDOW_HEIGHT\")))\n self.center()\n self.setWindowTitle(os.getenv(\"WINDOW_TITLE\"))\n self.setWindowIcon(QtGui.QIcon(os.path.join(self._static_dir, \"images\", os.getenv(\"WINDOW_ICON\"))))\n\n def center(self):\n qr = self.frameGeometry()\n cp = QtGui.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def keyPressEvent(self, e):\n \"\"\"Handler for keyboard key press event.\"\"\"\n # F12 - Exit\n if e.key() == QtCore.Qt.Key_F12:\n self.close()\n # Tab activate workarea\n elif e.key() == QtCore.Qt.Key_F8:\n self.tab_key_pressed.emit()\n # elif e.key() == QtCore.Qt.Key_Tab:\n # return True\n\n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self,\n \"Quit\",\n \"Are you sure to quit?\",\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.No)\n\n if reply == QtGui.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n","sub_path":"libs/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"565709782","text":"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, The QuTiP Project.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport numpy as np\nfrom qutip.lattice import *\nfrom qutip import (Qobj, tensor, basis, qeye, isherm, sigmax)\n# from numpy.testing import (assert_equal, assert_, assert_almost_equal,\n# run_module_suite)\nfrom numpy.testing import (assert_, run_module_suite)\n\n\nclass TestLattice:\n \"\"\"\n Tests for `qutip.lattice` class.\n \"\"\"\n def test_hamiltonian(self):\n \"\"\"\n lattice: Test the method Lattice1d.Hamiltonian().\n \"\"\"\n # num_cell = 1\n # Four different instances\n Periodic_Atom_Chain = Lattice1d(num_cell=1, boundary=\"periodic\")\n Aperiodic_Atom_Chain = Lattice1d(num_cell=1, boundary=\"aperiodic\")\n p_1223 = Lattice1d(num_cell=1, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[2, 3])\n ap_1223 = Lattice1d(num_cell=1, boundary=\"aperiodic\",\n cell_num_site=2, cell_site_dof=[2, 3])\n\n # Their Hamiltonians\n pHamt1 = Periodic_Atom_Chain.Hamiltonian()\n apHamt1 = Aperiodic_Atom_Chain.Hamiltonian()\n pHamt1223 = p_1223.Hamiltonian()\n apHamt1223 = ap_1223.Hamiltonian()\n\n # Benchmark answers\n pHamt1_C = Qobj([[0.]], dims=[[1], [1]])\n apHamt1_C = Qobj([[0.]], dims=[[1], [1]])\n site1223 = np.diag(np.zeros(2-1)-1, 1) + np.diag(np.zeros(2-1)-1, -1)\n Rpap1223 = tensor(Qobj(site1223), qeye([2, 3]))\n pap1223 = Qobj(Rpap1223, dims=[[2, 2, 3], [2, 2, 3]])\n\n # Checks for num_cell = 1\n assert_(pHamt1 == pHamt1_C)\n assert_(apHamt1 == 
apHamt1_C)\n assert_(pHamt1223 == pap1223) # for num_cell=1, periodic and\n assert_(apHamt1223 == pap1223) # aperiodic B.C. have same Hamiltonian\n\n # num_cell = 2\n # Four different instances\n Periodic_Atom_Chain = Lattice1d(num_cell=2, boundary=\"periodic\")\n Aperiodic_Atom_Chain = Lattice1d(num_cell=2, boundary=\"aperiodic\")\n\n p_2222 = Lattice1d(num_cell=2, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[2, 2])\n ap_2222 = Lattice1d(num_cell=2, boundary=\"aperiodic\",\n cell_num_site=2, cell_site_dof=[2, 2])\n\n # Their Hamiltonians\n pHamt2222 = p_2222.Hamiltonian()\n apHamt2222 = ap_2222.Hamiltonian()\n pHamt2 = Periodic_Atom_Chain.Hamiltonian()\n apHamt2 = Aperiodic_Atom_Chain.Hamiltonian()\n\n # Benchmark answers\n pHamt2_C = Qobj([[0., -1.],\n [-1., 0.]], dims=[[2], [2]])\n apHamt2_C = Qobj([[0., -1.],\n [-1., 0.]], dims=[[2], [2]])\n\n pap_2222 = np.zeros((16, 16), dtype=complex)\n pap_2222[0:4, 4:8] = -np.eye(4)\n pap_2222[4:8, 0:4] = -np.eye(4)\n pap_2222[4:8, 8:12] = -np.eye(4)\n pap_2222[8:12, 4:8] = -np.eye(4)\n pap_2222[8:12, 12:16] = -np.eye(4)\n pap_2222[12:16, 8:12] = -np.eye(4)\n pap_2222 = Qobj(pap_2222, dims=[[2, 2, 2, 2], [2, 2, 2, 2]])\n\n # Checks for num_cell = 2\n assert_(pHamt2 == pHamt2_C)\n assert_(apHamt2 == apHamt2_C)\n assert_(pHamt2222 == pap_2222) # for num_cell=2, periodic and\n assert_(apHamt2222 == pap_2222) # aperiodic B.C. 
have same Hamiltonian\n\n # num_cell = 3 # checking any num_cell >= 3 is pretty much equivalent\n Periodic_Atom_Chain = Lattice1d(num_cell=3, boundary=\"periodic\")\n Aperiodic_Atom_Chain = Lattice1d(num_cell=3, boundary=\"aperiodic\")\n p_3122 = Lattice1d(num_cell=3, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2, 2])\n ap_3122 = Lattice1d(num_cell=3, boundary=\"aperiodic\",\n cell_num_site=1, cell_site_dof=[2, 2])\n\n # Their Hamiltonians\n pHamt3 = Periodic_Atom_Chain.Hamiltonian()\n apHamt3 = Aperiodic_Atom_Chain.Hamiltonian()\n pHamt3122 = p_3122.Hamiltonian()\n apHamt3122 = ap_3122.Hamiltonian()\n\n # Benchmark answers\n pHamt3_C = Qobj([[0., -1., -1.],\n [-1., 0., -1.],\n [-1., -1., 0.]], dims=[[3], [3]])\n apHamt3_C = Qobj([[0., -1., 0.],\n [-1., 0., -1.],\n [0., -1., 0.]], dims=[[3], [3]])\n\n Hp_3122 = np.zeros((12, 12), dtype=complex)\n Hp_3122[0:8, 4:12] = Hp_3122[0:8, 4:12] - np.eye(8)\n Hp_3122[4:12, 0:8] = Hp_3122[4:12, 0:8] - np.eye(8)\n Hp_3122[0:4, 8:12] = Hp_3122[0:4, 8:12] - np.eye(4)\n Hp_3122[8:12, 0:4] = Hp_3122[8:12, 0:4] - np.eye(4)\n Hp_3122 = Qobj(Hp_3122, dims=[[3, 2, 2], [3, 2, 2]])\n\n Hap_3122 = np.zeros((12, 12), dtype=complex)\n Hap_3122[0:8, 4:12] = Hap_3122[0:8, 4:12] - np.eye(8)\n Hap_3122[4:12, 0:8] = Hap_3122[4:12, 0:8] - np.eye(8)\n Hap_3122 = Qobj(Hap_3122, dims=[[3, 2, 2], [3, 2, 2]])\n\n # Checks for num_cell = 3\n assert_(pHamt3 == pHamt3_C)\n assert_(apHamt3 == apHamt3_C)\n assert_(pHamt3122 == Hp_3122)\n assert_(apHamt3122 == Hap_3122)\n\n def test_cell_structures(self):\n \"\"\"\n lattice: Test the method Lattice1d.cell_structures().\n \"\"\"\n val_s = ['site0', 'site1']\n val_t = [' orb0', 'orb1']\n (H_cell_form, inter_cell_T_form, H_cell,\n inter_cell_T) = cell_structures(val_s, val_t)\n c_H_form = [['',\n '',\n '',\n ''],\n ['',\n '',\n '',\n ''],\n ['',\n '',\n '',\n ''],\n ['',\n '',\n '',\n '']]\n\n i_cell_T_form = [['',\n '',\n '',\n ''],\n ['',\n '',\n '',\n ''],\n ['',\n '',\n '',\n ''],\n ['',\n 
'',\n '',\n '']]\n c_H = np.zeros((4, 4), dtype=complex)\n i_cell_T = np.zeros((4, 4), dtype=complex)\n assert_(H_cell_form == c_H_form)\n assert_(inter_cell_T_form == i_cell_T_form)\n assert_((H_cell == c_H).all())\n assert_((inter_cell_T == i_cell_T).all())\n\n def test_basis(self):\n \"\"\"\n lattice: Test the method Lattice1d.basis().\n \"\"\"\n lattice_3242 = Lattice1d(num_cell=3, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[4, 2])\n psi0 = lattice_3242.basis(1, 0, [2, 1])\n psi0dag_a = np.zeros((1, 48), dtype=complex)\n psi0dag_a[0, 21] = 1\n psi0dag = Qobj(psi0dag_a, dims=[[1, 1, 1, 1], [3, 2, 4, 2]])\n assert_(psi0 == psi0dag.dag())\n\n def test_distribute_operator(self):\n \"\"\"\n lattice: Test the method Lattice1d.distribute_operator().\n \"\"\"\n lattice_412 = Lattice1d(num_cell=4, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2])\n op = Qobj(np.array([[0, 1], [1, 0]]))\n op_all = lattice_412.distribute_operator(op)\n sv_op_all = tensor(qeye(4), sigmax())\n assert_(op_all == sv_op_all)\n\n def test_operator_at_cells(self):\n \"\"\"\n lattice: Test the method Lattice1d.operator_between_cells().\n \"\"\"\n p_2222 = Lattice1d(num_cell=2, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[2, 2])\n op_0 = basis(2, 0) * basis(2, 1).dag()\n op_c = tensor(op_0, qeye([2, 2]))\n OP = p_2222.operator_between_cells(op_c, 1, 0)\n T = basis(2, 1) * basis(2, 0).dag()\n QP = tensor(T, op_c)\n assert_(OP == QP)\n\n def test_operator_between_cells(self):\n \"\"\"\n lattice: Test the method Lattice1d.operator_at_cells().\n \"\"\"\n lattice_412 = Lattice1d(num_cell=4, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2])\n op = Qobj(np.array([[0, 1], [1, 0]]))\n op_sp = lattice_412.operator_at_cells(op, cells=[1, 2])\n\n aop_sp = np.zeros((8, 8), dtype=complex)\n aop_sp[2:4, 2:4] = sigmax()\n aop_sp[4:6, 4:6] = sigmax()\n sv_op_sp = Qobj(aop_sp, dims=[[4, 2], [4, 2]])\n assert_(op_sp == sv_op_sp)\n\n def test_x(self):\n \"\"\"\n lattice: 
Test the method Lattice1d.x().\n \"\"\"\n lattice_3223 = Lattice1d(num_cell=3, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[2, 3])\n R = lattice_3223.x()\n npR = R.full()\n # count the number of off-diagonal elements n_off which should be 0\n n_off = np.count_nonzero(npR - np.diag(np.diagonal(npR)))\n assert_(n_off == 0)\n assert_((np.diag(R) == np.kron(range(3), np.ones(12))).all())\n\n def test_k(self):\n \"\"\"\n lattice: Test the method Lattice1d.k().\n \"\"\"\n L = 7\n lattice_L123 = Lattice1d(num_cell=L, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2, 3])\n kq = lattice_L123.k()\n kop = np.zeros((L, L), dtype=complex)\n for row in range(L):\n for col in range(L):\n if row == col:\n kop[row, col] = (L-1)/2\n else:\n kop[row, col] = 1 / (np.exp(2j * np.pi * (row - col)/L)-1)\n\n kt = np.kron(kop * 2 * np.pi / L, np.eye(6))\n dim_H = [[2, 2, 3], [2, 2, 3]]\n kt = Qobj(kt, dims=dim_H)\n\n [k_q, Vq] = kq.eigenstates()\n [k_t, Vt] = kt.eigenstates()\n k_tC = k_t - 2*np.pi/L*((L-1)//2)\n # k_ts = [(i-(L-1)//2)*2*np.pi/L for i in range(L)]\n # k_w = np.kron((np.array(k_ts)).T, np.ones((1,6)))\n assert_((np.abs(k_tC - k_q) < 1E-13).all())\n\n def test_get_dispersion(self):\n \"\"\"\n lattice: Test the method Lattice1d.get_dispersion().\n \"\"\"\n Periodic_Atom_Chain = Lattice1d(num_cell=8, boundary=\"periodic\")\n [knxA, val_kns] = Periodic_Atom_Chain.get_dispersion()\n kB = np.array([[-3.14159265],\n [-2.35619449],\n [-1.57079633],\n [-0.78539816],\n [0.],\n [0.78539816],\n [1.57079633],\n [2.35619449]])\n valB = np.array([[2., 1.41421356, 0., -1.41421356, -2.,\n -1.41421356, 0., 1.41421356]])\n assert_(np.max(abs(knxA-kB)) < 1.0E-6)\n assert_(np.max(abs(val_kns-valB)) < 1.0E-6)\n\n # SSH model with num_cell = 4 and two orbitals, two spins\n # cell_site_dof = [2,2]\n t_intra = -0.5\n t_inter = -0.6\n H_cell = tensor(\n Qobj(np.array([[0, t_intra], [t_intra, 0]])), qeye([2, 2]))\n inter_cell_T = tensor(\n Qobj(np.array([[0, 0], [t_inter, 0]])), 
qeye([2, 2]))\n\n SSH_comp = Lattice1d(num_cell=6, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[2, 2],\n Hamiltonian_of_cell=H_cell, inter_hop=inter_cell_T)\n [kScomp, vcomp] = SSH_comp.get_dispersion()\n kS = np.array([[-3.14159265],\n [-2.0943951],\n [-1.04719755],\n [0.],\n [1.04719755],\n [2.0943951]])\n Oband = np.array([-0.1, -0.55677644, -0.9539392, -1.1,\n -0.9539392, -0.55677644])\n vS = np.array([Oband, Oband, Oband, Oband, -Oband, -Oband, -Oband,\n -Oband])\n assert_(np.max(abs(kScomp-kS)) < 1.0E-6)\n assert_(np.max(abs(vcomp-vS)) < 1.0E-6)\n\n def test_cell_periodic_parts(self):\n \"\"\"\n lattice: Test the method Lattice1d.array_of_unk().\n \"\"\"\n # Coupled Resonator Optical Waveguide(CROW) Example(PhysRevB.99.224201)\n J = 2\n eta = np.pi/4\n H_cell = Qobj(np.array([[0, J*np.sin(eta)], [J*np.sin(eta), 0]]))\n inter_cell_T0 = (J/2) * Qobj(np.array(\n [[np.exp(eta * 1j), 0], [0, np.exp(-eta*1j)]]))\n inter_cell_T1 = (J/2) * Qobj(np.array([[0, 1], [1, 0]]))\n\n inter_cell_T = [inter_cell_T0, inter_cell_T1]\n\n CROW_lattice = Lattice1d(num_cell=4, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T)\n (kxA, val_kns) = CROW_lattice.get_dispersion()\n (knxA, vec_kns) = CROW_lattice.cell_periodic_parts()\n (knxA, qH_ks) = CROW_lattice.bulk_Hamiltonians()\n for i in range(4):\n for j in range(2):\n if val_kns[j][i] == 0:\n E_V = Qobj(vec_kns[i, j, :])\n eE_V = qH_ks[i] * E_V\n assert_(np.max(abs(eE_V)) < 1.0E-12)\n else:\n E_V = Qobj(vec_kns[i, j, :])\n eE_V = qH_ks[i] * E_V\n qE_V = np.divide(eE_V, E_V)\n oE = val_kns[j][i] * np.ones((2, 1))\n assert_(np.max(abs(oE-qE_V)) < 1.0E-12)\n\n def test_bulk_Hamiltonians(self):\n \"\"\"\n lattice: Test the method Lattice1d.bulk_Hamiltonian_array().\n \"\"\"\n # Coupled Resonator Optical Waveguide(CROW) Example(PhysRevB.99.224201)\n J = 2\n eta = np.pi/2\n H_cell = Qobj(np.array([[0, J*np.sin(eta)], [J*np.sin(eta), 0]]))\n inter_cell_T0 = 
(J/2)*Qobj(np.array([[np.exp(eta * 1j), 0],\n [0, np.exp(-eta*1j)]]))\n inter_cell_T1 = (J/2)*Qobj(np.array([[0, 1], [1, 0]]))\n inter_cell_T = [inter_cell_T0, inter_cell_T1]\n\n CROW_lattice = Lattice1d(num_cell=4, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T)\n (knxA, qH_ks) = CROW_lattice.bulk_Hamiltonians()\n Hk0 = np.array([[0.+0.j, 0.+0.j],\n [0.+0.j, 0.+0.j]])\n Hk1 = np.array([[2.+0.j, 2.+0.j],\n [2.+0.j, -2.+0.j]])\n Hk2 = np.array([[0.+0.j, 4.+0.j],\n [4.+0.j, 0.+0.j]])\n Hk3 = np.array([[-2.+0.j, 2.+0.j],\n [2.+0.j, 2.+0.j]])\n qHks = np.array([None for i in range(4)])\n qHks[0] = Qobj(Hk0)\n qHks[1] = Qobj(Hk1)\n qHks[2] = Qobj(Hk2)\n qHks[3] = Qobj(Hk3)\n for i in range(4):\n np.testing.assert_array_almost_equal(qH_ks[i], qHks[i], decimal=8)\n\n def test_bloch_wave_functions(self):\n \"\"\"\n lattice: Test the method Lattice1d.bloch_wave_functions().\n \"\"\"\n # Coupled Resonator Optical Waveguide(CROW) Example(PhysRevB.99.224201)\n J = 2\n eta = np.pi/2\n H_cell = Qobj(np.array([[0, J*np.sin(eta)], [J*np.sin(eta), 0]]))\n inter_cell_T0 = (J/2)*Qobj(np.array([[np.exp(eta * 1j), 0], [0,\n np.exp(-eta*1j)]]))\n inter_cell_T1 = (J/2)*Qobj(np.array([[0, 1], [1, 0]]))\n\n inter_cell_T = [inter_cell_T0, inter_cell_T1]\n\n CROW_lattice = Lattice1d(num_cell=4, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T)\n CROW_Haml = CROW_lattice.Hamiltonian()\n\n H_CROW = Qobj(np.array([[0.+0.j, 2.+0.j, 0.+1.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.-1.j, 1.+0.j],\n [2.+0.j, 0.+0.j, 1.+0.j, 0.-1.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+1.j],\n [0.-1.j, 1.+0.j, 0.+0.j, 2.+0.j, 0.+1.j, 1.+0.j, 0.+0.j, 0.+0.j],\n [1.+0.j, 0.+1.j, 2.+0.j, 0.+0.j, 1.+0.j, 0.-1.j, 0.+0.j, 0.+0.j],\n [0.+0.j, 0.+0.j, 0.-1.j, 1.+0.j, 0.+0.j, 2.+0.j, 0.+1.j, 1.+0.j],\n [0.+0.j, 0.+0.j, 1.+0.j, 0.+1.j, 2.+0.j, 0.+0.j, 1.+0.j, 0.-1.j],\n [0.+1.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.-1.j, 
1.+0.j, 0.+0.j, 2.+0.j],\n [1.+0.j, 0.-1.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+1.j, 2.+0.j, 0.+0.j]]),\n dims=[[4, 2], [4, 2]])\n # Check for CROW with num_cell = 4\n assert_(np.max(abs(CROW_Haml-H_CROW)) < 1.0E-6) # 1.0E-8 worked too\n eigen_states = CROW_lattice.bloch_wave_functions()\n for i in range(8):\n if eigen_states[i][0] == 0:\n E_V = eigen_states[i][1]\n eE_V = CROW_Haml * E_V\n assert_(np.max(abs(eE_V)) < 1.0E-10)\n else:\n E_V = eigen_states[i][1]\n eE_V = CROW_Haml * E_V\n qE_V = np.divide(eE_V, E_V)\n oE = eigen_states[i][0] * np.ones((8, 1))\n assert_(np.max(abs(oE-qE_V)) < 1.0E-10)\n\n def test_CROW(self):\n \"\"\"\n lattice: Test the methods of Lattice1d in a CROW model.\n \"\"\"\n # Coupled Resonator Optical Waveguide(CROW) Example(PhysRevB.99.224201)\n J = 2\n eta = np.pi/4\n H_cell = Qobj(np.array([[0, J * np.sin(eta)], [J * np.sin(eta), 0]]))\n inter_cell_T0 = (J/2)*Qobj(np.array([[np.exp(eta * 1j), 0],\n [0, np.exp(-eta*1j)]]))\n inter_cell_T1 = (J/2)*Qobj(np.array([[0, 1], [1, 0]]))\n\n inter_cell_T = [inter_cell_T0, inter_cell_T1]\n\n CROW_lattice = Lattice1d(num_cell=4, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T)\n CROW_Haml = CROW_lattice.Hamiltonian()\n\n # Benchmark answers\n H_CROW = Qobj([[0.+0.j, 1.41421356+0.j, 0.70710678+0.70710678j, 1.+0.j,\n 0.+0.j, 0.+0.j, 0.70710678-0.70710678j, 1.+0.j],\n [1.41421356+0.j, 0.+0.j, 1.+0.j, 0.70710678-0.70710678j,\n 0.+0.j, 0.+0.j, 1.+0.j, 0.70710678+0.70710678j],\n [0.70710678-0.70710678j, 1.+0.j, 0.+0.j, 1.41421356+0.j,\n 0.70710678+0.70710678j, 1.+0.j, 0.+0.j, 0.+0.j],\n [1.+0.j, 0.70710678+0.70710678j, 1.41421356+0.j, 0.+0.j,\n 1.+0.j, 0.70710678-0.70710678j, 0.+0.j, 0.+0.j],\n [0.+0.j, 0.+0.j, 0.70710678-0.70710678j, 1.+0.j,\n 0.+0.j, 1.41421356+0.j, 0.70710678+0.70710678j, 1.+0.j],\n [0.+0.j, 0.+0.j, 1.+0.j, 0.70710678+0.70710678j,\n 1.41421356+0.j, 0.+0.j, 1.+0.j, 0.70710678-0.70710678j],\n [0.70710678+0.70710678j, 1.+0.j, 
0.+0.j, 0.+0.j,\n 0.70710678-0.70710678j, 1.+0.j, 0.+0.j, 1.41421356+0.j],\n [1.+0.j, 0.70710678-0.70710678j, 0.+0.j, 0.+0.j,\n 1.+0.j, 0.70710678+0.70710678j, 1.41421356+0.j, 0.+0.j]],\n dims=[[4, 2], [4, 2]])\n\n # Check for CROW with num_cell = 4\n assert_(np.max(abs(CROW_Haml-H_CROW)) < 1.0E-6) # 1.0E-8 worked too\n\n (kxA, val_kns) = CROW_lattice.get_dispersion()\n kCR = np.array([[-3.14159265],\n [-1.57079633],\n [0.],\n [1.57079633]])\n vCR = np.array([[-2. , -2. , -2. , -2.],\n [-0.82842712, 2. , 4.82842712 , 2.]])\n assert_(np.max(abs(kxA-kCR)) < 1.0E-6)\n assert_(np.max(abs(val_kns-vCR)) < 1.0E-6)\n\n eigen_states = CROW_lattice.bloch_wave_functions()\n for i in range(8):\n E_V = eigen_states[i][1]\n eE_V = CROW_Haml * E_V\n qE_V = np.divide(eE_V, E_V)\n oE = eigen_states[i][0] * np.ones((8, 1))\n assert_(np.max(abs(oE-qE_V)) < 1.0E-10)\n (knxA, qH_ks) = CROW_lattice.bulk_Hamiltonians()\n (knxA, vec_kns) = CROW_lattice.cell_periodic_parts()\n\n Hk0 = np.array([[-1.41421356+0.j, -0.58578644+0.j],\n [-0.58578644+0.j, -1.41421356+0.j]])\n Hk1 = np.array([[1.41421356+0.j, 1.41421356+0.j],\n [1.41421356+0.j, -1.41421356+0.j]])\n Hk2 = np.array([[1.41421356+0.j, 3.41421356+0.j],\n [3.41421356+0.j, 1.41421356+0.j]])\n Hk3 = np.array([[-1.41421356+0.j, 1.41421356+0.j],\n [1.41421356+0.j, 1.41421356+0.j]])\n qHks = np.array([None for i in range(4)])\n qHks[0] = Qobj(Hk0)\n qHks[1] = Qobj(Hk1)\n qHks[2] = Qobj(Hk2)\n qHks[3] = Qobj(Hk3)\n for i in range(4):\n np.testing.assert_array_almost_equal(qH_ks[i], qHks[i], decimal=8)\n for i in range(4):\n for j in range(2):\n E_V = Qobj(vec_kns[i, j, :])\n eE_V = qH_ks[i] * E_V\n qE_V = np.divide(eE_V, E_V)\n oE = val_kns[j][i]*np.ones((2, 1))\n assert_(np.max(abs(oE-qE_V)) < 1.0E-12)\n\n # A test on CROW lattice dispersion with a random number of cells and\n # random values of eta\n J = 1\n num_cell = np.random.randint(2, 60)\n eta = 2*np.pi*np.random.random()\n\n H_cell = Qobj(np.array([[0, J * np.sin(eta)], [J * 
np.sin(eta), 0]]))\n inter_cell_T0 = (J/2) * Qobj(np.array([[np.exp(eta * 1j), 0],\n [0, np.exp(-eta*1j)]]))\n inter_cell_T1 = (J/2) * Qobj(np.array([[0, 1], [1, 0]]))\n inter_cell_T = [inter_cell_T0, inter_cell_T1]\n\n CROW_Random = Lattice1d(num_cell=num_cell, boundary=\"periodic\",\n cell_num_site=1, cell_site_dof=[2],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T)\n a = 1 # The unit cell length is always considered 1\n kn_start = 0\n kn_end = 2*np.pi/a\n Ana_val_kns = np.zeros((2, num_cell), dtype=float)\n knxA = np.zeros((num_cell, 1), dtype=float)\n\n for ks in range(num_cell):\n knx = kn_start + (ks*(kn_end-kn_start)/num_cell)\n if knx >= np.pi:\n knxA[ks, 0] = knx - 2 * np.pi\n else:\n knxA[ks, 0] = knx\n knxA = np.roll(knxA, np.floor_divide(num_cell, 2))\n\n for ks in range(num_cell):\n knx = knxA[ks, 0]\n # Ana_val_kns are the analytical bands\n val0 = np.cos(knx) * np.cos(eta) + np.sqrt(2 * np.sin(eta) ** 2\n + (np.cos(knx) * np.cos(eta)) ** 2\n + 2 * np.sin(eta) * np.cos(knx))\n val1 = np.cos(knx) * np.cos(eta) - np.sqrt(2 * np.sin(eta) ** 2\n + (np.cos(knx) * np.cos(eta)) ** 2\n + 2 * np.sin(eta) * np.cos(knx))\n vals = [val0, val1]\n Ana_val_kns[0, ks] = np.min(vals)\n Ana_val_kns[1, ks] = np.max(vals)\n\n (kxA, val_kns) = CROW_Random.get_dispersion()\n assert_(np.max(abs(kxA-knxA)) < 1.0E-8)\n assert_(np.max(abs(val_kns-Ana_val_kns)) < 1.0E-8)\n\n def test_SSH(self):\n \"\"\"\n lattice: Test the methods of Lattice1d in a SSH model.\n \"\"\"\n # SSH model with num_cell = 4 and two orbitals, two spins\n # cell_site_dof = [2,2]\n t_intra = -0.5\n t_inter = -0.6\n H_cell = Qobj(np.array([[0, t_intra], [t_intra, 0]]))\n inter_cell_T = Qobj(np.array([[0, 0], [t_inter, 0]]))\n\n SSH_lattice = Lattice1d(num_cell=5, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[1],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T) \n Ham_Sc = SSH_lattice.Hamiltonian()\n\n Hin = Qobj([[0, -0.5], [-0.5, 0]])\n Ht = Qobj([[0, 0], [-0.6, 0]])\n D = 
qeye(5)\n T = np.diag(np.zeros(4)+1, 1)\n Tdag = np.diag(np.zeros(4)+1, -1)\n Tdag[0][4] = 1\n T[4][0] = 1\n T = Qobj(T)\n Tdag = Qobj(Tdag)\n H_Sc = tensor(D, Hin) + tensor(T, Ht) + tensor(Tdag, Ht.dag())\n # check for SSH model with num_cell = 5 \n assert_(Ham_Sc == H_Sc)\n\n (kxA,val_ks) = SSH_lattice.get_dispersion()\n kSSH = np.array([[-2.51327412],\n [-1.25663706],\n [ 0. ],\n [ 1.25663706],\n [ 2.51327412]])\n vSSH = np.array([[-0.35297281, -0.89185772, -1.1, -0.89185772, -0.35297281],\n [ 0.35297281, 0.89185772, 1.1, 0.89185772, 0.35297281]])\n assert_(np.max(abs(kxA-kSSH)) < 1.0E-6)\n assert_(np.max(abs(val_ks-vSSH)) < 1.0E-6)\n\n # A test on SSH lattice dispersion with a random number of cells and\n # random values of t_inter and t_intra\n num_cell = np.random.randint(2, 60)\n t_intra = -np.random.random()\n t_inter = -np.random.random()\n H_cell = Qobj(np.array([[0, t_intra], [t_intra, 0]]))\n inter_cell_T = Qobj(np.array([[0, 0], [t_inter, 0]]))\n SSH_Random = Lattice1d(num_cell=num_cell, boundary=\"periodic\",\n cell_num_site=2, cell_site_dof=[1],\n Hamiltonian_of_cell=H_cell,\n inter_hop=inter_cell_T)\n a = 1 # The unit cell length is always considered 1\n kn_start = 0\n kn_end = 2*np.pi/a\n Ana_val_kns = np.zeros((2, num_cell), dtype=float)\n knxA = np.zeros((num_cell, 1), dtype=float)\n\n for ks in range(num_cell):\n knx = kn_start + (ks * (kn_end-kn_start)/num_cell)\n if knx >= np.pi:\n knxA[ks, 0] = knx - 2 * np.pi\n else:\n knxA[ks, 0] = knx\n knxA = np.roll(knxA, np.floor_divide(num_cell, 2))\n\n for ks in range(num_cell):\n knx = knxA[ks, 0]\n # Ana_val_kns are the analytical bands\n Ana_val_kns[0, ks] = -np.sqrt(t_intra ** 2 + t_inter ** 2\n + 2 * t_intra * t_inter * np.cos(knx))\n Ana_val_kns[1, ks] = np.sqrt(t_intra ** 2 + t_inter ** 2\n + 2 * t_intra * t_inter * np.cos(knx))\n (kxA, val_kns) = SSH_Random.get_dispersion()\n assert_(np.max(abs(val_kns-Ana_val_kns)) < 1.0E-13)\n\nif __name__ == \"__main__\":\n 
run_module_suite()\n","sub_path":"qutip/tests/test_lattice.py","file_name":"test_lattice.py","file_ext":"py","file_size_in_byte":29484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"611545164","text":"'''PlayerSkeletonA.py\nThe beginnings of an agent that might someday play Baroque Chess.\n\n'''\n\nimport BC_state_etc as BC\nimport piece_movement as PM\nimport winTester as WT\nfrom random import randint\nimport zobrist as Z\n\nWHITE_PIECES = 0\nBLACK_PIECES = 0\nNORTH = 0; SOUTH = 1; WEST = 2; EAST = 3; NW = 4; NE = 5; SW = 6; SE = 7\nDIRECTIONS = [(-1,0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]\nPIECES = {0:'-',2:'p',3:'P',4:'c',5:'C',6:'l',7:'L',8:'i',9:'I',\n 10:'w',11:'W',12:'k',13:'K',14:'f',15:'F'}\nALPHA_BETA_CUTOFFS = 0\nSTATES_EXPANDED = 0\nSTATIC_EVALS_PERFORMED = 0\n\nZOBRIST_INDEXES = {'p':0, 'P':1, 'c':2, 'C':3, 'l':4, 'L':5, 'i':6, 'I':7,\n 'w':8, 'W':9, 'k':10, 'K':11, 'f':12, 'F':13, }\nZOBRIST_NUMBERS = []\nZHASH = None\n\nTRANSPOSITION_TABLE = []\nTABLE_SIZE = 0\n\nclass Hash_Entry:\n def __init__(self, key, eval=None, type=None, ply=None, best_move=None):\n self.key = key\n self.eval = eval\n self.type = type\n self.ply = ply\n self.best_move = best_move\n\ndef makeMove(currentState, currentRemark, timelimit):\n global ALPHA_BETA_CUTOFFS\n ALPHA_BETA_CUTOFFS = 0\n # Compute the new state for a move.\n # This is a placeholder that just copies the current state.\n hash = Z.zhash(currentState.board)\n #print(hash)\n current_state = BC.BC_state(currentState.board, currentState.whose_move, hash)\n #print(current_state.hash)\n newState = BC.BC_state(currentState.board)\n\n board = newState.board\n # Fix up whose turn it will be.\n newState.whose_move = 1 - currentState.whose_move\n\n # Construct a representation of the move that goes from the\n # currentState to the newState.\n # Here is a placeholder in the right format but with made-up\n # numbers:\n score, move = iterative_deepening_alpha_beta(current_state, current_state.whose_move, 3)\n new_state = PM.move(move[0], move[1], current_state, move[2])\n #print(move)\n move = move[0:2]\n #print(score)\n #print(newState)\n # Make up a new remark\n 
newRemark = \"I'll think harder in some future game. Here's my move\"\n print(ALPHA_BETA_CUTOFFS)\n print(score)\n print(STATES_EXPANDED)\n print(STATIC_EVALS_PERFORMED)\n return [[move, new_state], newRemark]\n\ndef nickname():\n return \"Newman\"\n\ndef introduce():\n return \"I'm Newman Barry, a newbie Baroque Chess agent.\"\n\ndef prepare(player2Nickname):\n global ZOBRIST_NUMBERS; global TRANSPOSITION_TABLE; global TABLE_SIZE\n print(\"hello\")\n TRANPOSITION_TABLE = [None] * 1000000\n TABLE_SIZE = len(TRANSPOSITION_TABLE)\n Z.init_zhash()\n print(\"ok\")\n pass\n\n\ndef iterative_deepening_alpha_beta(state, whoseMove, ply):\n alpha = -100000\n beta = 100000\n best_move = None\n for i in range(1, ply):\n score, move = minimax(state, whoseMove, i, alpha, beta)\n if whoseMove == 1:\n if score > alpha:\n alpha = score\n best_move = move\n else:\n if score < beta:\n beta = score\n best_move = move\n #whoseMove = best_move.whose_move\n return score, best_move\n\ndef minimax(state, whoseMove, plyLeft, alpha, beta):\n global ALPHA_BETA_CUTOFFS; global STATES_EXPANDED\n\n # if plyLeft == 0 or if the state results in a win return its evaluation\n if plyLeft == 0 or WT.winTester(state) != \"No win\":\n return (static_eval(state), state)\n\n # check hashtable for state information and see if the agent can return stored values instead of searching\n index = state.hash % TABLE_LENGTH\n hash_entry = ZHASH_TABLE[index]\n if hash_entry != None and hash_entry.key == state.hash and hash_entry.ply >= plyLeft:\n if hash_entry.type == 'Exact':\n return hash_entry.eval, hash_entry.best_move\n if hash_entry.type == 'Beta Eval':\n if hash_entry.eval >= beta:\n return hash_entry.eval, hash_entry.best_move\n if hash_entry.type == 'Alpha Eval':\n if hash_entry.eval <= alpha:\n return hash_entry.eval, hash_entry.best_move\n\n if plyLeft > 1:\n ZHASH_TABLE[index] = hash_entry(state.hash)\n # recursively go through the successor states\n best_move = None\n for move in successors(state, 
whoseMove):\n STATES_EXPANDED += 1\n s = PM.move(move[0], move[1], state, move[2])\n new_value, newS = minimax(s, s.whose_move, plyLeft - 1, alpha, beta)\n if (whoseMove == 1 and new_value > alpha) \\\n or (whoseMove == 0 and new_value < beta):\n if whoseMove == 1:\n alpha = newVal\n is_exact = True\n else:\n is_exact = True\n beta = newVal\n best_move = move\n\n # prune off remaining children as the best move has already been found\n if beta <= alpha:\n ALPHA_BETA_CUTOFFS += 1\n if plyLeft > 1:\n ZHASH_TABLE[index].eval = new_value\n ZHASH_TABLE[index].ply = plyLeft\n ZHASH_TABLE[index].type = 'Beta Eval'\n ZHASH_TABLE[index].best_move = best_move\n break\n\n # this state has fully evaluated all its children\n is_exact = True\n score = alpha\n if whoseMove == 0:\n score = beta\n return score, best_move\n\ndef successors(state, whoseMove):\n board = state.board\n successors = []\n if WT.winTester(state) != \"No win\":\n return successors\n for r in range(8):\n for c in range(8):\n piece = board[r][c]\n if piece != 0 and piece % 2 == whoseMove:\n if not PM.is_frozen((r,c), board, whoseMove):\n #print(piece)\n limit = 8\n if PIECES[piece] in ['p', 'P']:\n limit = 4\n for i in range(limit):\n get_moves((r,c), state, i, whoseMove, successors)\n #print(len(successors))\n #print(\"\\n\")\n return successors\n\ndef get_moves(location, state, dir, whoseMove, successors):\n #successors = []\n dest = PM.get_next_space(location, dir)\n piece = state.board[location[0]][location[1]]\n enemy = 1 - whoseMove\n while dest is not None:\n dest_piece = state.board[dest[0]][dest[1]]\n if dest_piece != 0 and dest_piece % 2 == whoseMove: break\n if PIECES[piece] not in ['i', 'I', 'l', 'L', 'k','K']:\n if dest_piece != 0 and dest_piece % 2 == enemy: break\n if PM.can_move(location, dest, state.board, dir):\n #print(location)\n #print(dest)\n #print(\"\\n\")\n move = (location, dest, dir)\n successors.append(move)\n #new_state = PM.move(location, dest, state, dir)\n 
#successors.append(new_state)\n if piece in ['k', 'K']: break\n dest = PM.get_next_space(dest, dir)\n #print(dest)\n #print(\"\\n\")\n return successors\n\ndef static_eval(state):\n global STATIC_EVALS_PERFORMED\n STATIC_EVALS_PERFORMED += 1\n if PIECES[state.board[2][7]] =='P':\n return 1000\n else:\n return -100\n #copy = BC.BC_state(board)\n board = state.board\n possible_win = WT.winTester(state)\n if possible_win != \"No win\":\n if possible_win == \"Win for WHITE\":\n return 1000\n else:\n return -1000\n return evaluate_piece_strength(board, 1) + evaluate_piece_strength(board, 0)\\\n + round(0.25 * (mobility(board, 1) - mobility(board, 0)))\\\n + round(0.25 * center_control(board, 1) - center_control(board, 0))\\\n + (king_safety(board, 1) - king_safety(board, 0))\n\ndef mobility(board, side):\n enemy = 1 - side\n moves = 0\n #copy = BC.BC_state(board, side)\n #print(copy)\n for r in range(8):\n for c in range(8):\n piece = board[r][c]\n if PIECES[piece] != '-' and piece % 2 == side:\n limit = 8\n if PIECES[piece] in ['p','P']:\n limit = 4\n for i in range(limit):\n dest = PM.get_next_space((r,c), i)\n while dest is not None:\n dest_piece = board[dest[0]][dest[1]]\n if PIECES[dest_piece] != '-':\n if dest_piece % 2 == side: break\n if PIECES[piece] not in ['i','I','l','L','k','K']:\n if dest_piece % 2 == enemy: break\n if PM.can_move((r,c), dest, board, i):\n moves += 1\n if PIECES[piece] in ['k','K']: break\n dest = PM.get_next_space(dest, i)\n #print(\"\\n\")\n #print(moves)\n #print(copy)\n return moves\n\n\ndef evaluate_piece_strength(board, side):\n strength = 0\n for r in range(8):\n for c in range(8):\n piece = board[r][c]\n if piece != 0 and piece % 2 == side:\n strength += piece_weights(PIECES[piece], side)\n return strength\n\ndef center_control(board, side):\n spaces = 0\n line1 = PM.get_line((3,0), (3,7), board, 3)\n line2 = PM.get_line((4,0), (4,7), board, 3)\n for i in range(3, 5):\n for j in range(8):\n if board[i][j] != 0 and board[i][j] % 2 
== side:\n spaces += 5\n return spaces\n\ndef king_safety(board, side):\n score = 0\n king = 12\n if side == 1:\n king = 13\n enemy = 1 - side\n king_location = PM.get_piece_location(king, board)\n if PM.is_king_in_check(board, king_location, side):\n return 100\n else:\n for i in range(8):\n next = PM.get_next_space(king_location, i)\n if next == None: continue\n if board[next[0]][next[1]] == 0:\n score -= 2\n elif board[next[0]][next[1]] % 2 == enemy:\n score -= 10\n else:\n score += 2\n return score\n\n\n\ndef piece_weights(piece, side):\n multiplier = [-1, 1]\n mult = multiplier[side]\n if piece in ['p''P']:\n return 1 * mult\n if piece in ['w','W']:\n return 3 * mult\n if piece in ['l','L']:\n return 5 * mult\n if piece in ['c', 'C', 'f','F']:\n return 7 * mult\n if piece in ['i', 'I']:\n return 9 * mult\n else:\n return 0\n\ndef initZhash():\n global ZOBRIST_NUMBERS\n for i in range(8):\n for j in range(8):\n for p in range(14):\n zobristnum[i][j][p] = \\\n randint(0, \\\n 4294967296)\ndef zHash(board):\n global ZOBRIST_NUMBERS\n hash = 0\n for r in range(8):\n for c in range(8):\n piece = PIECES[board[r][c]]\n if piece != '-':\n index = ZOBRIST_INDEXES[piece]\n hash ^= ZOBRIST_NUMBERS[r][c][index]\n return hash\n\ndef update_zhash_piece_movement(start, dest, piece, hash):\n piece = PIECES[piece]\n index = ZOBRIST_INDEXES[piece]\n hash ^= ZOBRIST_NUMBERS[start[0]][start[1]][index]\n hash ^= ZOBRIST_NUMBERS[dest[0]][dest[1]][index]\n return hash\n\ndef update_zhash_remove_piece(location, piece, hash):\n piece = PIECES[piece]\n index = ZOBRIST_INDEXES[piece]\n hash ^= ZOBRIST_NUMBERS[location[0]][location[1]][index]\n return hash\n\n","sub_path":"agent3.py","file_name":"agent3.py","file_ext":"py","file_size_in_byte":11268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"249961356","text":"#!/usr/bin/env python3\n\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom pathlib import Path\n\nfrom data import Config, ExerciseInfo\n\n# Allow high-performance tests to be skipped\nALLOW_SKIP = ['alphametics', 'largest-series-product']\n\n\ndef check_assignment(exercise: ExerciseInfo, quiet=False) -> int:\n # Returns the exit code of the tests\n workdir = Path(tempfile.mkdtemp(exercise.slug))\n solution_file = exercise.solution_stub.name\n try:\n test_file_out = workdir / exercise.test_file.name\n if exercise.slug in ALLOW_SKIP:\n shutil.copyfile(exercise.test_file, test_file_out)\n else:\n with exercise.test_file.open('r') as src_file:\n lines = [line for line in src_file.readlines()\n if not line.strip().startswith('@unittest.skip')]\n with test_file_out.open('w') as dst_file:\n dst_file.writelines(lines)\n shutil.copyfile(exercise.exemplar_file, workdir / solution_file)\n kwargs = {}\n if quiet:\n kwargs['stdout'] = subprocess.DEVNULL\n kwargs['stderr'] = subprocess.DEVNULL\n return subprocess.run([sys.executable, '-m', 'pytest', test_file_out], **kwargs).returncode\n finally:\n shutil.rmtree(workdir)\n\n\ndef main():\n config = Config.load()\n exercises = config.exercises.all()\n if len(sys.argv) >= 2:\n # test specific exercises\n exercises = [\n e for e in exercises if e.slug in sys.argv[1:]\n ]\n\n failures = []\n for exercise in exercises:\n print('# ', exercise.slug)\n if not exercise.test_file:\n print('FAIL: File with test cases not found')\n failures.append('{} (FileNotFound)'.format(exercise.slug))\n else:\n if check_assignment(exercise):\n failures.append('{} (TestFailed)'.format(exercise.slug))\n print('')\n\n print('TestEnvironment:', sys.executable.capitalize(), '\\n\\n')\n\n if failures:\n print('FAILURES: ', ', '.join(failures))\n raise SystemExit(1)\n else:\n print('SUCCESS!')\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"bin/test_exercises.py","file_name":"test_exercises.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"35088353","text":"import datetime\nimport quandl\nimport pandas as pd\nimport numpy as np\nimport pandas_datareader as pdr\nimport fix_yahoo_finance as yf\n\n\n#aapl = quandl.get(\"WIKI/AAPL\", start_date=\"2006-10-01\",\n# end_date=\"2012-01-01\")\n\n\naapl = yf.download('AAPL',\n start=datetime.datetime(2006, 10, 1),\n end=datetime.datetime(2012, 1, 1))\n\n\n# inspect columns\nprint(aapl.columns)\n\n\n# Assign 'Adj Close' to 'daily_close'\ndaily_close = aapl[['Adj Close']]\n\n\n# Daily returns\ndaily_pct_change = daily_close.pct_change()\n\n\n# Replace NA values with 0\ndaily_pct_change.fillna(0, inplace=True)\n\n# Inspect daily returns\nprint(daily_pct_change)\n\n\n# Daily log returns\ndaily_log_returns = np.log(daily_close.pct_change()+1)\n\nprint('daily log returns')\nprint(daily_log_returns)\n\n\n###\n# Monthly and Quarterly Returns\n##\n\nprint('Monthly Returns')\n# Resample 'aapl' to business months, take last observation as value\nmonthly = aapl.resample('BM').apply(lambda x: x[-1])\n\n# Calculate the monthly percentage change\nmonthly.pct_change()\n\n\n# Resample 'aapl' to quarters, take the mean as value per quarter\nquarter = aapl.resample(\"4M\").mean()\n\n# Calculate the quarterly percentage change\nquarter.pct_change()","sub_path":"returns.py","file_name":"returns.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"59555571","text":"import random\nfrom pathlib import Path\nfrom typing import Union\n\nimport ujson as json\nfrom botoy import FriendMsg, GroupMsg, MsgTypes, S\nfrom botoy import async_decorators as deco\n\ncurFileDir = Path(__file__).parent # 当前文件路径\n\nwith open(curFileDir / \"onset.json\", \"r\", encoding=\"utf-8\") as f:\n data: list = json.load(f)[\"data\"]\n\n\n@deco.ignore_botself\n@deco.these_msgtypes(MsgTypes.TextMsg)\n@deco.startswith(\"发病\")\nasync def main(ctx: Union[GroupMsg, FriendMsg]):\n name = ctx.Content[2:].strip()\n if name.isspace() or len(name) == 0 or \"[ATALL()]\" in name:\n await S.atext(\"要对谁发病捏?\")\n return\n content: str = random.choice(data)[\"content\"]\n await S.atext(content.replace(\"{{user.name}}\", name))\n\n\nreceive_group_msg = receive_friend_msg = main\n","sub_path":"plugins/bot_onset/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"399912004","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n #Time - O(n) ; n is length of linkedlist\n #Space - O(1)\n if head is None or head.next is None: #handling edge cases.\n return head\n #Intiate variables cur, prev, next to store the 3 nodes temporarily which we are working on currently.\n cur = head.next\n prev = head\n prev.next = None\n next = cur.next\n while not next is None:\n cur.next = prev #reversing the link\n #Then slide the variables one one position right.\n prev = cur\n cur = next\n next = next.next\n cur.next = prev #Finally after traversing the linkedlist, reverse the last link\n return cur","sub_path":"week4/ReverseLinkedlist.py","file_name":"ReverseLinkedlist.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"586796803","text":"class Node(object):\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\n# 前序 递归\ndef pre_order_recur(root):\n if not root:\n return\n print(root.val)\n pre_order_recur(root.left)\n pre_order_recur(root.right)\n\n\n# 中序 递归\ndef in_order_recur(root):\n if not root:\n return\n in_order_recur(root.left)\n print(root.val)\n in_order_recur(root.right)\n\n\n# 后序 递归\ndef post_order_recur(root):\n if not root:\n return\n post_order_recur(root.left)\n post_order_recur(root.right)\n print(root.val)\n\n\n# 前序 非递归\ndef pre_order_none_recur(root):\n if not root:\n return\n stack = [root]\n while stack:\n cur = stack.pop()\n print(cur.val)\n if cur.right:\n stack.append(cur.right)\n if cur.left:\n stack.append(cur.left)\n\n\n# 中序 非递归\ndef in_order_none_recur(root):\n if not root:\n return\n stack = []\n while stack or root:\n if root:\n stack.append(root)\n root = root.left\n else:\n root = stack.pop()\n print(root.val)\n root = root.right\n\n\n# 后序 非递归\ndef post_order_none_recur(root):\n if not root:\n return\n stack = []\n mark_node = None\n while stack or root:\n if root:\n stack.append(root)\n root = root.left\n elif stack[-1].right != mark_node:\n root = stack[-1].right\n mark_node = None\n else:\n mark_node = stack.pop()\n print(mark_node.val)\n\n\nif __name__ == '__main__':\n root = Node(1, Node(2, Node(4), Node(5, Node(7))), Node(3, Node(6)))\n print('前序递归的结果:'), pre_order_recur(root)\n print('前序非递归的结果:'), pre_order_none_recur(root)\n print('中序递归的结果:'), in_order_recur(root)\n print('中序非递归的结果:'), in_order_none_recur(root)\n print('后序递归的结果:'), post_order_recur(root)\n print('后序非递归的结果:'), post_order_none_recur(root)\n","sub_path":"study/suanfa/2tree.py","file_name":"2tree.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"281724558","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom management import views\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^home/', views.home, name='home'),\n url(r'^cart/', views.cart, name='cart'),\n url(r'^clear_cart/',views.clear_cart, name='clear_cart'),\n url(r'^history/order_list/$', views.order_list,name='order_list'),\n url(r'^history/', views.history, name='history'),\n url(r'^about/', views.about, name='about'),\n url(r'^$', views.index, name='index'),\n url(r'^signup/$', views.signup,name='signup'),\n url(r'^order/$', views.order,name='order'),\n url(r'^login/$', views.login, name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^setpassword/$', views.setpassword, name='set_password'),\n url(r'^add/$', views.add, name='add_instrument'),\n url(r'^add_to_cart/$', views.add_to_cart, name='add_to_cart'),\n url(r'^delete_from_cart/$', views.delete_from_cart, name='delete_from_cart'),\n url(r'^view/$', views.view, name='view_instrument'),\n url(r'^view/detail/$', views.detail, name='view_detail'),\n url(r'^image/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_PATH}),\n)\n","sub_path":"weborder/weborder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"253113393","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport MySQLdb\n\n# 打开数据库连接\ndb = MySQLdb.connect(\"localhost\",\"testuser\",\"test123\",\"TESTDB\" )\n\n# 使用cursor()方法获取操作游标 \ncursor = db.cursor()\n\nsql = \"DELETE FROM EMPLOYEE WHERE AGE > '%d'\"%(20)\ntry:\n cursor.execute()\n db.commit()\nexcept:\n db.rollback()\n\ndb.close()\n","sub_path":"python/py MySQL4.py","file_name":"py MySQL4.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"44380985","text":"#!/usr/bin/python3\n'''\n This module contains a Python script that takes in a string and sends\n a search request to the Star Wars API\n'''\n\n\nif __name__ == '__main__':\n import requests\n from sys import argv\n\n search = argv[1]\n url = 'https://swapi.co/api/people/' + '?' + 'search={}'.format(search)\n\n r = requests.get(url)\n r_json = r.json()\n\n count = r_json['count']\n peeps = r_json['results']\n print(\"Number of results: {}\".format(count))\n for peep in peeps:\n print('{}'.format(peep['name']))\n \n\n","sub_path":"0x11-python-network_1/9-starwars.py","file_name":"9-starwars.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"496346620","text":"from capreolus.extractor.bagofwords import BagOfWords\nfrom capreolus.extractor.embedtext import EmbedText\nfrom capreolus.reranker.reranker import Reranker\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DSSM_class(nn.Module):\n def __init__(self, stoi, p):\n super(DSSM_class, self).__init__()\n self.p = p\n nvocab = len(stoi)\n nhiddens = [nvocab] + list(map(int, p[\"nhiddens\"].split()))\n print(nhiddens)\n\n self.ffw = nn.Sequential()\n for i in range(len(nhiddens) - 1):\n self.ffw.add_module(\"linear%d\" % i, nn.Linear(nhiddens[i], nhiddens[i + 1]))\n self.ffw.add_module(\"activate%d\" % i, nn.ReLU())\n self.ffw.add_module(\"dropout%i\" % i, nn.Dropout(0.5))\n\n self.output_layer = nn.Sigmoid()\n\n def forward(self, sentence, query):\n query = self.ffw(query)\n sentence = self.ffw(sentence)\n\n query_norm = query.norm(dim=-1)[:, None] + 1e-7\n sentence_norm = sentence.norm(dim=-1)[:, None] + 1e-7\n\n query = query / query_norm\n sentence = sentence / sentence_norm\n\n cos_x = (query * sentence).sum(dim=-1, keepdim=True)\n\n score = self.output_layer(cos_x)\n return score\n\n\ndtype = torch.FloatTensor\n\n\n@Reranker.register\nclass DSSM(Reranker):\n description = \"\"\"Po-Sen Huang, Xiaodong He, Jianfeng Gao, Li Deng, Alex Acero, and Larry Heck. 2013. Learning deep structured semantic models for web search using clickthrough data. In CIKM'13.\"\"\"\n EXTRACTORS = [BagOfWords]\n\n @staticmethod\n def config():\n # hidden layer dimentions, should be a list of space-separated number in a string, e.g. '56 128 32', the i-th value represents the output dim of the i-th hidden layer\n nhiddens = \"56\"\n lr = 0.0001\n return locals().copy() # ignored by sacred\n\n @staticmethod\n def required_params():\n # Used for validation. 
Returns a set of params required by the class defined in get_model_class()\n return {\"nhiddens\", \"nvocab\", \"maxdoclen\", \"maxqlen\"}\n\n @classmethod\n def get_model_class(cls):\n return DSSM_class\n\n def build(self):\n self.model = DSSM_class(self.embeddings, self.config)\n return self.model\n\n def score(self, data):\n query_idf = data[\"query_idf\"].to(self.device)\n query_sentence = data[\"query\"].to(self.device)\n pos_sentence, neg_sentence = data[\"posdoc\"].to(self.device), data[\"negdoc\"].to(self.device)\n\n return [self.model(pos_sentence, query_sentence).view(-1), self.model(neg_sentence, query_sentence).view(-1)]\n\n def test(self, query_sentence, query_idf, pos_sentence, *args, **kwargs):\n query_sentence = query_sentence.to(self.device)\n pos_sentence = pos_sentence.to(self.device)\n\n return self.model(pos_sentence, query_sentence).view(-1)\n\n def zero_grad(self, *args, **kwargs):\n self.model.zero_grad(*args, **kwargs)\n","sub_path":"capreolus/reranker/DSSM.py","file_name":"DSSM.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"494955470","text":"import pyglm.utils.fastinv\nreload(pyglm.utils.fastinv)\nfrom pyglm.utils.fastinv import *\n\ndef test_submatrix_class():\n N = 10\n X = np.random.randn(1000,N)\n A = X.T.dot(X)\n\n mat = BigInvertibleMatrix(A)\n\n # Get a random submatrix\n for itr in xrange(10):\n inds = np.where(np.random.rand(N) < 0.5)[0]\n # inds = np.arange(itr+1).astype(np.int)\n Asub = A[np.ix_(inds, inds)]\n\n pinds, pinv, pdet = mat.compute_submatrix_inverse(inds)\n Ainv_true = np.linalg.inv(Asub)\n Adet_true = np.linalg.slogdet(Asub)[1]\n\n assert np.allclose(pinv, Ainv_true, atol=1e-8)\n assert np.allclose(pdet, Adet_true, atol=1e-8)\n\n # Update\n mat.update(pinds, pinv, pdet)\n\ndef test_block_inverse_add_row():\n N = 4\n X = np.random.randn(100,N)\n A = X.T.dot(X)\n # A = np.random.randn(4,4)\n # A = A.dot(A.T)\n # A += 4*np.eye(4)\n Ainv = np.linalg.inv(A)\n\n # Get a subset of A and add rows\n end = 3\n Bm = A[:end,end:]\n Cm = Bm.T\n Dm = A[end:,end:]\n\n Pt,Qt,Rt,St = block_inverse_add_rows(Ainv[:end,:end], Bm, Cm, Dm, symm=True)\n assert np.allclose(Ainv[:end,:end], Pt, atol=1e-3)\n assert np.allclose(Ainv[:end,end:], Qt, atol=1e-3)\n assert np.allclose(Ainv[end:,:end], Rt, atol=1e-3)\n assert np.allclose(Ainv[end:,end:], St, atol=1e-3)\n\ndef test_block_inverse_remove_row():\n N = 4\n X = np.random.randn(100,N)\n A = X.T.dot(X)\n # A = np.random.randn(4,4)\n # A = A.dot(A.T)\n # A += 4*np.eye(4)\n Ainv = np.linalg.inv(A)\n\n # Get a subset of A and add rows\n for end in xrange(1,4):\n Am = A[:end,:end]\n Pmt = np.linalg.inv(Am)\n\n Pmt_tilde = block_inverse_remove_rows(Ainv, end, symm=True)\n assert np.allclose(Pmt, Pmt_tilde, atol=1e-3)\n\n\nif __name__ == \"__main__\":\n # test_block_inverse_add_row()\n # test_block_inverse_remove_row()\n 
test_submatrix_class()","sub_path":"test/test_matrix_inverse.py","file_name":"test_matrix_inverse.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"395293977","text":"import sys\nimport os\nimport unittest\nimport logging\n\nfrom server.Game import Game\nfrom server.BaseClass import BaseClass\nfrom server.UnoException import *\nimport time\n\nclass TestGame(unittest.TestCase):\n def setUp(self):\n self.com = MockCom(self)\n self.game = Game(self.com, 3, 4, 30, 30, True)\n \n self.game.dealer = MockDealer(self)\n self.game.setup_new_game()\n self.game.dealer = MockDealer(self)\n self.dealer = self.game.dealer\n\n def tearDown(self):\n pass\n\n def test_chat(self):\n pass\n\n def test_join(self):\n #try to pass in a shitty name\n self.com.exeEvt(\"join\", 1, \"|shit\")\n self.assertTrue(self.com.invalid[\"called\"])\n self.assertEqual(self.com.invalid[\"p\"][1], \"|shit\")\n self.com.reset_p()\n\n\n self.com.exeEvt(\"join\", 3, \"player1\")\n\n self.com.exeEvt(\"join\", 4, \"player2\")\n\n self.assertEqual(len(self.game.clients), 2)\n\n #now add 3rd player, check if it starts to count down\n self.com.exeEvt(\"join\", 5, \"player3\")\n self.assertEqual(self.game.status, 1)\n\n started_time = self.game.count_time\n\n #now add 4th players, check if it resets the count down\n self.com.exeEvt(\"join\", 10, \"player4\")\n self.assertNotEqual(self.game.count_time, started_time)\n started_time = self.game.count_time\n\n #now add 5th players, check if it politely ask the player to wait\n #and not reset the count down\n self.com.exeEvt(\"join\", 11, \"player5\")\n self.assertEqual(self.game.count_time, started_time)\n self.assertTrue(self.com.wait[\"called\"])\n self.assertEqual(self.com.wait[\"p\"][0], 11)\n self.assertEqual(self.com.wait[\"p\"][1], \"player5\")\n self.com.reset_p()\n\n #now try to put more player than the queue size\n #supposed to kick them out\n self.game.maxQueue = 4\n self.com.exeEvt(\"join\", \"player6\", 14)\n self.assertTrue(self.com.kick[\"called\"])\n self.assertEqual(self.com.kick[\"p\"][0], 14)\n\n def test_check_time(self):\n\n self.game.check_time()\n\n self.com.exeEvt(\"join\", 
3, \"player1\")\n self.com.exeEvt(\"join\", 4, \"player2\")\n self.com.exeEvt(\"join\", 5, \"player3\")\n\n self.game.cd_join = 0\n self.game.check_time()\n self.assertTrue(self.dealer.sg_c[\"called\"])\n self.assertEqual(self.dealer.sg_c[\"p\"][0], 3)\n\n self.assertTrue(self.com.start[\"called\"])\n self.assertEqual(self.com.start[\"p\"][0], \"player1,player2,player3\")\n\n\nclass MockCom(BaseClass):\n def __init__(self, a):\n self.a = a\n BaseClass.__init__(self)\n self.reset_p()\n\n def reset_p(self):\n self.wait = {\"called\": False, \"p\": []}\n self.chat = {\"called\": False, \"p\": []}\n self.kick = {\"called\": False, \"p\": []}\n self.invalid = {\"called\": False, \"p\": []}\n self.accept = {\"called\": False, \"p\": []}\n self.start = {\"called\": False, \"p\": []}\n\n\n def bc_chat(self):\n pass\n\n def s_kick(self, sockId):\n self.kick[\"called\"] = True\n self.kick[\"p\"] = [sockId]\n\n\n def s_invalid(self, sockId, msg):\n self.invalid[\"called\"] = True\n self.invalid[\"p\"] = [sockId, msg]\n\n def s_accept(self, sockId, msg):\n pass\n\n def s_wait(self, sockId, msg):\n self.wait[\"called\"] = True\n self.wait[\"p\"] = [sockId, msg]\n\n def bc_won(self, sockId):\n self.won[\"called\"] = True\n self.won[\"p\"] = [sockId]\n\n def bc_start(self, msg):\n self.start[\"called\"] = True\n self.start[\"p\"] = [msg]\n\n\nclass MockDealer(BaseClass):\n def __init__(self, a):\n self.a = a\n BaseClass.__init__(self)\n self.playing = False\n self.reset_p()\n \n def reset_p(self):\n #sg_c: start_game_called\n self.sg_c = {\"called\": False, \"p\": []}\n\n def start_game(self, n):\n self.sg_c[\"called\"] = True\n self.sg_c[\"p\"] = [n]\n\n\nif __name__ == \"__main__\": \n unittest.main()\n","sub_path":"tests/server/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"348512049","text":"import tellopy\nimport cv2\nimport numpy as np\nimport queue\nimport platform\nimport subprocess\nimport time\nimport vision\nimport traceback\nimport enum\nimport logging\n\n# Make the drone land after 20 seconds in case it went rogue\nEXPERIMENT_TIMEOUT = 60*3\n\ndef orderselectui():\n selections = []\n regions = {\n 'red': ((20,60), (20+150, 60+150)),\n 'green': ((190,60), (190+150, 60+150)),\n 'blue': ((360,60), (360+150, 60+150)),\n 'yellow': ((530,60), (530+150, 60+150)),\n }\n def select(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n for sel, ((sx, sy), (ex, ey)) in regions.items():\n if sel in selections:\n continue\n if sx <= x <= ex and sy <= y <= ey:\n selections.append(sel)\n return\n\n def draw():\n display = np.ones((230, 700, 3), dtype=np.uint8)*255\n cv2.putText(display, f\"Select colors in popping order\", (40,40), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.rectangle(display, *regions['red'], (0,0,255),-1)\n cv2.rectangle(display, *regions['green'], (0,255,0),-1)\n cv2.rectangle(display, *regions['blue'], (255,0,0),-1)\n cv2.rectangle(display, *regions['yellow'], (0,255,255),-1)\n for i, sel in enumerate(selections):\n cv2.rectangle(display, *regions[sel], (0,0,0), 3)\n pt = regions[sel][0]\n pt = (pt[0] + 70, pt[1] + 70)\n cv2.putText(display, f\"{i+1}\", pt, cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,0))\n cv2.imshow('Display', display)\n key = cv2.waitKey(50)\n if key == 27:\n raise Exception(\"Not all order selections made\")\n\n cv2.namedWindow(\"Display\")\n cv2.setMouseCallback(\"Display\", select)\n while len(selections) < 4:\n draw()\n return selections\n\n\ndef statusmessageui(message):\n display = np.ones((230, 700, 3), dtype=np.uint8)*255\n cv2.putText(display, message, (40,40), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.imshow('Display', display)\n cv2.waitKey(1)\n\n\ndef handlaunchui(drone):\n global v, q\n display = np.ones((230, 700, 3), dtype=np.uint8)*255\n cv2.putText(display, 
\"Place the drone in the palm on your hand, then\", (40,40), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.putText(display, \"press SPACEBAR when ready.\", (40,80), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.putText(display, \"After the propellers start,\", (40,120), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.putText(display, \"toss the drone up.\", (40,160), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.imshow('UI', display)\n while True:\n key = cv2.waitKey(1)\n if key == 32:\n drone.throw_and_go()\n display = np.ones((230, 700, 3), dtype=np.uint8)*255\n cv2.putText(display, \"After the propellers start,\", (40,40), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.putText(display, \"toss the drone up.\", (40,80), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.putText(display, f\"Do this within 5 seconds!\", (40,120), cv2.FONT_HERSHEY_COMPLEX, 1, (255,0,255))\n cv2.imshow('UI', display)\n cv2.waitKey(1)\n dronesleep(5)\n cv2.destroyWindow('UI')\n break\n elif key == 27:\n raise Exception(\"Early quit\")\n\n droneclear(waitkey=False)\n\n\nclass FinalAttackState(enum.Enum):\n THRUST = 1\n REVERSE = 2\n CONFIRM = 3\n\n\nstatus = None\nDEFAULT_RESTING_HEIGHT = 6\nv = None\nq = None\n\ndef droneclear(**kwargs):\n global v, q\n if q.full():\n q.get()\n v.check_new_frame(q, str(status), **kwargs)\n\ndef dronesleep(t, **kwargs):\n global v, q\n expiry = time.time() + t\n while time.time() < expiry:\n v.check_new_frame(q, str(status), **kwargs)\n droneclear()\n\ndef droneloop():\n global v, q\n logging.basicConfig(filename=f\"{time.time()}.txt\", filemode='w',\n level=logging.DEBUG, format='%(asctime)s [%(levelname)s] %(name)s: %(message)s')\n logger = logging.getLogger(__name__)\n\n resting_height = DEFAULT_RESTING_HEIGHT\n\n # Get the configuration for this match\n remainingBalloons = orderselectui()\n \n statusmessageui(\"Waiting for WiFi network switch...\")\n # Wait for the user to switch WiFi networks\n while ping('192.168.10.1') == False:\n 
print('Drone is offline, retrying...')\n print('Connected!')\n\n statusmessageui(\"Connecting to drone...\")\n\n def handler(event, sender, data, **args):\n global status\n drone = sender\n if event is drone.EVENT_FLIGHT_DATA:\n status = data\n\n # Connect to Tello\n drone = tellopy.Tello()\n\n try:\n drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)\n\n drone.connect()\n drone.wait_for_connection(60.0)\n\n logger.info(\"Connected to drone\")\n statusmessageui(\"Video feed starting...\")\n\n v = vision.Vision(record=True)\n q = queue.Queue(maxsize=1)\n v.open_input(drone.get_video_stream())\n\n # Wait for video to stabilize\n dronesleep(10)\n logger.info(\"Vision presumed stable\")\n\n handlaunchui(drone)\n logger.info(\"Hand launch completed\")\n\n last_xrot = 0\n final_state = None\n final_target = None\n final_timer = 0\n start_time = time.time()\n search_start = None\n\n # Main control loop\n while len(remainingBalloons) > 0:\n target = remainingBalloons[0]\n\n # Get vision sol'n\n while q.empty():\n v.check_new_frame(q, str(status))\n data = q.get()\n\n # Check for early exit\n if data['type'] == 'quit':\n logger.info(\"User initiated shutdown by ESC\")\n break\n\n logger.info(\"Vision solutions: \"\n + \",\".join({c for c in {'red','green','blue','yellow'} if data[c] is not None}))\n\n # Check for final attack state\n if final_state is not None:\n logger.info(f\"In final attack state {final_state}\")\n resting_height = DEFAULT_RESTING_HEIGHT\n if final_state == FinalAttackState.THRUST:\n if time.time() < final_timer + 6:\n # move forward slowly for 6 seconds\n drone.forward(8)\n else:\n # next state transition\n drone.forward(0)\n final_state = FinalAttackState.REVERSE\n final_timer = time.time()\n\n elif final_state == FinalAttackState.REVERSE:\n if time.time() < final_timer + 1:\n # back up quickly for 1 second\n drone.backward(20)\n else:\n # next state transition\n drone.backward(0)\n final_state = FinalAttackState.CONFIRM\n final_timer = time.time()\n\n 
elif final_state == FinalAttackState.CONFIRM:\n # check vision to make sure we popped it\n if data[final_target] is None:\n # we did\n remainingBalloons.pop(0)\n # if this list gets to len 0, then it will land next loop iter\n else:\n # damn, guess we gotta go for it again\n pass\n final_state = None\n\n continue\n\n # If we only have 20 seconds left, pop whatever we can find\n if data[target] is None and (time.time() - start_time) > 160:\n for c in {'red', 'green', 'blue', 'yellow'}:\n if data[c] is not None:\n target = c\n break\n\n # Check for search state\n if data[target] is None:\n if search_start is None:\n search_start = time.time()\n duration = time.time() - search_start\n if duration > 15:\n resting_height -= 2\n search_start = time.time()\n logger.info(f\"Current target {target} not in sight, resting height {resting_height}', searching for {duration}s\")\n drone.right(0)\n drone.forward(6)\n # Spin clockwise slowly (or in the direction of last seen balloon\n rate = 35\n if last_xrot < 0:\n drone.counter_clockwise(rate)\n else:\n drone.clockwise(rate)\n # Ascend to \"10\"\n if status.height < resting_height:\n drone.up(20)\n elif status.height > resting_height:\n drone.down(20)\n else:\n drone.up(0)\n continue\n\n search_start = None\n\n # Align with balloon\n xrot, height, distance = data[target]\n rot_ontarget = False\n height_ontarget = False\n\n logger.info(f\"Tracking target {target}: xrot={xrot}deg height={height}cm, dist={distance}cm\")\n\n max_xrot = 4\n if distance < 50:\n max_xrot = 8\n\n # Rotate to face the balloon if needed\n if xrot < max_xrot * -1:\n drone.counter_clockwise(20)\n elif xrot > max_xrot:\n drone.clockwise(20)\n else:\n drone.clockwise(0)\n rot_ontarget = True\n\n elevSpeeed = 15\n if distance < 100:\n elevSpeeed = 25\n\n # change elevation to match balloon if needed\n if height < -17: # increase this to favor attacking from bottom\n drone.down(elevSpeeed)\n elif height > 17: # decrease this to favor attacking from top\n 
drone.up(elevSpeeed)\n else:\n drone.up(0)\n height_ontarget = True\n\n # head in for the kill\n if distance > 100:\n logger.info(\"Moving forward, 1st stage\")\n drone.forward(20)\n elif distance > 50 and rot_ontarget and height_ontarget:\n logger.info(\"Moving forward, locked on\")\n drone.forward(20)\n elif rot_ontarget and height_ontarget:\n print('\\a')\n logger.info(\"Taking the shot\")\n # final kill for 6 seconds\n final_state = FinalAttackState.THRUST\n final_timer = time.time()\n final_target = target\n else:\n logger.info(\"Still aligning with target\")\n drone.forward(0)\n\n last_xrot = xrot\n \n logger.info(\"Landing drone\")\n cv2.destroyAllWindows()\n drone.right(0)\n drone.forward(0)\n drone.up(0)\n drone.clockwise(0)\n drone.land()\n time.sleep(5)\n except Exception as ex:\n traceback.print_exc()\n finally:\n drone.quit()\n\n logger.info(\"Connection closed\")\n\n\ndef ping(host):\n \"\"\"\n Returns True if host (str) responds to a ping request.\n Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.\n \"\"\"\n\n # Option for the number of packets as a function of\n param = '-n' if platform.system().lower()=='windows' else '-c'\n\n # Building the command. Ex: \"ping -c 1 google.com\"\n command = ['ping', param, '1', host]\n\n return subprocess.call(command) == 0\n\n\nif __name__ == \"__main__\":\n droneloop()\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":11709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"254086185","text":"# TODO LIST:\n# -- Dijkstra's Algorithm for pathfinding?\n# -- Look at more than 1 cell deep to see if we can capture territory earlier?\n# -- How to identify which direction to focus growth? Looking at production map at beginning to see.\n# -- Attack patterns? What's the best strategy for attacking / retreating / reinforcing?\n# -- Varying production multiplier by distance to border?\n\n# Version 1: Basic bot implementation - Modifications from random bot: To be added\n# Version 2: \n# -- Moved hlt file to single file. \n# -- consolidate_strength: Completely rewritten. Searches all border tiles and then sees if we can consolidate. Ranked by production strength, then sees if we can consolidate.\n# -- find_nearest_enemy_direction: Calculate ALL distances, and pick the shortest with lowest production if there is a tie. Otherwise, pick randomly.\n# -- heuristic: look at not just the cell but adjacent cells to determine the value of capturing the cell.\n# -- smallest strength cells move first. \n# Version 3: Uploaded incorrectly\n# Version 4:\n# -- move_to_target: old implementation might move into uncontrolled territory. Not good. New implementation moves only though adjacent owned territory, if possible. If multiple\n# routes exist, then it takes the direction with the lowest production.\n# -- consolidate_strength: Split into two subroutines. One which is the old multi-attacker into a cell, the other looks outwards to gather strength to attack a cell.\n# -- Idea: Can we expand multi-attacking into a cell to also look and see if we can capture a cell by moving units INTO adjacent cells??\n# Version 5: Rewrote heuristic function. Tries not to overvalue expanding into cells bordering enemy territory too much.\n# Version 6: ??\n# Version 7: Rewrote move to border function. Now squares will try to move towards higher production cells instead of the nearest border.\n# -- Complete code overhaul. 
Remove GameMap class, add Square class.\n\n###########\n# Imports #\n###########\nimport math\nfrom itertools import chain\nimport sys\nimport logging\nimport numpy\nimport random\n\n\n#############\n# Variables #\n#############\n\nbotname = \"shummie v7.7\"\nproduction_decay = 0.50\nproduction_influence_max_distance = 5\nbuildup_multiplier = 5\nstrength_buffer = 0\n\n \n\n \n \n#################\n# GameMap Class #\n#################\n\nclass GameMap:\n def __init__(self):\n \n self.initialize_game()\n\n def initialize_game(self):\n # This should only be called once, and at the beginning of the game\n self.my_id = int(get_string())\n map_size_string = get_string()\n production_map_string = get_string()\n \n self.width, self.height = tuple(map(int, map_size_string.split()))\n self.frame = 0\n \n self.production_map = numpy.array(list(map(int, production_map_string.split()))).reshape((self.height, self.width)).transpose()\n\n self.get_frame()\n \n # Initialize all the maps that this stores\n \n self.projected_owner_map = numpy.ones((self.width, self.height)) * -1\n self.projected_strength_map = numpy.ones((self.width, self.height)) * -1\n\n self.starting_player_count = numpy.amax(self.owner_map) # Note, for range you'd need to increase the range by 1\n \n self.next_uncapped_strength_map = numpy.zeros((self.starting_player_count + 1, self.width, self.height))\n \n # Send the botname\n send_string(botname)\n \n\n\n \n def get_frame(self, map_string = None):\n # Updates the map information from the latest frame provided by the game environment\n if map_string is None:\n map_string = get_string()\n split_string = map_string.split()\n \n # The state of the map (including owner and strength values, but excluding production values) is sent in the following way:\n # One integer, COUNTER, representing the number of tiles with the same owner consecutively.\n # One integer, OWNER, representing the owner of the tiles COUNTER encodes.\n # The above repeats until the COUNTER total 
is equal to the area of the map. \n # It fills in the map from row 1 to row HEIGHT and within a row from column 1 to column WIDTH. \n # Please be aware that the top row is the first row, as Halite uses screen-type coordinates.\n owners = list()\n while len(owners) < self.width * self.height:\n counter = int(split_string.pop(0))\n owner = int(split_string.pop(0))\n owners.extend([owner] * counter)\n assert len(owners) == self.width * self.height\n \n self.owner_map = numpy.array(owners).reshape((self.height, self.width)).transpose()\n \n # This is then followed by WIDTH * HEIGHT integers, representing the strength values of the tiles in the map. \n # It fills in the map in the same way owner values fill in the map. \n assert len(split_string) == self.width * self.height\n str_list = list(map(int, split_string))\n \n self.strength_map = numpy.array(str_list).reshape((self.height, self.width)).transpose()\n \n # Create all squares for the GameMap\n self.squares = numpy.empty((self.width, self.height), dtype = numpy.object)\n #self.squares = [[None for y in range(self.height)] for x in range(self.width)]\n for x in range(self.width):\n for y in range(self.height):\n self.squares[x, y] = Square(self, x, y, self.owner_map[x, y], self.strength_map[x, y], self.production_map[x, y])\n \n # Reset the move_map\n self.move_map = numpy.ones((self.width, self.height)) * -1 # Could possibly expand this in the future to consider enemy moves...\n if self.frame > 1:\n self.next_uncapped_strength_map = numpy.zeros((self.starting_player_count + 1, self.width, self.height))\n \n self.frame += 1\n \n def __iter__(self):\n # Allows direct iteration over all squares\n return chain.from_iterable(self.squares)\n \n\n \n def is_npc_border(self, square):\n # Looks at a square and sees if it's an NPC border square\n # Defined as a square which is owned by 0 and has a neighbor of my_id\n if square.owner != 0: return False\n for n in self.neighbors(square):\n if n.owner == self.my_id:\n return 
True\n return False\n \n def get_distance(self, sq1, sq2):\n dx = abs(sq1.x - sq2.x)\n dy = abs(sq1.y - sq2.y)\n if dx > self.width / 2:\n dx = self.width - dx\n if dy > self.height / 2:\n dy = self.height - dy\n return dx + dy\n \n def get_target(self, square, direction):\n # This function might be unnecessary?\n dx, dy = ((0, -1), (1, 0), (0, 1), (-1, 0), (0, 0))[direction]\n return self.squares[(square.x + dx) % self.width][(square.y + dy) % self.height]\n\n def get_coord(self, sourcex, sourcey, dx, dy):\n return ((sourcex + dx) % self.width, (sourcey + dy) % self.height)\n \n def make_move(self, square, direction):\n # Queues up the move to be made.\n # First, store the move in the move_map for easy reference\n self.move_map[square.x, square.y] = direction\n # Update square to new direction\n square.make_move(direction)\n \n def send_frame(self):\n # Goes through each square and get the list of moves.\n move_list = []\n for sq in chain.from_iterable(self.squares):\n if sq.owner == self.my_id:\n if sq.move == -1:\n # In the event we didn't actually assign a move, make sure it's coded to STILL\n sq.move = 4\n move_list.append(sq)\n \n send_string(' '.join(str(square.x) + ' ' + str(square.y) + ' ' + str(translate_cardinal(square.move)) for square in move_list))\n \n def calculate_uncapped_next_strength(self):\n # Given the move_map, calculate the uncapped strength in each cell.\n for x in range(self.width):\n for y in range(self.height):\n owner = self.owner_map[x, y]\n # 4. Add strength to pieces which choose to remain where they are.\n # Treat all cells that have a move value of -1 or 4 to be increasing in strength.\n # In practice, this is not true for enemy pieces, but for now, let's make this assumption\n if self.move_map[x, y] == 4 or self.move_map[x, y] == -1:\n self.next_uncapped_strength_map[owner, x, y] += self.strength_map[x, y] + self.production_map[x, y] if owner > 0 else 0\n # 5. 
Simultaneously move (and combine if necessary) all player's pieces.\n else: \n direction = self.move_map[x, y]\n dx, dy = ((0, -1), (1, 0), (0, 1), (-1, 0))[int(direction)]\n self.next_uncapped_strength_map[owner, (x + dx) % self.width, (y + dy) % self.height] += self.strength_map[x, y]\n\n def create_production_influence_map(self):\n # Lots of tweaking to do...\n # Start with a basic prod/strength evaluation for npc cells\n for x in range(self.width):\n for y in range(self.height): \n prod_value = self.production_map[x, y]\n if self.owner_map[y, x] == 0:\n str_value = max(1, self.strength_map[x, y])\n else:\n # If we want to do something differently with strengths in enemy territories, we can alter it here.\n str_value = max(1, self.strength_map[x, y]) # This will cause cells to avoid border cells...\n\n value = prod_value / str_value\n combos = ((dx, dy) for dy in range(-production_influence_max_distance, production_influence_max_distance+1) for dx in range(-production_influence_max_distance, production_influence_max_distance+1) if abs(dx) + abs(dy) <= production_influence_max_distance)\n for c in combos:\n distance = abs(c[0]) + abs(c[1])\n decay_factor = math.exp(-production_decay * distance)\n self.influence_production_map[self.owner_map[x, y], (x + c[0]) % self.width, (y + c[1]) % self.height] += value * decay_factor\n\n def get_best_move(self, square):\n # For a given square, find the \"best\" move we can\n border = False\n \n targets = []\n for d in (NORTH, EAST, SOUTH, WEST):\n target = game_map.get_target(square, d)\n if target.owner != self.my_id:\n border = True\n val = heuristic(target, square)\n targets.append((target, val))\n\n targets.sort(key = lambda x: x[1], reverse = True) # Sorts targets from high to low\n \n # We have a list of all adjacent cells. 
If targets is not None, let's see what we can do\n if len(targets) > 0:\n # Go through the list and see if we can attack one\n for t in targets:\n if t[0].strength < square.strength:\n return square.move_to_target(t[0], False)\n \n # if we don't have enough strength to make it worth moving yet, stay still\n if square.strength < (square.production * buildup_multiplier):\n # Don't actually set a cell to STILL unless we want it to stay still\n return True\n # If we aren't at a border, move towards the closest one\n elif not border:\n return self.go_to_border(square)\n # Else, we're at a border, don't have the strength to attack another cell, and have less than the buildup multipler. Let other functions handle movement\n else:\n return True\n \n def go_to_border(self, square):\n # Going to do a simple search for the closest border then determine which of the 4 directions we should go\n target = (None, 0)\n max_distance = min(game_map.width, game_map.height) / 2\n for d in (NORTH, EAST, SOUTH, WEST):\n distance = 1\n location = self.get_target(square, d)\n while location.owner == self.my_id and distance < max_distance:\n distance += 1\n location = self.get_target(location, d)\n border_value = location.influence_production_npc()\n #scaled_value = border_value / distance\n scaled_value = border_value \n if scaled_value > target[1]:\n target = (location, scaled_value)\n if target[0] != None:\n square.move_to_target(target[0], True)\n else:\n # If all cardinal directions are owned, is it possible to actually not move?\n # Move randomly then?\n #self.make_move(square, random.choice(range(1)))\n self.make_move(square, NORTH)\n\n\n def prevent_overstrength(self):\n # Tries to prevent wasting strength by having multiple cells move into the same square\n # Calculate the next turn's projected strengths based on moves so far.\n self.calculate_uncapped_next_strength()\n \n # Check the list of cells that will be capped\n cells_over = []\n for x in range(self.width):\n for y in 
    def prevent_overstrength(self):
        """Reduce strength wasted to the 255 cap when several pieces converge
        on one square: reroute a stationary square into an incoming mover, or
        deflect one incoming mover perpendicular to its current direction.

        Returns the number of squares that were projected over the cap.
        """
        # Tries to prevent wasting strength by having multiple cells move into the same square
        # Calculate the next turn's projected strengths based on moves so far.
        self.calculate_uncapped_next_strength()
        
        # Check the list of cells that will be capped
        cells_over = []
        for x in range(self.width):
            for y in range(self.height):
                if self.owner_map[x, y] == self.my_id: # We only care about our own cells
                    if self.next_uncapped_strength_map[self.my_id, x, y] > (255 + strength_buffer):
                        cells_over.append(self.squares[x, y])
        
        # cells_over contains a list of squares which will be over the strength cap
        cells_over_count = len(cells_over) # We'll be popping squares out so keep the initial count so we can return it later
        while len(cells_over) > 0:
            square = cells_over.pop(0) 
            
            # Case 1: There should never be a reason we are staying still and being too strong. In the event this happens... what?
            # Case 2: We are not moving, let's move this square into a square moving into us
            if (square.move == -1 or square.move == STILL):
                # Try to move into another square which is moving into us
                if len(square.moving_here) > 0:
                    square.move_to_target(random.choice(square.moving_here), True)
            else:
                # We are moving but the squares that are moving into here are going to collide.
                # See if we can reroute one of them perpendicular to where they are going, going the opposite direction is likely guaranteed to be counter productive
                if len(square.moving_here) > 1:
                    square_to_move = random.choice(square.moving_here)
                    # (move + 1) % 4 and (move + 3) % 4 are the two perpendicular directions.
                    option1dx, option1dy = get_offset((square_to_move.move + 1) % 4)
                    option2dx, option2dy = get_offset((square_to_move.move + 3) % 4)
                    
                    # Move to the square that would cause the smallest loss in strength
                    option1 = square_to_move.strength + self.next_uncapped_strength_map[self.my_id, (square_to_move.x + option1dx) % self.width, (square_to_move.y + option1dy) % self.height]
                    option2 = square_to_move.strength + self.next_uncapped_strength_map[self.my_id, (square_to_move.x + option2dx) % self.width, (square_to_move.y + option2dy) % self.height]
                    option0 = self.next_uncapped_strength_map[self.my_id, square.x, square.y]

                    if option1 == min(option1, option2, option0):
                        self.make_move(square_to_move, (square_to_move.move + 1) % 4)
                    elif option2 == min(option1, option2, option0):
                        self.make_move(square_to_move, (square_to_move.move + 3) % 4)
                    else:
                        # Do nothing
                        continue
        
        return cells_over_count
    def attack_border_multiple_pieces(self):
        """Capture NPC border cells right now where the combined strength of
        adjacent idle friendly pieces exceeds the target's strength."""
        # Looks to see if there are any border cells which can be attacked right now by multiple pieces at the same time.
        # Looks only at cells whose move value is -1 and are bordering a neighboring cell.
        border_squares = []
        for square in chain.from_iterable(self.squares):
            if square.is_npc_border():
                border_squares.append((square, heuristic(square)))
        
        border_squares.sort(key = lambda x: x[1], reverse = True)
        
        for border_square in border_squares:
            # For each border square, starting with the most valuable, attempt to capture it.
            friendly_neighbors = [x for x in border_square[0].neighbors() if x.owner == self.my_id]
            available_strength = 0
            # TODO: There's a more pythonic way to do this instead of the loop below. 
            for f in friendly_neighbors:
                if f.move == -1:
                    available_strength += f.strength
            
            if available_strength > border_square[0].strength:
                # Enough idle strength exists; commit neighbors until the target is beaten.
                attacking_strength = 0
                for f in friendly_neighbors:
                    if f.move == -1 and attacking_strength <= border_square[0].strength:
                        attacking_strength += f.strength
                        f.move_to_target(border_square[0], False)
    
    def consolidate_strength(self, cells_out = 1):
        """For each NPC border cell (best heuristic first), try to gather
        enough nearby strength — up to *cells_out* squares away — to take it."""
        # Looks at border cells and sees if there is an opportunity to look N neighbors out to consolidate strength to capture a territory.
        border_squares = []
        for square in chain.from_iterable(self.squares):
            if square.is_npc_border():
                border_squares.append((square, heuristic(square)))
        
        border_squares.sort(key = lambda x: x[1], reverse = True) # Sorts by all border cells which will not be taken next turn by the heuristic above. 
        
        distance = 1
        while distance <= cells_out:
            self.consolidate_n_out(border_squares, distance)
            distance += 1
    def consolidate_n_out(self, border_squares_list, cells_out):
        """For each border square, check whether a friendly neighbor can
        accumulate enough strength within *cells_out* turns (idle production
        from close squares plus moved strength from squares exactly
        *cells_out* away) to capture it, and issue those moves."""
        # For each border_square, we want to look at each friendly neighbor and see if we can take over this square in cells_out turns from now.
        
        for border_square_tuple in border_squares_list:
            border_square = border_square_tuple[0]
            # Get a list of all friendly neighbors to this square: These are the TARGET squares to move to.
            friendly_neighbors = [x for x in border_square.neighbors() if x.owner == self.my_id]
            
            for f in friendly_neighbors:
                # How much strength do we need and can we get it cells_out away?
                needed_strength = border_square.strength + 1
                
                moving_cells = False
                
                # Check friendly neighboring cells.
                for distance_out in range(1, cells_out + 1):
                    neighbor_strength = 0
                    # Are we currently moving? If not, we can add this to the strength
                    if f.move == -1:
                        # While we can check if f.move == STILL, it's likely that it's STILL for a reason and we don't want to cause conflicts.
                        neighbor_strength += (f.strength + (f.production * distance_out))
                    f_neighbors = [x for x in f.neighbors(distance_out) if x.owner == self.my_id]
                    # This returns a list of ALL neighbors between 1 and distance_out inclusive.
                    f_neighbors_minus_one = []
                    if distance_out > 1:
                        f_neighbors_minus_one = [x for x in f.neighbors(distance_out - 1) if x.owner == self.my_id]
                    f_neighbors_at_cells_out = list(set(f_neighbors) - set(f_neighbors_minus_one))
                    # Ok, now we have a list of all cells AT distance_out and all cells LESS than distance_out
                    # Why is this necessary? We only want to MOVE cells at distance_out and let all squares LESS than distance_out produce
                    
                    # Ok, first, check needed strength for all squares LESS than distance_out
                    for f_n in f_neighbors_minus_one:
                        if f_n.move == -1:
                            neighbor_strength += f_n.strength + f_n.production * self.get_distance(f_n, f)
                    # Now, check if moving neighbors will produce enough strength.
                    needed_strength_at_cells_out = needed_strength - neighbor_strength
                    for f_n in f_neighbors_at_cells_out:
                        if f_n.move == -1:
                            neighbor_strength += f_n.strength
                    # Do we have enough strength?
                    if neighbor_strength > needed_strength:
                        # Yes! Let's move the outside squares towards f_n.
                        f_neighbors_at_cells_out.sort(key = lambda x: x.strength, reverse = True)
                        for f_n in f_neighbors_at_cells_out:
                            if f_n.move == -1 and needed_strength_at_cells_out > 0:
                                f_n.move_to_target(f, True) 
                                # There may be edge cases where we can't actually move to a square, or that it takes more turns than expected. Might need to make a new function that looks at distance through friendly squares
                                needed_strength_at_cells_out -= f_n.strength
                                moving_cells = True
                        if f.move == -1:
                            self.make_move(f, STILL)
                        # Stop looking any further out
                        break
                
                if moving_cells:
                    # We've found something to attack this border square eventually, let's move to the next.
                    break
class Square:
    """One cell of the Halite map: position, ownership, strength/production,
    per-turn move bookkeeping, and per-frame cached border/influence values."""

    def __init__(self, game_map, x, y, owner, strength, production):
        self.game_map = game_map
        self.x = x
        self.y = y
        self.owner = owner
        self.strength = strength
        self.production = production
        self.move = -1            # -1 means "no move assigned yet this turn"
        self.target = None        # square this one is currently moving into
        self.moving_here = []     # squares that have targeted this one
        # Per-frame caches; None means "not computed yet this frame".
        self._is_border = None
        self._is_npc_border = None
        self._influence_production_npc = None
        

    def make_move(self, direction):
        # This should ONLY be called through the GameMap make_move function. Calling this function directly may screw things up
        # Update this square's move
        # Have we set this square's move already?
        dx, dy = get_offset(direction)  # result unused; validates direction via table lookup
        
        if self.move != -1:
            # Yes, let's reset information: detach from the previous target.
            self.target.moving_here.remove(self)
        
        self.move = direction
        self.target = self.game_map.get_target(self, direction)
        self.target.moving_here.append(self)
        
    def neighbors(self, n = 1, include_self = False):
        # Returns a GENERATOR (single pass!) over all squares within Manhattan
        # distance n, excluding self unless include_self = True.
        assert isinstance(include_self, bool)
        assert isinstance(n, int) and n > 0
        if n == 1:
            combos = ((0, -1), (1, 0), (0, 1), (-1, 0), (0, 0)) # N, E, S, W, STILL
        else:
            combos = ((dx, dy) for dy in range(-n, n+1) for dx in range(-n, n+1) if abs(dx) + abs(dy) <= n)
        return (self.game_map.squares[(self.x + dx) % self.game_map.width][(self.y + dy) % self.game_map.height] for dx, dy in combos if include_self or dx or dy)
    
    def is_border(self):
        # looks at a square and sees if it's a border.
        # Looks at all neighbors and see if the owner != my_id
        # Have we done this calculation already? It shouldn't change within a frame
        if self._is_border == None:
            for n in self.neighbors():
                if n.owner != self.game_map.my_id:
                    self._is_border = True
                    return True
            self._is_border = False
        return self._is_border
    
    def is_npc_border(self):
        # Looks at a square and sees if it's an NPC border square
        # Defined as a square which is owned by 0 and has a neighbor of my_id
        # Have we done this calculation already? It shouldn't change within a frame
        if self._is_npc_border == None:
            if self.owner != 0:
                self._is_npc_border = False
                return False
            for n in self.neighbors():
                if n.owner == self.game_map.my_id:
                    self._is_npc_border = True
                    return True
            self._is_npc_border = False
            return False
        return self._is_npc_border
    
    def move_to_target(self, destination, through_friendly):
        """Issue one step toward *destination* on the wrapping map, preferring
        (1) friendly territory when requested, (2) directions that close the
        distance, (3) lower-production squares; ties broken randomly."""
        dist_w = (self.x - destination.x) % self.game_map.width
        dist_e = (destination.x - self.x) % self.game_map.width
        dist_n = (self.y - destination.y) % self.game_map.height
        dist_s = (destination.y - self.y) % self.game_map.height

        if dist_w == 0 and dist_n == 0:
            return self.game_map.make_move(self, STILL)
        
        # Prioritize in the following order:
        # 1: Move through OWN territory
        # 2: Move CLOSER to the destination
        # 3: Move through LOWER production square
        possible_moves = [] 

        possible_moves.append((NORTH, self.game_map.owner_map[(self.x + 0) % self.game_map.width, (self.y - 1) % self.game_map.height] == self.game_map.my_id, dist_n if dist_n > 0 else 999, self.game_map.production_map[(self.x + 0) % self.game_map.width, (self.y - 1) % self.game_map.height]))
        possible_moves.append((SOUTH, self.game_map.owner_map[(self.x + 0) % self.game_map.width, (self.y + 1) % self.game_map.height] == self.game_map.my_id, dist_s if dist_s > 0 else 999, self.game_map.production_map[(self.x + 0) % self.game_map.width, (self.y + 1) % self.game_map.height]))
        possible_moves.append((EAST, self.game_map.owner_map[(self.x + 1) % self.game_map.width, (self.y + 0) % self.game_map.height] == self.game_map.my_id, dist_e if dist_e > 0 else 999, self.game_map.production_map[(self.x + 1) % self.game_map.width, (self.y + 0) % self.game_map.height]))
        possible_moves.append((WEST, self.game_map.owner_map[(self.x - 1) % self.game_map.width, (self.y + 0) % self.game_map.height] == self.game_map.my_id, dist_w if dist_w > 0 else 999, self.game_map.production_map[(self.x - 1) % self.game_map.width, (self.y + 0) % self.game_map.height]))

        # Sort. Note sorts need to happen in reverse order of priority.
        random.shuffle(possible_moves) # Shuffle so we don't bias direction.
        possible_moves.sort(key = lambda x: x[3]) # Sort production, smaller is better
        possible_moves.sort(key = lambda x: x[2]) # Sort distance, smaller is better
        if through_friendly:
            possible_moves.sort(key = lambda x: x[1], reverse = True) # Sort owner, True = 1, False = 0
        #logging.debug(str(possible_moves))
        # The smallest move is the one we'll take.
        self.game_map.make_move(self, possible_moves[0][0]) 
    
    def influence_production_npc(self):
        # So that we don't have to calculate the entire map every tick, and to prevent recalcs, calculate and store into the square so we can reference it whenever we want
        # Lots of tweaking to do.
        if self._influence_production_npc == None:
            self._influence_production_npc = 0
            # BUGFIX: previously tested `self.owner == 0`, which zeroed out
            # exactly the neutral squares this value is meant to rank (see
            # go_to_border). Per the original comment, it is squares WE own
            # that don't need a value.
            if self.owner == self.game_map.my_id:
                # I think for any purpose we would use here, if we own the territory, we don't actually care about this value
                return self._influence_production_npc
            
            neighbors = self.neighbors(production_influence_max_distance, True)
            
            for n in neighbors:
                distance = self.game_map.get_distance(self, n)
                prod_n = n.production
                if n.owner == 0:
                    str_value = max(1, n.strength)
                elif n.owner == self.game_map.my_id:
                    # Do not assign any influence for cells we own
                    prod_n = 0
                    str_value = 1 # This doesn't matter i think since value will just equal 0
                else:
                    # If we want to do something differently with strengths in enemy terrotiry, we can alter it here.
                    str_value = max(1, n.strength)
                
                decay_factor = math.exp(-production_decay * distance)
                value = prod_n / str_value
                
                self._influence_production_npc += value * decay_factor
        
        return self._influence_production_npc
def get_offset(direction):
    """Translate a direction index (N, E, S, W, STILL) into a (dx, dy) step."""
    offsets = (
        (0, -1),   # NORTH
        (1, 0),    # EAST
        (0, 1),    # SOUTH
        (-1, 0),   # WEST
        (0, 0),    # STILL
    )
    return offsets[direction]

def distance_between(x1, y1, x2, y2):
    """Manhattan distance between two cells on the toroidal (wrapping) map."""
    width = game_map.width
    height = game_map.height
    dx = abs(x1 - x2)
    dy = abs(y1 - y2)
    # Wrap around an axis when going the other way is shorter.
    if dx > width / 2:
        dx = width - dx
    if dy > height / 2:
        dy = height - dy
    return dx + dy

def opposite_direction(direction):
    """Return the 180-degree reversal of a direction; STILL reverses to itself."""
    if direction == STILL:
        return STILL
    return (direction + 2) % 4
def heuristic(cell, source = None):
    """Score the value of moving into *cell*: expected overkill damage against
    adjacent enemies plus, for neutral cells, a production/strength payoff
    (including a 10% bonus from neutral neighbors). Friendly cells score 0.
    """
    # Currently, don't assign any value to moving into a friendly cell. This should be done through a different call.
    if cell.owner == game_map.my_id:
        return 0
    
    # If other cells are moving into this square, we don't want to duplicate effort. Especially if there are no enemy cells around
    other_cells_moving_into_cell = cell.moving_here
    # BUGFIX: neighbors() returns a single-pass generator, and this function
    # iterates it twice. Previously the second loop (neighbor production
    # values below) always saw an exhausted iterator, so the neighbor bonus
    # was silently always 0. Materialize it once.
    cell_neighbors = list(cell.neighbors())

    bordered_by_hostile = False
    
    for c in cell_neighbors:
        if c.owner != 0:
            bordered_by_hostile = True
    
    if len(other_cells_moving_into_cell) > 0 and not bordered_by_hostile:
        # Someone else is capturing this neutral territory already.
        return 0
    
    # Calculate how much attack damage we would do by moving into here (assumes everyone else stays still)
    total_damage = 0
    
    # Calculate the strength of other cells moving into here
    total_attack_strength = 0
    for c in other_cells_moving_into_cell:
        if c.owner == game_map.my_id:
            total_attack_strength += c.strength
    
    directions = [NORTH, EAST, SOUTH, WEST]
    for d in directions:
        target = game_map.get_target(cell, d)
        if target.owner != 0 and target.owner != game_map.my_id:
            damage = max(target.strength - total_attack_strength, 0)
            if source != None:
                damage = min(source.strength, damage)
            total_damage += damage

    value = 0 
    neighbor_values = []
    if cell.owner == 0:
        #value = max(1, cell.strength) / cell.production # Number of turns to recover. LOWER is better.
        production_value = cell.production / max(cell.strength, 1)
        for c in cell_neighbors:
            if c.owner == 0:
                neighbor_values.append(c.production / max(c.strength, 1))
        value = production_value + 0.1 * sum(neighbor_values)
    
    # This should be changed, but we'll keep it at this for now:
    
    return value + total_damage # Total damage is going to totally overpower value...


def game_loop():
    """One frame: read the map state, let every friendly square (weakest
    first) choose a move, then send all moves back to the engine."""
    game_map.get_frame()
    #game_map.create_production_influence_map()
    logging.debug("\nFrame: " + str(game_map.frame))
    # Have each individual square decide on their own movement
    square_move_list = []
    for square in game_map:
        if square.owner == game_map.my_id: 
            square_move_list.append(square)
    # Have smaller strength pieces move first. Mainly since otherwise especially for attacking, large pieces bounce back and forth when we want them to attack instead.
    square_move_list.sort(key = lambda x: x.strength) 
    #percent_owned = len(square_move_list) / (game_map.width * game_map.height)

    for square in square_move_list:
        game_map.get_best_move(square)
    # Project the state of the board assuming for now that enemy pieces do not move 
    #game_map.create_projection() 
    # Do stuff

    #game_map.attack_border_multiple_pieces()
    #consolidate_strength()
    #if game_map.frame < 10:
    #    consolidate_strength(3)
    #elif game_map.frame < 20:
    #    consolidate_strength(2)
    #elif game_map.frame < 40:
    #game_map.consolidate_strength(1)
    
    over_count = game_map.width * game_map.height
    
    #new_over_count = game_map.prevent_overstrength()
    
    #while new_over_count < over_count:
    #    over_count = new_over_count
    #    new_over_count = game_map.prevent_overstrength()
    
    game_map.send_frame()
#####################################################################################################################
# Functions for communicating with the Halite game environment (formerly contained in separate module networking.py #
#####################################################################################################################

def translate_cardinal(direction):
    """Convert a framework direction index to the game protocol's index."""
    # Cardinal index used by the framework is:
    # NORTH = 0, EAST = 1, SOUTH = 2, WEST = 3, STILL = 4
    # Cardinal index used by the game is:
    # STILL = 0, NORTH = 1, EAST = 2, SOUTH = 3, WEST = 4
    return int((direction + 1) % 5)

def send_string(to_be_sent):
    # Write one protocol line to stdout and flush so the engine sees it immediately.
    sys.stdout.write(to_be_sent + "\n")
    sys.stdout.flush()

def get_string():
    # Read one protocol line from stdin, stripping the trailing newline.
    return sys.stdin.readline().rstrip('\n')


######################
# Game run-time code #
######################

logging.basicConfig(filename='logging.log',level=logging.DEBUG)
# logging.debug('your message here')
NORTH, EAST, SOUTH, WEST, STILL = range(5)


game_map = GameMap()

# Main loop: the engine drives frames until the bot process is killed.
while True:
    game_loop()
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from tvb.models import Forum81Item

import requests
import re
from datetime import date


class Command(BaseCommand):
    """Django management command: log into tvboxnow.com, find the .torrent
    attachment link for one episode in a forum thread, follow the attachment
    redirect, and save the torrent file locally.

    NOTE(review): the argument placeholders in `args`/`help` appear to have
    been stripped (likely angle-bracket tokens lost during extraction).
    NOTE(review): credentials are hard-coded below — should come from
    settings or environment, not source control.
    NOTE(review): `res.text.encode('utf-8').split('\n')` suggests this
    targets Python 2 (bytes/str unified); on Python 3 splitting bytes with a
    str separator would raise TypeError — verify the runtime.
    """
    args = ''
    help = 'Download an  from the  thread.'

    def handle(self, *args, **options):
        # Expect exactly: thread id, episode number.
        if len(args) != 2:
            self.stdout.write("Specify 2 arguments: ")
            quit()

        thread = args[0]
        episode = int(args[1])

        originalUrl = 'http://www.tvboxnow.com/%s' % thread
        loginUrl = 'http://www.tvboxnow.com/logging.php?action=login&loginsubmit=yes'

        # Log in first so the session cookie grants access to attachments.
        session = requests.Session()
        postdata = {'password':'abc123', 'username':'lordgul'}
        res = session.post(loginUrl, data=postdata, headers={'referer':originalUrl})
        res = session.get(originalUrl, headers={'referer':loginUrl})
        res.encoding = 'utf-8'
        #print >>open(newsThread+'.debug', 'w'), res.text.encode('utf-8')
        lines = res.text.encode('utf-8').split('\n')
        # Scan the thread page for the episode's torrent link, skipping DIVX variants.
        # NOTE(review): if no line matches, `attach` is never bound and the
        # session.get below raises NameError — consider failing with a message.
        for line in lines:
            #if today in line:
            if re.search('%d\.torrent' % episode, line):
                if re.search('DIVX', line):
                    continue
                m = re.search('a href=\"(.*?)\"', line)
                if m:
                    attach = m.group(1)
                    break

        #attach = "attachment.php?aid=3044607&k=7110c105d6f63d6fde2249ce295d0234&t=1424300372&fid=497&sid=e986eAA1gE2VIt23Bf3grtN%2BzV1QW6PpNUT0AN9WWcgzXxk"

        # The attachment page redirects via JavaScript; extract the real URL.
        res = session.get('http://www.tvboxnow.com/%s' % attach,
                headers={'referer':originalUrl})
        res.encoding = 'utf-8'
        #print >>open('attach.debug', 'w'), res.text.encode('utf-8')
        lines = res.text.encode('utf-8').split('\n')
        for line in lines:
            if 'window.location.href' in line:
                m = re.search('window.location.href =\'(.*?)\'', line)
                if m:
                    attach2 = m.group(1)
                    break

        #attach2 = "attachment.php?aid=3044607&k=6d08894a1401dbcaed228810df69b923&t=1424300529&ck=75275fec&sid=eb69a4mAPMfsfN7MXQKrYCzaoNwJX4IE%2BiONdlm98dg3%2F%2BU"


        # Stream the torrent payload to disk in chunks.
        misc = res.url
        res = session.get('http://www.tvboxnow.com/%s' % attach2,
                headers={'referer':misc}, stream=True)
        with open("%s-%d.torrent" % (thread, episode), "wb") as f:
            for chunk in res.iter_content():
                f.write(chunk)
+{"seq_id":"53615097","text":"import asyncio\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nimport discord\nimport feedparser\n\n\n # channel = self.get_channel(232917647250030592)\n\nclass Crisis:\n \"\"\"Parses krisinformationen.se for new crisis in Sweden\"\"\"\n def __init__(self, bot):\n self.bot = bot\n self.scheduler = AsyncIOScheduler()\n self.scheduler.add_job(self.at_time, 'cron', minute=40)\n self.scheduler.start()\n self.path = \"/root/discord-bot/\"\n self.id_log = self.path + \"data/crisis.log\"\n\n def check_log(self, entry):\n \"\"\"Checks if string exists in the log file\"\"\"\n with open(self.id_log, \"r+\") as file:\n for line in file:\n if entry in line.split():\n return True\n return False\n\n def string_to_log(self, entry):\n \"\"\"Adds the string to the log file\"\"\"\n with open(self.id_log, \"a\") as file:\n file.write(entry + \"\\n\")\n\n async def at_time(self):\n \"\"\"Parses krisinformationen and sends the informationen to the discord channel\"\"\"\n channel = self.bot.get_channel(232917647250030592)\n feed = feedparser.parse(\"http://api.krisinformation.se/v1/feed?format=xml\")\n suffix = \"en?format=xml\"\n for entry in feed.entries:\n if entry.id.endswith(suffix):\n break\n else:\n if not self.check_log(entry.id):\n text = entry.title + \"\\n\" + entry.summary # + \"\\n\" + entry.link\n self.string_to_log(entry.id)\n await channel.send(\"```\" + text + \"```\")\n\ndef setup(bot):\n bot.add_cog(Crisis(bot))\n","sub_path":"cogs/crisis.py","file_name":"crisis.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
# CMSSW configuration: build HSCP (Heavy Stable Charged Particle) candidates
# from an EXOHSCP data skim, with custom DT segment reconstruction and muon
# timing, writing a slimmed EDM output file (HSCP.root).
import FWCore.ParameterSet.Config as cms

process = cms.Process("HSCPAnalysis")

process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")

process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )

process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )

process.GlobalTag.globaltag = 'GR_P_V14::All'

process.source = cms.Source("PoolSource",
   fileNames = cms.untracked.vstring(
        '/store/data/Run2011B/MET/USER/EXOHSCP-PromptSkim-v1/0000/F6FA7586-9B02-E111-B438-001A92810AEC.root'
#        '/store/data/Run2011A/SingleMu/USER/EXOHSCP-05Aug2011-v1/0000/FC150FF5-0FC5-E011-913E-002618943876.root',
#        '/store/data/Run2011A/SingleMu/USER/EXOHSCP-05Aug2011-v1/0000/FA8B74F8-0FC5-E011-9EE5-001A928116B4.root',
#        '/store/data/Run2011A/SingleMu/USER/EXOHSCP-05Aug2011-v1/0000/FA432D78-CEC5-E011-B4F3-00261894394F.root'
   )
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")



########################################################################
process.load('SUSYBSMAnalysis.Skimming.EXOHSCP_cff')
process.load("SUSYBSMAnalysis.HSCP.HSCParticleProducerFromSkim_cff")  #IF RUNNING ON HSCP SKIM
process.load("SUSYBSMAnalysis.HSCP.HSCPTreeBuilder_cff")

######################################################################## INCREASING HSCP TRIGGER TRESHOLD FOR OLD DATA

process.HSCPHLTFilter = cms.EDFilter("HSCPHLTFilter",
   TriggerProcess = cms.string("HLT"),
   RemoveDuplicates = cms.bool(False),
   MuonTrigger1Mask = cms.int32(1),  #Activated
   PFMetTriggerMask = cms.int32(0),  #Deactivated
)

######################################################################## SPECIAL CASE FOR DATA

# Override the dE/dx calibration record with a local sqlite payload.
process.GlobalTag.toGet = cms.VPSet(
   cms.PSet( record = cms.string('SiStripDeDxMip_3D_Rcd'),
             tag = cms.string('Data7TeV_Deco_3D_Rcd_38X'),
             connect = cms.untracked.string("sqlite_file:Data7TeV_Deco_SiStripDeDxMip_3D_Rcd.db")),
)

# Loosened DT 2D/4D segment reconstruction, cloned for the muon-timing input.
process.load("RecoLocalMuon.DTSegment.dt4DSegments_MTPatternReco4D_LinearDriftFromDBLoose_cfi")
process.dt4DSegments.Reco4DAlgoConfig.Reco2DAlgoConfig.AlphaMaxPhi = 1.0
process.dt4DSegments.Reco4DAlgoConfig.Reco2DAlgoConfig.AlphaMaxTheta = 0.9
process.dt4DSegments.Reco4DAlgoConfig.Reco2DAlgoConfig.segmCleanerMode = 2
process.dt4DSegments.Reco4DAlgoConfig.Reco2DAlgoConfig.MaxChi2 = 1.0
process.dt4DSegmentsMT = process.dt4DSegments.clone()
process.dt4DSegmentsMT.Reco4DAlgoConfig.recAlgoConfig.stepTwoFromDigi = True
process.dt4DSegmentsMT.Reco4DAlgoConfig.Reco2DAlgoConfig.recAlgoConfig.stepTwoFromDigi = True

# Muon timing uses the MT-cloned segments with relaxed matching.
process.muontiming.TimingFillerParameters.DTTimingParameters.MatchParameters.DTsegments = "dt4DSegmentsMT"
process.muontiming.TimingFillerParameters.DTTimingParameters.HitsMin = 3
process.muontiming.TimingFillerParameters.DTTimingParameters.RequireBothProjections = False
process.muontiming.TimingFillerParameters.DTTimingParameters.DropTheta = True
process.muontiming.TimingFillerParameters.DTTimingParameters.DoWireCorr = True
process.muontiming.TimingFillerParameters.DTTimingParameters.MatchParameters.DTradius = 1.0


########################################################################
process.nEventsBefEDM = cms.EDProducer("EventCountProducer")
########################################################################

process.Out = cms.OutputModule("PoolOutputModule",
     outputCommands = cms.untracked.vstring(
         "drop *",
         'keep EventAux_*_*_*',
         'keep LumiSummary_*_*_*',
         'keep edmMergeableCounter_*_*_*',
         "keep *_genParticles_*_*",
         "keep GenEventInfoProduct_generator_*_*",
         "keep *_offlinePrimaryVertices_*_*",
         #"keep *_cscSegments_*_*",
         #"keep *_rpcRecHits_*_*",
         #"keep *_dt4DSegments_*_*",
         "keep SiStripClusteredmNewDetSetVector_generalTracksSkim_*_*",
         "keep SiPixelClusteredmNewDetSetVector_generalTracksSkim_*_*",
         #"keep *_reducedHSCPhbhereco_*_*", #
         #"keep *_reducedHSCPEcalRecHitsEB_*_*", #
         #"keep *_reducedHSCPEcalRecHitsEE_*_*", #
         "keep *_TrackRefitter_*_*",
         "drop TrajectorysToOnerecoTracksAssociation_TrackRefitter__",
         "keep *_standAloneMuons_*_*",
         "drop recoTracks_standAloneMuons__*",
         "keep *_globalMuons_*_*", #
         "keep *_muonsSkim_*_*",
         "keep edmTriggerResults_TriggerResults_*_*",
         "keep recoPFJets_ak5PFJets__*", #
         "keep recoPFMETs_pfMet__*", #
         "keep *_HSCParticleProducer_*_*",
         "keep *_HSCPIsolation01__*",
         "keep *_HSCPIsolation03__*",
         "keep *_HSCPIsolation05__*",
         "keep *_dedx*_*_HSCPAnalysis",
         "keep *_muontiming_*_HSCPAnalysis",
         "keep triggerTriggerEvent_hltTriggerSummaryAOD_*_*",
     ),
     fileName = cms.untracked.string('HSCP.root'),
     SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('p1')
     ),
)

# DT calibration (tTrig / vDrift) taken from the Frontier prep database and
# preferred over the GlobalTag payloads.
from CondCore.DBCommon.CondDBSetup_cfi import CondDBSetup 
process.tTrigDB = cms.ESSource("PoolDBESSource",
    CondDBSetup,
    timetype = cms.string('runnumber'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('DTTtrigRcd'),
        tag = cms.string('DTTtrig_offline_prep_V03'),
        label = cms.untracked.string('')
        )),
    connect = cms.string('frontier://FrontierPrep/CMS_COND_DT'),
    authenticationMethod = cms.untracked.uint32(0)
    )
#process.tTrigDB.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb'
process.es_prefer_tTrigDB = cms.ESPrefer('PoolDBESSource','tTrigDB')

process.vDriftDB = cms.ESSource("PoolDBESSource",
    CondDBSetup,
    timetype = cms.string('runnumber'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('DTMtimeRcd'),
        tag = cms.string('DTVdrift_offline_prep_V03'),
        label = cms.untracked.string('')
        )),
    connect = cms.string('frontier://FrontierPrep/CMS_COND_DT'),
    authenticationMethod = cms.untracked.uint32(0)
    )
#process.vDriftDB.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb'
process.es_prefer_vDriftDB = cms.ESPrefer('PoolDBESSource','vDriftDB')

########################################################################


#LOOK AT SD PASSED PATH IN ORDER to avoid as much as possible duplicated events (make the merging of .root file faster)
process.p1 = cms.Path(process.nEventsBefEDM * process.HSCPHLTFilter * process.dt4DSegmentsMT * process.HSCParticleProducerSeq)
#process.p1 = cms.Path(process.HSCParticleProducerSeq)
process.endPath1 = cms.EndPath(process.Out)
process.schedule = cms.Schedule( process.p1, process.endPath1)
+{"seq_id":"47646192","text":"from __future__ import print_function\nfrom ortools.sat.python import cp_model\n\n\ndef main():\n # Instantiate a cp model.\n node_cpu = [2, 3, 5, 1]\n\n task_cpu = [2, 3, 2, 2, 1]\n\n num_nodes = len(node_cpu)\n num_tasks = len(task_cpu)\n\n all_nodes = range(num_nodes)\n all_tasks = range(num_tasks)\n\n model = cp_model.CpModel()\n # Variables\n x = []\n for i in all_nodes:\n t = []\n for j in all_tasks:\n t.append(model.NewBoolVar('x[%i,%i]' % (i, j)))\n x.append(t)\n\n # Constraints\n\n # Each task is assigned to exactly one worker.\n [model.Add(sum(x[i][j] for i in all_nodes) == 1) for j in all_tasks]\n\n # Each node is not overcommitted\n for i in all_nodes:\n model.Add(sum(task_cpu[j] * x[i][j]\n for j in all_tasks) <= node_cpu[i])\n\n # Objective: overall idle resources\n idle_cpu = model.NewIntVar(\n 0, sum(node_cpu[i] for i in all_nodes), 'idle_cpu')\n model.Add(idle_cpu == sum(node_cpu[i] for i in all_nodes) - sum(x[i][j] * task_cpu[j]\n for j in all_tasks for i in all_nodes))\n model.Maximize(idle_cpu)\n\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n\n if status == cp_model.OPTIMAL:\n print('Total idle resources = %i' % solver.ObjectiveValue())\n print()\n for i in all_nodes:\n print('Worker ', i, ':')\n for j in all_tasks:\n if solver.Value(x[i][j]) == 1:\n print('- task ', j)\n print()\n\n print()\n\n print(solver.ResponseStats())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"misc/ortools/ortools-test.py","file_name":"ortools-test.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
# The set of recognized content classifications for a share file.
file_types = frozenset(['good', 'bad', 'empty'])


class ShareFile:
    """A file on a share, tagged with one of the known content types."""

    def __init__(self, content):
        # Reject anything outside the known classification set up front.
        assert content in file_types
        self.content = content
+{"seq_id":"335788772","text":"import zipfile\r\nimport os\r\nimport shutil\r\n\r\nAPCSPATH = \"C:\\\\Users\\\\mlgpr\\\\Desktop\\\\APCS\"\r\n\r\ndef zipfolder(foldername):\r\n shutil.make_archive(\"temp\",\"zip\",APCSPATH+\"\\\\\"+foldername)\r\n\r\ndef getFolder():\r\n folders = [name for name in os.listdir(APCSPATH)]\r\n count = 1\r\n for folder in folders:\r\n print(f\"{count}: {folder}\")\r\n count+=1\r\n return folders[int(input(\"Enter selection :: \"))-1]\r\n\r\ndef delTemp():\r\n os.remove(\"temp.zip\")\r\n\r\nusrfolder = getFolder()\r\nzipfolder(usrfolder)\r\n","sub_path":"attachLabAutomation/attachlabs.py","file_name":"attachlabs.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
number = 24

def solution(N):
    """Count the divisors of N by pairing each divisor below sqrt(N) with
    its complement above, counting an exact square root once."""
    count = 0
    candidate = 1
    while candidate * candidate < N:
        if N % candidate == 0:
            # candidate and N // candidate form a distinct divisor pair.
            count += 2
        candidate += 1
    if candidate * candidate == N:
        # Perfect square: its root pairs with itself.
        count += 1
    return count

print(solution(number))
+{"seq_id":"500782386","text":"# -*- coding: utf-8 -*- #\n# Copyright 2015 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Triggers execution of a Google Cloud Function.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport json\n\nfrom googlecloudsdk.api_lib.functions import util\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.calliope import exceptions\nfrom googlecloudsdk.command_lib.functions import flags\n\nimport six\n\n\nclass Call(base.Command):\n \"\"\"Trigger execution of a Google Cloud Function.\n\n ## EXAMPLES\n\n To call a function, giving it 'Hello World!' in the message field of its event\n argument (depending on your environment you might need to escape\n characters in `--data` flag value differently), run:\n\n $ {command} helloWorld --data='{\"message\": \"Hello World!\"}'\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n flags.AddFunctionResourceArg(parser, 'to execute')\n parser.add_argument(\n '--data',\n help='JSON string with data that will be passed to the function.')\n\n @util.CatchHTTPErrorRaiseHTTPException\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. 
All the arguments that were provided to this\n command invocation.\n\n Returns:\n Function call results (error or result with execution id)\n \"\"\"\n if args.data:\n try:\n json.loads(args.data)\n except ValueError as e:\n raise exceptions.InvalidArgumentException(\n '--data', 'Is not a valid JSON: ' + six.text_type(e))\n client = util.GetApiClientInstance()\n function_ref = args.CONCEPTS.name.Parse()\n # Do not retry calling function - most likely user want to know that the\n # call failed and debug.\n client.projects_locations_functions.client.num_retries = 0\n messages = client.MESSAGES_MODULE\n return client.projects_locations_functions.Call(\n messages.CloudfunctionsProjectsLocationsFunctionsCallRequest(\n name=function_ref.RelativeName(),\n callFunctionRequest=messages.CallFunctionRequest(data=args.data)))\n","sub_path":"google-cloud-sdk/lib/surface/functions/call.py","file_name":"call.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"122873468","text":"import numpy as np\nimport pickle\n\ndata_path = '../data/profile_snapshots.dat'\noutput_dir = '../plots/theory/'\n\n\ndef average_replicas(data):\n het_global_averaged = np.zeros(data[1].shape[0])\n het_local_averaged = np.zeros(data[1].shape[0])\n replicas = 0\n for key in data.keys():\n het_global, het_local = calculate_heterozygosities(data[key])\n het_global_averaged += het_global\n het_local_averaged += het_local\n replicas += 1\n\n first_key = list(data.keys())[0]\n time, _, _ = separate_data(data[first_key])\n return time, het_global_averaged / replicas, het_local_averaged / replicas\n\n\ndef calculate_heterozygosities(profile):\n t_array, n1_array, n2_array = separate_data(profile)\n het_global = het_global_average(n1_array, n2_array)\n f_array = calculate_f(n1_array, n2_array)\n het_local = het_local_average(f_array, n1_array + n2_array)\n return het_global, het_local\n\ndef separate_data(profile):\n t_list = []\n n1_list = []\n n2_list = []\n for snapshot in profile:\n t_list.append(snapshot[0])\n n1_list.append([])\n n2_list.append([])\n\n for i in range(1, len(snapshot), 2):\n n1_list[-1].append(snapshot[i])\n n2_list[-1].append(snapshot[i + 1])\n return np.array(t_list), np.array(n1_list), np.array(n2_list)\n\ndef calculate_f(n1_array, n2_array):\n n_array = n1_array + n2_array\n f_array = np.zeros(n_array.shape)\n non_zero = np.where(n_array > 0)\n f_array[non_zero] = n1_array[non_zero] / n_array[non_zero]\n return f_array\n\ndef het_global_average(n1_array, n2_array):\n n1_total = np.sum(n1_array, axis=1)\n n_total = np.sum(n1_array + n2_array, axis=1)\n f_global = n1_total / n_total\n return 2 * f_global * (1 - f_global)\n\ndef het_local_average(f_array, n_array):\n f_average = []\n for i, n in enumerate(n_array):\n f_average.append(np.mean(f_array[i][np.where(n > 0)[0]]))\n f_average = np.array(f_average)\n return 2 * f_average * (1 - f_average)\n\nif __name__ == '__main__':\n with open(data_path, 'rb') as f_in:\n 
data = pickle.load(f_in)\n calculate_heterozygosities(data[1])\n time, het_global_averaged, het_local_averaged = average_replicas(data)\n het_averaged = {'time':time, 'global':het_global_averaged, 'local':het_local_averaged}\n\n with open('../data/het_average.dat', 'wb') as f_out:\n pickle.dump(het_averaged, f_out)\n","sub_path":"scripts/average_heterozygosities.py","file_name":"average_heterozygosities.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"536987127","text":"\n\ndef quicksort(list):\n if len(list) < 2:\n return list\n tmp = list[0]\n less = [x for x in list[1:] if x <= tmp]\n more = [x for x in list[1:] if x > tmp]\n return quicksort(less) + [tmp] + quicksort(more)\n\nquicksort([6,5,4,2,1])","sub_path":"algorithm/my_quicksort_0615.py","file_name":"my_quicksort_0615.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"106122882","text":"class Solution:\n def findKthPositive(self, arr: List[int], k: int) -> int:\n missingNumbersLength, lastMissedNumber = 0, 0\n number, i = 1, 0\n while missingNumbersLength < k:\n if i < len(arr) and number == arr[i]:\n number += 1\n i += 1\n else:\n lastMissedNumber = number\n missingNumbersLength += 1\n number += 1\n return lastMissedNumber","sub_path":"LeetCode/Kth Missing Positive Number.py","file_name":"Kth Missing Positive Number.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"517045916","text":"#!/usr/bin/env python3\n\n\"\"\"\nApple EFI Split\nApple EFI IM4P Splitter\nCopyright (C) 2018-2019 Plato Mavropoulos\n\"\"\"\n\nprint('Apple EFI IM4P Splitter v1.3')\n\nimport os\nimport re\nimport sys\n\nim4p = re.compile(br'\\x16\\x04\\x49\\x4D\\x34\\x50\\x16\\x04') # Apple IM4P\nifd = re.compile(br'\\x5A\\xA5\\xF0\\x0F.{172}\\xFF{16}', re.DOTALL) # Intel Flash Descriptor\n\n# Get input catalog file paths\nif len(sys.argv) >= 3 and sys.argv[1] == '-skip' :\n\t# Called via Apple_EFI_Package\n\tapple_im4p = sys.argv[2:]\n\tskip_pause = True\nelif len(sys.argv) >= 2 :\n\t# Drag & Drop or CLI\n\tapple_im4p = sys.argv[1:]\n\tskip_pause = False\nelse :\n\t# Folder path\n\tapple_im4p = []\n\tskip_pause = False\n\tin_path = input('\\nEnter the full folder path: ')\n\tprint('\\nWorking...')\n\tfor root, dirs, files in os.walk(in_path):\n\t\tfor name in files :\n\t\t\tapple_im4p.append(os.path.join(root, name))\n\nfor input_file in apple_im4p :\n\tfile_path = os.path.abspath(input_file)\n\tfile_name = os.path.basename(input_file)\n\tfile_dir = os.path.dirname(file_path)\n\tfile_ext = os.path.splitext(file_path)[1]\n\t\n\tprint('\\nFile: %s%s' % (file_name, file_ext))\n\t\n\t# Must be IM4P file because its size is 0x0 dependent\n\tif file_ext not in ('.im4p','.IM4P') :\n\t\tprint('\\n Error: Could not find IM4P file extension at %s!' % file_name)\n\t\tcontinue\n\t\n\twith open(input_file, 'rb') as in_file : buffer = in_file.read()\n\t\n\tis_im4p = im4p.search(buffer) # Detect IM4P pattern\n\t\n\tif not is_im4p :\n\t\tprint('\\n Error: Could not find IM4P pattern at %s!' % file_name)\n\t\tcontinue\n\t\n\tim4p_size = int.from_bytes(buffer[2:is_im4p.start()], 'big') # Variable, from 0x2 - IM4P\n\tim4p_type = buffer[is_im4p.end():is_im4p.end() + 0x4].decode('utf-8') # mefi\n\t\n\tif im4p_type != 'mefi' :\n\t\tprint('\\n Error: Could not find \"mefi\" IM4P Type at %s!' 
% file_name)\n\t\tcontinue\n\t\n\tpayload_start = is_im4p.start() + buffer[is_im4p.start() - 0x1]\n\tpayload_size = int.from_bytes(buffer[is_im4p.end() + 0x9:is_im4p.end() + 0xD], 'big')\n\t\n\tifd_count = list(ifd.finditer(buffer)) # Count the Intel FD(s) to determine each SPI size and offset\n\t\n\t# After IM4P mefi (0x15), multi SPI payloads have _MEFIBIN (0x100, difficult to reverse without varying samples)\n\tspi_start = payload_start + 0x100 if buffer[payload_start:payload_start + 0x8] == b'_MEFIBIN' else payload_start\n\t\n\tspi_size = int(len(buffer[spi_start:]) / len(ifd_count)) # Each SPI should be of the same size (1st PRD, 2nd PRE)\n\t\n\t# Parse all Intel FD and extract each SPI image\n\tfor fd in range(len(ifd_count)) :\n\t\tfile_path_new = os.path.join(file_dir, '%s_%d.fd' % (file_name[:-5], fd + 1))\n\t\t\n\t\twith open(file_path_new, 'wb') as spi_image : spi_image.write(buffer[spi_start:spi_start + spi_size])\n\t\t\n\t\tspi_start += spi_size\n\t\t\n\tprint('\\n Split IM4P file into %d SPI/BIOS image(s)!' % len(ifd_count))\n\t\t\nif not skip_pause : input('\\nDone!')","sub_path":"Apple EFI IM4P Splitter/Apple_EFI_Split.py","file_name":"Apple_EFI_Split.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"7063617","text":"# Simple Robot Program\nimport logging\nimport os\n\nfrom RobotMenu import RobotMenu\nfrom Movement import Movement\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\ndirectory = '/home/pi/logs'\n\nif not os.path.exists(directory):\n os.makedirs(directory)\n\nhandler = logging.FileHandler(directory + '/TurtleThoughts.log')\nhandler.setLevel(logging.INFO)\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\nlogger.addHandler(handler)\n\nlogger.info('Started Run Robot')\n\nm = Movement(logger)\nr = RobotMenu(logger, m)\n\nr.execute()\n\nfrom Movement import Movement\nfrom DanceRoutines import DanceRoutines\nimport logging\n\nlogging.basicConfig(filename='example.log', filemode='w', level=logging.DEBUG)\nlogger = logging.getLogger('basic')\n\n\nm = Movement('test')\nd = DanceRoutines(m, logger)","sub_path":"RunRobot.py","file_name":"RunRobot.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"498022149","text":"from django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', views.index),\n path('conversation', views.conversation),\n path('conversation/index', views.conversation),\n path('conversation/draft', views.drafts),\n path('conversation/sent', views.sent),\n path('conversation/trash', views.trash),\n path('conversation/create', views.creat_mail),\n path('user_role', views.user_roles),\n path('user_role/index', views.user_roles),\n path('user_role/add', views.add_role),\n path('user_role/delete/', views.delete_role),\n path('user_role/edit/', views.edit_role),\n path('user_role/update_role/', views.update_role),\n path('dashboard', views.dashboard),\n path('logout', views.logout),\n path('users/index', views.users),\n path('users', views.users),\n path('users/view/', views.view_user),\n path('users/edit/', views.edit_user),\n path('users/delete/', views.delete_user),\n path('users/add', views.add_user),\n path('api_settings', views.api_settings),\n path('api_settings/index', views.api_settings),\n path('api_settings/add', views.add_api),\n path('api_settings/edit/', views.edit_api),\n path('api_settings/delete/', views.delete_api),\n path('system_setting', views.system_setting),\n path('system_setting/index', views.system_setting),\n path('system_setting/edit/', views.system_setting_edit),\n path('tweets_gathering', views.tweets_gathering),\n path('twitter_users', views.twitter_users),\n path('twitter_users/view', views.view_profile),\n path('analysis', views.analysis),\n path('analysis_view', views.analysis_view),\n path('categorization', views.categorization),\n path('category_analysis', views.category_analysis),\n path('fake_users', views.faked_account)\n # path('tweets_gathering')\n ] + static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n","sub_path":"fyp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"545730529","text":"# This is the final version of the server program for the front unit\nfrom socket import *\nfrom errno import *\n\n# for decoding and displaying the image\nimport base64\nfrom PIL import Image, ImageTk\nimport tkinter\n\nfrom time import *\n\nimport signal\nimport sys\nimport subprocess\n\nimport RPi.GPIO as GPIO\n\n# ------------------\n# Defining Variables\n# ------------------\n\n# Set variables\nnoConnect = True # Boolean for stating connection of a client\nhasLost = False # Boolean for packet loss\nfrontLEDs = 0\nsettleSleep = 0.5\nmeasurementSleep = 0.00001\nMSS = 9999 # Max Segment Size\npicture = \"/home/pi/Capstone_Networking/test_decode.jpg\"\nlogo = \"/home/pi/Capstone_Networking/logo.jpg\"\nencode_string = []\ncheck_pt = 0 # where packets need to be checked for loss\ncp_value = 1 # SS/cp_value = num of packets sent per check\npacketsRec = [0] * MSS # stores packets that have been received\n\n# Dictionaries for Flag Values\ndictRec = {'0':\"INIT_SYN\",'1':\"INIT_SYNACK\",'2':\"INIT_ACK\",'3':\"FULL_DATA_SYN\",'4':\"FULL_DATA_ACK\",'5':\"SYNC_SYN\",'6':\"SYNC_ACK\",'7':\"DATA_SYN\",'8':\"DATA_ACK\",'9':\"DATA_CAM\",'A':\"DATA_SEN\",'B':\"MODE_SYN\",'C':\"MODE_ACK\", 'D':\"DCNT\"}\n\ndictSend = {\"INIT_SYN\":'0',\"INIT_SYNACK\":'1',\"INIT_ACK\":'2',\"FULL_DATA_SYN\":'3',\"FULL_DATA_ACK\":'4',\"SYNC_SYN\":'5',\"SYNC_ACK\":'6',\"DATA_SYN\":'7',\"DATA_ACK\":'8',\"DATA_CAM\":'9',\"DATA_SEN\":'A',\"MODE_SYN\":'B',\"MODE_ACK\":'C',\"DCNT\":'D'}\n\n# GPIO pins (BCM) and their purpose\nGPIO.setmode(GPIO.BCM)\nGPIO_MODE_SEL = 16\nGPIO_TRIGGER = 23\nGPIO_ECHO = 20\nGPIO_LEDS_RIGHT = 21\nGPIO_LEDS_LEFT = 27\nGPIO_LED_SEL0 = 2\nGPIO_LED_SEL1 = 14\nGPIO_LED_SEL2 = 4 #MSB\nGPIO_LED_EN = 17\nGPIO_LED_STAT = 11\nGPIO_SAFE_SD = 3\nGPIO_LBO = 26\n\n# Set pins as output and 
input\nGPIO.setup(GPIO_MODE_SEL,GPIO.IN)\nGPIO.setup(GPIO_TRIGGER,GPIO.OUT)\nGPIO.setup(GPIO_ECHO,GPIO.IN)\nGPIO.setup(GPIO_LEDS_RIGHT,GPIO.OUT)\nGPIO.setup(GPIO_LEDS_LEFT,GPIO.OUT)\nGPIO.setup(GPIO_LED_SEL0,GPIO.OUT)\nGPIO.setup(GPIO_LED_SEL1,GPIO.OUT)\nGPIO.setup(GPIO_LED_SEL2,GPIO.OUT)\nGPIO.setup(GPIO_LED_EN,GPIO.OUT)\nGPIO.setup(GPIO_LED_STAT, GPIO.OUT)\n\nGPIO.setup(GPIO_SAFE_SD, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.add_event_detect(GPIO_SAFE_SD, GPIO.FALLING)\n\nGPIO.setup(GPIO_LBO, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\nGPIO.add_event_detect(GPIO_LBO, GPIO.RISING)\n\nGPIO.output(GPIO_TRIGGER,False)\nGPIO.output(GPIO_LEDS_RIGHT, False)\nGPIO.output(GPIO_LEDS_LEFT, False)\nGPIO.output(GPIO_LED_SEL0, False)\nGPIO.output(GPIO_LED_SEL1, False)\nGPIO.output(GPIO_LED_SEL2, False)\nGPIO.output(GPIO_LED_EN, False)\nGPIO.output(GPIO_LED_STAT, False)\n\n# ------------------\n# Defining Functions\n# ------------------\n\n# -----------------------------------------------\n# decode_string decodes the image from the client\n# -----------------------------------------------\ndef decode_string(image_64_encode):\n global encode_string\n encode_string = []\n image_64_decode = base64.decodestring(image_64_encode)\n with open(picture,'wb') as image_result:\n image_result.write(image_64_decode)\n update_image()\n\n# ----------------------------------------------\n# update_image: updates the image being displayed\n# ----------------------------------------------\ndef update_image():\n global camImg\n camImg = ImageTk.PhotoImage(Image.open(picture))\n label.config(image = camImg)\n label.pack()\n\n# ---------------------------------------------------------------------------\n# check_point: sends to client a data_ack indicating if there was packet loss\n# ---------------------------------------------------------------------------\ndef check_point(SegmentSize):\n global check_pt\n global packetsRec\n\n packet_dropped = -1\n for i in range (check_pt,(check_pt + 
(SegmentSize//cp_value))):\n if packetsRec[i] == 0:\n packet_dropped = i\n break\n\n # Sending DATA_ACK\n message = dictSend[\"DATA_ACK\"] + \",0001,0001,CAM!\" + str(packet_dropped)\n\n serverSocket.sendto(message.encode(),clientAddress)\n\n return packet_dropped\n\n# --------------------------------------------\n# splitData is used to split data based on '!'\n# --------------------------------------------\ndef splitData(data):\n data_decoded = data.decode()\n newData = data_decoded.split('!')\n data1 = newData[0]\n data2 = newData[1]\n\n return data1, data2\n\n# ------------------------------------------------\n# MeasureLidar takes measurement to nearest object\n# in front of the rider. Returns the distance to\n# that object\n# ------------------------------------------------\ndef MeasureLidar():\n # This function measures a distance\n GPIO.output(GPIO_TRIGGER, True)\n # Wait 10us\n sleep(measurementSleep)\n GPIO.output(GPIO_TRIGGER, False)\n start = time()\n\n while GPIO.input(GPIO_ECHO) == 0:\n start = time()\n\n while GPIO.input(GPIO_ECHO) == 1:\n stop = time()\n\n stop = time()\n\n elapsed = stop - start # every 10 microseconds = 1 cm\n distance = elapsed * (10 ** 5) # in cm\n distance = distance * 0.0328084 # in feet\n\n return distance\n\n# -----------------------------------------\n# UpdateLidar takes 'n' measurements and\n# sets the value for the number of LEDs to\n# be turned on.\n# -----------------------------------------\ndef UpdateLidar():\n global frontLEDs\n\n n = 3 # num of measurements taken\n\n listDist = []\n for i in range(0,n):\n listDist.append(MeasureLidar())\n\n last = listDist.pop()\n for d in listDist:\n\n if d < 12 and last < 12:\n frontLEDs = 8\n break\n elif d >= 12 and d < 24 and last >= 12 and last < 24:\n frontLEDs = 7\n break\n elif d >= 24 and d < 36 and last >= 24 and last < 36:\n frontLEDs = 6\n break\n elif d >= 36 and d < 48 and last >= 36 and last < 48:\n frontLEDs = 5\n break\n elif d >= 48 and d < 60 and last >= 48 and 
last < 60:\n frontLEDs = 4\n break\n elif d >= 60 and d < 72 and last >= 60 and last < 72:\n frontLEDs = 3\n break\n elif d >= 72 and d < 80 and last >= 72 and last < 80:\n frontLEDs = 2\n break\n elif d >= 80 and d < 100 and last >= 80 and last < 100:\n frontLEDs = 1\n break\n\ndef disconnect():\n print(\"Front Unit Shutting Down\")\n GPIO.cleanup()\n serverSocket.close()\n subprocess.call(['shutdown','-h','now'], shell=False)\n sys.exit(0)\n\n# ---------------\n# Main Script\n# ---------------\n\n# Allow for time for setting up GPIO\nsleep(settleSleep)\n\n# Setting up socket\nserver_port = 12000\nserverSocket = socket(AF_INET,SOCK_DGRAM)\nserverSocket.bind(('',server_port))\n\n# Setting up gui for displaying image\nw = tkinter.Tk()\nim = Image.open(logo)\ncamImg = ImageTk.PhotoImage(im)\nlabel = tkinter.Label(w,image=camImg)\nlabel.pack()\nw.update()\n\n# Setting the mode of the system\nloop_count = 0\ncolor = False\nwhile loop_count < 15:\n color = not color\n sleep(.1)\n loop_count = loop_count + 1\n\nif GPIO.input(GPIO_MODE_SEL):\n sys_mode = \"FB\"\nelse:\n sys_mode = \"BS\"\n\nloop_count = 0\nwhile loop_count < 150:\n if sys_mode == \"BS\":\n GPIO.output(GPIO_LED_STAT, False)\n sleep(.01)\n GPIO.output(GPIO_LED_STAT, True)\n sleep(.01)\n else:\n GPIO.output(GPIO_LED_STAT, True)\n sleep(.02)\n loop_count = loop_count + 1\n\nloop_count = 0\ncolor = False\nserverSocket.setblocking(False) # to allow for the loop to process\n# Initial Handshaking loop\nwhile noConnect:\n sleep(0.1)\n color = not color\n GPIO.output(GPIO_LED_STAT, color)\n try:\n message_rec = True\n response, clientAddress = serverSocket.recvfrom(2048)\n except error as e:\n if e.errno is 107 or e.errno is 11:\n message_rec = False\n if message_rec:\n\n splitPacket = response.split(b',')\n\n if dictRec[splitPacket[0].decode()] == \"INIT_SYN\":\n # send back INIT_SYNACK\n message = dictSend[\"INIT_SYNACK\"] + \",0001,0001,VOID\"\n\n serverSocket.sendto(message.encode(),clientAddress)\n elif 
dictRec[splitPacket[0].decode()] == \"INIT_ACK\":\n # Send back MODE_SYN\n message = dictSend[\"MODE_SYN\"] + \",0001,0001,\" + sys_mode\n serverSocket.sendto(message.encode(),clientAddress)\n\n noConnect = False\n message = \"Connected\"\n serverSocket.setblocking(True)\n # Wait for MODE_ACK, DATA = \"MODE\"\n response, clientAddress = serverSocket.recvfrom(2048)\n\n # send back FULL_DATA_ACK, DATA = \"MODE!VOID\"\n message = dictSend[\"FULL_DATA_ACK\"] + \",0001,0001,\" + sys_mode + \"!VOID\"\n serverSocket.sendto(message.encode(),clientAddress)\n\n elif dictRec[splitPacket[0].decode()] == \"DCNT\":\n disconnect()\n\nGPIO.setup(GPIO_LED_STAT, GPIO.IN)\nled_flag = False\n\n# begin loop\nwhile True:\n if led_flag:\n GPIO.setup(GPIO_LED_STAT, GPIO.OUT)\n GPIO.output(GPIO_LED_STAT, True)\n else:\n GPIO.setup(GPIO_LED_STAT, GPIO.IN)\n response, clientAddress = serverSocket.recvfrom(2048)\n splitPacket = response.split(b',')\n print(dictRec[splitPacket[0].decode()])\n\n if GPIO.event_detected(GPIO_SAFE_SD) or GPIO.event_detected(GPIO_LBO):\n message = dictSend[\"DCNT\"] + \",0001,0001,VOID\"\n serverSocket.sendto(message.encode(),clientAddress)\n disconnect()\n\n if dictRec[splitPacket[0].decode()] == \"FULL_DATA_SYN\":\n\n sys_mode,data_type = splitData(splitPacket[3])\n\n if data_type == \"CAM\":\n led_flag = not led_flag\n #reset values\n check_pt = 0\n packetsRec = [0] * MSS\n\n full_string = b''\n i = 0\n\n while(i < len(encode_string)):\n full_string = full_string + encode_string[i]\n i = i + 1\n\n decode_string(full_string)\n\n message = dictSend[\"FULL_DATA_ACK\"] + \",0001,0001,\" + sys_mode + \"!CAM\"\n serverSocket.sendto(message.encode(),clientAddress)\n elif data_type == \"SEN\":\n message = dictSend[\"FULL_DATA_ACK\"] + \",0001,0001,\" + sys_mode + \"!SEN\"\n serverSocket.sendto(message.encode(),clientAddress)\n\n elif dictRec[splitPacket[0].decode()] == \"SYNC_SYN\":\n\n data_type,SS = splitData(splitPacket[3])\n SegmentSize = int(SS)\n\n message = 
dictSend[\"SYNC_ACK\"] + \",0001,0001,\" + data_type + '!' + SS\n serverSocket.sendto(message.encode(), clientAddress)\n\n elif dictRec[splitPacket[0].decode()] == \"DATA_SYN\":\n\n data_type,other_data = splitData(splitPacket[3])\n\n if data_type == \"CAM\":\n #check for packet loss\n\n SegmentSize = int(other_data)\n\n packet_dropped = check_point(SegmentSize)\n\n if (packet_dropped == -1):\n hasLost = False\n check_pt = check_pt + (SegmentSize//cp_value)\n else:\n hasLost = True\n elif data_type == \"SEN\":\n message = dictSend[\"DATA_ACK\"] + \",0001,0001,SEN!VOID\"\n serverSocket.sendto(message.encode(), clientAddress)\n\n elif dictRec[splitPacket[0].decode()] == \"DATA_CAM\":\n\n SegmentSize = int(splitPacket[1])\n SegmentNum = int(splitPacket[2])\n\n packetsRec[SegmentNum] = 1\n\n # Append the encoded image data\n if (SegmentNum == len(encode_string) or hasLost == False):\n encode_string.append(splitPacket[3])\n else:\n encode_string[SegmentNum] = splitPacket[3]\n\n elif dictRec[splitPacket[0].decode()] == \"DATA_SEN\":\n # handle the sensor data\n LS,RS = splitData(splitPacket[3])\n\n UpdateLidar()\n\n if frontLEDs == 0:\n GPIO.output(GPIO_LED_SEL0, False)\n GPIO.output(GPIO_LED_SEL1, False)\n GPIO.output(GPIO_LED_SEL2, False)\n GPIO.output(GPIO_LED_EN, False)\n elif frontLEDs == 1:\n GPIO.output(GPIO_LED_SEL0, False)\n GPIO.output(GPIO_LED_SEL1, False)\n GPIO.output(GPIO_LED_SEL2, False)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 2:\n GPIO.output(GPIO_LED_SEL0, True)\n GPIO.output(GPIO_LED_SEL1, False)\n GPIO.output(GPIO_LED_SEL2, False)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 3:\n GPIO.output(GPIO_LED_SEL0, False)\n GPIO.output(GPIO_LED_SEL1, True)\n GPIO.output(GPIO_LED_SEL2, False)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 4:\n GPIO.output(GPIO_LED_SEL0, True)\n GPIO.output(GPIO_LED_SEL1, True)\n GPIO.output(GPIO_LED_SEL2, False)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 5:\n GPIO.output(GPIO_LED_SEL0, 
False)\n GPIO.output(GPIO_LED_SEL1, False)\n GPIO.output(GPIO_LED_SEL2, True)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 6:\n GPIO.output(GPIO_LED_SEL0, True)\n GPIO.output(GPIO_LED_SEL1, False)\n GPIO.output(GPIO_LED_SEL2, True)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 7:\n GPIO.output(GPIO_LED_SEL0, False)\n GPIO.output(GPIO_LED_SEL1, True)\n GPIO.output(GPIO_LED_SEL2, True)\n GPIO.output(GPIO_LED_EN, True)\n elif frontLEDs == 8:\n GPIO.output(GPIO_LED_SEL0, True)\n GPIO.output(GPIO_LED_SEL1, True)\n GPIO.output(GPIO_LED_SEL2, True)\n GPIO.output(GPIO_LED_EN, True)\n\n if RS == \"Y\":\n GPIO.output(GPIO_LEDS_RIGHT,True)\n else:\n GPIO.output(GPIO_LEDS_RIGHT,False)\n if LS == \"Y\":\n GPIO.output(GPIO_LEDS_LEFT,True)\n else:\n GPIO.output(GPIO_LEDS_LEFT,False)\n\n elif dictRec[splitPacket[0].decode()] == \"DCNT\":\n # Handle Disconnect from Client\n print(\"Handle DCNT\")\n disconnect()\n\n w.update()\n w.update_idletasks()\n","sub_path":"RP_Server.py","file_name":"RP_Server.py","file_ext":"py","file_size_in_byte":13976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"39547030","text":"# -----------------------------------------------------------------------------\n#\n# Copyright (C) 2021 CERN & Newcastle University for the benefit of the\n# BioDynaMo collaboration. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# See the LICENSE file distributed with this work for details.\n# See the NOTICE file distributed with this work for additional information\n# regarding copyright ownership.\n#\n# -----------------------------------------------------------------------------\n\nimport os, platform\nimport subprocess as sp\nfrom print_command import Print\nfrom build_command import BuildCommand\nfrom util import GetBinaryName\n\n\n## The BioDynaMo CLI command to run a simulation\n##\n## @param sim_name The simulation name\n##\ndef RunCommand(args, debug=False):\n sim_name = GetBinaryName()\n args_str = \" \".join(args)\n cmd = \"./build/\" + sim_name\n if platform.system() == \"Darwin\":\n launcher = os.environ[\"BDMSYS\"] + \"/bin/launcher.sh\"\n else:\n launcher = \"\"\n\n try:\n BuildCommand()\n Print.new_step(\"Run \" + sim_name + \" \" + args_str)\n if debug:\n sp.check_output(\n [launcher + \" \" + cmd, \"&>\", \"debug/runtime_output.log\"])\n else:\n print(\n sp.check_output([launcher + \" \" + cmd, args_str],\n stderr=sp.STDOUT,\n shell=True).decode(\"utf-8\"))\n Print.success(\"Finished successfully\")\n except sp.CalledProcessError as err:\n print(err.output.decode(\"utf-8\"))\n Print.error(\"Error during execution of {0}\".format(cmd))\n","sub_path":"cli/run_command.py","file_name":"run_command.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"535875661","text":"import urllib2\r\nimport xlrd\r\nimport json\r\nimport pandas as pd\r\nfrom datetime import datetime, timedelta, date\r\nimport calendar\r\nimport pprint\r\n\r\n#_JSON RESPONSE_\r\ndef clean_attribute_value(raw_value):\r\n # Sample raw_value = [u'Email not registered.']\r\n # Count the raw_value length, remove the first three and last two char to extract the message only\r\n return str(raw_value)[3:len(str(raw_value)) - 2]\r\n\r\ndef get_specific_nested_attribute_value(response, parent_attribute, child_attribute):\r\n return clean_attribute_value(json.loads(response)[parent_attribute][child_attribute])\r\n\r\ndef get_specific_data_attribute_value(response, attribute):\r\n return json.loads(response)['data'][attribute]\r\n\r\ndef store_json_response_data_as_dictionary(response):\r\n return json.loads(response)['data'][0]\r\n\r\ndef compare_dictionaries(actual_dictionary, expected_dictionary):\r\n d1_keys = set(actual_dictionary.keys())\r\n d2_keys = set(expected_dictionary.keys())\r\n intersect_keys = d1_keys.intersection(d2_keys)\r\n unexpected_actual = d1_keys - d2_keys\r\n missing_expected = d2_keys - d1_keys\r\n different_values = {o: (actual_dictionary[o], expected_dictionary[o]) for o in intersect_keys\r\n if actual_dictionary[o] !=expected_dictionary[o]}\r\n same_values = set(o for o in intersect_keys if actual_dictionary[o] == expected_dictionary[o])\r\n\r\n if not(unexpected_actual == set([])):\r\n raise ValueError('Unexpected actual attribute/s found: ', unexpected_actual)\r\n\r\n if not(missing_expected == set([])):\r\n raise ValueError('Missing expected attribute/s: ', missing_expected)\r\n\r\n if not(different_values == {}):\r\n raise ValueError('Actual and expected attribute values do not match: ', different_values)\r\n\r\n if not(len(same_values) == len(expected_dictionary)):\r\n raise ValueError('Only the following attributes matched: ', same_values)\r\n\r\ndef get_specific_nested_json_data_attribute(response, 
parent_attribute, child_attribute):\r\n return json.loads(response)['data'][parent_attribute][child_attribute]\r\n\r\ndef split_and_store_attribute_list(list):\r\n attribute_list = []\r\n attributes = list.split()\r\n for attribute in attributes:\r\n attribute_list.append(attribute)\r\n return attribute_list\r\n\r\ndef check_if_specific_data_key_exists(data, attribute_list, index):\r\n expected = str(attribute_list[index])\r\n found = False\r\n for key in data:\r\n if str(key) == expected:\r\n found = True\r\n if found == False:\r\n raise ValueError('Attribute ', expected, ' does not exist on the response!', data)\r\n\r\ndef check_if_specific_data_attribute_exists(data, attribute_list, index):\r\n expected = str(attribute_list[index])\r\n found = False\r\n\r\n if any(expected in key for key in data):\r\n found = True\r\n if found == False:\r\n raise ValueError('Attribute ', expected, ' does not exist on the response!', data)\r\n\r\ndef get_month_days_count(date_preset):\r\n if date_preset == \"last_month\":\r\n return str(calendar.monthrange(datetime.now().year,datetime.now().month-1)[1])\r\n elif date_preset == \"this_month\":\r\n return str(datetime.now().day)\r\n\r\ndef get_date_preset_count(date_preset):\r\n # switcher = {\r\n # \"today\": \"1\",\r\n # \"yesterday\": \"1\",\r\n # \"last_3d\": \"3\",\r\n # \"last_7d\": \"7\",\r\n # \"last_14d\": \"14\",\r\n # \"last_28d\": \"28\",\r\n # \"last_30d\": \"30\",\r\n # \"last_month\": get_month_days_count(date_preset),\r\n # \"this_month\": get_month_days_count(date_preset)\r\n # }\r\n # return switcher.get(date_preset, \"Invalid date preset!\")\r\n\r\n if date_preset == \"today\" or date_preset == \"yesterday\":\r\n return 1\r\n elif date_preset == \"last_3d\":\r\n return 3\r\n elif date_preset == \"last_7d\":\r\n return 7\r\n elif date_preset == \"last_14d\":\r\n return 14\r\n elif date_preset == \"last_28d\":\r\n return 28\r\n elif date_preset == \"last_30d\":\r\n return 30\r\n elif date_preset == 
\"last_month\":\r\n return get_month_days_count(date_preset)\r\n elif date_preset == \"this_month\":\r\n return get_month_days_count(date_preset)\r\n else:\r\n raise ValueError('Value ', date_preset, ' is not a valid date_preset!')\r\n\r\ndef loop_check_and_store_attributes(response, attribute_list, date_preset):\r\n date_preset_count = get_date_preset_count(str(date_preset.encode(\"utf-8\")))\r\n length = 1\r\n index = 0\r\n data = json.loads(response)['data']\r\n datetime_stamp_values = []\r\n kpi_values = []\r\n\r\n for count in range(0, int(date_preset_count)):\r\n while int(length) <= len(attribute_list):\r\n # check if each data attribute exists\r\n check_if_specific_data_attribute_exists(data, attribute_list, index)\r\n\r\n # get and store each attribute count\r\n datetime_stamp_values.append(json.loads(response)['data'][index]['datetime_stamp'])\r\n kpi_values.append(json.loads(response)['data'][index]['aggregated_likes_kpi_value'])\r\n\r\n length = length + 1\r\n index = index + 1\r\n return date_preset_count, datetime_stamp_values, kpi_values\r\n\r\ndef daterange(date1, date2):\r\n for n in range(int((date2 - date1).days) + 1):\r\n yield date1 + timedelta(n)\r\n\r\ndef get_last_month_or_this_month_days_list(date_preset, current_month):\r\n expected_datetime_list = []\r\n # get number of days\r\n if date_preset == \"last_month\":\r\n month_count = current_month - 1\r\n total_days_count = calendar.monthrange(datetime.now().year, current_month - 1)[1]\r\n elif date_preset == \"this_month\":\r\n month_count = current_month\r\n total_days_count = datetime.now().day\r\n\r\n # create list of days\r\n start_dt = date(datetime.now().year, month_count, 1)\r\n end_dt = date(datetime.now().year, month_count, total_days_count)\r\n for dt in daterange(start_dt, end_dt):\r\n expected_datetime_list.append(dt)\r\n return expected_datetime_list\r\n\r\ndef create_datetime_list(date_preset, date_preset_count):\r\n expected_datetime_list = []\r\n current_month = 
datetime.now().month\r\n if date_preset not in (\"last_month\", \"this_month\", \"today\", \"yesterday\"):\r\n for count in range(1, (date_preset_count+1)):\r\n expected_datetime_list.append(datetime.now() - timedelta(days=count))\r\n elif date_preset == \"last_month\":\r\n expected_datetime_list = get_last_month_or_this_month_days_list(date_preset, current_month)\r\n elif date_preset == \"this_month\":\r\n expected_datetime_list = get_last_month_or_this_month_days_list(date_preset, current_month)\r\n return expected_datetime_list\r\n\r\ndef create_kpi_list(date_preset, date_preset_count, kpi_value):\r\n expected_kpi_list = []\r\n current_month = datetime.now().month\r\n if date_preset == \"last_month\":\r\n date_preset_count = calendar.monthrange(datetime.now().year, current_month - 1)[1]\r\n elif date_preset == \"this_month\":\r\n date_preset_count = datetime.now().day\r\n\r\n for count in range(1, (date_preset_count + 1)):\r\n expected_kpi_list.append(kpi_value * count)\r\n return expected_kpi_list\r\n\r\ndef convert_to_timestamp(timestamp):\r\n return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')\r\n # value = datetime.fromtimestamp(timestamp)\r\n # return value.strftime('%Y-%m-%d')\r\n\r\ndef check_and_store_nested_attributes(response, attribute_list):\r\n length = 1\r\n index = 0\r\n data = json.loads(response)['data']\r\n\r\n count_values = []\r\n actual_percentage_values = []\r\n formatted_percentage_values = []\r\n\r\n while int(length) <= len(attribute_list):\r\n # check if each data attribute exists\r\n check_if_specific_data_key_exists(data, attribute_list, index)\r\n\r\n # get and store each attribute count\r\n count = get_specific_nested_json_data_attribute(response, attribute_list[index], 'count')\r\n percentage = get_specific_nested_json_data_attribute(response, attribute_list[index], 'percentage')\r\n formatted_percentage = get_specific_nested_json_data_attribute(response, attribute_list[index],\r\n 'formatted_percentage')\r\n 
count_values.append(count)\r\n actual_percentage_values.append(percentage)\r\n formatted_percentage_values.append(formatted_percentage)\r\n\r\n length = length + 1\r\n index = index + 1\r\n\r\n return count_values, actual_percentage_values, formatted_percentage_values\r\n\r\ndef compute_breakdown_percentages(count_values, attribute_list):\r\n # get each percentage and check if sum is 100\r\n total = sum(count_values)\r\n length = 1\r\n index = 0\r\n computed_percentage_values = []\r\n while int(length) <= len(attribute_list):\r\n computed_percentage = 100 * float(count_values[index]) / float(total)\r\n computed_percentage_values.append(computed_percentage)\r\n length = length + 1\r\n index = index + 1\r\n if not(sum(computed_percentage_values) == 100):\r\n raise ValueError('Total percentage ', sum(computed_percentage_values), ' is not equal to 100!')\r\n return computed_percentage_values\r\n\r\ndef roundoff_list_values(list_data):\r\n return ['%.2f' % elem for elem in list_data]\r\n\r\ndef compare_breakdown_percentages(attribute_list, actual_percentage_values, computed_percentage_values):\r\n #store computed percentages in dictionary\r\n computed_dictionary = dict(zip(attribute_list, roundoff_list_values(computed_percentage_values)))\r\n #store actual percentages in dictionary\r\n actual_dictionary = dict(zip(attribute_list, roundoff_list_values(actual_percentage_values)))\r\n #compare actual and computed percentage values\r\n compare_dictionaries(actual_dictionary, computed_dictionary)\r\n\r\ndef compute_and_check_count(response, attribute_list):\r\n # get each count and check if sum is 100\r\n length = 1\r\n index = 0\r\n count_values = []\r\n while int(length) <= len(attribute_list):\r\n count_values.append(get_specific_data_attribute_value(response, attribute_list[index]))\r\n length = length + 1\r\n index = index + 1\r\n if not(sum(count_values) == 100):\r\n raise ValueError('Total percentage ', sum(count_values), ' is not equal to 100!')\r\n\r\n#_EXCEL 
DATA_\r\ndef get_specific_value_from_data_framework(file_name, sheet_name, column_header):\r\n sheet_data = pd.read_excel(file_name, sheet_name=sheet_name)\r\n return sheet_data.ix[0, column_header]\r\n\r\ndef split_data(original_value):\r\n first_value, second_value = original_value.split(' ')\r\n return (first_value, second_value)\r\n\r\ndef open_workbook_by_sheet_index(file_path, sheet_index):\r\n return xlrd.open_workbook(file_path).sheet_by_index(int(sheet_index))\r\n\r\ndef store_excel_values_as_dictionary(file_path, sheet_index, start_index):\r\n excel_dictionary = {}\r\n sheet = open_workbook_by_sheet_index(file_path, sheet_index)\r\n\r\n while int(start_index) <= sheet.ncols-1:\r\n column_header = sheet.cell(0, int(start_index)).value\r\n column_value = sheet.cell(1, int(start_index)).value\r\n if(column_header == 'tagger_id'):\r\n column_value = str(int(column_value))\r\n excel_dictionary[column_header] = column_value\r\n start_index = int(start_index) + 1\r\n return excel_dictionary\r\n\r\n#_OTHERS_\r\ndef get_json_error_attribute_value(server, link, attribute):\r\n url = server + link\r\n try:\r\n request = urllib2.Request(url)\r\n response = urllib2.urlopen(request)\r\n except urllib2.HTTPError as e:\r\n error_body = e.read()\r\n resp_dict = json.loads(error_body)\r\n return resp_dict.get(attribute)\r\n\r\n","sub_path":"resources/utility_backend.py","file_name":"utility_backend.py","file_ext":"py","file_size_in_byte":11664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"480809357","text":"from flask import current_app as app\nfrom flask import jsonify, request\nimport flask_login, pymysql\n\n\n@flask_login.login_required\ndef _groups_get_all():\n \"\"\"\n Returns id, name, hacking space of all groups\n \"\"\"\n\n connection = app.config[\"PYMYSQL_CONNECTION\"]\n\n query = \"SELECT \\\n group_id as id, \\\n name, \\\n space as hacking_space \\\n FROM `group`;\"\n with connection.cursor() as cursor:\n cursor.execute(query)\n query_result = cursor.fetchall()\n cursor.close()\n\n # format output\n output = {\"groups_get_all\": [], \"_groups_count\": 0}\n\n for each_group in query_result:\n temp_group_details = {\n \"id\": each_group[\"id\"], \n \"name\": each_group[\"name\"], \n \"hacking_space\": each_group[\"hacking_space\"]\n }\n \n output[\"groups_get_all\"].append(temp_group_details)\n output[\"_groups_count\"] += 1\n\n return jsonify(output), 200\n","sub_path":"backend/app/backend/groups_get_all.py","file_name":"groups_get_all.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"413101851","text":"\nfrom dynaconf import Dynaconf, Validator\n\ndef check_sort_order(keys):\n valid_attrs = ['status', 'title', 'id', 'body', 'list_id', 'createdDateTime', 'dueDateTime', 'lastModifiedDateTime', 'importance', 'isReminderOn']\n for key in keys:\n if len(key) > 0 and key[0] == '-':\n # Then we need to chop out that char for validation\n key = key[1:]\n if key not in valid_attrs:\n return False\n return True\n\n\nsettings = Dynaconf(\n envvar_prefix=\"DYNACONF\",\n settings_files=['settings.toml', '.secrets.toml'],\n validators=[\n Validator('To_Do_Widget', must_exist=True),\n Validator('To_Do_Widget.update_interval', is_type_of=int),\n Validator('To_Do_Widget.incomplete_task_visibility', is_type_of=bool),\n Validator('To_Do_Widget.lists_to_use', is_type_of=list),\n Validator('To_Do_Widget.task_sort_order', is_type_of=list),\n Validator('Weather_Widget', must_exist=True),\n Validator('Weather_Widget.city_name', is_type_of=str),\n Validator('Weather_Widget.units', is_type_of=str),\n Validator('Weather_Widget.update_interval', is_type_of=int),\n Validator('To_Do_Widget.task_sort_order', condition=check_sort_order)\n ],\n)","sub_path":"dynaconf_settings.py","file_name":"dynaconf_settings.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"233562086","text":"from django.conf.urls import url, include\nfrom . import views\nfrom django.urls import path\n\nurlpatterns = [\n\turl(r'^doctor/$', views.doctor_login),\n\turl(r'^receptionist/$', views.receptionist_login),\n\turl(r'^$', views.first_page),\n\turl(r'^doctor/home/$', views.doctor_home),\n\turl(r'^receptionist/home/$', views.receptionist_home),\n\tpath('doctor/home/patient//', views.patient_doctor),\n\tpath('doctor/home/prescription//', views.prescription),\n\turl(r'^receptionist/home/appointment_new/$', views.receptionist_new_patient),\n\turl(r'^receptionist/home/appointment_exist/$', views.receptionist_existing_patient),\n\tpath('receptionist/home/appointment//edit', views.receptionist_edit_appointment),\n\turl(r'^receptionist/bill/$', views.bill_info),\n\tpath('receptionist/view_bill/',views.get_bill_print),\n\turl(r'^receptionist/view_bill', views.bill_find),\n\tpath('receptionist/bill_print/',views.bill_print),\n\tpath('receptionist/home/edit_patient//edit/', views.receptionist_edit_patient),\n\turl(r'^receptionist/home/patient_edit$', views.view_patient_edit),\n]\n","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"515307800","text":"from os import remove\nfrom tempfile import gettempdir\nimport gzip\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom janggu_layers import Reverse,Complement\n\nfrom pysster.Data import Data\nimport pysster.utils as io\nimport keras \n\nimport innvestigate\n\nimport innvestigate.utils as iutils\n#import innvestigate.utils.tests.networks.imagenet\n#import innvestigate.utils.visualizations as ivis\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\n\nimport utils_imagenet as imgnetutils\n\nfrom keras.models import load_model, Model\nfrom keras import backend as K\nimport keras\n\nimport vizsequence\nfrom vizsequence import viz_sequence\n\n\ndef change_activation(model):\n print(\"changing activation...\")\n custom_objects={'Reverse':Reverse,'Complement':Complement}\n path = \"{}/temp_model_file\".format(gettempdir())\n model.save(path, overwrite = True)\n model = load_model(path, custom_objects=custom_objects)\n model.layers[-1].activation = keras.activations.linear\n model.save(path, overwrite = True)\n K.clear_session()\n model = load_model(path, custom_objects=custom_objects)\n model.layers[0].name=\"in_dna\"\n remove(path)\n return model\n\ndef create_lists(analyzers,name,SEQ,methods,SCORE):\n np.save(open(name+\"_score.npy\",\"wb\"), SCORE)\n analysis=np.zeros(shape=SEQ.shape)\n print(analysis.shape)\n for j in range(0, len(analyzers)):\n for i,seq in enumerate(SEQ):\n analysis[i]=(analyzers[j].analyze(np.expand_dims(seq,axis=0)))\n print(analysis.shape)\n np.save(open(name+\"_\"+methods[j][0]+\"_iNN.npy\",\"wb\"),analysis)\n \n\n # Create analyzers.\n# patterns = net[\"patterns\"]\nmethods = [\n # NAME OPT.PARAMS POSTPROC FXN TITLE\n\n # Show input.\n# (\"input\", {}, imgnetutils.image, \"Input\"),\n\n # Function\n# (\"gradient\", {}, imgnetutils.graymap, \"Gradient\"),\n (\"integrated_gradients\", {}, imgnetutils.graymap, \"Integrated Gradients\"),\n\n # Signal\n# (\"deconvnet\", {}, 
imgnetutils.bk_proj, \"Deconvnet\"),\n# (\"guided_backprop\", {}, imgnetutils.bk_proj, \"Guided Backprop\"),\n #(\"pattern.net\", {\"patterns\": patterns}, imgnetutils.bk_proj, \"PatterNet\"),\n\n # Interaction\n #(\"pattern.attribution\", {\"patterns\": patterns}, imgnetutils.heatmap, \"Pattern Attribution\"),\n\n\n (\"lrp.epsilon\", {\"epsilon\": 1}, imgnetutils.heatmap, \"LRP Epsilon\"),\n\n\n #(\"lrp.sequential_preset_a_flat\", {\"epsilon\": 1}, imgnetutils.heatmap, \"LRP-PresetAFlat\"),\n #(\"lrp.sequential_preset_b_flat\", {\"epsilon\": 1}, imgnetutils.heatmap, \"LRP-PresetBFlat\"),\n]\n \nnames=[\"artificial\"]\n#names=[\"lung\"]\nfor name in names:\n\n #load data\n data=io.load_data(name+\"/data.pkl\")\n \n SEQ,IF=data._get_data(\"test\")\n \n #load model\n custom_objects={'Reverse':Reverse, 'Complement':Complement}\n model=load_model(name+\"/model.pkl.h5\", custom_objects = custom_objects)\n SCORE=model.predict(SEQ, batch_size=1000, verbose=1)\n\n pattern_type = \"relu\"\n channels_first = K.image_data_format == \"channels_first\"\n \n # model_wo_softmax = iutils.keras.graph.model_wo_softmax(model)\n model=change_activation(model)\n\n # Create analyzers.\n print(\"analyse\")\n analyzers = []\n for method in methods:\n try:\n analyzer = innvestigate.create_analyzer(method[0],\n model,\n **method[1])\n except innvestigate.NotAnalyzeableModelException:\n print(\"ERROR\")\n analyzer = None\n analyzers.append(analyzer)\n create_lists(analyzers,name,SEQ,methods,SCORE)\n \n","sub_path":"pysster/iNNvestigate_create_files.py","file_name":"iNNvestigate_create_files.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"248367610","text":"\n##Function definitions##\n#----------------------#\n\ndef display():\n for i in range(len(twr1)):\n print (\" \" + (\" \" if twr1[i] == 0 else str(twr1[i])) +\\\n \" \" + (\" \" if twr2[i] == 0 else str(twr2[i])) +\\\n \" \" + (\" \" if twr3[i] == 0 else str(twr3[i])))\n \n print (\"--- --- ---\")\n print (\" A B C \")\n\n \ndef getInput():\n x = input();\n \n if x == 'ab':\n move(twr1,twr2)\n elif x == 'ac':\n move(twr1,twr3)\n elif x == 'ba':\n move(twr2,twr1)\n elif x == 'bc':\n move(twr2,twr3)\n elif x == 'ca':\n move(twr3,twr1)\n elif x == 'cb':\n move(twr3,twr2)\n elif x == 'solve':\n setTowers()\n solve(disks, twr1,twr2,twr3, \"A\", \"B\", \"C\")\n setTowers()\n print(\"\\nnow you try!\")\n else:\n print(\"invalid input: format should be \\\"ab\\\" \\(means from A to B\\)\")\n \n\ndef move(twrA, twrB):\n\n position = -1\n top = len(twrB)-1\n\n for i in range(len(twrA)):\n if(twrA[i]!=0):\n position = i\n break\n \n if(position == -1):\n print(\"No disks left\")\n return\n\n for i in range(len(twrB)-1, -1, -1):\n top = i;\n if(twrB[i] == 0):\n break\n \n if(twrB[len(twrB) - 1]!=0 and twrB[top+1] <= twrA[position]):\n print(\"illegal move\")\n \n else:\n twrB[top] = twrA[position]\n twrA[position] = 0\n\n \ndef solve(n, frm, using, to, fromName, usingName, toName):\n if (n == 1):\n move(frm, to)\n display()\n print (\"disk moved form {x} to {y}\".format(x=fromName, y = toName))\n input(\"press enter to continue\")\n else:\n solve(n - 1, frm, to, using, fromName, toName, usingName)\n move(frm, to)\n display()\n print (\"disk moved form {x} to {y}\".format(x=fromName, y = toName))\n input(\"press enter to continue\")\n solve(n - 1, using, frm, to, usingName, fromName, toName)\n \n\ndef setTowers(): \n for i in range(0, disks):\n twr1[i] = i+1\n twr2[i] = 0\n twr3[i] = 0\n\n \n\n##Start Program##\n#---------------#\n\ndisks = 0\ntotalMoves = 0\n\nwhile(disks < 1):\n disks = int(input(\"Enter the number of disks you would like to 
use\"))\n \n \ntwr1 = [0]*disks\ntwr2 = [0]*disks\ntwr3 = [0]*disks\n\nsetTowers()\n\nprint (\"Enter a move. format should be \\\"FromTo\\\" for example \\\"ab\\\" means from A to B\")\n\nwhile(twr3[0] == 0):\n display()\n getInput()\n totalMoves += 1\n \ndisplay();\nprint (\"YAY! You Win! Your total moves were \",totalMoves);\n\n","sub_path":"pythonTowers.py","file_name":"pythonTowers.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"188170453","text":"def binary_classification_metrics(prediction, ground_truth):\n '''\n Computes metrics for binary classification\n\n Arguments:\n prediction, np array of bool (num_samples) - model predictions\n ground_truth, np array of bool (num_samples) - true labels\n\n Returns:\n precision, recall, f1, accuracy - classification metrics\n '''\n\n tp = fp = tn = fn = 0\n\n for x, y in zip(prediction, ground_truth):\n if x and y:\n tp += 1\n elif not x and not y:\n tn += 1\n elif x and not y:\n fp += 1\n else:\n fn += 1\n accuracy = (tp + tn) / prediction.shape[0]\n precision = tp / (tp + fp) if tp + fp > 0 else 0\n recall = tp / (tp + fn) if tp + fn > 0 else 0\n\n f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0\n\n # implement metrics!\n # Some helpful links:\n # https://en.wikipedia.org/wiki/Precision_and_recall\n # https://en.wikipedia.org/wiki/F1_score\n\n return precision, recall, f1, accuracy\n\n\ndef multiclass_accuracy(prediction, ground_truth):\n '''\n Computes metrics for multiclass classification\n\n Arguments:\n prediction, np array of int (num_samples) - model predictions\n ground_truth, np array of int (num_samples) - true labels\n\n Returns:\n accuracy - ratio of accurate predictions to total samples\n '''\n\n # Implement computing accuracy\n\n return sum(1 for x, y in zip(prediction, ground_truth) if x == y) / prediction.shape[0]\n","sub_path":"assignments/assignment1/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"472496804","text":"import time\r\nfrom selenium import webdriver\r\nfrom bs4 import BeautifulSoup as BS\r\nimport json\r\n# 目標URL網址\r\nURL = \"https://stats.nba.com/teams/\"\r\n\r\nclass TeamPictureCrawler:\r\n def __init__(self, url):\r\n self.url_to_crawl = url\r\n self.links = []\r\n self.result = []\r\n\r\n def start_driver(self):\r\n print(\"啟動 WebDriver...\")\r\n self.driver = webdriver.Chrome(\"./chromedriver\")\r\n self.driver.implicitly_wait(10)\r\n\r\n def close_driver(self):\r\n self.driver.quit()\r\n print(\"關閉 WebDriver...\")\r\n\r\n def get_page(self, url):\r\n print(\"取得網頁...\")\r\n self.driver.get(url)\r\n time.sleep(2)\r\n\r\n def parse(self):\r\n self.start_driver() # 開啟 WebDriver\r\n self.get_page(self.url_to_crawl)\r\n self.get_links()\r\n self.close_driver() # 關閉 WebDriver\r\n\r\n def get_links(self):\r\n res = BS(self.driver.page_source, 'lxml')\r\n temps = res.select('.stats-team-list__link')\r\n for link in temps:\r\n self.links.append(link.get('href'))\r\n\r\n def get_team(self):\r\n res = BS(self.driver.page_source, 'lxml')\r\n team = {}\r\n team_city = res.select('.stats-team-summary__city')[0].text\r\n team_name = res.select('.stats-team-summary__name')[0].text\r\n team[\"name\"] = team_city + \" \" + team_name\r\n team[\"pic_url\"] = self.driver.find_element_by_xpath(\"*//img[@class='team-logo away team-img']\").get_attribute('src')\r\n self.result.append(team)\r\n\r\n def parse_detail(self):\r\n domain = \"https://stats.nba.com\"\r\n self.start_driver()\r\n for link in self.links:\r\n self.get_page(domain + link)\r\n self.get_team()\r\n self.close_driver()\r\n\r\ndef save_to_json(result):\r\n with open(\"team_pic.json\", 'w') as file_object:\r\n json.dump(result, file_object)\r\n\r\nif __name__ == '__main__':\r\n crawler = TeamPictureCrawler(URL)\r\n crawler.parse()\r\n crawler.parse_detail()\r\n 
save_to_json(crawler.result)\r\n","sub_path":"python/Crawler/teamPicCrawler.py","file_name":"teamPicCrawler.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"127742305","text":"import unittest\nimport snapshottest\nfrom src.data_loaders.BablTask1Loader import parse_data, split_parse_data\n\nclass BablTask1LoaderTest(snapshottest.TestCase):\n\n def test_converts_data_to_dataframe(self):\n df = parse_data('test_data/babl_task1_excerpt.txt')\n self.assertMatchSnapshot(df.to_json())\n\n def test_splits_dataframe_into_X_and_y(self):\n X, y = split_parse_data('test_data/babl_task1_excerpt.txt')\n self.assertMatchSnapshot(X.to_json())\n self.assertMatchSnapshot(y.to_json())\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/data_loaders/test_BablTask1Loader.py","file_name":"test_BablTask1Loader.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"268939151","text":"def add_to_inventory(inventory, added_items): # inventory -> dictionary, added_items -> list\n for value in added_items:\n inventory[value] = inventory.setdefault(value, 0) + 1\n\n\ndef display_inventory(inventory): # inventory -> dictionary\n print('Inventory : ')\n item_total = 0\n for key, value in inventory.items():\n print(str(value) + ' ' + str(key))\n item_total += value\n print('Total number of items : ' + str(item_total))\n\n\nmy_inventory = {'gold coin': 42, 'rope': 1}\ndragon_loot = ['gold coin', 'ruby', 'sapphire', 'gold coin', 'sword', 'gold coin']\nadd_to_inventory(my_inventory, dragon_loot)\ndisplay_inventory(my_inventory)\n","sub_path":"Studies/exercises/add_list_to_dictionary.py","file_name":"add_list_to_dictionary.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"160854877","text":"# Video 1: While loop\nerror=50.0\nwhile error > 1 :\n error=error/4\n prinet(error)\n \n \n# Ej 2: Basic while loop\n## Initialize offset\noffset=8\n## Code the while loop\nwhile offset != 0 :\n print(\"correcting...\")\n offset=offset-1\n print(offset)\n\n# Ej 3: Add conditionals\n ## Initialize offset\n offset = -6\n ## Code the while loop\n while offset != 0 :\n print(\"correcting...\")\n if offset > 0 :\n offset = offset - 1\n else :\n offset = offset + 1\n print(offset)\n\n# Videio 4: for loop\n ## loop:\n fam = [1.73, 1.68, 1.71, 1.89]\n for height in fam :\n print(height)\n ## enumerate:\n fam = [1.73, 1.68, 1.71, 1.89]\n for index, height in enumerate(fam) :\n print(\"index\" + str(index) +\":\" + str (height))\n ## Loop over string:\n for c in \"family\" :\n print(c.capitalize())\n\n# Ej 5: Loop over a list\n ## areas list\n areas = [11.25, 18.0, 20.0, 10.75, 9.50]\n ## Code the for loop\n for c in areas :\n print(c)\n\n# Ej 6: Indexes and values (1)\n ## areas list\n areas = [11.25, 18.0, 20.0, 10.75, 9.50]\n ## Code the for loop\n for index, area in enumerate(areas) :\n print(\"room \" + str(index) + \": \" + str(area)) \n\n# Ej 7: Indexes and values (2)\n ## areas list\n areas = [11.25, 18.0, 20.0, 10.75, 9.50]\n ## Code the for loop\n for index, area in enumerate(areas) :\n print(\"room \" + str(index+1) + \": \" + str(area))\n\n# Ej 8: Loop over list of lists\n ## house list of lists\n house = [[\"hallway\", 11.25], \n [\"kitchen\", 18.0], \n [\"living room\", 20.0], \n [\"bedroom\", 10.75], \n [\"bathroom\", 9.50]]\n ## Build a for loop from scratch\n for x in house:\n print(\"the \" + str(x[0]) + \" is \" + str(x[1]) + \" sqm\")\n\n# Video 9: Looping Data Structures, Part 1\n ## Dictionary:\n world = {\"afghanistan\":30.55,\n \"albania\":2.77,\n \"algeria\":39.21 }\n\n for key, value in world.items(): # con world solo da error\n print(key + \"--\" + str(value))\n ## Numpy Arrays:\n import nump as np\n np_height = 
np.array([1.73, 1.68, 1.71, 1.89, 1.79])\n np_weight = np.array([65.4, 59.2, 63.6, 88.4, 68.7])\n bmi= np_weight/np_height**2\n for val in bmi:\n print(val)\n ## 2D Numpy Arrays\n meas= np.array([np_height, np_weight])\n for val in meas:\n print(val)\n ## Recap:\n ### Dictionary: for key, val in my_dict.items() :\n ### Numpy array: for val in np.nditer(my_array) :\n\n# Ej 10: Loop over dictionary\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Intermediate Python for Data Science/4_Loops.py","file_name":"4_Loops.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"59801262","text":"# coding=utf-8\n# /usr/bin/env python3\n'''\nAuthor:Fuxin Jiang\nEmail:jiangfuxin17@mails.ucas.ac.cn\n'''\n'''\n数据说明,比赛数据(脱敏后)抽取的时间范是���连续30天的数据。总体上看,训练分为训练集数据文件、测试集数据文件、用户基本特征数据集、用户行为类汇总特征\n数据集、用户激活过的app列表、30天的APP使用日志、APP类别元数据\nage_train.csv代表训练样本,各字段之间由逗号隔开 1代表小于18岁、2代表19-23周岁、3代表24-34岁、4代表35-44岁、5代表45-54岁、6代表大于等于55周岁\n训练数据总共2010000,测试数据502500\n'''\n'''\n用户基本特征数据集user_basic_info.csv每一行代表一个用户的基本信息,包含用户人口属性、设备基本属性、各字段之间由逗号分隔,格式为:\n\"uld, gender, city, prodName, ramCapacity, ramLeftRation, romCapacity, romLeftRation, color, fontSize, ct,carrier, os \"\n用户标识(uId) 匿名化处理后的用户唯一标识(ID取值从1000001开始,依次递增)\n性别(gender) 男/女(取值空间0,1)\n常住地(city) 如深圳市、南京市等(匿名化处理,实际取值c001,c002….)\n手机型号(prodName) 如mate10、honor 10等(匿名化处理,实际取值p001、p002……)\n手机ram容量(ramCapacity) 手机ram的大小,以G为单位\nram剩余容量占比(ramLeftRation) 手机剩余的容量占总容量的比例\nrom容量(romCapacity) 手机rom的大小,以G为单位\nrom剩余容量占比(romLeftRation) 手机剩余rom容量占总rom容量的比例\n手机颜色(color) 手机机身的颜色\n字体大小(fontSize) 手机设置的字体大小\n上网类型(ct) 2G/3G/4G/WIFI\n移动运营商(carrier) 移动/联通/电信/其他\n手机系统版本(os)AndroId操作系统的版本号\n总共2512500条\n'''\n'''\n用户行为类汇总特征数据集user_behavior_info.csv每行代表一个用户的行为类信息,包含对设备的使用行为汇总数据。\n用户标识(uId) 匿名化处理后的用户唯一标识(ID取值从1000001开始,依次递增)\n开机次数(bootTimes) 一段时间内(30天)手机的总开机次数\n手机A特性使用次数(AFuncTimes) 一段时间内(30天) 手机A特性使用次数\n手机B特性使用次数(BFuncTimes) 一段时间内(30天) 手机B特性使用次数\n手机C特性使用次数(CFuncTimes) 一段时间内(30天) 手机C特性使用次数\n手机D特性使用次数(DFuncTimes) 一段时间内(30天) 手机D特性使用次数\n手机E特性使用次数(EFuncTimes) 一段时间内(30天) 手机E特性使用次数\n手机F特性使用次数(FFuncTimes) 一段时间内(30天) 手机F特性使用次数\n手机G特性使用情况(FFuncSum) 一段时间内(30天)G特性使用情况(数值)\n总共2512500条\n'''\n'''\n用户的激活APP列表文件user_app_actived.csv 每一行代表一条用户激活app的记录(APP激活的含义为用户安装并使用该APP)。特征文件格式为:\n\"uld, appld# appld# appld# appld# appld......\"uld为用户标识,appld为app应用的唯一标识,多个app以\"#\"分隔\n用户标识(uId) 匿名化处理后的用户唯一标识(ID取值从1000001开始,依次递增)\n应用标识(appId) 匿名化处理后的app唯一标识\n总共2512500条\n'''\n'''\napp使用行为日志文件user_app_usage.csv存放了30天内按天统计每个用户对具体某个app的累计打开次数和使用时长,\n用户标识(uId) 匿名化处理后的用户唯一标识(ID取值从1000001开始,依次递增)\n应用标识(appId) 匿名化处理后的app唯一标识\n使用时长(duration) 1天内用户对某app的累计使用时长\n打开次数(times) 
1天内用户对某app的累计打开次数\n使用日期(use_date) 用户对某app的使用日期\n总共651007719条\n'''\n\n'''\napp对应类别文件app_info.csv每一行代表一条app的信息,格式如下:\n应用标识(appId) appId为app应用的唯一标识\n应用类型(category) app所属的应用类型\n总共188864条\n'''\nimport pandas as pd\nfrom collections import Counter\ndef data_pre():\n\n data_train = pd.read_csv(\"age_train.csv\", header=None)\n data_train.columns = ['uid', 'label']\n\n data_test = pd.read_csv(\"age_test.csv\", header=None)\n data_test.columns = ['uid']\n\n user_basic_info = pd.read_csv(\"user_basic_info.csv\", header=None)\n user_basic_info.columns = ['uid', 'gender', 'city', 'prodName', 'ramCapacity', 'ramLeftRation', 'romCapacity',\n 'romLeftRation', 'color', 'fontSize', 'ct', 'carrier', 'os']\n prodName_mapping = {label: idx for idx, label in enumerate(set(user_basic_info['prodName']))}\n user_basic_info['prodName'] = user_basic_info['prodName'].map(prodName_mapping)\n\n city_mapping = {label: idx for idx, label in enumerate(set(user_basic_info['city']))}\n user_basic_info['city'] = user_basic_info['city'].map(city_mapping)\n\n carrier_mapping = {label: idx for idx, label in enumerate(set(user_basic_info['carrier']))}\n user_basic_info['carrier'] = user_basic_info['carrier'].map(carrier_mapping)\n\n color_mapping = {label: idx for idx, label in enumerate(set(user_basic_info['color']))}\n user_basic_info['color'] = user_basic_info['color'].map(color_mapping)\n\n ct_mapping = {label: idx for idx, label in enumerate(set(user_basic_info['ct']))}\n user_basic_info['ct'] = user_basic_info['ct'].map(ct_mapping)\n\n\n user_behavior_info = pd.read_csv(\"user_behavior_info.csv\", header=None)\n user_behavior_info.columns = ['uid', 'bootTimes', 'AFuncTimes', 'BFuncTimes', 'CFuncTimes', 'DFuncTimes',\n 'EFuncTimes', 'FFuncTimes', 'GFuncTimes']\n\n app_info = pd.read_csv(\"app_info.csv\", header=None)\n app_info.columns = ['app_id', 'app_class']\n print(set(app_info['app_class']))\n app_class_to_id_dict = {}\n for class_name in set(app_info['app_class']):\n 
app_class_to_id_dict[class_name] = list(app_info.loc[app_info['app_class'] == class_name, 'app_id'])\n print(\"字典建立完毕!\")\n\n user_app_actived = pd.read_csv(\"user_app_actived.csv\", header=None)\n user_app_actived.columns = ['uid', 'app_ids']\n\n\n for class_name in set(app_info['app_class']):\n user_app_actived[class_name] = user_app_actived['app_ids'].apply(\n lambda x: len(set(x.strip().split('#')) & set(app_class_to_id_dict[class_name])))\n \n user_app_actived.drop(['app_ids'], axis=1, inplace=True)\n \n data_train = pd.merge(data_train, user_basic_info, how='left', on='uid')\n data_train = pd.merge(data_train, user_behavior_info, how='left', on='uid')\n data_train = pd.merge(data_train, user_app_actived, how='left', on='uid')\n data_test = pd.merge(data_test, user_basic_info, how='left', on='uid')\n data_test = pd.merge(data_test, user_behavior_info, how='left', on='uid')\n data_test = pd.merge(data_test, user_app_actived, how='left', on='uid')\n #返回分类属性\n categorical_feature = ['gender', 'city', 'prodName', 'color', 'ct', 'carrier']\n\n data_train.to_csv(\"data_train.csv\", index=False, encoding=\"utf-8\")\n data_test.to_csv(\"data_test.csv\", index=False, encoding=\"utf-8\")\n\n return data_train, data_test, categorical_feature\n\nif __name__ == \"__main__\":\n data_train, data_test ,categorical_feature = data_pre()\n print(data_train.head())\n print(data_test.head())\n app_info = pd.read_csv(\"app_info.csv\", header = None)\n app_info.columns = ['app_id', 'app_class']\n print(set(app_info['app_class']))\n app_class_to_id_dict = {}\n for class_name in set(app_info['app_class']):\n app_class_to_id_dict[class_name] = list(app_info.loc[app_info['app_class'] == class_name, 'app_id'])\n print(\"字典建立完毕!\")\n\n user_app_actived = pd.read_csv(\"user_app_actived.csv\", header = None)\n user_app_actived.columns = ['uid', 'app_ids']\n\n for class_name in set(app_info['app_class']):\n user_app_actived[class_name] = user_app_actived['app_ids'].apply(lambda x 
:len(set(x.strip().split('#'))&set(app_class_to_id_dict[class_name])))\n print(user_app_actived.head())\n","sub_path":"process_data_programs/datapre.py","file_name":"datapre.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"427568199","text":"# Uses python3\nimport sys\n\n'''\nMajority Element Problem\n\nCheck whether a given sequence of numbers contains an element that appears more than 1/2 of the times.\n\nInput: A sequence of n integers\nOutput: \n Either:\n * 1, if there is an element that is repeated more than n/2 times\n * 0, if otherwise\n'''\ndef binary_search(list_, x):\n low, high = 0, len(list_) - 1\n mid = len(list_) // 2\n while high >= low:\n mid = (low + high) // 2\n\n if (list_[mid] == x):\n return mid\n elif (x > list_[mid]):\n low = mid + 1\n else:\n high = mid - 1\n\n return -1\n\ndef get_majority_in_half(numbers):\n check_num = numbers[0]\n count = 1\n for i in range(1, len(numbers)):\n if numbers[i] == check_num:\n count += 1\n else:\n check_num = numbers[i]\n count = 1\n\n if count > (len(numbers)/2):\n return (a[i], count)\n \n return (-1, -1)\n\ndef get_majority_in_half2(numbers, total_count): # [2, 2, 9]\n numbers_length = len(numbers)\n if (numbers_length == 1):\n return (numbers[0], False)\n \n # nums = numbers.copy()\n count = 1\n # current_num = nums[0]\n # i = 1\n # while (len(nums)):\n # binary_search(nums, nums[i])\n\n # [2, 2, 9, 2]\n # c i\n i = 1\n current_num = numbers[0]\n while (i + 1 < numbers_length):\n try:\n matching_number_idx = numbers.index(current_num, i)\n count += 1\n i = matching_number_idx\n except:\n print(f'ValueError. 
i: {i}, numbers[i]: {numbers[i]}')\n\n i += 1\n\ndef get_majority_element(a, left, right):\n total_length = len(a)\n if (total_length <= 1):\n return a[0]\n \n if (total_length == 2):\n if (a[0] != a[1]):\n return -1\n return a[0]\n\n a.sort()\n print(f'a: {a}, lefty {left}, righty {right}') # a: [2, 3, 9, 2, 2], left 0, right 5\n mid = (len(a)//2) + 1\n list_left = a[:mid] # [2, 2, 2] \n list_right = a[mid:] # [3, 9]\n len_left = len(list_left)\n \n left_majority, left_count = get_majority_in_half(list_left)\n \n # so our numbers list is at least length 3, b/c of the 2 if-statements at the beginning short-circuiting\n left_countyyy = 1\n for i in range(1, len(list_left)):\n if (list_left[i] == list_left[i - 1]):\n left_countyyy += 1\n \n\n if (left_count > total_length/2): \n return left_majority\n\n if (left_majority != -1): \n # binary search isn't actually counting all instances of element\n # need to ACTUALLY change \"something\" to count all instances of element\n \n right_majority, right_count = binary_search(list_right, left_majority)\n if ((right_count + left_count) > total_length/2):\n return left_majority\n\n if (left_majority == -1): \n right_majority = get_majority_in_half(list_right) \n if (right_majority == -1): \n return -1\n else:\n left_majority, left_count = binary_search(list_left, right_majority)\n if((right_count + left_count) > total_length/2):\n return right_majority\n\n return -1\n\n# if __name__ == '__main__':\n# # input = sys.stdin.read()\n# input = '5 2 3 9 2 2'\n# # input = '5 2 3 9 1 0'\n# #input = '5 2 2 9 9 9'\n# n, *a = list(map(int, input.split()))\n# if get_majority_element(a, 0, n) != -1:\n# print(1)\n# else:\n# print(0)\n\n\nget_majority_in_half2([2, 2, 9, 2], 5)\n\n'''\n2 2 2 3 9 \n1. Divide the array into 2 sub arrays\n 2 2 2 3 9 \n2. 
binary search on each subarray, starting with idx 0 as target element to count\n\n 2 2 2 3 9 \n count 3 \n\n if subarray count is majority for entire a, then return the number so that we have majority\n if subarray count is not majority, then look for more counts of the element in other array\n => check if count of all arrays is majority\n\n total count = 2\n\n3. if count is majority in 1 subarray, use binary search to see if element exists in other array and get count\n'''\n\n\n\n'''\n count = 1\n for i in range(len_left - 1):\n idx = binary_search(list_left, list_left[i])\n exists = idx != -1\n if (exists):\n count += 1\n list_left.remove(list_left[idx])\n test_length = len(list_left)\n'''\n\n","sub_path":"algorithmic-toolbox/4.divide-n-conquer/majority_binary.py","file_name":"majority_binary.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"623594922","text":"from fastapi import APIRouter\nfrom starlette.background import BackgroundTasks\nfrom typing import List, Dict\nfrom models import User, UserDetails, FileDetails\nfrom fastapi import HTTPException, Request\nfrom database.crud import delete_user, get_files, read_user, read_users, create_user\nimport utils\n\nrouter = APIRouter(tags=[\"users\"], prefix=\"/api/users\")\n\nscopes = {\"user\":\"scopes\"}\n\n@router.get(\"/\", response_model=List[UserDetails])\nasync def users_list(request: Request, limit: int = 10):\n return read_users(limit = limit)\n\n\n@router.get(\"/{username}\", response_model=UserDetails)\nasync def user_detail(username: str):\n user_detail = read_user(username)\n if user_detail is None:\n raise HTTPException(status_code=404, detail=\"user not found\")\n return user_detail\n\n\n@router.get(\"/{username}/files\", response_model=List[FileDetails])\nasync def get_files_list(username: str):\n response = get_files(username)\n if response is None:\n raise HTTPException(status_code=404, detail = \"user not found\")\n else:\n return response\n\n\n@router.post(\"/\", response_model=UserDetails)\nasync def add_user(user: User):\n user_detail = create_user(name=user.name, username=user.username)\n return user_detail\n\n\n@router.delete(\"/{username}\", response_model=Dict[str(\"detail\"), str])\nasync def remove_user(username: str, task: BackgroundTasks):\n response = delete_user(username = username)\n if not response:\n raise HTTPException(404, detail=\"user not found!!\")\n for path in response:\n task.add_task(utils.file_delete, path=path)\n return {\"detail\": \"operation successful\"}","sub_path":"routers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"456888656","text":"import cv2\nfrom PIL import Image\nimport numpy as np\nfrom scipy.ndimage.filters import median_filter\nfrom scipy import ndimage\nimport time\n\n# filling holes. works!\n\n# Read image\nim = cv2.imread('hole.jpg')\n\nblue = im[:, :, 0]\nthresh = blue > 50\nim[thresh] = 255\n\nimg = Image.fromarray(im)\nimg.show()\n\noriginal = thresh.copy()\na = time.time()\ndilationStruct = np.array([[False, True, False], [True, True, True], [False, True, False]])\n\nfor x in xrange(0, 1):\n thresh = ndimage.binary_dilation(thresh, structure=dilationStruct)\n thresh = median_filter(thresh, size=(3, 3))\n\ndilatedgarbage = thresh * (~ original)\n\nready = thresh.copy()\nthresh = thresh.astype(np.uint8)\n#thresh = cv2.bitwise_not(thresh)\n\ncontour, hier = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n\nfor cnt in contour:\n cv2.drawContours(thresh, [cnt], 0, 255, -1)\nb = time.time()\nprint (b-a)*1000\n\nimg = Image.fromarray(thresh)\nimg.show()\n\nthresh *= (~ dilatedgarbage)\nthresh *= (~ original)\n#thresh = median_filter(thresh, size=(8, 8))\n\nimg = Image.fromarray(thresh)\nimg.show()\n#gray = cv2.bitwise_not(thresh)","sub_path":"Masking/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"118197381","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Alien(Sprite):\n \"\"\" A class to manage alien invaders \"\"\"\n\n def __init__(self, screen, ai_settings):\n \"\"\" Create a alien object in upper left corner \"\"\"\n super().__init__()\n self.screen = screen\n\n # Load the alien image and get its rect.\n self.image = pygame.image.load('images/alien.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.screen_height = ai_settings.screen_height\n\n # Start each new alien near the top of the screen.\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n # Store a decimal value for the ship's center\n self.center = float(self.rect.centerx)\n\n # Store the alien's y-coord as a decimal value\n self.y = float(self.rect.y)\n self.x = float(self.rect.x)\n\n self.color = ai_settings.bullet_color\n self.speed_factor = ai_settings.alien_speed_factor\n self.ai_settings = ai_settings\n\n def update(self):\n \"\"\"Move the alien right or left.\"\"\"\n self.x += (self.speed_factor * self.ai_settings.fleet_direction)\n self.rect.x = self.x\n\n def check_edges(self):\n \"\"\" Return True if alien is at right or left edge of screen \"\"\"\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n if self.rect.left <= screen_rect.left:\n return True\n\n def check_bottom(self):\n \"\"\" Return True if alien is at the bottom of screen \"\"\"\n # print(self.rect.bottom, self.screen_height)\n if self.rect.bottom >= self.screen_height:\n return True\n else:\n return False\n\n def blitme(self):\n \"\"\" Draw the alien at its current location \"\"\"\n self.screen.blit(self.image, self.rect)\n","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"543856225","text":"import sys\ninput = sys.stdin.readline\n\ndx = [-1, 0, 1, 0] # x 가중치\ndy = [0, -1, 0, 1] # y 가중치\n\ndef bfs(s_x , s_y, R, C, graph, answer):\n # start x, start y, graph값 queue에 입력\n queue = set([(s_x, s_y, graph[s_x][s_y])])\n while queue:\n x, y, ans = queue.pop()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 0 or ny < 0 or nx >= R or ny >= C:\n continue\n # 만약 ans 안에 중복되는 알파벳이 없다면\n elif graph[nx][ny] not in ans:\n queue.add((nx, ny, ans + graph[nx][ny])) #추가\n answer = max(answer, len(ans) + 1) # answer 업데이트\n return answer\n \n\nR, C = map(int, input().split()) # 세로 R, 가로 C\ngraph = [list(input().strip()) for _ in range(R)] # 알파벳 그래프\nanswer = 1\nanswer = bfs(0, 0, R, C, graph, answer) # 탐색\nprint(answer)\n","sub_path":"Algorithm/JAEHYEON/1987.py","file_name":"1987.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"299255057","text":"#coding:utf-8\n#推荐系统\nimport time\nimport random\n\n\nclass Recommend(object):\n \"\"\"\n like 是反应用户最近的趋势,趋势用value[0]表示,value[1]表示其开始时间\n learn 用于再学习,info参数接受一个用户选择的结果(例如一个商品)\n result 是获取test集合的推荐结果,返回下标值\n 遍历like,其中的标签若过期了,则自动删除,不过期则于该样本比对,符合一个该样本的推荐系数+1\n \"\"\"\n def __init__(self,limit_time):\n self._like = dict() #{'key1':(value[0],value[1]),'key2':(value[0],value[1])} value[0]是喜欢的程度,value[1]是开始时间\n self._time = limit_time #有效期,按秒算\n\n def learn(self,info:dict):\n \"\"\"\n 根据info的内容学习,学习内容会连同当前的时间记录下来\n :param info: info是代表选择的属性,例如电影会具有{'title':'abc','type':'喜剧'}的形式,这些就是它需要学习的地方\n :return:\n \"\"\"\n for key in info.values():\n if key in self._like: #info的value值是self._like的键值\n self._like[key] = (self._like[key][0]+1,time.time()) #修改self._like的喜欢程度,开始时间\n else: #info的value值在self._like中没有相应的键\n self._like[key] = (1,time.time()) #新增self._like中的键值对,初始化喜欢程度\n\n def result(self,test_set:list,number:int):\n \"\"\"\n 给出测试集合,会与记录中的用户偏好进行比较,选出标签符合数最多的集合在测试集合中的下标\n :param test_set: 列表,其单个元素的内容是{p1:v1,p2:v2},v会用于与self._like的key比较\n :param number: 需要返回的选择数目\n :return: 返回一个列表,内容是test_set的下标\n \"\"\"\n #第一次\n if len(self._like) == 0:\n #返回number个,在[0,len(test_set))中的随机数字\n return [random.randrange(0,len(test_set),1) for x in range(number)]\n #已经有资料了:\n t = time.time() #t是当前时间\n res = dict() #推荐系数,与test_set中的电影一一对应\n for index in range(len(test_set)): #index是测试集合的所有下标\n res[index] = 0\n #选出self._like中前5个标签(近段时间的喜好)\n # 将self._like按value[0]也就是喜欢程度降序排列,key_list是排序后键值组成的列表\n key_list = sorted(self._like,key=lambda x:int(self._like[x][0]),reverse=True)\n if len(key_list) >= 5:\n key_list = key_list[:5]\n for like in key_list:\n w,begin_time = self._like[like]\n if begin_time >= t-self._time: #开始时间大于当前时间减有效期,说明没有过期\n if like in test_set[index].values():\n #给test_set中某项的推荐系数加一\n res[index]+=1\n else: #过期,删除self._like中的过期项\n del self._like[like]\n return sorted(res,key=lambda x:res[x],reverse=True)[:number] 
#返回推荐系数的列表下标\n","sub_path":"myTest2/myApp/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"648180942","text":"from typing import List, Optional, Set, Union\n\nfrom spacy.language import Language\nfrom spacy.tokens import Doc, Span, Token\n\nfrom edsnlp.pipelines.qualifiers.base import Qualifier\nfrom edsnlp.pipelines.terminations import termination\nfrom edsnlp.utils.filter import consume_spans, filter_spans, get_spans\nfrom edsnlp.utils.inclusion import check_inclusion\nfrom edsnlp.utils.resources import get_verbs\n\nfrom .patterns import following, preceding, pseudo, verbs_eds, verbs_hyp\n\n\nclass Hypothesis(Qualifier):\n \"\"\"\n Hypothesis detection with spaCy.\n\n The component looks for five kinds of expressions in the text :\n\n - preceding hypothesis, ie cues that precede a hypothetical expression\n - following hypothesis, ie cues that follow a hypothetical expression\n - pseudo hypothesis : contain a hypothesis cue, but are not hypothesis\n (eg \"pas de doute\"/\"no doubt\")\n - hypothetical verbs : verbs indicating hypothesis (eg \"douter\")\n - classic verbs conjugated to the conditional, thus indicating hypothesis\n\n Parameters\n ----------\n nlp : Language\n spaCy nlp pipeline to use for matching.\n pseudo : Optional[List[str]]\n List of pseudo hypothesis cues.\n preceding : Optional[List[str]]\n List of preceding hypothesis cues\n following : Optional[List[str]]\n List of following hypothesis cues.\n verbs_hyp : Optional[List[str]]\n List of hypothetical verbs.\n verbs_eds : Optional[List[str]]\n List of mainstream verbs.\n attr : str\n spaCy's attribute to use:\n a string with the value \"TEXT\" or \"NORM\", or a dict with the key 'term_attr'\n we can also add a key for each regex.\n on_ents_only : Union[bool, str, List[str], Set[str]]\n Whether to look for matches around detected entities only.\n Useful for faster inference in downstream tasks.\n\n - If True, will look in all ents located in `doc.ents` only\n - If an iterable of string is passed, will additionally look in `doc.spans[key]`\n for each key in the iterable\n 
within_ents : bool\n Whether to consider cues within entities.\n explain : bool\n Whether to keep track of cues for each entity.\n \"\"\"\n\n defaults = dict(\n following=following,\n preceding=preceding,\n pseudo=pseudo,\n termination=termination,\n verbs_eds=verbs_eds,\n verbs_hyp=verbs_hyp,\n )\n\n def __init__(\n self,\n nlp: Language,\n attr: str,\n pseudo: Optional[List[str]],\n preceding: Optional[List[str]],\n following: Optional[List[str]],\n termination: Optional[List[str]],\n verbs_eds: Optional[List[str]],\n verbs_hyp: Optional[List[str]],\n on_ents_only: Union[bool, str, List[str], Set[str]],\n within_ents: bool,\n explain: bool,\n ):\n\n terms = self.get_defaults(\n pseudo=pseudo,\n preceding=preceding,\n following=following,\n termination=termination,\n verbs_eds=verbs_eds,\n verbs_hyp=verbs_hyp,\n )\n terms[\"verbs_preceding\"], terms[\"verbs_following\"] = self.load_verbs(\n verbs_hyp=terms.pop(\"verbs_hyp\"),\n verbs_eds=terms.pop(\"verbs_eds\"),\n )\n\n super().__init__(\n nlp=nlp,\n attr=attr,\n on_ents_only=on_ents_only,\n explain=explain,\n **terms,\n )\n\n self.within_ents = within_ents\n self.set_extensions()\n\n @classmethod\n def set_extensions(cls) -> None:\n if not Token.has_extension(\"hypothesis\"):\n Token.set_extension(\"hypothesis\", default=False)\n\n if not Token.has_extension(\"hypothesis_\"):\n Token.set_extension(\n \"hypothesis_\",\n getter=lambda token: \"HYP\" if token._.hypothesis else \"CERT\",\n )\n\n if not Span.has_extension(\"hypothesis\"):\n Span.set_extension(\"hypothesis\", default=False)\n\n if not Span.has_extension(\"hypothesis_\"):\n Span.set_extension(\n \"hypothesis_\",\n getter=lambda span: \"HYP\" if span._.hypothesis else \"CERT\",\n )\n\n if not Span.has_extension(\"hypothesis_cues\"):\n Span.set_extension(\"hypothesis_cues\", default=[])\n\n if not Doc.has_extension(\"hypothesis\"):\n Doc.set_extension(\"hypothesis\", default=[])\n\n def load_verbs(\n self,\n verbs_hyp: List[str],\n verbs_eds: 
List[str],\n ) -> List[str]:\n \"\"\"\n Conjugate \"classic\" verbs to conditional, and add hypothesis\n verbs conjugated to all tenses.\n\n Parameters\n ----------\n verbs_hyp: List of verbs that specifically imply an hypothesis.\n verbs_eds: List of general verbs.\n\n Returns\n -------\n list of hypothesis verbs conjugated at all tenses and classic\n verbs conjugated to conditional.\n \"\"\"\n\n classic_verbs = get_verbs(verbs_eds)\n classic_verbs = classic_verbs.loc[classic_verbs[\"mode\"] == \"Conditionnel\"]\n list_classic_verbs = list(classic_verbs[\"term\"].unique())\n\n hypo_verbs = get_verbs(verbs_hyp)\n list_hypo_verbs_preceding = list(hypo_verbs[\"term\"].unique())\n\n hypo_verbs_following = hypo_verbs.loc[hypo_verbs[\"tense\"] == \"Participe Passé\"]\n list_hypo_verbs_following = list(hypo_verbs_following[\"term\"].unique())\n\n return (\n list_hypo_verbs_preceding + list_classic_verbs,\n list_hypo_verbs_following,\n )\n\n def process(self, doc: Doc) -> Doc:\n \"\"\"\n Finds entities related to hypothesis.\n\n Parameters\n ----------\n doc: spaCy Doc object\n\n Returns\n -------\n doc: spaCy Doc object, annotated for hypothesis\n \"\"\"\n\n matches = self.get_matches(doc)\n\n terminations = get_spans(matches, \"termination\")\n boundaries = self._boundaries(doc, terminations)\n\n # Removes duplicate matches and pseudo-expressions in one statement\n matches = filter_spans(matches, label_to_remove=\"pseudo\")\n\n entities = list(self.get_spans(doc))\n ents = None\n\n for start, end in boundaries:\n\n ents, entities = consume_spans(\n entities,\n filter=lambda s: check_inclusion(s, start, end),\n second_chance=ents,\n )\n\n sub_matches, matches = consume_spans(\n matches, lambda s: start <= s.start < end\n )\n\n if self.on_ents_only and not ents:\n continue\n\n sub_preceding = get_spans(sub_matches, \"preceding\")\n sub_following = get_spans(sub_matches, \"following\")\n sub_preceding += get_spans(sub_matches, \"verbs_preceding\")\n sub_following += 
get_spans(sub_matches, \"verbs_following\")\n\n if not sub_preceding + sub_following:\n continue\n\n if not self.on_ents_only:\n for token in doc[start:end]:\n token._.hypothesis = any(\n m.end <= token.i for m in sub_preceding\n ) or any(m.start > token.i for m in sub_following)\n\n for ent in ents:\n\n if self.within_ents:\n cues = [m for m in sub_preceding if m.end <= ent.end]\n cues += [m for m in sub_following if m.start >= ent.start]\n else:\n cues = [m for m in sub_preceding if m.end <= ent.start]\n cues += [m for m in sub_following if m.start >= ent.end]\n\n hypothesis = ent._.hypothesis or bool(cues)\n\n ent._.hypothesis = hypothesis\n\n if self.explain and hypothesis:\n ent._.hypothesis_cues += cues\n\n if not self.on_ents_only and hypothesis:\n for token in ent:\n token._.hypothesis = True\n\n return doc\n","sub_path":"edsnlp/pipelines/qualifiers/hypothesis/hypothesis.py","file_name":"hypothesis.py","file_ext":"py","file_size_in_byte":8001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"474155137","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n DsgTools\n A QGIS plugin\n Brazilian Army Cartographic Production Tools\n -------------------\n begin : 2016-02-18\n git sha : $Format:%H$\n copyright : (C) 2016 by Philipe Borba - Cartographic Engineer @ Brazilian Army\n email : borba@dsg.eb.mil.br\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\nimport os\nimport json\n\nfrom PyQt4 import QtGui, uic\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import QMessageBox, QFileDialog\nfrom fileinput import filename\nfrom DsgTools.Utils.utils import Utils\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'setupEarthCoverage.ui'))\n\nclass SetupEarthCoverage(QtGui.QWizard, FORM_CLASS):\n coverageChanged = pyqtSignal()\n def __init__(self, abstractDb, areas, lines, oldCoverage, parent=None):\n '''\n Constructor\n '''\n super(self.__class__, self).__init__()\n # Set up the user interface from Designer.\n # After setupUI you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n self.utils = Utils()\n self.areas = areas\n self.lines = lines\n self.abstractDb = abstractDb\n self.areasCustomSelector.setTitle(self.tr('Areas'))\n self.linesCustomSelector.setTitle(self.tr('Lines'))\n self.setupWizard(oldCoverage)\n 
self.areasCustomSelector.selectionChanged.connect(self.populateClasses)\n self.linesCustomSelector.selectionChanged.connect(self.populateDelimiters)\n self.button(QtGui.QWizard.FinishButton).clicked.connect(self.writeIntoDb)\n\n def setupFromFile(self):\n '''\n Opens a earth coverage file\n '''\n if QMessageBox.question(self, self.tr('Question'), self.tr('Do you want to open an earth coverage file?'), QMessageBox.Ok|QMessageBox.Cancel) == QMessageBox.Cancel:\n return\n filename = QFileDialog.getOpenFileName(self, self.tr('Open Earth Coverage Setup configuration'), '', self.tr('Earth Coverage Files (*.json)'))\n return filename\n\n def setupWizard(self, oldCoverage):\n '''\n Prepares the wizard\n oldCoverage: old configuration\n '''\n if oldCoverage:\n self.abstractDb.dropCentroids(oldCoverage.keys())\n filename = self.setupFromFile()\n if filename:\n setupDict = self.utils.readJsonFile(filename)\n areasToList = setupDict.keys()\n linesToList = []\n for key in areasToList:\n lines = setupDict[key]\n for line in lines:\n if line not in linesToList:\n linesToList.append(line)\n areasFromList = []\n linesFromList = []\n for area in self.areas:\n if area not in areasToList:\n areasFromList.append(area)\n for line in self.lines:\n if line not in linesToList:\n linesFromList.append(line)\n self.areasCustomSelector.setToList(areasToList)\n self.areasCustomSelector.setFromList(areasFromList)\n self.linesCustomSelector.setToList(linesToList)\n self.linesCustomSelector.setFromList(linesToList) \n self.populateClasses() \n self.populateDelimiters() \n self.checkDelimiters(setupDict)\n else:\n self.areasCustomSelector.setFromList(self.areas)\n self.linesCustomSelector.setFromList(self.lines)\n\n def checkDelimiters(self, setupDict):\n '''\n Check delimiters\n '''\n for i in range(self.treeWidget.invisibleRootItem().childCount()):\n areaItem = self.treeWidget.invisibleRootItem().child(i)\n for j in range(self.treeWidget.invisibleRootItem().child(i).childCount()):\n delimiterItem 
= areaItem.child(j)\n if areaItem.text(0) in setupDict.keys():\n if delimiterItem.text(1) not in setupDict[areaItem.text(0)]:\n delimiterItem.setCheckState(1,Qt.Unchecked)\n\n def loadJson(self, filename):\n '''\n Loads a json file\n '''\n filename = QFileDialog.getOpenFileName(self, self.tr('Open Field Setup configuration'), self.folder, self.tr('Field Setup Files (*.json)'))\n if not filename:\n return\n return self.readJsonFile(filename)\n\n def populateClasses(self):\n '''\n Populates area classes\n '''\n self.treeWidget.clear()\n selectedAreaClasses = []\n for i in range(self.areasCustomSelector.toList.__len__()):\n selectedAreaClasses.append(self.areasCustomSelector.toList.item(i).text())\n selectedAreaClasses.sort()\n for i in range(len(selectedAreaClasses)):\n treeItem = QtGui.QTreeWidgetItem()\n treeItem.setText(0,selectedAreaClasses[i])\n self.treeWidget.insertTopLevelItem(0,treeItem)\n self.linesCustomSelector.selectionChanged.emit()\n\n def populateDelimiters(self):\n '''\n Populates line classes (area delimiters)\n '''\n delimiterList = []\n for i in range(self.linesCustomSelector.toList.__len__()):\n delimiterList.append(self.linesCustomSelector.toList.item(i).text())\n for i in range(self.treeWidget.invisibleRootItem().childCount()):\n for delimiter in delimiterList:\n treeItem = QtGui.QTreeWidgetItem(self.treeWidget.invisibleRootItem().child(i))\n treeItem.setText(1,delimiter)\n treeItem.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)\n treeItem.setCheckState(1,Qt.Checked)\n self.treeWidget.invisibleRootItem().child(i).setExpanded(True)\n\n def getEarthCoverageDictFromTree(self):\n '''\n Gets earth coverage configuration from the tree widget\n '''\n invRootItem = self.treeWidget.invisibleRootItem()\n earthCoverageDict = dict()\n for i in range(invRootItem.childCount()):\n childClass = invRootItem.child(i)\n earthCoverageDict[childClass.text(0)] = []\n for j in range(childClass.childCount()):\n if childClass.child(j).checkState(1) == 
Qt.Checked:\n earthCoverageDict[childClass.text(0)].append(childClass.child(j).text(1))\n return earthCoverageDict\n\n def writeIntoDb(self):\n '''\n Writes the configuration in the database\n '''\n try:\n earthDict = self.getEarthCoverageDictFromTree()\n self.abstractDb.setEarthCoverageDict(json.dumps(earthDict))\n self.abstractDb.createCentroidAuxStruct(earthDict.keys())\n self.coverageChanged.emit()\n if QMessageBox.question(self, self.tr('Question'), self.tr('Do you want to save this earth coverage setup?'), QMessageBox.Ok|QMessageBox.Cancel) == QMessageBox.Cancel:\n return\n filename = QFileDialog.getSaveFileName(self, self.tr('Save Earth Coverage Setup configuration'), '', self.tr('Earth Coverage Files (*.json)'))\n if not filename:\n QMessageBox.critical(self, self.tr('Critical!'), self.tr('Define a name for the earth coverage file!'))\n return\n with open(filename, 'w') as outfile:\n json.dump(earthDict, outfile, sort_keys=True, indent=4)\n QMessageBox.information(self, self.tr('Information!'), self.tr('Field setup file saved successfully!'))\n \n except Exception as e:\n self.abstractDb.rollbackEarthCoverage(earthDict.keys())\n QtGui.QMessageBox.warning(self, self.tr('Warning!'), self.tr('Problem saving into database! \\n')+e.args[0])\n return\n","sub_path":"ValidationTools/setupEarthCoverage.py","file_name":"setupEarthCoverage.py","file_ext":"py","file_size_in_byte":8759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"436495961","text":"import sys\nimport wos_parser\nimport wos_graph\nimport wos_clasterization\nimport test_articles\n\nif __name__ == \"__main__\":\n if \"help\" in sys.argv:\n print(\"Parameters:\")\n print(\"showbm - show articles before modification\")\n print(\"showam - show articles after modification\")\n print(\"addta - add test articles\")\n print(\"showtfidf - show tf-idf matrix, 500 columns\")\n print(\"showmds - show multidimensional scaling\")\n print(\"nodownload - don't download new article, use old\")\n print(\"showts - show titles for every claster\")\n print(\"showhdc - show hierarchical document clustering\")\n print(\"showlda - show latent Dirichlet allocation\")\n sys.exit()\n print(\"Add 'help' for showing parameters\")\n print(\"Input search string\")\n topic_name = input()\n\n if \"nodownload\" in sys.argv:\n print(\"Input count of articles\")\n cnt_articles = int(input())\n else:\n # site_parser - скачивает html-страницы каждой статьи,\n # выданной по запросу пользователя topic_name.\n # Возвращает количество статей\n cnt_articles = wos_parser.site_parser(topic_name)\n articles = []\n for i in range(1, cnt_articles+1):\n # article_parser - парсит из html страницы статьи имя,\n # автора, абстракт и статьи, на которые она ссылается\n # Эти данные добавляются в лист articles\n wos_parser.article_parser(topic_name + str(i), articles)\n\n if \"showbm\" in sys.argv:\n # show_articles - выводит на экран статьи, добавленные\n # в articles\n wos_parser.show_articles(articles)\n # correct_articles - удаляет статьи без названий, приводит\n # имена авторов в удобный для идентификации статей формат\n articles = wos_parser.correct_articles(articles)\n if \"addta\" in sys.argv:\n # add_test_articles - добавляет в конец articles тестовые\n # статьи. 
Они используются для проверки корректности графа\n test_articles.add_test_articles(articles)\n if \"showam\" in sys.argv:\n # show_correct_articles - выводит на экран статьи после\n # всех модификаций\n wos_parser.show_correct_articles(articles)\n\n # build_graph - строит граф, сохраняет его в формате gexf.\n # Возвращает articles без повторяющихся статей\n articles = wos_graph.build_graph(articles, topic_name)\n\n # Описания этих параметров приведены выше при обработке\n # параметра 'help'\n showtfidf = 0\n showmds = 0\n showts = 0\n showhdc = 0\n showlda = 0\n if \"showtfidf\" in sys.argv:\n showtfidf = 1\n if \"showmds\" in sys.argv:\n showmds = 1\n if \"showts\" in sys.argv:\n showts = 1\n if \"showhdc\" in sys.argv:\n showhdc = 1\n if \"showlda\" in sys.argv:\n showlda = 1\n\n # build_csv - строит базу данных по articles, сохраняет ее\n wos_clasterization.build_csv(articles, topic_name)\n # article_clasterization - разбивает статьи на кластеры\n wos_clasterization.article_clasterization(topic_name, showtfidf, showmds,\n showts, showhdc, showlda)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"416705074","text":"import characters\nCharacters = characters.Characters\n\ndef main_menu():\n \"\"\"The main menu function is called after any game event.\n\n Allows user to perform a variety of actions, such as check stats,\n use items, or equip armor.\"\"\"\n \n while True:\n print(\"\\nWhat will you do?\")\n choice = input(\"\\nEquip (e), Use Item (i), Check Something (c), done (d): \")\n if choice.lower() == \"e\":\n equip_menu()\n elif choice.lower() == \"i\":\n item_menu()\n elif choice.lower() == \"c\":\n check_menu()\n elif choice.lower() == \"d\":\n return\n\ndef equip_menu():\n char_list = [char for char in Characters.party.keys()]\n while True:\n print(\"\\nWhich character will you equip?\")\n print(\"\\n{}\".format(\" \".join(char_list)))\n choice = input(\"\\nEnter the name of the character. Check Characters (c), Back (b): \")\n if choice in Characters.party:\n who = Characters.party[choice]\n while True:\n print(f\"\\nWhat will {who.name} do?\") \n choice = input(\"\\nEquip (e), Unequip (u), Back (b): \")\n if choice.lower() == \"e\":\n while True:\n print(f\"\\nWhat equipment will {who.name} equip?\")\n choice = input(\"\\nEnter the name of the equipment. Check Equipment (c), Back (b): \")\n if choice.title() in Characters.armory:\n print()\n what = Characters.armory[choice.title()][0]\n who.equip(what)\n while True:\n choice = input(\"\\nCheck Character? 
Yes (y), Continue (c), Done Equipping (d): \")\n if choice.lower() == \"y\":\n print()\n who.check_member()\n break\n elif choice.lower() == \"c\":\n break\n elif choice.lower() == \"d\":\n \t return\n continue\n continue\n elif choice.lower() == \"c\":\n print()\n Characters.check_armory()\n continue\n elif choice.lower() == \"b\":\n break\n print(\"\\nYou don't have any to equip.\")\n elif choice.lower() == \"u\":\n while True:\n print(f\"\\nWhich piece of equipment will {who.name} unequip?\")\n choice = input(\"\\nWeapon (w), Armor (a), Accessory (y), All (l), Back (b): \")\n if choice.lower() in \"way\":\n choices = {\"w\": \"Weapon\", \"a\": \"Armor\", \"y\": \"Accessory\"}\n print()\n who.unequip(choices[choice])\n while True:\n choice = input(\"\\nCheck Character? Yes (y), Continue (c), Done Equipping (d): \")\n if choice.lower() == \"y\":\n print()\n who.check_member()\n break\n elif choice.lower() == \"c\":\n break\n elif choice.lower() == \"d\":\n return\n elif choice.lower() == \"l\":\n print()\n who.unequip_character()\n while True:\n choice = input(\"\\nCheck Character? Yes (y), Continue (c), Done Equipping (d): \")\n if choice.lower() == \"y\":\n print()\n who.check_member()\n break\n elif choice.lower() == \"c\":\n break\n elif choice.lower() == \"d\":\n return\n elif choice.lower() == \"b\":\n break\n elif choice.lower() == \"b\":\n break\n continue\n elif choice.lower() == \"c\":\n Characters.check_members()\n continue\n elif choice.lower() == \"b\":\n break\n print(\"\\nI don't know who that is.\")\n\n\ndef item_menu():\n char_list = [char for char in Characters.party.keys()]\n while True:\n print(\"\\nWhich item will you use?\")\n choice = input(\"\\nEnter the item name. 
Check Items (c), Back (b): \")\n if choice.title() in Characters.inventory:\n what = Characters.inventory[choice.title()][0] \n if what.targets == \"Single\":\n while True:\n print(\"\\nWhich character will use {}?\".format(what.name))\n print(\"\\n{}\".format(\" \".join(char_list)))\n choice = input(\"\\nEnter the name of the character. Check Characters (c), Back (b): \")\n if choice in Characters.party:\n who = Characters.party[choice]\n print()\n who.use_item(what)\n while True:\n choice = input(\"\\nCheck Character? Yes (y), Continue (c), Done Using Items (d): \")\n if choice.lower() == \"y\":\n print()\n who.check_member()\n break\n elif choice.lower() == \"c\":\n break\n elif choice.lower() == \"d\":\n return\n break\n elif choice.lower() == \"c\":\n Characters.check_members()\n continue\n elif choice.lower() == \"b\":\n break\n print(\"\\nI don't know who that is.\")\n else:\n Characters.party_item(what)\n continue\n elif choice.lower() == \"c\":\n print()\n Characters.check_items()\n continue\n elif choice.lower() == \"b\":\n break\n print(\"\\nYou don't have any to use!\")\n\ndef check_menu():\n while True:\n print(\"\\nWhat would you like to check?\")\n choice = input(\"\\nCharacters (c), GP (g), Equipment (e), Items (i), Back (b): \")\n if choice.lower() in \"cgei\":\n print()\n check(choice)\n elif choice.lower() == \"b\":\n break\n\ndef check(choice):\n if choice == \"c\":\n Characters.check_members()\n if choice == \"g\":\n Characters.check_gp()\n if choice == \"e\":\n Characters.check_armory()\n if choice == \"i\":\n Characters.check_items()\n return \n\ndef prompt():\n prompt = input() \n return\n\ndef get_name():\n while True:\n name = input(\"\\nChoose a name for your character: \")\n if len(name) < 1 or name in \" \":\n print(\"\\nYou have to give your character a name!\")\n continue\n elif name not in Characters.party:\n return name\n print(\"\\nYou already have a character with that 
name!\")","sub_path":"menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"562414485","text":"from typing import List\n\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n if matrix is None or len(matrix) < 1:\n return 0\n num_cols = len(matrix[0])\n num_rows = len(matrix)\n dp = [[0]*(num_cols+1) for _ in range(num_rows+1)]\n max_side = 0\n for i in range(num_rows):\n for j in range(num_cols):\n if matrix[i][j] == \"1\":\n dp[i+1][j+1] = min(dp[i][j+1], dp[i][j], dp[i+1][j]) + 1\n max_side = max(max_side, dp[i+1][j+1])\n return max_side * max_side","sub_path":"leetcode/221_maximal_square.py","file_name":"221_maximal_square.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"239457888","text":"import tensorflow as tf\n\nvgg_model_path = \"data/vgg16/vgg16-20160129.tfmodel\"\npic_path = \"data/vgg16/test.png\"\n\ndef view_ops():\n \"\"\"Help the programmer to inspect the ops names\n\n :return:\n \"\"\"\n pictures = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3], name=\"images\")\n\n with open(vgg_model_path, mode='rb') as f:\n file_content = f.read()\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(file_content)\n\n tf.import_graph_def(graph_def, input_map={\"images\": pictures})\n\n logits = tf.get_default_graph().get_tensor_by_name(\"import/fc7/BiasAdd:0\")\n\n predictions = tf.contrib.layers.fully_connected(\n inputs=logits,\n num_outputs=5\n )\n\n for op in [n.name for n in tf.get_default_graph().as_graph_def().node]:\n print(op)\n\n print(logits)\n\nif __name__ == \"__main__\":\n view_ops()","sub_path":"data/vgg16/vgg_helper.py","file_name":"vgg_helper.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"390145348","text":"import os\nimport platform\nimport pathlib\n\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.chrome.options import Options\n\nfrom insta_utils.config_helper import ConfigHelper\n\n\ndef get_this_path():\n return str(pathlib.Path(__file__).parent.absolute())\n\n\ndef get_chrome_driver():\n driver_bin_map = {\n \"Windows\": \"chromedriver.exe\",\n \"Darwin\": \"chromedriver\",\n \"Linux\": \"chromedriver\"\n }\n driver_path = os.path.join(get_this_path(), driver_bin_map[platform.system()])\n chrome_options = Options()\n chrome_options.add_argument(\"--lang=en\")\n driver = Chrome(executable_path=driver_path, service_args=[\"--verbose\"], options=chrome_options)\n return driver\n\n\ndef get_firefox_driver():\n driver_bin_map = {\n \"Windows\": \"geckodriver.exe\",\n \"Darwin\": \"geckodriver\",\n \"Linux\": \"geckodriver\"\n }\n driver_path = os.path.join(get_this_path(), driver_bin_map[platform.system()])\n firefox_bin = ConfigHelper().get_firefix_binary_path()\n driver = Firefox(firefox_binary=firefox_bin, executable_path=driver_path)\n return driver\n","sub_path":"insta_utils/browser_drivers/browser_helper.py","file_name":"browser_helper.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"180481693","text":"#!/usr/bin/python\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import linprog\nfrom ast import literal_eval\nimport argparse\nfrom itertools import product\nimport time\nimport os\nimport sys\n\n# see the accompanying notebook version for more information on the general strategy (how to prioritize the\n# matchings) and the linear program (the idea of a 3D tensor, constraints, etc.)\n# it also shows the form of intermediate data (e.g. the data frames of tutor/tutee info) which may be useful to see\n\n\ndef load_tutor_info(data_path, verbose=False):\n \"\"\"\n Reads in the tutor data which must be in a file named tutor_info.txt in the directory specified by data_path.\n Columns expected in the tutor_info file are tutor id, tutor name, list of classes they will tutor, number of\n hours of availability, and number of matches they already have.\n :param data_path: path to the directory with the tutor info file\n :param verbose: whether to print out information about minor issues in the input data and how they're handled\n \"\"\"\n \n tutor_info = pd.read_csv(data_path + 'tutor_info.txt', sep='\\t', header=None, index_col=0,\n names=['id', 'name', 'classes', 'avail_hours', 'n_matches']).sort_index()\n tutor_info.classes = tutor_info.classes.apply(literal_eval)\n\n n_zero_hours = (tutor_info.avail_hours == 0).sum()\n if n_zero_hours > 0:\n if verbose:\n print(\"{} tutors had 0 hours available and are thus being dropped.\".format(n_zero_hours))\n tutor_info.drop(tutor_info[tutor_info.avail_hours == 0].index, inplace=True)\n\n max_matches = 3\n n_max_matches = (tutor_info.n_matches >= max_matches).sum()\n if n_max_matches > 0:\n if verbose:\n print(\"{} tutors had {} matches already and are thus being dropped.\".format(n_max_matches, max_matches))\n tutor_info.drop(tutor_info[tutor_info.n_matches == max_matches].index, inplace=True)\n\n n_no_classes = 
(tutor_info.classes.apply(len) == 0).sum()\n if n_no_classes > 0:\n if verbose:\n print(\"{} tutors had an empty class list and are thus being dropped.\".format(n_no_classes))\n tutor_info.drop(tutor_info[tutor_info.classes.apply(len) == 0].index, inplace=True)\n return tutor_info\n\n\ndef load_tutee_info(data_path, verbose=False):\n \"\"\"\n Reads in the tutee data which must be in a file named tutee_info.txt in the directory specified by data_path\n (entered as a command-line argument with default value of the current directory).\n Columns expected in the tutee_info file are tutee id, tutee name, list of classes requested for tutoring and\n number of matches they already have.\n :param data_path: path to the directory with the tutee info file\n :param verbose: whether to print out information about minor issues in the input data and how they're handled\n \"\"\"\n \n tutee_info = pd.read_csv(data_path + 'tutee_info.txt', sep='\\t', header=None, index_col=0,\n names=['id', 'name', 'classes', 'n_matches']).sort_index()\n tutee_info.classes = tutee_info.classes.apply(literal_eval)\n\n n_no_classes = (tutee_info.classes.apply(len) == 0).sum()\n if n_no_classes > 0:\n if verbose:\n print(\"{} tutees had an empty class list and are thus being dropped.\".format(n_no_classes))\n tutee_info.drop(tutee_info[tutee_info.classes.apply(len) == 0].index, inplace=True)\n\n return tutee_info\n\n\ndef get_class_priority_and_mappings(tutor_info, tutee_info):\n \"\"\"\n Computes the priority assigned to each class as the number of tutees requesting that class divided by the number of tutors available\n for that class (a priority of 0 is always given if a class doesn't have both tutees and tutors)\n Also returns a few useful mappings to/from class names.\n :returns: class_priority: pandas dataframe with columns class_name, priority\n class_to_id: map from class names to ids (as given in the input files)\n class_to_idx: map from class names to indices, which are [0:n_classes] and given 
in order according to class_priority\n idx_to_class: inverse map of class_to_idx\n \"\"\"\n \n # extract just a list of names of classes per tutor/tutee; reduce these into one long list; make a Series mapping names to counts\n # then set priority = n_tutees / n_tutors for each class\n tutees_per_class = pd.Series(reduce(lambda x, y: x + y, tutee_info.classes.apply(lambda x: [elem[1] for elem in x]))).value_counts()\n tutors_per_class = pd.Series(reduce(lambda x, y: x + y, tutor_info.classes.apply(lambda x: [elem[1] for elem in x]))).value_counts()\n class_priority = (tutees_per_class / tutors_per_class).fillna(0).reset_index() # NA occurs when a class doesn't have both tutors and tutees\n class_priority.rename(columns={'index': 'class_name', 0: 'priority'}, inplace=True)\n class_priority.sort_values('priority', inplace=True)\n class_priority.priority /= class_priority.priority.sum() # normalize\n \n class_id_name = np.concatenate((tutee_info.classes.map(lambda class_list: [class_elem[:2] for class_elem in class_list]).values,\n tutor_info.classes.values))\n class_to_id = {name: idx for (idx, name) in reduce(lambda x, y: x + y, class_id_name)} # id is whatever was in the input file\n class_to_idx = {class_priority.class_name.values[i]: i for i in xrange(len(class_priority))} # idx is [0 : n_classes]\n idx_to_class = {val: key for (key, val) in class_to_idx.items()}\n return class_priority, class_to_id, class_to_idx, idx_to_class\n\n\ndef get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes):\n \"\"\"\n Computes the index in the 1D array corresponding to (tutor_idx, tutee_idx, class_idx) in the\n imagined 3D tensor\n \"\"\"\n assert tutor_idx < n_tutors\n assert tutee_idx < n_tutees\n assert class_idx < n_classes\n return tutee_idx + n_tutees * tutor_idx + n_tutees * n_tutors * class_idx\n\n\ndef get_triple_idx(idx, n_tutees, n_tutors):\n \"\"\"\n Does the inverse of get_idx: returns the (tutor_idx, tutee_idx, class_idx) corresponding to idx\n 
\"\"\"\n class_idx = 0\n while idx - (n_tutees * n_tutors) >= 0:\n class_idx += 1\n idx -= (n_tutees * n_tutors)\n \n tutor_idx = 0\n while idx - n_tutees >= 0:\n tutor_idx += 1\n idx -= n_tutees\n tutee_idx = idx\n return tutor_idx, tutee_idx, class_idx\n\n\ndef get_class_list_constraints(tutor_info, tutee_info, n_variables, get_class_idx):\n \"\"\"\n Computes and returns constraints & bounds that will enforce that no matching occurs between a tutor and tutee unless it\n is in a class which occurs in both of their class lists.\n :returns: class_list_bounds, class_list_constraints (both are numpy arrays that can be passed to scipy.optimize.linprog\n as constraints / bounds, respectively)\n \"\"\"\n \n n_tutees = len(tutee_info)\n n_tutors = len(tutor_info)\n n_classes = n_variables / (n_tutees * n_tutors)\n \n class_list_bounds = 0\n class_list_constraints = np.ones((1, n_variables))\n\n # set indices to 0 where the proposed matchings are valid; then any >= 0 value is possible for those matchings\n # the others will be forced to be 0 because we'll constrain their sum to be 0\n for tutor_idx in xrange(n_tutors):\n tutor_class_indices = get_class_idx([elem[1] for elem in tutor_info.classes.iloc[tutor_idx]]) # elem[1] is class name\n for class_idx in tutor_class_indices:\n for tutee_idx in xrange(n_tutees):\n tutee_class_indices = get_class_idx([elem[1] for elem in tutee_info.classes.iloc[tutee_idx]])\n if class_idx in tutee_class_indices:\n class_list_constraints[0, get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes)] = 0\n \n return class_list_bounds, class_list_constraints\n\n\ndef get_hours_constraints(tutor_info, tutee_info, n_variables, get_class_idx, iterative_matching):\n \"\"\"\n Computes and returns constraints & bounds that will enforce that no tutor tutors more hours than they have available\n and that no tutee receives more tutoring in a class than they requested.\n :returns: hours_bounds, hours_constraints (both are numpy arrays that 
can be passed to scipy.optimize.linprog\n as constraints / bounds, respectively)\n \"\"\"\n n_tutors = len(tutor_info)\n n_tutees = len(tutee_info)\n n_classes = n_variables / (n_tutors * n_tutees)\n \n hours_constraints = []\n hours_bounds = []\n\n # tutees need one constraint per class (# hours requested is per class)\n for tutor_idx in xrange(n_tutors):\n class_indices = get_class_idx([elem[1] for elem in tutor_info.classes.iloc[tutor_idx]]) # elem[1] is class name\n \n if iterative_matching:\n hours_bounds.append(1)\n else:\n hours_bounds.append(tutor_info.avail_hours.iloc[tutor_idx])\n \n constraint = np.zeros((1, n_variables)) # set indices to 1 where the proposed class is valid for this tutor\n for class_idx in class_indices:\n for tutee_idx in xrange(n_tutees):\n constraint[0, get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes)] = 1\n hours_constraints.append(constraint)\n\n for tutee_idx in xrange(n_tutees):\n class_indices = get_class_idx([elem[1] for elem in tutee_info.classes.iloc[tutee_idx]]) # elem[1] is class name\n hours_requested = [elem[2] for elem in tutee_info.classes.iloc[tutee_idx]]\n for i in xrange(len(class_indices)):\n class_idx = class_indices[i]\n \n if iterative_matching:\n hours_bounds.append(1)\n else:\n hours_bounds.append(hours_requested[i])\n \n constraint = np.zeros((1, n_variables)) # set indices to 1 where the proposed class is valid for this tutee\n for tutor_idx in xrange(n_tutors):\n constraint[0, get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes)] = 1\n hours_constraints.append(constraint)\n\n hours_constraints = np.concatenate(hours_constraints, axis=0)\n hours_bounds = np.array(hours_bounds)\n return hours_bounds, hours_constraints\n\n\ndef get_objective(lambda_classes, lambda_students, class_priority, n_tutors, n_tutees, n_classes, get_class_idx, tutee_info):\n \"\"\"\n Generates an objective function that can be optimized using a linear program.\n What is actually returned is a 1D 
numpy array whose size is the number of variables.\n Each variable is represented by one index in the array. If we call the array A and the\n variables V, then the function to be optimized is\n sum_i V_i * A_i.\n That is, we maximize the weighted sum of the variables. The variables are implicit as far\n as the optimization is concerned: they are not explicitly encoded; one needs to know what\n each index corresponds to.\n Here, the weights are based on the per-class priorities and whether the students have priority\n for a given class.\n :param lambda_classes: how much weight to put on the per-class priorities. The larger the lambda\n values are, the more focus is given to high priority classes/students (even\n at the expense of matching less tutoring hours overall)\n :param lambda_students: how much weight to put on the student priorities for given classes\n :returns: a 1D numpy array where each value is the coefficient for the implicit variable at that\n index\n \"\"\"\n n_variables = n_tutors * n_tutees * n_classes\n # scale priorities by lambdas\n scaled_class_priorities = lambda_classes * class_priority.priority.values\n objective_function = np.ones(n_variables)\n\n for class_idx in xrange(n_classes):\n priority = scaled_class_priorities[class_idx]\n for tutor_idx in xrange(n_tutors):\n for tutee_idx in xrange(n_tutees):\n objective_function[get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes)] *= priority\n\n for tutee_idx in xrange(n_tutees):\n class_indices = get_class_idx([elem[1] for elem in tutee_info.classes.iloc[tutee_idx]]) # elem[1] is class name\n priorities = [elem[3] for elem in tutee_info.classes.iloc[tutee_idx]]\n for i in xrange(len(class_indices)):\n class_idx = class_indices[i]\n priority = .01 + lambda_students * priorities[i] # so priority of 0 -> 1; we don't want to ignore students with no priority\n for tutor_idx in xrange(n_tutors):\n before = objective_function[get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, 
n_tutors, n_classes)]\n objective_function[get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes)] *= priority\n after = objective_function[get_idx(tutor_idx, tutee_idx, class_idx, n_tutees, n_tutors, n_classes)]\n return objective_function\n\n\ndef solve(objective_function, hours_constraints, hours_bounds, class_list_constraints, class_list_bounds, var_bounds, verbose=False):\n \"\"\"\n Uses a linear program to maximize the given objective function subject to the constraints\n which must be present as global variables: hours_constraints, hours_bounds, class_list_constraint,\n and class_list_bound.\n Attempts first a quick program with fewer bounds. If this fails (in that the solution is outside the desired bounds)\n a slower, completely bounded program is run.\n :param objective_function: a 1D numpy array as specified as the return value of get_objective.\n :returns: A scipy.optimize.OptimizeResult consisting of the following fields:\n x : (numpy ndarray) The independent variable vector which optimizes the linear programming problem.\n slack : (numpy ndarray) The values of the slack variables. Each slack variable corresponds to an inequality\n constraint. 
If the slack is zero, then the corresponding constraint is active.\n success : (bool) Returns True if the algorithm succeeded in finding an optimal solution.\n status : (int) An integer representing the exit status of the optimization:\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n nit : (int) The number of iterations performed.\n message : (str) A string descriptor of the exit status of the optimization.\n \"\"\"\n solution = linprog(-objective_function, options={'disp': verbose},\n A_ub=hours_constraints, b_ub=hours_bounds,\n A_eq=class_list_constraints, b_eq=class_list_bounds)\n \n max_hours = var_bounds[1]\n if solution.x.max() > max_hours:\n if verbose:\n print('Quick solution exceeded max_hours ({} hours in a matching; max is {}).'.format(solution.x.max(), max_hours))\n print('Running slower, bounded program.')\n solution = linprog(-objective_function, bounds=var_bounds, options={'disp': verbose},\n A_ub=hours_constraints, b_ub=hours_bounds,\n A_eq=class_list_constraints, b_eq=class_list_bounds)\n return solution\n\n\ndef update_info(tutor_info, tutee_info, matching, max_tutees_per_tutor=3):\n \"\"\"\n Updates tutor_info and tutee_info based on the given matchings:\n Determines the number of hours to assign for each matching\n Removes the appropriate number of hours available from the tutors\n Removes matched classes from the classes list of tutees\n Drops tutors with 0 hours left\n Drops tutees with no classes left\n WARNING: all parameters are modified in place.\n \"\"\"\n \n for match_idx in xrange(len(matching)):\n tutor_id, tutee_id, class_id = matching.loc[match_idx, ['tutor_id', 'tutee_id', 'class_id']]\n avail_hours = tutor_info.loc[tutor_id].avail_hours\n classes = tutee_info.loc[tutee_id].classes\n request_hours = filter(lambda class_elem: class_elem[0] == class_id, classes)[0][2]\n\n if request_hours > 4:\n assign_hours = 3\n elif request_hours 
> 2:\n assign_hours = 2\n else:\n assign_hours = 1\n\n assign_hours = min(avail_hours, assign_hours)\n matching.loc[match_idx, 'n_hours'] = assign_hours\n\n hours_left = avail_hours - assign_hours\n n_matches = tutor_info.loc[tutor_id, 'n_matches'] + 1\n if hours_left > 0 and n_matches < max_tutees_per_tutor:\n tutor_info.loc[tutor_id, 'avail_hours'] = hours_left\n tutor_info.loc[tutor_id, 'n_matches'] = n_matches\n else:\n tutor_info.drop(tutor_id, inplace=True)\n\n classes_left = filter(lambda class_elem: class_elem[0] != class_id, classes)\n if len(classes_left) > 0:\n tutee_info = tutee_info.set_value(tutee_id, 'classes', classes_left) # because value is a list, need this syntax\n else:\n tutee_info.drop(tutee_id, inplace=True)\n\n\ndef get_matching(solution, tutor_info, tutee_info, idx_to_class, class_to_id, lambda_classes, lambda_students,\n iterative_matching, return_matching=False, verbose=False):\n \"\"\"\n Converts the solution to the tutor-tutee matching linear program into the desired output file format:\n a tsv with columns ['tutor_id', 'tutor_name', 'tutee_id', 'tutee_name', 'class_id', 'class_name', 'n_hours']\n which specifies all tutor-tutee matchings.\n :param solution: a scipy.optimize.OptimizeResult as returned from scipy.optimize.linprog (e.g. 
through the solve function)\n :param return_matching: whether to save the matching file to disk (if False) or to return it\n :param verbose: whether to print the name of the matching file.\n \"\"\"\n \n n_tutees = len(tutee_info)\n n_tutors = len(tutor_info)\n tutor_to_idx = {tutor_info.index.values[i]: i for i in xrange(n_tutors)}\n tutee_to_idx = {tutee_info.index.values[i]: i for i in xrange(n_tutees)}\n idx_to_tutor = {val: key for (key, val) in tutor_to_idx.items()}\n idx_to_tutee = {val: key for (key, val) in tutee_to_idx.items()}\n \n solution.x = solution.x.astype(np.int32)\n \n matched_indices = np.argwhere(solution.x != 0).ravel()\n matches = []\n for matched_idx in matched_indices:\n tutor_idx, tutee_idx, class_idx = get_triple_idx(matched_idx, n_tutees, n_tutors)\n tutor_id = idx_to_tutor[tutor_idx]\n tutor_name = tutor_info.name.loc[tutor_id]\n tutee_id = idx_to_tutee[tutee_idx]\n tutee_name = tutee_info.name.loc[tutee_id]\n class_name = idx_to_class[class_idx]\n class_id = class_to_id[class_name]\n n_hours = solution.x[matched_idx]\n matches.append([tutor_id, tutor_name, tutee_id, tutee_name, class_id, class_name, n_hours])\n matches = pd.DataFrame(matches,\n columns=['tutor_id', 'tutor_name', 'tutee_id', 'tutee_name', 'class_id', 'class_name', 'n_hours'])\n if return_matching:\n return matches\n else:\n fname = '{}matches_lc_{}_ls_{}_{}.tsv'.format(data_path, lambda_classes, lambda_students,\n 'iter' if iterative_matching else 'single')\n matchings.to_csv(fname, sep='\\t', index=False)\n print(\"Saved matching to {}\".format(fname))\n\n\ndef main(data_path, max_hours, verbose, use_product, lambda_classes, lambda_students, iterative_matching, return_matching=False):\n \"\"\"\n :param return_matching: if True, only one matching is computed and then returned (i.e. 
one can't use multiple lambda values\n and only one iteration of matching will be done, regardless of the value of iterative_matching)\n \"\"\"\n \n if use_product:\n lambdas = list(product(lambda_classes, lambda_students))\n else:\n lambdas = zip(lambda_classes, lambda_students)\n \n tutor_info_complete = load_tutor_info(data_path, verbose)\n tutee_info_complete = load_tutee_info(data_path, verbose)\n \n for lambda_idx in xrange(len(lambdas)):\n \n print(\"\\nSolving LP with lambda_classes = {}, lambda_students = {}.\".format(*lambdas[lambda_idx]))\n \n tutor_info = tutor_info_complete.copy()\n tutee_info = tutee_info_complete.copy()\n \n matchings = []\n iter_number = 0\n while True:\n iter_number += 1\n if verbose:\n print(\"\\nOn iteration\", iter_number)\n \n n_tutors = len(tutor_info)\n n_tutees = len(tutee_info)\n\n ### class priorities and info\n class_priority, class_to_id, class_to_idx, idx_to_class = get_class_priority_and_mappings(tutor_info, tutee_info)\n n_classes = len(class_priority)\n get_class_idx = np.vectorize(class_to_idx.get)\n\n ### bounds/constraints on the linear program\n\n n_variables = n_tutors * n_tutees * n_classes\n\n var_bounds = (0, max_hours) # same bound for all matchings\n class_list_bounds, class_list_constraints = get_class_list_constraints(tutor_info, tutee_info, n_variables, get_class_idx)\n hours_bounds, hours_constraints = get_hours_constraints(tutor_info, tutee_info, n_variables, get_class_idx, iterative_matching)\n\n objective_function = get_objective(lambdas[lambda_idx][0], lambdas[lambda_idx][1], class_priority,\n n_tutors, n_tutees, n_classes, get_class_idx, tutee_info)\n solution = solve(objective_function, hours_constraints, hours_bounds, class_list_constraints, class_list_bounds, var_bounds,\n verbose)\n \n if not iterative_matching and not return_matching:\n get_matching(solution, tutor_info, tutee_info, idx_to_class, class_to_id,\n lambdas[lambda_idx][0], lambdas[lambda_idx][1], iterative_matching, 
verbose=verbose)\n sys.exit()\n \n if return_matching:\n return get_matching(solution, tutor_info, tutee_info, idx_to_class, class_to_id, lambdas[lambda_idx][0],\n lambdas[lambda_idx][1], iterative_matching,verbose=verbose, return_matching=True)\n \n # otherwise: iterative matching and should save final result instead of returning 1 iteration of it\n matching = get_matching(solution, tutor_info, tutee_info, idx_to_class, class_to_id,\n lambdas[lambda_idx][0], lambdas[lambda_idx][1], iterative_matching, return_matching=True)\n \n if len(matching) == 0:\n break\n\n matchings.append(matching)\n update_info(tutor_info, tutee_info, matching)\n\n if len(tutor_info) == 0 or len(tutee_info) == 0:\n break\n\n matchings = pd.concat(matchings).reset_index(drop=True)\n \n ### give swap in tutors with 0 matches for those who have multiple\n zero_match_tutors = tutor_info[tutor_info.n_matches == 0]\n \n n_zero_match_tutors = len(zero_match_tutors)\n if verbose:\n if n_zero_match_tutors:\n print(\"\\nFound {} tutors with 0 matches. 
Attempting to swap them in.\".format(n_zero_match_tutors))\n else:\n print(\"\\nNo tutors with 0 matches found.\")\n \n for tutor_id in zero_match_tutors.index:\n class_ids = map(lambda class_elem: class_elem[0], zero_match_tutors.loc[tutor_id, 'classes']) # just get the ids\n matching_to_swap = None\n most_matches = 0\n for row in matchings.index:\n if matchings.loc[row, 'class_id'] in class_ids:\n swapped_tutor_id = matchings.loc[row, 'tutor_id']\n n_matches = matchings.tutor_id.value_counts().loc[swapped_tutor_id]\n if n_matches > max(1, most_matches):\n most_matches = n_matches\n matching_to_swap = [row, swapped_tutor_id, matchings.loc[row, 'n_hours'], matchings.loc[row, 'tutee_id'],\n matchings.loc[row, 'class_id']]\n\n if matching_to_swap: # not None\n # update the matching to use the zero-match tutor swapped for the old one\n row, swapped_tutor_id, swapped_hours, tutee_id, class_id = matching_to_swap\n hours_requested = filter(lambda class_elem: class_elem[0] == class_id, tutee_info_complete.loc[tutee_id, 'classes'])[0][2]\n hours_matched = min(zero_match_tutors.loc[tutor_id, 'avail_hours'], hours_requested)\n matchings.loc[row, 'tutor_id'] = tutor_id\n matchings.loc[row, 'tutor_name'] = zero_match_tutors.loc[tutor_id, 'name']\n matchings.loc[row, 'n_hours'] = hours_matched\n\n # also update the tutor info based on the swap; we'll try one more LP, just in case\n tutor_info.loc[tutor_id, 'n_matches'] += 1\n tutor_info.loc[tutor_id, 'avail_hours'] -= hours_matched\n\n if swapped_tutor_id in tutor_info.index: # edit existing entry\n tutor_info.loc[swapped_tutor_id, 'n_matches'] -= 1\n tutor_info.loc[swapped_tutor_id, 'avail_hours'] += swapped_hours\n else: # put an entry back in; make sure to update hours and matchings based on existing matchings\n tutor_info = tutor_info.append(tutor_info_complete.loc[swapped_tutor_id])\n tutor_info.loc[swapped_tutor_id, 'n_matches'] = len(matchings.loc[[swapped_tutor_id]])\n tutor_info.loc[swapped_tutor_id, 'avail_hours'] -= 
matchings.loc[[swapped_tutor_id], 'n_hours'].sum()\n\n if tutor_info.loc[tutor_id, 'avail_hours'] == 0:\n tutor_info.drop(tutor_id, inplace=True)\n \n if n_zero_match_tutors:\n # do one more matching, just in case anybody swapped out could be matched still\n tutor_info.to_csv('tmp/tutor_info.txt', sep='\\t', header=None)\n tutee_info.to_csv('tmp/tutee_info.txt', sep='\\t', header=None)\n\n matching = main('tmp/', max_hours, verbose, use_product, [lambdas[lambda_idx][0]], [lambdas[lambda_idx][1]],\n iterative_matching=True, return_matching=True)\n update_info(tutor_info, tutee_info, matching)\n matchings = pd.concat((matchings, matching))\n \n ### increase the number of hours in matchings where allowable\n hours_remaining = tutor_info_complete.avail_hours - matchings.groupby('tutor_id').n_hours.sum().sort_index()\n hours_remaining = hours_remaining[hours_remaining > 0]\n \n if verbose:\n print(\"\\nExpanding the number of hours for matches where possible.\")\n for tutor_id in hours_remaining.index:\n matchings.set_index('tutor_id', inplace=True)\n tutor_info_tmp = tutor_info_complete.loc[[tutor_id]].copy() # selecting with \"[]\" keeps it a dataframe instead of a series\n tutor_info_tmp.loc[:, 'avail_hours'] = hours_remaining.loc[tutor_id]\n tutee_info_tmp = tutee_info_complete.loc[matchings.loc[[tutor_id]].tutee_id].copy()\n\n for i in xrange(len(tutee_info_tmp)):\n tutee_id = tutee_info_tmp.index[i]\n matched_class = filter(lambda class_elem: class_elem[0] == matchings.loc[[tutor_id]].iloc[i].class_id,\n tutee_info_tmp.iloc[i].classes)\n tutee_info_tmp.set_value(tutee_id, 'classes', matched_class)\n\n tutor_info_tmp.to_csv('tmp/tutor_info.txt', sep='\\t', header=None)\n tutee_info_tmp.to_csv('tmp/tutee_info.txt', sep='\\t', header=None)\n\n matching = main('tmp/', max_hours, verbose, use_product, [lambdas[lambda_idx][0]], [lambdas[lambda_idx][1]],\n iterative_matching=False, return_matching=True)\n matchings = matchings.reset_index().set_index(['tutor_id', 
'tutee_id'])\n \n for row_idx in xrange(len(matching)):\n tutee_id, n_hours = matching.iloc[row_idx].loc[['tutee_id', 'n_hours']]\n matchings.loc[(tutor_id, tutee_id), 'n_hours'] += n_hours\n matchings.reset_index(inplace=True)\n \n assert all(matchings.groupby('class_id').tutee_id.value_counts() == 1)\n assert all(matchings.tutor_id.value_counts() <= 3)\n \n fname = '{}matches_lc_{}_ls_{}_{}.tsv'.format(data_path, lambdas[lambda_idx][0], lambdas[lambda_idx][1],\n 'iter' if iterative_matching else 'single')\n matchings.to_csv(fname, sep='\\t', index=False)\n print(\"Saved matching to {}\".format(fname))\n\n if verbose:\n runtime = time.time() - start_time\n print(\"\\nRuntime: {:.0f} seconds\".format(runtime))\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-lc', '--lambda_classes', nargs='+', type=float, default=[1],\n help=\"The coefficients that determine how much weight is given to prioritizing 'harder'\\\n classes (those with more tutees compared to tutors); must be strictly positive. Default: 1\")\n parser.add_argument('-ls', '--lambda_students', nargs='+', type=float, default=[1],\n help=\"The coefficients that determine how much weight is given to prioritizing students\\\n in especial need (those marked as priority for a given class); must be strictly positive. Default: 1\")\n parser.add_argument('-p', '--data_path', help=\"Path to the input files (tutee_info.txt and tutor_info.txt). Default: ./\",\n default='./')\n parser.add_argument('-m', '--max_hours', help=\"Maximum number of hours allowable in one match (if doing iterative matching, this\\\n will always be set to 1). Default: 3\", type=int, default=3)\n parser.add_argument('-v', '--verbose', action='store_true',\n help=\"Whether to print addition information while running. 
Default: False\", default=False)\n parser.add_argument('-sm', '--single_matching', action='store_true', help=\"Whether to use iterative matching (see notebook for\\\n more on this approach) or single matching; Default: False (i.e. use iterative matching).\", default=False)\n parser.add_argument('-prod', '--cartesian_product', action='store_true', default=False,\n help=\"If this flag is given, one matching is computed for each combination of\\\n lambda_classes and lambda_students. Otherwise, the two are zipped. Example: if\\\n lambda_students = [2, 5] and lambda_classes = [2, 3] then without this flag, 2 matchings\\\n will be computed with lambdas: (2, 2) and (5, 3). With this flag set, 4 matchings will\\\n be computed: (2, 2), (2, 3), (5, 2), (5, 3). Default: False.\")\n \n args = parser.parse_args()\n data_path = args.data_path\n verbose = args.verbose\n use_product = args.cartesian_product\n lambda_classes = args.lambda_classes\n lambda_students = args.lambda_students\n iterative_matching = not args.single_matching\n \n if iterative_matching:\n max_hours = 1\n \n if verbose:\n print(\"Data path:\", data_path)\n print(\"lambda_students:\", lambda_students)\n print(\"lambda_classes:\", lambda_classes)\n print(\"Use Cartesian product of lambdas?\", use_product)\n print(\"max_hours:\", max_hours, end='\\n\\n')\n print(\"iterative_matching?\", iterative_matching)\n\n main(data_path, max_hours, verbose, use_product, lambda_classes, lambda_students, iterative_matching)","sub_path":"matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":32207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"398415721","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport xadmin\nfrom apps.car.models import Carinfo, Carimageinfo\n\n\nclass CarinfoAdmin(object):\n list_display = ['CarId', 'CarName', 'ImageUrl', 'Load', 'LengthWidthHeight', 'CarType']\n # search_fields = ['CarName', ]\n # list_filter = ['CarId', 'CarName', 'Load', 'LengthWidthHeight', 'CarType']\n # class CarimageinfoInline(object):\n # model = Carimageinfo\n # exclude = [\"CarId\", ]\n # extra = 1\n # style = 'tab'\n #\n # inlines = [CarimageinfoInline]\n\nclass CarimageinfoAdmin(object):\n list_display = ['CarImageId', 'CarId', 'ImageUrl', 'IsCover']\n\n\nxadmin.site.register(Carinfo, CarinfoAdmin)\nxadmin.site.register(Carimageinfo, CarimageinfoAdmin)\n","sub_path":"XJSExpress/apps/car/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"139997890","text":"import json\nimport requests\nimport re\nfrom collections import Counter\n\noutput_file = \"liu_arleen_lab2part1step4.txt\"\n\ndata = requests.get(\"http://www.recipepuppy.com/api/?\", params={'i':'beans,cheese', \"q\":'burrito', \"p\":'3'}).json()\n\nresults = []\n\ncount=0\nmatches=[]\n\nfor i in data['results']:\n\n\tif (count<3):\n\t\tprint (i['title'])\n\t\tresults.append(i['title'])\t\t\n \n\tcount=count+1\n\ntracker = 0\nnewMatch = []\n\nfor i in data['results']:\n\n\tif (tracker<3):\n\t\tmatches=re.sub(\", \",\"\\n\",str(i['ingredients']))\n\t\tnewMatch = newMatch + re.findall('.*\\n', matches)\n\t\t#results.append(str(matches))\n\t\t#print (str(matches))\n\n\ttracker=tracker+1\t\n\n#print(str(Counter(newMatch).items()))\nnewMatch = Counter(newMatch).items()\nnewMatch = sorted(newMatch)\n\ndef getKey(item):\n\treturn item[0]\n\nfor i in newMatch:\n\tprint(str(i[1])+\" \"+str(i[0]))\n\tresults.append(str(i[1])+\" \"+str(i[0]))\n\nwith open(output_file, 'w') as file:\n\tfor i in results:\n\t\tfile.write(str(i))","sub_path":"lab2/liu_arleen_lab2part1step4.py","file_name":"liu_arleen_lab2part1step4.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"586516005","text":"# coding: utf-8\nfrom blog.models import Post, Komment\nfrom django.views.generic import ListView, DetailView\nimport datetime\nfrom django.utils import timezone\nfrom django.shortcuts import get_object_or_404,render_to_response, redirect\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom blog.forms import ContactForm\nfrom django.template import RequestContext\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\ndef PostsListView(request): \n post = Post.objects.all()\n post = post.order_by('-datetime')\n paginator = Paginator(post, 10) \n stran = paginator.page_range\n page = request.GET.get('page')\n try:\n post = paginator.page(page)\n except PageNotAnInteger: \n post = paginator.page(1)\n except EmptyPage:\n post = paginator.page(paginator.num_pages)\n\n return render_to_response('blog/post_list.html', {\"post\": post, \"stran\": stran})\n \ndef full(request,pk):\n komment = Komment.objects.filter(id_post__exact=pk)\n komment = komment.order_by('-datetime')\n post = Post.objects.filter(id__exact=pk)\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['subject']\n content = form.cleaned_data['message']\n p=Komment(\n name = name,\n content = content,\n id_post = pk,\n )\n p.save()\n results=Komment.objects.all()\n return redirect('/'+pk)\n else:\n form = ContactForm()\n return render_to_response('blog/post_detail.html', {'form': form, 'komment': komment, 'post': post, },context_instance=RequestContext(request))\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"228763100","text":"import sys\nimport re\n# from numpy.random import choice\nimport numpy as np\nimport os\nfrom multiprocessing import Process, Manager, Pool\nimport time\nfrom random import random\nfrom numpy.random import RandomState\n\ndef hash_djb2(s): \n\thash = 5381\n\tfor x in s:\n\t\thash = (( hash << 5) + hash) + ord(x)\n\treturn hash & 0xFFFFFFFF\n\ndef rand(minimum, maximum):\n\treturn minimum + (maximum - minimum) * random()\n\nclass Sparse:\n\tdef __init__(self, dim, count, limit):\n\t\tself.dim = dim\n\t\tself.sparse = {}\n\t\tfor i in range(count):\n\t\t\tself.sparse[int(rand(0, dim))] = rand(-limit, limit)\n#dim = dimension of vector\n#count = number of non-zero values\n#limit = range of the non-zero values\n\n\tdef value(self):\n\t\ta = []\n\t\tfor i in range(self.dim):\n\t\t\ttry:\n\t\t\t\ta.append(self.sparse[i])\n\t\t\texcept:\n\t\t\t\ta.append(0)\n\t\treturn a\n\ndef add(a, b, weight=1):\n\tc = a\n\tfor i in b.sparse:\n\t\ttry:\n\t\t\tc.sparse[i] += (weight * b.sparse[i])\n\t\texcept:\n\t\t\tc.sparse[i] = (weight * b.sparse[i])\n\treturn c\n\ndef cleanhtml(raw_html):\n\tcleanr = re.compile('<.*?>')\n\tcleantext = re.sub(cleanr, ' ', raw_html)\n\treturn cleantext\n\nclass MySentences(object):\n\tdef __init__(self, dirname):\n\t\tself.dirname = dirname\n\n\tdef __iter__(self):\n\t\tpunct = '!\"#$%&\\'()*+,.:;<=>?@[\\\\]^`{|}~'\n\t\tfor root, dirs, files in os.walk(self.dirname):\n\t\t\tfor filename in files:\n\t\t\t\tfile_path = root + '/' + filename\n\t\t\t\tfor line in open(file_path):\n\t\t\t\t\tsline = line.strip()\n\t\t\t\t\tif sline == \"\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif sline.startswith('')[0].replace(' ', '_')\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tsline = 'title/err'\n\t\t\t\t\t\t\tprint(line)\n\t\t\t\t\trline = cleanhtml(sline)\n\t\t\t\t\t# print(file_path)\n\t\t\t\t\tyield re.sub(r'[%s]' % punct, '', rline).lower().split()\n\n#using numpy\n# def randomVector(num):\n# q = 1./30\n# return choice([0, 1], size=num, p=[1 - 
q, q])\n\n# generate sparse random vectors fast using time.time()\n\n#using fastrand\n# def randomVector(dim):\n# rv = np.zeros(dim)\n# for i in range(10):\n# rv[fastrand.pcg32bounded(dim)] = fastrand.pcg32bounded(5)\n# return rv\n\ndef generateEmbeddings(embeddings, sentence, title):\n\tdim = 500#vector dimens\n\twindow = 6#window for context words\n\tcount = 2#number of non-zero values\n\tlimit = 5#range of non-zero values\n\twt = 1\n\t# try:\n\t# \tindex[title]\n\t# except:\n\t# \tindex[title] = Sparse(dim, count, limit)\n\n\tif len(sentence) >= window:\n\t\tfor i in range(len(sentence) - window):\n\t\t\tif sentence[i].startswith(\"resource/\"):\n\t\t\t\t#add index vector of title entity\n\t\t\t\ttry:\n\t\t\t\t\t# embeddings[sentence[i]] = add(embeddings[sentence[i]], index[title], 5)\n\t\t\t\t\tembeddings[sentence[i]] = embeddings[sentence[i]] + (5 * RandomState(hash_djb2(title)).normal(0, 0.1, dim))\n\t\t\t\texcept:\n\t\t\t\t\t# embeddings[sentence[i]] = Sparse(dim, 0, 1)\n\t\t\t\t\t# embeddings[sentence[i]] = add(embeddings[sentence[i]], index[title], 5)s\n\t\t\t\t\tembeddings[sentence[i]] = 5 * RandomState(hash_djb2(title)).normal(0, 0.1, dim)\n\t\t\t\t#neighbouring words\n\t\t\t\t# print(embeddings[sentence[i]])\n\t\t\t\tfor j in range(int(i - window/2), i):#left context\n\t\t\t\t\tif sentence[j].startswith(\"resource/\"):\n\t\t\t\t\t\twt = 3\n\t\t\t\t\telse:\n\t\t\t\t\t\twt = 1\n\t\t\t\t\t# try:\n\t\t\t\t\t# \tembeddings[sentence[j]]\n\t\t\t\t\t# \twt = 3\n\t\t\t\t\t# except:\n\t\t\t\t\t# \twt = 1\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# embeddings[sentence[i]] = add(embeddings[sentence[i]], index[sentence[j]], wt)\n\t\t\t\t\t\tembeddings[sentence[i]] = embeddings[sentence[i]] + (wt * RandomState(hash_djb2(sentence[j])).normal(0, 0.1, dim))\n\t\t\t\t\texcept:\n\t\t\t\t\t\t# index[sentence[j]] = Sparse(dim, count, limit)\n\t\t\t\t\t\t# embeddings[sentence[i]] = add(embeddings[sentence[i]], index[sentence[j]], wt)\n\t\t\t\t\t\tembeddings[sentence[i]] = (wt * 
RandomState(hash_djb2(sentence[j])).normal(0, 0.1, dim))\n\t\t\t\tfor j in range(i + 1, int(i + (window/2) + 1)):#right context\n\t\t\t\t\tif sentence[j].startswith(\"resource/\"):\n\t\t\t\t\t\twt = 3\n\t\t\t\t\telse:\n\t\t\t\t\t\twt = 1\n\t\t\t\t\ttry:\n\t\t\t\t\t\tembeddings[sentence[i]] = embeddings[sentence[i]] + (wt * RandomState(hash_djb2(sentence[j])).normal(0, 0.1, dim))\n\t\t\t\t\texcept:\n\t\t\t\t\t\t# index[sentence[j]] = Sparse(dim, count, limit)\n\t\t\t\t\t\t# embeddings[sentence[i]] = add(embeddings[sentence[i]], index[sentence[j]], wt)\n\t\t\t\t\t\tembeddings[sentence[i]] = (wt * RandomState(hash_djb2(sentence[j])).normal(0, 0.1, dim))\n\n\t# print(\"Processed \", str(wc), \" words.\", end=\"\\r\")\n\t\t\t# print(sentence[i] + '(' + str(embeddings[sentence[i]]) + ')')\n\t\t\t# print(sentence[i], end=' ')\n\nif __name__ == '__main__':\n\tdirectory = sys.argv[1]\n\tsentences = MySentences(directory)\n\tmanager = Manager()\n\tembeddings = manager.dict()\n\twc = 0\n\ttitle = ''\n\tnow = time.time()\n\n\tpool = Pool()\n\tfor sentence in sentences:\n\t\twc += len(sentence)\n\t\tif len(sentence) > 0 and sentence[0].startswith('title/'):\n\t\t\ttitle = sentence[0].split('title/')[1]\n\t\t\tprint(title)\n\t\telse:\n\t\t\tpool.apply_async(generateEmbeddings, args=(embeddings, sentence, title))\n\tpool.close()\n\tpool.join()\n\n\tprint(\"Processed \", str(wc), \" words.\")\n\n\tprint(\"Time elapsed: \", str(time.time() - now), 's')\n\n\twith open('embeddings', 'w+') as output:\n\t\twith open('labels', 'w+') as op:\n\t\t\tfor word in embeddings.keys():\n\t\t\t\t# output.write(word + ' ==')\n\t\t\t\top.write(word + '\\n')\n\t\t\t\tfor i in embeddings[word]:\n\t\t\t\t\toutput.write(' ' + str(i))\n\t\t\t\toutput.write('\\n')","sub_path":"gsoc2017-akshay/RVA_random.py","file_name":"RVA_random.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"497287938","text":"import os, subprocess, shutil, tempfile\nimport os.path as op\nimport nibabel as nib\nimport numpy as np\nimport pdb\n\n#pdb.set_trace()\n#subprocess.call('source /hd1/scsnl/scripts/face_blur/lin64/bin/maskface_setup.sh',shell=True, executable=\"/bin/bash\")\n\ndef reorient_like(reo_img, ref_img):\n # There is probably a better way, but fslswapdim/reorient2std will not do it\n ref = nib.load(ref_img)\n ref_aff = ref.get_affine()\n ref_ori = nib.orientations.io_orientation(ref_aff)\n reo = nib.load(reo_img)\n reo_ori = nib.orientations.io_orientation(reo.get_affine())\n reo2ref_ori_xfm = nib.orientations.ornt_transform(reo_ori, ref_ori)\n reo_data = nib.orientations.apply_orientation(reo.get_data(), reo2ref_ori_xfm)\n nib.save(nib.Nifti1Image(reo_data, ref_aff), reo_img)\n\ndef mask_face(anat_fullfile):\n temp_dir = tempfile.mkdtemp()\n anat_name = op.basename(anat_fullfile).split('.')[0]\n temp_img = op.join(temp_dir, anat_name + '.img')\n #pdb.set_trace()\n subprocess.call(['fslchfiletype ANALYZE '+ anat_fullfile + ' '+ temp_img],shell=True)\n cwd = os.getcwd()\n os.chdir(temp_dir)\n #pdb.set_trace()\n with open(os.devnull, 'wb') as DEVNULL:\n subprocess.call(['mask_face', anat_name, '-a', '-s', '0.75', '-v', '0', '-m', 'normfilter'],\n stdout = DEVNULL,\n stderr = DEVNULL,\n shell=True\n )\n #pdb.set_trace()\n mask_face_img = op.join(temp_dir, 'maskface', '%s_full_normfilter.img' % anat_name)\n mask_face_nii = op.join(op.dirname(anat_fullfile), anat_name + '_defaced.nii.gz')\n #pdb.set_trace()\n subprocess.call(['fslchfiletype NIFTI_GZ ' + anat_name + ' '+ mask_face_nii],shell=True)\n os.chdir(cwd)\n #shutil.rmtree(temp_dir)\n #reorient_like(mask_face_nii, anat_fullfile)\n return mask_face_nii\n\ndef unmask_brain(raw, defaced):\n anat_name = op.basename(raw).split('.')[0]\n anat_dir = op.dirname(raw)\n raw_nii = nib.load(raw)\n raw_data = raw_nii.get_data().astype(np.float32)\n #pdb.set_trace()\n deface_nii = nib.load(defaced)\n 
deface_data = deface_nii.get_data().astype(np.float32)\n face_mask = deface_data != raw_data\n # run watershed, get the brain mask, unmask any brain voxels\n stripped = op.join(anat_dir, '%s_watershed.nii.gz' % anat_name)\n # pdb.set_trace()\n #subprocess.call(['/usr/local/freesurfer/bin/mri_watershed ' + raw + ' ' + stripped],shell=True)\n #strip_nii = nib.load(stripped)\n #brain_mask = strip_nii.get_data() > 0\n #mask = op.join(anat_dir, '%s_facemask.nii.gz' % anat_name)\n #eface_data[brain_mask] = raw_data[brain_mask]\n #nib.save(nib.Nifti1Image(face_mask.astype(np.int16), deface_nii.get_affine()), mask)\n #nib.save(nib.Nifti1Image(deface_data, deface_nii.get_affine()), defaced)\n #os.remove(stripped)\n\nraw_anat_file = '/oak/stanford/groups/menon/rawdata/scsnl/100509/visit1/session1/mri/dti/100509_1_1.3T2/dwi_006.nii.gz'\ndefaced_anat_file = mask_face(raw_anat_file) # deface\n#pdb.set_trace()\n#unmask_brain(raw_anat_file, defaced_anat_file) # unblur any voxels mri_watershed thinks are brain\n\n# source freesurfer 6\n# FREESURFER_HOME=...\n# source /hd1/scsnl/scripts/face_blur/lin64/bin/maskface_setup.sh\n","sub_path":"brainImaging/mri/fmri/anonymization/deface/old/deface_gist.py","file_name":"deface_gist.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"559208058","text":"import pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB\r\nfrom sklearn import svm\r\nfrom sklearn.model_selection import GridSearchCV \r\n\r\n\r\n# import warnings filter\r\nfrom warnings import simplefilter\r\n# ignore all future warnings\r\nsimplefilter(action='ignore', category=FutureWarning)\r\n\r\n# Load Dataset\r\ndf = pd.read_csv('spam.csv')\r\n\r\n# Split into training and testing dataset\r\nx = df['EmailText']\r\ny = df['Label']\r\n\r\nx_train, y_train = x[0:4457], y[0:4457]\r\nx_test, y_test = x[4457:], y[4457]\r\n\r\n# Extract features\r\ncv = CountVectorizer()\r\nfeatures = cv.fit_transform(x_train)\r\n\r\n# Build Model\r\ntuned_parameters = {\r\n 'kernel': ['linear', 'rbf'],\r\n 'gamma': [1e-3, 1e-4],\r\n 'C': [1, 10, 100, 1000]\r\n}\r\n\r\nmodel = GridSearchCV(svm.SVC(), tuned_parameters)\r\nmodel.fit(features, y_train)\r\n\r\nprint(model.best_params_)\r\n# Test Accuracy\r\nfeatures_test = cv.transform(x_test)\r\nprint('Accuracy of the model is: ', model.score(features_test, y_test))","sub_path":"spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"628094951","text":"# # Methods for I/O operations on image\n\n# imports\nfrom custom_exceptions import GenericException\n\nfrom common.cv_constants import bgr\n\nimport cv2\n\n\ndef read(filename, mode=bgr):\n \"\"\"\n Return image from image_file. Default Mode is bgr (see constants).\n \"\"\"\n image = cv2.imread(filename, mode)\n if image is None:\n raise GenericException('Image file not found!')\n return image\n\n\ndef show(image, window_name='Image_window', delay=0):\n \"\"\"\n Read image from filename, show it in a window named with window_name.\n - delay is the number if milliseconds to wait key press for\n (default 0 means infinitely).\n \"\"\"\n cv2.imshow(window_name, image)\n cv2.waitKey(delay)\n return\n\n\ndef save(image, image_name, file_format='.jpg'):\n \"\"\"\n Save image to filename. Extension will be JPEG if not specified.\n \"\"\"\n cv2.imwrite(image_name + file_format, image)\n return\n","sub_path":"common/IO_image.py","file_name":"IO_image.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"417730251","text":"import logging\nimport os\nimport threading\nimport time\n\nimport pykka\nimport requests\nfrom mopidy import core\n\nimport netifaces\n\nfrom . import Extension\nfrom .brainz import Brainz\n\nlogger = logging.getLogger(__name__)\n\n\nclass OLEDConfig:\n def __init__(self, config=None):\n self.size = 128\n\nclass OLEDFrontend(pykka.ThreadingActor, core.CoreListener):\n def __init__(self, config, core):\n super().__init__()\n self.core = core\n self.config = config\n self.current_track = None\n self._mode = 1\n self._playlistSize = 0\n self._playlistNum = 0\n self._playlist = None\n\n def on_start(self):\n self.display = OLED(self.config)\n self.display.start()\n self.display.update(volume=self.core.mixer.get_volume().get())\n if \"http\" in self.config:\n ifaces = netifaces.interfaces()\n ifaces.remove(\"lo\")\n\n http = self.config[\"http\"]\n if http.get(\"enabled\", False):\n hostname = http.get(\"hostname\", \"127.0.0.1\")\n port = http.get(\"port\", 6680)\n if hostname in [\"::\", \"0.0.0.0\"]:\n family = (\n netifaces.AF_INET6 if hostname == \"::\" else netifaces.AF_INET\n )\n for iface in ifaces:\n hostname = self.get_ifaddress(iface, family)\n if hostname is not None:\n break\n if hostname is not None:\n self.display.update(\n title=f\"Visit http://{hostname}:{port} to select content.\"\n )\n self.display.update_album_art(art=\"\")\n\n def on_stop(self):\n self.display.stop()\n self.display = None\n\n def get_ifaddress(self, iface, family):\n try:\n return netifaces.ifaddresses(iface)[family][0][\"addr\"]\n except (IndexError, KeyError):\n return None\n\n def mute_changed(self, mute):\n pass\n\n def options_changed(self):\n self.display.update(\n shuffle=self.core.tracklist.get_random(),\n repeat=self.core.tracklist.get_repeat(),\n )\n\n def playlist_changed(self, playlist):\n pass\n\n def playlist_deleted(self, playlist):\n pass\n\n def playlists_loaded(self):\n pass\n\n def seeked(self, time_position):\n 
self.update_elapsed(time_position)\n\n def stream_title_changed(self, title):\n self.display.update(title=title)\n\n def track_playback_ended(self, tl_track, time_position):\n self.update_elapsed(time_position)\n self.display.update(state=\"pause\")\n\n def track_playback_paused(self, tl_track, time_position):\n self.update_elapsed(time_position)\n self.display.update(state=\"pause\")\n\n def track_playback_resumed(self, tl_track, time_position):\n self.update_elapsed(time_position)\n self.display.update(state=\"play\")\n\n def track_playback_started(self, tl_track):\n self.update_track(tl_track.track, 0)\n self.display.update(state=\"play\")\n\n def update_elapsed(self, time_position):\n self.display.update(elapsed=float(time_position))\n\n def update_track(self, track, time_position=None):\n if track is None:\n track = self.core.playback.get_current_track().get()\n\n title = \"\"\n album = \"\"\n artist = \"\"\n\n if track.name is not None:\n title = track.name\n\n if track.album is not None and track.album.name is not None:\n album = track.album.name\n\n if track.artists is not None:\n artist = \", \".join([artist.name for artist in track.artists])\n\n self.display.update(title=title, album=album, artist=artist)\n\n if time_position is not None:\n length = track.length\n # Default to 60s long and loop the transport bar\n if length is None:\n length = 60\n time_position %= length\n\n self.display.update(elapsed=float(time_position), length=float(length))\n\n def tracklist_changed(self):\n pass\n\n def volume_changed(self, volume):\n if volume is None:\n return\n\n self.display.update(volume=volume)\n\n def playlist_list(self):\n self._playlist = self.core.playlists.as_list().get()\n self._playlistSize = len(self._playlist)\n self._playlistNum = 0\n if self._mode == 0:\n self.display.update2(self._playlist, self._playlistNum)\n\n def playlist_prev(self):\n self._playlistNum = self._playlistNum - 1\n if self._playlistNum < 0:\n self._playlistNum = 
self._playlistSize - 1\n self.display.update2(self._playlist, self._playlistNum)\n\n def playlist_next(self):\n self._playlistNum = (self._playlistNum + 1) % self._playlistSize\n self.display.update2(self._playlist, self._playlistNum)\n\n def playlist_select(self):\n playlist_items = self.core.playlists.get_items(self._playlist[self._playlistNum].uri).get()\n itemURIs = []\n for item in playlist_items:\n itemURIs.append(item.uri)\n self.core.tracklist.clear()\n self.core.tracklist.add(uris=itemURIs)\n\n def custom_command(self, **kwargs):\n target = kwargs.get(\"target\")\n if target == 'oled':\n self._mode = kwargs.get(\"mode\", self._mode)\n self.display.update(mode=self._mode)\n playlist = kwargs.get(\"playlist\")\n if playlist == \"list\":\n self.playlist_list()\n elif playlist == \"next\":\n self.playlist_next()\n elif playlist == \"prev\":\n self.playlist_prev()\n elif playlist == \"select\":\n self.playlist_select()\n\n\nclass OLED:\n def __init__(self, config):\n self.config = config\n self.cache_dir = Extension.get_data_dir(config)\n self.display_config = OLEDConfig(config[\"oled\"])\n self.display_class = Extension.get_display_types()[\n self.config[\"oled\"][\"display\"]\n ]\n\n self._brainz = Brainz(cache_dir=self.cache_dir)\n self._display = self.display_class(self.display_config)\n self._running = threading.Event()\n self._delay = 1.0 / 30\n self._thread = None\n\n self._mode = 1\n\n self.shuffle = False\n self.repeat = False\n self.state = \"stop\"\n self.volume = 100\n self.progress = 0\n self.elapsed = 0\n self.length = 0\n self.title = \"\"\n self.album = \"\"\n self.artist = \"\"\n self._last_progress_update = time.time()\n self._last_progress_value = 0\n self._last_art = \"\"\n\n def start(self):\n if self._thread is not None:\n return\n\n self._running = threading.Event()\n self._running.set()\n self._thread = threading.Thread(target=self._loop)\n self._thread.start()\n\n def stop(self):\n self._running.clear()\n self._thread.join()\n 
self._thread = None\n self._display.stop()\n\n def _handle_album_art(self, art):\n pass\n\n def update_album_art(self, art=None):\n pass\n\n def update(self, **kwargs):\n self.shuffle = kwargs.get(\"shuffle\", self.shuffle)\n self.repeat = kwargs.get(\"repeat\", self.repeat)\n self.state = kwargs.get(\"state\", self.state)\n self.volume = kwargs.get(\"volume\", self.volume)\n # self.progress = kwargs.get('progress', self.progress)\n self.elapsed = kwargs.get(\"elapsed\", self.elapsed)\n self.length = kwargs.get(\"length\", self.length)\n self.title = kwargs.get(\"title\", self.title)\n self.album = kwargs.get(\"album\", self.album)\n self.artist = kwargs.get(\"artist\", self.artist)\n self._mode = kwargs.get(\"mode\", self._mode)\n\n if \"elapsed\" in kwargs:\n if \"length\" in kwargs:\n self.progress = float(self.elapsed) / float(self.length)\n self._last_elapsed_update = time.time()\n self._last_elapsed_value = kwargs[\"elapsed\"]\n\n def _loop(self):\n while self._running.is_set():\n if self.state == \"play\":\n t_elapsed_ms = (time.time() - self._last_elapsed_update) * 1000\n self.elapsed = float(self._last_elapsed_value + t_elapsed_ms)\n self.progress = self.elapsed / self.length\n if self._mode == 1:\n self._display.update_overlay(\n self.shuffle,\n self.repeat,\n self.state,\n self.volume,\n self.progress,\n self.elapsed,\n self.title,\n self.album,\n self.artist,\n )\n\n if self._mode == 1:\n self._display.redraw()\n time.sleep(self._delay)\n\n def update2(self, playlist, index):\n self._display.update_playlist(playlist, index)\n","sub_path":"mopidy_oled/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"431194847","text":"# Imports\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom collections import deque\n\n# Environment\nclass Chain:\n\n def __init__ (self):\n self.state = 0\n self.dings = 0\n\n def reset (self):\n self.state = 0\n self.dings = 0\n return 0\n\n def step (self, action):\n\n if action == 1:\n self.state = 0\n return 1\n\n if self.state == 4:\n self.dings += 1\n return 10\n\n self.state += 1\n return 0\n\nenv = Chain ()\n\ndef state_to_array (index):\n z = np.zeros (5)\n z[index] = 1.0\n return z\n\n# Hyperparameters\nnum_episodes = 25\nnum_testepis = 1\nnum_steps = 50\ntau = 10\n\nsize_batch = 100\nsize_memory = 500\n\nlr = 0.8\ngamma = 0.7\n\nmax_eps = 1.0\nmin_eps = 0.01\n\nuse_double = False\ndata_filename = \"DataAllDeepQLearning.csv\"\n\n# Memory\nclass SumTree:\n\n def __init__ (self, capacity):\n self.capacity = capacity\n self.index = 0\n self.data = np.zeros (capacity, dtype=object)\n self.tree = np.zeros (2*capacity - 1)\n # 0\n # ,---'---,\n # 1 2\n # ,-'-, ,-'-,\n # 3 4 5 6\n\n def add (self, priority, data):\n tree_index = self.index + self.capacity - 1\n self.data[self.index] = data\n self.update (tree_index, priority)\n self.index = (self.index + 1) % self.capacity\n\n def update (self, index, priority):\n delta = priority - self.tree[index]\n self.tree[index] = priority\n # cascade up the tree\n while index != 0:\n index = (index - 1)//2\n self.tree[index] += delta\n\n def get_leaf (self, val):\n parent = 0\n while True:\n left = 2*parent + 1\n right = 2*parent + 2\n \n if left >= len (self.tree):\n # if the left child node would exceed the bounds of the tree\n # then the current node is a leaf\n break\n\n if val <= self.tree[left]:\n parent = left\n else:\n val -= self.tree[left]\n parent = right\n\n data = parent - self.capacity + 1\n return parent, self.tree[parent], self.data[data]\n \n @property\n def max_priority 
(self):\n return np.max (self.tree[-self.capacity:])\n \n @property\n def min_priority (self):\n return np.min (self.tree[-self.capacity:])\n \n @property\n def total_priority (self):\n return self.tree[0]\n\nclass Memory:\n \n def __init__ (self, capacity):\n self.e = 0.01\n self.a = 0.6\n self.b = 0.4\n self.clip = 1.0\n self.tree = SumTree (capacity)\n\n def append (self, experience):\n max_priority = self.tree.max_priority\n if max_priority == 0:\n max_priority = self.clip\n self.tree.add (max_priority, experience)\n\n def sample (self, num):\n batch = []\n b_index = np.empty ((num,), dtype = np.int32)\n b_weight = np.empty ((num,), dtype = np.float32)\n\n priority_segment = self.tree.total_priority / num\n max_weight = num*self.tree.min_priority / self.tree.total_priority\n max_weight = max_weight**(-self.b)\n\n for i in range (num):\n a, b = priority_segment*i, priority_segment*(i + 1)\n value = np.random.uniform (a, b)\n index, priority, data = self.tree.get_leaf (value)\n sampling_probability = priority / self.tree.total_priority\n \n b_weight[i] = (num*sampling_probability)**(-self.b) / max_weight\n b_index[i] = index\n batch.append (data)\n b_weight = np.array (b_weight)\n\n return b_index, batch, b_weight\n\n def batch_update (self, index, err):\n err += self.e\n err = np.minimum (err, self.clip)\n err = np.power (err, self.a)\n for i, e in zip (index, err):\n self.tree.update (i, e)\n\nclass Model (tf.keras.Model):\n\n def __init__ (self):\n super (Model, self).__init__ ()\n l = tf.keras.layers\n self.action_layer = l.Dense (2, input_dim = 5, activation = 'relu')\n self.value_layer = l.Dense (1, input_dim = 5, activation = 'relu')\n self.average_layer = l.Lambda (lambda x: x - tf.reduce_mean (x))\n self.q_layer = l.Add ()\n\n self.compile (optimizer = 'adam',\n loss = 'mean_squared_error',\n metrics = [])\n\n def call (self, x_in):\n x_a = self.action_layer (x_in)\n x_a = self.average_layer (x_a)\n x_v = self.value_layer (x_in)\n return self.q_layer 
([x_v, x_a])\n\npolicy_a = Model ()\ntarget_a = Model ()\ntarget_a.set_weights (policy_a.get_weights ())\npolicy_b = Model ()\ntarget_b = Model ()\ntarget_b.set_weights (policy_b.get_weights ())\n\nmemory = Memory (size_memory)\n\n# Populate Memory\n\nfor i in range (size_memory):\n state0 = state_to_array (env.state)\n action = random.randrange (2)\n reward = env.step (action)\n state1 = state_to_array (env.state)\n memory.append ((state0, action, reward, state1))\n\n# Training\n\nscores = []\nfor episode in range (num_episodes):\n eps = max_eps*(min_eps/max_eps)**(episode/num_episodes)\n\n state0 = state_to_array (env.reset ())\n score = 0\n for step in range (num_steps):\n\n # choose action\n action = None\n if random.random () > eps:\n q_vals = policy_a.predict (np.array ([state0]))\n if use_double:\n q_vals = q_vals + policy_b.predict (np.array ([state0]))\n action = np.argmax (q_vals)\n else:\n action = random.randrange (2)\n\n # perform\n reward = env.step (action)\n score += reward\n state1 = state_to_array (env.state)\n memory.append ((state0, action, reward, state1))\n state0 = state1\n\n # create training batch\n b_index, batch, b_weight = memory.sample (size_batch)\n batch = list (zip (*batch))\n b_state0 = np.array (batch[0])\n b_action = np.array (batch[1])\n b_reward = np.array (batch[2])\n b_state1 = np.array (batch[3])\n\n # predict Q-values\n policy = None\n qf_val = None\n a_star = np.empty (size_batch)\n\n if use_double:\n target = None\n othert = None\n if random.random () < 0.5:\n policy = policy_a\n target = target_a\n othert = target_b\n else:\n policy = policy_b\n target = target_b\n othert = target_a\n\n qp_val = target.predict (b_state1)\n qf_val = othert.predict (b_state1)\n a_star = np.argmax (qp_val, axis = 1)\n else:\n policy = policy_a\n target = target_a\n\n qf_val = target.predict (b_state1)\n a_star = np.argmax (qf_val, axis = 1)\n\n b_target = policy.predict (b_state0)\n for i in range (size_batch):\n a = b_action[i]\n a_s = 
a_star[i]\n r = b_reward[i]\n b_target[i,a] += lr*(r + gamma*qf_val[i,a_s] - b_target[i,a])\n\n # fit values\n policy.fit (b_state0,\n b_target,\n epochs = 10,\n batch_size = size_batch,\n verbose = 0)\n \n # update fixed q-values\n if (step + 1) % tau == 0:\n target_a.set_weights (policy_a.get_weights ())\n target_b.set_weights (policy_b.get_weights ())\n\n # display episode results\n print (\"episode {0:3d}, score {1:3d}, dings {2:2d}, eps {3:6f}\"\n .format (episode, score, env.dings, eps))\n scores.append (score)\n\n# write results to file\nif data_filename:\n with open (data_filename, 'w') as f:\n for i in range (num_episodes):\n f.write (\"{0:d},{1:d}\\n\".format (i, scores[i]))\n\nplt.plot (scores)\nplt.show ()\n\n# Play\nfor episode in range (num_testepis):\n state0 = state_to_array (env.reset ())\n score = 0\n for step in range (num_steps):\n q_vals = policy_a.predict (np.array ([state0]))\n if use_double:\n q_vals = q_vals + policy_b.predict (np.array ([state0]))\n action = np.argmax (q_vals)\n reward = env.step (action)\n score += reward\n state = state_to_array (env.state)\n\n print (\"score\", score)\n\n","sub_path":"Code/RLChain/AllDeepQLearning.py","file_name":"AllDeepQLearning.py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"607086571","text":"\n#Function 1: TRANSLATE Reads in a DNA sequence fasta file,\nstandard_code = {\n \"UUU\": \"F\", \"UUC\": \"F\", \"UUA\": \"L\", \"UUG\": \"L\", \"UCU\": \"S\",\n \"UCC\": \"S\", \"UCA\": \"S\", \"UCG\": \"S\", \"UAU\": \"Y\", \"UAC\": \"Y\",\n \"UAA\": \"*\", \"UAG\": \"*\", \"UGA\": \"*\", \"UGU\": \"C\", \"UGC\": \"C\",\n \"UGG\": \"W\", \"CUU\": \"L\", \"CUC\": \"L\", \"CUA\": \"L\", \"CUG\": \"L\",\n \"CCU\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\", \"CAU\": \"H\",\n \"CAC\": \"H\", \"CAA\": \"Q\", \"CAG\": \"Q\", \"CGU\": \"R\", \"CGC\": \"R\",\n \"CGA\": \"R\", \"CGG\": \"R\", \"AUU\": \"I\", \"AUC\": \"I\", \"AUA\": \"I\",\n \"AUG\": \"M\", \"ACU\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"AAU\": \"N\", \"AAC\": \"N\", \"AAA\": \"K\", \"AAG\": \"K\", \"AGU\": \"S\",\n \"AGC\": \"S\", \"AGA\": \"R\", \"AGG\": \"R\", \"GUU\": \"V\", \"GUC\": \"V\",\n \"GUA\": \"V\", \"GUG\": \"V\", \"GCU\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\",\n \"GCG\": \"A\", \"GAU\": \"D\", \"GAC\": \"D\", \"GAA\": \"E\", \"GAG\": \"E\",\n \"GGU\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\"}\n\ndef DNA2Prot(f1, f2=\"translated_fasta.txt\"):\n fn=open(f1,'U')\n fout=open(f2,'w')\n for line in fn:\n if \">\" in line:\n fout.write(line)\n pass\n else:\n codons=[]\n i=0\n protein=\"\"\n line=line.replace(\"T\",\"U\")\n line=line.strip()\n for i in range(0,len(line),3):\n codon=line[i:i+3]\n if len(codon)== 3:\n codons.append(codon)\n amino_acid = standard_code[codon]\n protein = protein + amino_acid\n else:\n pass\n fout.write(protein)\n fout.write(\"\\n\")\n\n fn.close()\n fout.close()\n return 1\n \n\n#Function 2: Reads in a fasta file of protein sequences,\n\ndef AAfreq(f1,f2=\"aatable.xls\"):\n fn = open(f1, 'r')\n fout = open(f2, 'w')\n count = 0\n countA = 0\n fseq = fn.readlines()\n fdict = {}\n for line in fseq:\n if \">\" in line:\n fseq.remove(line)\n for line in fseq:\n for line2 in fseq(countA):\n if 
(line2 != \"\\n\"):\n if line2 not in fdict:\n fdict[line2] = 1\n else:\n fdict[line2] += 1\n countA += 1\n if (count == 0):\n fout.write(\"AminoAcid\\t\")\n for line in fseq:\n fout.write(str(line) + \"\\t\")\n fout.write(\"\\n\")\n fout.write(\"Seq\" +str(count) + \"\\t\")\n fout.write(\"\\n\")\n count += 1\n fout.close()\n\n return f2\n\n#Function 3: Reads in a fasta file and searches for a set of motifs.\n\nimport re\n\ndef MotifFinder(f1, motif, f2=\"motifs.xls\"):\n fn = open(f1, 'r')\n fout = open(f2, 'w')\n count = 1\n fseq = fn.readlines()\n for line in fseq:\n if \">\" in line:\n fseq.remove(line)\n fout.write(\"SeqName\" + \"\\t\" + \"M1\" + \"\\t\" + \"Hits\" + \"\\t\" + \"M2\" + \"\\t\" + \"Hits\"\"\\n\")\n for line in fseq:\n fout.write(\"Seq\" +str(count))\n count += 1\n for line2 in motif:\n motifs=re.findall(line2,line)\n fout.write(\"\\t\")\n fout.write(line2)\n fout.write(\"\\t\")\n fout.write(str(len(motifs)))\n fout.write(\"\\n\")\n fout.close()\n return f2\n","sub_path":"PythonProject.py","file_name":"PythonProject.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"232314063","text":"import colorama\nimport socket\nfrom typing import List\n\nimport paramiko.client\nfrom .worker_pool import WorkerPoolManager\n\n\nclass WorkerPoolSSHConnection:\n def __init__(self, manager: WorkerPoolManager):\n self.manager = manager\n\n def connect(self):\n instances = self.manager.list_worker_instances()\n addrs = [_get_external_ip(instance) for instance in instances]\n hosts = {ip: instance[\"name\"] for ip, instance in zip(addrs, instances)}\n clients = {}\n for ip, name in hosts.items():\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(_IgnoreMissingHostKeys)\n client.connect(hostname=ip)\n clients[name] = client\n return clients\n\n def stream_logs(self, colorize: bool = True):\n clients = self.connect()\n # First, establish connections to the workers.\n channels = {}\n for name, client in clients.items():\n transport = client.get_transport()\n channel = transport.open_session(timeout=1)\n channel.get_pty()\n channel.exec_command(\"journalctl -o cat -f -u thor-worker.service\")\n channel.settimeout(0.05)\n stdout = channel.makefile(\"r\", 4096)\n channels[name] = stdout\n\n # Set up pretty colors, if requested.\n if colorize:\n printer = ColorizedPrinter(list(clients.keys()))\n else:\n printer = PlainPrinter()\n\n # Loop over the open connections, printing output as we get it.\n while True:\n # Keep track of any connections that appear to be closed. 
We should\n # remove them from the list that we loop over.\n closed = set()\n\n for instance, stdout in channels.items():\n # If any channel greedily emits at least 1024 lines, then pause\n # and move on to other connections to give them a chance to spam\n # us too.\n i = 0\n while i < 1024:\n try:\n line = stdout.readline()[:-1]\n i += 1\n printer.print(instance, line)\n except socket.timeout:\n # Wait for more input - exit the loop.\n break\n except OSError:\n # Closed - exit the loop.\n closed.add(instance)\n break\n\n # Clean up and close channels to any commands that exited.\n for closed_instance in closed:\n client = clients[closed_instance]\n client.close()\n clients.pop(closed_instance)\n\n if len(clients) == 0:\n return\n\n\nclass ColorizedPrinter:\n def __init__(self, hosts: List[str]):\n colors = [\n colorama.Fore.GREEN,\n colorama.Fore.YELLOW,\n colorama.Fore.CYAN,\n colorama.Style.BRIGHT + colorama.Fore.BLUE,\n colorama.Style.DIM + colorama.Fore.YELLOW,\n colorama.Style.DIM + colorama.Fore.RED,\n colorama.Style.DIM + colorama.Fore.GREEN,\n colorama.Fore.RED,\n colorama.Style.BRIGHT + colorama.Fore.GREEN,\n colorama.Fore.WHITE,\n colorama.Fore.MAGENTA,\n ]\n self.colors_by_name = {}\n for i, name in enumerate(hosts):\n color = colors[i % len(colors)]\n self.colors_by_name[name] = color\n\n def print(self, hostname, message):\n color = self.colors_by_name.get(hostname, \"\")\n reset = colorama.Style.RESET_ALL\n print(f\"{color}{hostname}{reset}: {message}\")\n\n\nclass PlainPrinter:\n def __init__(self):\n pass\n\n def print(self, hostname, message):\n print(f\"{hostname}: {message}\")\n\n\ndef _get_external_ip(instance_description: dict) -> str:\n networks = instance_description.get(\"networkInterfaces\", [])\n for net in networks:\n access_configs = net.get(\"accessConfigs\", [])\n for ac in access_configs:\n if ac.get(\"natIP\", None) is not None:\n return ac[\"natIP\"]\n raise ValueError(\n f\"no external IP address found for instance 
{instance_description['name']}\"\n )\n\n\nclass _IgnoreMissingHostKeys(paramiko.client.MissingHostKeyPolicy):\n def missing_host_key(self, client, hostname, key):\n return\n","sub_path":"thorctl/sshconn.py","file_name":"sshconn.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"636272900","text":"import binascii\n\ndef gcd(x, y):\n while y:\n x, y = y, x%y\n return x\n\ndef lcm(x, y):\n return x*y //gcd(x, y)\n\ndef exgcd(x, y):\n c0, c1 = x, y\n a0, a1 = 1, 0\n b0, b1 = 0, 1\n \n while c1 != 0:\n m = c0 % c1\n q = c0 // c1\n \n c0, c1 = c1, m\n a0, a1 = a1, (a0 - q * a1)\n b0, b1 = b1, (b0 - q * b1)\n \n return c0, a0, b0\n\ndef modinv(x, n):\n return x % n\n\ndef main():\n p=16764777456439012943\n q=14588483238879488297\n\n N = p*q\n e = 65537\n\n lam = lcm(p-1, q-1)\n\n c, a, b = exgcd(e, lam)\n\n a = modinv(a, lam)\n\n with open('flag.enc', 'r') as f:\n crypt = f.read()\n crypt = encode('hex')\n crypt = int(crypt('hex'), 16)\n\n ans = pow(crypt, a, N)\n\n ans = hex(ans)[2:]\n \n print(binascii.a2b_hex(ans).decode('utf-8'))\n\n\n# print(binascii.a2b_hex(ans).decode('utf-8'))\n\nif __name__ == '__main__':\n main()\n","sub_path":"crypro4b_3rd_2017/easy/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"412404515","text":"import scipy.optimize\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict, Counter\nfrom lower import Lower, Lower2, donnel\n\nR = 10\nSTEPS = 100\neps = 1e-8\n\ndef L(wx,wy,w,t1,t2):\n if w == min(wx,wy):\n l1 = (t1-t2)*(1-wx) / ((1-t1)*(wx-wy))\n l2 = t2*(wx-wy) / ((t1-t2)*wy)\n v = (1-t1)/(1-wx)\n return t1*np.log(l1) + t2*np.log(l2) + np.log(v)\n #if not wx', facecolor='black', connectionstyle='arc3,rad=0')\n )\n\ndef corollary1(wx,wy,w1,w2, c):\n # t1=1-wy, t2=1-wx\n l1, l2 = L(wx,wy,w1,1-wy,1-wx), L(wx,wy,w2,1-wy,1-wx)\n pq = (l1-di(1-wy,wx))/(l2-di(1-wy,wx))\n pu = (l1-di(1-wx,wy))/(l2-di(1-wy,wx))\n md = min(di(1-wy,wx), di(1-wx,wy))\n sr = (l1-md)/(l2-md)\n plt.scatter([pq],[pu], marker='o', c=c)\n #plt.scatter([sl],[sr], marker='o', c=c)\n plt.annotate(\n 'Corollary 1', xy=(pq, pu),\n xytext=(-50, -20),\n textcoords='offset points', ha='left', va='bottom',\n arrowprops=dict(arrowstyle = '-|>', facecolor='black', connectionstyle='arc3,rad=0')\n )\n\n\ndef chosenpath(wx,wy,w1,w2,c):\n # Chosen Path\n r = np.log(w1/max(wx,wy))/np.log(w2/max(wx,wy))\n plt.scatter([r],[r], marker='o', color=c)\n plt.annotate(\n 'Chosen Path', xy=(r, r),\n xytext=(30, -10),\n textcoords='offset points', ha='left', va='bottom',\n arrowprops=dict(arrowstyle = '-|>', facecolor='black', connectionstyle='arc3,rad=0')\n )\n\ndef bitsampling(wx,wy,w1,w2,c):\n rq = ru = np.log(1-wx-wy+2*w1)/np.log(1-wx-wy+2*w2)\n plt.scatter([rq],[ru], marker='o', color=c)\n plt.annotate(\n 'Bit Sampling', xy=(rq, ru),\n xytext=(30, -10),\n textcoords='offset points', ha='left', va='bottom',\n arrowprops=dict(arrowstyle = '-|>', facecolor='black', connectionstyle='arc3,rad=0')\n )\n\ndef spherical(wx,wy,w1,w2,c):\n a = (w1-wx*wy)/(wx*(1-wx)*wy*(1-wy))**.5\n b = (w2-wx*wy)/(wx*(1-wx)*wy*(1-wy))**.5\n # Tradeoff\n def spherical(l):\n return (1-a**(1+l))**2/(1-a**2) / ( (1-a**l*b)**2/(1-b**2)) ,\\\n (a**l - 
a)**2/(1-a**2) / ( (1-a**l*b)**2/(1-b**2))\n xy = [spherical(l) for l in np.linspace(-1,1)]\n x, y = zip(*xy)\n plt.plot(x, y, '--', label='Spherical Trade-off', color=c)\n # Make an extra dot on the LSH-Regime?\n # mx, my = spherical(0)\n #plt.scatter([x[0], mx, x[-1]], [y[0], my, y[-1]], marker='o', color=c)\n plt.scatter([x[0], x[-1]], [y[0], y[-1]], marker='o', color=c)\n return x[-1]\n\ndef cp_balance(wx,wy,w1,w2,c):\n if np.sign(np.log(w2/wx)*np.log(1+(-w1+wx)/(-1+wy))\n - np.log(w1/wy)*np.log((-1-w2+wx+wy)/(-1+wx))) \\\n == np.sign(np.log(w2/wx)*np.log((-1 - w1 + wx + wy)/(-1 + wx))\n - np.log(w1/wx)*np.log((-1 - w2 + wx + wy)/(-1 + wx))):\n # No trade-off if both curves are monotone in the same direction\n return\n def f(k):\n n = (k-1)*np.log((1-wx-wy+w2)/(1-wx)) + np.log(w2/wx)\n return ((k-1)*np.log((1-wx-wy+w1)/(1-wx)) + np.log(w1/wx))/n, \\\n ((k-1)*np.log((1-wx-wy+w1)/(1-wy)) + np.log(w1/wy))/n\n xy = [f(k**2) for k in range(1,100)]\n x, y = zip(*xy)\n plt.plot(x, y, '--', label='CP Trade-off', color=c)\n\ndef hyperplane(wx,wy,w1,w2,srq,c):\n # Hyper plane\n def hyper():\n a = (w1-wx*wy)/(wx*(1-wx)*wy*(1-wy))**.5\n b = (w2-wx*wy)/(wx*(1-wx)*wy*(1-wy))**.5\n return np.log(1-np.arccos(a)/np.pi)/np.log(1-np.arccos(b)/np.pi)\n hyper = hyper()\n if hyper < srq:\n plt.scatter([hyper],[hyper], marker='o', color=c)\n plt.annotate(\n 'Hyperplane', xy=(hyper, hyper),\n xytext=(-20, 20),\n textcoords='offset points', ha='right', va='bottom',\n arrowprops=dict(arrowstyle = '-|>', facecolor='black', connectionstyle='arc3,rad=0')\n )\n else:\n hyper = srq\n\n # LSH Area\n plt.plot(np.linspace(0,max(srq,hyper)), np.linspace(0,max(srq,hyper)), ':', label='LSH Regime', color=c, zorder=-1)\n\ndef t1wx_t2wy(wx,wy,w1,w2):\n rq = ((w1-wx*wy)**2*(w2**2-2*w2*wx*wy+wx*wy*(-1+wx+wy)))/((w2-wx*wy)**2*(w1**2-2*w1*wx*wy+wx*wy*(-1+wx+wy)))\n ru = ((w1-wx*wy)**2*(w2**2-2*w2*wx*wy+wx*wy*(-1+wx+wy)))/((-1+wx)*wx*(-1+wy)*wy*(w1**2-2*w1*wx*wy+wx*wy*(-1+wx+wy)))\n 
plt.scatter([rq],[ru], label='t1wy t2wy')\n\n\ndef theorem(wx,wy,w1,w2,c):\n upper = Upper(wx,wy,w1,w2)\n (max_rq, max_ru), _ = upper.endpoints()\n plt.scatter([max_rq, 0], [0, max_ru], marker='o', c=c)\n\n x, y = zip(*upper.calc())\n plt.plot(x, y, label='General Upper Bound', c=c)\n return upper\n\n\ndef main():\n plt.style.use('seaborn-whitegrid')\n cs = plt.rcParams['axes.prop_cycle'].by_key()['color']\n cs[0],cs[1] = cs[1], cs[0]\n\n matplotlib.rcParams['mathtext.fontset'] = 'cm'\n # Pyplot will warn about this, but we need it for tex\n matplotlib.rcParams['font.family'] = 'cmu serif'\n # This is nice in pyplot, but messes things up in tex\n #matplotlib.rcParams['font.family'] = 'STIXGeneral'\n matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'\n matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'\n matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'\n\n #wx, wy, w1 = 0.03, 0.025, 0.024; w2 = wx*wy\n #wx, wy, w1 = 0.3, 0.2, 0.1; w2 = wx*wy\n #wx, wy, w1, w2 = 0.333, 0.333, .3, 0.033\n #wx = np.random.uniform(0,.3)\n #wy = wx\n wx = round(np.random.uniform(0,.3), 3)\n wy = round(np.random.uniform(0,wx), 3)\n w1 = round(np.random.uniform(wx*wy,min(wx,wy)), 3)\n #w1 = min(wx,wy)\n w2 = round(np.random.uniform(wx*wy,w1),3)\n #w2 = wx*wy\n #wx, wy, w1, w2 = 0.49, 0.49, 0.267, 0.0925\n\n # Subset plots for Rasmus\n #wx, wy, w1 = .002, .003, .00199; w2 = wx*wy\n #wx, wy, w1 = .02, .03, .0199; w2 = wx*wy\n #wx, wy, w1 = .01, .04, .0099; w2 = wx*wy\n #wx, wy, w1 = .1, .4, .099; w2 = wx*wy\n #wx, wy, w1 = .001, .004, .00099; w2 = wx*wy\n wx, wy, w1 = .01, .03, .0099; w2 = wx*wy\n\n # Nice other plots to check\n #wx,wy,w1,w2= 0.3789422425179251, 0.35844786217710467 ,0.2739354568368429 ,0.04288314539608652\n #wx,wy,w1,w2= 0.03276672262651542, 0.02740135136984209, 0.021632606352215717, 0.0011691647201981042\n\n\n\n fig = plt.figure(figsize=(3.5, 4), frameon=True)\n print(wx,wy,w1,w2)\n #plt.title(f'$w_x={wx}, w_y={wy}, w_1={w1}, 
w_2={w2}$')\n #ax = fig.add_subplot(111)\n plt.xlabel(r'$\\rho_q$')\n plt.ylabel(r'$\\rho_u$')\n #ax.set_xlabel(r'$\\rho_q$')\n #ax.set_ylabel(r'$\\rho_u$')\n #ax.margins(0, 0)\n plt.subplots_adjust(left=.225, right=.9, bottom=.125, top=.9, wspace=.2, hspace=.2)\n\n\n\n\n minhash(wx,wy,w1,w2, cs[1])\n corollary1(wx,wy,w1,w2, cs[0])\n chosenpath(wx,wy,w1,w2, cs[1])\n #bitsampling(wx,wy,w1,w2, cs[1])\n srq = spherical(wx,wy,w1,w2, cs[2])\n #cp_balance(wx,wy,w1,w2, cs[5])\n hyperplane(wx,wy,w1,w2, srq, cs[1])\n #t1wx_t2wy(wx,wy,w1,w2)\n upper = theorem(wx,wy,w1,w2, cs[0])\n #donnel(wx,wy,w1,w2,cs[5])\n\n if w2 == wx*wy:\n x, y = Lower(wx,wy,w1).calc()\n plt.plot(x, y, '-.', label='Lower bound 1', color=cs[3])\n pts, _ = Lower(wx,wy,w1).endpoints()\n #plt.scatter(*zip(*pts), color=cs[3])\n\n pts = Lower2(wx,wy,w1).calc()\n plt.plot(*zip(*pts), '-.', label='Lower bound 2 (Conj.)', color=cs[4])\n\n\n plt.legend(loc=1)\n\n name = f'fig_wx{str(wx).replace(\".\",\"_\")}.pgf'\n print(f'Saving as {name}')\n plt.savefig(name,\n bbox_inches='tight'\n #, bbox_inches =None, pad_inches = 1, transparent=True, frameon=True\n )\n plt.show()\n\n\n # T path\n x, y = zip(*[t for t,_ in upper.tpath])\n cmap = matplotlib.cm.rainbow(np.linspace(0.0, 1.0, len(x)))\n plt.scatter(x, y, color=cmap)\n #print(upper.tpath)\n _, (x, y) = min((abs(rq-ru),t) for t,(rq,ru) in upper.tpath)\n plt.scatter([x], [y], color=cs[0])\n #plt.plot(x, y)\n #plt.scatter([x[0],x[-1]],[y[0],y[-1]])\n plt.show()\n\nif __name__ == '__main__':\n main()\n","sub_path":"thesis/parts/supermajority/nbs/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"169711401","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport FinanceDataReader as fdr\nimport datetime\nfrom dateutil.relativedelta import relativedelta # 몇달 전, 몇달 후, 몇년 전, 몇년 후 를 구하고 싶다면 relativedelta\nfrom pypfopt.efficient_frontier import EfficientFrontier\nfrom pypfopt import risk_models\nfrom pypfopt import expected_returns\nfrom pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\nimport warnings\nwarnings.filterwarnings(action='ignore')\n\n\n# 하루 상승빈도\n# up_down_zero_df = st.get_up_down_zero_df()\n# up_down_zero_df.to_csv(\"up_down_zero_df.csv\")\nup_down_zero_df = pd.read_csv(\"up_down_zero_df.csv\")\nup_down_zero_df.index = up_down_zero_df['Unnamed: 0']\nup_down_zero_df = up_down_zero_df.drop('Unnamed: 0', axis=1)\nidx_list = up_down_zero_df.index[:30]\nsymbol_udz = [] # 종목 코드만 가져오기\nfor i in idx_list:\n symbol_udz.append(up_down_zero_df.loc[i][0])\n\ndef Updown_sharpe():\n # 종목 이름 및 코드\n kospi_temp = fdr.StockListing('KOSPI')[['Symbol', 'Name']]\n kosdaq_temp = fdr.StockListing('KOSDAQ')[['Symbol', 'Name']]\n code_name_dict = pd.concat([kospi_temp, kosdaq_temp])\n code_name_dict = code_name_dict.set_index('Symbol').to_dict().get('Name') # {'095570': 'AJ네트웍스',\n\n assets = np.array(symbol_udz, dtype='object')\n start_date = datetime.datetime.today() - relativedelta(years=3)\n start_date = start_date.strftime('%Y%m%d')\n # start_date = '2018-08-13'\n today = datetime.datetime.today().strftime(\"%Y%m%d\")\n end_date = today\n df = pd.DataFrame()\n\n for s in assets:\n df[s] = fdr.DataReader(s, start_date, end_date)['Close']\n\n # drop null\n dfnull = df.dropna(axis=1)\n\n # 수익률의 공분산\n mu = expected_returns.mean_historical_return(dfnull)\n S = risk_models.sample_cov(dfnull)\n # print(plotting.plot_covariance(S))\n\n # 포폴 최적화 (Max sharp ratio) - 급등주\n ef = EfficientFrontier(mu, S, solver=\"SCS\")\n weights = ef.max_sharpe()\n cleaned_weights = ef.clean_weights()\n 
print(ef.portfolio_performance(verbose=True))\n\n one_million = 1000000\n portfolio_val = 15 * one_million\n latest_prices = get_latest_prices(dfnull)\n weights = cleaned_weights\n da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)\n allocation, leftover = da.lp_portfolio(verbose=False)\n rmse = da._allocation_rmse_error(verbose=False)\n\n # 각 종목별 실제 투자 금액\n inv_total_price = {}\n for i in allocation.keys():\n inv_total_price[i] = latest_prices.loc[i] * allocation[i]\n inv_total_price\n\n # 총 투자금액\n investment = 0\n for i in inv_total_price.values():\n investment += i\n print(investment)\n\n # 각 종목별 실제 투자 비중\n inv_total_weight = {}\n for i in allocation.keys():\n inv_total_weight[i] = inv_total_price[i] / investment\n inv_total_weight\n\n # 투자비중의 합계\n investment_w = 0\n for i in inv_total_weight.values():\n investment_w += i\n print(investment_w)\n\n # 결과값으로 불러올 값을 리스트로 저장\n name_list = [] # 종목명(회사이름)\n total_price_stock = [] # 각 종목별 실제 투자 금액\n total_weight_stock = [] # 각 종목별 실제 투자 비중\n for i in allocation.keys(): # i = 포트폴리오에 할당된 종목의 종목코드\n name_list.append(code_name_dict.get(i))\n total_price_stock.append(inv_total_price.get(i))\n total_weight_stock.append(inv_total_weight.get(i))\n\n # Get the discrete allocation values\n discrete_allocation_list = []\n for symbol in allocation:\n discrete_allocation_list.append(allocation.get(symbol))\n print(discrete_allocation_list)\n\n portfolio_df = pd.DataFrame(columns=['종목명', '종목코드', '수량(주)', '투자금액(원)', '투자비중'])\n portfolio_df['종목명'] = name_list\n portfolio_df['종목코드'] = allocation\n portfolio_df['수량(주)'] = discrete_allocation_list\n portfolio_df['투자금액(원)'] = total_price_stock\n portfolio_df['투자비중'] = total_weight_stock\n portfolio_df_sorted = portfolio_df.sort_values('투자비중', ascending=False)\n portfolio_df_sorted = portfolio_df_sorted.reset_index(drop=True)\n # 투��� 금액에 따라 최적화된 포트폴리오 종목별 수량\n portfolio_df_sorted.loc[\"합계\", 2:] = portfolio_df_sorted.sum()\n\n ################# 코스피랑 비교 
####################\n # 각 일자별, 종목별 종가에 해당 weights를 곱해주기\n for i, weight in cleaned_weights.items():\n dfnull[i] = dfnull[i] * weight\n\n # 일자별 종목의 (종가*비중) 합계를 Port열에 저장\n dfnull['Port'] = dfnull.sum(axis=1)\n\n # 일자별 종가의 전일대비 변동률(수익률)을 portfolio라는 데이터프레임으로 저장\n portfolio = dfnull[['Port']].pct_change()\n\n # 코스피지수 불러오기\n kospi = fdr.DataReader('KS11', start_date, end_date)[['Close']]\n\n # 코스피지수의 변동률(수익률) 구하기\n # 변동률(수익률) = (당일가격-전일가격) / 전일가격\n # 7/20의 변동률(수익률) = (7/20 가격-7-19 가격) / 7/19 가격\n kospi_pct = kospi.pct_change()\n\n # 코스피와 포트폴리오 합치기\n result = kospi_pct.join(portfolio)\n\n # 1열을 0으로 (Nan 값을 0으로)\n result.iloc[0] = 0\n\n # 열 이름 변경\n result.columns = ['KOSPI', 'PORTFOLIO']\n\n # 1에서 시작해서, 전일대비 변동률(수익률)을 적용하여 수치화하기\n wealth = (1 + result).cumprod()\n\n # 포트폴리오와 KOSPI 지수의 '누적 수익률 추이'를 시각화하여 비교\n\n # matplotlib.pyplot 스타일시트 설정\n plt.style.use('fivethirtyeight')\n\n plt.figure(figsize=(18, 5))\n plt.plot(wealth.index, wealth.KOSPI, 'r', label='KOSPI')\n plt.plot(wealth.index, wealth.PORTFOLIO, 'b', label=\"PORTFOLIO(Up.Down.Zero)\")\n plt.grid(True)\n plt.title('Return Trend')\n plt.xlabel('Date', fontsize=18, labelpad=7)\n plt.ylabel('Return', fontsize=18, labelpad=7)\n plt.legend(loc='best')\n plt.savefig('Updown_sharpe_return.png', dpi=100)\n plt.show()\n\n # 변동률 비교\n plt.figure(figsize=(18, 10))\n\n plt.subplot(2, 1, 1)\n plt.title('Volatility Trend')\n\n plt.plot(result.index, result.KOSPI, 'r', label='KOSPI')\n plt.yticks([-0.15, -0.10, -0.05, 0.00, 0.05, 0.10, 0.15])\n plt.grid(True)\n plt.ylabel('Volatility', fontsize=18, labelpad=7)\n plt.legend(loc='best')\n\n plt.subplot(2, 1, 2)\n plt.title('Volatility Trend')\n plt.plot(result.index, result.PORTFOLIO, 'b', label=\"PORTFOLIO(Up.Down.Zero)\")\n plt.yticks([-0.15, -0.10, -0.05, 0.00, 0.05, 0.10, 0.15])\n plt.ylabel('Volatility', fontsize=18, labelpad=7)\n plt.legend(loc='best')\n\n plt.grid(True)\n plt.savefig('Updown_sharpe_votality.png', dpi=100)\n plt.show()\n\n # print(portfolio_df_sorted) # 데이터 
프레임 출력시 시간이 걸림.\n\n print('----- Up.Down.Zero portfolio performance -----')\n # Show Funds Remaining\n print('Funds Remaining: ', leftover, ' KRW')\n\n # Show Portfolio performance\n print(ef.portfolio_performance(verbose=True))","sub_path":"CSV/def_Updown_sharpe.py","file_name":"def_Updown_sharpe.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"104659301","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport time\nimport pprint\nimport itertools\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.decomposition import PCA, FastICA, TruncatedSVD\nfrom sklearn.random_projection import GaussianRandomProjection\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neural_network import MLPClassifier\n#from sklearn.model_selection import cross_val_score\n\ndef plot_stuff(cluster_counts, k_means, expect_max, name):\n font = { 'family': 'Arial', 'size': 18 }\n plt.rc('font', **font)\n plt.plot(cluster_counts, k_means, label='MyKMeans')\n plt.plot(cluster_counts, expect_max, label='MyExpectMax')\n plt.ylabel('Average Silhouette', fontsize=18, fontname='Arial')\n plt.xlabel('Clusters', fontsize=18, fontname='Arial')\n plt.tight_layout()\n plt.legend()\n plt.savefig(f\"{name}.png\")\n# plt.show()\n\ndef lowest_label_error(labels1, labels2):\n # Get arrays of unique labels.\n unique_labels1 = np.unique(labels1)\n unique_labels2 = np.unique(labels2)\n\n # Check that the two inputs have the same number of unique labels.\n n_labels = unique_labels1.shape[0]\n if n_labels is not unique_labels2.shape[0]:\n print('unique_labels1', unique_labels1)\n print('unique_labels2', unique_labels2)\n raise Exception('oh no! 
labels are not the same length!')\n\n # Init empty masks.\n masks1 = np.zeros((n_labels, labels1.shape[0]), dtype=bool)\n masks2 = np.copy(masks1)\n\n # Create an array for each unique value and add it to a mask.\n for i in range(n_labels):\n masks1[i] = np.array([label == unique_labels1[i] for label in labels1])\n for i in range(n_labels):\n masks2[i] = np.array([label == unique_labels2[i] for label in labels2])\n\n # Find the lowest error between mask1 and every permutation of mask2.\n lowest_error = np.inf\n for masks2_perm in itertools.permutations(masks2):\n masks2_perm = np.array(masks2_perm)\n error = np.count_nonzero(masks1 != masks2_perm)\n if error < lowest_error:\n lowest_error = error\n\n # Return the error percentage.\n return lowest_error / masks1.size\n\nRS = 11\n\n# Wine Quality\nname = 'wq'\ntarget = 'quality'\ntrain = pd.read_csv(f'wine_train.csv')\ntest = pd.read_csv(f'wine_test.csv')\n\ny_train = train.loc[:,target]\nX_train = train.drop(target, axis=1)\ny_test = test.loc[:,target]\nX_test = test.drop(target, axis=1)\n\ntransformers = [\n None,\n PCA(n_components=1, random_state=RS),\n FastICA(random_state=RS),\n GaussianRandomProjection(random_state=RS, n_components=9),\n TruncatedSVD(n_components=1, random_state=RS)\n]\n\nnp.random.seed(RS)\nrandom_states = np.random.choice(range(1000), size=5, replace=False)\ntotal_start_time = time.time()\nmetrics = {\n 'None': {},\n 'PCA': {},\n 'FastICA': {},\n 'GaussianRandomProjection': {},\n 'TruncatedSVD': {}\n}\n\nfor transformer in transformers:\n # Transform the data (or not).\n if transformer is None:\n key = 'None'\n X_train_new = np.copy(X_train)\n X_test_new = np.copy(X_test)\n else:\n key = transformer.__class__.__name__\n transform_start_time = time.time()\n transformer.fit(X_train)\n X_train_new = transformer.transform(X_train)\n X_test_new = transformer.transform(X_test)\n metrics[key]['transform_time'] = round(time.time() - transform_start_time, 3)\n print(key)\n\n try:\n # Perform a grid 
seach to find the best hyper parameters.\n grid = {\n 'random_state': [RS],\n 'hidden_layer_sizes': [(5,), (10,), (50,), (100,)],\n 'alpha': [0.0001, 0.0002, 0.0003, 0.0004],\n 'learning_rate_init': [1e-4, 1e-3, 1e-2],\n 'max_iter': [500]\n }\n clf = GridSearchCV(MLPClassifier(), grid, n_jobs=-1, cv=5)\n grid_start_time = time.time()\n clf.fit(X_train_new, y_train)\n metrics[key]['grid_time'] = round(time.time() - grid_start_time, 3)\n metrics[key]['best_params'] = clf.best_params_\n\n # Define new classifier based on best hyperparamters\n clf = MLPClassifier(**clf.best_params_)\n\n # Train the new classifier on all training data.\n train_start_time = time.time()\n clf.fit(X_train_new, y_train)\n metrics[key]['iters'] = clf.n_iter_\n metrics[key]['train_time'] = round(time.time() - train_start_time, 3)\n\n # Calculate the final scores.\n metrics[key]['final_train_score'] = clf.score(X_train_new, y_train)\n metrics[key]['final_test_score'] = clf.score(X_test_new, y_test)\n except Exception as e:\n print('EXCEPTION!')\n print(e)\n\npprint.PrettyPrinter(indent=4).pprint(metrics)\nprint('total_time:', time.time() - total_start_time)\n","sub_path":"a3/nn_dim_reduce.py","file_name":"nn_dim_reduce.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"365840672","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.3'\n# jupytext_version: 0.8.6\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nimport warnings\n\nimport pandas as pd\nimport numpy as np\n\nimport scipy.stats as st\n\nimport iqplot\n\nimport bebi103\n\nimport bokeh.io\nbokeh.io.output_notebook()\nimport holoviews as hv\nhv.extension('bokeh')\nimport panel as pn\n\nimport tidy_data\n\ndata_path = \"../data/\"\nrg = np.random.default_rng()\n\n# +\n##########\n# Don't comment out!!1!!\n##########\n\ndf = tidy_data.tidy_concentrations()\n\n#made ndarrays of various concentrations\nconcentrations = df.concentration.unique()\n\n#make ndarrays of catastrophe times for different concentrations\n#catastrophe_times[0] is for the lowest concentration, while cat_times[4] is for the highest concentration\ncatastrophe_times = np.array([\n df.loc[df['concentration'] == concent, 'catastrophe time'] for concent in df.concentration.unique()\n])\n\n# +\ndef plot_overlaid_ecdfs(alpha, time, concentration):\n \"\"\"\n ecdfs of catastrophe times,\n colored by concentration\n also includes gamma distribution overlaid\n Output:\n bokeh figure object\n \"\"\"\n if concentration != 'all':\n sub_df = df.loc[df['concentration_int'] == concentration]\n else:\n sub_df = df\n \n #plot actual data\n p = iqplot.ecdf(\n data = sub_df,\n q = 'catastrophe time',\n cats = 'concentration',\n marker_kwargs=dict(line_width=0.3, alpha = 0.6),\n show_legend = True,\n palette=bokeh.palettes.Magma8[1:-2][::-1],\n tooltips=[\n ('concentration', '@{concentration}'),\n ('catastrophe time', '@{catastrophe time}')\n ],\n )\n p.xaxis.axis_label = \"catastrophe times (s)\"\n \n #get points to plot line\n x = np.linspace(0, 2000)\n y = st.gamma.cdf(x, alpha, scale=time)\n \n #overlay ecdf, can be scaled by widgets\n p.line(\n x = x,\n y = y,\n color = 'yellowgreen',\n width = 
3\n )\n \n p.title.text = 'ECDF of catastrophe times by concentration'\n return p\n\n\n# #uncomment to show\n# p = plot_exploratory_ecdfs()\n# bokeh.io.show(p)\n\n# +\nalpha_slider = pn.widgets.FloatSlider(\n name='alpha',\n start=1.8,\n end=4.2,\n step=0.1,\n value=2.4075\n)\n\ntime_slider = pn.widgets.FloatSlider(\n name='average interarrival time (s)',\n start=75,\n end=215,\n step=10,\n value=1 / 0.005462\n)\n\nconcentration_slider = pn.widgets.Select(\n name='concentration (uM)',\n options=[7, 9, 10, 12, 14, 'all',],\n value = 'all',\n)\n# -\n\n@pn.depends(\n alpha=alpha_slider.param.value, \n time=time_slider.param.value,\n concentration = concentration_slider.param.value\n)\ndef plot_overlaid_ecdfs_pn(alpha, time, concentration):\n return plot_overlaid_ecdfs(alpha, time, concentration)\n\nwidgets = pn.Column(pn.Spacer(width=30), alpha_slider, time_slider, concentration_slider, width=500)\npanel = pn.Column(plot_overlaid_ecdfs_pn, widgets)\npanel.save('interactive', embed = True, max_opts = 40)\n\ndef main():\n p = plot_overlaid_ecdfs(2, 1 / .005, 'all')\n bokeh.io.show(p)\n\n \n \n return True\nif __name__ == '__main__': main()\n\n\n# +\n#!jupytext --to python viz_dashboard.ipynb\n","sub_path":"sandbox_code/viz_dashboard.py","file_name":"viz_dashboard.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"576607255","text":"\n\n#calss header\nclass _TONIC():\n\tdef __init__(self,): \n\t\tself.name = \"TONIC\"\n\t\tself.definitions = [u'a liquid medicine that has the general effect of making you feel better rather than treating a particular health problem that you might have', u'something that makes you feel stronger or happier: ', u'carbonated (= with bubbles) water with a bitter taste that can be drunk on its own or added to alcoholic drinks: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_tonic.py","file_name":"_tonic.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"60113752","text":"import os\n\nfrom daiquiri.core.settings.base import BASE_DIR\nDAIQUIRI_APPS = [\n 'daiquiri.archive',\n 'daiquiri.auth',\n 'daiquiri.contact',\n 'daiquiri.core',\n 'daiquiri.files',\n 'daiquiri.jobs',\n 'daiquiri.meetings',\n 'daiquiri.metadata',\n 'daiquiri.query',\n 'daiquiri.serve',\n 'daiquiri.stats',\n 'daiquiri.tap',\n 'daiquiri.uws'\n]\n\nINSTALLED_APPS = []\n\nFIXTURE_DIRS = (\n os.path.join(BASE_DIR, 'fixtures'),\n)\n\nREST_FRAMEWORK = {\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.ScopedRateThrottle',\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'query.create': '1000/second'\n }\n}\n\nAUTH_SIGNUP = True\nAUTH_WORKFLOW = 'confirmation'\n\nARCHIVE_ANONYMOUS = False\nARCHIVE_BASE_PATH = os.path.join(BASE_DIR, 'files')\n\nMEETINGS_PARTICIPANT_DETAIL_KEYS = [\n {\n 'key': 'affiliation',\n 'label': 'Affiliation',\n 'data_type': 'text',\n 'required': True\n },\n {\n 'key': 'dinner',\n 'label': 'Conference dinner',\n 'data_type': 'radio',\n 'required': True,\n 'options': [\n {'id': 'yes', 'label': 'yes'},\n {'id': 'no', 'label': 'no'}\n ]\n }\n]\n\nSERVE_DOWNLOAD_DIR = os.path.join(BASE_DIR, 'files')\n","sub_path":"testing/config/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"636079109","text":"from socket import *\nimport struct\nimport time\nimport json\nimport subprocess\n\nip_port = ('127.0.0.1', 8080)\nBUFSIZE = 1024\n\ntcp_socket_server = socket(AF_INET, SOCK_STREAM)\ntcp_socket_server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\ntcp_socket_server.bind(ip_port)\ntcp_socket_server.listen(5)\n\nwhile True:\n print(\"server 开始运行,等待客户端连接\")\n conn, addr = tcp_socket_server.accept() # 等待连接\n print('客户端', addr)\n\n while True:\n try:\n print(\"server开始接受消息\")\n cmd = conn.recv(BUFSIZE)\n if len(cmd) == 0:\n break\n print(\"接收到\", cmd)\n res = subprocess.Popen(cmd.decode('utf-8'), # 字节解码为字符串\n shell=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n # 解析命令的返回值\n result = (res.stderr.read() + res.stdout.read()).decode('gbk').encode('utf-8') # 注意是utf-8,client必须用utf-8来解码\n # 4.获得真实数据的字节长度\n total_res_bytes = len(result)\n # 5.自定制字典报头\n head_dic = {\n 'time': time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\n 'size': total_res_bytes, # 字节长度\n 'MD5': '8f6fbf8347faa4924a76856701edb0f3',\n 'file_name': 'badminton.txt',\n }\n # 6. 序列化字典 ,并将其转换成字节形式\n head_dic_bytes = json.dumps(head_dic).encode('utf-8')\n # 7.使用 struct 封装报头字典head_dic_bytes ,固定长度(4个字节)\n # 封装成字节,发送给客户端,还是按照字节取出来.\n head = struct.pack('i', len(head_dic_bytes))\n # 8. 先将固定头发送给客户端\n conn.send(head)\n # 9 . 再将自定制报头发送给客户端\n conn.send(head_dic_bytes)\n # 10. 最后将真实结果发送给客户端\n conn.send(result)\n # 这里就是拼接字节\n # 格式: 固定头 + 自定义报头 +真实数据\n\n # 如果命令没有返回值,但是也能执行成功,例如cd ..\n if not result:\n result = \"run command successfully\"\n conn.send(result.encode('utf-8'))\n print(\"server端处理一条消息\")\n except Exception:\n break\n conn.close()\ntcp_socket_server.close()","sub_path":"socket/packet_splicing/tcp_server_resolve_splicing.py","file_name":"tcp_server_resolve_splicing.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"276396115","text":"# Copyright (c) 2016 Matt Davis, \n# Chris Houseknecht, \n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nimport os\nimport re\nimport types\nimport copy\nimport inspect\nimport traceback\nimport json\n\nfrom os.path import expanduser\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\ntry:\n from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION\nexcept Exception:\n ANSIBLE_VERSION = 'unknown'\nfrom ansible.module_utils.six.moves import configparser\nimport ansible.module_utils.six.moves.urllib.parse as urlparse\n\nAZURE_COMMON_ARGS = dict(\n auth_source=dict(\n type='str',\n choices=['auto', 'cli', 'env', 'credential_file', 'msi']\n ),\n profile=dict(type='str'),\n subscription_id=dict(type='str'),\n client_id=dict(type='str', no_log=True),\n secret=dict(type='str', no_log=True),\n tenant=dict(type='str', no_log=True),\n ad_user=dict(type='str', no_log=True),\n password=dict(type='str', no_log=True),\n cloud_environment=dict(type='str', default='AzureCloud'),\n cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),\n api_profile=dict(type='str', default='latest'),\n adfs_authority_url=dict(type='str', default=None)\n)\n\nAZURE_CREDENTIAL_ENV_MAPPING = dict(\n profile='AZURE_PROFILE',\n subscription_id='AZURE_SUBSCRIPTION_ID',\n client_id='AZURE_CLIENT_ID',\n secret='AZURE_SECRET',\n tenant='AZURE_TENANT',\n ad_user='AZURE_AD_USER',\n password='AZURE_PASSWORD',\n cloud_environment='AZURE_CLOUD_ENVIRONMENT',\n cert_validation_mode='AZURE_CERT_VALIDATION_MODE',\n adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'\n)\n\n# FUTURE: this should come from the SDK or an external location.\n# For now, we have to copy from azure-cli\nAZURE_API_PROFILES = {\n 'latest': {\n 'ContainerInstanceManagementClient': '2018-02-01-preview',\n 'ComputeManagementClient': dict(\n default_api_version='2018-10-01',\n resource_skus='2018-10-01',\n 
disks='2018-06-01',\n snapshots='2018-10-01',\n virtual_machine_run_commands='2018-10-01'\n ),\n 'NetworkManagementClient': '2018-08-01',\n 'ResourceManagementClient': '2017-05-10',\n 'StorageManagementClient': '2017-10-01',\n 'WebSiteManagementClient': '2018-02-01',\n 'PostgreSQLManagementClient': '2017-12-01',\n 'MySQLManagementClient': '2017-12-01',\n 'MariaDBManagementClient': '2019-03-01',\n 'ManagementLockClient': '2016-09-01'\n },\n\n '2017-03-09-profile': {\n 'ComputeManagementClient': '2016-03-30',\n 'NetworkManagementClient': '2015-06-15',\n 'ResourceManagementClient': '2016-02-01',\n 'StorageManagementClient': '2016-01-01'\n }\n}\n\nAZURE_TAG_ARGS = dict(\n tags=dict(type='dict'),\n append_tags=dict(type='bool', default=True),\n)\n\nAZURE_COMMON_REQUIRED_IF = [\n ('log_mode', 'file', ['log_path'])\n]\n\nANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)\nCLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'\nVSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'\n\nCIDR_PATTERN = re.compile(r\"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1\"\n r\"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))\")\n\nAZURE_SUCCESS_STATE = \"Succeeded\"\nAZURE_FAILED_STATE = \"Failed\"\n\nHAS_AZURE = True\nHAS_AZURE_EXC = None\nHAS_AZURE_CLI_CORE = True\nHAS_AZURE_CLI_CORE_EXC = None\n\nHAS_MSRESTAZURE = True\nHAS_MSRESTAZURE_EXC = None\n\ntry:\n import importlib\nexcept ImportError:\n # This passes the sanity import test, but does not provide a user friendly error message.\n # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.\n importlib = None\n\ntry:\n from packaging.version import Version\n HAS_PACKAGING_VERSION = True\n HAS_PACKAGING_VERSION_EXC = None\nexcept ImportError:\n Version = None\n HAS_PACKAGING_VERSION = False\n HAS_PACKAGING_VERSION_EXC = traceback.format_exc()\n\n# NB: packaging issue sometimes cause msrestazure not to be installed, check it 
separately\ntry:\n from msrest.serialization import Serializer\nexcept ImportError:\n HAS_MSRESTAZURE_EXC = traceback.format_exc()\n HAS_MSRESTAZURE = False\n\ntry:\n from enum import Enum\n from msrestazure.azure_active_directory import AADTokenCredentials\n from msrestazure.azure_exceptions import CloudError\n from msrestazure.azure_active_directory import MSIAuthentication\n from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id\n from msrestazure import azure_cloud\n from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials\n from azure.mgmt.monitor.version import VERSION as monitor_client_version\n from azure.mgmt.network.version import VERSION as network_client_version\n from azure.mgmt.storage.version import VERSION as storage_client_version\n from azure.mgmt.compute.version import VERSION as compute_client_version\n from azure.mgmt.resource.version import VERSION as resource_client_version\n from azure.mgmt.dns.version import VERSION as dns_client_version\n from azure.mgmt.web.version import VERSION as web_client_version\n from azure.mgmt.network import NetworkManagementClient\n from azure.mgmt.resource.resources import ResourceManagementClient\n from azure.mgmt.resource.subscriptions import SubscriptionClient\n from azure.mgmt.storage import StorageManagementClient\n from azure.mgmt.compute import ComputeManagementClient\n from azure.mgmt.dns import DnsManagementClient\n from azure.mgmt.monitor import MonitorManagementClient\n from azure.mgmt.web import WebSiteManagementClient\n from azure.mgmt.containerservice import ContainerServiceClient\n from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements\n from azure.mgmt.trafficmanager import TrafficManagerManagementClient\n from azure.storage.cloudstorageaccount import CloudStorageAccount\n from azure.storage.blob import PageBlobService, BlockBlobService\n from adal.authentication_context import AuthenticationContext\n from azure.mgmt.sql 
import SqlManagementClient\n from azure.mgmt.servicebus import ServiceBusManagementClient\n import azure.mgmt.servicebus.models as ServicebusModel\n from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient\n from azure.mgmt.rdbms.mysql import MySQLManagementClient\n from azure.mgmt.rdbms.mariadb import MariaDBManagementClient\n from azure.mgmt.containerregistry import ContainerRegistryManagementClient\n from azure.mgmt.containerinstance import ContainerInstanceManagementClient\n from azure.mgmt.loganalytics import LogAnalyticsManagementClient\n import azure.mgmt.loganalytics.models as LogAnalyticsModels\n from azure.mgmt.automation import AutomationClient\n import azure.mgmt.automation.models as AutomationModel\n from azure.mgmt.iothub import IotHubClient\n from azure.mgmt.iothub import models as IoTHubModels\n from msrest.service_client import ServiceClient\n from msrestazure import AzureConfiguration\n from msrest.authentication import Authentication\n from azure.mgmt.resource.locks import ManagementLockClient\nexcept ImportError as exc:\n Authentication = object\n HAS_AZURE_EXC = traceback.format_exc()\n HAS_AZURE = False\n\nfrom base64 import b64encode, b64decode\nfrom hashlib import sha256\nfrom hmac import HMAC\nfrom time import time\n\ntry:\n from urllib import (urlencode, quote_plus)\nexcept ImportError:\n from urllib.parse import (urlencode, quote_plus)\n\ntry:\n from azure.cli.core.util import CLIError\n from azure.common.credentials import get_azure_cli_credentials, get_cli_profile\n from azure.common.cloud import get_cli_active_cloud\nexcept ImportError:\n HAS_AZURE_CLI_CORE = False\n HAS_AZURE_CLI_CORE_EXC = None\n CLIError = Exception\n\n\ndef azure_id_to_dict(id):\n pieces = re.sub(r'^\\/', '', id).split('/')\n result = {}\n index = 0\n while index < len(pieces) - 1:\n result[pieces[index]] = pieces[index + 1]\n index += 1\n return result\n\n\ndef format_resource_id(val, subscription_id, namespace, types, resource_group):\n return 
resource_id(name=val,\n resource_group=resource_group,\n namespace=namespace,\n type=types,\n subscription=subscription_id) if not is_valid_resource_id(val) else val\n\n\ndef normalize_location_name(name):\n return name.replace(' ', '').lower()\n\n\n# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)\n# or generate the requirements files from this so we only have one source of truth to maintain...\nAZURE_PKG_VERSIONS = {\n 'StorageManagementClient': {\n 'package_name': 'storage',\n 'expected_version': '3.1.0'\n },\n 'ComputeManagementClient': {\n 'package_name': 'compute',\n 'expected_version': '4.4.0'\n },\n 'ContainerInstanceManagementClient': {\n 'package_name': 'containerinstance',\n 'expected_version': '0.4.0'\n },\n 'NetworkManagementClient': {\n 'package_name': 'network',\n 'expected_version': '2.3.0'\n },\n 'ResourceManagementClient': {\n 'package_name': 'resource',\n 'expected_version': '2.1.0'\n },\n 'DnsManagementClient': {\n 'package_name': 'dns',\n 'expected_version': '2.1.0'\n },\n 'WebSiteManagementClient': {\n 'package_name': 'web',\n 'expected_version': '0.41.0'\n },\n 'TrafficManagerManagementClient': {\n 'package_name': 'trafficmanager',\n 'expected_version': '0.50.0'\n },\n} if HAS_AZURE else {}\n\n\nAZURE_MIN_RELEASE = '2.0.0'\n\n\nclass AzureRMModuleBase(object):\n def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,\n check_invalid_arguments=None, mutually_exclusive=None, required_together=None,\n required_one_of=None, add_file_common_args=False, supports_check_mode=False,\n required_if=None, supports_tags=True, facts_module=False, skip_exec=False):\n\n merged_arg_spec = dict()\n merged_arg_spec.update(AZURE_COMMON_ARGS)\n if supports_tags:\n merged_arg_spec.update(AZURE_TAG_ARGS)\n\n if derived_arg_spec:\n merged_arg_spec.update(derived_arg_spec)\n\n merged_required_if = list(AZURE_COMMON_REQUIRED_IF)\n if required_if:\n merged_required_if += required_if\n\n self.module 
= AnsibleModule(argument_spec=merged_arg_spec,\n bypass_checks=bypass_checks,\n no_log=no_log,\n check_invalid_arguments=check_invalid_arguments,\n mutually_exclusive=mutually_exclusive,\n required_together=required_together,\n required_one_of=required_one_of,\n add_file_common_args=add_file_common_args,\n supports_check_mode=supports_check_mode,\n required_if=merged_required_if)\n\n if not HAS_PACKAGING_VERSION:\n self.fail(msg=missing_required_lib('packaging'),\n exception=HAS_PACKAGING_VERSION_EXC)\n\n if not HAS_MSRESTAZURE:\n self.fail(msg=missing_required_lib('msrestazure'),\n exception=HAS_MSRESTAZURE_EXC)\n\n if not HAS_AZURE:\n self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)),\n exception=HAS_AZURE_EXC)\n\n self._network_client = None\n self._storage_client = None\n self._resource_client = None\n self._compute_client = None\n self._dns_client = None\n self._web_client = None\n self._marketplace_client = None\n self._sql_client = None\n self._mysql_client = None\n self._mariadb_client = None\n self._postgresql_client = None\n self._containerregistry_client = None\n self._containerinstance_client = None\n self._containerservice_client = None\n self._managedcluster_client = None\n self._traffic_manager_management_client = None\n self._monitor_client = None\n self._resource = None\n self._log_analytics_client = None\n self._servicebus_client = None\n self._automation_client = None\n self._IoThub_client = None\n self._lock_client = None\n\n self.check_mode = self.module.check_mode\n self.api_profile = self.module.params.get('api_profile')\n self.facts_module = facts_module\n # self.debug = self.module.params.get('debug')\n\n # delegate auth to AzureRMAuth class (shared with all plugin types)\n self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params)\n\n # common parameter validation\n if self.module.params.get('tags'):\n self.validate_tags(self.module.params['tags'])\n\n if not skip_exec:\n res = 
self.exec_module(**self.module.params)\n self.module.exit_json(**res)\n\n def check_client_version(self, client_type):\n # Ensure Azure modules are at least 2.0.0rc5.\n package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)\n if package_version is not None:\n client_name = package_version.get('package_name')\n try:\n client_module = importlib.import_module(client_type.__module__)\n client_version = client_module.VERSION\n except RuntimeError:\n # can't get at the module version for some reason, just fail silently...\n return\n expected_version = package_version.get('expected_version')\n if Version(client_version) < Version(expected_version):\n self.fail(\"Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try \"\n \"`pip install ansible[azure]`\".format(client_name, client_version, expected_version))\n if Version(client_version) != Version(expected_version):\n self.module.warn(\"Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. 
Try \"\n \"`pip install ansible[azure]`\".format(client_name, client_version, expected_version))\n\n def exec_module(self, **kwargs):\n self.fail(\"Error: {0} failed to implement exec_module method.\".format(self.__class__.__name__))\n\n def fail(self, msg, **kwargs):\n '''\n Shortcut for calling module.fail()\n\n :param msg: Error message text.\n :param kwargs: Any key=value pairs\n :return: None\n '''\n self.module.fail_json(msg=msg, **kwargs)\n\n def deprecate(self, msg, version=None):\n self.module.deprecate(msg, version)\n\n def log(self, msg, pretty_print=False):\n if pretty_print:\n self.module.debug(json.dumps(msg, indent=4, sort_keys=True))\n else:\n self.module.debug(msg)\n\n def validate_tags(self, tags):\n '''\n Check if tags dictionary contains string:string pairs.\n\n :param tags: dictionary of string:string pairs\n :return: None\n '''\n if not self.facts_module:\n if not isinstance(tags, dict):\n self.fail(\"Tags must be a dictionary of string:string values.\")\n for key, value in tags.items():\n if not isinstance(value, str):\n self.fail(\"Tags values must be strings. Found {0}:{1}\".format(str(key), str(value)))\n\n def update_tags(self, tags):\n '''\n Call from the module to update metadata tags. 
Returns tuple\n with bool indicating if there was a change and dict of new\n tags to assign to the object.\n\n :param tags: metadata tags from the object\n :return: bool, dict\n '''\n tags = tags or dict()\n new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()\n param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()\n append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True\n changed = False\n # check add or update\n for key, value in param_tags.items():\n if not new_tags.get(key) or new_tags[key] != value:\n changed = True\n new_tags[key] = value\n # check remove\n if not append_tags:\n for key, value in tags.items():\n if not param_tags.get(key):\n new_tags.pop(key)\n changed = True\n return changed, new_tags\n\n def has_tags(self, obj_tags, tag_list):\n '''\n Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags\n exists in object tags.\n\n :param obj_tags: dictionary of tags from an Azure object.\n :param tag_list: list of tag keys or tag key:value pairs\n :return: bool\n '''\n\n if not obj_tags and tag_list:\n return False\n\n if not tag_list:\n return True\n\n matches = 0\n result = False\n for tag in tag_list:\n tag_key = tag\n tag_value = None\n if ':' in tag:\n tag_key, tag_value = tag.split(':')\n if tag_value and obj_tags.get(tag_key) == tag_value:\n matches += 1\n elif not tag_value and obj_tags.get(tag_key):\n matches += 1\n if matches == len(tag_list):\n result = True\n return result\n\n def get_resource_group(self, resource_group):\n '''\n Fetch a resource group.\n\n :param resource_group: name of a resource group\n :return: resource group object\n '''\n try:\n return self.rm_client.resource_groups.get(resource_group)\n except CloudError as cloud_error:\n self.fail(\"Error retrieving resource group {0} - {1}\".format(resource_group, cloud_error.message))\n except Exception as 
exc:\n self.fail(\"Error retrieving resource group {0} - {1}\".format(resource_group, str(exc)))\n\n def parse_resource_to_dict(self, resource):\n '''\n Return a dict of the give resource, which contains name and resource group.\n\n :param resource: It can be a resource name, id or a dict contains name and resource group.\n '''\n resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource\n resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group)\n resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id)\n return resource_dict\n\n def serialize_obj(self, obj, class_name, enum_modules=None):\n '''\n Return a JSON representation of an Azure object.\n\n :param obj: Azure object\n :param class_name: Name of the object's class\n :param enum_modules: List of module names to build enum dependencies from.\n :return: serialized result\n '''\n enum_modules = [] if enum_modules is None else enum_modules\n\n dependencies = dict()\n if enum_modules:\n for module_name in enum_modules:\n mod = importlib.import_module(module_name)\n for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):\n dependencies[mod_class_name] = mod_class_obj\n self.log(\"dependencies: \")\n self.log(str(dependencies))\n serializer = Serializer(classes=dependencies)\n return serializer.body(obj, class_name, keep_readonly=True)\n\n def get_poller_result(self, poller, wait=5):\n '''\n Consistent method of waiting on and retrieving results from Azure's long poller\n\n :param poller Azure poller object\n :return object resulting from the original request\n '''\n try:\n delay = wait\n while not poller.done():\n self.log(\"Waiting for {0} sec\".format(delay))\n poller.wait(timeout=delay)\n return poller.result()\n except Exception as exc:\n self.log(str(exc))\n raise\n\n def check_provisioning_state(self, azure_object, requested_state='present'):\n '''\n Check an Azure object's 
provisioning state. If something did not complete the provisioning\n process, then we cannot operate on it.\n\n :param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state\n and name attributes.\n :return None\n '''\n\n if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \\\n hasattr(azure_object, 'name'):\n # resource group object fits this model\n if isinstance(azure_object.properties.provisioning_state, Enum):\n if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \\\n requested_state != 'absent':\n self.fail(\"Error {0} has a provisioning state of {1}. Expecting state to be {2}.\".format(\n azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))\n return\n if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \\\n requested_state != 'absent':\n self.fail(\"Error {0} has a provisioning state of {1}. Expecting state to be {2}.\".format(\n azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))\n return\n\n if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):\n if isinstance(azure_object.provisioning_state, Enum):\n if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':\n self.fail(\"Error {0} has a provisioning state of {1}. Expecting state to be {2}.\".format(\n azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))\n return\n if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':\n self.fail(\"Error {0} has a provisioning state of {1}. 
Expecting state to be {2}.\".format(\n azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))\n\n def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):\n keys = dict()\n try:\n # Get keys from the storage account\n self.log('Getting keys')\n account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)\n except Exception as exc:\n self.fail(\"Error getting keys for account {0} - {1}\".format(storage_account_name, str(exc)))\n\n try:\n self.log('Create blob service')\n if storage_blob_type == 'page':\n return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,\n account_name=storage_account_name,\n account_key=account_keys.keys[0].value)\n elif storage_blob_type == 'block':\n return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,\n account_name=storage_account_name,\n account_key=account_keys.keys[0].value)\n else:\n raise Exception(\"Invalid storage blob type defined.\")\n except Exception as exc:\n self.fail(\"Error creating blob service client for storage account {0} - {1}\".format(storage_account_name,\n str(exc)))\n\n def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):\n '''\n Create a default public IP address to associate with a network interface.\n If a PIP address matching exists, return it. 
Otherwise, create one.\n\n :param resource_group: name of an existing resource group\n :param location: a valid azure location\n :param public_ip_name: base name to assign the public IP address\n :param allocation_method: one of 'Static' or 'Dynamic'\n :param sku: sku\n :return: PIP object\n '''\n pip = None\n\n self.log(\"Starting create_default_pip {0}\".format(public_ip_name))\n self.log(\"Check to see if public IP {0} exists\".format(public_ip_name))\n try:\n pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)\n except CloudError:\n pass\n\n if pip:\n self.log(\"Public ip {0} found.\".format(public_ip_name))\n self.check_provisioning_state(pip)\n return pip\n\n params = self.network_models.PublicIPAddress(\n location=location,\n public_ip_allocation_method=allocation_method,\n sku=sku\n )\n self.log('Creating default public IP {0}'.format(public_ip_name))\n try:\n poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)\n except Exception as exc:\n self.fail(\"Error creating {0} - {1}\".format(public_ip_name, str(exc)))\n\n return self.get_poller_result(poller)\n\n def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):\n '''\n Create a default security group to associate with a network interface. If a security group matching\n exists, return it. Otherwise, create one.\n\n :param resource_group: Resource group name\n :param location: azure location name\n :param security_group_name: base name to use for the security group\n :param os_type: one of 'Windows' or 'Linux'. 
Determins any default rules added to the security group.\n :param ssh_port: for os_type 'Linux' port used in rule allowing SSH access.\n :param rdp_port: for os_type 'Windows' port used in rule allowing RDP access.\n :return: security_group object\n '''\n group = None\n\n self.log(\"Create security group {0}\".format(security_group_name))\n self.log(\"Check to see if security group {0} exists\".format(security_group_name))\n try:\n group = self.network_client.network_security_groups.get(resource_group, security_group_name)\n except CloudError:\n pass\n\n if group:\n self.log(\"Security group {0} found.\".format(security_group_name))\n self.check_provisioning_state(group)\n return group\n\n parameters = self.network_models.NetworkSecurityGroup()\n parameters.location = location\n\n if not open_ports:\n # Open default ports based on OS type\n if os_type == 'Linux':\n # add an inbound SSH rule\n parameters.security_rules = [\n self.network_models.SecurityRule(protocol='Tcp',\n source_address_prefix='*',\n destination_address_prefix='*',\n access='Allow',\n direction='Inbound',\n description='Allow SSH Access',\n source_port_range='*',\n destination_port_range='22',\n priority=100,\n name='SSH')\n ]\n parameters.location = location\n else:\n # for windows add inbound RDP and WinRM rules\n parameters.security_rules = [\n self.network_models.SecurityRule(protocol='Tcp',\n source_address_prefix='*',\n destination_address_prefix='*',\n access='Allow',\n direction='Inbound',\n description='Allow RDP port 3389',\n source_port_range='*',\n destination_port_range='3389',\n priority=100,\n name='RDP01'),\n self.network_models.SecurityRule(protocol='Tcp',\n source_address_prefix='*',\n destination_address_prefix='*',\n access='Allow',\n direction='Inbound',\n description='Allow WinRM HTTPS port 5986',\n source_port_range='*',\n destination_port_range='5986',\n priority=101,\n name='WinRM01'),\n ]\n else:\n # Open custom ports\n parameters.security_rules = []\n priority = 100\n 
for port in open_ports:\n priority += 1\n rule_name = \"Rule_{0}\".format(priority)\n parameters.security_rules.append(\n self.network_models.SecurityRule(protocol='Tcp',\n source_address_prefix='*',\n destination_address_prefix='*',\n access='Allow',\n direction='Inbound',\n source_port_range='*',\n destination_port_range=str(port),\n priority=priority,\n name=rule_name)\n )\n\n self.log('Creating default security group {0}'.format(security_group_name))\n try:\n poller = self.network_client.network_security_groups.create_or_update(resource_group,\n security_group_name,\n parameters)\n except Exception as exc:\n self.fail(\"Error creating default security rule {0} - {1}\".format(security_group_name, str(exc)))\n\n return self.get_poller_result(poller)\n\n @staticmethod\n def _validation_ignore_callback(session, global_config, local_config, **kwargs):\n session.verify = False\n\n def get_api_profile(self, client_type_name, api_profile_name):\n profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)\n\n if not profile_all_clients:\n raise KeyError(\"unknown Azure API profile: {0}\".format(api_profile_name))\n\n profile_raw = profile_all_clients.get(client_type_name, None)\n\n if not profile_raw:\n self.module.warn(\"Azure API profile {0} does not define an entry for {1}\".format(api_profile_name, client_type_name))\n\n if isinstance(profile_raw, dict):\n if not profile_raw.get('default_api_version'):\n raise KeyError(\"Azure API profile {0} does not define 'default_api_version'\".format(api_profile_name))\n return profile_raw\n\n # wrap basic strings in a dict that just defines the default\n return dict(default_api_version=profile_raw)\n\n def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):\n self.log('Getting management service client {0}'.format(client_type.__name__))\n self.check_client_version(client_type)\n\n client_argspec = inspect.getargspec(client_type.__init__)\n\n if not base_url:\n # most things are resource_manager, don't 
make everyone specify\n base_url = self.azure_auth._cloud_environment.endpoints.resource_manager\n\n client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url)\n\n api_profile_dict = {}\n\n if self.api_profile:\n api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)\n\n # unversioned clients won't accept profile; only send it if necessary\n # clients without a version specified in the profile will use the default\n if api_profile_dict and 'profile' in client_argspec.args:\n client_kwargs['profile'] = api_profile_dict\n\n # If the client doesn't accept api_version, it's unversioned.\n # If it does, favor explicitly-specified api_version, fall back to api_profile\n if 'api_version' in client_argspec.args:\n profile_default_version = api_profile_dict.get('default_api_version', None)\n if api_version or profile_default_version:\n client_kwargs['api_version'] = api_version or profile_default_version\n if 'profile' in client_kwargs:\n # remove profile; only pass API version if specified\n client_kwargs.pop('profile')\n\n client = client_type(**client_kwargs)\n\n # FUTURE: remove this once everything exposes models directly (eg, containerinstance)\n try:\n getattr(client, \"models\")\n except AttributeError:\n def _ansible_get_models(self, *arg, **kwarg):\n return self._ansible_models\n\n setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)\n client.models = types.MethodType(_ansible_get_models, client)\n\n client.config = self.add_user_agent(client.config)\n\n if self.azure_auth._cert_validation_mode == 'ignore':\n client.config.session_configuration_callback = self._validation_ignore_callback\n\n return client\n\n def add_user_agent(self, config):\n # Add user agent for Ansible\n config.add_user_agent(ANSIBLE_USER_AGENT)\n # Add user agent when running from Cloud Shell\n if CLOUDSHELL_USER_AGENT_KEY in os.environ:\n 
config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])\n # Add user agent when running from VSCode extension\n if VSCODEEXT_USER_AGENT_KEY in os.environ:\n config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])\n return config\n\n def generate_sas_token(self, **kwags):\n base_url = kwags.get('base_url', None)\n expiry = kwags.get('expiry', time() + 3600)\n key = kwags.get('key', None)\n policy = kwags.get('policy', None)\n url = quote_plus(base_url)\n ttl = int(expiry)\n sign_key = '{0}\\n{1}'.format(url, ttl)\n signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())\n result = {\n 'sr': url,\n 'sig': signature,\n 'se': str(ttl),\n }\n if policy:\n result['skn'] = policy\n return 'SharedAccessSignature ' + urlencode(result)\n\n def get_data_svc_client(self, **kwags):\n url = kwags.get('base_url', None)\n config = AzureConfiguration(base_url='https://{0}'.format(url))\n config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwags))\n config = self.add_user_agent(config)\n return ServiceClient(creds=config.credentials, config=config)\n\n # passthru methods to AzureAuth instance for backcompat\n @property\n def credentials(self):\n return self.azure_auth.credentials\n\n @property\n def _cloud_environment(self):\n return self.azure_auth._cloud_environment\n\n @property\n def subscription_id(self):\n return self.azure_auth.subscription_id\n\n @property\n def storage_client(self):\n self.log('Getting storage client...')\n if not self._storage_client:\n self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-07-01')\n return self._storage_client\n\n @property\n def storage_models(self):\n return StorageManagementClient.models(\"2018-07-01\")\n\n @property\n def network_client(self):\n self.log('Getting network client')\n if not self._network_client:\n self._network_client = 
self.get_mgmt_svc_client(NetworkManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-08-01')\n return self._network_client\n\n @property\n def network_models(self):\n self.log(\"Getting network models...\")\n return NetworkManagementClient.models(\"2018-08-01\")\n\n @property\n def rm_client(self):\n self.log('Getting resource manager client')\n if not self._resource_client:\n self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2017-05-10')\n return self._resource_client\n\n @property\n def rm_models(self):\n self.log(\"Getting resource manager models\")\n return ResourceManagementClient.models(\"2017-05-10\")\n\n @property\n def compute_client(self):\n self.log('Getting compute client')\n if not self._compute_client:\n self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-06-01')\n return self._compute_client\n\n @property\n def compute_models(self):\n self.log(\"Getting compute models\")\n return ComputeManagementClient.models(\"2018-06-01\")\n\n @property\n def dns_client(self):\n self.log('Getting dns client')\n if not self._dns_client:\n self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-05-01')\n return self._dns_client\n\n @property\n def dns_models(self):\n self.log(\"Getting dns models...\")\n return DnsManagementClient.models('2018-05-01')\n\n @property\n def web_client(self):\n self.log('Getting web client')\n if not self._web_client:\n self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-02-01')\n return self._web_client\n\n @property\n def containerservice_client(self):\n self.log('Getting container service 
client')\n if not self._containerservice_client:\n self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2017-07-01')\n return self._containerservice_client\n\n @property\n def managedcluster_models(self):\n self.log(\"Getting container service models\")\n return ContainerServiceClient.models('2018-03-31')\n\n @property\n def managedcluster_client(self):\n self.log('Getting container service client')\n if not self._managedcluster_client:\n self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-03-31')\n return self._managedcluster_client\n\n @property\n def sql_client(self):\n self.log('Getting SQL client')\n if not self._sql_client:\n self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._sql_client\n\n @property\n def postgresql_client(self):\n self.log('Getting PostgreSQL client')\n if not self._postgresql_client:\n self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._postgresql_client\n\n @property\n def mysql_client(self):\n self.log('Getting MySQL client')\n if not self._mysql_client:\n self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._mysql_client\n\n @property\n def mariadb_client(self):\n self.log('Getting MariaDB client')\n if not self._mariadb_client:\n self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._mariadb_client\n\n @property\n def sql_client(self):\n self.log('Getting SQL client')\n if not self._sql_client:\n self._sql_client = 
self.get_mgmt_svc_client(SqlManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._sql_client\n\n @property\n def containerregistry_client(self):\n self.log('Getting container registry mgmt client')\n if not self._containerregistry_client:\n self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2017-10-01')\n\n return self._containerregistry_client\n\n @property\n def containerinstance_client(self):\n self.log('Getting container instance mgmt client')\n if not self._containerinstance_client:\n self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2018-06-01')\n\n return self._containerinstance_client\n\n @property\n def marketplace_client(self):\n self.log('Getting marketplace agreement client')\n if not self._marketplace_client:\n self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._marketplace_client\n\n @property\n def traffic_manager_management_client(self):\n self.log('Getting traffic manager client')\n if not self._traffic_manager_management_client:\n self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._traffic_manager_management_client\n\n @property\n def monitor_client(self):\n self.log('Getting monitor client')\n if not self._monitor_client:\n self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._monitor_client\n\n @property\n def log_analytics_client(self):\n self.log('Getting log analytics client')\n if not self._log_analytics_client:\n 
self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._log_analytics_client\n\n @property\n def log_analytics_models(self):\n self.log('Getting log analytics models')\n return LogAnalyticsModels\n\n @property\n def servicebus_client(self):\n self.log('Getting servicebus client')\n if not self._servicebus_client:\n self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._servicebus_client\n\n @property\n def servicebus_models(self):\n return ServicebusModel\n\n @property\n def automation_client(self):\n self.log('Getting automation client')\n if not self._automation_client:\n self._automation_client = self.get_mgmt_svc_client(AutomationClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._automation_client\n\n @property\n def automation_models(self):\n return AutomationModel\n\n @property\n def IoThub_client(self):\n self.log('Getting iothub client')\n if not self._IoThub_client:\n self._IoThub_client = self.get_mgmt_svc_client(IotHubClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._IoThub_client\n\n @property\n def IoThub_models(self):\n return IoTHubModels\n\n @property\n def automation_client(self):\n self.log('Getting automation client')\n if not self._automation_client:\n self._automation_client = self.get_mgmt_svc_client(AutomationClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n return self._automation_client\n\n @property\n def automation_models(self):\n return AutomationModel\n\n @property\n def lock_client(self):\n self.log('Getting lock client')\n if not self._lock_client:\n self._lock_client = self.get_mgmt_svc_client(ManagementLockClient,\n base_url=self._cloud_environment.endpoints.resource_manager,\n api_version='2016-09-01')\n return self._lock_client\n\n 
@property\n def lock_models(self):\n self.log(\"Getting lock models\")\n return ManagementLockClient.models('2016-09-01')\n\n\nclass AzureSASAuthentication(Authentication):\n \"\"\"Simple SAS Authentication.\n An implementation of Authentication in\n https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py\n\n :param str token: SAS token\n \"\"\"\n def __init__(self, token):\n self.token = token\n\n def signed_session(self):\n session = super(AzureSASAuthentication, self).signed_session()\n session.headers['Authorization'] = self.token\n return session\n\n\nclass AzureRMAuthException(Exception):\n pass\n\n\nclass AzureRMAuth(object):\n def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,\n tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',\n api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):\n\n if fail_impl:\n self._fail_impl = fail_impl\n else:\n self._fail_impl = self._default_fail_impl\n\n self._cloud_environment = None\n self._adfs_authority_url = None\n\n # authenticate\n self.credentials = self._get_credentials(\n dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,\n tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,\n cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))\n\n if not self.credentials:\n if HAS_AZURE_CLI_CORE:\n self.fail(\"Failed to get credentials. Either pass as parameters, set environment variables, \"\n \"define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).\")\n else:\n self.fail(\"Failed to get credentials. 
Either pass as parameters, set environment variables, \"\n \"define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).\")\n\n # cert validation mode precedence: module-arg, credential profile, env, \"validate\"\n self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \\\n os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'\n\n if self._cert_validation_mode not in ['validate', 'ignore']:\n self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))\n\n # if cloud_environment specified, look up/build Cloud object\n raw_cloud_env = self.credentials.get('cloud_environment')\n if self.credentials.get('credentials') is not None and raw_cloud_env is not None:\n self._cloud_environment = raw_cloud_env\n elif not raw_cloud_env:\n self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default\n else:\n # try to look up \"well-known\" values via the name attribute on azure_cloud members\n all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]\n matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]\n if len(matched_clouds) == 1:\n self._cloud_environment = matched_clouds[0]\n elif len(matched_clouds) > 1:\n self.fail(\"Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'\".format(raw_cloud_env))\n else:\n if not urlparse.urlparse(raw_cloud_env).scheme:\n self.fail(\"cloud_environment must be an endpoint discovery URL or one of {0}\".format([x.name for x in all_clouds]))\n try:\n self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)\n except Exception as e:\n self.fail(\"cloud_environment {0} could not be resolved: {1}\".format(raw_cloud_env, e.message), exception=traceback.format_exc())\n\n if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:\n self.fail(\"Credentials did not include a 
subscription_id value.\")\n self.log(\"setting subscription_id\")\n self.subscription_id = self.credentials['subscription_id']\n\n # get authentication authority\n # for adfs, user could pass in authority or not.\n # for others, use default authority from cloud environment\n if self.credentials.get('adfs_authority_url') is None:\n self._adfs_authority_url = self._cloud_environment.endpoints.active_directory\n else:\n self._adfs_authority_url = self.credentials.get('adfs_authority_url')\n\n # get resource from cloud environment\n self._resource = self._cloud_environment.endpoints.active_directory_resource_id\n\n if self.credentials.get('credentials') is not None:\n # AzureCLI credentials\n self.azure_credentials = self.credentials['credentials']\n elif self.credentials.get('client_id') is not None and \\\n self.credentials.get('secret') is not None and \\\n self.credentials.get('tenant') is not None:\n self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],\n secret=self.credentials['secret'],\n tenant=self.credentials['tenant'],\n cloud_environment=self._cloud_environment,\n verify=self._cert_validation_mode == 'validate')\n\n elif self.credentials.get('ad_user') is not None and \\\n self.credentials.get('password') is not None and \\\n self.credentials.get('client_id') is not None and \\\n self.credentials.get('tenant') is not None:\n\n self.azure_credentials = self.acquire_token_with_username_password(\n self._adfs_authority_url,\n self._resource,\n self.credentials['ad_user'],\n self.credentials['password'],\n self.credentials['client_id'],\n self.credentials['tenant'])\n\n elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:\n tenant = self.credentials.get('tenant')\n if not tenant:\n tenant = 'common' # SDK default\n\n self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],\n self.credentials['password'],\n tenant=tenant,\n 
cloud_environment=self._cloud_environment,\n verify=self._cert_validation_mode == 'validate')\n else:\n self.fail(\"Failed to authenticate with provided credentials. Some attributes were missing. \"\n \"Credentials must include client_id, secret and tenant or ad_user and password, or \"\n \"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or \"\n \"be logged in using AzureCLI.\")\n\n def fail(self, msg, exception=None, **kwargs):\n self._fail_impl(msg)\n\n def _default_fail_impl(self, msg, exception=None, **kwargs):\n raise AzureRMAuthException(msg)\n\n def _get_profile(self, profile=\"default\"):\n path = expanduser(\"~/.azure/credentials\")\n try:\n config = configparser.ConfigParser()\n config.read(path)\n except Exception as exc:\n self.fail(\"Failed to access {0}. Check that the file exists and you have read \"\n \"access. {1}\".format(path, str(exc)))\n credentials = dict()\n for key in AZURE_CREDENTIAL_ENV_MAPPING:\n try:\n credentials[key] = config.get(profile, key, raw=True)\n except Exception:\n pass\n\n if credentials.get('subscription_id'):\n return credentials\n\n return None\n\n def _get_msi_credentials(self, subscription_id_param=None, **kwargs):\n client_id = kwargs.get('client_id', None)\n credentials = MSIAuthentication(client_id=client_id)\n subscription_id = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)\n if not subscription_id:\n try:\n # use the first subscription of the MSI\n subscription_client = SubscriptionClient(credentials)\n subscription = next(subscription_client.subscriptions.list())\n subscription_id = str(subscription.subscription_id)\n except Exception as exc:\n self.fail(\"Failed to get MSI token: {0}. 
\"\n \"Please check whether your machine enabled MSI or grant access to any subscription.\".format(str(exc)))\n return {\n 'credentials': credentials,\n 'subscription_id': subscription_id\n }\n\n def _get_azure_cli_credentials(self):\n credentials, subscription_id = get_azure_cli_credentials()\n cloud_environment = get_cli_active_cloud()\n\n cli_credentials = {\n 'credentials': credentials,\n 'subscription_id': subscription_id,\n 'cloud_environment': cloud_environment\n }\n return cli_credentials\n\n def _get_env_credentials(self):\n env_credentials = dict()\n for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():\n env_credentials[attribute] = os.environ.get(env_variable, None)\n\n if env_credentials['profile']:\n credentials = self._get_profile(env_credentials['profile'])\n return credentials\n\n if env_credentials.get('subscription_id') is not None:\n return env_credentials\n\n return None\n\n # TODO: use explicit kwargs instead of intermediate dict\n def _get_credentials(self, params):\n # Get authentication credentials.\n self.log('Getting credentials')\n\n arg_credentials = dict()\n for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():\n arg_credentials[attribute] = params.get(attribute, None)\n\n auth_source = params.get('auth_source', None)\n if not auth_source:\n auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')\n\n if auth_source == 'msi':\n self.log('Retrieving credenitals from MSI')\n return self._get_msi_credentials(arg_credentials['subscription_id'], client_id=params.get('client_id', None))\n\n if auth_source == 'cli':\n if not HAS_AZURE_CLI_CORE:\n self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'),\n exception=HAS_AZURE_CLI_CORE_EXC)\n try:\n self.log('Retrieving credentials from Azure CLI profile')\n cli_credentials = self._get_azure_cli_credentials()\n return cli_credentials\n except CLIError as err:\n self.fail(\"Azure CLI profile cannot be loaded - {0}\".format(err))\n\n 
if auth_source == 'env':\n self.log('Retrieving credentials from environment')\n env_credentials = self._get_env_credentials()\n return env_credentials\n\n if auth_source == 'credential_file':\n self.log(\"Retrieving credentials from credential file\")\n profile = params.get('profile') or 'default'\n default_credentials = self._get_profile(profile)\n return default_credentials\n\n # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials\n # try module params\n if arg_credentials['profile'] is not None:\n self.log('Retrieving credentials with profile parameter.')\n credentials = self._get_profile(arg_credentials['profile'])\n return credentials\n\n if arg_credentials['subscription_id']:\n self.log('Received credentials from parameters.')\n return arg_credentials\n\n # try environment\n env_credentials = self._get_env_credentials()\n if env_credentials:\n self.log('Received credentials from env.')\n return env_credentials\n\n # try default profile from ~./azure/credentials\n default_credentials = self._get_profile()\n if default_credentials:\n self.log('Retrieved default profile credentials from ~/.azure/credentials.')\n return default_credentials\n\n try:\n if HAS_AZURE_CLI_CORE:\n self.log('Retrieving credentials from AzureCLI profile')\n cli_credentials = self._get_azure_cli_credentials()\n return cli_credentials\n except CLIError as ce:\n self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))\n\n return None\n\n def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):\n authority_uri = authority\n\n if tenant is not None:\n authority_uri = authority + '/' + tenant\n\n context = AuthenticationContext(authority_uri)\n token_response = context.acquire_token_with_username_password(resource, username, password, client_id)\n\n return AADTokenCredentials(token_response)\n\n def log(self, msg, pretty_print=False):\n pass\n # Use only during module 
development\n # if self.debug:\n # log_file = open('azure_rm.log', 'a')\n # if pretty_print:\n # log_file.write(json.dumps(msg, indent=4, sort_keys=True))\n # else:\n # log_file.write(msg + u'\\n')\n","sub_path":"env/lib/python3.9/site-packages/ansible/module_utils/azure_rm_common.py","file_name":"azure_rm_common.py","file_ext":"py","file_size_in_byte":62599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"41293104","text":"import discord\r\nimport asyncio\r\nimport random\r\nimport pickle\r\nfrom discord.ext import commands\r\nimport os\r\n\r\nclient = commands.Bot(command_prefix=':G ')\r\n\r\nfortune_point = [-6, -3, -1, 1, 4, 7, 10, 14, 19]\r\ncategory = [\"Aces\", \"Deuces\", \"Threes\", \"Fours\", \"Fives\", \"Sixes\", \"subtotal\", \"Choice\", \"4 of a Kind\", \"Full House\",\r\n \"S.Straight\", \"L.Straight\", \"Yacht\", \"Total\"]\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"Bot ID : \" + str(client.user.id))\r\n print(\"System Online\")\r\n game = discord.Game(\"미니게임 준비\")\r\n await client.change_presence(status=discord.Status.online, activity=game)\r\n\r\n\r\n# score.bin 파일 불러오는 함수\r\ndef scoreFileRead(*user_name: discord.Member):\r\n try:\r\n with open(\"score.bin\", \"rb\") as f: # score 파일 읽기\r\n score_data = pickle.load(f)\r\n\r\n except FileNotFoundError: # 파일이 없으면\r\n with open(\"score.bin\", \"wb+\") as f: # 파일을 만들기\r\n score_data = dict()\r\n pickle.dump(score_data, f) # 저장\r\n\r\n for i in range(len(user_name)):\r\n if str(user_name[i].id) not in score_data:\r\n score_data[str(user_name[i].id)] = 0\r\n\r\n return score_data\r\n\r\n\r\n# rsp.bin 파일 불러오는 함수\r\ndef rspFileRead(*user_name: discord.Member):\r\n try:\r\n with open(\"rsp.bin\", \"rb\") as f: # score 파일 읽기\r\n rsp_data = pickle.load(f)\r\n\r\n except FileNotFoundError: # 파일이 없으면\r\n with open(\"rsp.bin\", \"wb+\") as f: # 파일을 만들기\r\n rsp_data = dict()\r\n pickle.dump(rsp_data, f) # 저장\r\n\r\n for i in range(len(user_name)):\r\n if (str(user_name[i].id), \"win\") not in rsp_data:\r\n rsp_data[str(user_name[i].id), \"win\"] = 0\r\n rsp_data[str(user_name[i].id), \"lose\"] = 0\r\n rsp_data[str(user_name[i].id), \"draw\"] = 0\r\n\r\n return rsp_data\r\n\r\n\r\n# fortune.bin 파일 불러오는 함수\r\ndef fortuneFileRead(*user_name: discord.Member):\r\n try:\r\n with open(\"fortune.bin\", \"rb\") as f: # score 파일 읽기\r\n fortune_data = pickle.load(f)\r\n\r\n except 
FileNotFoundError: # 파일이 없으면\r\n with open(\"fortune.bin\", \"wb+\") as f: # 파일을 만들기\r\n fortune_data = dict()\r\n pickle.dump(fortune_data, f) # 저장\r\n\r\n for i in range(len(user_name)):\r\n if (str(user_name[i].id), \"7\") not in fortune_data:\r\n fortune_data[str(user_name[i].id), \"7\"] = 0\r\n fortune_data[str(user_name[i].id), \"8\"] = 0\r\n fortune_data[str(user_name[i].id), \"9\"] = 0\r\n fortune_data[str(user_name[i].id), \"Clear\"] = 0\r\n\r\n return fortune_data\r\n\r\n\r\n# yacht.bin 파일 불러오는 함수\r\ndef yachtFileRead(*user_name: discord.Member):\r\n try:\r\n with open(\"yacht.bin\", \"rb\") as f: # score 파일 읽기\r\n yacht_data = pickle.load(f)\r\n\r\n except FileNotFoundError: # 파일이 없으면\r\n with open(\"yacht.bin\", \"wb+\") as f: # 파일을 만들기\r\n yacht_data = dict()\r\n pickle.dump(yacht_data, f) # 저장\r\n\r\n for i in range(len(user_name)):\r\n if (str(user_name[i].id), \"win\") not in yacht_data:\r\n yacht_data[str(user_name[i].id), \"win\"] = 0\r\n yacht_data[str(user_name[i].id), \"lose\"] = 0\r\n yacht_data[str(user_name[i].id), \"draw\"] = 0\r\n yacht_data[str(user_name[i].id), \"max\"] = 0\r\n\r\n return yacht_data\r\n\r\n\r\n@client.command(name=\"전적_검색\", pass_context=True)\r\n# No.100 점수 확인 명령\r\nasync def showName(ctx, user_name: discord.Member):\r\n score_data = scoreFileRead(user_name)\r\n rsp_data = rspFileRead(user_name)\r\n fortune_data = fortuneFileRead(user_name)\r\n yacht_data = yachtFileRead(user_name)\r\n\r\n # point\r\n embed = discord.Embed(title=\"[\" + str(user_name) + \"] Info\", description=\" \", color=0xffff00)\r\n embed.add_field(name=\"포인트\", value=str(score_data[str(user_name.id)]) + \" point\", inline=False)\r\n # 가위바위보\r\n if rsp_data[str(user_name.id), \"win\"] == rsp_data[str(user_name.id), \"lose\"] == rsp_data[\r\n str(user_name.id), \"draw\"] == 0:\r\n embed.add_field(name=\"가위바위보\", value=\"승률 : 0% [승무패 : 0 / 0 / 0]\", inline=False)\r\n else:\r\n embed.add_field(name=\"가위바위보\",\r\n value=\"승률 : \" + 
\"{0:.3f}\".format(100 * rsp_data[str(user_name.id), \"win\"] / (\r\n rsp_data[str(user_name.id), \"win\"] + rsp_data[str(user_name.id), \"lose\"]))\r\n + \"% [승무패 : \" + str(rsp_data[str(user_name.id), \"win\"]) + \" / \"\r\n + str(rsp_data[str(user_name.id), \"draw\"]) + \" / \"\r\n + str(rsp_data[str(user_name.id), \"lose\"]) + \"]\", inline=False)\r\n # 운빨망겜\r\n embed.add_field(name=\"운빨망겜\", value=\"stage 7 : \" + str(fortune_data[str(user_name.id), \"7\"])\r\n + \"회 / stage 8 : \" + str(fortune_data[str(user_name.id), \"8\"])\r\n + \"회\\nstage 9 : \" + str(fortune_data[str(user_name.id), \"9\"])\r\n + \"회 / All Stage Clear : \" + str(\r\n fortune_data[str(user_name.id), \"Clear\"]) + \"회\", inline=False)\r\n # 야추\r\n if yacht_data[str(user_name.id), \"win\"] == yacht_data[str(user_name.id), \"lose\"] == yacht_data[\r\n str(user_name.id), \"draw\"] == 0:\r\n embed.add_field(name=\"야추\",\r\n value=\"승률 : 0% [승무패 : 0 / 0 / 0]\\n최고 점수 : \" + str(yacht_data[str(user_name.id), \"max\"]),\r\n inline=False)\r\n else:\r\n embed.add_field(name=\"야추\",\r\n value=\"승률 : \" + \"{0:.3f}\".format(100 * yacht_data[str(user_name.id), \"win\"] / (\r\n yacht_data[str(user_name.id), \"win\"] + yacht_data[str(user_name.id), \"lose\"]))\r\n + \"% [승무패 : \" + str(yacht_data[str(user_name.id), \"win\"]) + \"/\"\r\n + str(yacht_data[str(user_name.id), \"draw\"]) + \"/\"\r\n + str(yacht_data[str(user_name.id), \"lose\"])\r\n + \"]\\n최고 점수 : \" + str(yacht_data[str(user_name.id), \"max\"]), inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n with open(\"score.bin\", \"wb\") as f:\r\n pickle.dump(score_data, f) # 저장하기\r\n with open(\"rsp.bin\", \"wb\") as f:\r\n pickle.dump(rsp_data, f)\r\n with open(\"fortune.bin\", \"wb\") as f:\r\n pickle.dump(fortune_data, f)\r\n with open(\"yacht.bin\", \"wb\") as f:\r\n pickle.dump(yacht_data, f)\r\n\r\n\r\n@client.command(name=\"가위바위보\", pass_context=True)\r\n# No.101 가위바위보 게임 명령\r\nasync def rsp(ctx):\r\n def rsp_text(num):\r\n if num == 
0:\r\n return \"가위\"\r\n elif num == 1:\r\n return \"바위\"\r\n elif num == 2:\r\n return \"보\"\r\n\r\n score_data = scoreFileRead(ctx.author)\r\n rsp_data = rspFileRead(ctx.author)\r\n\r\n embed = discord.Embed(title=\"가위바위보 [Player : \" + str(ctx.author) + \"]\",\r\n description=\":가위, :바위, :보 중 하나를 입력해 주세요.\", color=0xaaaaaa)\r\n await ctx.send(embed=embed)\r\n\r\n while True:\r\n def check(m):\r\n return m.author == ctx.author and m.channel == ctx.channel # 입력한 사람이 본인인지 확인\r\n\r\n msg = await client.wait_for(\"message\", check=check)\r\n if msg.content == \":가위\":\r\n user_choose = 0\r\n break\r\n elif msg.content == \":바위\":\r\n user_choose = 1\r\n break\r\n elif msg.content == \":보\":\r\n user_choose = 2\r\n break\r\n else:\r\n await ctx.send(\"잘못된 입력입니다. 다시 입력해주세요.\")\r\n\r\n AI_choose = random.randint(0, 2)\r\n\r\n if user_choose == AI_choose: # 비긴 경우\r\n rsp_data[str(ctx.author.id), \"draw\"] += 1\r\n embed = discord.Embed(title=\"Player : \" + rsp_text(user_choose) + \" vs. \" + rsp_text(AI_choose) + \" : Bot\",\r\n description=\"비겼습니다. 0 point 획득 [현재 point : \" + str(score_data[str(ctx.author.id)]) + \"]\",\r\n color=0xaaaaaa)\r\n\r\n elif user_choose - AI_choose == 1 or user_choose - AI_choose == -2: # 이긴 경우\r\n score_data[str(ctx.author.id)] += 5\r\n rsp_data[str(ctx.author.id), \"win\"] += 1\r\n embed = discord.Embed(title=\"Player : \" + rsp_text(user_choose) + \" vs. \" + rsp_text(AI_choose) + \" : Bot\",\r\n description=\"이겼습니다! 5 point 획득 [현재 point : \" + str(score_data[str(ctx.author.id)]) + \"]\",\r\n color=0xaaaaaa)\r\n\r\n elif user_choose - AI_choose == -1 or user_choose - AI_choose == 2: # 진 경우\r\n score_data[str(ctx.author.id)] -= 3\r\n if score_data[str(ctx.author.id)] < 0:\r\n score_data[str(ctx.author.id)] = 0\r\n rsp_data[str(ctx.author.id), \"lose\"] += 1\r\n embed = discord.Embed(title=\"Player : \" + rsp_text(user_choose) + \" vs. \" + rsp_text(AI_choose) + \" : Bot\",\r\n description=\"졌습니다. 
-3 point 획득 [현재 point : \" + str(score_data[str(ctx.author.id)]) + \"]\",\r\n color=0xaaaaaa)\r\n\r\n await ctx.send(embed=embed)\r\n\r\n with open(\"score.bin\", \"wb\") as f:\r\n pickle.dump(score_data, f) # 저장하기\r\n with open(\"rsp.bin\", \"wb\") as f:\r\n pickle.dump(rsp_data, f)\r\n\r\n\r\n@client.command(name=\"운빨망겜\", pass_context=True)\r\n# No.102 운빨망겜 명령\r\nasync def fortune(ctx):\r\n score_data = scoreFileRead(ctx.author)\r\n fortune_data = fortuneFileRead(ctx.author)\r\n\r\n embed = discord.Embed(title=\"운빨테스트 [Player : \" + str(ctx.author) + \"]\",\r\n description=\"게임 설명 : 1~10 중 아무 숫자나 하나를 입력하면 됩니다.\", color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n\r\n global fortune_point\r\n stage = 1\r\n percent = 90\r\n get_point = fortune_point[0]\r\n num = [0 for p in range(10)]\r\n while True:\r\n embed = discord.Embed(title=\"운빨테스트 [Stage \" + str(stage) + \" / 확률 : \" + str(percent) + \".0%]\",\r\n description=\" \", color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n\r\n erase_two = 0\r\n while True: # 숫자 입력 받기\r\n def check(m):\r\n return m.author == ctx.author and m.channel == ctx.channel # 입력한 사람이 본인인지 확인\r\n\r\n try:\r\n msg = await client.wait_for(\"message\", check=check)\r\n isRange = (1 <= int(msg.content) <= 10)\r\n\r\n except ValueError:\r\n erase_two += 1\r\n await ctx.channel.purge(limit=1)\r\n await ctx.send(\"잘못된 입력입니다. 1~10 사이의 정수를 입력해주세요.\")\r\n\r\n else:\r\n if isRange:\r\n break\r\n else:\r\n erase_two += 1\r\n await ctx.channel.purge(limit=1)\r\n await ctx.send(\"잘못된 입력입니다. 
1~10 사이의 정수를 입력해주세요.\")\r\n\r\n i = 0 # 랜덤 뽑기\r\n for j in range(10): # 값 초기화\r\n num[j] = False\r\n while i < (percent / 10): # 랜덤 뽑기\r\n a = random.randint(0, 9)\r\n if not num[a]:\r\n i += 1\r\n num[a] = True\r\n\r\n if stage == 1:\r\n await ctx.channel.purge(limit=(2 + erase_two))\r\n else:\r\n await ctx.channel.purge(limit=(3 + erase_two))\r\n if num[int(msg.content) - 1]: # 당첨인 경우\r\n if stage == 9: # 마지막 스테이지까지 클리어 한 경우\r\n score_data[str(ctx.author.id)] += 25\r\n fortune_data[str(ctx.author.id), \"Clear\"] += 1\r\n embed = discord.Embed(title=\"마지막 9 stage 까지 클리어 했습니다! [Latest Stage : 어캐헀누!!]\",\r\n description=\"27 point 획득 [현재 point : \" + str(\r\n score_data[str(ctx.author.id)]) + \"]\", color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n\r\n with open(\"score.bin\", \"wb\") as f:\r\n pickle.dump(score_data, f) # 저장하기\r\n break\r\n else:\r\n stage += 1\r\n percent -= 10\r\n get_point = fortune_point[stage - 1]\r\n embed = discord.Embed(title=str(msg.content) + \"은(는) 꽝이 아니었습니다!\", description=\" \", color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n else:\r\n score_data[str(ctx.author.id)] += get_point\r\n if score_data[str(ctx.author.id)] < 0:\r\n score_data[str(ctx.author.id)] = 0\r\n if stage == 7:\r\n fortune_data[str(ctx.author.id), \"7\"] += 1\r\n if stage == 8:\r\n fortune_data[str(ctx.author.id), \"8\"] += 1\r\n if stage == 9:\r\n fortune_data[str(ctx.author.id), \"9\"] += 1\r\n\r\n p = 0\r\n clear_num: list = [0 for p in range(10 - stage)]\r\n for i in range(10):\r\n if num[i]:\r\n clear_num[p] = i + 1\r\n p += 1\r\n\r\n embed = discord.Embed(title=str(msg.content) + \"은(는) 꽝입니다. 
[Latest Stage : \" + str(stage) + \"]\",\r\n description=str(get_point) + \" point 획득 [현재 point : \" + str(\r\n score_data[str(ctx.author.id)]) + \"]\\n\\n클리어 숫자 :\", color=0x62d4a8)\r\n for i in range(10 - stage):\r\n embed.add_field(name=\"번호\", value=str(clear_num[i]), inline=True)\r\n await ctx.send(embed=embed)\r\n\r\n with open(\"score.bin\", \"wb\") as f:\r\n pickle.dump(score_data, f) # 저장하기\r\n with open(\"fortune.bin\", \"wb\") as f:\r\n pickle.dump(fortune_data, f)\r\n break\r\n\r\n\r\n@client.command(name=\"야추\", pass_context=True)\r\n# No.103 Yacht 명령\r\nasync def yacht(ctx, opponent: discord.Member):\r\n score = [[0] * 2 for p in range(14)]\r\n cate = [[True] * 2 for p in range(14)]\r\n surren = [False, False]\r\n\r\n def check_ctx(m): # player 입력 받는 조건 함수\r\n return m.author == ctx.author\r\n\r\n def check_opponent(m): # opponent 입력 받는 조건 함수\r\n return m.author == opponent\r\n\r\n score_data = scoreFileRead(ctx.author, opponent)\r\n yacht_data = yachtFileRead(ctx.author, opponent)\r\n\r\n time_out = False\r\n start = False\r\n go = False\r\n\r\n if ctx.author == opponent:\r\n embed = discord.Embed(title=\"Yacht Dice [Single Play]\",\r\n description=\"Player A : [\" + str(ctx.author) + \"] vs. [\" + str(opponent) + \"] : Player B\",\r\n color=0x62d4a8)\r\n embed.add_field(name=\"[주의 사항]\", value=\"1) 싱글 플레이에서는 승무패와 포인트가 저장되지 않습니다.\\n\"\r\n + \"2) 텍스트가 나오자마자 채팅을 입력하면 오작동할 가능성이 있습니다.\")\r\n await ctx.send(embed=embed)\r\n go = True\r\n await asyncio.sleep(2.0)\r\n else:\r\n # 나중에 카테고리가 만두인 경우로 변경\r\n if opponent.id == 763786586684391498 or opponent.id == 762352756652244996 or opponent.id == 762303766145269760:\r\n embed = discord.Embed(title=\"Yacht Dice [warning]\",\r\n description=\"Bot과는 게임을 돌릴 수 없습니다.\",\r\n color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n else: \r\n embed = discord.Embed(title=\"Yacht Dice [2 Player]\",\r\n description=\"Player A : [\" + str(ctx.author) + \"] vs. 
[\" + str(\r\n opponent) + \"] : Player B\\n\"\r\n + str(opponent) + \"님은 게임울 수락하려면 \\\"수락\\\"을, 아니면 \\\"거절\\\"을 입력하세요.\", color=0x62d4a8)\r\n embed.add_field(name=\"[주의 사항]\", value=\"1) 원활한 게임 진행을 위해 두 플레이어는 같은 체널을 사용해주세요.\\n\"\r\n + \"2) 텍스트가 나오자마자 채팅을 입력하면 오작동할 가능성이 있습니다.\")\r\n await ctx.send(embed=embed)\r\n\r\n while True: # 상대방 입장 받기\r\n try:\r\n msg = await client.wait_for(\"message\", check=check_opponent, timeout=10.0)\r\n except asyncio.TimeoutError:\r\n time_out = True\r\n break\r\n else:\r\n if msg.content == \"수락\":\r\n start = True\r\n break\r\n elif msg.content == \"거절\":\r\n start = False\r\n break\r\n else:\r\n await ctx.send(\"잘못된 입력입니다. 다시 입력해주세요.\")\r\n\r\n if time_out: # 시간이 초과된 경우\r\n embed = discord.Embed(title=\"Yacht Dice [2 Player]\",\r\n description=\"Player A : \" + str(ctx.author) + \" vs. \" + str(\r\n opponent) + \" : Player B\\n\"\r\n + \"입력 시간이 초과되었습니다.\", color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n \r\n elif not start: # 게임이 거절된 경우\r\n embed = discord.Embed(title=\"Yacht Dice [2 Player]\",\r\n description=\"Player A : \" + str(ctx.author) + \" vs. \" + str(\r\n opponent) + \" : Player B\\n\"\r\n + str(opponent) + \"에 의해 게임이 거절되었습니다.\", color=0x62d4a8)\r\n await ctx.send(embed=embed)\r\n \r\n else: # 게임을 수락한 경우\r\n go = True\r\n \r\n if go: # 게임이 수락된 경우\r\n for turn in range(24):\r\n fixed = [False, False, False, False, False]\r\n dice = [0 for p in range(5)]\r\n erase_two = 0\r\n\r\n if turn % 2 == 0:\r\n embed = discord.Embed(title=\"Yacht Dice [Player A's Turn (\" + str(int(turn / 2) + 1) + \" / 12)]\",\r\n description=\"Player A : [\" + str(ctx.author) + \"] vs. [\" + str(\r\n opponent) + \"] : Player B\", color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"Yacht Dice [Player B's Turn (\" + str(int(turn / 2) + 1) + \" / 12)]\",\r\n description=\"Player A : [\" + str(ctx.author) + \"] vs. 
[\" + str(\r\n opponent) + \"] : Player B\", color=0x0000aa)\r\n for i in range(len(category)):\r\n if i == 6:\r\n if cate[i][turn % 2]:\r\n embed.add_field(name=str(category[i]),\r\n value=str(score[i][0]) + \" / \" + str(\r\n score[i][1]) + \" [Aces ~ Sixes가 63점 이상이면 보너스 35점]\", inline=False)\r\n elif i == 13:\r\n embed.add_field(name=str(category[i]),\r\n value=str(score[i][0]) + \" / \" + str(score[i][1]), inline=True)\r\n else:\r\n if cate[i][turn % 2]:\r\n embed.add_field(name=\"[\" + str(i + 1) + \".] \" + str(category[i]),\r\n value=str(score[i][0]) + \" / \" + str(score[i][1]), inline=True)\r\n else:\r\n embed.add_field(name=str(i + 1) + \". \" + str(category[i]),\r\n value=str(score[i][0]) + \" / \" + str(score[i][1]), inline=True)\r\n embed.add_field(name=\"----------------------------------------------------------\",\r\n value=\"\\\"roll\\\"을 입력해서 주사위를 던지세요.\\n게임을 그만두려면 \\\"항복\\\"을 입력하세요.\", inline=False)\r\n await ctx.send(embed=embed)\r\n # 주사위 굴리기\r\n for p in range(3):\r\n def text(fix):\r\n if fix:\r\n return \"x \"\r\n else:\r\n return \"o \"\r\n\r\n if p != 0:\r\n if turn % 2 == 0:\r\n embed = discord.Embed(title=\"현재 주사위 [Player A]\", color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"현재 주사위 [Player B]\", color=0x0000aa)\r\n embed.add_field(name=\"----------------------------------------------------------\",\r\n value=str(dice[0]) + \" \" + str(dice[1]) + \" \" + str(dice[2]) + \" \" + str(\r\n dice[3]) + \" \" + str(dice[4])\r\n + \"\\n\" + text(fixed[0]) + \" \" + text(fixed[1]) + \" \" + text(\r\n fixed[2]) + \" \" + text(fixed[3]) + \" \" + text(fixed[4]), inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n erase_two = 0\r\n while True: # Player 입력받기 [roll]\r\n if turn % 2 == 0:\r\n msg = await client.wait_for(\"message\", check=check_ctx)\r\n else:\r\n msg = await client.wait_for(\"message\", check=check_opponent)\r\n if msg.content == \"roll\":\r\n break\r\n elif msg.content == \"항복\":\r\n if turn % 2 == 0: # Player A가 항복한 
경우\r\n embed = discord.Embed(title=\"[Player A]\", description=\"정말로 항복하시겠습니까?\\n [yes] / [no]\",\r\n color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"[Player B]\", description=\"정말로 항복하시겠습니까?\\n [yes] / [no]\",\r\n color=0x0000aa)\r\n await ctx.send(embed=embed)\r\n\r\n while True: # Player 입력받기\r\n if turn % 2 == 0:\r\n msg = await client.wait_for(\"message\", check=check_ctx)\r\n else:\r\n msg = await client.wait_for(\"message\", check=check_opponent)\r\n\r\n if msg.content == \"yes\":\r\n surren[turn % 2] = True\r\n await ctx.channel.purge(limit=(4 + erase_two))\r\n break\r\n elif msg.content == \"no\":\r\n await ctx.channel.purge(limit=(3 + erase_two))\r\n erase_two = 0\r\n break\r\n else:\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"잘못된 입력입니다. 다시 입력해주세요.\")\r\n if msg.content == \"yes\":\r\n break\r\n else:\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"잘못된 입력입니���. 다시 입력해주세요.\")\r\n\r\n if True in surren:\r\n break\r\n\r\n for i in range(5): # 주사위 굴리기\r\n if not fixed[i]:\r\n dice[i] = random.randint(1, 6)\r\n\r\n if p == 2: # 마지막 차례는 고정 선택이 아니라 바로 점수 선택으로 건너뛰기\r\n break\r\n\r\n if p == 0:\r\n await ctx.channel.purge(limit=(1 + erase_two))\r\n else:\r\n await ctx.channel.purge(limit=(2 + erase_two))\r\n if turn % 2 == 0:\r\n embed = discord.Embed(title=\"현재 주사위 [Player A]\", color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"현재 주사위 [Player B]\", color=0x0000aa)\r\n embed.add_field(name=\"----------------------------------------------------------\",\r\n value=str(dice[0]) + \" \" + str(dice[1]) + \" \" + str(dice[2]) + \" \" + str(\r\n dice[3]) + \" \" + str(dice[4])\r\n + \"\\n\" + text(fixed[0]) + \" \" + text(fixed[1]) + \" \" + text(\r\n fixed[2]) + \" \" + text(fixed[3]) + \" \" + text(fixed[4])\r\n + \"\\n[다시 굴릴 주사위는 o, 고정할 주사위는 x로 입력해주세요. 
ex) x x o o x]\", inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n # 고정할 주사위 선택하는 부분\r\n erase_two = 0\r\n while True:\r\n error = False\r\n if turn % 2 == 0:\r\n msg = await client.wait_for(\"message\", check=check_ctx)\r\n else:\r\n msg = await client.wait_for(\"message\", check=check_opponent)\r\n param = msg.content.split()\r\n if len(param) == 5:\r\n for i in range(5):\r\n if param[i] == \"x\":\r\n fixed[i] = True\r\n elif param[i] == \"o\":\r\n fixed[i] = False\r\n else:\r\n error = True\r\n else:\r\n error = True\r\n\r\n if not error:\r\n break\r\n else:\r\n if msg.content == \"항복\":\r\n if turn % 2 == 0: # Player A가 항복한 경우\r\n embed = discord.Embed(title=\"[Player A]\", description=\"정말로 항복하시겠습니까?\\n [yes] / [no]\",\r\n color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"[Player B]\", description=\"정말로 항복하시겠습니까?\\n [yes] / [no]\",\r\n color=0x0000aa)\r\n await ctx.send(embed=embed)\r\n\r\n while True: # Player 입력받기\r\n if turn % 2 == 0:\r\n msg = await client.wait_for(\"message\", check=check_ctx)\r\n else:\r\n msg = await client.wait_for(\"message\", check=check_opponent)\r\n\r\n if msg.content == \"yes\":\r\n surren[turn % 2] = True\r\n await ctx.channel.purge(limit=(4 + erase_two))\r\n break\r\n elif msg.content == \"no\":\r\n await ctx.channel.purge(limit=(3 + erase_two))\r\n erase_two = 0\r\n break\r\n else:\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"잘못된 입력입니다. 다시 입력해주세요.\")\r\n if msg.content == \"yes\":\r\n break\r\n else:\r\n erase_two += 1\r\n await ctx.channel.purge(limit=1)\r\n await ctx.send(\"잘못된 입력입니다. 
다시 입력해주세요.\")\r\n\r\n if True in surren:\r\n break\r\n # 고정이 다 O 인 경우 반복 종료하고 점수 배정으로 넘어가기\r\n skip = 0\r\n for i in range(5):\r\n if fixed[i]:\r\n skip += 1\r\n if skip == 5:\r\n break\r\n\r\n await ctx.channel.purge(limit=(2 + erase_two))\r\n # 점수 배정 하기\r\n if True in surren:\r\n break\r\n\r\n await ctx.channel.purge(limit=(2 + erase_two))\r\n if turn % 2 == 0:\r\n embed = discord.Embed(title=\"주사위 현황 [Player A]\", color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"주사위 현황 [Player B]\", color=0x0000aa)\r\n embed.add_field(name=\"----------------------------------------------------------\",\r\n value=str(dice[0]) + \" \" + str(dice[1]) + \" \" + str(dice[2]) + \" \" + str(\r\n dice[3]) + \" \" + str(dice[4])\r\n + \"\\n[점수를 배정할 카테고리의 번호를 입력하세요.]\", inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n erase_two = 0\r\n while True: # Player 입력받기 [카테고리]\r\n error = False\r\n choice = 0\r\n try:\r\n if turn % 2 == 0:\r\n msg = await client.wait_for(\"message\", check=check_ctx)\r\n else:\r\n msg = await client.wait_for(\"message\", check=check_opponent)\r\n\r\n if msg.content == \"항복\":\r\n if turn % 2 == 0: # Player A가 항복한 경우\r\n embed = discord.Embed(title=\"[Player A]\", description=\"정말로 항복하시겠습니까?\\n [yes] / [no]\",\r\n color=0xaa0000)\r\n else:\r\n embed = discord.Embed(title=\"[Player B]\", description=\"정말로 항복하시겠습니까?\\n [yes] / [no]\",\r\n color=0x0000aa)\r\n await ctx.send(embed=embed)\r\n\r\n while True: # Player 입력받기\r\n if turn % 2 == 0:\r\n msg = await client.wait_for(\"message\", check=check_ctx)\r\n else:\r\n msg = await client.wait_for(\"message\", check=check_opponent)\r\n\r\n if msg.content == \"yes\":\r\n surren[turn % 2] = True\r\n await ctx.channel.purge(limit=(4 + erase_two))\r\n break\r\n elif msg.content == \"no\":\r\n await ctx.channel.purge(limit=(3 + erase_two))\r\n erase_two = 0\r\n break\r\n else:\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"잘못된 입력입니다. 
다시 입력해주세요.\")\r\n if msg.content == \"yes\":\r\n break\r\n\r\n except ValueError:\r\n erase_two += 1\r\n await ctx.channel.purge(limit=1)\r\n await ctx.send(\"잘못된 입력입니다. 카테고리의 번호를 입력해주세요.\")\r\n\r\n else:\r\n for i in range(14):\r\n if msg.content == str(i + 1):\r\n if cate[i][turn % 2]:\r\n choice = i\r\n cate[i][turn % 2] = False\r\n break\r\n else:\r\n choice = 0\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"이미 할당된 카테고리입니다. 다른 카테고리를 입력해주세요.\")\r\n error = True\r\n break\r\n\r\n # count 정의\r\n count = [0 for p in range(6)]\r\n for i in range(5):\r\n count[dice[i] - 1] += 1\r\n\r\n # subtotal / total\r\n if choice == 6 or choice == 13:\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"subtotal이나 Total은 선택할 수 없습니다.\")\r\n error = True\r\n\r\n # Aces ~ Sixes\r\n elif 0 <= choice <= 5:\r\n for i in range(6):\r\n if choice == i:\r\n for j in range(5):\r\n if dice[j] == (i + 1):\r\n score[i][turn % 2] += (i + 1)\r\n cate[i][turn % 2] = False\r\n break\r\n\r\n # Choice\r\n elif choice == 7:\r\n for i in range(5):\r\n score[7][turn % 2] += dice[i]\r\n cate[7][turn % 2] = False\r\n\r\n # 4 of a Kind\r\n elif choice == 8:\r\n for i in range(6):\r\n if count[i] >= 4:\r\n for j in range(5):\r\n score[8][turn % 2] += dice[j]\r\n cate[8][turn % 2] = False\r\n break\r\n # Full House\r\n elif choice == 9:\r\n for i in range(36):\r\n if count[int(i / 6)] == 3 and count[i % 6] == 2:\r\n for j in range(5):\r\n score[9][turn % 2] += dice[j]\r\n cate[9][turn % 2] = False\r\n break\r\n # S.Straight\r\n elif choice == 10:\r\n if count[2] >= 1 and count[3] >= 1:\r\n if (count[0] >= 1 and count[1] >= 1) or (count[1] >= 1 and count[4] >= 1) or (\r\n count[4] >= 1 and count[5] >= 1):\r\n score[10][turn % 2] = 15\r\n cate[10][turn % 2] = False\r\n # L.Straight\r\n elif choice == 11:\r\n if count[1] == 1 and count[2] == 1 and count[3] == 1 and count[4] == 1 and (\r\n count[0] == 1 or count[5] == 1):\r\n score[11][turn % 2] = 30\r\n 
cate[11][turn % 2] = False\r\n # Yacht\r\n elif choice == 12:\r\n for i in range(6):\r\n if count[i] == 5:\r\n score[12][turn % 2] = 50\r\n cate[12][turn % 2] = False\r\n break\r\n else:\r\n await ctx.channel.purge(limit=1)\r\n erase_two += 1\r\n await ctx.send(\"해당 번호는 존재하지 않습니다. 점수판에 옆의 번호로 입력해주세요.\")\r\n error = True\r\n\r\n if not error:\r\n await ctx.channel.purge(limit=(3 + erase_two))\r\n break\r\n\r\n if True in surren:\r\n break\r\n\r\n # subtotal 계산\r\n score[6][turn % 2] = 0\r\n for j in range(6):\r\n score[6][turn % 2] += score[j][turn % 2]\r\n\r\n # bonus 점수\r\n if score[6][turn % 2] >= 63:\r\n bonus = 35\r\n else:\r\n bonus = 0\r\n\r\n # total 계산\r\n score[13][turn % 2] = 0\r\n for j in range(6, 13):\r\n score[13][turn % 2] += score[j][turn % 2]\r\n score[13][turn % 2] += bonus\r\n\r\n # 마무리 텍스트 출력\r\n embed = discord.Embed(title=\"Yacht Dice\",\r\n description=\"Player A : [\" + str(ctx.author) + \"] vs. [\" + str(opponent)\r\n + \"] : Player B\", color=0xaaaaaa)\r\n for i in range(len(category)):\r\n if i == 6:\r\n embed.add_field(name=\"[\" + str(category[i]) + \"]\", value=str(score[i][0]) + \" / \"\r\n + str(\r\n score[i][1]) + \" [Aces ~ Sixes가 63점 이상이면 보너스 35점]\", inline=False)\r\n else:\r\n embed.add_field(name=\"[\" + str(category[i]) + \"]\", value=str(score[i][0]) + \" / \"\r\n + str(score[i][1]), inline=True)\r\n await ctx.send(embed=embed)\r\n\r\n # 점수 계산 함수\r\n def scorefunc(player, outcome):\r\n get_point = 0\r\n if 150 <= score[13][player] < 200:\r\n get_point = 2\r\n elif 200 <= score[13][player] < 250:\r\n get_point = 4\r\n elif 250 <= score[13][player] < 275:\r\n get_point = 7\r\n elif 275 <= score[13][player] < 300:\r\n get_point = 10\r\n elif 300 <= score[13][player] <= 325:\r\n get_point = 15\r\n\r\n if outcome == \"win\":\r\n get_point += 15\r\n elif outcome == \"lose\":\r\n get_point -= 15\r\n\r\n return get_point\r\n\r\n # 점수 계산\r\n if ctx.author != opponent:\r\n if score[13][0] > score[13][1] or surren[1]: # A가 이긴 경우\r\n 
score_data[str(ctx.author.id)] += scorefunc(0, \"win\")\r\n yacht_data[str(ctx.author.id), \"win\"] += 1\r\n score_data[str(opponent.id)] += scorefunc(1, \"lose\")\r\n yacht_data[str(opponent.id), \"lose\"] += 1\r\n\r\n embed = discord.Embed(title=\"Yacht Dice [\" + str(ctx.author) + \" 승리!!]\", color=0xaa0000)\r\n embed.add_field(name=\"Player A : \" + str(score[13][0]) + \" vs. \" + str(score[13][1]) + \" : Player B\"\r\n , value=\"Player A : \" + str(scorefunc(0, \"win\")) + \" point 획득\\n\"\r\n + \"[현재 point : \" + str(score_data[str(ctx.author.id)]) + \"]\\n\"\r\n + \"Player B : \" + str(scorefunc(1, \"lose\")) + \" point 획득\"\r\n + \"[현재 point : \" + str(score_data[str(opponent.id)]) + \"]\", inline=False)\r\n\r\n elif score[13][0] < score[13][1] or surren[0]: # B가 이긴 경우\r\n score_data[str(ctx.author.id)] += scorefunc(0, \"lose\")\r\n yacht_data[str(ctx.author.id), \"lose\"] += 1\r\n score_data[str(opponent.id)] += scorefunc(1, \"win\")\r\n yacht_data[str(opponent.id), \"win\"] += 1\r\n\r\n embed = discord.Embed(title=\"Yacht Dice [\" + str(opponent) + \" 승리!!]\", color=0x0000aa)\r\n embed.add_field(name=\"Player A : \" + str(score[13][0]) + \" vs. \" + str(score[13][1]) + \" : Player B\"\r\n , value=\"Player A : \" + str(scorefunc(0, \"lose\")) + \" point 획득\"\r\n + \"[현재 point : \" + str(score_data[str(ctx.author.id)]) + \"]\\n\"\r\n + \"Player B : \" + str(scorefunc(1, \"win\")) + \" point 획득\"\r\n + \"[현재 point : \" + str(score_data[str(opponent.id)]) + \"]\", inline=False)\r\n\r\n elif score[13][0] == score[13][1]: # 무승부\r\n score_data[str(ctx.author.id)] += scorefunc(0, \"draw\")\r\n yacht_data[str(ctx.author.id), \"draw\"] += 1\r\n score_data[str(opponent.id)] += scorefunc(1, \"draw\")\r\n yacht_data[str(opponent.id), \"draw\"] += 1\r\n\r\n embed = discord.Embed(title=\"Yacht Dice [무승부입니다. (어캐했누)]\", color=0xaaaaaa)\r\n embed.add_field(name=\"Player A : \" + str(score[13][0]) + \" vs. 
\" + str(score[13][1]) + \" : Player B\"\r\n , value=\"Player A : \" + str(scorefunc(0, \"draw\")) + \" point 획득\"\r\n + \" [현재 point : \" + str(score_data[str(ctx.author.id)]) + \"]\\n\"\r\n + \"Player B : \" + str(scorefunc(1, \"draw\")) + \" point 획득\"\r\n + \" [현재 point : \" + str(score_data[str(opponent.id)]) + \"]\", inline=False)\r\n\r\n else:\r\n embed = discord.Embed(title=\"Yacht Dice [Single Play]\", color=0xaa0000)\r\n embed.add_field(name=\"Player A : \" + str(score[13][0]) + \" vs. \" + str(score[13][1]) + \" : Player B\"\r\n , value=\"[Single Play는 최고 기록만 저장됩니다.]\", inline=False)\r\n\r\n await ctx.send(embed=embed)\r\n\r\n if score[13][0] > yacht_data[str(ctx.author.id), \"max\"]:\r\n yacht_data[str(ctx.author.id), \"max\"] = score[13][0]\r\n if score[13][1] > yacht_data[str(opponent.id), \"max\"]:\r\n yacht_data[str(opponent.id), \"max\"] = score[13][1]\r\n\r\n with open(\"score.bin\", \"wb\") as f: # 저장하기\r\n pickle.dump(score_data, f)\r\n with open(\"yacht.bin\", \"wb\") as f:\r\n pickle.dump(yacht_data, f)\r\n\r\n\r\n@client.command(name=\"점수_조정\", pass_context=True)\r\n# No.104 점수 추가 명령\r\nasync def change(ctx, type, user_name: discord.Member, amount):\r\n if ctx.author.id == 540360394691313664: # 명령 입력한 사람이 llMiNEll인 경우\r\n check = False\r\n score_data = scoreFileRead(user_name)\r\n rsp_data = rspFileRead(user_name)\r\n fortune_data = fortuneFileRead(user_name)\r\n yacht_data = yachtFileRead(user_name)\r\n\r\n if type == \"점수\":\r\n score_data[str(user_name.id)] += int(amount)\r\n check = True\r\n\r\n elif type == \"가위바위보:win\":\r\n rsp_data[str(user_name.id), \"win\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"가위바위보:lose\":\r\n rsp_data[str(user_name.id), \"lose\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"가위바위보:draw\":\r\n rsp_data[str(user_name.id), \"draw\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"운빨망겜:7\":\r\n fortune_data[str(user_name.id), \"7\"] += int(amount)\r\n check = True\r\n\r\n elif type == 
\"운빨망겜:8\":\r\n fortune_data[str(user_name.id), \"8\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"운빨망겜:9\":\r\n fortune_data[str(user_name.id), \"9\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"운빨망겜:Clear\":\r\n fortune_data[str(user_name.id), \"Clear\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"야추:win\":\r\n yacht_data[str(user_name.id), \"win\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"야추:lose\":\r\n yacht_data[str(user_name.id), \"lose\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"야추:draw\":\r\n yacht_data[str(user_name.id), \"draw\"] += int(amount)\r\n check = True\r\n\r\n elif type == \"야추:max\":\r\n yacht_data[str(user_name.id), \"max\"] += int(amount)\r\n check = True\r\n\r\n if check:\r\n embed = discord.Embed(title=\"[System] \\\"\" + type + \"\\\"\",\r\n description=str(user_name) + \"의 점수가 \" + amount + \"만큼 증가하였습니다.\", color=0xaaaaaa)\r\n else:\r\n embed = discord.Embed(title=\"[System] 점수 조정\",\r\n description=\"Error 104002 : 존재하지 않는 type입니다.\", color=0xaaaaaa)\r\n await ctx.send(embed=embed)\r\n\r\n with open(\"score.bin\", \"wb\") as f:\r\n pickle.dump(score_data, f) # 저장하기\r\n with open(\"rsp.bin\", \"wb\") as f:\r\n pickle.dump(rsp_data, f)\r\n with open(\"fortune.bin\", \"wb\") as f:\r\n pickle.dump(fortune_data, f)\r\n with open(\"yacht.bin\", \"wb\") as f:\r\n pickle.dump(yacht_data, f)\r\n\r\n else:\r\n embed = discord.Embed(title=\"[System] 점수 조정\", description=\"Error 104001 : 이 Bot의 소유자에게만 권한이 있습니다.\",\r\n color=0xaaaaaa)\r\n await ctx.send(embed=embed)\r\n\r\naccess_token = os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n","sub_path":"minigame_Bot.py","file_name":"minigame_Bot.py","file_ext":"py","file_size_in_byte":45632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"21588861","text":"#******************************************************************************\n#\n#******************************************************************************\nimport argparse\nimport h5py\nimport numpy\nimport csv\nimport os\n\ndef clear_metadata_value(attributes, attribute_name):\n \"\"\" Clear the specified attribute value.\n\n :param attributes: The list of attributes containing the value to be cleared.\n :param attribute_name: The name of the attribute to be cleared.\n \"\"\"\n\n if attribute_name in attributes:\n print(\"Information: The value for\", attribute_name, \"has been ignored.\")\n del attributes[attribute_name]\n\n#******************************************************************************\ndef get_metadata_type(attribute_name):\n \"\"\" Retrieve the specified attribute's type.\n\n :param attribute_name: The name of the attribute to retrive the type for.\n :returns: The attribute's type, None if not found.\n \"\"\"\n\n typeMap = dict()\n \n \"\"\"\n Carrier Metadata\n \"\"\"\n #Integer types\n typeMap['horizDatumValue'] = numpy.int64\n typeMap['timeRecordInterval'] = numpy.int64\n typeMap['numberOfTimes'] = numpy.int64\n typeMap['numberOfStations'] = numpy.int64\n typeMap['verticalDatum'] = numpy.int64\n typeMap['numPointsLongitudinal'] = numpy.int64\n typeMap['numPointsLatitudinal'] = numpy.int64\n typeMap['minGridPointLongitudinal'] = numpy.int64\n typeMap['minGridPointLatitudinal'] = numpy.int64\n\n #Real types\n typeMap['surfaceCurrentDepth'] = numpy.float64\n typeMap['gridOriginLongitude'] = numpy.float64\n typeMap['gridOriginLatitude'] = numpy.float64\n typeMap['gridSpacingLongitudinal'] = numpy.float64\n typeMap['gridSpacingLatitudinal'] = numpy.float64\n typeMap['gridLandMaskValue'] = numpy.float64\n typeMap['uncertaintyOfSpeed'] = numpy.float64\n typeMap['uncertaintyOfDirection'] = numpy.float64\n typeMap['uncertaintyOfHorzPosition'] = numpy.float64\n typeMap['uncertaintyOfVertPosition'] = 
numpy.float64\n typeMap['uncertaintyOfTime'] = numpy.float64\n typeMap['minSurfCurrentSpeed'] = numpy.float64\n typeMap['maxSurfCurrentSpeed'] = numpy.float64\n\n #String types\n typeMap['productSpecification'] = numpy.bytes_\n typeMap['dateTimeOfIssue'] = numpy.bytes_\n typeMap['nameRegion'] = numpy.bytes_\n typeMap['nameSubregion'] = numpy.bytes_\n typeMap['horizDatumReference'] = numpy.bytes_\n typeMap['protectionScheme'] = numpy.bytes_\n typeMap['dateTimeOfFirstRecord'] = numpy.bytes_\n typeMap['dateTimeOfLastRecord'] = numpy.bytes_ \n typeMap['methodCurrentsProduct'] = numpy.bytes_\n\n #Enumeration types\n typeMap['dataProtection'] = numpy.int64\n typeMap['typeOfCurrentData'] = numpy.int64\n typeMap['dataCodingFormat'] = numpy.int64\n typeMap['depthTypeIndex'] = numpy.int64\n\n #Removed?\n typeMap['nationalOriginator'] = numpy.bytes_\n typeMap['producingAgency'] = numpy.bytes_ \n typeMap['updateApplicationDate'] = numpy.bytes_\n typeMap['fileName'] = numpy.bytes_\n typeMap['dataType'] = numpy.bytes_\n typeMap['methodOrSource'] = numpy.bytes_\n typeMap['editionNumber'] = numpy.int64\n typeMap['updateNumber'] = numpy.int64 \n typeMap['numberOfNodes'] = numpy.int64\n \n #Removed in 1.09\n #typeMap['westBoundLongitude'] = numpy.float64\n #typeMap['eastBoundLongitude'] = numpy.float64\n #typeMap['southBoundLatitude'] = numpy.float64\n #typeMap['northBoundLatitude'] = numpy.float64\n\n if attribute_name not in typeMap:\n return None\n \n return typeMap[attribute_name]\n \n#******************************************************************************\ndef add_metadata(attributes, metadata_file):\n \"\"\" Add metadata values to the S-111 attributes.\n\n :param attributes: The S-111 attributes to be populated.\n :param metadata_file: The ASCII CSV file to retrieve the metadata values from.\n \"\"\"\n\n with open(metadata_file) as csvfile:\n reader = csv.reader(csvfile)\n \n #Grab the header and data rows.\n header = next(reader)\n data = next(reader)\n \n colnum = 0\n 
\n #For each column in the data row...\n for col in data:\n attribute_name = header[colnum].strip()\n attribute_value = col.strip().encode()\n attribute_type = get_metadata_type(attribute_name)\n \n #If we don't know what this attribute is, just report it to the user.\n if attribute_type == None:\n print(\"Warning: Unknown metadata value\", attribute_name)\n #Else if this is a string type...\n elif attribute_type == numpy.bytes_:\n attributes.create(attribute_name, attribute_value)\n #Else use the type returned.\n else:\n attributes.create(attribute_name, attribute_value, dtype=attribute_type)\n \n colnum += 1\n\n\n #We have a few pieces of metadata that may have been specified... but we want to ignore\n #They are computed attributes.\n clear_metadata_value(attributes, 'dateTimeOfFirstRecord')\n clear_metadata_value(attributes, 'dateTimeOfLastRecord')\n clear_metadata_value(attributes, 'numberOfStations')\n clear_metadata_value(attributes, 'numberOfTimes')\n clear_metadata_value(attributes, 'dataCodingFormat')\n clear_metadata_value(attributes, 'timeRecordInterval')\n clear_metadata_value(attributes, 'minSurfCurrentSpeed')\n clear_metadata_value(attributes, 'maxSurfCurrentSpeed')\n\n #Removed in 1.09\n #clear_metadata_value(attributes, 'westBoundLongitude')\n #clear_metadata_value(attributes, 'eastBoundLongitude')\n #clear_metadata_value(attributes, 'southBoundLatitude')\n #clear_metadata_value(attributes, 'northBoundLatitude')\n \n #Since this is a new file, we don't have any stations yet.\n attributes.create('numberOfStations', 0, dtype=numpy.int64)\n attributes.create('numberOfTimes', 0, dtype=numpy.int64)\n\n#****************************************************************************** \ndef create_dataset(output_file, metadata_file):\n \"\"\" Create a new S-111 dataset.\n\n :param output_file: The name of the file to be created.\n :param metadata_file: The ASCII CSV file to retrieve the metadata values from.\n \"\"\"\n\n #Make sure the output file has the 
correct extension.\n filename, file_extension = os.path.splitext(output_file)\n output_file_with_extension = filename + \".h5\"\n\n #Create the new HDF5 file.\n with h5py.File(output_file_with_extension, \"w\") as hdf_file:\n \n #Add the metadata to the file.\n add_metadata(hdf_file.attrs, metadata_file)\n \n#****************************************************************************** \ndef create_command_line():\n \"\"\"Create and initialize the command line parser.\n \n :returns: The command line parser.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Create S-111 File')\n\n parser.add_argument('-m', '--metadata-file', help='The text file containing the file metadata.', required=True)\n parser.add_argument(\"outputFile\", nargs=1)\n\n return parser\n\n#****************************************************************************** \ndef main():\n\n #Create the command line parser.\n parser = create_command_line()\n\n #Parse the command line.\n results = parser.parse_args()\n \n create_dataset(results.outputFile[0], results.metadata_file)\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/s111_create_file.py","file_name":"s111_create_file.py","file_ext":"py","file_size_in_byte":7514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"42686357","text":"import cv2,time\n\nface_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\") # create a CascadeClassofier object\n # path to the xml file which contains the face features\n\nvideo = cv2.VideoCapture(0) # method to create VideoCapture object. it will trigger the camera\n # '0' is to specify that use bulit-in camera\n # either give the path to the video file or use numbers.numbers specify that you will be using the erbcam to capture video\na = 1\n\nwhile True:\n\n a = a + 1\n check,frame = video.read() # frame -- it is a numpy array, it represents the first image that video captures\n # check -- it is bool data type, return true if python is able to read the VideoCapture object\n print(check)\n print(frame)\n\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)# convert each frame into a gray scale image\n \n faces = face_cascade.detectMultiScale(gray, 1.05,5) # method to search for the face rectangle co-ordinates\n # scaleFactor -- Decrease the shape value by 5%, until the face is found.smaller this value,the greater is the accuracy\n print(type(faces))\n print(faces)\n\n for x,y,w,h in faces:\n cv2.rectangle(gray,(x,y),(x+w,y+h),(0,255,0),3) # method to create the face rectangle \n\n\n cv2.imshow('Capturing',gray)\n\n key = cv2.waitKey(1) # this will generate a new frame after every 1 millseconds\n\n if key == ord('q'): # once you enter 'q' the window will be destroyed\n break\n\nprint(a)\n\nvideo.release() # this will release cmaera in some millisecond\n\ncv2.destroyAllWindows()\n\n","sub_path":"face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"609685493","text":"\"Short demo inspired by https://docs.fast.ai/callbacks.mixup.html\"\nfrom fastai.vision import *\nfrom manifold_mixup import *\n\n# gets the data\npath = untar_data(URLs.MNIST_SAMPLE)\ndata = ImageDataBunch.from_folder(path)\n\n# no mixup\nmodel = simple_cnn((3,16,16,2))\nlearn = Learner(data, model, metrics=[accuracy])\nlearn.fit(8)\nlearn.recorder.plot_losses()\n\n# input mixup\nmodel = simple_cnn((3,16,16,2))\nlearn = Learner(data, model, metrics=[accuracy]).mixup()\nlearn.fit(8)\nlearn.recorder.plot_losses()\n\n# manifold mixup\nmodel = simple_cnn((3,16,16,2))\nlearn = Learner(data, model, metrics=[accuracy]).manifold_mixup()\nlearn.fit(8)\nlearn.recorder.plot_losses()\n\n# output mixup\nmodel = simple_cnn((3,16,16,2))\nlearn = Learner(data, model, metrics=[accuracy]).output_mixup()\nlearn.fit(8)\nlearn.recorder.plot_losses()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"123516634","text":"#%%\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 14 13:21:10 2018\r\nUpdated on Nov 15\r\nLast Updated on Nov16\r\n\r\n@author: bcubrich\r\n\r\n\r\n\r\nSUMMARY\r\n--------------------------------\r\nThis code takes audit files for gaseous data and collects the audit and \r\nindicated measurements, then outputs a pipe delimited text file called\r\n 'QA_output.txt' that can be directly uploaded to AQS.\r\n\r\nINDEX\r\n-------------------------------\r\n1. Functions\r\n -functions to get filenames and directories \r\n\r\n2. Retrieve Data\r\n -Section of the code to get a list of startion parameter \r\n (State Code, County Code, Paramter Metho, Etc.) , and also to get the \r\n files that the user wishes to create an AQS import file for\r\n \r\n3. Create Output Dataframe\r\n -Take the data from the user input audit forms and convert it to a pandas\r\n data frame (df). This is done partly because of how I find and assign the level \r\n values, but also because a pandas dataframe can quickly and easily be written \r\n to a pipe ('|') delimited text file, which is use to to upload the data\r\n\r\n4. Write to file \r\n -Write the above df to a file\r\n\r\n5. Testing\r\n -Not used here. Was create to check if this script was working, but \r\n actually became useful for error checking data already in AQS \r\n\r\n6. Update AQS\r\n -If the above finds error in AQS then this will write and AQS update file\r\n that can fix some types of mistakes\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np #didn't even use numpy!!! HA!\r\n#import seaborn as sns\r\nfrom tkinter import Tk\r\nfrom tkinter.filedialog import askopenfilename\r\n#import matplotlib.pyplot as plt\r\nimport os\r\n#import xlrd\r\nimport wx\r\n\r\n'''---------------------------------------------------------------------------\r\n 1. 
Functions\r\n----------------------------------------------------------------------------'''\r\n\r\n#The following functions are just used to get filepaths\r\n#I usually just run it once to get the path, and then leave this \r\n#fucntion so that I can get othe rpaths if needed\r\ndef get_dat():\r\n root = Tk()\r\n root.withdraw()\r\n root.focus_force()\r\n root.attributes(\"-topmost\", True) #makes the dialog appear on top\r\n filename = askopenfilename() # Open single file\r\n \r\n return filename\r\n\r\n#Thanks Kristy Weber 11/15/18 for giving me these functions to specify directories\r\ndef audit_path():\r\n app = wx.App()\r\n \r\n frame = wx.Frame(None, -1, 'win.py')\r\n frame.SetSize(0,0,200,50)\r\n \r\n # Create open file dialog\r\n openFileDialog = wx.DirDialog(frame, \"Choose directory with audit data\", \"\",\r\n wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)\r\n \r\n openFileDialog.ShowModal()\r\n print(openFileDialog.GetPath())\r\n \r\n # outfile_path is the string with the path name saved as a variable\r\n path = openFileDialog.GetPath()#+'\\\\'\r\n openFileDialog.Destroy()\r\n \r\n del app\r\n return path\r\n\r\n\r\ndef get_outpath():\r\n #function to get output path of file\r\n app = wx.App()\r\n \r\n frame = wx.Frame(None, -1, 'win.py')\r\n frame.SetSize(0,0,200,50)\r\n \r\n # Create open file dialog\r\n openFileDialog = wx.DirDialog(frame, \"Choose output file directory\", \"\",\r\n wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)\r\n \r\n openFileDialog.ShowModal()\r\n print(openFileDialog.GetPath())\r\n \r\n # outfile_path is the string with the path name saved as a variable\r\n out_path = openFileDialog.GetPath()\r\n openFileDialog.Destroy()\r\n \r\n del app\r\n \r\n return out_path\r\n\r\n'''---------------------------------------------------------------------------\r\n 2. 
Retrieve Data\r\n---------------------------------------------------------------------------'''\r\n\r\n\r\nsites=r'U:/PLAN/BCUBRICH/Python/Parameter Reader/'\\\r\nr'PARAMETERS.xls'\r\n\r\nsites_df=pd.read_excel(sites, converters={'SITE NAME':str,'State Code':str,\r\n 'County Code':str, 'Site Code':str,\r\n 'Paramter':str, 'Analyt':str, \r\n 'Method':str, 'Unit':str}) # load data\r\nsites_df['Analyt']=sites_df['Analyt'].str.strip('()') #strip parentheses from \r\n\r\n#This is the original path for when I wrote the code\r\n#directory='U:/PLAN/BCUBRICH/Python/Parameter Reader/tests'\r\n\r\n#get the path where the data are stored\r\ndirectory=audit_path()\r\n\r\n#I copied these columns right out of a pipe delimeted text file, and just \r\n#pasted them here. Need to load this as a big text string here\r\ncolumns_raw=r'Transaction Type|Action Indicator|Assessment Type|Performing '\\\r\n r'Agency|State Code / Tribal Indicator|County Code / Tribal Code|Site '\\\r\n r'Number|Parameter Code|POC|Assessment Date|Assessment Number|Monitor '\\\r\n r'Method Code|Reported Unit|Level 1 Monitor Concentration|Level 1 '\\\r\n r'Assessment Concentration|Level 2 Monitor Concentration|Level 2 '\\\r\n r'Assessment Concentration|Level 3 Monitor Concentration|Level 3 '\\\r\n r'Assessment Concentration|Level 4 Monitor Concentration|Level 4 '\\\r\n r'Assessment Concentration|Level 5 Monitor Concentration|Level 5 '\\\r\n r'Assessment Concentration|Level 6 Monitor Concentration|Level 6 '\\\r\n r'Assessment Concentration|Level 7 Monitor Concentration|Level 7 '\\\r\n r'Assessment Concentration|Level 8 Monitor Concentration|Level 8 '\\\r\n r'Assessment Concentration|Level 9 Monitor Concentration|Level 9 '\\\r\n r'Assessment Concentration|Level 10 Monitor Concentration|Level '\\\r\n r'10 Assessment Concentration'\r\n\r\n#Then break it into column headers here\r\ncolumns=columns_raw.split('|')\r\n\r\n#Next, create empty df. This is very important. 
In order to upload the output file\r\n#when everything is done we need to make sure there are a specific number of \r\n#pipes. The easiest way I can see to do that is to create a df with the right\r\n#number of columns, then fill only those columns. When the df is written to a \r\n#csv we can just specify '|' as the sep, and it will put in pipes for each of\r\n#of the empty rows.\r\noutput_df=pd.DataFrame(columns=columns) \r\n\r\ncount=0 #just want to be able to check if we looped all the files\r\n\r\n\r\n'''---------------------------------------------------------------------------\r\n 3. Create Output Dataframe\r\n \r\nThis section focuses on the pandas df 'output_df'. I use this df to store up\r\nall the info needed for an AQS upload that can be easily saved to a pipe \r\ndelimited csv.\r\n----------------------------------------------------------------------------'''\r\n\r\n\r\nfor filename in os.listdir(directory): #loop through files in user's dir\r\n if filename.endswith(\".xls\") or filename.endswith(\".xlsx\"):\r\n# print(filename) #this is useful for double checking the files are read\r\n \r\n file=filename.split('/')[-1][:-4] #Get the filename minus the extension\r\n site_name=file[:2] #Get sitename from filename\r\n if site_name=='OG' : site_name='O2' #OG is an old site, it's now called O2\r\n if site_name=='HA' : site_name='HW' #double naming...\r\n \r\n analyt=file.split()[2] #Find out what was being measured from filename\r\n #Sometime people use the following interchangeably, but we only want the NO2 instrument name\r\n if analyt.upper() == 'NOX' or analyt.upper() == 'NO' : analyt = 'NO2'\r\n \r\n #next line gets the info about the insturment and site from the Paramters... 
list\r\n loc_deets=sites_df[(sites_df['Site Symbol']==site_name)&\\\r\n (sites_df['Analyt'].str.contains\\\r\n (analyt.upper()))].reset_index()\r\n \r\n #fill in the parts of the df that will be the same for every entry\r\n output_df.loc[count,'Transaction Type']='QA'\r\n output_df.loc[count,'Action Indicator']='I'\r\n output_df.loc[count,'Assessment Type']='Annual PE'\r\n output_df.loc[count,'Performing Agency']='1113'\r\n output_df.loc[count,'State Code / Tribal Indicator']='49'\r\n \r\n #need to change the date from the format in the filename to the AQS format\r\n date=file.split()[1] #get date from filename\r\n date_split=date.split('-') #split on'-'\r\n date_fmt=date_split[2]+date_split[0]+date_split[1] #rearrange\r\n \r\n \r\n if len(loc_deets)>=1: #this if prevents some errors when there wasn't a match\r\n if len(loc_deets)==2: loc_deets=loc_deets[loc_deets['POC']=='1']\r\n #This section saves all the information about the site to the out_put \r\n #df in the AQS format.\r\n output_df.loc[count,'County Code / Tribal Code']=loc_deets.loc[0,'County Code']\r\n output_df.loc[count,'Site Number']=loc_deets.loc[0,'Site Code']\r\n output_df.loc[count,'Parameter Code']=int(loc_deets.loc[0,'Parameter'])\r\n output_df.loc[count,'POC']=loc_deets.loc[0,'POC']\r\n output_df.loc[count,'Assessment Date']=date_fmt\r\n output_df.loc[count,'Assessment Number']=1\r\n output_df.loc[count,'Monitor Method Code']=loc_deets.loc[0,'Method']\r\n output_df.loc[count,'Reported Unit']=loc_deets.loc[0,'Unit']\r\n \r\n \r\n #for NO2, NOx\r\n skiprows=31\r\n n_rows=9\r\n usecols=[1,8,9]\r\n \r\n #need some if's here because the different excel spreadsheets\r\n #have data in different places. 
These if's just specify where the\r\n #data is saved in the excel file.\r\n if analyt == 'O3':\r\n skiprows=24\r\n n_rows=5\r\n usecols=[1,3,5]\r\n if analyt == 'SO2':\r\n skiprows=21\r\n n_rows=4\r\n usecols=[1,3,4]\r\n if analyt == 'CO':\r\n skiprows=21\r\n n_rows=4\r\n usecols=[2,4,5]\r\n \r\n #Read in the audit workbook\r\n wb=pd.read_excel((directory+'/'+filename), skiprows =skiprows, \r\n n_rows=n_rows, usecols=usecols, \r\n names=['Audit Level', 'Audit', 'Indicated'])\r\n \r\n #There are two forms of ozone workbooks, so we need some if's\r\n #here to make sure that we get the right one. This works by \r\n #looking to see if there are any levels entries in the the first wb\r\n if analyt == 'O3' and wb['Audit Level'].isna().sum()>=7:\r\n# print('HERE!!!!') #just to help debug where the old )3 forms were used\r\n skiprows=24\r\n n_rows=5\r\n usecols=[2,4,6]\r\n wb=pd.read_excel((directory+'/'+filename), skiprows =skiprows, \r\n n_rows=n_rows, usecols=usecols, \r\n names=['Audit Level', 'Audit', 'Indicated'])\r\n \r\n no_match=0\r\n #need this if because you get some errors if the wb df is empty.\r\n #There are several reasons this might happen, so if you do get not\r\n #matches it requires some debugging. The most likely cause is a \r\n #non standardized form.\r\n if wb.empty==True:\r\n no_match+=1\r\n# print('Non match '+str(int(no_match))+' = ' +filename)\r\n else:\r\n #get rid of rows where there was no Level or Indicated value reported\r\n wb=wb.dropna(subset = ['Indicated', 'Audit Level'])\r\n wb=wb.set_index('Audit Level')\r\n \r\n \r\n for level in wb.index:\r\n level = int(level)\r\n# print (level) #for debug\r\n #these next lines are where the magic happens. I find the\r\n #levels contained in the audit file, then concat a string\r\n #with the column name in the output_df that corresponds \r\n #to the level. 
By writing there, it makes sure that the\r\n #correct number of pipes will be included in the AQS file.\r\n col_name='Level '+str(level)+' Assessment Concentration'\r\n output_df.loc[count,col_name]=wb.loc[level, 'Audit']\r\n col_name='Level '+str(level)+' Monitor Concentration'\r\n output_df.loc[count,col_name]=wb.loc[level, 'Indicated']\r\n \r\n else:\r\n print ('No site location matched')\r\n count+=1\r\n continue\r\n else:\r\n continue\r\n\r\n\r\n\r\n\r\n'''----------------------------------------------------------------------------\r\n 4. Write to file\r\n---------------------------------------------------------------------------'''\r\n\r\n\r\n\r\nout_path = get_outpath() #get user selected output path\r\n\r\n\r\noutput_df=output_df.set_index('Transaction Type') #need to get rid of index\r\noutput_df.to_csv(out_path+'\\QA_output.txt', sep='|') #write to pipe file\r\n\r\n\r\n'''---------\r\nThe following whole bit is used to add a '#' to the first line of the file. \r\nSeems like a lot of code just to add a hashtag to the file, but I like having \r\nthe header info right in the file, in case someone only sees the text file.\r\n------'''\r\nappendText='#'\r\ntext_file=open(out_path+'\\QA_output.txt','r')\r\ntext=text_file.read()\r\ntext_file.close()\r\ntext_file=open(out_path+'\\QA_output.txt','w')\r\ntext_file.seek(0,0)\r\ntext_file.write(appendText+text)\r\ntext_file.close()\r\n\r\n#%%\r\n\r\n'''--------------------------------------------------------------------------\r\n 5. Testing\r\n \r\n-This whole part of the script is used to check if previously uploaded data\r\non AQS is accurate. It requires a file called 'verify', which is a pipe-\r\ndelimited output file from AQS of the Audit data over a given period.\r\nThe error checking is not exactly straight forward. 
\r\n---------------------------------------------------------------------------'''\r\ntesting=False #set to true if you want to do some testing\r\nif testing==True:\r\n output_file=open('U:/PLAN/BCUBRICH/Python/Parameter Reader/QA_output.txt','r')\r\n out=output_file.read()\r\n out_lines=out.split('\\n')\r\n verify_file=open('U:/PLAN/BCUBRICH/Python/Parameter Reader/verify.txt','r')\r\n vers=verify_file.read()\r\n vers_lines=vers.split('\\n')\r\n print(len(out_lines))\r\n match=0\r\n count_err=0\r\n count_err2=0\r\n conv_cols=r'Level 1 Monitor Concentration|Level 1 '\\\r\n r'Assessment Concentration|Level 2 Monitor Concentration|Level 2 '\\\r\n r'Assessment Concentration|Level 3 Monitor Concentration|Level 3 '\\\r\n r'Assessment Concentration|Level 4 Monitor Concentration|Level 4 '\\\r\n r'Assessment Concentration|Level 5 Monitor Concentration|Level 5 '\\\r\n r'Assessment Concentration|Level 6 Monitor Concentration|Level 6 '\\\r\n r'Assessment Concentration|Level 7 Monitor Concentration|Level 7 '\\\r\n r'Assessment Concentration|Level 8 Monitor Concentration|Level 8 '\\\r\n r'Assessment Concentration|Level 9 Monitor Concentration|Level 9 '\\\r\n r'Assessment Concentration|Level 10 Monitor Concentration|Level '\\\r\n r'10 Assessment Concentration'\r\n \r\n conv_cols=conv_cols.split('|')\r\n conv_dict=dict()\r\n missed=[]\r\n \r\n def unq(seq):\r\n # Not order preserving \r\n Set = set(seq)\r\n return list(Set)\r\n \r\n for item in conv_cols:\r\n conv_dict[item]=float\r\n \r\n \r\n for line1 in out_lines:\r\n for line2 in vers_lines:\r\n if line1==line2:\r\n match+=1\r\n else:\r\n \r\n if line1[:38]==line2[:38]:\r\n \r\n df_check_temp=pd.DataFrame([line1.split('|'), line2.split('|')], columns=columns)\r\n df_check_temp[conv_cols]=df_check_temp[conv_cols].apply(pd.to_numeric)\r\n \r\n if count_err==0:\r\n df_check=df_check_temp.copy()\r\n else:\r\n df_check=df_check.append(df_check_temp)\r\n \r\n count_err+=1\r\n \r\n for line1 in out_lines:\r\n if line1 in 
vers_lines:\r\n something=1\r\n else:\r\n if line1[:38] in vers_lines:\r\n something=1\r\n else:\r\n print(line1)\r\n \r\n \r\n before=len(df_check)\r\n df_check=df_check.drop_duplicates(keep=False)\r\n after=len(df_check)\r\n match=match+(before-after)/2\r\n match+=5\r\n print(match)\r\n \r\n\r\n'''----------------------------------------------------------------------------\r\n 6. Update AQS\r\n \r\nYou can create an update file from the found errors in the above\r\ntesting section here. These can be directly uploaded to \r\n----------------------------------------------------------------------------'''\r\nupdate=False #change this if you want to run this.\r\nif update==True:\r\n df_update=df_check.loc[0,:].set_index('Transaction Type')\r\n df_update['Action Indicator']='U'\r\n df_update.to_csv('U:/PLAN/BCUBRICH/Python/Parameter Reader/QA_update.txt', sep='|') \r\n \r\n '''This whole bit is used to add a '#' to the first line of the file'''\r\n appendText='#'\r\n text_file=open('U:/PLAN/BCUBRICH/Python/Parameter Reader/QA_update.txt','r')\r\n text=text_file.read()\r\n text_file.close()\r\n text_file=open('U:/PLAN/BCUBRICH/Python/Parameter Reader/QA_update.txt','w')\r\n text_file.seek(0,0)\r\n text_file.write(appendText+text)\r\n text_file.close()\r\n","sub_path":"APE_to_AQS_GASEOUS_0.0.1.py","file_name":"APE_to_AQS_GASEOUS_0.0.1.py","file_ext":"py","file_size_in_byte":17905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"243219032","text":"\"\"\"To run the tests, you will need to move test documents into the pdf-to-png\ndirectory. The tests require a single page PDF named test_singlepage.pdf, a\nmulti-page PDF named test_multipage.pdf, and a JPG named test_nonpdf.jpg.\"\"\"\n\nfrom unittest import TestCase\nfrom server import app\nfrom io import FileIO\n\n\nclass FlaskTestsServer(TestCase):\n \"\"\"Flask unit tests for server routes.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test client before each test.\"\"\"\n\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"key\"\n\n def test_no_file(self):\n \"\"\"Test /upload-pdf route with no file submitted.\"\"\"\n\n result = self.client.post(\"/upload-pdf\")\n\n self.assertIn(\"No file submitted\", result.data)\n\n def test_single_page_pdf(self):\n \"\"\"Test /upload-pdf route for single page pdf.\"\"\"\n\n data = {\"file\": FileIO(\"test_singlepage.pdf\")}\n result = self.client.post(\"/upload-pdf\",\n content_type=\"multipart/form-data\",\n data=data)\n\n self.assertIn(\"/uploads/test_singlepage.png\", result.data)\n\n def test_multipage_pdf(self):\n \"\"\"Test /upload-pdf route for multi page pdf.\"\"\"\n\n data = {\"file\": FileIO(\"test_multipage.pdf\")}\n result = self.client.post(\"/upload-pdf\",\n content_type=\"multipart/form-data\",\n data=data)\n\n self.assertIn(\"/uploads/test_multipage-1.png\", result.data)\n\n def test_wrong_file_type(self):\n \"\"\"Test /upload-pdf route for non-pdf file upload.\"\"\"\n\n data = {\"file\": FileIO(\"test_nonpdf.jpg\")}\n result = self.client.post(\"/upload-pdf\",\n content_type=\"multipart/form-data\",\n data=data)\n\n self.assertIn(\"/upload-pdf route accepts only PDF file format.\",\n result.data)\n\n\nif __name__ == \"__main__\":\n import unittest\n\n 
unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"445883050","text":"import cv2, numpy as np\n\nimg = cv2.imread(\"images/IMG_20180331_180458.jpg\")\n\nimg_y_cr_cb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\ny, cr, cb = cv2.split(img_y_cr_cb)\n\n# Applying equalize Hist operation on Y channel.\ny_eq = cv2.equalizeHist(y)\n\nimg_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))\nimg_rgb_eq = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)\n\ncv2.imshow('original', img)\ncv2.imshow('equalized',img_rgb_eq)\ncv2.waitKey(0)\n\n# equ = cv2.equalizeHist(img)\n\n# #Contrast liomited adaptive histogram equalization\n# clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n# cl1 = clahe.apply(img)\n\n# res = np.hstack((img,cl1)) #stacking images side-by-side\n\n# cv2.imshow('res', res)\n# cv2.waitKey(0)","sub_path":"pre-processing/image_eqhist.py","file_name":"image_eqhist.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"275131933","text":"from joblib import load\nimport pandas as pd\nimport numpy as np\n\ndef featureCorrection(result):\n\n frame = pd.read_csv(\"Outlier_removed.csv\")\n frame = frame.drop('Price', axis=1)\n\n result['Journey_month'] = result['Departure_Date'].split('-')[1]\n result['Journey_day'] = result['Departure_Date'].split('-')[2]\n result.pop('submit')\n result.pop('Departure_Date')\n\n frame = frame.append(result, ignore_index=True)\n\n frame[['Journey_day', 'Journey_month']] = frame[['Journey_day', 'Journey_month']].astype('int64')\n frame['Total_Duration'] = frame['Total_Duration'].astype('float64')\n\n frame = pd.get_dummies(frame, drop_first=True)\n scaler = load(\"FeatureScaler.pkl\")\n result = frame.iloc[-1].values\n result = scaler.transform(np.reshape(result, (1, -1)))\n\n return result\n","sub_path":"featureSetting.py","file_name":"featureSetting.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"275887518","text":"\n\n#calss header\nclass _SCRAP():\n\tdef __init__(self,): \n\t\tself.name = \"SCRAP\"\n\t\tself.definitions = [u'to not continue with a system or plan: ', u'to get rid of something that is no longer useful or wanted, often using its parts in new ways: ', u'to have a fight or an argument']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_scrap.py","file_name":"_scrap.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"464387151","text":"\"\"\"\nSuperModule for high level training on Pytorch models\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\n# local imports\nfrom ..datasets import TensorDataset\nfrom ..callbacks import CallbackModule, History, TQDM\nfrom ..constraints import ConstraintModule\nfrom ..regularizers import RegularizerModule\n\n\nclass SuperModule(nn.Module):\n\n def __init__(self, \n plot=False, \n module=None, \n use_gpu=torch.cuda.is_available()):\n \"\"\"\n SuperModule for high-level training of Pytorch models\n\n TODO:\n - allow metrics\n - e.g. for validation accuracy instead of loss\n \"\"\"\n super(SuperModule, self).__init__()\n \n self.plot = plot\n self._module = module\n self._use_gpu = use_gpu\n\n self.history = History()\n self._callbacks = [self.history]\n self._constraints = []\n self._regularizers = []\n self.stop_training = False\n\n def forward(self, *input):\n \"\"\"\n Defines the computation performed at every call.\n Should be overriden by all subclasses.\n \"\"\"\n if self._module:\n return self._module(*input)\n else:\n raise NotImplementedError('Must wrap existing module OR \\\n subclass must implement this method')\n\n def set_loss(self, loss):\n self._loss = loss\n\n def set_optimizer(self, optimizer, **kwargs):\n if 'parameters' in kwargs:\n parameters = kwargs['parameters']\n else:\n parameters = self.parameters()\n self._optimizer = optimizer(parameters, **kwargs)\n\n def set_regularizers(self, regularizers):\n self._regularizers = regularizers\n\n def set_constraints(self, constraints):\n self._constraints = constraints\n\n def set_callbacks(self, callbacks):\n self._callbacks += callbacks\n\n def fit(self,\n x, \n y,\n validation_data=None, \n nb_epoch=100, \n batch_size=32,\n cuda_device=None,\n verbose=1):\n \"\"\"\n Fit a model on torch tensors\n 
\"\"\"\n train_dataset = TensorDataset(x, y)\n train_loader = DataLoader(train_dataset, batch_size=batch_size)\n if validation_data is not None:\n test_dataset = TensorDataset(validation_data[0], validation_data[1])\n val_loader = DataLoader(test_dataset, batch_size=batch_size)\n else:\n val_loader = None\n self.fit_loader(loader=train_loader, val_loader=val_loader,\n nb_epoch=nb_epoch, cuda_device=cuda_device,\n verbose=verbose)\n\n def fit_on_batch(self, \n x, \n y, \n cuda_device=None):\n inputs = Variable(x)\n targets = Variable(y)\n if cuda_device is not None:\n inputs = inputs.cuda(cuda_device)\n targets = targets.cuda(cuda_device)\n\n # zero the gradients\n self._optimizer.zero_grad()\n # make forward pass\n outputs = self(inputs)\n # compute model loss\n loss = self._loss(outputs, targets)\n reg_loss = self._regularizers.compute_loss()\n total_loss = loss + reg_loss\n # make backward pass\n total_loss.backward()\n # make optimizer step to update weights\n self._optimizer.step()\n\n def fit_loader(self, \n loader, \n val_loader=None, \n nb_epoch=100,\n cuda_device=None,\n verbose=1):\n \"\"\"\n Fit a model on a DataLoader\n \"\"\"\n ## create regularizers\n if len(self._regularizers) > 0:\n regularizers = RegularizerModule(self._regularizers)\n regularizers.set_model(self)\n else:\n regularizers = None\n\n ## create constraints\n constraints = ConstraintModule(self._constraints)\n constraints.set_model(self)\n\n ## create callbacks\n if verbose > 0:\n self._callbacks += [TQDM()]\n callbacks = CallbackModule(self._callbacks)\n callbacks.set_model(self)\n\n callbacks.on_train_begin()\n\n for epoch_idx in range(nb_epoch):\n epoch_logs = {\n 'nb_batches': int(math.ceil(len(loader.dataset.inputs)/loader.batch_size)),\n 'nb_epoch': nb_epoch\n }\n callbacks.on_epoch_begin(epoch_idx, epoch_logs)\n\n for batch_idx,(x_batch, y_batch) in enumerate(loader):\n batch_logs = {\n 'batch_idx': batch_idx,\n 'batch_samples': len(x_batch)\n } \n 
callbacks.on_batch_begin(batch_idx, batch_logs)\n\n inputs = Variable(x_batch)\n targets = Variable(y_batch)\n if cuda_device is not None:\n inputs = inputs.cuda(cuda_device)\n targets = targets.cuda(cuda_device)\n\n\n self._optimizer.zero_grad()\n outputs = self(inputs)\n loss = self._loss(outputs, targets)\n \n if regularizers is not None:\n reg_loss = regularizers.compute_loss()\n loss += reg_loss\n batch_logs['reg_loss'] = reg_loss\n batch_logs['loss'] = loss.data[0]\n\n # make backward pass\n loss.backward()\n # make optimizer step to update weights\n self._optimizer.step()\n\n callbacks.on_batch_end(batch_idx, batch_logs)\n constraints.on_batch_end(batch_idx)\n\n if val_loader is not None:\n val_loss = self.evaluate_loader(val_loader, \n cuda_device=cuda_device)\n epoch_logs['val_loss'] = val_loss\n epoch_logs['loss'] = self.history.loss / self.history.samples_seen\n if regularizers is not None:\n epoch_logs['reg_loss'] = self.history.reg_loss / self.history.samples_seen\n\n callbacks.on_epoch_end(epoch_idx, epoch_logs)\n constraints.on_epoch_end(epoch_idx)\n if self.stop_training:\n break\n\n callbacks.on_train_end()\n\n def predict(self, \n x, \n batch_size=32,\n cuda_device=None, \n verbose=1):\n dataset = TensorDataset(x)\n loader = DataLoader(dataset, batch_size=batch_size)\n preds = self.predict_loader(loader, \n cuda_device=cuda_device,\n verbose=verbose)\n return preds\n\n def predict_loader(self,\n loader,\n cuda_device=None,\n verbose=1):\n self.eval()\n preds = []\n for batch_idx, batch in enumerate(loader):\n if loader.dataset.has_target:\n batch = batch[0]\n x_batch = Variable(batch)\n if cuda_device is not None:\n x_batch = x_batch.cuda(cuda_device)\n batch_pred = self(x_batch)\n preds.append(batch_pred.data)\n self.train()\n return Variable(torch.cat(preds))\n\n def predict_on_batch(self, \n x, \n cuda_device=None):\n self.eval()\n x = Variable(x)\n if cuda_device is not None:\n x = x.cuda(cuda_device)\n preds = self(x)\n self.train()\n return 
preds\n\n def evaluate(self, \n x, \n y, \n batch_size=32,\n cuda_device=None, \n verbose=1):\n dataset = TensorDataset(x,y)\n loader = DataLoader(dataset, batch_size=batch_size)\n loss = self.evaluate_loader(loader, \n cuda_device=cuda_device)\n return loss\n\n def evaluate_loader(self, \n loader, \n cuda_device=None):\n self.eval()\n total_loss = 0.\n total_samples = 0.\n for batch_idx, (x_batch, y_batch) in enumerate(loader):\n x_batch = Variable(x_batch)\n y_batch = Variable(y_batch)\n if cuda_device is not None:\n x_batch = x_batch.cuda(cuda_device)\n y_batch = y_batch.cuda(cuda_device)\n\n y_pred = self(x_batch)\n loss = self._loss(y_pred, y_batch)\n total_loss += loss.data[0]*len(x_batch)\n total_samples += len(x_batch)\n self.train()\n return total_loss / total_samples\n\n def evaluate_on_batch(self, \n x, \n y, \n cuda_device=None):\n self.eval()\n x = Variable(x)\n y = Variable(y)\n if cuda_device is not None:\n x = x.cuda(cuda_device)\n y = y.cuda(cuda_device)\n y_pred = self(y)\n loss = self._loss(y_pred, y)\n self.train()\n return loss.data[0]\n\n def save_state_dict(self, file):\n \"\"\"\n Save a model parameters to disk\n \"\"\"\n # model parameters -> ordered dict\n state_dict = self.state_dict()\n torch.save(state_dict, file)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"torchsample/modules/super_module.py","file_name":"super_module.py","file_ext":"py","file_size_in_byte":9528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"473250643","text":"import os\nimport json\nfrom pathlib import Path\nfrom subprocess import PIPE, Popen\nfrom enum import Enum\n\n\nclass KomodoType(Enum):\n UNKNOWN = 0 # Probably a test\n REAL = 1\n SHIM = 2\n VENV = 3\n\n\nclass Context(object):\n def __init__(self, srcpath, dstpath=None):\n self.srcpath = Path(srcpath)\n self.dstpath = dstpath\n self.dry_run = False\n\n bin_python = self.srcpath / \"root\" / \"bin\" / \"python\"\n if not bin_python.exists():\n # Probably constructed in a test. Ignore this until sometime later\n # in the process when we will inevitably fail.\n self.type = KomodoType.UNKNOWN\n return\n\n # Get python version_info\n script = b\"import sys,json;print(json.dumps(sys.version_info[:]))\"\n env = {\"LD_LIBRARY_PATH\": \"{0}/lib:{0}/lib64\".format(self.srcpath / \"root\")}\n self.version_info = json.loads(self.invoke_srcpython(script=script, env=env))\n\n # Get python sys.path\n script = b\"import sys,json;print(json.dumps(sys.path))\"\n env = {\"LD_LIBRARY_PATH\": \"{0}/lib:{0}/lib64\".format(self.srcpath / \"root\")}\n self.src_python_paths = json.loads(\n self.invoke_srcpython(script=script, env=env)\n )\n\n # Existence of libexec suggests that this is a libexec-shim komodo release\n libexec_python = self.srcpath / \"root\" / \"libexec\" / \"python\"\n if libexec_python.exists():\n self.type = KomodoType.SHIM\n return\n\n # Existence of any libpythons suggests that this is a normal Python\n # install (compiled from sources)\n for libdir in \"lib\", \"lib64\":\n for suffix in \"\", \"m\", \"dm\":\n name = \"libpython{}.{}{}.so\".format(\n self.version_info[0], self.version_info[1], suffix\n )\n\n if (self.srcpath / \"root\" / libdir / name).exists():\n self.type = KomodoType.REAL\n return\n\n # Otherwise this is most likely a virtualenv\n self.type = KomodoType.VENV\n\n def invoke_srcpython(self, args=None, env=None, script=None):\n pyexec = self.srcpath / \"root\" / \"bin\" / \"python\"\n return self.invoke_python(pyexec, 
args, env, script)\n\n def invoke_dstpython(self, args=None, env=None, script=None):\n pyexec = self.dstpath / \"root\" / \"bin\" / \"python\"\n return self.invoke_python(pyexec, args, env, script)\n\n def invoke_python(self, python_executable, args=None, env=None, script=None):\n if args is None:\n args = []\n if env is None:\n env = {}\n if script is not None:\n # Prepend '-' to tell Python to read from stdin\n args = [\"-\"] + args\n\n python_executable = Path(python_executable)\n\n env[\"PATH\"] = \"{}:{}\".format(\n python_executable.parent.absolute(), os.environ[\"PATH\"]\n )\n env[\"LD_LIBRARY_PATH\"] = \"{0}/lib:{0}/lib64\".format(str(self.srcpath / \"root\"))\n\n args = [python_executable] + args\n proc = Popen(map(str, args), stdin=PIPE, stdout=PIPE, env=env)\n stdout, _ = proc.communicate(script)\n\n return stdout\n\n @property\n def src_python_path(self):\n return self.srcpath / \"root\" / \"bin\" / \"python\"\n\n @property\n def dst_python_libpath(self):\n libdir = \"python{}.{}\".format(self.version_info[0], self.version_info[1])\n return self.dstpath / \"root\" / \"lib\" / libdir\n","sub_path":"komodoenv/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"386650695","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 24 11:22:15 2018\n\n@author: melisandezonta\n\"\"\"\n\n\nimport os\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom Functions import *\nimport numpy as np\n\n\nos.chdir(\"/Users/melisandezonta/Documents/Documents/GTL_courses_second_semester/Computer-Vision/PS1-all/PS1-images/\")\n\n\n# Load noisy image\nimg_noisy = cv2.imread(\"ps1-input1.jpg\")\nimg_noisy_gray = cv2.cvtColor(img_noisy, cv2.COLOR_BGR2GRAY)\n\n\n# Gaussian smoothing\n\nimage_noisy_smoothed = cv2.GaussianBlur(img_noisy_gray, (3,3), 5)\n\n# Gradient Calculation\n\ngxx=cv2.Sobel(image_noisy_smoothed,cv2.CV_32FC1,1,0);\ngyy=cv2.Sobel(image_noisy_smoothed,cv2.CV_32FC1,0,1);\ntheta_test=cv2.phase(gxx,gyy,angleInDegrees=True);\n\n\n\n# Apply the Canny edge detector on the smoothed noisy image\n\nedges_noisy_smoothed_image = cv2.Canny(image_noisy_smoothed, 60, 100)\n\n\n# Calculation of the accumulator\n\n[rows, cols] = edges_noisy_smoothed_image.shape\nr_min = 20\nr_max = 30\na_min = floor(1 - r_max)\na_max = floor(rows + r_max)\nb_min = floor(1 - r_max)\nb_max = floor(cols + r_max)\na_len = a_max - a_min\nb_len = b_max - b_min\nH = Hough_Circles(edges_noisy_smoothed_image, r_min, r_max,1, a_min, a_max, b_min, b_max, a_len, b_len,theta_test)\n\n# Search for the maximums\n\nthreshold = 0.45\nmax_points = find_max(H,threshold)\nnumber_max_points = len(max_points[1])\nprint('the number of max_points is :', number_max_points)\n\n# Move back in the polar domain\n\n[rows, cols] = edges_noisy_smoothed_image.shape\nr_len = r_max -r_min\nr_hough = []\na_hough = []\nb_hough = []\ndistance = 5\nfor i in range(0,number_max_points):\n a_hough.append(int(round(a_min + max_points[0][i] * (a_max - a_min) / a_len)))\n b_hough.append(int(round(b_min + max_points[1][i] * (b_max - b_min) / b_len)))\n r_hough.append(int(round(r_min + max_points[2][i] * (r_max - r_min) / r_len)))\na_new_hough, b_new_hough, 
r_new_hough = filter_circles(a_hough,b_hough,r_hough,40.0)\ncircles = zip(a_new_hough,b_new_hough,r_new_hough)\n\n# Draw the circles on the image\n\nimg_circles = cv2.cvtColor(image_noisy_smoothed, cv2.COLOR_GRAY2BGR)\nfor a,b,r in circles:\n cv2.circle(img_circles, (a, b), r, (0,255,127), 2)\n\n# Diverse plots\n\nplt.subplot(2,2,1)\nplt.imshow(image_noisy_smoothed, cmap='gray')\nplt.draw()\ncv2.imwrite(\"ps1-5-image-noisy-smoothed.png\", image_noisy_smoothed)\n\n\nplt.subplot(2,2,2)\nplt.imshow(edges_noisy_smoothed_image, cmap='gray')\nplt.draw()\ncv2.imwrite(\"ps1-5-edges-image-noisy.png\", edges_noisy_smoothed_image)\n\nplt.subplot(2,2,3)\nplt.imshow(cv2.cvtColor(img_circles, cv2.COLOR_BGR2RGB))\nplt.draw()\ncv2.imwrite(\"ps1-5-circles.png\", img_circles)\n\nplt.show()\n","sub_path":"PS1-all/PS1-code/ps1-5.py","file_name":"ps1-5.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"258818105","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom PySide import QtGui, QtCore\nimport sys\nimport windows\nimport runScripts\nfrom vars import * \n\n\n\nclass MainWindow(QtGui.QMainWindow):\n\n def __init__(self, titile, pos_x, pos_y, \\\n width=DEFAULT_M_WINDOW_WIDTH, hight=DEFAULT_M_WINDOW_HIGHT):\n super(MainWindow, self).__init__()\n\n self.__title = titile\n self.__pos_x = pos_x\n self.__pos_y = pos_y\n self.__width = width\n self.__hight = hight\n\n self.__editConfWindow = None\n self.__netSettingsWindow = None\n \n self.initUI()\n self.createMenu()\n\n\n def initUI(self):\n self.setGeometry(\n self.__pos_x, \n self.__pos_y, \n self.__width, \n self.__hight\n )\n\n self.__textEdit = QtGui.QTextEdit()\n self.setCentralWidget(self.__textEdit)\n\n self.setWindowTitle(self.__title)\n\n\n\n def createMenu(self):\n \n self.__action_open_file = QtGui.QAction(\"open file\", self)\n self.__action_close_file = QtGui.QAction(\"close file\", self)\n self.__action_save_file = QtGui.QAction(\"save file as\", self)\n self.__action_close_app = QtGui.QAction(\"Exit\", self)\n self.__action_edit_conf = QtGui.QAction(\"edit config\", self)\n self.__action_run_scripts = QtGui.QAction(\"run this current configure\", self)\n self.__action_set_font = QtGui.QAction(\"font\", self)\n self.__action_get_settings_and_run_scripts = QtGui.QAction(\"get settings and run\", self) \n\n self.__action_edit_conf.triggered.connect(self.edit_conf)\n self.__action_run_scripts.triggered.connect(self.run_scripts)\n self.__action_get_settings_and_run_scripts.triggered.connect(self.get_settings_and_run_scripts)\n self.__action_open_file.triggered.connect(self.read_file)\n self.__action_close_file.triggered.connect(self.close_file)\n self.__action_save_file.triggered.connect(self.save_file)\n self.__action_close_app.triggered.connect(self.close_window)\n self.__action_set_font.triggered.connect(self.set_font)\n\n \n self.__menubar = self.menuBar()\n\n\n self.__file = 
self.__menubar.addMenu(\"File\") \n self.__edit_conf = self.__menubar.addMenu(\"Edit\")\n self.__view = self.__menubar.addMenu(\"View\")\n self.__run_scripts = self.__menubar.addMenu(\"Run\")\n\n\n self.__file.addAction(self.__action_open_file)\n self.__file.addAction(self.__action_close_file)\n self.__file.addAction(self.__action_save_file)\n self.__file.addAction(self.__action_close_app)\n self.__edit_conf.addAction(self.__action_edit_conf)\n self.__view.addAction(self.__action_set_font)\n self.__run_scripts.addAction(self.__action_run_scripts)\n self.__run_scripts.addAction(self.__action_get_settings_and_run_scripts)\n \n\n\n\n def edit_conf(self):\n if self.__editConfWindow is None:\n self.__editConfWindow = windows.WindowEditConf(\"the edit window\", 300, 300)\n \n self.__editConfWindow.show_window()\n\n\n def run_scripts(self):\n runScripts.runExecTasks()\n\n\n def get_settings_and_run_scripts(self):\n if self.__netSettingsWindow is None:\n self.__netSettingsWindow = windows.WindowNetSettings(\"the net settings window\", 300, 300)\n\n self.__netSettingsWindow.show_window()\n\n\n\n def read_file(self):\n fname = self.getWorkFileName()\n\n with open(fname, 'r') as file:\n text = file.read().decode(\"utf-8\")\n self.__textEdit.setText(text)\n \n\n\n def close_file(self):\n self.__textEdit.setText(\"\")\n\n\n\n def save_file(self):\n text = self.__textEdit.toPlainText()\n fname = self.getWorkFileName()\n\n with open(fname, 'w') as file:\n file.write(text)\n \n\n\n def getWorkFileName(self):\n fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '../config/reports')\n return fname\n \n\n def set_font(self):\n font, ok = QtGui.QFontDialog.getFont()\n if ok:\n self.__textEdit.setFont(font)\n\n\n def show_window(self):\n self.show()\n\n \n def close_window(self):\n QtCore.QCoreApplication.instance().quit()\n\n\n \n\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n \n mWindow = MainWindow(\n MAIN_WINDOW_TITLE,\n DEFAULT_M_WINDOW_POS_X, \n 
DEFAULT_M_WINDOW_POS_Y\n )\n\n mWindow.show_window() \n \n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()","sub_path":"GUI/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"263458259","text":"import tensorflow as tf\nimport glob\nimport cv2\nimport random\nimport numpy as np\nimport os\nimport ctypes\nimport time\n\ndef new_weights_conv(name,shape):\n return tf.get_variable(name, shape=shape, dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer_conv2d())\n\ndef new_weights_fc(name,shape):\n return tf.get_variable(name, shape=shape, dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n \ndef new_biases(length):\n return tf.Variable(tf.constant(0.05, shape=[length], dtype=tf.float32), dtype=tf.float32)\n\ndef new_conv_layer(name,input, # The previous layer.\n num_input_channels, # Num. channels in prev. layer.\n filter_size, # Width and height of each filter.\n num_filters, # Number of filters.\n use_pooling=True): # Use 2x2 max-pooling.\n\n shape = [filter_size, filter_size, num_input_channels, num_filters]\n\n # Create new weights aka. filters with the given shape.\n weights = new_weights_conv(name,shape)\n\n # Create new biases, one for each filter.\n biases = new_biases(length=num_filters)\n layer = tf.nn.conv2d(input=input,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n layer += biases\n\n # Use pooling to down-sample the image resolution?\n if use_pooling:\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n layer = tf.nn.relu(layer)\n return layer, weights\n \ndef flatten_layer(layer):\n # Get the shape of the input layer.\n layer_shape = layer.get_shape()\n num_features = layer_shape[1:4].num_elements()\n layer_flat = tf.reshape(layer, [-1, num_features])\n return layer_flat, num_features\n\n\ndef new_fc_layer(name,input, # The previous layer.\n num_inputs, # Num. inputs from prev. 
layer.\n num_outputs, use_nonlinear):\n weights = new_weights_fc(name,[num_inputs, num_outputs])\n biases = new_biases(length=num_outputs)\n\n layer = tf.matmul(input, weights) + biases\n if use_nonlinear:\n layer = tf.nn.relu(layer)\n\n return layer, weights\n\n\n# Convolutional Layer 1.\nfilter_size1 = 3\nnum_filters1 = 32\nnum_filters2 = 64\nnum_filters3 = 128\n\n\nn_classes = 15\nbatch_size = 256\nimgSize = 64\n\nx = tf.placeholder(tf.float32, [None, imgSize, imgSize])\nx_image = tf.reshape(x, [-1, imgSize, imgSize, 1])\ny = tf.placeholder(tf.float32)\nkeep_prob = tf.placeholder(tf.float32)\n\nlayer_conv1a, weights_conv1a = \\\n new_conv_layer(\"conv1a\",input=x_image,\n num_input_channels=1,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=False)\n\nlayer_conv1a1, weights_conv1a1 = \\\n new_conv_layer(\"conv1a1\",input=layer_conv1a,\n num_input_channels=num_filters1,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=True)\n\nlayer_conv1b, weights_conv1b = \\\n new_conv_layer(\"conv1b\",input=layer_conv1a1,\n num_input_channels=num_filters1,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=False)\n\nlayer_conv1b1, weights_conv1b1 = \\\n new_conv_layer(\"conv1b1\",input=layer_conv1b,\n num_input_channels=num_filters1,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=True)\n\nlayer_conv1c, weights_conv1c = \\\n new_conv_layer(\"conv1c\",input=layer_conv1b1,\n num_input_channels=num_filters1,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=False)\n\nlayer_conv1c1, weights_conv1c1 = \\\n new_conv_layer(\"conv1c1\",input=layer_conv1c,\n num_input_channels=num_filters1,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=True)\n\nlayer_flat, num_features = flatten_layer(layer_conv1c1)\n\nlayer_f, weights_f = new_fc_layer(\"fc\",input=layer_flat,\n num_inputs=num_features,\n num_outputs=n_classes,\n use_nonlinear=False)\n\ny_pred = 
tf.nn.softmax(layer_f)\ny_pred_cls = tf.argmax(y_pred, dimension=1)\n\nprint(layer_conv1a)\nprint(layer_flat)\nprint(layer_f)\n\n\n\ncorrect = tf.equal(tf.argmax(layer_f, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n\n\nsaver = tf.train.Saver()\nsave_dir = 'final_model_15_16/'\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\nsave_path = os.path.join(save_dir, 'best_model')\n\n\n\n\n\n\n\n# direct inputs\n# source to this solution and code:\n# http://stackoverflow.com/questions/14489013/simulate-python-keypresses-for-controlling-a-game\n# http://www.gamespp.com/directx/directInputKeyboardScanCodes.html\n\n\n\nSendInput = ctypes.windll.user32.SendInput\n\n\nW = 0x11\nA = 0x1E\nS = 0x1F\nD = 0x20\n\n# C struct redefinitions \nPUL = ctypes.POINTER(ctypes.c_ulong)\nclass KeyBdInput(ctypes.Structure):\n _fields_ = [(\"wVk\", ctypes.c_ushort),\n (\"wScan\", ctypes.c_ushort),\n (\"dwFlags\", ctypes.c_ulong),\n (\"time\", ctypes.c_ulong),\n (\"dwExtraInfo\", PUL)]\n\nclass HardwareInput(ctypes.Structure):\n _fields_ = [(\"uMsg\", ctypes.c_ulong),\n (\"wParamL\", ctypes.c_short),\n (\"wParamH\", ctypes.c_ushort)]\n\nclass MouseInput(ctypes.Structure):\n _fields_ = [(\"dx\", ctypes.c_long),\n (\"dy\", ctypes.c_long),\n (\"mouseData\", ctypes.c_ulong),\n (\"dwFlags\", ctypes.c_ulong),\n (\"time\",ctypes.c_ulong),\n (\"dwExtraInfo\", PUL)]\n\nclass Input_I(ctypes.Union):\n _fields_ = [(\"ki\", KeyBdInput),\n (\"mi\", MouseInput),\n (\"hi\", HardwareInput)]\n\nclass Input(ctypes.Structure):\n _fields_ = [(\"type\", ctypes.c_ulong),\n (\"ii\", Input_I)]\n\n# Actuals Functions\n\ndef PressKey(hexKeyCode):\n extra = ctypes.c_ulong(0)\n ii_ = Input_I()\n ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )\n x = Input( ctypes.c_ulong(1), ii_ )\n ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))\n\ndef ReleaseKey(hexKeyCode):\n extra = ctypes.c_ulong(0)\n ii_ = Input_I()\n ii_.ki = KeyBdInput( 0, 
hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )\n x = Input( ctypes.c_ulong(1), ii_ )\n ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))\n\ndef sliding():\n PressKey(0x38)\n PressKey(0x0F)\n time.sleep(1)\n ReleaseKey(0x0F)\n\n ret, image_np = cap.read()\n \n cv2.imshow('object detection', cv2.resize(image_np, (400,300)))\n gray_image = cv2.cvtColor(cv2.resize(image_np, (imgSize,imgSize)), cv2.COLOR_BGR2GRAY)\n t2 = time.time()\n\n result = np.argmax(y_pred.eval({x:[gray_image]})) + 1\n\n while result == 5:\n\t PressKey(0x0F)\n\t time.sleep(1)\n\t ReleaseKey(0x0F)\n\t ret, image_np = cap.read()\n\t cv2.imshow('object detection', cv2.resize(image_np, (400,300)))\n\t gray_image = cv2.cvtColor(cv2.resize(image_np, (imgSize,imgSize)), cv2.COLOR_BGR2GRAY)\n\n\t result = np.argmax(y_pred.eval({x:[gray_image]})) + 1\n\t print(result)\n\n PressKey(0x1C)\n ReleaseKey(0x38)\n ReleaseKey(0x1C)\n time.sleep(1)\n return None\n\n\ngestures = ['None', 'fist', 'thumb up', 'thumb down', \\\n 'stop', 'catch', 'swing', 'phone', 'victory', \\\n 'C', 'okay', '2 fingers', '2 fingers horiz', \\\n 'rock&roll', 'rock&roll horiz']\n\nliste = glob.glob('./image/**')\ncap = cv2.VideoCapture(0)\nt = time.time()\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess=sess, save_path=save_path)\n for elm in liste[::-10]:\n ret, image_np = cap.read()\n image_np = cv2.imread(elm)\n cv2.imshow('object detection', cv2.resize(image_np, (400,300)))\n gray_image = cv2.cvtColor(cv2.resize(image_np, (imgSize,imgSize)), cv2.COLOR_BGR2GRAY)\n t2 = time.time()\n gray_image = cv2.equalizeHist(gray_image)\n result = np.argmax(y_pred.eval({x:[gray_image]}))\n\n print(gestures[result], 1/(time.time() - t), 1/(time.time() - t2))\n \n t = time.time()\n if cv2.waitKey(50) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n 
break","sub_path":"testModel.py","file_name":"testModel.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"129752047","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\n\r\nsin = np.sin\r\ncos = np.cos\r\n\r\ndef floats(str):\r\n return list(map(float, str.split()))\r\n\r\nv_init = float(input(\"Velocidad Inicial (m/s): \"))\r\nang = float(input(\"Ángulo de disparo (radianes): \"))\r\n\r\nif v_init <= 0:\r\n print(\"Ingrese una velocidad válida para tiro parabólico.\")\r\n exit()\r\n# Parámetros iniciales.\r\n\r\nvx0 = v_init * cos(ang)\r\n# Velocidad inicial en X\r\nvy0 = v_init * sin(ang)\r\n# Velocidad inicial en Y\r\n\r\nh_fin = input(\"\"\"\r\nSi la altura final supera a la inicial, especifíquela. \\\r\nDe lo contrario, presione ENTER: \"\"\")\r\n\r\nif h_fin != '':\r\n h_init = 0\r\n h_max = vy0 ** 2 / 19.62\r\n # Altura máxima del proyectil tomando la altura inicial como 0\r\n h_fin = float(h_fin)\r\n if h_fin > h_max:\r\n print(f\"La altura final supera la altura máxima de {h_max} m.\")\r\n exit()\r\nelse:\r\n h_init = float(input(\"\\nIngrese la altura inicial(m): \"))\r\n h_fin = 0\r\n h_max = vy0 ** 2 / 19.62 + h_init # Altura máxima.\r\n\r\n# Básicamente, lo que se quiere hacer con este condicional es que el eje X de\r\n# nuestro sistema de referencia coincida con el nivel de la altura más baja.\r\n# La primera parte es el caso donde la altura final supera a la inicial, y la\r\n# segunda parte es el caso contrario.\r\n\r\ndisc = vy0 ** 2 - 19.62 * (h_fin - h_init)\r\nt_floor = (vy0 + sqrt(disc)) / 9.81\r\n# Tiempo que tarda el proyectil en caer al piso.\r\nreach = vx0 * t_floor\r\n# Alcance horizontal del proyectil.\r\n\r\ninterval = floats(input(f\"\"\"\r\nEl proyectil cae al suelo tras {t_floor} segundos. 
\\\r\nIndique un tiempo final menor o igual a éste.\r\nIngrese el intervalo [t0, tf] como t0 tf: \"\"\"))\r\n# No tendría sentido graficar más allá de este punto.\r\n\r\nt0 = interval[0]\r\ntf = interval[1]\r\n\r\nif t0 < 0 or tf < 0 or tf > t_floor:\r\n print(\"Ingrese tiempos válidos.\")\r\n exit()\r\n# Intervalo de tiempo a graficar\r\n\r\nt = np.linspace(t0, tf)\r\nlenght = range(len(t))\r\n\r\npos_x = vx0 * t\r\nvel_x = np.array([vx0 for i in lenght])\r\nacc_x = np.array([0 for i in lenght])\r\n# Movimiento en la coordenada X. (Se asume que no hay aceleración horizontal).\r\n\r\npos_y = -9.81 * t ** 2 / 2 + vy0 * t + h_init\r\nvel_y = -9.81 * t + h_init\r\nacc_y = np.array([-9.81 for i in lenght])\r\n# Movimiento en la coordenada Y. (Uniformemente acelerado por la gravedad).\r\n\r\nwith open('movimiento_x.csv', 'w') as f:\r\n f.write('Tiempo,Posición,Velocidad,Aceleración\\n')\r\n f.write('\\n'.join('{},{},{},{}'.format(t[i], pos_x[i], vel_x[i], acc_x[i])\r\n for i in lenght))\r\n# Datos del movimiento en el eje X\r\n\r\nwith open('movimiento_y.csv', 'w') as f:\r\n f.write('Tiempo,Posición,Velocidad,Aceleración\\n')\r\n f.write('\\n'.join('{},{},{},{}'.format(t[i], pos_y[i], vel_y[i], acc_y[i])\r\n for i in lenght))\r\n# Datos del movimiento en el eje Y\r\n\r\nwith open('trayectoria.csv', 'w') as f:\r\n f.write('Posición_X,Posición_Y\\n')\r\n f.write('\\n'.join('{},{}'.format(pos_x[i], pos_y[i]) for i in lenght))\r\n# Datos de la posición en X vs posición en Y.\r\n\r\nprint(f\"\"\"\r\nAltura máxima: {h_max} m.\r\nTiempo de vuelo: {t_floor} s.\r\nAlcance horizontal: {reach} m.\r\n\"\"\")\r\n# Información relevante del tiro parabólico\r\n\r\nfig1, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)\r\n# Figura para el movimiento de cada coordenada respecto al tiempo. 
Ambas\r\n# graficas están en una sola figura.\r\nfig2, ax3 = plt.subplots()\r\n# Figura para la trayectoria del proyectil.\r\n\r\nax1.plot(t, pos_x, color='#222288', label='Posición (m)')\r\nax1.plot(t, vel_x, color='#ffd700', linestyle='-.', label='Velocidad (m/s)')\r\nax1.plot(t, acc_x, color='#329932', linestyle='--', label='Aceleración (m/s²)')\r\n# Datos graficados del movimiento en X\r\n\r\nax1.set_xlabel(\"Tiempo\")\r\nax1.set_ylabel(\"Unidades respectivas\")\r\nax1.set_title(\"Movimiento en la Coordenada X\")\r\n\r\nax1.legend()\r\n\r\nax2.plot(t, pos_y, color='#222288', label='Posición (m)')\r\nax2.plot(t, vel_y, color='#ffd700', linestyle='-.', label='Velocidad (m/s)')\r\nax2.plot(t, acc_y, color='#329932', linestyle='--', label='Aceleración (m/s²)')\r\n# Datos graficados del movimiento en Y\r\n\r\nax2.set_xlabel(\"Tiempo\")\r\nax2.set_title(\"Movimiento en la Coordenada Y\")\r\n\r\nax2.legend()\r\n\r\nax3.plot(pos_x, pos_y, color='#222288', label='Posición (m)')\r\n# Grafica de la trayectoria.\r\n\r\nax3.set_xlabel(\"Desplazamiento horizontal (m)\")\r\nax3.set_ylabel(\"Altura (m)\")\r\nax3.set_title(\"Trayectoria\")\r\n\r\nax3.legend()\r\n\r\nplt.tight_layout()\r\n\r\nplt.show()\r\n\r\nfig1.savefig('Movimiento.png')\r\nfig2.savefig('Trayectoria.png')\r\n","sub_path":"Python_Scripts/Ejercicios/12/parabolico.py","file_name":"parabolico.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"313834684","text":"from sklearn.naive_bayes import MultinomialNB\n\n# sudo pip install virtualenv\n# virtualenv ENV\n# pip install numpy scipy scikit-learn\n# python classificacao.py\n\n# perna curta, rabo comprido, peludo, gordinho\nporco1 = [1, 0, 0, 1]\nporco2 = [0, 0, 1, 1]\nporco3 = [1, 0, 1, 1]\nporco4 = [1, 0, 0, 0]\ngato1 = [0, 1, 1, 0]\ngato2 = [0, 1, 0, 1]\ngato3 = [1, 1, 0, 0]\ngato4 = [0, 0, 1, 0]\n\ndados = [porco1, porco2, porco3, porco4, gato1, gato2, gato3, gato4]\nclasses = [0, 0, 0, 0, 1, 1, 1, 1]\n\nmodelo = MultinomialNB()\nmodelo.fit(dados, classes)\n\nmisterioso1 = [1, 1, 1, 0] # gato\nmisterioso2 = [1, 0, 1, 0] # porco\nmisterioso3 = [0, 0, 0, 1] # porco\ntestes = [misterioso1, misterioso2, misterioso3]\nclasses_teste = [1, 0, 0]\nanimais_teste = [\"gato\" if i == 1 else \"porco\" for i in classes_teste]\n\nresultado = modelo.predict(testes)\nanimais_resultado = [\"gato\" if i == 1 else \"porco\" for i in resultado]\n\nprint(\"Resposta correta = \" + str(classes_teste).ljust(60, ' '))\nprint(\"Animais da resposta correta = \" + str(animais_teste).ljust(60, ' ') + \"\\n\")\nprint(\"Resultado = \" + str(resultado).ljust(60, ' '))\nprint(\"Animais = \" + str(animais_resultado).ljust(60, ' '))\n\n# Calculo da taxa de acerto\ndiferencas = resultado - classes_teste\nacertos = [d for d in diferencas if d==0]\ntotal_de_acertos = len(acertos)\ntotal_de_elementos = len(testes)\n\ntaxa_de_acerto = 100.0 * total_de_acertos / total_de_elementos\nprint(\"\\nTaxa de acerto = \" + str(taxa_de_acerto))","sub_path":"classificacao.py","file_name":"classificacao.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"209280552","text":"from flask import (flash, redirect, render_template, request, url_for, abort,\n jsonify, Blueprint, Response)\nfrom flask_login import current_user\nfrom functools import wraps\nfrom urllib.parse import parse_qsl\n\nimport app.service.user_service as user_service\nfrom app import db, constants, app\nfrom app.decorators import require_role, require_membership\nfrom app.forms import init_form\nfrom app.forms.custom_form import AddRegistrationForm\nfrom app.forms.custom_form import CreateForm\nfrom app.models.custom_form import CustomForm, CustomFormResult\nfrom app.roles import Roles\nfrom app.service import role_service, custom_form_service\nfrom app.utils import copernica\nfrom app.utils.forms import flash_form_errors\nfrom app.utils.pagination import Pagination\nfrom app.utils.serialize_sqla import serialize_sqla\n\nblueprint = Blueprint('custom_form', __name__, url_prefix='/forms')\n\n\ndef require_form_access(f):\n \"\"\"\n Check whether the user has access to the form.\n\n NOTE: Assumes that the form_id is the first parameter in the view function.\n \"\"\"\n\n @wraps(f)\n def wrapper(form_id=None, *args, **kwargs):\n if form_id:\n custom_form_service. \\\n check_user_can_access_form(form_id, current_user)\n return f(form_id, *args, **kwargs)\n\n return wrapper\n\n\n@blueprint.route('/', methods=['GET', 'POST'])\n@blueprint.route('//', methods=['GET', 'POST'])\n@require_role(Roles.ACTIVITY_WRITE)\ndef view(page_nr=1):\n followed_forms = custom_form_service. \\\n get_active_followed_forms_by_user(current_user)\n active_forms = custom_form_service. \\\n get_active_unfollowed_by_user(current_user)\n archived_forms = custom_form_service. 
\\\n get_inactive_forms_by_user(current_user)\n\n archived_paginate = Pagination(page_nr, 10, len(archived_forms),\n archived_forms)\n\n can_write = role_service.user_has_role(current_user, Roles.ACTIVITY_WRITE)\n return render_template('custom_form/overview.htm',\n followed_forms=followed_forms,\n active_forms=active_forms,\n archived_paginate=archived_paginate,\n page_nr=page_nr,\n can_write=can_write)\n\n\n@blueprint.route('/view/', methods=['GET', 'POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef view_single(form_id=None):\n custom_form = custom_form_service.get_form_by_form_id(form_id)\n\n results = []\n entries = custom_form_service.get_form_entries_by_form_id(form_id)\n\n from urllib.parse import unquote_plus\n from urllib.parse import parse_qs\n\n attendants = 0\n for entry in entries:\n # Hide form entries from non existing users\n data = parse_qs(entry.data)\n\n # Add the entry date\n time = entry.created.strftime(constants.DT_FORMAT) if \\\n entry.created is not None else \"\"\n\n # Get the total number of attendants including extra attendees\n attendants = attendants + 1 + entry.introductions\n\n # Append the results with a single entry\n results.append({\n 'id': entry.id,\n 'owner': entry.owner,\n 'data': data,\n 'has_paid': entry.has_paid,\n 'time': time,\n 'introductions': entry.introductions,\n 'is_reserve': attendants > custom_form.max_attendants\n })\n\n custom_form.results = results\n\n can_update_paid = role_service.\\\n user_has_role(current_user, Roles.FINANCIAL_ADMIN)\n\n add_registration_form = AddRegistrationForm()\n\n return render_template('custom_form/view_results.htm',\n add_registration_form=add_registration_form,\n custom_form=custom_form,\n xps=CustomForm.exports,\n unquote_plus=unquote_plus,\n can_update_paid=can_update_paid)\n\n\n@blueprint.route('/export//', methods=['POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef export(form_id):\n # Create the headers\n xp = CustomForm.exports\n 
xp_names = list(xp.keys())\n names = list(request.form.keys())\n\n form = custom_form_service.get_form_by_form_id(form_id)\n\n # First create a list of key based dictionaries to gather\n # all the different keys in the form\n csv_rows = []\n for r in form.custom_form_results:\n\n data = {}\n\n for name in xp_names:\n if name not in names:\n continue\n\n # Split the custom part of the form in different columns\n if name is 'form':\n # Data from the custom form is saved in querystring format\n export = xp[name]['export'](r)\n qs_dict = dict(parse_qsl(export, keep_blank_values=True))\n data.update(qs_dict)\n continue\n else:\n export = xp[name]['export']\n data.update({name: export(r)})\n\n csv_rows.append(data)\n\n # Calculate all the labels in the csv_rows\n label_set = set()\n for i in csv_rows:\n label_set.update(list(i.keys()))\n\n from io import StringIO\n from csv import DictWriter\n\n # Write all the values to the io field\n str_io = StringIO()\n wrt = DictWriter(str_io, fieldnames=label_set)\n wrt.writeheader()\n wrt.writerows(csv_rows)\n\n def generate():\n yield str_io.getvalue()\n\n return Response(generate(), mimetype='text/csv')\n\n\n@blueprint.route('/create/', methods=['GET', 'POST'])\n@blueprint.route('/edit/', methods=['GET', 'POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef create(form_id=None):\n if form_id:\n custom_form = custom_form_service.get_form_by_form_id(form_id)\n prev_max = custom_form.max_attendants\n else:\n custom_form = CustomForm()\n\n form = init_form(CreateForm, obj=custom_form)\n\n if request.method == 'POST':\n custom_form.name = form.name.data\n custom_form.origin = form.origin.data\n custom_form.group = form.group.data\n custom_form.html = form.html.data\n custom_form.msg_success = form.msg_success.data\n custom_form.max_attendants = form.max_attendants.data\n custom_form.introductions = form.introductions.data\n if form.price.data is None:\n form.price.data = 0.0\n custom_form.price = form.price.data\n 
custom_form.terms = form.terms.data\n custom_form.requires_direct_payment = form.requires_direct_payment.data\n\n if form_id:\n flash('You\\'ve updated a form successfully.', 'success')\n cur_max = int(custom_form.max_attendants)\n # print(\"Current maximum: \" + cur_max)\n # print(\"Previous maximum: \" + prev_max)\n # print(\"Current submissions: \" + len(all_sub))\n if cur_max > prev_max:\n all_sub = CustomFormResult.query.filter(\n CustomFormResult.form_id == form_id\n ).all()\n # Update for users that were on the reserve list that they\n # can now attend.\n if prev_max < len(all_sub):\n for x in range(prev_max, min(cur_max, len(all_sub))):\n sub = all_sub[x]\n copernica_data = {\n \"Reserve\": \"Nee\"\n }\n copernica.update_subprofile(\n app.config['COPERNICA_ACTIVITEITEN'], sub.owner_id,\n sub.form_id, copernica_data)\n elif cur_max < prev_max:\n all_sub = CustomFormResult.query.filter(\n CustomFormResult.form_id == form_id\n ).all()\n if cur_max < len(all_sub):\n for x in range(cur_max, max(prev_max, len(all_sub) - 1)):\n sub = all_sub[x]\n copernica_data = {\n \"Reserve\": \"Ja\"\n }\n copernica.update_subprofile(\n app.config['COPERNICA_ACTIVITEITEN'], sub.owner_id,\n sub.form_id, copernica_data)\n\n db.session.add(custom_form)\n db.session.commit()\n\n if form_id is None:\n flash('You\\'ve created a form successfully.', 'success')\n custom_form_service.follow_form(\n form=custom_form, user_id=current_user.id)\n\n return redirect(url_for('custom_form.view'))\n else:\n flash_form_errors(form)\n\n return render_template('custom_form/create.htm', form=form)\n\n\n@blueprint.route('/remove///', methods=['POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef remove_response(form_id=None, submission_id=None):\n\n response = \"success\"\n\n # Test if user already signed up\n submission = custom_form_service.\\\n get_form_submission_by_id(form_id, submission_id)\n\n form_id = submission.form_id\n max_attendants = submission.form.max_attendants\n\n 
db.session.delete(submission)\n db.session.commit()\n\n all_sub = custom_form_service.get_form_entries_by_form_id(form_id)\n\n if max_attendants <= len(all_sub):\n from_list = all_sub[max_attendants - 1]\n copernica_data = {\n \"Reserve\": \"Nee\"\n }\n copernica.update_subprofile(\n app.config['COPERNICA_ACTIVITEITEN'], from_list.owner_id,\n from_list.form_id, copernica_data)\n\n return response\n\n\n# Ajax method\n@blueprint.route('/submit/', methods=['POST'])\n@require_membership\ndef submit(form_id=-1):\n # TODO make sure custom_form rights are set on server\n response = \"success\"\n\n custom_form = custom_form_service.find_form_by_form_id(form_id)\n if not custom_form:\n return \"error\", 404\n\n print(custom_form.submittable_by(current_user))\n if not custom_form.submittable_by(current_user):\n return \"error\", 403\n\n if role_service.user_has_role(current_user, Roles.ACTIVITY_WRITE) and \\\n 'user_id' in request.form:\n user_id = int(request.form['user_id'])\n user = user_service.find_by_id(user_id)\n else:\n user = current_user\n\n # These fields might be there\n try:\n if request.form['phone_nr']:\n user.phone_nr = request.form['phone_nr']\n\n if request.form['noodnummer']:\n user.emergency_phone_nr = request.form['noodnummer']\n\n if request.form['shirt_maat']:\n user.shirt_size = request.form['shirt maat']\n\n if request.form['dieet[]']:\n user.diet = ', '.join(request.form['dieet[]'])\n\n if request.form['allergie/medicatie']:\n user.allergy = request.form['allergie/medicatie']\n\n if request.form['geslacht']:\n user.gender = request.form['geslacht']\n except Exception:\n pass\n\n # Test if current user already signed up\n duplicate_test = custom_form_service.find_form_submission_by_user_id(\n form_id, user.id)\n\n if duplicate_test:\n result = duplicate_test\n result.data = request.form['data']\n response = \"edit\"\n else:\n entries = custom_form_service.get_form_entries_by_form_id(form_id)\n num_attendants = sum(entry.introductions + 1 for entry in 
entries)\n num_introduce = min(int(request.form.get('introductions', 0)),\n custom_form.introductions)\n\n result = CustomFormResult(user.id, form_id,\n request.form['data'],\n has_paid=False,\n introductions=num_introduce)\n\n # Check if number attendants allows another registration\n if num_attendants >= custom_form.max_attendants:\n # Create \"Reserve\" signup\n response = \"reserve\"\n elif num_introduce > custom_form.introductions:\n response = \"edit\"\n else:\n copernica_data = {\n \"Naam\": custom_form.name,\n \"Betaald\": result.has_paid,\n \"Bedrag\": custom_form.price,\n \"viaductID\": form_id,\n \"Reserve\": \"Ja\" if response is \"reserve\" else \"Nee\",\n }\n copernica.add_subprofile(app.config['COPERNICA_ACTIVITEITEN'],\n user.id, copernica_data)\n\n db.session.add(user)\n db.session.commit()\n\n db.session.add(result)\n db.session.commit()\n\n return response\n\n\n@blueprint.route('/follow//', methods=['GET', 'POST'])\n@blueprint.route('/follow///',\n methods=['GET', 'POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef follow(form_id, page_nr=1):\n following = custom_form_service.toggle_form_follow(\n form_id=form_id, user_id=current_user.id)\n\n if following:\n flash('Formulier gevolgd', 'success')\n else:\n flash('Formulier ontvolgd', 'success')\n return redirect(url_for('custom_form.view', page_nr=page_nr))\n\n\n@blueprint.route('/archive//', methods=['GET', 'POST'])\n@blueprint.route('/archive///',\n methods=['GET', 'POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef archive(form_id, page_nr=1):\n custom_form_service.form_set_archive_status(form_id, True)\n\n flash('Formulier gearchiveerd', 'success')\n\n return redirect(url_for('custom_form.view', page_nr=page_nr))\n\n\n@blueprint.route('/unarchive//', methods=['GET', 'POST'])\n@blueprint.route('/unarchive///',\n methods=['GET', 'POST'])\n@require_role(Roles.ACTIVITY_WRITE)\n@require_form_access\ndef unarchive(form_id, page_nr=1):\n 
custom_form_service.form_set_archive_status(form_id, False)\n\n flash('Formulier gede-archiveerd', 'success')\n\n return redirect(url_for('custom_form.view', page_nr=page_nr))\n\n\n# Ajax endpoint\n@blueprint.route('/has_paid///',\n methods=['POST'])\n@require_role(Roles.FINANCIAL_ADMIN)\n@require_form_access\ndef has_paid(form_id=None, submission_id=None):\n\n custom_form_service.toggle_form_submission_paid(form_id, submission_id)\n\n return \"success\"\n\n\n@blueprint.route('/loader//', methods=['GET'])\ndef loader(current):\n try:\n current = int(current)\n except ValueError:\n return abort(404)\n\n return jsonify(forms=serialize_sqla(CustomForm.aslist(current)))\n","sub_path":"app/views/custom_form.py","file_name":"custom_form.py","file_ext":"py","file_size_in_byte":14613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"329955494","text":"\"\"\"\n Implementation of tractable approximation to the posterior covariance composed\n of tractable circulant and diagonal matrix operations only.\n\"\"\"\n\nfrom common import *\nfrom mvn import *\nimport gpvi\nimport cov, gpexact\nimport autograd.numpy as np\nimport matplotlib.pyplot as plt\nimport autograd.numpy.random as rng\nfrom cg_muq import ConjugateGradients as cg\n\nfrom optalg import GradientAscent, AdaGrad, AdaDelta, Adam\nfrom autograd import grad\nfrom DataStream import DataStream\nfrom DataGrid import DataGrid\nfrom scipy.linalg import cholesky as chol\nfrom scipy.linalg import solve_triangular as solve_tri\nimport copy\n\n\n\n# Compute initial variational parameters. M is the number of pseudo-data.\n#\n# Inputs:\n# M - the number of pseudo-data.\n# phi_h - existing dictionary of parameters. Created if not provided.\n#\n# Outputs:\n# phi_h - dictionary of parameters, transformed to be real-valued.\n#\ndef InitVariationalParameters(M, phi_h={}):\n phi_h['m_q'] = np.zeros(M)\n phi_h['w_h'] = BoundedToReal(0.99 * np.ones(M), lb=0.0, ub=1.0)\n phi_h['eps_h'] = BoundedToReal(1e-4 * np.ones(1), lb=0.0)\n return phi_h\n\n\n\n# Compute the parameters.\n#\n# Inputs:\n# phi - dictionary of parameters. Should be unbounded.\n#\n# Outputs:\n# m_q - mean of approximate posterior distribution.\n# w - diagonal of de-correlation matrix.\n# eps - auxilliary noise parameter.\n#\ndef ComputeVariationalParameters(phi_h):\n return phi_h['m_q'],\\\n RealToBounded(phi_h['w_h'], lb=0.0, ub=1.0),\\\n RealToBounded(phi_h['eps_h'], lb=0.0)\n\n\n\n# Compute the gradient w.r.t. the mean mu_q.\n#\n# Inputs:\n# phi_h - transformed variational parameters.\n# Z_grid - inducing point locations object.\n# datastream - DataStream object to provide access to mini-batches.\n# fcov - covariance function.\n# s2n_h - noise parameter s2n := log(1 + exp(s2nh)). (scalar)\n#\n# Outputs:\n# dmu_q - grad w.r.t. 
mu_q.\n#\ndef gradmuq(phi_h, Z_grid, datastream, fcov, s2n_h):\n\n # Get nex inputs and outputs from the data-stream object.\n Z, M, N = Z_grid.data, Z_grid.M, datastream.N\n X, y, L = datastream.GetNextMiniBatch()\n\n # Pre-compute invariant qtts.\n m_q = phi_h['m_q']\n eps = RealToBounded(phi_h['eps_h'], lb=0.0)\n Gamma = ComputeSpectrum(fcov, Z) + eps\n s2n = RealToBounded(s2n_h, lb=0.0)\n K0, KXZ = fcov(np.zeros(1)).flatten(), fcov(X, Z)\n\n # Compute gradient.\n tmp = np.real(fft(ifft(m_q) / Gamma))\n R = N / np.float64(L)\n delta = np.real(y - np.dot(KXZ, fft(ifft(m_q) / Gamma)))\n out = R * np.real(fft(ifft(np.dot(KXZ.T, delta)) / Gamma) / s2n) - tmp\n\n return { 'm_q' : out }\n\n\n\n# Compute the gradient w.r.t. the mean mu_q in Fourier domain. Hopefully this\n# will result in information sharing between inputs, resulting in more stable\n# convergence under optimisation. It's also faster at inference time as it\n# requires a couple fewer FFT operations.\n#\n# Inputs:\n# phi_h - transformed variational parameters.\n# Z_grid - inducing point locations object.\n# datastream - DataStream object to provide access to mini-batches.\n# fcov - covariance function.\n# s2n_h - noise parameter s2n := log(1 + exp(s2nh)). (scalar)\n#\n# Outputs:\n# dmu_q - grad w.r.t. 
mu_q.\n#\ndef gradmuqtilde(phi_h, Z_grid, datastream, fcov, s2n_h):\n\n # Get nex inputs and outputs from the data-stream object.\n Z, M, N = Z_grid.data, Z_grid.M, datastream.N\n X, y, L = datastream.GetNextMiniBatch()\n\n # Pre-compute invariant qtts.\n m_q = phi_h['m_q']\n eps = RealToBounded(phi_h['eps_h'], lb=0.0)\n Gamma = ComputeSpectrum(fcov, Z) + eps\n s2n = RealToBounded(s2n_h, lb=0.0)\n K0, KXZ = fcov(np.zeros(1)).flatten(), fcov(X, Z)\n\n # Compute gradient.\n R = N / np.float64(L)\n delta = y - np.dot(KXZ, np.real(fft(m_q / Gamma)))\n out = R * (fft(np.dot(KXZ.T, delta)) / s2n - m_q) / Gamma\n return { 'm_q' : out }\n\n\n# Compute the elbo given mean in fourier domain.\ndef elbo_mu_tilde(phi_h, Z_grid, datastream, fcov, s2n_h, check=False):\n phi_h_2 = copy.deepcopy(phi_h)\n phi_h_2['m_q'] = fft(phi_h['m_q'])\n return elbo_mu(phi_h_2, Z_grid, datastream, fcov, s2n_h, check=False)\n\n\n# Compute the elbo (lower bound on the log marginal probability). See\n# dissertation for explanation of the various opaquely-named quantities.\n#\n# Inputs:\n# phi_h - transformed variational parameters.\n# datastream - DataStream object to provide access to data.\n# Z_grid - inducing point locations. (M)\n# fcov - covariance function handle. (handle)\n# s2n_h - noise parameter s2n := log(1 + exp(s2nh)). 
(scalar)\n#\n# Outputs:\n# elbo - noisy estimate of the elbo.\n#\ndef elbo_mu(phi_h, Z_grid, datastream, fcov, s2n_h, check=False):\n\n # Get nex inputs and outputs from the data-stream object.\n Z, M, N = Z_grid.data, Z_grid.M, datastream.N\n X, y, L = datastream.GetNextMiniBatch()\n\n # Pre-compute invariant qtts.\n m_q, w, eps = ComputeVariationalParameters(phi_h)\n Gamma = ComputeSpectrum(fcov, Z) + eps\n s2n = RealToBounded(s2n_h, lb=0.0)\n K0, KXZ = fcov(np.zeros(1)).flatten(), fcov(X, Z)\n\n # Add likelihood expectation term using the last draws.\n mu = np.dot(KXZ, fft(ifft(m_q) / Gamma))\n elbo = lpdfDiagNormal(y, mu, np.array([s2n]), L) * N / np.float(L)\n\n # Subtract KL-divergence term.\n elbo -= 0.5 * np.sum(sqrabs(ifft(m_q)) / Gamma)\n return elbo\n\n\n\n# Compute the elbo (lower bound on the log marginal probability). See\n# dissertation for explanation of the various opaquely-named quantities.\n#\n# Inputs:\n# phi_h - transformed variational parameters.\n# datastream - DataStream object to provide access to data.\n# Z_grid - inducing point locations. (M)\n# fcov - covariance function handle. (handle)\n# s2n_h - noise parameter s2n := log(1 + exp(s2nh)). 
(scalar)\n#\n# Outputs:\n# elbo - noisy estimate of the elbo.\n#\ndef elbo(phi_h, Z_grid, datastream, fcov, s2n_h):\n\n # Get nex inputs and outputs from the data-stream object.\n Z, M, N = Z_grid.data, Z_grid.M, datastream.N\n X, y, L = datastream.GetNextMiniBatch()\n\n # Pre-compute invariant qtts.\n m_q, w, eps = ComputeVariationalParameters(phi_h)\n Gamma = ComputeSpectrum(fcov, Z) + eps\n s2n = RealToBounded(s2n_h, lb=0.0)\n K0, KXZ = fcov(np.zeros(1)).flatten(), fcov(X, Z)\n\n # Draw sample from approximate posterior.\n u_tilde = ifft(m_q + w * rndCircNormal(0.0, Gamma))\n\n # Add on cross-ent term.\n M = Z.shape[0]\n elbo = lpdfDiagNormal(u_tilde, 0.0, Gamma, M)\n\n # Add likelihood expectation term using the last draws.\n mu = np.dot(KXZ, fft(ifft(m_q) / Gamma))\n elbo += lpdfDiagNormal(y, mu, np.array([s2n]), L) * N / L\n\n # Subtract trace term - Monte Carlo approximation used.\n Q = rng.randn(L, 1)\n GammaT = np.reshape(Gamma, [M, 1])\n R = ifft(np.reshape(w, [M, 1]) * fft(ifft(np.dot(KXZ.T, Q)) / GammaT))\n elbo -= 0.5 * N * np.sum(np.sum(sqrabs(R), 1) * Gamma) / (s2n * L)\n\n # Subtract penalty trace term.\n cov_power = N * np.sum(sqrabs(fft(KXZ, axis=1)), 0) / L\n elbo -= 0.5 * (N * K0 - np.sum(cov_power / Gamma)) / s2n\n\n # Compute tractable bit of the elbo (entropy of r).\n elbo += HDiagNormal(Gamma) + np.sum(np.log(w))\n return elbo\n\n\n\n# Compute the root mean squared error (rmse) between the outputs from the\n# observation pairs in (X,y) and the posterior predictive mean under the\n# circulant approximation.\n#\n# Inputs:\n# phi_h - transformed variational parameters.\n# datastream - DataStream object to provide access to data.\n# Z_grid - inducing point locations. (M)\n# fcov - covariance function handle. (handle)\n# s2n_h - noise parameter s2n := log(1 + exp(s2nh)). (scalar)\n#\n# Outputs:\n# rmse - the root mean squared error. 
(scalar)\n#\ndef rmse_circ(phi_h, Z_grid, datastream, fcov):\n\n # Get next inputs and outputs from the data-stream object.\n Z = Z_grid.data\n X, y, L = datastream.GetNextMiniBatch()\n\n # Pre-compute invariant qtts.\n m_q, w, eps = ComputeVariationalParameters(phi_h)\n Gamma = ComputeSpectrum(fcov, Z) + eps\n\n # Compute mean predictions and compare with observation outputs.\n mu = np.dot(fcov(X, Z), np.real(fft(ifft(m_q) / Gamma)))\n return rmse(y, mu)\n\n\n\n# Compute the posterior mean and marginal variance at each input in X.\n#\n# Inputs:\n# phi_h - transformed variational parameters.\n# Z_grid - DataGrid object containing pseudo-data inputs.\n# X - inputs. (N)\n# fcov - covariance function handle. (handle)\n#\n# Outputs:\n# mu - posterior mean estimates at X.\n# s2 - posterior variance estimates at X.\n#\ndef posterior(phi_h, Z_grid, X, fcov):\n\n # Pre-compute invariant qtts.\n m_q, w, eps = ComputeVariationalParameters(phi_h)\n Gamma = ComputeSpectrum(fcov, Z_grid.data) + eps\n Ktilde, Kself = ifft(fcov(Z_grid.data, X)), fcov(np.zeros(1), diag=True)\n\n # Compute posterior mean.\n mu = np.real(np.dot(np.conj(Ktilde.T), ifft(m_q) / Gamma))\n\n # Compute marginal posterior variance.\n M, N = Z_grid.data.shape[0], X.shape[0]\n GammaT, wT = np.reshape(Gamma, [-1, 1]), np.reshape(w, [-1, 1])\n s2 = Kself - np.sum(sqrabs(Ktilde) / GammaT, 0) +\\\n np.sum(sqrabs(ifft(wT * fft(Ktilde / GammaT))) * GammaT, 0)\n return mu, s2\n\n\n\n# Knowing the kernel parameters, infer distribution over latent variables.\n#\n# Inputs:\n# fcov - covariance function, implicit parameters.\n# data_stream - DataStream object that containing the data.\n# Z_grid - DataGrid object containing the pseudo-data input locations.\n# phi_h - initial variational parameters. See InitVariationalParameters for\n# requirements.\n# s2n_h - observation noise. 
(1), (>0)\n# iters - number of iterations.\n# infalg - inference algorithm function handle.\n# collect_delta - number of iterations to wait between collecting elbo.\n# qtt_dict - dictionary mapping to function handles which compute scalar\n# quantities of interest as a function of the model parameters.\n#\n# Outputs:\n# elbos - logged elbos.\n#\ndef infer_mu(fcov, datastream, Z_grid, phi_h, iters, inf_alg, collect_delta,\\\n qtt_dict):\n\n # Define objective function and inference routine.\n dL = lambda phi_h : gradmuq(phi_h, Z_grid, datastream, fcov, phi_h['s2n_h'])\n inf_obj = inf_alg(dL, phi_h)\n\n # Define object to collect quantities of interest throughout iteration.\n out_dict = {}\n J, j = iters / collect_delta, 0\n for key in qtt_dict.keys():\n out_dict[key] = np.empty(J)\n\n # Iterate for the specified amount of time.\n updates = dict({'m_q' : None })\n elbos = np.empty(J)\n for itr in range(iters):\n inf_obj.update(phi_h, updates)\n if itr % collect_delta == 0:\n for key in qtt_dict.keys():\n out_dict[key][j] = qtt_dict[key](phi_h)\n print(str(j) + '/' + str(J))\n j += 1\n return out_dict\n\n\n\ndef main():\n\n # Define pseudo-data.\n lb, ub, N, s2n = 0.0, 10.0, 750, 1e-3\n extend, M, collect_delta = 5.0, 75, 25\n Z = DataGrid(lb - extend, ub + extend, M)\n\n # Define the covariance matrix.\n print('Define covariance function.')\n pars = cov.init_eq(s2=1.0, l2=1.0)\n delta = (ub - lb + 2 * extend)\n fcov = cov.circ_factory(cov.eq, delta, pars)\n\n # Generate some data and define streaming object.\n print('Generate toy data.')\n rng.seed(15485863)\n X1 = rng.uniform(low=lb, high=ub * 3.0/10.0, size=N / 2)\n X2 = rng.uniform(low=ub * 5.0/10.0, high=ub, size=N / 2)\n X = rng.permutation(np.hstack([X1, X2]))\n y = rndFullNormal(np.zeros(N), fcov(X, X) + s2n * np.eye(N))\n D = DataStream(N, X, y)\n\n # Define initial variational parameters and inference routine.\n print('Define variational parameters and inference.')\n phi_h = InitVariationalParameters(M)\n 
phi_h['s2n_h'] = BoundedToReal(s2n, lb=0.0)\n\n # Define dictionary of quantities to compute throughout inference.\n qtt_dict = {}\n qtt_dict['rmse'] = lambda phi_h : rmse_circ(phi_h, Z, D, fcov)\n qtt_dict['elbo'] = lambda phi_h : elbo_mu(phi_h, Z, D, fcov, phi_h['s2n_h'])\n\n # Perform inference.\n print('Perform inference.')\n phi_h_2 = copy.deepcopy(phi_h)\n D_ag = DataStream(25, X, y)\n df = lambda phi_h : gradmuq(phi_h, Z, D_ag, fcov, phi_h['s2n_h'])\n opt = AdaGrad(df, 2.0, phi_h)\n out_dict_ag = gradopt(phi_h, opt, 3000, 10, qtt_dict, updates={'m_q' : None})\n \"\"\"out_dict_ag =\\\n infer_mu(fcov, D_ag, Z, phi_h, 1000, inf_alg, 10, qtt_dict)\"\"\"\n out_dict_cg = cg(phi_h_2, Z.data, D, fcov, 100, 1, qtt_dict)\n\n # Plot rmse.\n plt.plot(out_dict_cg['rmse'], 'b', out_dict_ag['rmse'], 'g')\n plt.title('rmse')\n plt.figure()\n plt.plot(out_dict_cg['elbo'], 'b', out_dict_ag['elbo'], 'g')\n plt.title('elbo')\n\n # Define locations at which to make posterior predictions and compute under\n # circular structured posterior.\n Xhat = np.linspace(lb - extend, ub + extend, 1500)\n mu_ag, s2_ag = posterior(phi_h, Z, Xhat, fcov)\n mu_cg, s2_cg = posterior(phi_h_2, Z, Xhat, fcov)\n\n # Compute posterior approximation exactly and check that gradient is 0 at the\n # optimum.\n eps, s2n_h = RealToBounded(phi_h['eps_h'], lb=0.0), phi_h['s2n_h']\n ex_mu_q, ex_Sigma_q, elbo = gpvi.ApproxPosterior(fcov, X, y, Z.data, s2n_h)\n mu_ex, s2_ex = gpvi.PostPredict(Z.data, ex_mu_q, ex_Sigma_q, Xhat, fcov)\n\n # Plot the data, approximate circulant posterior and true posterior via VFE.\n plt.figure()\n plt.plot(Xhat, mu_ag, 'g')\n plt.plot(Xhat, mu_cg, 'b')\n plt.plot(Xhat, mu_ex, 'r')\n plt.plot(X, y, 'kx')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exp/circgp/optimise_mean.py","file_name":"optimise_mean.py","file_ext":"py","file_size_in_byte":13025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"99945335","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.contrib.sites.models import Site\nfrom django.db.models import Q\n\nfrom machina.core.db.models import get_model\nfrom machina.core.loading import get_class\n\n\nPost = get_model('forum_conversation', 'Post')\nNotificationEmail = get_class('forum_member.emails', 'NotificationEmail')\n\n\ndef send_notifications(email_class=None, context=None):\n \"\"\"\n Send notification on email to the user that subscribe on topics.\n \"\"\"\n email_class = email_class or NotificationEmail\n email = email_class()\n\n if not context:\n context = {}\n\n posts = Post.objects.filter(\n approved=True,\n notifications_sent=False,\n ).select_related('topic__forum')\n for post in posts:\n users = post.topic.subscribers.filter(\n ~Q(id=post.poster_id),\n forum_profile__notify_subscribed_topics=True,\n email__isnull=False\n )\n\n for user in users:\n email_context = context.copy()\n email_context.update({\n 'user': user,\n 'post': post,\n 'topic': post.topic,\n 'current_site': Site.objects.get_current(),\n })\n email.send([user.email], email_context)\n\n post.notifications_sent = True\n post.save()\n","sub_path":"machina/apps/forum_member/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"527960515","text":"import os\nimport time\nimport pickle\nimport cv2\nimport threading\n\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nimport threading\nimport grpc\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nimport sys\nsys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/video-captioning-serving/')\n\nfrom modules_video_cap.video_cap_vgg16_rim import CapVGG16\nfrom modules_video_cap.video_cap_alexnet_rim import CapAlexnet\nfrom modules_video_cap.video_cap_s2vt_rim import CapS2VT\n\nvgg = CapVGG16()\nvgg.Setup()\n\nalexnet = CapAlexnet()\nalexnet.Setup()\n\n# first = vgg\nfirst = alexnet\n\ns2vt =CapS2VT()\ns2vt.Setup()\n\nichannel = grpc.insecure_channel(\"localhost:8500\")\nistub = prediction_service_pb2_grpc.PredictionServiceStub(ichannel)\n\nvideo_path = \"/home/yitao/Documents/fun-project/tensorflow-related/video-captioning-serving/inputs/vid264.mp4\"\nreader = cv2.VideoCapture(video_path)\n\nframe_id = 1\n\nfeatures_fc7 = []\nmy_lock = threading.Lock()\n\ntotal = 0.0\ncount = 0\n\nwhile (frame_id < 250):\n start = time.time()\n\n _, image = reader.read()\n\n request = dict()\n\n request[\"client_input\"] = image\n\n first.PreProcess(request = request, istub = istub, features_fc7 = features_fc7, my_lock = my_lock, grpc_flag = False)\n first.Apply()\n next_request = first.PostProcess(grpc_flag = False)\n\n # print(next_request[\"vgg_output\"])\n\n s2vt.PreProcess(request = next_request, istub = istub, grpc_flag = False)\n s2vt.Apply()\n next_request = s2vt.PostProcess(grpc_flag = False)\n\n # if (next_request[\"FINAL\"] != \"None\"):\n # print(next_request[\"FINAL\"])\n\n end = time.time()\n\n duration = end - start\n print(\"duration = %f\" % duration)\n if (frame_id > 5):\n count += 1\n total += duration\n\n frame_id += 1\n\nprint(\"on average, it takes %f sec per frame\" % (total / 
count))","sub_path":"tomTest/cap_rim.py","file_name":"cap_rim.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"159124513","text":"'''\n色彩空间之间的颜色互换\n'''\nimport cv2 as cv\nimport numpy as np\n'''\ndef extrace_obj_demo():\n capture=cv.VideoCapture(\"../data/vedo1.mp4\")\n while True:\n ret,frame=capture.read()\n if ret==False:\n break;\n hsv=cv.cvtColor(frame,cv.COLOR_BGR2HSV)\n lower_hsv=np.array([37,43,46])\n upper_hsv=np.array([77,255,255])\n mask=cv.inRange(hsv,lowerb=lower_hsv,upperb=upper_hsv)\n cv.bitwise_and(frame,frame,mask=mask)\n cv.imshow(\"video\",frame)\n cv.imshow(\"mask\",mask)\n c=cv.waitKey(40)\n if c==27:\n break;\n'''\ndef color_dpace_demo(image):\n gray=cv.cvtColor(image,cv.COLOR_BGR2GRAY)\n cv.imshow(\"gray\",gray)\n hsv= cv.cvtColor(image, cv.COLOR_BGR2HSV)\n cv.imshow(\"hav\", hsv)\n yuv = cv.cvtColor(image, cv.COLOR_BGR2YUV)\n cv.imshow(\"yuv\", yuv)\n Ycrcb= cv.cvtColor(image, cv.COLOR_BGR2YCrCb)\n cv.imshow(\"Ycrcb\", Ycrcb)\n\n\nprint(\"**\"*20)\n\nsrc=cv.imread(\"../data/5.jpg\")\ncv.namedWindow(\"input image\",cv.WINDOW_AUTOSIZE)\ncv.imshow(\"input image\",src)\nprint(src.shape)\nt1=cv.getTickCount()\ncolor_dpace_demo(src)\nt2=cv.getTickCount()\n\ntime=(t2-t1)/cv.getTickFrequency()\n\nprint(\"time:%s ms\"%(time*1000))\n\ncv.waitKey(0)\n\ncv.destroyAllWindows()","sub_path":"lq-Project/lq17.py","file_name":"lq17.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"581770006","text":"from unittest.mock import MagicMock\n\nfrom src import quotes\nfrom src.data_provider import DataProvider\n\n\ndef test_quote_should_be_none_for_empty_list():\n empty_data = DataProvider('')\n empty_data.get_all = MagicMock(return_value=[])\n quote = quotes.random_quote(empty_data)\n assert quote is None\n\n\ndef test_quote_should_be_in_array():\n fake_data = DataProvider('')\n qts = ['quote' + str(x) for x in range(5)]\n fake_data.get_all = MagicMock(return_value=qts)\n quote = quotes.random_quote(fake_data)\n assert quote in qts\n","sub_path":"tests/test_unit_quotes.py","file_name":"test_unit_quotes.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"78764106","text":"#Thomas Thorpe\r\n#08/01/15\r\n#Lists Stretch and Challenge Task 1\r\n\r\ndef option_one (band_names):\r\n item = input(\"Please enter the band to add: \")\r\n print (\" \")\r\n band_names.append(item)\r\n return band_names\r\n \r\ndef option_two (band_names):\r\n item = \"oisdfhoahiefnowifnehoiwdnfheowa\"\r\n while item not in band_names:\r\n item = input(\"Please enter the band to delete: \")\r\n print (\" \")\r\n band_names.remove(item)\r\n return band_names\r\n \r\ndef option_three (band_names):\r\n position = -1\r\n while position < 1 or position > len(band_names):\r\n position = int(input(\"Please enter the number of item to delete: \"))\r\n print (\" \")\r\n band_names.pop(position - 1)\r\n return band_names\r\n\r\ndef option_four (band_names):\r\n item = input(\"Please enter the band to add: \")\r\n position = int(input(\"Please enter the number to insert before: \"))\r\n print (\" \")\r\n band_names.insert(position - 1, item)\r\n return band_names\r\n\r\ndef option_five (band_names):\r\n position = -1\r\n while position < 1 or position > len(band_names):\r\n position = int(input(\"Please enter the number to change: \"))\r\n item = input(\"Please enter the band to change to: \")\r\n print (\" \")\r\n band_names[position - 1] = item\r\n return band_names\r\n\r\ndef display_options ():\r\n print (\"1. Add an item to th end of the list\")\r\n print (\"2. Delete an item by name\")\r\n print (\"3. Delete an item by list position\")\r\n print (\"4. Insert a new item\")\r\n print (\"5. Amend an item\")\r\n print (\"9. Exit\")\r\n\r\ndef choose_option ():\r\n choice = -1\r\n while choice not in [1,2,3,4,5,9]:\r\n choice = int(input(\"Please enter the menu choice: \"))\r\n return choice\r\n\r\ndef display_list(band_names):\r\n print (\"Current List:\")\r\n for count in range(0,len(band_names)):\r\n number = str(count + 1)\r\n print (\"{0}. 
{1}\".format(number,band_names[count]))\r\n print (\" \")\r\n\r\n\r\n\r\ndef main():\r\n band_names = [\"Fit For An Autospy\", \"Lorna Shore\", \"Motionless In White\", \"Suicide Silence\", \"Make Them Suffer\"]\r\n choice = -1\r\n while choice != 9:\r\n display_list(band_names)\r\n display_options()\r\n choice = choose_option()\r\n if choice == 1:\r\n band_names = option_one(band_names)\r\n elif choice == 2:\r\n band_names = option_two(band_names)\r\n elif choice == 3:\r\n band_names = option_three(band_names)\r\n elif choice == 4:\r\n band_names = option_four(band_names)\r\n elif choice == 5:\r\n band_names = option_five(band_names)\r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Lists Strech and Challenge Task 1.py","file_name":"Lists Strech and Challenge Task 1.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"521053791","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport oneflow as flow\nimport oneflow.typing as oft\nimport numpy as np\nimport os\nimport unittest\n\n\n@unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\ndef test_1n1c(test_case):\n dcgan = DCGAN()\n dcgan.compare_with_tf(1)\n\n\n@unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\ndef test_1n4c(test_case):\n dcgan = DCGAN()\n dcgan.compare_with_tf(4)\n\n\nclass DCGAN:\n def __init__(self):\n self.lr = 1e-4\n self.z_dim = 100\n self.batch_size = 32\n\n def compare_with_tf(self, gpu_num, result_dir=\"/dataset/gan_test/dcgan/\"):\n flow.config.gpu_device_num(gpu_num)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def test_generator(\n z: oft.Numpy.Placeholder((self.batch_size, self.z_dim)),\n label1: oft.Numpy.Placeholder((self.batch_size, 1)),\n ):\n g_out = self.generator(z, trainable=True, const_init=True)\n g_logits = self.discriminator(g_out, trainable=False, const_init=True)\n g_loss = flow.nn.sigmoid_cross_entropy_with_logits(\n flow.ones_like(g_logits),\n g_logits,\n name=\"Gloss_sigmoid_cross_entropy_with_logits\",\n )\n\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], 
[self.lr]), momentum=0\n ).minimize(g_loss)\n return g_loss\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def test_discriminator(\n z: oft.Numpy.Placeholder((self.batch_size, 100)),\n images: oft.Numpy.Placeholder((self.batch_size, 1, 28, 28)),\n label1: oft.Numpy.Placeholder((self.batch_size, 1)),\n label0: oft.Numpy.Placeholder((self.batch_size, 1)),\n ):\n g_out = self.generator(z, trainable=False, const_init=True)\n g_logits = self.discriminator(g_out, trainable=True, const_init=True)\n d_loss_fake = flow.nn.sigmoid_cross_entropy_with_logits(\n flow.zeros_like(g_logits),\n g_logits,\n name=\"Dloss_fake_sigmoid_cross_entropy_with_logits\",\n )\n\n d_logits = self.discriminator(\n images, trainable=True, reuse=True, const_init=True\n )\n d_loss_real = flow.nn.sigmoid_cross_entropy_with_logits(\n flow.ones_like(d_logits),\n d_logits,\n name=\"Dloss_real_sigmoid_cross_entropy_with_logits\",\n )\n d_loss = d_loss_fake + d_loss_real\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [self.lr]), momentum=0\n ).minimize(d_loss)\n\n return d_loss\n\n check_point = flow.train.CheckPoint()\n check_point.init()\n\n z = np.load(os.path.join(result_dir, \"z.npy\"))\n imgs = np.load(os.path.join(result_dir, \"img.npy\")).transpose(0, 3, 1, 2)\n label1 = np.ones((self.batch_size, 1)).astype(np.float32)\n label0 = np.zeros((self.batch_size, 1)).astype(np.float32)\n g_loss = test_generator(z, label1).get()\n d_loss = test_discriminator(z, imgs, label1, label0).get()\n tf_g_loss = np.load(os.path.join(result_dir, \"g_loss.npy\"))\n tf_d_loss = np.load(os.path.join(result_dir, \"d_loss.npy\"))\n\n if gpu_num == 1: # multi-gpu result can not pass\n assert np.allclose(\n g_loss.numpy(), tf_g_loss, rtol=1e-2, atol=1e-1\n ), \"{}-{}\".format(g_loss.ndarray().mean(), tf_g_loss.mean())\n assert np.allclose(\n d_loss.numpy(), tf_d_loss, rtol=1e-2, atol=1e-1\n ), \"{}-{}\".format(d_loss.ndarray().mean(), tf_d_loss.mean())\n\n def 
generator(self, z, const_init=False, trainable=True):\n # (n, 256, 7, 7)\n h0 = layers.dense(\n z, 7 * 7 * 256, name=\"g_fc1\", const_init=const_init, trainable=trainable\n )\n h0 = layers.batchnorm(h0, axis=1, name=\"g_bn1\")\n h0 = flow.nn.leaky_relu(h0, 0.3)\n h0 = flow.reshape(h0, (-1, 256, 7, 7))\n # (n, 128, 7, 7)\n h1 = layers.deconv2d(\n h0,\n 128,\n 5,\n strides=1,\n name=\"g_deconv1\",\n const_init=const_init,\n trainable=trainable,\n )\n h1 = layers.batchnorm(h1, name=\"g_bn2\")\n h1 = flow.nn.leaky_relu(h1, 0.3)\n # (n, 64, 14, 14)\n h2 = layers.deconv2d(\n h1,\n 64,\n 5,\n strides=2,\n name=\"g_deconv2\",\n const_init=const_init,\n trainable=trainable,\n )\n h2 = layers.batchnorm(h2, name=\"g_bn3\")\n h2 = flow.nn.leaky_relu(h2, 0.3)\n # (n, 1, 28, 28)\n out = layers.deconv2d(\n h2,\n 1,\n 5,\n strides=2,\n name=\"g_deconv3\",\n const_init=const_init,\n trainable=trainable,\n )\n out = flow.math.tanh(out)\n return out\n\n def discriminator(self, img, const_init=False, trainable=True, reuse=False):\n # (n, 1, 28, 28)\n h0 = layers.conv2d(\n img,\n 64,\n 5,\n name=\"d_conv1\",\n const_init=const_init,\n trainable=trainable,\n reuse=reuse,\n )\n h0 = flow.nn.leaky_relu(h0, 0.3)\n # h0 = flow.nn.dropout(h0, rate=0.3)\n # (n, 64, 14, 14)\n h1 = layers.conv2d(\n h0,\n 128,\n 5,\n name=\"d_conv2\",\n const_init=const_init,\n trainable=trainable,\n reuse=reuse,\n )\n h1 = flow.nn.leaky_relu(h1, 0.3)\n # h1 = flow.nn.dropout(h1, rate=0.3)\n # (n, 128 * 7 * 7)\n out = flow.reshape(h1, (self.batch_size, -1))\n # (n, 1)\n out = layers.dense(\n out, 1, name=\"d_fc\", const_init=const_init, trainable=trainable, reuse=reuse\n )\n return out\n\n\nclass layers:\n @classmethod\n def deconv2d(\n cls,\n input,\n filters,\n size,\n name,\n strides=2,\n trainable=True,\n reuse=False,\n const_init=False,\n use_bias=False,\n ):\n name_ = name if reuse == False else name + \"_reuse\"\n # weight : [in_channels, out_channels, height, width]\n weight_shape = (input.shape[1], 
filters, size, size)\n output_shape = (\n input.shape[0],\n input.shape[1],\n input.shape[2] * strides,\n input.shape[3] * strides,\n )\n\n weight = flow.get_variable(\n name + \"-weight\",\n shape=weight_shape,\n dtype=input.dtype,\n initializer=flow.random_normal_initializer(stddev=0.02)\n if not const_init\n else flow.constant_initializer(0.002),\n trainable=trainable,\n reuse=reuse,\n )\n\n output = flow.nn.conv2d_transpose(\n input,\n weight,\n strides=[strides, strides],\n output_shape=output_shape,\n padding=\"SAME\",\n data_format=\"NCHW\",\n name=name_,\n )\n\n if use_bias:\n bias = flow.get_variable(\n name + \"-bias\",\n shape=(filters,),\n dtype=input.dtype,\n initializer=flow.constant_initializer(0.0),\n trainable=trainable,\n reuse=reuse,\n )\n\n output = flow.nn.bias_add(output, bias, \"NCHW\")\n return output\n\n @classmethod\n def conv2d(\n cls,\n input,\n filters,\n size,\n name,\n strides=2,\n padding=\"same\",\n trainable=True,\n reuse=False,\n const_init=False,\n use_bias=True,\n ):\n name_ = name if reuse == False else name + \"_reuse\"\n\n # (output_dim, k_h, k_w, input.shape[3]) if NHWC\n weight_shape = (filters, input.shape[1], size, size)\n weight = flow.get_variable(\n name + \"-weight\",\n shape=weight_shape,\n dtype=input.dtype,\n initializer=flow.random_normal_initializer(stddev=0.02)\n if not const_init\n else flow.constant_initializer(0.002),\n trainable=trainable,\n reuse=reuse,\n )\n\n output = flow.nn.compat_conv2d(\n input,\n weight,\n strides=[strides, strides],\n padding=padding,\n data_format=\"NCHW\",\n name=name_,\n )\n\n if use_bias:\n bias = flow.get_variable(\n name + \"-bias\",\n shape=(filters,),\n dtype=input.dtype,\n initializer=flow.constant_initializer(0.0),\n trainable=trainable,\n reuse=reuse,\n )\n\n output = flow.nn.bias_add(output, bias, \"NCHW\")\n return output\n\n @classmethod\n def dense(\n cls,\n input,\n units,\n name,\n use_bias=False,\n trainable=True,\n reuse=False,\n const_init=False,\n ):\n name_ = 
name if reuse == False else name + \"_reuse\"\n\n in_shape = input.shape\n in_num_axes = len(in_shape)\n assert in_num_axes >= 2\n\n inputs = flow.reshape(input, (-1, in_shape[-1])) if in_num_axes > 2 else input\n\n weight = flow.get_variable(\n name=\"{}-weight\".format(name),\n shape=(units, inputs.shape[1]),\n dtype=inputs.dtype,\n initializer=flow.random_normal_initializer(stddev=0.02)\n if not const_init\n else flow.constant_initializer(0.002),\n trainable=trainable,\n model_name=\"weight\",\n reuse=reuse,\n )\n\n out = flow.matmul(a=inputs, b=weight, transpose_b=True, name=name_ + \"matmul\",)\n\n if use_bias:\n bias = flow.get_variable(\n name=\"{}-bias\".format(name),\n shape=(units,),\n dtype=inputs.dtype,\n initializer=flow.random_normal_initializer()\n if not const_init\n else flow.constant_initializer(0.002),\n trainable=trainable,\n model_name=\"bias\",\n reuse=reuse,\n )\n out = flow.nn.bias_add(out, bias, name=name_ + \"_bias_add\")\n\n out = flow.reshape(out, in_shape[:-1] + (units,)) if in_num_axes > 2 else out\n return out\n\n @classmethod\n def batchnorm(cls, input, name, axis=1, reuse=False):\n name_ = name if reuse == False else name + \"_reuse\"\n return flow.layers.batch_normalization(input, axis=axis, name=name_)\n","sub_path":"oneflow/python/test/models/test_dcgan.py","file_name":"test_dcgan.py","file_ext":"py","file_size_in_byte":11473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"613400479","text":"# [MAC]SSL Certificate FAIL 這句是MAC電腦才要加的\n# import ssl\n# ssl._create_default_https_context = ssl._create_unverified_context\n# 上面兩行都是MAC要加的\nfrom urllib.request import urlopen, urlretrieve\nimport json\nimport os\nyears = [\"2018\", \"2019\"]\nfor y in years:\n for m in range(12):\n url = \"https://www.google.com/doodles/json/\" + str(y) + \"/\" + str(m + 1) + \"?hl=zh_TW\"\n response = urlopen(url)\n # print(response.read()) read讀完就會去掉裡面的資料,若讀第二次就是空的\n doodles = json.load(response) # 轉成python格式\n for d in doodles:\n url = \"https:\" + d[\"url\"]\n # print(d[\"title\"], url)\n # print(url.split(\"/\")[-1]) # 把url分割然後只取最後一段\n dirname = \"doodles/\" + str(y) + \"/\" + str(m + 1) + \"/\"\n path = dirname + url.split(\"/\")[-1]\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n urlretrieve(url, path) # 下載\n\n","sub_path":"practice/doodles.py","file_name":"doodles.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"625934105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 7 13:25:57 2018\n\n@author: zhan\n\n需要第三方包:python-kafka, scikit learn\n\"\"\"\n\nimport cluster \nfrom sklearn.externals import joblib\nimport argparse\nfrom kafka import KafkaConsumer\n\n\ndef _get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--pkl', required=True, help='path to pkl file') \n parser.add_argument('-H', '--host', help='host ip for database')\n parser.add_argument('-P', '--port', help='port for database', default='8086')\n parser.add_argument('-u', '--user', help='user name for database', default='root')\n parser.add_argument('-w', '--password', help='password for database', default='root') \n parser.add_argument('-s', '--source', help='broker that data is from,like kafka', default='localhost:9092') \n parser.add_argument('-t', '--topic', required=True, help='topic from kafka')\n args = parser.parse_args()\n return args\n\n\n#model = joblib.load(r\"C:/zhanghan/wzw/aiops/data/a.log.pkl\")\n#print(\"len_std=\", model.len_std)\n#print(\"len_mean=\", model.len_mean)\n#print(\"ratio_std=\", model.ratio_std) \n#print(\"ratio_mean=\", model.ratio_mean)\n\n#\n#log='{\"log\":\"bird: BGP: Unexpected connect from unknown address 10.252.21.153 (port 27467)\\n\",\"stream\":\"stdout\",\"hostname\":\"core-cmhadoop5-2\",\"container_log_file\":\"calico-node-lmcsz_kube-system_calico-node-d3fcbf92d8c09506a8493dfffeedd730543ec50b4e31564921444ef65ebd0a71\"}'\n#log='{\"log\":\"2018-06-01 01:00:05.229 [INFO][176] health.go 150: Overall health summary=&health.HealthReport{Live:true, Ready:true}\\n\",\"stream\":\"stdout\",\"hostname\":\"core-cmhadoop5-2\",\"container_log_file\":\"calico-node-lmcsz_kube-system_calico-node-d3fcbf92d8c09506a8493dfffeedd730543ec50b4e31564921444ef65ebd0a71\"}'\n#log='{\"log\":\"2018-06-01 01:00:07.282 [INFO][176] int_dataplane.go 690: Applying dataplane 
updates\\n\",\"stream\":\"stdout\",\"hostname\":\"core-cmhadoop5-2\",\"container_log_file\":\"calico-node-lmcsz_kube-system_calico-node-d3fcbf92d8c09506a8493dfffeedd730543ec50b4e31564921444ef65ebd0a71\"}'\n#df = cluster.import_sample_json(log)\n#print(df)\n#df = cluster.extract_feature(df, cluster.TXT_REF)\n#X,df = cluster.make_X(df, \n# len_mean=model.len_mean, \n# len_std=model.len_std,\n# ratio_mean=model.ratio_mean, \n# ratio_std=model.ratio_std)\n#print(\"***********\")\n#print(X)\n#labels = model.predict(X)\n#df[\"label\"] = labels\n#print(df)\n \n#'123.206.41.161:9092'\n\nif __name__ == '__main__':\n args = _get_args() \n model = joblib.load(args.pkl)\n \n \n print(\"len_std=\", model.len_std)\n print(\"len_mean=\", model.len_mean)\n print(\"ratio_std=\", model.ratio_std) \n print(\"ratio_mean=\", model.ratio_mean) \n \n print(\"start\")\n consumer = KafkaConsumer(args.topic, bootstrap_servers=[args.source])\n print(\"receiving\")\n for msg in consumer:\n print (msg.value.decode())\n df = cluster.import_sample_json(msg.value.decode())\n df = cluster.extract_feature(df, cluster.TXT_REF)\n X,df = cluster.make_X(df, \n len_mean=model.len_mean, \n len_std=model.len_std,\n ratio_mean=model.ratio_mean, \n ratio_std=model.ratio_std)\n print(\"***********\")\n print(X)\n labels = model.predict(X)\n df[\"label\"] = labels\n \n try:\n if args.host != None:\n print(\"save to database\")\n df[\"log\"] = df[\"log\"].str.replace('\"', r'\\\"' )\n cluster.save_to_db(df, host=args.host, port=args.port, table_name=\"log_cluster\",\n user=args.user, password=args.password) \n print(df)\n except Exception as e:\n print(str(e))","sub_path":"1/predict-kafka.py","file_name":"predict-kafka.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"166972392","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport os\n\nfrom eduid_common.config.parsers.ini import IniConfigParser\ntry:\n # Do not force applications that does not use EtcdConfigParser to have yaml and etcd installed\n from eduid_common.config.parsers.etcd import EtcdConfigParser\nexcept ImportError:\n EtcdConfigParser = None\nfrom eduid_common.config.parsers.exceptions import ParserException\n\n__author__ = 'lundberg'\n\n\nclass ConfigParser(object):\n \"\"\"\n Load config based on environment variable\n \"\"\"\n\n def __new__(cls, **kwargs):\n \"\"\"\n Load the type of config parser based on environment variables EDUID_CONFIG_NS or\n EDUID_INI_FILE_NAME.\n\n EDUID_CONFIG_NS initilizes EtcdConfigParser\n EDUID_CONFIG_FILE_NAME initializes IniConfigParser\n \"\"\"\n ns = os.environ.get('EDUID_CONFIG_NS')\n ini_file_name = os.environ.get('EDUID_INI_FILE_NAME')\n if ns:\n return EtcdConfigParser(ns, **kwargs)\n elif ini_file_name:\n return IniConfigParser(ini_file_name, **kwargs)\n raise ParserException('No environment variable for config initialization found')\n\n def read_configuration(self):\n \"\"\"\n :return: Configuration\n :rtype: dict\n \"\"\"\n raise NotImplementedError()\n","sub_path":"src/eduid_common/config/parsers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"428926911","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 27 16:20:29 2019\r\n\r\n@author: pitonhik\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\nimport math\r\nimport statistics as st\r\nr ={}\r\nmark_mas=[]\r\nsvodka = {}\r\nw1 = 1\r\nw2 = 1\r\nw3 =1\r\nw4 =1\r\nw5 =1\r\nw6 =1\r\nw7 =1\r\ndef per(xm1,ym1,xmax1,ymax1,xm2,ym2,xmax2,ymax2):\r\n left = max(xm1,xm2)\r\n top = min(ymax1,ymax2)\r\n right= min(xmax1,xmax2)\r\n bot = max(ym1,ym2)\r\n wight = right - left\r\n h = top - bot\r\n #print(wight<0)\r\n #print(h<0)\r\n if wight < 0 or h < 0 :\r\n #print(wight < 0 or h < 0)\r\n return 'false'\r\n return wight * h\r\ndef mark(xm1,ym1,xmax1,ymax1,xm2,ym2,xmax2,ymax2):\r\n p = per(xm1,ym1,xmax1,ymax1,xm2,ym2,xmax2,ymax2)\r\n if p == 'false':\r\n return 0\r\n elif p == 5:\r\n return 1\r\n s2 = (max(xmax2,xm2) - min(xmax2,xm2))*(max(ymax2,ym2) - min(ymax2,ym2))\r\n s1 = (max(xmax1,xm1) - min(xmax1,xm1))*(max(ymax1,ym1) - min(ymax1,ym1))\r\n s = s1+s2 - p\r\n if s ==0:\r\n return 1\r\n mark = p / s\r\n return mark\r\ntr_data =pd.read_csv('train_data.csv',sep=',')\r\ntr_ans = pd.read_csv('train_answers.csv',sep=',')\r\nid_ob = [] #объект уникальных id обьектов \r\nfor i in range(len(tr_data)):\r\n if not (tr_data['itemId'][i] in id_ob):\r\n id_ob.append(tr_data['itemId'][i])\r\nid_p =[] #объект уникальных id людей из тестовых выборок\r\nfor j in range(len(tr_data)):\r\n if not (tr_data['userId'][j] in id_p):\r\n id_p.append(tr_data['userId'][j])\r\nts = pd.read_csv('test_data.csv',sep=',')\r\n#loc[] обращение по индексу\r\ndp ={}\r\ncl_p =[]\r\ndef sred_rast(mas):\r\n #med = st.median(mas)\r\n mas.sort()\r\n rast = []\r\n sr = 0\r\n if len(mas)==1:\r\n return 0\r\n for i in range(len(mas)-1):\r\n \r\n rast.append(mas[i+1]-mas[i])\r\n \r\n \r\n sr = sred(rast)\r\n #print(sr)\r\n med = st.median(mas)\r\n #print(med)\r\n return 1 - (sr / med)\r\ndef dlina(mas):\r\n d = len(mas)\r\n d = 1 / d\r\n return 1 - d\r\ndef sred(a):\r\n s =0 \r\n for 
i in range(len(a)):\r\n s = s+ a[i]\r\n res = s/ len(a)\r\n return(res)\r\ndef newrad(mas):\r\n nm =[]\r\n res = []\r\n mas.sort()\r\n if len(mas)==1:\r\n res.append(mas)\r\n return res\r\n for i in range(len(mas)-1):\r\n nm.append(mas[i+1]-mas[i])\r\n \r\n m = sred(nm)\r\n q=0\r\n for i in range(len(nm)):\r\n if nm[i]> m :\r\n \r\n newm = []\r\n newm = mas[q:i+1]\r\n q = i+1\r\n res.append(newm)\r\n del(newm)\r\n res.append(mas[q:len(mas)])\r\n return res\r\ndef naib_v(mas):\r\n m = newrad(mas)\r\n r = 0\r\n l = 0\r\n le = []\r\n s = 0\r\n for i in range(len(m)): \r\n le.append(len(m[i]))\r\n ma = max(le)\r\n if len(mas)==1:\r\n return mas[0]\r\n elif le.count(ma)==1:\r\n return sred(m[le.index(ma)])\r\n else:\r\n kol = le.count(ma)\r\n for i in range(kol):\r\n s = s + sred(m[le.index(ma)])\r\n m.remove(m[le.index(ma)])\r\n le.remove(ma)\r\n return s / kol\r\ndef srez(df,name):\r\n cl = []\r\n for i in range(len(df)):\r\n idf = df.index[i]\r\n \r\n cl.append(df[name].loc[idf])\r\n return cl\r\ndef my_print(ids):\r\n print('/-/-/-/-/-/-/-/-/')\r\n data = tr_data[tr_data['itemId']==ids]\r\n print(data)\r\n print('--------')\r\n data = tr_ans[tr_ans['itemId']==ids]\r\n print(data)\r\n \r\ndef new_pipl_filt():\r\n #print('/--/')\r\n c =[]\r\n for i in range(len(id_p)):\r\n \r\n pipl = id_p[i]\r\n pa = tr_data[tr_data['userId']==pipl]\r\n m = []\r\n for j in range(len(pa)):\r\n ind = pa.index[j]\r\n xm1 = pa['Xmin'].loc[ind]\r\n ym1 = pa['Ymin'].loc[ind]\r\n xmax1 = pa['Xmax'].loc[ind]\r\n ymax1 = pa['Ymax'].loc[ind]\r\n otv = tr_ans[tr_ans['itemId']==pa['itemId'].loc[ind]]\r\n #print(otv)\r\n ind2 = otv['Xmin_true'].index[0]\r\n xm2=otv['Xmin_true'].loc[ind2]\r\n ym2=otv['Ymin_true'].loc[ind2]\r\n xmax2=otv['Xmax_true'].loc[ind2]\r\n ymax2=otv['Ymax_true'].loc[ind2]\r\n mar = mark(xm1,ym1,xmax1,ymax1,xm2,ym2,xmax2,ymax2)\r\n m.append(mar)\r\n rez = {}\r\n rez['id']=pipl\r\n rez['mas']=m\r\n rez['sred']=sred(m)\r\n rez['med']=st.median(m)\r\n rez['min']= min(m)\r\n 
rez['max']= max(m)\r\n c.append(rez)\r\n del(rez)\r\n \r\n return c\r\ndef pipl_metrik():\r\n global pip\r\n global w1\r\n global w2\r\n global w3\r\n global w4\r\n w1= 0\r\n for i in range(len(pip)):\r\n dlin = dlina(pip[i]['mas'])\r\n mi = pip[i]['min']\r\n #print(pip[i]['mas'])\r\n ver = naib_v(pip[i]['mas'])\r\n med = pip[i]['med']\r\n metrika = w1*dlin + 1*mi + 2*ver + 1 * med\r\n pip[i]['metr']=metrika / 3\r\npip = new_pipl_filt()\r\npipl_metrik()\r\ndef new_sr_sr():\r\n if True:\r\n global svodka\r\n global mark_mas\r\n global r\r\n global id_ob\r\n global tr_ans\r\n global id_p\r\n global pip\r\n for i in range(len(id_ob)):\r\n ot = [0,0,0,0]\r\n ob = id_ob[i]\r\n t = tr_data[tr_data['itemId']==ob]\r\n work = []\r\n work.append(srez(t,'userId'))\r\n work.append([ob])\r\n work.append(srez(t,'Xmin'))\r\n work.append(srez(t,'Ymin'))\r\n work.append(srez(t,'Xmax'))\r\n work.append(srez(t,'Ymax'))\r\n work_test = []\r\n work_test.append(srez(t,'Xmin'))\r\n work_test.append(srez(t,'Ymin'))\r\n work_test.append(srez(t,'Xmax'))\r\n work_test.append(srez(t,'Ymax'))\r\n w8=1\r\n obl_x = newrad(work[2])\r\n obl_y = newrad(work[3])\r\n obl_xm = newrad(work[4])\r\n obl_ym = newrad(work[5])\r\n mas_o_x = obl(obl_x,work[2],work[0])\r\n mas_o_y = obl(obl_y,work[3],work[0])\r\n mas_o_xm = obl(obl_xm,work[4],work[0])\r\n mas_o_ym = obl(obl_ym,work[5],work[0])\r\n \r\n #for j in range(len(best_x)):\r\n \r\n \r\n xe = oblast(mas_o_x,obl_x)\r\n ye = oblast(mas_o_y,obl_y)\r\n xme =oblast(mas_o_xm,obl_xm)\r\n yme =oblast(mas_o_ym,obl_ym)\r\n masp =[]\r\n for j in range(len(work[0])):\r\n if work[0][j] in id_p:\r\n masp.append(ret_pm(work[0][j]))\r\n if len(masp)==0:\r\n for i in range(len(work[0])):\r\n p ={}\r\n p['id']=work[0][i]\r\n p['metr']=0\r\n pip.append(p)\r\n del(p)\r\n elif len(masp)>0:\r\n for i in range(len(work[0])):\r\n if not(work[0][i] in id_ob):\r\n p ={}\r\n p['id']=work[0][i]\r\n p['metr']=sred(masp)\r\n pip.append(p)\r\n del(p)\r\n bp = max(masp)\r\n 
#print(masp)\r\n pind = masp.index(bp)\r\n bpx = work_test[0][pind]\r\n #print(bpx)\r\n bpy = work_test[1][pind]\r\n #print(bpy)\r\n bpxm = work_test[2][pind]\r\n bpym = work_test[3][pind]\r\n mark1 = mark(xe,ye,xme,yme,bpx,bpy,bpxm,bpym)\r\n #print(mark1)\r\n if mark1 > 0.9:\r\n ot[0]= xe\r\n ot[1]= ye\r\n ot[2]= xme\r\n ot[3]= yme\r\n \r\n \r\n else:\r\n ot[0]= bpx + xe\r\n ot[0] = int(ot[0]/2)\r\n ot[1]= bpy + ye\r\n ot[1] = int(ot[1]/2)\r\n ot[2]= bpxm + xme\r\n ot[2] = int(ot[2]/2)\r\n ot[3]= bpym + yme\r\n ot[3] = int(ot[3]/2)\r\n if len(work[0])< 4:\r\n ot[0]= xe\r\n ot[1]= ye\r\n ot[2]= xme\r\n ot[3]= yme\r\n ans = tr_ans[tr_ans['itemId']==ob]\r\n \r\n io = ans.index[0]\r\n \r\n x = ans['Xmin_true'].loc[io]\r\n \r\n y = ans['Ymin_true'].loc[io]\r\n xm = ans['Xmax_true'].loc[io]\r\n ym = ans['Ymax_true'].loc[io]\r\n mg = mark(ot[0],ot[1],ot[2],ot[3],x,y,xm,ym)\r\n \r\n \"\"\"if mg < 0.5:\r\n r[str(ob)]=mg\"\"\"\r\n mark_mas.append(mg)\r\n svodka[str(ob)]= {}\r\n svodka[str(ob)]['piple']=[work[0]]\r\n svodka[str(ob)]['xmin']=work_test[0]\r\n svodka[str(ob)]['ymin']=work_test[1]\r\n svodka[str(ob)]['xmax']=work_test[2]\r\n svodka[str(ob)]['ymax']=work_test[3]\r\n svodka[str(ob)]['X_grup']= obl_x\r\n svodka[str(ob)]['Y_grup']= obl_y\r\n svodka[str(ob)]['Xm_grup']= obl_xm\r\n svodka[str(ob)]['Ym_grup']= obl_ym\r\n svodka[str(ob)]['xmin_v']=ot[0]\r\n svodka[str(ob)]['ymin_v']=ot[1]\r\n svodka[str(ob)]['xmax_v']=ot[2]\r\n svodka[str(ob)]['ymax_v']=ot[3]\r\n svodka[str(ob)]['ver']=mg\r\n svodka[str(ob)]['xmin_v_TRUE']=x\r\n svodka[str(ob)]['ymin_v_TRUE']=y\r\n svodka[str(ob)]['xmax_v_TRUE']=xm\r\n svodka[str(ob)]['ymax_v_TRUE']=ym\r\n pi_m =[]\r\n for j in range(len(work[0])):\r\n pi_m.append(ret_pm_inf(work[0][j]))\r\n svodka[str(ob)]['piple_ym']=pi_m\r\n del(pi_m)\r\n if mg < 0.5 and mg > 0.4:\r\n r[str(ob)] = mg\r\n \r\n del(work)\r\n del(work_test)\r\ndef oblast(mas_o_x,obl_x):\r\n best_x = newrad_2(mas_o_x)\r\n sre = 0\r\n sre_i = 0\r\n for j in 
range(len(best_x)):\r\n if sred(best_x[j]) > sre:\r\n sre = sred(best_x[j])\r\n sre_i = j\r\n if len(best_x) == 1:\r\n mac = max(best_x[0])\r\n mas_in = mas_o_x.index(mac)\r\n obx = obl_x[mas_in]\r\n return sred(obx)\r\n if len(best_x[sre_i])==1:\r\n mas_in = mas_o_x.index(best_x[sre_i])\r\n obx = obl_x[mas_in]\r\n else:\r\n \r\n obx = []\r\n for j in range(len(best_x[sre_i])):\r\n mas_in = mas_o_x.index(best_x[sre_i][j])\r\n for g in range(len(obl_x[mas_in])):\r\n \r\n obx.append(obl_x[mas_in][g])\r\n #print('---') \r\n return sred(obx)\r\n \r\ndef obl(mas_obl,mas,mas_p):\r\n global pip\r\n global w5\r\n global w6\r\n global w7\r\n \r\n rez =[]\r\n j = 0\r\n for i in range(len(mas_obl)):\r\n dlin = dlina(mas_obl[i])\r\n #print(mas_obl[i])\r\n sr_r = sred_rast(mas_obl[i])\r\n sum_p = 0\r\n \r\n for j in range(len(mas_obl[i])):\r\n ind = mas.index(mas_obl[i][j])\r\n piple = mas_p[ind]\r\n #print(piple)\r\n sum_p = sum_p + ret_pm(piple)\r\n sum_p = sum_p / len(mas_obl[i])\r\n metrika = 1*dlin + 2*sr_r + 0.5*sum_p\r\n rez.append(metrika)\r\n return rez\r\n \r\n \r\ndef ret_pm(a):\r\n global pip\r\n for i in range(len(pip)):\r\n if pip[i]['id']==a:\r\n return pip[i]['metr']\r\ndef ret_pm_inf(a):\r\n global pip\r\n for i in range(len(pip)):\r\n if pip[i]['id']==a:\r\n return pip[i]\r\n'''trem = [0,0,0,0,0,0,0,0]\r\nfor i1 in range(10):\r\n #w1=1 * (i1+1)\r\n for i2 in range(10):\r\n #w2=0.6 * (i2+1)\r\n for i3 in range(10):\r\n #w3=5 * (i3 +1)\r\n for i4 in range(10):\r\n #w4=0.4 * (i4+1)\r\n for i5 in range(10):\r\n #w5=0.5 * (i5 +1)\r\n for i6 in range(10):\r\n \r\n for i7 in range(10):\r\n \r\n w4=w4 +0.1\r\n pip = new_pipl_filt()\r\n pipl_metrik()\r\n new_sr_sr()\r\n print(sred(mark_mas))\r\n print(w4)\r\n if sred(mark_mas)>trem[0]:\r\n trem[0]=sred(mark_mas)\r\n trem[1]=w1\r\n trem[2]=w2\r\n trem[3]=w3\r\n trem[4]=w4\r\n trem[5]=w5\r\n trem[6]=w6\r\n trem[7]=w7\r\n \r\n #print(trem)\r\n print(trem)\r\n print(trem)\r\n \r\n print(trem)\r\n \r\nprint(trem) ''' 
\r\ndef newrad_2(mas):\r\n nm =[]\r\n res = []\r\n mase = []\r\n for i in range(len(mas)):\r\n mase.append(mas[i])\r\n mase.sort()\r\n if len(mase)==1:\r\n res.append(mase)\r\n return res\r\n for i in range(len(mase)-1):\r\n nm.append(mase[i+1]-mase[i])\r\n \r\n m = sred(nm)\r\n q=0\r\n for i in range(len(nm)):\r\n if nm[i]> m :\r\n \r\n newm = []\r\n newm = mase[q:i+1]\r\n q = i+1\r\n res.append(newm)\r\n del(newm)\r\n res.append(mase[q:len(mase)])\r\n return res\r\nnew_sr_sr()\r\nprint(sred(mark_mas)) ","sub_path":"metriks.py","file_name":"metriks.py","file_ext":"py","file_size_in_byte":11873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"538743885","text":"import numpy as np \r\nimport cv2\r\n\r\n#start cam and haar cascade\r\ncam = cv2.VideoCapture(0)\r\nface_cas = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml') #create a haar-cascade object for face detection. haarcascade has features that it extracts. we are not extracting the features rn tho, just using the algorithm. \r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\n\r\nf_01 = np.load('face_01.npy').reshape((20, 50*50*3)) #linearising into one matrix\r\nf_02 = np.load('face_02.npy').reshape((12, 50*50*3))\r\nf_03 = np.load('face_03.npy').reshape((20, 50*50*3))\r\n\r\nprint (f_01.shape, f_02.shape, f_03.shape)\r\n\r\nnames = {\r\n 0: 'Pratyush',\r\n 1: 'Ankita',\r\n 2: 'Anju', \r\n}\r\n\r\nlabels = np.zeros((52,1))\r\nlabels[:20, : ] = 0.0\r\nlabels[20:32, :] = 1.0\r\nlabels[32:, :] = 2.0\r\n\r\ndata = np.concatenate([f_01, f_02, f_03])\r\nprint(data.shape,labels.shape)\r\n\r\ndef distance(x1,x2):\r\n return np.sqrt(((x1-x2)**2).sum())\r\n\r\ndef knn(x, train, targets,k=5): \r\n m = train.shape[0]\r\n dist = []\r\n \r\n for ix in range(m): \r\n dist.append(distance(x, train[ix]))\r\n \r\n dist = np.asarray(dist)\r\n # Lets us pick up top K values\r\n indx = np.argsort(dist)\r\n sorted_labels = labels[indx][:k]\r\n counts = np.unique(sorted_labels, return_counts = True)\r\n \r\n return counts[0][np.argmax(counts[1])]\r\n\r\n\r\n\r\nwhile True:\r\n #get each frame\r\n ret, frame = cam.read()\r\n # convert to grayscale and get faces\r\n if ret == True:\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces = face_cas.detectMultiScale(gray, 1.3, 5)\r\n # for each frame\r\n for(x,y,w,h) in faces:\r\n face_component = frame[y:y+h, x:x+w, :]\r\n fc = cv2.resize(face_component, (50,50))\r\n\r\n # after processing the image and rescaling\r\n # convert to linear vector using flatten()\r\n # and pass to knn along with data\r\n\r\n lab = knn(fc.flatten(), data, labels) #flatten converts a matrix to linear vector\r\n text = 
names[int(lab)] # convert this label to int and get corresponding name\r\n cv2.putText(frame, text, (x,y), font, 1, (255,255,0), 2) #onts width, colour etc\r\n\r\n cv2.rectangle(frame, (x,y), (x+w,y+h), (0,0,255),2)\r\n cv2.imshow('frame', frame)\r\n\r\n if cv2.waitKey(1) == 27:\r\n break \r\n else:\r\n print(\"error\")","sub_path":"FaceRecogFinal.py","file_name":"FaceRecogFinal.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"500884485","text":"balance = 320000\nannualInterestRate = 0.2\ncalceval = 1\nmonth = 0\nprebalance = balance\nincrease = .01\ninterest = annualInterestRate / 12\nnobalance = False\nlowerbound = prebalance / 12\nupperbound = (prebalance + (annualInterestRate * prebalance)) // 12\nmiddlebound = (upperbound + lowerbound) // 2\n\n\ndef monthcalc(prebalance, payment, month):\n while month < 12:\n prebalance = prebalance - payment\n prebalance = prebalance + (interest * prebalance)\n month = month + 1\n return prebalance\n\n\nwhile calceval !=0:\n calceval = monthcalc(prebalance, middlebound, month)\n if calceval < 0:\n middlebound = (middlebound + lowerbound) // 2\n elif calceval > 0:\n middlebound = (middlebound + upperbound) // 2\n\nprint(\"Lowest Payment: \", round(middlebound, 2))","sub_path":"creditcard_fixed.py","file_name":"creditcard_fixed.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"203560197","text":"# -- -- -- -- -- -- -- -- -- -- -- -- --\n# ICT CAT Term 4, 2019\n# Category A - Problem 4: Alphabet Values (Difficulty: ****)\n# https://github.com/mikejzx/py-ictcat\n#\n# -- -- -- -- -- -- -- -- -- -- -- -- --\n# If A = 1, B = 2, C = 3, and so on, we \n# can find the number value of a word. For \n# example “ROBOT” = 18 + 15 + 2 + 15 + 20 = 70. \n# Write a program that prints the number value \n# of an input word. (Hint: ord('A'.lower())-96 = 1).\n# -- -- -- -- -- -- -- -- -- -- -- -- --\n\n# Constants\nVALID_CHARS = \"abcdefghijklmnopqrstuvwxyz\"\n\n# Get x's position in the alphabet from 1-26.\ndef get_alphabet_pos(x):\n # Bit-masking an ASCII value with 0x1F gives us\n # Only the bits we are interested in.\n # In binary it appears: 0001 1111\n # Bit-wise operations are faster to run on \n # CPU than a subtraction. (Or should be at least...)\n return ord(x) & 0x1F\n\n# Get the alphabet position sum of each character in data.\ndef get_word_value(data):\n # Compute the sum.\n sum = 0\n for i in data:\n # Check that this letter is in the alphabet.\n if i not in VALID_CHARS:\n # If not, just skip this iteration.\n continue\n\n # Get the number at this position and add to sum.\n sum += get_alphabet_pos(i)\n return sum\n\n# Get input, compute sum, and print.\nprint(\"-- Alphabet Values --\")\nwhile True:\n print(\"Value: \", get_word_value(input(\"Enter a string you wish to get the value of.\\n\").lower()))","sub_path":"a4_alpha_vals.py","file_name":"a4_alpha_vals.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"469913289","text":"from .doc import Doc\nfrom .vocab import Vocab\nfrom .underscore import Underscore\n\nfrom syft.generic.object import AbstractObject\nfrom syft.workers.base import BaseWorker\nfrom syft.generic.string import String\n\nimport pickle\nfrom typing import List, Union\n\n\nclass TokenMeta(object):\n \"\"\"This class holds some meta data about a token from the text held by a Doc object.\n This allows to create a Token object when needed.\n \"\"\"\n\n def __init__(self, start_pos: int, end_pos: int, space_after: bool, is_space: bool):\n \"\"\"Initializes a TokenMeta object\n\n Args:\n start_pos (int): The start index of the token in the Doc text.\n end_pos (int): The end index of the token in the Doc text (the end index is\n part of the token).\n space_after (bool): Whether the token is followed by a single white \n space (True) or not (False).\n is_space (bool): Whether the token itself is composed of only white \n spaces (True) or not (false).\n\n \"\"\"\n\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.space_after = space_after\n self.is_space = is_space\n\n # Initialize the Underscore object (inspired by spaCy)\n # This object will hold all the custom attributes set\n # using the `self.set_attribute` method\n self._ = Underscore()\n\n\nclass Tokenizer(AbstractObject):\n def __init__(\n self,\n vocab: Union[Vocab, str],\n id: int = None,\n owner: BaseWorker = None,\n client_id: str = None,\n tags: List[str] = None,\n description: str = None,\n ):\n \"\"\"Initialize the Tokenizer object\n \n Args:\n vocab (str or Vocab object) : If str, this should be the name of the language model \n to build the Vocab object from. such as 'en_core_web_lg' . This is useful when\n the Tokenizer object is sent to a remote worker. 
So it can rebuild\n its Vocab object from scratch instead of send the Vocab object to\n the remote worker which might take too much network traffic.\n \n id (int) : The id of the Tokenizer object.\n owner (BaseWorker) : The worker on which the Tokenizer object lives.\n client_id (str) : The id of the worker on which the Language object using this\n Tokenizer lives.\n tags (list of str) : Tags to attach to the current Tokenizer.\n description (str) : A description of this Tokenizer object.\n \"\"\"\n\n if isinstance(vocab, Vocab):\n self.vocab = vocab\n else:\n self.vocab = Vocab(model_name=vocab)\n\n # If the client id is not specified, then it should be the same as the owner id.\n # This means that the tokenizer and the Language objects live on the same\n # worker.\n if client_id:\n self.client_id = client_id\n else:\n self.client_id = owner.id\n\n super(Tokenizer, self).__init__(\n id=id, owner=owner, tags=tags, description=description\n )\n\n def __call__(self, text: Union[String, str] = None, text_id: int = None):\n \"\"\"The real tokenization procedure takes place here.\n\n As in the spaCy library. This is not exactly equivalent to \n text.split(' '). Because tokens can be whitle spaces if two or\n more consecutive white spaces are found.\n\n Exampele:\n 'I love apples' gives three tokens: 'I', 'love', 'apples'\n 'I love apples ' gives four tokens: 'I', ' ', 'love', 'apples'\n ' I love ' gives three tokens: ' ', 'I', 'love' (yes a single white space\n at the beginning is considered a token)\n\n Tokenizing this ways helps reconstructing the original string\n without loss of white spaces.\n I think that reconstructing the original string might be a good way\n to blindly verify the sanity of the blind tokenization process.\n\n\n Args:\n text (Syft String or str) : The text to be tokenized\n text_id (int) : the text id to be tokenized. 
The id can be used to get the object\n from the worker registery\n\n \"\"\"\n\n # Either the `text` or the `text_id` should be specified, they cannot be both None\n assert (\n text is not None or text_id is not None\n ), \"`text` and `text_id` cannot be both None\"\n\n # Create a document that will hold meta data of tokens\n # By meta data I mean the start and end positions of each token\n # in the original text, if the token is followed by a white space,\n # if the token itself is composed of white spaces or not, etc ...\n\n # If the text is not specified, then get the text using its id\n if text is None:\n text = self.owner.get_obj(text_id)\n\n doc = Doc(self.vocab, text, owner=self.owner)\n\n # The number of characters in the text\n text_size = len(text)\n\n # Initialize a pointer to the position of the first character of 'text'\n pos = 0\n\n # This is a flag to indicate whether the character we are comparing\n # to is a white space or not\n is_space = text[0].isspace()\n\n # Start tokenization\n for i, char in enumerate(text):\n\n # We are looking for a character that is the opposite of 'is_space'\n # if 'is_space' is True, then we want to find a character that is\n # not a space. and vice versa. This event marks the end of a token.\n is_current_space = char.isspace()\n if is_current_space != is_space:\n\n # Create the TokenMeta object that can be later used to retrieve the token\n # from the text\n token_meta = TokenMeta(\n start_pos=pos,\n end_pos=i - 1,\n space_after=is_current_space,\n is_space=is_space,\n )\n\n # Append the token to the document\n doc.container.append(token_meta)\n\n # Adjust the position 'pos' against which\n # we compare the currently visited chararater\n if is_current_space:\n pos = i + 1\n else:\n pos = i\n\n # Update the character type of which we are searching\n # the opposite (space vs. 
not space).\n # prevent 'pos' from being out of bound\n if pos < text_size:\n is_space = text[pos].isspace()\n\n # Create the last token if the end of the string is reached\n if i == text_size - 1 and pos <= i:\n\n # Create the TokenMeta object that can be later used to retrieve the token\n # from the text\n token_meta = TokenMeta(\n start_pos=pos,\n end_pos=None, # text[pos:None] ~ text[pos:]\n space_after=is_current_space,\n is_space=is_space,\n )\n\n # Append the token to the document\n doc.container.append(token_meta)\n\n # If the Language object using this tokenizer lives on a different worker\n # (self.client_id != self.owner.id)\n # Then return a DocPointer to the generated doc object\n if self.client_id != self.owner.id:\n\n # Register the Doc in the current worker\n self.owner.register_obj(obj=doc)\n\n # Create a pointer to the above Doc object\n doc_pointer = Doc.create_pointer(\n doc,\n location=self.owner,\n id_at_location=doc.id,\n garbage_collect_data=False,\n )\n\n return doc_pointer\n\n return doc\n\n def send(self, location: BaseWorker):\n \"\"\"Sends this tokenizer object to the worker specified by 'location'. 
\n and returns a pointer to that tokenizer as a TokenizerPointer object.\n\n Args:\n location: The BaseWorker object to which the tokenizer is to be sent.\n Note that this is never actually the BaseWorker but instead\n a class which inherits the BaseWorker abstraction.\n\n Returns:\n A TokenizerPointer objects to self.\n\n \"\"\"\n\n ptr = self.owner.send(self, location)\n\n return ptr\n\n @staticmethod\n def create_pointer(\n tokenizer,\n location: BaseWorker = None,\n id_at_location: (str or int) = None,\n register: bool = False,\n owner: BaseWorker = None,\n ptr_id: (str or int) = None,\n garbage_collect_data: bool = True,\n ):\n \"\"\"Creates a TokenizerPointer object that points to a Tokenizer object\n living in the worker 'location'.\n\n Returns:\n a TokenizerPointer object\n \"\"\"\n\n # I put the import here in order to avoid circular imports\n from .pointers.tokenizer_pointer import TokenizerPointer\n\n if id_at_location is None:\n id_at_location = tokenizer.id\n\n if owner is None:\n owner = tokenizer.owner\n\n tokenizer_pointer = TokenizerPointer(\n location=location,\n id_at_location=id_at_location,\n owner=owner,\n id=ptr_id,\n garbage_collect_data=garbage_collect_data,\n )\n\n return tokenizer_pointer\n\n @staticmethod\n def simplify(worker, tokenizer: \"Tokenizer\"):\n \"\"\"This method is used to reduce a `Tokenizer` object into a list of simpler objects that can be\n serialized.\n \"\"\"\n\n # Simplify attributes\n client_id = pickle.dumps(tokenizer.client_id)\n tags = [pickle.dumps(tag) for tag in tokenizer.tags] if tokenizer.tags else None\n description = pickle.dumps(tokenizer.description)\n model_name = pickle.dumps(tokenizer.vocab.model_name)\n\n return (tokenizer.id, client_id, tags, description, model_name)\n\n @staticmethod\n def detail(worker: BaseWorker, simple_obj: tuple):\n \"\"\"Create an object of type Tokenizer from the reduced representation in `simple_obj`.\n\n Args:\n worker (BaseWorker) : The worker on which the new Tokenizer 
object is to be created.\n simple_obj (tuple) : A tuple resulting from the serialized then deserialized returned tuple\n from the `_simplify` static method above.\n\n Returns:\n tokenizer (Tokenizer) : a Tokenizer object\n \"\"\"\n\n # Get the tuple elements\n id, client_id, tags, description, model_name = simple_obj\n\n # Unpickle\n client_id = pickle.loads(client_id)\n tags = [pickle.loads(tag) for tag in tags] if tags else None\n description = pickle.loads(description)\n model_name = pickle.loads(model_name)\n\n # Create the tokenizer object\n tokenizer = Tokenizer(\n vocab=model_name,\n id=id,\n owner=worker,\n client_id=client_id,\n tags=tags,\n description=description,\n )\n\n return tokenizer\n","sub_path":"syfertext/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":11283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"462448777","text":"import os\nimport time\nimport common\nimport subprocess\nimport pytest\n\nfrom common import client, random_labels, volume_name # NOQA\nfrom common import core_api, apps_api, pod # NOQA\nfrom common import SIZE, EXPAND_SIZE\nfrom common import check_device_data, write_device_random_data\nfrom common import check_volume_data, write_volume_random_data\nfrom common import get_self_host_id, volume_valid\nfrom common import iscsi_login, iscsi_logout\nfrom common import wait_for_volume_status\nfrom common import wait_for_volume_delete\nfrom common import wait_for_snapshot_purge\nfrom common import generate_volume_name\nfrom common import get_volume_endpoint, get_volume_engine\nfrom common import check_volume_endpoint\nfrom common import activate_standby_volume, check_volume_last_backup\nfrom common import create_pv_for_volume, create_pvc_for_volume\nfrom common import create_and_wait_pod, delete_and_wait_pod\nfrom common import delete_and_wait_pvc, delete_and_wait_pv\nfrom common import CONDITION_STATUS_FALSE, CONDITION_STATUS_TRUE\nfrom common import RETRY_COUNTS, RETRY_INTERVAL, RETRY_COMMAND_COUNT\nfrom common import cleanup_volume, create_and_check_volume, create_backup\nfrom common import DEFAULT_VOLUME_SIZE\nfrom common import Gi, Mi\nfrom common import wait_for_volume_detached\nfrom common import create_pvc_spec\nfrom common import generate_random_data, write_volume_data\nfrom common import VOLUME_RWTEST_SIZE\nfrom common import write_pod_volume_data\nfrom common import find_backup\nfrom common import wait_for_backup_completion\nfrom common import create_storage_class\nfrom common import wait_for_backup_restore_completed\nfrom common import wait_for_volume_restoration_completed\nfrom common import read_volume_data\nfrom common import pvc_name # NOQA\nfrom common import storage_class # NOQA\nfrom common import pod_make, csi_pv, pvc # NOQA\nfrom common import create_snapshot\nfrom common import expand_attached_volume\nfrom common 
import wait_for_dr_volume_expansion\nfrom common import check_block_device_size\nfrom common import wait_for_volume_expansion\nfrom common import fail_replica_expansion, wait_for_expansion_failure\nfrom common import wait_for_volume_creation, wait_for_volume_restoration_start\nfrom common import write_pod_volume_random_data, get_pod_data_md5sum\nfrom common import prepare_pod_with_data_in_mb\nfrom common import crash_replica_processes\nfrom common import wait_for_volume_condition_scheduled\nfrom common import wait_for_volume_condition_toomanysnapshots\nfrom common import wait_for_volume_degraded, wait_for_volume_healthy\nfrom common import VOLUME_FRONTEND_BLOCKDEV, VOLUME_FRONTEND_ISCSI\nfrom common import VOLUME_CONDITION_SCHEDULED\nfrom common import MESSAGE_TYPE_ERROR\nfrom common import DATA_SIZE_IN_MB_1\nfrom common import SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY\nfrom common import CONDITION_REASON_SCHEDULING_FAILURE\nfrom common import delete_backup\nfrom common import delete_backup_volume\nfrom common import BACKUP_BLOCK_SIZE\nfrom common import assert_backup_state\nfrom common import wait_for_backup_delete\nfrom common import VOLUME_FIELD_ROBUSTNESS, VOLUME_FIELD_READY\nfrom common import VOLUME_ROBUSTNESS_HEALTHY, VOLUME_ROBUSTNESS_FAULTED\nfrom common import DATA_SIZE_IN_MB_2, DATA_SIZE_IN_MB_3\nfrom common import wait_for_backup_to_start\n\nfrom backupstore import backupstore_corrupt_backup_cfg_file\nfrom backupstore import backupstore_delete_volume_cfg_file\nfrom backupstore import backupstore_cleanup\nfrom backupstore import backupstore_count_backup_block_files\nfrom backupstore import backupstore_create_dummy_in_progress_backup\nfrom backupstore import backupstore_delete_dummy_in_progress_backup\nfrom backupstore import backupstore_create_file\nfrom backupstore import backupstore_delete_file\nfrom backupstore import set_random_backupstore # NOQA\nfrom backupstore import backupstore_get_backup_volume_prefix\nfrom backupstore import 
backupstore_wait_for_lock_expiration\n\n\n\n@pytest.mark.coretest # NOQA\ndef test_hosts(client): # NOQA\n \"\"\"\n Check node name and IP\n \"\"\"\n hosts = client.list_node()\n for host in hosts:\n assert host.name is not None\n assert host.address is not None\n\n host_id = []\n for i in range(0, len(hosts)):\n host_id.append(hosts.data[i].name)\n\n host0_from_i = {}\n for i in range(0, len(hosts)):\n if len(host0_from_i) == 0:\n host0_from_i = client.by_id_node(host_id[0])\n else:\n assert host0_from_i.name == \\\n client.by_id_node(host_id[0]).name\n assert host0_from_i.address == \\\n client.by_id_node(host_id[0]).address\n\n\n@pytest.mark.coretest # NOQA\ndef test_settings(client): # NOQA\n \"\"\"\n Check input for settings\n \"\"\"\n\n setting_names = [common.SETTING_BACKUP_TARGET,\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET,\n common.SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE,\n common.SETTING_STORAGE_MINIMAL_AVAILABLE_PERCENTAGE,\n common.SETTING_DEFAULT_REPLICA_COUNT]\n settings = client.list_setting()\n\n settingMap = {}\n for setting in settings:\n settingMap[setting.name] = setting\n\n for name in setting_names:\n assert settingMap[name] is not None\n assert settingMap[name].definition.description is not None\n\n for name in setting_names:\n setting = client.by_id_setting(name)\n assert settingMap[name].value == setting.value\n\n old_value = setting.value\n\n if name == common.SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE:\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"-100\")\n assert \"with invalid \"+name in \\\n str(e.value)\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"testvalue\")\n assert \"with invalid \"+name in \\\n str(e.value)\n setting = client.update(setting, value=\"200\")\n assert setting.value == \"200\"\n setting = client.by_id_setting(name)\n assert setting.value == \"200\"\n elif name == common.SETTING_STORAGE_MINIMAL_AVAILABLE_PERCENTAGE:\n with pytest.raises(Exception) as e:\n 
client.update(setting, value=\"300\")\n assert \"with invalid \"+name in \\\n str(e.value)\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"-30\")\n assert \"with invalid \"+name in \\\n str(e.value)\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"testvalue\")\n assert \"with invalid \"+name in \\\n str(e.value)\n setting = client.update(setting, value=\"30\")\n assert setting.value == \"30\"\n setting = client.by_id_setting(name)\n assert setting.value == \"30\"\n elif name == common.SETTING_BACKUP_TARGET:\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"testvalue$test\")\n assert \"with invalid \"+name in \\\n str(e.value)\n setting = client.update(setting, value=\"nfs://test\")\n assert setting.value == \"nfs://test\"\n setting = client.by_id_setting(name)\n assert setting.value == \"nfs://test\"\n elif name == common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET:\n setting = client.update(setting, value=\"testvalue\")\n assert setting.value == \"testvalue\"\n setting = client.by_id_setting(name)\n assert setting.value == \"testvalue\"\n elif name == common.SETTING_DEFAULT_REPLICA_COUNT:\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"-1\")\n assert \"with invalid \"+name in \\\n str(e.value)\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"testvalue\")\n assert \"with invalid \"+name in \\\n str(e.value)\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"21\")\n assert \"with invalid \"+name in \\\n str(e.value)\n setting = client.update(setting, value=\"2\")\n assert setting.value == \"2\"\n setting = client.by_id_setting(name)\n assert setting.value == \"2\"\n\n setting = client.update(setting, value=old_value)\n assert setting.value == old_value\n\n\ndef volume_rw_test(dev):\n assert volume_valid(dev)\n data = write_device_random_data(dev)\n check_device_data(dev, data)\n\n\n@pytest.mark.coretest # NOQA\ndef test_volume_basic(client, 
volume_name): # NOQA\n \"\"\"\n Test basic volume operations:\n\n 1. Check volume name and parameter\n 2. Create a volume and attach to the current node, then check volume states\n 3. Check soft anti-affinity rule\n 4. Write then read back to check volume data\n \"\"\"\n volume_basic_test(client, volume_name)\n\n\ndef volume_basic_test(client, volume_name, base_image=\"\"): # NOQA\n num_hosts = len(client.list_node())\n num_replicas = 3\n\n with pytest.raises(Exception):\n volume = client.create_volume(name=\"wrong_volume-name-1.0\", size=SIZE,\n numberOfReplicas=2)\n volume = client.create_volume(name=\"wrong_volume-name\", size=SIZE,\n numberOfReplicas=2)\n volume = client.create_volume(name=\"wrong_volume-name\", size=SIZE,\n numberOfReplicas=2,\n frontend=\"invalid_frontend\")\n\n volume = create_and_check_volume(client, volume_name, num_replicas, SIZE,\n base_image)\n assert volume.restoreRequired is False\n\n def validate_volume_basic(expected, actual):\n assert actual.name == expected.name\n assert actual.size == expected.size\n assert actual.numberOfReplicas == expected.numberOfReplicas\n assert actual.frontend == VOLUME_FRONTEND_BLOCKDEV\n assert actual.baseImage == base_image\n assert actual.state == expected.state\n assert actual.created == expected.created\n\n volumes = client.list_volume().data\n assert len(volumes) == 1\n validate_volume_basic(volume, volumes[0])\n\n volumeByName = client.by_id_volume(volume_name)\n validate_volume_basic(volume, volumeByName)\n\n lht_hostId = get_self_host_id()\n volume.attach(hostId=lht_hostId)\n volume = common.wait_for_volume_healthy(client, volume_name)\n assert volume.restoreRequired is False\n\n volumeByName = client.by_id_volume(volume_name)\n validate_volume_basic(volume, volumeByName)\n check_volume_endpoint(volumeByName)\n\n # validate soft anti-affinity\n hosts = {}\n for replica in volume.replicas:\n id = replica.hostId\n assert id != \"\"\n hosts[id] = True\n if num_hosts >= num_replicas:\n assert 
len(hosts) == num_replicas\n else:\n assert len(hosts) == num_hosts\n\n volumes = client.list_volume().data\n assert len(volumes) == 1\n assert volumes[0].name == volume.name\n assert volumes[0].size == volume.size\n assert volumes[0].numberOfReplicas == volume.numberOfReplicas\n assert volumes[0].state == volume.state\n assert volumes[0].created == volume.created\n check_volume_endpoint(volumes[0])\n\n volume = client.by_id_volume(volume_name)\n volume_rw_test(get_volume_endpoint(volume))\n\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n assert volume.restoreRequired is False\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n volumes = client.list_volume().data\n assert len(volumes) == 0\n\n\ndef test_volume_iscsi_basic(client, volume_name): # NOQA\n \"\"\"\n Test basic volume operations with iscsi frontend\n\n 1. Create and attach a volume with iscsi frontend\n 2. Check the volume endpoint and connect it using the iscsi\n initator on the node.\n 3. 
Write then read back volume data for validation\n\n \"\"\"\n volume_iscsi_basic_test(client, volume_name)\n\n\ndef volume_iscsi_basic_test(client, volume_name, base_image=\"\"): # NOQA\n host_id = get_self_host_id()\n volume = create_and_check_volume(client, volume_name, 3, SIZE, base_image,\n VOLUME_FRONTEND_ISCSI)\n volume.attach(hostId=host_id)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n volumes = client.list_volume().data\n assert len(volumes) == 1\n assert volumes[0].name == volume.name\n assert volumes[0].size == volume.size\n assert volumes[0].numberOfReplicas == volume.numberOfReplicas\n assert volumes[0].state == volume.state\n assert volumes[0].created == volume.created\n assert volumes[0].frontend == VOLUME_FRONTEND_ISCSI\n endpoint = get_volume_endpoint(volumes[0])\n\n try:\n dev = iscsi_login(endpoint)\n volume_rw_test(dev)\n finally:\n iscsi_logout(endpoint)\n\n cleanup_volume(client, volume)\n\n\n@pytest.mark.coretest # NOQA\ndef test_snapshot(client, volume_name, base_image=\"\"): # NOQA\n \"\"\"\n Test snapshot operations\n\n 1. Create a volume and attach to the node\n 2. Create the empty snapshot `snap1`\n 3. Generate and write data `snap2_data`, then create `snap2`\n 4. Generate and write data `snap3_data`, then create `snap3`\n 5. List snapshot. Validate the snapshot chain relationship\n 6. Mark `snap3` as removed. Make sure volume's data didn't change\n 7. List snapshot. Make sure `snap3` is marked as removed\n 8. Detach and reattach the volume in maintenance mode.\n 9. Make sure the volume frontend is still `blockdev` but disabled\n 10. Revert to `snap2`\n 11. Detach and reattach the volume with frontend enabled\n 12. Make sure volume's data is `snap2_data`\n 13. List snapshot. Make sure `volume-head` is now `snap2`'s child\n 14. Delete `snap1` and `snap2`\n 15. Purge the snapshot.\n 16. List the snapshot, make sure `snap1` and `snap3`\n are gone. `snap2` is marked as removed.\n 17. 
Check volume data, make sure it's still `snap2_data`.\n \"\"\"\n snapshot_test(client, volume_name, base_image)\n\n\ndef snapshot_test(client, volume_name, base_image): # NOQA\n volume = create_and_check_volume(client, volume_name,\n base_image=base_image)\n\n lht_hostId = get_self_host_id()\n volume = volume.attach(hostId=lht_hostId)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n positions = {}\n\n snap1 = create_snapshot(client, volume_name)\n\n snap2_data = write_volume_random_data(volume, positions)\n snap2 = create_snapshot(client, volume_name)\n\n snap3_data = write_volume_random_data(volume, positions)\n snap3 = create_snapshot(client, volume_name)\n\n snapshots = volume.snapshotList()\n snapMap = {}\n for snap in snapshots:\n snapMap[snap.name] = snap\n\n assert snapMap[snap1.name].name == snap1.name\n assert snapMap[snap1.name].removed is False\n assert snapMap[snap2.name].name == snap2.name\n assert snapMap[snap2.name].parent == snap1.name\n assert snapMap[snap2.name].removed is False\n assert snapMap[snap3.name].name == snap3.name\n assert snapMap[snap3.name].parent == snap2.name\n assert snapMap[snap3.name].removed is False\n\n volume.snapshotDelete(name=snap3.name)\n check_volume_data(volume, snap3_data)\n\n snapshots = volume.snapshotList(volume=volume_name)\n snapMap = {}\n for snap in snapshots:\n snapMap[snap.name] = snap\n\n assert snapMap[snap1.name].name == snap1.name\n assert snapMap[snap1.name].removed is False\n assert snapMap[snap2.name].name == snap2.name\n assert snapMap[snap2.name].parent == snap1.name\n assert snapMap[snap2.name].removed is False\n assert snapMap[snap3.name].name == snap3.name\n assert snapMap[snap3.name].parent == snap2.name\n assert len(snapMap[snap3.name].children) == 1\n assert \"volume-head\" in snapMap[snap3.name].children.keys()\n assert snapMap[snap3.name].removed is True\n\n snap = volume.snapshotGet(name=snap3.name)\n assert snap.name == 
snap3.name\n assert snap.parent == snap3.parent\n assert len(snap3.children) == 1\n assert len(snap.children) == 1\n assert \"volume-head\" in snap3.children.keys()\n assert \"volume-head\" in snap.children.keys()\n assert snap.removed is True\n\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=True)\n common.wait_for_volume_healthy_no_frontend(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n assert volume.disableFrontend is True\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n check_volume_endpoint(volume)\n\n volume.snapshotRevert(name=snap2.name)\n\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n assert volume.disableFrontend is False\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n\n check_volume_data(volume, snap2_data)\n\n snapshots = volume.snapshotList(volume=volume_name)\n snapMap = {}\n for snap in snapshots:\n snapMap[snap.name] = snap\n\n assert snapMap[snap1.name].name == snap1.name\n assert snapMap[snap1.name].removed is False\n assert snapMap[snap2.name].name == snap2.name\n assert snapMap[snap2.name].parent == snap1.name\n assert \"volume-head\" in snapMap[snap2.name].children.keys()\n assert snap3.name in snapMap[snap2.name].children.keys()\n assert snapMap[snap2.name].removed is False\n assert snapMap[snap3.name].name == snap3.name\n assert snapMap[snap3.name].parent == snap2.name\n assert len(snapMap[snap3.name].children) == 0\n assert snapMap[snap3.name].removed is True\n\n volume.snapshotDelete(name=snap1.name)\n volume.snapshotDelete(name=snap2.name)\n\n volume.snapshotPurge()\n volume = wait_for_snapshot_purge(client, volume_name, snap1.name,\n snap3.name)\n\n snapshots = volume.snapshotList(volume=volume_name)\n snapMap = {}\n for snap 
in snapshots:\n snapMap[snap.name] = snap\n assert snap1.name not in snapMap\n assert snap3.name not in snapMap\n\n # it's the parent of volume-head, so it cannot be purged at this time\n assert snapMap[snap2.name].name == snap2.name\n assert snapMap[snap2.name].parent == \"\"\n assert \"volume-head\" in snapMap[snap2.name].children.keys()\n assert snapMap[snap2.name].removed is True\n check_volume_data(volume, snap2_data)\n\n cleanup_volume(client, volume)\n\n\ndef test_backup_status_for_unavailable_replicas(client, volume_name): # NOQA\n \"\"\"\n Test backup status for unavailable replicas\n\n Context:\n\n We want to make sure that we do not try to retrieve the backup status\n of no longer valid replicas (offline, deleted, etc). The reason for\n this is that trying to establish a tcp connection with an old replica\n address `(tcp://ip:port)` could block the engine retrieval process,\n since we will wait upto 1 minute for each individual backup status.\n When this happens for a lot of different statuses the manager will\n terminate the started engine retrieval process since the process would\n not have returned in the maximum allowed time. This would then lead\n to no longer being able to show newly created backups in the UI.\n\n Setup:\n\n 1. Create a volume and attach to the current node\n 2. Run the test for all the available backupstores\n\n Steps:\n\n 1. Create a backup of volume\n 2. Find the replica for that backup\n 3. Disable scheduling on the node of that replica\n 4. Delete the replica\n 5. Wait for volume backup status state to go to error\n 6. Verify backup status error contains `unknown replica`\n 7. Create a new backup\n 8. Verify new backup was successful\n 9. 
Cleanup (delete backups, delete volume)\n \"\"\"\n backup_status_for_unavailable_replicas_test(client, volume_name, SIZE)\n\n\ndef backup_status_for_unavailable_replicas_test(client, volume_name, # NOQA\n size, base_image=\"\"): # NOQA\n volume = create_and_check_volume(client, volume_name, 2, size, base_image)\n\n lht_hostId = get_self_host_id()\n volume = volume.attach(hostId=lht_hostId)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n # test backupTarget for multiple settings\n backupstores = common.get_backupstore_url()\n for backupstore in backupstores:\n if common.is_backupTarget_s3(backupstore):\n backupsettings = backupstore.split(\"$\")\n setting = client.update(setting, value=backupsettings[0])\n assert setting.value == backupsettings[0]\n\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=backupsettings[1])\n assert credential.value == backupsettings[1]\n else:\n setting = client.update(setting, value=backupstore)\n assert setting.value == backupstore\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=\"\")\n assert credential.value == \"\"\n\n # create a successful backup\n bv, b, _, _ = create_backup(client, volume_name)\n backup_id = b.id\n\n # find the replica for this backup\n volume = client.by_id_volume(volume_name)\n for status in volume.backupStatus:\n if status.id == backup_id:\n replica_name = status.replica\n assert replica_name\n\n # disable scheduling on that node\n volume = client.by_id_volume(volume_name)\n for r in volume.replicas:\n if r.name == replica_name:\n node = client.by_id_node(r.hostId)\n node = client.update(node, allowScheduling=False)\n common.wait_for_node_update(client, node.id,\n \"allowScheduling\", False)\n assert node\n\n # remove the replica with the backup\n 
volume.replicaRemove(name=replica_name)\n volume = common.wait_for_volume_degraded(client, volume_name)\n\n # now the backup status should be error unknown replica\n def backup_failure_predicate(b):\n return b.id == backup_id and \"unknown replica\" in b.error\n volume = common.wait_for_backup_state(client, volume_name,\n backup_failure_predicate)\n\n # re enable scheduling on the previously disabled node\n node = client.by_id_node(node.id)\n node = client.update(node, allowScheduling=True)\n common.wait_for_node_update(client, node.id,\n \"allowScheduling\", True)\n\n # delete the old backup\n delete_backup(client, bv.name, b.name)\n volume = wait_for_volume_status(client, volume_name,\n \"lastBackup\", \"\")\n assert volume.lastBackupAt == \"\"\n\n # check that we can create another successful backup\n bv, b, _, _ = create_backup(client, volume_name)\n\n # delete the new backup\n delete_backup(client, bv.name, b.name)\n volume = wait_for_volume_status(client, volume_name, \"lastBackup\", \"\")\n assert volume.lastBackupAt == \"\"\n\n cleanup_volume(client, volume)\n\n\ndef test_backup_block_deletion(set_random_backupstore, client, core_api, volume_name): # NOQA\n \"\"\"\n Test backup block deletion\n\n Context:\n\n We want to make sure that we only delete non referenced backup blocks,\n we also don't want to delete blocks while there other backups in progress.\n The reason for this is that we don't yet know which blocks are required by\n the in progress backup, so blocks deletion could lead to a faulty backup.\n\n Setup:\n\n 1. Setup minio as S3 backupstore\n\n Steps:\n\n 1. Create a volume and attach to the current node\n 2. Write 4 MB to the beginning of the volume (2 x 2MB backup blocks)\n 3. Create backup(1) of the volume\n 4. Overwrite the first of the backup blocks of data on the volume\n 5. Create backup(2) of the volume\n 6. Overwrite the first of the backup blocks of data on the volume\n 7. Create backup(3) of the volume\n 8. 
Verify backup block count == 4\n assert volume[\"DataStored\"] == str(BLOCK_SIZE * expected_count)\n assert count of *.blk files for that volume == expected_count\n 9. Create an artificial in progress backup.cfg file\n json.dumps({\"Name\": name, \"VolumeName\": volume, \"CreatedTime\": \"\"})\n 10. Delete backup(2)\n 11. Verify backup block count == 4 (because of the in progress backup)\n 12. Delete the artificial in progress backup.cfg file\n 13. Delete backup(1)\n 14. Verify backup block count == 2\n 15. Delete backup(3)\n 16. Verify backup block count == 0\n 17. Delete the backup volume\n 18. Cleanup the volume\n \"\"\"\n backupstore_cleanup(client)\n\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume = volume.attach(hostId=host_id)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n data0 = {'pos': 0,\n 'len': 2 * BACKUP_BLOCK_SIZE,\n 'content': common.generate_random_data(2 * BACKUP_BLOCK_SIZE)}\n\n bv0, backup0, _, _ = create_backup(client, volume_name, data0)\n\n data1 = {'pos': 0,\n 'len': BACKUP_BLOCK_SIZE,\n 'content': common.generate_random_data(BACKUP_BLOCK_SIZE)}\n\n bv1, backup1, _, _ = create_backup(client, volume_name, data1)\n\n data2 = {'pos': 0,\n 'len': BACKUP_BLOCK_SIZE,\n 'content': common.generate_random_data(BACKUP_BLOCK_SIZE)}\n\n bv2, backup2, _, _ = create_backup(client, volume_name, data2)\n\n backup_blocks_count = backupstore_count_backup_block_files(client,\n core_api,\n volume_name)\n assert backup_blocks_count == 4\n\n bvs = client.list_backupVolume()\n\n for bv in bvs:\n if bv['name'] == volume_name:\n assert bv['dataStored'] == \\\n str(backup_blocks_count * BACKUP_BLOCK_SIZE)\n\n backupstore_create_dummy_in_progress_backup(client, core_api, volume_name)\n delete_backup(client, volume_name, backup1.name)\n assert backupstore_count_backup_block_files(client,\n core_api,\n volume_name) == 4\n\n backupstore_delete_dummy_in_progress_backup(client, core_api, volume_name)\n\n 
delete_backup(client, volume_name, backup0.name)\n assert backupstore_count_backup_block_files(client,\n core_api,\n volume_name) == 2\n\n delete_backup(client, volume_name, backup2.name)\n assert backupstore_count_backup_block_files(client,\n core_api,\n volume_name) == 0\n\n delete_backup_volume(client, volume_name)\n\n\ndef test_backup_volume_list(set_random_backupstore ,client, core_api): # NOQA\n \"\"\"\n Test backup volume list\n Context:\n We want to make sure that an error when listing a single backup volume\n does not stop us from listing all the other backup volumes. Otherwise a\n single faulty backup can block the retrieval of all known backup volumes.\n Setup:\n 1. Setup minio as S3 backupstore\n Steps:\n 1. Create a volume(1,2) and attach to the current node\n 2. write some data to volume(1,2)\n 3. Create a backup(1) of volume(1,2)\n 4. request a backup list\n 5. verify backup list contains no error messages for volume(1,2)\n 6. verify backup list contains backup(1) for volume(1,2)\n 7. place a file named \"backup_1234@failure.cfg\"\n into the backups folder of volume(1)\n 8. request a backup list\n 9. verify backup list contains no error messages for volume(1,2)\n 10. verify backup list contains backup(1) for volume(1,2)\n 11. delete backup volumes(1 & 2)\n 12. 
cleanup\n \"\"\"\n backupstore_cleanup(client)\n\n # create 2 volumes.\n volume1_name, volume2_name = generate_volume_name(), generate_volume_name()\n\n volume1 = create_and_check_volume(client, volume1_name)\n volume2 = create_and_check_volume(client, volume2_name)\n\n host_id = get_self_host_id()\n volume1 = volume1.attach(hostId=host_id)\n volume1 = common.wait_for_volume_healthy(client, volume1_name)\n volume2 = volume2.attach(hostId=host_id)\n volume2 = common.wait_for_volume_healthy(client, volume2_name)\n\n bv1, backup1, snap1, _ = create_backup(client, volume1_name)\n bv2, backup2, snap2, _ = create_backup(client, volume2_name)\n\n def verify_no_err():\n '''\n request a backup list\n verify backup list contains no error messages for volume(1,2)\n verify backup list contains backup(1) for volume(1,2)\n '''\n for _ in range(RETRY_COUNTS):\n verified_bvs = set()\n backup_volume_list = client.list_backupVolume()\n for bv in backup_volume_list:\n if bv.name in (volume1_name, volume2_name):\n assert not bv['messages']\n for b in bv.backupList().data:\n if bv.name == volume1_name \\\n and b.name == backup1.name \\\n or bv.name == volume2_name \\\n and b.name == backup2.name:\n verified_bvs.add(bv.name)\n if len(verified_bvs) == 2:\n break\n time.sleep(RETRY_INTERVAL)\n assert len(verified_bvs) == 2\n\n verify_no_err()\n\n # place a bad named file into the backups folder of volume(1)\n prefix = \\\n backupstore_get_backup_volume_prefix(client, volume1_name) + \"/backups\"\n backupstore_create_file(client,\n core_api,\n prefix + \"/backup_1234@failure.cfg\")\n\n verify_no_err()\n\n backupstore_delete_file(client,\n core_api,\n prefix + \"/backup_1234@failure.cfg\")\n\n backupstore_cleanup(client)\n\n\ndef test_backup_metadata_deletion(set_random_backupstore, client, core_api, volume_name): # NOQA\n \"\"\"\n Test backup metadata deletion\n\n Context:\n\n We want to be able to delete the metadata (.cfg) files,\n even if they are corrupt or in a bad state (missing 
volume.cfg).\n\n Setup:\n\n 1. Setup minio as S3 backupstore\n 2. Cleanup backupstore\n\n Steps:\n\n 1. Create volume(1,2) and attach to the current node\n 2. write some data to volume(1,2)\n 3. Create backup(1,2) of volume(1,2)\n 4. request a backup list\n 5. verify backup list contains no error messages for volume(1,2)\n 6. verify backup list contains backup(1,2) information for volume(1,2)\n 7. corrupt backup(1) of volume(1)\n (overwrite) backup1_cfg.write(\"{corrupt: definitely\")\n 8. request a backup list\n 9. verify backup list contains no error messages for volume(1,2)\n 10. verify backup list contains backup(1,2) information for volume(1,2)\n 11. verify backup list backup(1) of volume(1) contains error message\n 12. delete backup(1) of volume(1,2)\n 10. request a backup list\n 11. verify backup list contains no error messages for volume(1,2)\n 12. verify backup list only contains backup(2) information for volume(1,2)\n 13. delete volume.cfg of volume(2)\n 14. request backup volume deletion for volume(2)\n 15. verify that volume(2) has been deleted in the backupstore.\n 16. request a backup list\n 17. verify backup list only contains volume(1) and no errors\n 18. verify backup list only contains backup(2) information for volume(1)\n 19. delete backup volume(1)\n 20. verify that volume(1) has been deleted in the backupstore.\n 21. 
cleanup\n \"\"\"\n backupstore_cleanup(client)\n\n volume1_name = volume_name + \"-1\"\n volume2_name = volume_name + \"-2\"\n\n host_id = get_self_host_id()\n\n volume1 = create_and_check_volume(client, volume1_name)\n volume2 = create_and_check_volume(client, volume2_name)\n\n volume1.attach(hostId=host_id)\n volume2.attach(hostId=host_id)\n\n volume1 = wait_for_volume_healthy(client, volume1_name)\n volume2 = wait_for_volume_healthy(client, volume2_name)\n\n v1bv, v1b1, _, _ = create_backup(client, volume1_name)\n v2bv, v2b1, _, _ = create_backup(client, volume2_name)\n _, v1b2, _, _ = create_backup(client, volume1_name)\n _, v2b2, _, _ = create_backup(client, volume2_name)\n\n bvs = client.list_backupVolume()\n\n for bv in bvs:\n backups = bv.backupList()\n for b in backups:\n assert b.messages is None\n\n v1b1_new = v1bv.backupGet(name=v1b1.name)\n assert_backup_state(v1b1, v1b1_new)\n\n v1b2_new = v1bv.backupGet(name=v1b2.name)\n assert_backup_state(v1b2, v1b2_new)\n\n v2b1_new = v2bv.backupGet(name=v2b1.name)\n assert_backup_state(v2b1, v2b1_new)\n\n v2b2_new = v2bv.backupGet(name=v2b2.name)\n assert_backup_state(v2b2, v2b2_new)\n\n backupstore_corrupt_backup_cfg_file(client,\n core_api,\n volume1_name,\n v1b1.name)\n\n bvs = client.list_backupVolume()\n\n for bv in bvs:\n if bv.name == volume1_name:\n backups = bv.backupList()\n for b in backups:\n if b.name == v1b1.name:\n assert b.messages is not None\n else:\n assert b.messages is None\n\n v1b2_new = v1bv.backupGet(name=v1b2.name)\n assert_backup_state(v1b2, v1b2_new)\n\n v2b1_new = v2bv.backupGet(name=v2b1.name)\n assert_backup_state(v2b1, v2b1_new)\n\n v2b2_new = v2bv.backupGet(name=v2b2.name)\n assert_backup_state(v2b2, v2b2_new)\n\n delete_backup(client, volume1_name, v1b1.name)\n delete_backup(client, volume2_name, v2b1.name)\n\n bvs = client.list_backupVolume()\n\n for bv in bvs:\n backups = bv.backupList()\n for b in backups:\n assert b.messages is None\n\n assert len(v1bv.backupList()) == 1\n 
assert len(v2bv.backupList()) == 1\n assert v1bv.backupList()[0].name == v1b2.name\n assert v2bv.backupList()[0].name == v2b2.name\n\n backupstore_delete_volume_cfg_file(client, core_api, volume2_name)\n\n delete_backup(client, volume2_name, v2b2.name)\n assert len(v2bv.backupList()) == 0\n\n delete_backup_volume(client, v2bv.name)\n assert backupstore_count_backup_block_files(client,\n core_api,\n volume2_name) == 0\n\n bvs = client.list_backupVolume()\n for bv in bvs:\n if bv.name == volume1_name:\n backups = bv.backupList()\n for b in backups:\n assert b.messages is None\n\n v1b2_new = v1bv.backupGet(name=v1b2.name)\n assert_backup_state(v1b2, v1b2_new)\n assert v1b2_new.messages == v1b2.messages is None\n\n delete_backup(client, volume1_name, v1b2.name)\n assert backupstore_count_backup_block_files(client,\n core_api,\n volume1_name) == 0\n\n\n@pytest.mark.coretest # NOQA\ndef test_backup(client, volume_name): # NOQA\n \"\"\"\n Test basic backup\n\n Setup:\n\n 1. Create a volume and attach to the current node\n 2. Run the test for all the available backupstores.\n\n Steps:\n\n 1. Create a backup of volume\n 2. Restore the backup to a new volume\n 3. Attach the new volume and make sure the data is the same as the old one\n 4. Detach the volume and delete the backup.\n 5. Wait for the restored volume's `lastBackup` to be cleaned (due to remove\n the backup)\n 6. 
Delete the volume\n \"\"\"\n backup_test(client, volume_name, SIZE)\n\n\ndef backup_test(client, volume_name, size, base_image=\"\"): # NOQA\n volume = create_and_check_volume(client, volume_name, 2, size, base_image)\n\n lht_hostId = get_self_host_id()\n volume = volume.attach(hostId=lht_hostId)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n # test backupTarget for multiple settings\n backupstores = common.get_backupstore_url()\n for backupstore in backupstores:\n if common.is_backupTarget_s3(backupstore):\n backupsettings = backupstore.split(\"$\")\n setting = client.update(setting, value=backupsettings[0])\n assert setting.value == backupsettings[0]\n\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=backupsettings[1])\n assert credential.value == backupsettings[1]\n else:\n setting = client.update(setting, value=backupstore)\n assert setting.value == backupstore\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=\"\")\n assert credential.value == \"\"\n\n backupstore_test(client, lht_hostId, volume_name, size)\n\n cleanup_volume(client, volume)\n\n\ndef backupstore_test(client, host_id, volname, size): # NOQA\n bv, b, snap2, data = create_backup(client, volname)\n\n # test restore\n restore_name = generate_volume_name()\n volume = client.create_volume(name=restore_name, size=size,\n numberOfReplicas=2,\n fromBackup=b.url)\n\n volume = common.wait_for_volume_restoration_completed(client, restore_name)\n volume = common.wait_for_volume_detached(client, restore_name)\n assert volume.name == restore_name\n assert volume.size == size\n assert volume.numberOfReplicas == 2\n assert volume.state == \"detached\"\n assert volume.restoreRequired is False\n\n volume = volume.attach(hostId=host_id)\n volume = 
common.wait_for_volume_healthy(client, restore_name)\n check_volume_data(volume, data)\n volume = volume.detach()\n volume = common.wait_for_volume_detached(client, restore_name)\n\n delete_backup(client, bv.name, b.name)\n volume = wait_for_volume_status(client, volume.name,\n \"lastBackup\", \"\")\n assert volume.lastBackupAt == \"\"\n\n client.delete(volume)\n volume = wait_for_volume_delete(client, restore_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_backup_labels(client, random_labels, volume_name): # NOQA\n \"\"\"\n Test that the proper Labels are applied when creating a Backup manually.\n\n 1. Create a volume\n 2. Run the following steps on all backupstores\n 3. Create a backup with some random labels\n 4. Get backup from backupstore, verify the labels are set on the backups\n \"\"\"\n backup_labels_test(client, random_labels, volume_name)\n\n\ndef backup_labels_test(client, random_labels, volume_name, size=SIZE, base_image=\"\"): # NOQA\n host_id = get_self_host_id()\n\n volume = create_and_check_volume(client, volume_name, 2, size, base_image)\n\n volume.attach(hostId=host_id)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n # test backupTarget for multiple settings\n backupstores = common.get_backupstore_url()\n for backupstore in backupstores:\n if common.is_backupTarget_s3(backupstore):\n backupsettings = backupstore.split(\"$\")\n setting = client.update(setting, value=backupsettings[0])\n assert setting.value == backupsettings[0]\n\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=backupsettings[1])\n assert credential.value == backupsettings[1]\n else:\n setting = client.update(setting, value=backupstore)\n assert setting.value == backupstore\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=\"\")\n 
assert credential.value == \"\"\n\n bv, b, _, _ = create_backup(client, volume_name, labels=random_labels)\n # If we're running the test with a BaseImage, check that this Label is\n # set properly.\n backup = bv.backupGet(name=b.name)\n if base_image:\n assert backup.labels.get(common.BASE_IMAGE_LABEL) == base_image\n # One extra Label from the BaseImage being set.\n assert len(backup.labels) == len(random_labels) + 1\n else:\n assert len(backup.labels) == len(random_labels)\n\n cleanup_volume(client, volume)\n\n\n@pytest.mark.coretest # NOQA\ndef test_restore_inc(client, core_api, volume_name, pod): # NOQA\n \"\"\"\n Test restore from disaster recovery volume (incremental restore)\n\n Run test against all the backupstores\n\n 1. Create a volume and attach to the current node\n 2. Generate `data0`, write to the volume, make a backup `backup0`\n 3. Create three DR(standby) volumes from the backup: `sb_volume0/1/2`\n 4. Wait for all three DR volumes to start the initial restoration\n 5. Verify DR volumes's `lastBackup` is `backup0`\n 6. Verify snapshot/pv/pvc/change backup target are not allowed as long\n as the DR volume exists\n 7. Activate standby `sb_volume0` and attach it to check the volume data\n 8. Generate `data1` and write to the original volume and create `backup1`\n 9. Make sure `sb_volume1`'s `lastBackup` field has been updated to\n `backup1`\n 10. Wait for `sb_volume1` to finish incremental restoration then activate\n 11. Attach and check `sb_volume1`'s data\n 12. Generate `data2` and write to the original volume and create `backup2`\n 13. Make sure `sb_volume2`'s `lastBackup` field has been updated to\n `backup1`\n 14. Wait for `sb_volume2` to finish incremental restoration then activate\n 15. Attach and check `sb_volume2`'s data\n 16. 
Create PV, PVC and Pod to use `sb_volume2`, check PV/PVC/POD are good\n\n FIXME: Step 16 works because the disk will be treated as a unformatted disk\n \"\"\"\n\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n # test backupTarget for multiple settings\n backupstores = common.get_backupstore_url()\n for backupstore in backupstores:\n if common.is_backupTarget_s3(backupstore):\n backupsettings = backupstore.split(\"$\")\n setting = client.update(setting, value=backupsettings[0])\n assert setting.value == backupsettings[0]\n\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=backupsettings[1])\n assert credential.value == backupsettings[1]\n else:\n setting = client.update(setting, value=backupstore)\n assert setting.value == backupstore\n credential = client.by_id_setting(\n common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)\n credential = client.update(credential, value=\"\")\n assert credential.value == \"\"\n\n restore_inc_test(client, core_api, volume_name, pod)\n\n\ndef restore_inc_test(client, core_api, volume_name, pod): # NOQA\n std_volume = create_and_check_volume(client, volume_name, 2, SIZE)\n lht_host_id = get_self_host_id()\n std_volume.attach(hostId=lht_host_id)\n std_volume = common.wait_for_volume_healthy(client, volume_name)\n\n with pytest.raises(Exception) as e:\n std_volume.activate(frontend=VOLUME_FRONTEND_BLOCKDEV)\n assert \"already in active mode\" in str(e.value)\n\n data0 = {'len': 4 * 1024, 'pos': 0}\n data0['content'] = common.generate_random_data(data0['len'])\n bv, backup0, _, data0 = create_backup(\n client, volume_name, data0)\n\n sb_volume0_name = \"sb-0-\" + volume_name\n sb_volume1_name = \"sb-1-\" + volume_name\n sb_volume2_name = \"sb-2-\" + volume_name\n client.create_volume(name=sb_volume0_name, size=SIZE,\n numberOfReplicas=2, fromBackup=backup0.url,\n frontend=\"\", standby=True)\n client.create_volume(name=sb_volume1_name, 
size=SIZE,\n numberOfReplicas=2, fromBackup=backup0.url,\n frontend=\"\", standby=True)\n client.create_volume(name=sb_volume2_name, size=SIZE,\n numberOfReplicas=2, fromBackup=backup0.url,\n frontend=\"\", standby=True)\n wait_for_backup_restore_completed(client, sb_volume0_name, backup0.name)\n wait_for_backup_restore_completed(client, sb_volume1_name, backup0.name)\n wait_for_backup_restore_completed(client, sb_volume2_name, backup0.name)\n\n sb_volume0 = common.wait_for_volume_healthy_no_frontend(client,\n sb_volume0_name)\n sb_volume1 = common.wait_for_volume_healthy_no_frontend(client,\n sb_volume1_name)\n sb_volume2 = common.wait_for_volume_healthy_no_frontend(client,\n sb_volume2_name)\n\n for i in range(RETRY_COUNTS):\n client.list_backupVolume()\n sb_volume0 = client.by_id_volume(sb_volume0_name)\n sb_volume1 = client.by_id_volume(sb_volume1_name)\n sb_volume2 = client.by_id_volume(sb_volume2_name)\n sb_engine0 = get_volume_engine(sb_volume0)\n sb_engine1 = get_volume_engine(sb_volume1)\n sb_engine2 = get_volume_engine(sb_volume2)\n if sb_volume0.restoreRequired is False or \\\n sb_volume1.restoreRequired is False or \\\n sb_volume2.restoreRequired is False or \\\n not sb_engine0.lastRestoredBackup or \\\n not sb_engine1.lastRestoredBackup or \\\n not sb_engine2.lastRestoredBackup:\n time.sleep(RETRY_INTERVAL)\n else:\n break\n assert sb_volume0.standby is True\n assert sb_volume0.lastBackup == backup0.name\n assert sb_volume0.frontend == \"\"\n assert sb_volume0.restoreRequired is True\n sb_engine0 = get_volume_engine(sb_volume0)\n assert sb_engine0.lastRestoredBackup == backup0.name\n assert sb_engine0.requestedBackupRestore == backup0.name\n assert sb_volume1.standby is True\n assert sb_volume1.lastBackup == backup0.name\n assert sb_volume1.frontend == \"\"\n assert sb_volume1.restoreRequired is True\n sb_engine1 = get_volume_engine(sb_volume1)\n assert sb_engine1.lastRestoredBackup == backup0.name\n assert sb_engine1.requestedBackupRestore == 
backup0.name\n assert sb_volume2.standby is True\n assert sb_volume2.lastBackup == backup0.name\n assert sb_volume2.frontend == \"\"\n assert sb_volume2.restoreRequired is True\n sb_engine2 = get_volume_engine(sb_volume2)\n assert sb_engine2.lastRestoredBackup == backup0.name\n assert sb_engine2.requestedBackupRestore == backup0.name\n\n sb0_snaps = sb_volume0.snapshotList()\n assert len(sb0_snaps) == 2\n for s in sb0_snaps:\n if s.name != \"volume-head\":\n sb0_snap = s\n assert sb0_snaps\n with pytest.raises(Exception) as e:\n sb_volume0.snapshotCreate()\n assert \"cannot create snapshot for standby volume\" in str(e.value)\n with pytest.raises(Exception) as e:\n sb_volume0.snapshotRevert(name=sb0_snap.name)\n assert \"cannot revert snapshot for standby volume\" in str(e.value)\n with pytest.raises(Exception) as e:\n sb_volume0.snapshotDelete(name=sb0_snap.name)\n assert \"cannot delete snapshot for standby volume\" in str(e.value)\n with pytest.raises(Exception) as e:\n sb_volume0.snapshotBackup(name=sb0_snap.name)\n assert \"cannot create backup for standby volume\" in str(e.value)\n with pytest.raises(Exception) as e:\n sb_volume0.pvCreate(pvName=sb_volume0_name)\n assert \"cannot create PV for standby volume\" in str(e.value)\n with pytest.raises(Exception) as e:\n sb_volume0.pvcCreate(pvcName=sb_volume0_name)\n assert \"cannot create PVC for standby volume\" in str(e.value)\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n with pytest.raises(Exception) as e:\n client.update(setting, value=\"random.backup.target\")\n assert \"cannot modify BackupTarget \" \\\n \"since there are existing standby volumes\" in str(e.value)\n with pytest.raises(Exception) as e:\n sb_volume0.activate(frontend=\"wrong_frontend\")\n assert \"invalid frontend\" in str(e.value)\n\n activate_standby_volume(client, sb_volume0_name)\n sb_volume0 = client.by_id_volume(sb_volume0_name)\n sb_volume0.attach(hostId=lht_host_id)\n sb_volume0 = 
common.wait_for_volume_healthy(client, sb_volume0_name)\n check_volume_data(sb_volume0, data0, False)\n\n zero_string = b'\\x00'.decode('utf-8')\n _, backup1, _, data1 = create_backup(\n client, volume_name,\n {'len': 2 * 1024, 'pos': 0, 'content': zero_string * 2 * 1024})\n # use this api to update field `last backup`\n client.list_backupVolume()\n check_volume_last_backup(client, sb_volume1_name, backup1.name)\n activate_standby_volume(client, sb_volume1_name)\n sb_volume1 = client.by_id_volume(sb_volume1_name)\n sb_volume1.attach(hostId=lht_host_id)\n sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)\n data0_modified = {\n 'len': data0['len'] - data1['len'],\n 'pos': data1['len'],\n 'content': data0['content'][data1['len']:],\n }\n check_volume_data(sb_volume1, data0_modified, False)\n check_volume_data(sb_volume1, data1)\n\n data2 = {'len': 1 * 1024 * 1024, 'pos': 0}\n data2['content'] = common.generate_random_data(data2['len'])\n _, backup2, _, data2 = create_backup(client, volume_name, data2)\n\n # HACK: #558 we use a side effect of the list call\n # to update the volumes last backup field\n client.list_backupVolume()\n check_volume_last_backup(client, sb_volume2_name, backup2.name)\n activate_standby_volume(client, sb_volume2_name)\n sb_volume2 = client.by_id_volume(sb_volume2_name)\n sb_volume2.attach(hostId=lht_host_id)\n sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)\n check_volume_data(sb_volume2, data2)\n\n # allocated this active volume to a pod\n sb_volume2.detach()\n sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)\n\n create_pv_for_volume(client, core_api, sb_volume2, sb_volume2_name)\n create_pvc_for_volume(client, core_api, sb_volume2, sb_volume2_name)\n\n sb_volume2_pod_name = \"pod-\" + sb_volume2_name\n pod['metadata']['name'] = sb_volume2_pod_name\n pod['spec']['volumes'] = [{\n 'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],\n 'persistentVolumeClaim': {\n 
'claimName': sb_volume2_name,\n },\n }]\n create_and_wait_pod(core_api, pod)\n\n sb_volume2 = client.by_id_volume(sb_volume2_name)\n k_status = sb_volume2.kubernetesStatus\n workloads = k_status.workloadsStatus\n assert k_status.pvName == sb_volume2_name\n assert k_status.pvStatus == 'Bound'\n assert len(workloads) == 1\n for i in range(RETRY_COUNTS):\n if workloads[0].podStatus == 'Running':\n break\n time.sleep(RETRY_INTERVAL)\n sb_volume2 = client.by_id_volume(sb_volume2_name)\n k_status = sb_volume2.kubernetesStatus\n workloads = k_status.workloadsStatus\n assert len(workloads) == 1\n assert workloads[0].podName == sb_volume2_pod_name\n assert workloads[0].podStatus == 'Running'\n assert not workloads[0].workloadName\n assert not workloads[0].workloadType\n assert k_status.namespace == 'default'\n assert k_status.pvcName == sb_volume2_name\n assert not k_status.lastPVCRefAt\n assert not k_status.lastPodRefAt\n\n delete_and_wait_pod(core_api, sb_volume2_pod_name)\n delete_and_wait_pvc(core_api, sb_volume2_name)\n delete_and_wait_pv(core_api, sb_volume2_name)\n\n # cleanup\n std_volume.detach()\n sb_volume0.detach()\n sb_volume1.detach()\n std_volume = common.wait_for_volume_detached(client, volume_name)\n sb_volume0 = common.wait_for_volume_detached(client, sb_volume0_name)\n sb_volume1 = common.wait_for_volume_detached(client, sb_volume1_name)\n sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)\n\n backupstore_cleanup(client)\n\n client.delete(std_volume)\n client.delete(sb_volume0)\n client.delete(sb_volume1)\n client.delete(sb_volume2)\n\n wait_for_volume_delete(client, volume_name)\n wait_for_volume_delete(client, sb_volume0_name)\n wait_for_volume_delete(client, sb_volume1_name)\n wait_for_volume_delete(client, sb_volume2_name)\n\n volumes = client.list_volume()\n assert len(volumes) == 0\n\n\ndef test_deleting_backup_volume(client, volume_name): # NOQA\n \"\"\"\n Test deleting backup volumes\n\n 1. Create volume and create backup\n 2. 
Delete the backup and make sure it's gone in the backupstore\n \"\"\"\n lht_host_id = get_self_host_id()\n volume = create_and_check_volume(client, volume_name)\n\n volume.attach(hostId=lht_host_id)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n bv, _, snap1, _ = create_backup(client, volume_name)\n _, _, snap2, _ = create_backup(client, volume_name)\n\n delete_backup_volume(client, volume_name)\n cleanup_volume(client, volume)\n\n\n@pytest.mark.coretest # NOQA\ndef test_listing_backup_volume(client, base_image=\"\"): # NOQA\n \"\"\"\n Test listing backup volumes\n\n 1. Create three volumes: `volume1/2/3`\n 2. Setup NFS backupstore since we can manipulate the content easily\n 3. Create multiple snapshots for all three volumes\n 4. Rename `volume1`'s `volume.cfg` to `volume.cfg.tmp` in backupstore\n 5. List backup volumes. Make sure `volume1` errors out but found other two\n 6. Restore `volume1`'s `volume.cfg`.\n 7. Make sure now backup volume `volume1` can be found\n 8. Delete backups for `volume1/2`, make sure they cannot be found later\n 9. Corrupt a backup.cfg on volume3\n 11. Check that the backup is listed with the other backups of volume3\n 12. Verify that the corrupted backup has Messages of type error\n 13. Check that backup inspection for the previously corrupted backup fails\n 14. 
Delete backups for `volume3`, make sure they cannot be found later\n \"\"\"\n lht_hostId = get_self_host_id()\n\n # create 3 volumes.\n volume1_name = generate_volume_name()\n volume2_name = generate_volume_name()\n volume3_name = generate_volume_name()\n\n volume1 = create_and_check_volume(client, volume1_name)\n volume2 = create_and_check_volume(client, volume2_name)\n volume3 = create_and_check_volume(client, volume3_name)\n\n volume1.attach(hostId=lht_hostId)\n volume1 = common.wait_for_volume_healthy(client, volume1_name)\n volume2.attach(hostId=lht_hostId)\n volume2 = common.wait_for_volume_healthy(client, volume2_name)\n volume3.attach(hostId=lht_hostId)\n volume3 = common.wait_for_volume_healthy(client, volume3_name)\n\n # we only test NFS here.\n # Since it is difficult to directly remove volume.cfg from s3 buckets\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n backupstores = common.get_backupstore_url()\n for backupstore in backupstores:\n if common.is_backupTarget_nfs(backupstore):\n updated = False\n for i in range(RETRY_COMMAND_COUNT):\n nfs_url = backupstore.strip(\"nfs://\")\n setting = client.update(setting, value=backupstore)\n assert setting.value == backupstore\n setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)\n if \"nfs\" in setting.value:\n updated = True\n break\n assert updated\n\n _, _, snap1, _ = create_backup(client, volume1_name)\n _, _, snap2, _ = create_backup(client, volume2_name)\n _, _, snap3, _ = create_backup(client, volume3_name)\n subprocess.check_output([\"sync\"])\n _, _, snap4, _ = create_backup(client, volume3_name)\n subprocess.check_output([\"sync\"])\n _, _, snap5, _ = create_backup(client, volume3_name)\n subprocess.check_output([\"sync\"])\n\n # invalidate backup volume 1 by renaming volume.cfg to volume.cfg.tmp\n cmd = [\"mkdir\", \"-p\", \"/mnt/nfs\"]\n subprocess.check_output(cmd)\n cmd = [\"mount\", \"-t\", \"nfs4\", nfs_url, \"/mnt/nfs\"]\n subprocess.check_output(cmd)\n cmd = 
[\"find\", \"/mnt/nfs\", \"-type\", \"d\", \"-name\", volume1_name]\n volume1_backup_volume_path = \\\n subprocess.check_output(cmd).strip().decode('utf-8')\n\n cmd = [\"find\", volume1_backup_volume_path, \"-name\", \"volume.cfg\"]\n volume1_backup_volume_cfg_path = \\\n subprocess.check_output(cmd).strip().decode('utf-8')\n cmd = [\"mv\", volume1_backup_volume_cfg_path,\n volume1_backup_volume_cfg_path + \".tmp\"]\n subprocess.check_output(cmd)\n subprocess.check_output([\"sync\"])\n\n found1 = found2 = found3 = False\n for i in range(RETRY_COUNTS):\n bvs = client.list_backupVolume()\n for bv in bvs:\n if bv.name == volume1_name:\n if \"error\" in bv.messages:\n assert \"volume.cfg\" in bv.messages.error.lower()\n found1 = True\n elif bv.name == volume2_name:\n assert not bv.messages\n found2 = True\n elif bv.name == volume3_name:\n assert not bv.messages\n found3 = True\n if found1 & found2 & found3:\n break\n time.sleep(RETRY_INTERVAL)\n assert found1 & found2 & found3\n\n cmd = [\"mv\", volume1_backup_volume_cfg_path + \".tmp\",\n volume1_backup_volume_cfg_path]\n subprocess.check_output(cmd)\n subprocess.check_output([\"sync\"])\n\n bv1, b1 = common.find_backup(client, volume1_name, snap1.name)\n common.delete_backup(client, volume1_name, b1.name)\n\n bv2, b2 = common.find_backup(client, volume2_name, snap2.name)\n common.delete_backup(client, volume2_name, b2.name)\n\n # corrupt backup for snap4\n bv4, b4 = common.find_backup(client, volume3_name, snap4.name)\n b4_cfg_name = \"backup_\" + b4[\"name\"] + \".cfg\"\n cmd = [\"find\", \"/mnt/nfs\", \"-type\", \"d\", \"-name\", volume3_name]\n v3_backup_path = subprocess.check_output(cmd).strip().decode('utf-8')\n b4_cfg_path = os.path.join(v3_backup_path, \"backups\", b4_cfg_name)\n assert os.path.exists(b4_cfg_path)\n b4_tmp_cfg_path = os.path.join(v3_backup_path, b4_cfg_name)\n os.rename(b4_cfg_path, b4_tmp_cfg_path)\n assert os.path.exists(b4_tmp_cfg_path)\n\n corrupt_backup = open(b4_cfg_path, \"w\")\n 
assert corrupt_backup\n assert corrupt_backup.write(\"{corrupt: definitely\") > 0\n corrupt_backup.close()\n subprocess.check_output([\"sync\"])\n\n # a corrupt backup cannot provide information about the snapshot\n for i in range(RETRY_COMMAND_COUNT):\n found = False\n for b in bv4.backupList().data:\n if b.name in b4[\"name\"]:\n found = True\n assert b.messages is not None\n assert MESSAGE_TYPE_ERROR in b.messages\n break\n assert found\n\n # cleanup b4\n os.remove(b4_cfg_path)\n os.rename(b4_tmp_cfg_path, b4_cfg_path)\n subprocess.check_output([\"sync\"])\n\n bv3, b3 = common.find_backup(client, volume3_name, snap3.name)\n common.delete_backup(client, volume3_name, b3.name)\n bv4, b4 = common.find_backup(client, volume3_name, snap4.name)\n common.delete_backup(client, volume3_name, b4.name)\n bv5, b5 = common.find_backup(client, volume3_name, snap5.name)\n common.delete_backup(client, volume3_name, b5.name)\n\n volume1.detach()\n volume1 = common.wait_for_volume_detached(client, volume1_name)\n client.delete(volume1)\n wait_for_volume_delete(client, volume1_name)\n\n volume2.detach()\n volume2 = common.wait_for_volume_detached(client, volume2_name)\n client.delete(volume2)\n wait_for_volume_delete(client, volume2_name)\n\n volume3.detach()\n volume3 = common.wait_for_volume_detached(client, volume3_name)\n client.delete(volume3)\n wait_for_volume_delete(client, volume3_name)\n\n volumes = client.list_volume()\n assert len(volumes) == 0\n\n\n@pytest.mark.coretest # NOQA\ndef test_volume_multinode(client, volume_name): # NOQA\n \"\"\"\n Test the volume can be attached on multiple nodes\n\n 1. Create one volume\n 2. 
Attach it on every node once, verify the state, then detach it\n \"\"\"\n hosts = [node['name'] for node in client.list_node()]\n\n volume = client.create_volume(name=volume_name,\n size=SIZE,\n numberOfReplicas=2)\n volume = common.wait_for_volume_detached(client,\n volume_name)\n\n for host_id in hosts:\n volume = volume.attach(hostId=host_id)\n volume = common.wait_for_volume_healthy(client,\n volume_name)\n engine = get_volume_engine(volume)\n assert engine.hostId == host_id\n volume = volume.detach()\n volume = common.wait_for_volume_detached(client,\n volume_name)\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n volumes = client.list_volume()\n assert len(volumes) == 0\n\n\n@pytest.mark.coretest # NOQA\ndef test_volume_scheduling_failure(client, volume_name): # NOQA\n '''\n Test fail to schedule by disable scheduling for all the nodes\n\n Also test cannot attach a scheduling failed volume\n\n 1. Disable `allowScheduling` for all nodes\n 2. Create a volume.\n 3. Verify the volume condition `Scheduled` is false\n 4. Verify the volume is not ready for workloads\n 5. Verify attaching the volume will result in error\n 6. Enable `allowScheduling` for all nodes\n 7. Volume should be automatically scheduled (condition become true)\n 8. 
Volume can be attached now\n '''\n nodes = client.list_node()\n assert len(nodes) > 0\n\n for node in nodes:\n node = client.update(node, allowScheduling=False)\n node = common.wait_for_node_update(client, node.id,\n \"allowScheduling\", False)\n\n volume = client.create_volume(name=volume_name, size=SIZE,\n numberOfReplicas=3)\n\n volume = common.wait_for_volume_condition_scheduled(client, volume_name,\n \"status\",\n CONDITION_STATUS_FALSE)\n volume = common.wait_for_volume_detached(client, volume_name)\n assert not volume.ready\n self_node = get_self_host_id()\n with pytest.raises(Exception) as e:\n volume.attach(hostId=self_node)\n assert \"cannot be scheduled\" in str(e.value)\n\n for node in nodes:\n node = client.update(node, allowScheduling=True)\n node = common.wait_for_node_update(client, node.id,\n \"allowScheduling\", True)\n\n volume = common.wait_for_volume_condition_scheduled(client, volume_name,\n \"status\",\n CONDITION_STATUS_TRUE)\n volume = common.wait_for_volume_detached(client, volume_name)\n volume = volume.attach(hostId=self_node)\n volume = common.wait_for_volume_healthy(client, volume_name)\n endpoint = get_volume_endpoint(volume)\n volume_rw_test(endpoint)\n\n volume = volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_setting_default_replica_count(client, volume_name): # NOQA\n \"\"\"\n Test `Default Replica Count` setting\n\n 1. Set default replica count in the global settings to 5\n 2. Create a volume without specify the replica count\n 3. 
The volume should have 5 replicas (instead of the previous default 3)\n \"\"\"\n setting = client.by_id_setting(common.SETTING_DEFAULT_REPLICA_COUNT)\n old_value = setting.value\n setting = client.update(setting, value=\"5\")\n\n volume = client.create_volume(name=volume_name, size=SIZE)\n volume = common.wait_for_volume_detached(client, volume_name)\n assert len(volume.replicas) == int(setting.value)\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n setting = client.update(setting, value=old_value)\n\n\n@pytest.mark.coretest # NOQA\ndef test_volume_update_replica_count(client, volume_name): # NOQA\n \"\"\"\n Test updating volume's replica count\n\n 1. Create a volume with 2 replicas\n 2. Attach the volume\n 3. Increase the replica to 3.\n 4. Volume will become degraded and start rebuilding\n 5. Wait for rebuilding to complete\n 6. Update the replica count to 2. Volume should remain healthy\n 7. Remove 1 replicas, so there will be 2 replicas in the volume\n 8. Verify the volume is still healthy\n\n Volume should always be healthy even only with 2 replicas.\n \"\"\"\n host_id = get_self_host_id()\n\n replica_count = 2\n volume = create_and_check_volume(client, volume_name, replica_count)\n\n volume.attach(hostId=host_id)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n replica_count = 3\n volume = volume.updateReplicaCount(replicaCount=replica_count)\n volume = common.wait_for_volume_degraded(client, volume_name)\n volume = common.wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == replica_count\n\n old_replica_count = replica_count\n replica_count = 2\n volume = volume.updateReplicaCount(replicaCount=replica_count)\n volume = common.wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == old_replica_count\n\n volume.replicaRemove(name=volume.replicas[0].name)\n\n volume = common.wait_for_volume_replica_count(client, volume_name,\n replica_count)\n assert volume.robustness == 
\"healthy\"\n assert len(volume.replicas) == replica_count\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_attach_without_frontend(client, volume_name): # NOQA\n \"\"\"\n Test attach in maintenance mode (without frontend)\n\n 1. Create a volume and attach to the current node with enabled frontend\n 2. Check volume has `blockdev`\n 3. Write `snap1_data` into volume and create snapshot `snap1`\n 4. Write more random data into volume and create another anspshot\n 5. Detach the volume and reattach with disabled frontend\n 6. Check volume still has `blockdev` as frontend but no endpoint\n 7. Revert back to `snap1`\n 8. Detach and reattach the volume with enabled frontend\n 9. Check volume contains data `snap1_data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n\n lht_hostId = get_self_host_id()\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n assert volume.disableFrontend is False\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n\n snap1_data = write_volume_random_data(volume)\n snap1 = create_snapshot(client, volume_name)\n\n write_volume_random_data(volume)\n create_snapshot(client, volume_name)\n\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=True)\n common.wait_for_volume_healthy_no_frontend(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n assert volume.disableFrontend is True\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n check_volume_endpoint(volume)\n\n volume.snapshotRevert(name=snap1.name)\n\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n assert 
volume.disableFrontend is False\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n\n check_volume_data(volume, snap1_data)\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_storage_class_from_backup(volume_name, pvc_name, storage_class, client, core_api, pod_make): # NOQA\n \"\"\"\n Test restore backup using StorageClass\n\n 1. Create volume and PV/PVC/POD\n 2. Write `test_data` into pod\n 3. Create a snapshot and back it up. Get the backup URL\n 4. Create a new StorageClass `longhorn-from-backup` and set backup URL.\n 5. Use `longhorn-from-backup` to create a new PVC\n 6. Wait for the volume to be created and complete the restoration.\n 7. Create the pod using the PVC. Verify the data\n \"\"\"\n VOLUME_SIZE = str(DEFAULT_VOLUME_SIZE * Gi)\n\n pv_name = pvc_name\n\n volume = create_and_check_volume(\n client,\n volume_name,\n size=VOLUME_SIZE\n )\n\n wait_for_volume_detached(client, volume_name)\n\n create_pv_for_volume(client, core_api, volume, pv_name)\n create_pvc_for_volume(client, core_api, volume, pvc_name)\n\n pod_manifest = pod_make()\n pod_manifest['spec']['volumes'] = [create_pvc_spec(pvc_name)]\n pod_name = pod_manifest['metadata']['name']\n create_and_wait_pod(core_api, pod_manifest)\n\n test_data = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api, pod_name, test_data)\n\n volume_id = client.by_id_volume(volume_name)\n snapshot = volume_id.snapshotCreate()\n\n volume_id.snapshotBackup(name=snapshot.name)\n wait_for_backup_completion(client, volume_name, snapshot.name)\n bv, b = find_backup(client, volume_name, snapshot.name)\n\n backup_url = b.url\n\n storage_class['metadata']['name'] = \"longhorn-from-backup\"\n storage_class['parameters']['fromBackup'] = backup_url\n\n create_storage_class(storage_class)\n\n backup_pvc_name = generate_volume_name()\n\n backup_pvc_spec = {\n \"apiVersion\": \"v1\",\n \"kind\": \"PersistentVolumeClaim\",\n \"metadata\": {\n 
\"name\": backup_pvc_name,\n },\n \"spec\": {\n \"accessModes\": [\n \"ReadWriteOnce\"\n ],\n \"storageClassName\": storage_class['metadata']['name'],\n \"resources\": {\n \"requests\": {\n \"storage\": VOLUME_SIZE\n }\n }\n }\n }\n\n volume_count = len(client.list_volume())\n\n core_api.create_namespaced_persistent_volume_claim(\n 'default',\n backup_pvc_spec\n )\n\n backup_volume_created = False\n\n for i in range(RETRY_COUNTS):\n if len(client.list_volume()) == volume_count + 1:\n backup_volume_created = True\n break\n time.sleep(RETRY_INTERVAL)\n\n assert backup_volume_created\n\n for i in range(RETRY_COUNTS):\n pvc_status = core_api.read_namespaced_persistent_volume_claim_status(\n name=backup_pvc_name,\n namespace='default'\n )\n\n if pvc_status.status.phase == 'Bound':\n break\n time.sleep(RETRY_INTERVAL)\n\n found = False\n for i in range(RETRY_COUNTS):\n volumes = client.list_volume()\n for volume in volumes:\n if volume.kubernetesStatus.pvcName == backup_pvc_name:\n backup_volume_name = volume.name\n found = True\n break\n if found:\n break\n time.sleep(RETRY_INTERVAL)\n assert found\n\n wait_for_volume_restoration_completed(client, backup_volume_name)\n wait_for_volume_detached(client, backup_volume_name)\n\n backup_pod_manifest = pod_make(name=\"backup-pod\")\n backup_pod_manifest['spec']['volumes'] = \\\n [create_pvc_spec(backup_pvc_name)]\n backup_pod_name = backup_pod_manifest['metadata']['name']\n create_and_wait_pod(core_api, backup_pod_manifest)\n\n restored_data = read_volume_data(core_api, backup_pod_name)\n assert test_data == restored_data\n\n\n@pytest.mark.coretest # NOQA\ndef test_expansion_basic(client, volume_name): # NOQA\n \"\"\"\n Test volume expansion using Longhorn API\n\n 1. Create volume and attach to the current node\n 2. Generate data `snap1_data` and write it to the volume\n 3. Create snapshot `snap1`\n 4. Expand the volume (volume will be detached, expanded, then attached)\n 5. Verify the volume has been expanded\n 6. 
Generate data `snap2_data` and write it to the volume\n 7. Create snapshot `snap2`\n 8. Gerneate data `snap3_data` and write it after the original size\n 9. Create snapshot `snap3` and verify the `snap3_data` with location\n 10. Detach and reattach the volume.\n 11. Verify the volume is still expanded, and `snap3_data` remain valid\n 12. Detach the volume.\n 13. Reattach the volume in maintence mode\n 14. Revert to `snap2` and detach.\n 15. Attach the volume and check data `snap2_data`\n 16. Generate `snap4_data` and write it after the original size\n 17. Create snapshot `snap4` and verify `snap4_data`.\n 18. Detach the volume and revert to `snap1`\n 19. Validate `snap1_data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n\n lht_hostId = get_self_host_id()\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n assert volume.disableFrontend is False\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n\n snap1_data = write_volume_random_data(volume)\n snap1 = create_snapshot(client, volume_name)\n\n expand_attached_volume(client, volume_name)\n volume = client.by_id_volume(volume_name)\n check_block_device_size(volume, int(EXPAND_SIZE))\n\n snap2_data = write_volume_random_data(volume)\n snap2 = create_snapshot(client, volume_name)\n\n snap3_data = {\n 'pos': int(SIZE),\n 'content': generate_random_data(VOLUME_RWTEST_SIZE),\n }\n snap3_data = write_volume_data(volume, snap3_data)\n create_snapshot(client, volume_name)\n check_volume_data(volume, snap3_data)\n\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n volume = client.by_id_volume(volume_name)\n check_block_device_size(volume, int(EXPAND_SIZE))\n check_volume_data(volume, snap3_data)\n volume.detach()\n volume = 
common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=True)\n volume = common.wait_for_volume_healthy_no_frontend(client, volume_name)\n assert volume.disableFrontend is True\n assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV\n check_volume_endpoint(volume)\n volume.snapshotRevert(name=snap2.name)\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n volume = client.by_id_volume(volume_name)\n check_volume_data(volume, snap2_data, False)\n snap4_data = {\n 'pos': int(SIZE),\n 'content': generate_random_data(VOLUME_RWTEST_SIZE),\n }\n snap4_data = write_volume_data(volume, snap4_data)\n create_snapshot(client, volume_name)\n check_volume_data(volume, snap4_data)\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=lht_hostId, disableFrontend=True)\n volume = common.wait_for_volume_healthy_no_frontend(client, volume_name)\n volume.snapshotRevert(name=snap1.name)\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n common.wait_for_volume_healthy(client, volume_name)\n volume = client.by_id_volume(volume_name)\n check_volume_data(volume, snap1_data, False)\n\n client.delete(volume)\n wait_for_volume_delete(client, volume_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_restore_inc_with_expansion(set_random_backupstore, client, core_api, volume_name, pod): # NOQA\n \"\"\"\n Test restore from disaster recovery volume with volume expansion\n\n Run test against a random backupstores\n\n 1. Create a volume and attach to the current node\n 2. Generate `data0`, write to the volume, make a backup `backup0`\n 3. Create three DR(standby) volumes from the backup: `dr_volume0/1/2`\n 4. 
Wait for all three DR volumes to start the initial restoration\n 5. Verify DR volumes's `lastBackup` is `backup0`\n 6. Verify snapshot/pv/pvc/change backup target are not allowed as long\n as the DR volume exists\n 7. Activate standby `dr_volume0` and attach it to check the volume data\n 8. Expand the original volume. Make sure the expansion is successful.\n 8. Generate `data1` and write to the original volume and create `backup1`\n 9. Make sure `dr_volume1`'s `lastBackup` field has been updated to\n `backup1`\n 10. Activate `dr_volume1` and check data `data0` and `data1`\n 11. Generate `data2` and write to the original volume after original SIZE\n 12. Create `backup2`\n 13. Wait for `dr_volume2` to finish expansion, show `backup2` as latest\n 14. Activate `dr_volume2` and verify `data2`\n 15. Detach `dr_volume2`\n 16. Create PV, PVC and Pod to use `sb_volume2`, check PV/PVC/POD are good\n\n FIXME: Step 16 works because the disk will be treated as a unformatted disk\n \"\"\"\n lht_host_id = get_self_host_id()\n\n std_volume = create_and_check_volume(client, volume_name, 2, SIZE)\n std_volume.attach(hostId=lht_host_id)\n std_volume = common.wait_for_volume_healthy(client, volume_name)\n\n with pytest.raises(Exception) as e:\n std_volume.activate(frontend=VOLUME_FRONTEND_BLOCKDEV)\n assert \"already in active mode\" in str(e.value)\n\n data0 = {'pos': 0, 'len': VOLUME_RWTEST_SIZE,\n 'content': common.generate_random_data(VOLUME_RWTEST_SIZE)}\n bv, backup0, _, data0 = create_backup(\n client, volume_name, data0)\n\n dr_volume0_name = \"dr-expand-0-\" + volume_name\n dr_volume1_name = \"dr-expand-1-\" + volume_name\n dr_volume2_name = \"dr-expand-2-\" + volume_name\n client.create_volume(name=dr_volume0_name, size=SIZE,\n numberOfReplicas=2, fromBackup=backup0.url,\n frontend=\"\", standby=True)\n client.create_volume(name=dr_volume1_name, size=SIZE,\n numberOfReplicas=2, fromBackup=backup0.url,\n frontend=\"\", standby=True)\n 
client.create_volume(name=dr_volume2_name, size=SIZE,\n numberOfReplicas=2, fromBackup=backup0.url,\n frontend=\"\", standby=True)\n wait_for_backup_restore_completed(client, dr_volume0_name, backup0.name)\n wait_for_backup_restore_completed(client, dr_volume1_name, backup0.name)\n wait_for_backup_restore_completed(client, dr_volume2_name, backup0.name)\n\n dr_volume0 = common.wait_for_volume_healthy_no_frontend(client,\n dr_volume0_name)\n dr_volume1 = common.wait_for_volume_healthy_no_frontend(client,\n dr_volume1_name)\n dr_volume2 = common.wait_for_volume_healthy_no_frontend(client,\n dr_volume2_name)\n\n for i in range(RETRY_COUNTS):\n client.list_backupVolume()\n dr_volume0 = client.by_id_volume(dr_volume0_name)\n dr_volume1 = client.by_id_volume(dr_volume1_name)\n dr_volume2 = client.by_id_volume(dr_volume2_name)\n dr_engine0 = get_volume_engine(dr_volume0)\n dr_engine1 = get_volume_engine(dr_volume1)\n dr_engine2 = get_volume_engine(dr_volume2)\n if dr_volume0.restoreRequired is False or \\\n dr_volume1.restoreRequired is False or \\\n dr_volume2.restoreRequired is False or \\\n not dr_engine0.lastRestoredBackup or \\\n not dr_engine1.lastRestoredBackup or \\\n not dr_engine2.lastRestoredBackup:\n time.sleep(RETRY_INTERVAL)\n else:\n break\n assert dr_volume0.standby is True\n assert dr_volume0.lastBackup == backup0.name\n assert dr_volume0.frontend == \"\"\n assert dr_volume0.restoreRequired is True\n dr_engine0 = get_volume_engine(dr_volume0)\n assert dr_engine0.lastRestoredBackup == backup0.name\n assert dr_engine0.requestedBackupRestore == backup0.name\n assert dr_volume1.standby is True\n assert dr_volume1.lastBackup == backup0.name\n assert dr_volume1.frontend == \"\"\n assert dr_volume1.restoreRequired is True\n dr_engine1 = get_volume_engine(dr_volume1)\n assert dr_engine1.lastRestoredBackup == backup0.name\n assert dr_engine1.requestedBackupRestore == backup0.name\n assert dr_volume2.standby is True\n assert dr_volume2.lastBackup == backup0.name\n 
assert dr_volume2.frontend == \"\"\n assert dr_volume2.restoreRequired is True\n dr_engine2 = get_volume_engine(dr_volume2)\n assert dr_engine2.lastRestoredBackup == backup0.name\n assert dr_engine2.requestedBackupRestore == backup0.name\n\n dr0_snaps = dr_volume0.snapshotList()\n assert len(dr0_snaps) == 2\n\n activate_standby_volume(client, dr_volume0_name)\n dr_volume0 = client.by_id_volume(dr_volume0_name)\n dr_volume0.attach(hostId=lht_host_id)\n dr_volume0 = common.wait_for_volume_healthy(client, dr_volume0_name)\n check_volume_data(dr_volume0, data0, False)\n\n expand_attached_volume(client, volume_name)\n std_volume = client.by_id_volume(volume_name)\n check_block_device_size(std_volume, int(EXPAND_SIZE))\n\n data1 = {'pos': VOLUME_RWTEST_SIZE, 'len': VOLUME_RWTEST_SIZE,\n 'content': common.generate_random_data(VOLUME_RWTEST_SIZE)}\n bv, backup1, _, data1 = create_backup(\n client, volume_name, data1)\n\n client.list_backupVolume()\n check_volume_last_backup(client, dr_volume1_name, backup1.name)\n activate_standby_volume(client, dr_volume1_name)\n dr_volume1 = client.by_id_volume(dr_volume1_name)\n dr_volume1.attach(hostId=lht_host_id)\n dr_volume1 = common.wait_for_volume_healthy(client, dr_volume1_name)\n check_volume_data(dr_volume1, data0, False)\n check_volume_data(dr_volume1, data1, False)\n\n data2 = {'pos': int(SIZE), 'len': VOLUME_RWTEST_SIZE,\n 'content': common.generate_random_data(VOLUME_RWTEST_SIZE)}\n bv, backup2, _, data2 = create_backup(\n client, volume_name, data2)\n assert backup2.volumeSize == EXPAND_SIZE\n\n client.list_backupVolume()\n wait_for_dr_volume_expansion(client, dr_volume2_name, EXPAND_SIZE)\n check_volume_last_backup(client, dr_volume2_name, backup2.name)\n activate_standby_volume(client, dr_volume2_name)\n dr_volume2 = client.by_id_volume(dr_volume2_name)\n dr_volume2.attach(hostId=lht_host_id)\n dr_volume2 = common.wait_for_volume_healthy(client, dr_volume2_name)\n check_volume_data(dr_volume2, data2)\n\n # allocated this 
active volume to a pod\n dr_volume2.detach()\n dr_volume2 = common.wait_for_volume_detached(client, dr_volume2_name)\n\n create_pv_for_volume(client, core_api, dr_volume2, dr_volume2_name)\n create_pvc_for_volume(client, core_api, dr_volume2, dr_volume2_name)\n\n dr_volume2_pod_name = \"pod-\" + dr_volume2_name\n pod['metadata']['name'] = dr_volume2_pod_name\n pod['spec']['volumes'] = [{\n 'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],\n 'persistentVolumeClaim': {\n 'claimName': dr_volume2_name,\n },\n }]\n create_and_wait_pod(core_api, pod)\n\n dr_volume2 = client.by_id_volume(dr_volume2_name)\n k_status = dr_volume2.kubernetesStatus\n workloads = k_status.workloadsStatus\n assert k_status.pvName == dr_volume2_name\n assert k_status.pvStatus == 'Bound'\n assert len(workloads) == 1\n for i in range(RETRY_COUNTS):\n if workloads[0].podStatus == 'Running':\n break\n time.sleep(RETRY_INTERVAL)\n dr_volume2 = client.by_id_volume(dr_volume2_name)\n k_status = dr_volume2.kubernetesStatus\n workloads = k_status.workloadsStatus\n assert len(workloads) == 1\n assert workloads[0].podName == dr_volume2_pod_name\n assert workloads[0].podStatus == 'Running'\n assert not workloads[0].workloadName\n assert not workloads[0].workloadType\n assert k_status.namespace == 'default'\n assert k_status.pvcName == dr_volume2_name\n assert not k_status.lastPVCRefAt\n assert not k_status.lastPodRefAt\n\n delete_and_wait_pod(core_api, dr_volume2_pod_name)\n delete_and_wait_pvc(core_api, dr_volume2_name)\n delete_and_wait_pv(core_api, dr_volume2_name)\n\n # cleanup\n std_volume.detach()\n dr_volume0.detach()\n dr_volume1.detach()\n std_volume = common.wait_for_volume_detached(client, volume_name)\n dr_volume0 = common.wait_for_volume_detached(client, dr_volume0_name)\n dr_volume1 = common.wait_for_volume_detached(client, dr_volume1_name)\n dr_volume2 = common.wait_for_volume_detached(client, dr_volume2_name)\n\n backupstore_cleanup(client)\n\n client.delete(std_volume)\n 
client.delete(dr_volume0)\n client.delete(dr_volume1)\n client.delete(dr_volume2)\n\n wait_for_volume_delete(client, volume_name)\n wait_for_volume_delete(client, dr_volume0_name)\n wait_for_volume_delete(client, dr_volume1_name)\n wait_for_volume_delete(client, dr_volume2_name)\n\n volumes = client.list_volume().data\n assert len(volumes) == 0\n\n\ndef test_engine_image_daemonset_restart(client, apps_api, volume_name): # NOQA\n \"\"\"\n Test restarting engine image daemonset\n\n 1. Get the default engine image\n 2. Create a volume and attach to the current node\n 3. Write random data to the volume and create a snapshot\n 4. Delete the engine image daemonset\n 5. Engine image daemonset should be recreated\n 6. In the meantime, validate the volume data to prove it's still functional\n 7. Wait for the engine image to become `ready` again\n 8. Check the volume data again.\n 9. Write some data and create a new snapshot.\n 1. Since create snapshot will use engine image binary.\n 10. Check the volume data again\n \"\"\"\n default_img = common.get_default_engine_image(client)\n ds_name = \"engine-image-\" + default_img.name\n\n volume = create_and_check_volume(client, volume_name)\n\n lht_hostId = get_self_host_id()\n volume.attach(hostId=lht_hostId, disableFrontend=False)\n volume = common.wait_for_volume_healthy(client, volume_name)\n snap1_data = write_volume_random_data(volume)\n create_snapshot(client, volume_name)\n\n # The engine image DaemonSet will be recreated/restarted automatically\n apps_api.delete_namespaced_daemon_set(ds_name, common.LONGHORN_NAMESPACE)\n\n # The Longhorn volume is still available\n # during the engine image DaemonSet restarting\n check_volume_data(volume, snap1_data)\n\n # Wait for the restart complete\n common.wait_for_engine_image_state(client, default_img.name, \"ready\")\n\n # Longhorn is still able to use the corresponding engine binary to\n # operate snapshot\n check_volume_data(volume, snap1_data)\n snap2_data = 
write_volume_random_data(volume)\n create_snapshot(client, volume_name)\n check_volume_data(volume, snap2_data)\n\n\n@pytest.mark.coretest # NOQA\ndef test_expansion_canceling(client, core_api, volume_name, pod): # NOQA\n \"\"\"\n Test expansion canceling\n\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Generate `test_data` and write to the pod\n 3. Create an empty directory with expansion snapshot tmp meta file path\n so that the following expansion will fail\n 4. Delete the pod and wait for volume detachment\n 5. Try to expand the volume using Longhorn API\n 6. Wait for expansion failure then use Longhorn API to cancel it\n 7. Create a new pod and validate the volume content,\n then re-write random data to the pod\n 8. Delete the pod and wait for volume detachment\n 9. Retry expansion then verify the expansion done using Longhorn API\n 10. Create a new pod\n 11. Validate the volume content, then check if data writing looks fine\n 12. Clean up pod, PVC, and PV\n \"\"\"\n expansion_pvc_name = \"pvc-\" + volume_name\n expansion_pv_name = \"pv-\" + volume_name\n pod_name = \"pod-\" + volume_name\n volume = create_and_check_volume(client, volume_name, 2, SIZE)\n create_pv_for_volume(client, core_api, volume, expansion_pv_name)\n create_pvc_for_volume(client, core_api, volume, expansion_pvc_name)\n pod['metadata']['name'] = pod_name\n pod['spec']['volumes'] = [{\n 'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],\n 'persistentVolumeClaim': {\n 'claimName': expansion_pvc_name,\n },\n }]\n create_and_wait_pod(core_api, pod)\n\n volume = client.by_id_volume(volume_name)\n replicas = volume.replicas\n fail_replica_expansion(client, core_api,\n volume_name, EXPAND_SIZE, replicas)\n\n test_data = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api, pod_name, test_data)\n\n delete_and_wait_pod(core_api, pod_name)\n volume = wait_for_volume_detached(client, volume_name)\n\n volume.expand(size=EXPAND_SIZE)\n 
wait_for_expansion_failure(client, volume_name)\n volume = client.by_id_volume(volume_name)\n volume.cancelExpansion()\n wait_for_volume_expansion(client, volume_name)\n volume = client.by_id_volume(volume_name)\n assert volume.state == \"detached\"\n assert volume.size == SIZE\n\n # check if the volume still works fine\n create_and_wait_pod(core_api, pod)\n resp = read_volume_data(core_api, pod_name)\n assert resp == test_data\n test_data = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api, pod_name, test_data)\n\n # retry expansion\n delete_and_wait_pod(core_api, pod_name)\n volume = wait_for_volume_detached(client, volume_name)\n volume.expand(size=EXPAND_SIZE)\n wait_for_volume_expansion(client, volume_name)\n volume = client.by_id_volume(volume_name)\n assert volume.state == \"detached\"\n assert volume.size == str(EXPAND_SIZE)\n\n create_and_wait_pod(core_api, pod)\n volume = client.by_id_volume(volume_name)\n engine = get_volume_engine(volume)\n assert volume.size == EXPAND_SIZE\n assert volume.size == engine.size\n resp = read_volume_data(core_api, pod_name)\n assert resp == test_data\n write_pod_volume_data(core_api, pod_name, test_data)\n resp = read_volume_data(core_api, pod_name)\n assert resp == test_data\n\n delete_and_wait_pod(core_api, pod_name)\n delete_and_wait_pvc(core_api, expansion_pvc_name)\n delete_and_wait_pv(core_api, expansion_pv_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_running_volume_with_scheduling_failure(\n client, core_api, volume_name, pod): # NOQA\n \"\"\"\n Test if the running volume still work fine\n when there is a scheduling failed replica\n\n Prerequisite:\n Setting \"soft anti-affinity\" is false.\n\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Wait for the pod running and the volume healthy.\n 3. Write data to the pod volume and get the md5sum.\n 4. Disable the scheduling for a node contains a running replica.\n 5. 
Crash the replica on the scheduling disabled node for the volume.\n 6. Wait for the scheduling failure which is caused\n by the new replica creation.\n 7. Verify:\n 7.1. `volume.ready == True`.\n 7.2. `volume.conditions[scheduled].status == False`.\n 7.3. the volume is Degraded.\n 7.4. the new replica is created but it is not running.\n 8. Write more data to the volume and get the md5sum\n 9. Delete the pod and wait for the volume detached.\n 10. Verify the scheduling failed replica is removed.\n 11. Verify:\n 11.1. `volume.ready == True`.\n 11.2. `volume.conditions[scheduled].status == True`\n 12. Recreate a new pod for the volume and wait for the pod running.\n 13. Validate the volume content, then check if data writing looks fine.\n 14. Clean up pod, PVC, and PV.\n \"\"\"\n\n replica_node_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(replica_node_soft_anti_affinity_setting, value=\"false\")\n\n data_path1 = \"/data/test1\"\n test_pv_name = \"pv-\" + volume_name\n test_pvc_name = \"pvc-\" + volume_name\n test_pod_name = \"pod-\" + volume_name\n\n volume = create_and_check_volume(client, volume_name, size=str(1 * Gi))\n create_pv_for_volume(client, core_api, volume, test_pv_name)\n create_pvc_for_volume(client, core_api, volume, test_pvc_name)\n\n pod['metadata']['name'] = test_pod_name\n pod['spec']['volumes'] = [{\n 'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],\n 'persistentVolumeClaim': {\n 'claimName': test_pvc_name,\n },\n }]\n create_and_wait_pod(core_api, pod)\n wait_for_volume_healthy(client, volume_name)\n write_pod_volume_random_data(core_api, test_pod_name,\n data_path1, DATA_SIZE_IN_MB_1)\n original_md5sum1 = get_pod_data_md5sum(core_api, test_pod_name,\n data_path1)\n\n volume = client.by_id_volume(volume_name)\n existing_replicas = {}\n for r in volume.replicas:\n existing_replicas[r.name] = r\n node = client.by_id_node(volume.replicas[0].hostId)\n node = 
client.update(node, allowScheduling=False)\n common.wait_for_node_update(client, node.id,\n \"allowScheduling\", False)\n\n crash_replica_processes(client, core_api, volume_name,\n replicas=[volume.replicas[0]],\n wait_to_fail=False)\n\n # Wait for scheduling failure.\n # It means the new replica is created but fails to be scheduled.\n wait_for_volume_condition_scheduled(client, volume_name, \"status\",\n CONDITION_STATUS_FALSE)\n wait_for_volume_condition_scheduled(client, volume_name, \"reason\",\n CONDITION_REASON_SCHEDULING_FAILURE)\n volume = wait_for_volume_degraded(client, volume_name)\n assert len(volume.replicas) == 4\n assert volume.ready\n for r in volume.replicas:\n if r.name not in existing_replicas:\n new_replica = r\n break\n assert new_replica\n assert not new_replica.running\n assert not new_replica.hostId\n\n data_path2 = \"/data/test2\"\n write_pod_volume_random_data(core_api, test_pod_name,\n data_path2, DATA_SIZE_IN_MB_1)\n original_md5sum2 = get_pod_data_md5sum(core_api, test_pod_name, data_path2)\n\n delete_and_wait_pod(core_api, test_pod_name)\n wait_for_volume_detached(client, volume_name)\n volume = wait_for_volume_condition_scheduled(client, volume_name, \"status\",\n CONDITION_STATUS_TRUE)\n assert volume.ready\n # The scheduling failed replica will be removed\n # so that the volume can be reattached later.\n assert len(volume.replicas) == 3\n for r in volume.replicas:\n assert r.hostId != \"\"\n assert r.name != new_replica.name\n\n create_and_wait_pod(core_api, pod)\n wait_for_volume_degraded(client, volume_name)\n\n md5sum1 = get_pod_data_md5sum(core_api, test_pod_name, data_path1)\n assert md5sum1 == original_md5sum1\n md5sum2 = get_pod_data_md5sum(core_api, test_pod_name, data_path2)\n assert md5sum2 == original_md5sum2\n\n # The data writing is fine\n data_path3 = \"/data/test3\"\n write_pod_volume_random_data(core_api, test_pod_name,\n data_path3, DATA_SIZE_IN_MB_1)\n get_pod_data_md5sum(core_api, test_pod_name, data_path3)\n\n 
delete_and_wait_pod(core_api, test_pod_name)\n delete_and_wait_pvc(core_api, test_pvc_name)\n delete_and_wait_pv(core_api, test_pv_name)\n\n\n@pytest.mark.coretest # NOQA\ndef test_expansion_with_scheduling_failure(\n client, core_api, volume_name, pod): # NOQA\n \"\"\"\n Test if the running volume with scheduling failure\n can be expanded after the detachment.\n\n Prerequisite:\n Setting \"soft anti-affinity\" is false.\n\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Wait for the pod running and the volume healthy.\n 3. Write data to the pod volume and get the md5sum.\n 4. Disable the scheduling for a node contains a running replica.\n 5. Crash the replica on the scheduling disabled node for the volume.\n Then delete the failed replica so that it won't be reused.\n 6. Wait for the scheduling failure which is caused\n by the new replica creation.\n 7. Verify:\n 7.1. `volume.ready == True`.\n 7.2. `volume.conditions[scheduled].status == False`.\n 7.3. the volume is Degraded.\n 7.4. the new replica is created but it is not running.\n 8. Write more data to the volume and get the md5sum\n 9. Delete the pod and wait for the volume detached.\n 10. Verify the scheduling failed replica is removed.\n 11. Verify:\n 11.1. `volume.ready == True`.\n 11.2. `volume.conditions[scheduled].status == True`\n 12. Expand the volume and wait for the expansion succeeds.\n 13. Verify there is no rebuild replica after the expansion.\n 14. Recreate a new pod for the volume and wait for the pod running.\n 15. Validate the volume content.\n 16. Verify the expanded part can be read/written correctly.\n 17. Enable the node scheduling.\n 18. Wait for the volume rebuild succeeds.\n 19. Verify the data written in the expanded part.\n 20. 
Clean up pod, PVC, and PV.\n\n Notice that the step 1 to step 11 is identical with\n those of the case test_running_volume_with_scheduling_failure().\n \"\"\"\n replica_node_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(replica_node_soft_anti_affinity_setting, value=\"false\")\n\n data_path1 = \"/data/test1\"\n test_pv_name = \"pv-\" + volume_name\n test_pvc_name = \"pvc-\" + volume_name\n test_pod_name = \"pod-\" + volume_name\n\n volume = create_and_check_volume(client, volume_name, size=str(300 * Mi))\n create_pv_for_volume(client, core_api, volume, test_pv_name)\n create_pvc_for_volume(client, core_api, volume, test_pvc_name)\n\n pod['metadata']['name'] = test_pod_name\n pod['spec']['volumes'] = [{\n 'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],\n 'persistentVolumeClaim': {\n 'claimName': test_pvc_name,\n },\n }]\n create_and_wait_pod(core_api, pod)\n wait_for_volume_healthy(client, volume_name)\n write_pod_volume_random_data(core_api, test_pod_name,\n data_path1, DATA_SIZE_IN_MB_1)\n original_md5sum1 = get_pod_data_md5sum(core_api, test_pod_name,\n data_path1)\n\n volume = client.by_id_volume(volume_name)\n old_replicas = {}\n for r in volume.replicas:\n old_replicas[r.name] = r\n failed_replica = volume.replicas[0]\n node = client.by_id_node(failed_replica.hostId)\n node = client.update(node, allowScheduling=False)\n common.wait_for_node_update(client, node.id,\n \"allowScheduling\", False)\n\n crash_replica_processes(client, core_api, volume_name,\n replicas=[failed_replica],\n wait_to_fail=False)\n\n # Remove the failed replica so that it won't be reused later\n volume = wait_for_volume_degraded(client, volume_name)\n volume.replicaRemove(name=failed_replica.name)\n\n # Wait for scheduling failure.\n # It means the new replica is created but fails to be scheduled.\n wait_for_volume_condition_scheduled(client, volume_name, \"status\",\n CONDITION_STATUS_FALSE)\n 
wait_for_volume_condition_scheduled(client, volume_name, \"reason\",\n CONDITION_REASON_SCHEDULING_FAILURE)\n volume = wait_for_volume_degraded(client, volume_name)\n assert len(volume.replicas) == 3\n assert volume.ready\n for r in volume.replicas:\n assert r.name != failed_replica.name\n if r.name not in old_replicas:\n new_replica = r\n break\n assert new_replica\n assert not new_replica.running\n assert not new_replica.hostId\n\n data_path2 = \"/data/test2\"\n write_pod_volume_random_data(core_api, test_pod_name,\n data_path2, DATA_SIZE_IN_MB_1)\n original_md5sum2 = get_pod_data_md5sum(core_api, test_pod_name, data_path2)\n\n delete_and_wait_pod(core_api, test_pod_name)\n wait_for_volume_detached(client, volume_name)\n volume = wait_for_volume_condition_scheduled(client, volume_name, \"status\",\n CONDITION_STATUS_TRUE)\n assert volume.ready\n # The scheduling failed replica will be removed\n # so that the volume can be reattached later.\n assert len(volume.replicas) == 2\n for r in volume.replicas:\n assert r.hostId != \"\"\n assert r.name != new_replica.name\n\n expanded_size = str(400 * Mi)\n volume.expand(size=expanded_size)\n wait_for_volume_expansion(client, volume_name)\n volume = client.by_id_volume(volume_name)\n assert volume.state == \"detached\"\n assert volume.size == expanded_size\n assert len(volume.replicas) == 2\n for r in volume.replicas:\n assert r.name in old_replicas\n\n create_and_wait_pod(core_api, pod)\n wait_for_volume_degraded(client, volume_name)\n\n md5sum1 = get_pod_data_md5sum(core_api, test_pod_name, data_path1)\n assert md5sum1 == original_md5sum1\n md5sum2 = get_pod_data_md5sum(core_api, test_pod_name, data_path2)\n assert md5sum2 == original_md5sum2\n\n # The data writing is fine\n data_path3 = \"/data/test3\"\n write_pod_volume_random_data(core_api, test_pod_name,\n data_path3, DATA_SIZE_IN_MB_1)\n original_md5sum3 = get_pod_data_md5sum(core_api, test_pod_name, data_path3)\n\n node = client.by_id_node(failed_replica.hostId)\n 
client.update(node, allowScheduling=True)\n wait_for_volume_healthy(client, volume_name)\n\n md5sum3 = get_pod_data_md5sum(core_api, test_pod_name, data_path3)\n assert md5sum3 == original_md5sum3\n\n delete_and_wait_pod(core_api, test_pod_name)\n delete_and_wait_pvc(core_api, test_pvc_name)\n delete_and_wait_pv(core_api, test_pv_name)\n\n\ndef test_dr_volume_with_last_backup_deletion(set_random_backupstore, client, core_api, csi_pv, pvc, volume_name, pod_make): # NOQA\n \"\"\"\n Test if the DR volume can be activated\n after deleting the lastest backup. There are two cases to the last\n backup, one is the last backup is no empty, and the other one is\n last backup is empty.\n\n 1. Set a random backupstore.\n 2. Create a volume, then create the corresponding PV, PVC and Pod.\n 3. Write data to the pod volume and get the md5sum\n after the pod running.\n 4. Create the 1st backup.\n 5. Create two DR volumes from the backup.\n 6. Wait for the DR volumes restore complete.\n 7. Write data to the original volume then create the 2nd backup.\n 8. Wait for the DR volumes incremental restore complete.\n 9. Delete the 2nd backup.\n 10. Verify the `lastBackup == 1st backup` for 2 DR volumes and\n original volume.\n 11. Activate the DR volume 1 and wait for it complete.\n 12. Create PV/PVC/Pod for the activated volume 1.\n 13. Validate the volume content.\n 14. Delete the 1st backup.\n 15. Verify the `lastBackup == \"\"` for DR volume 2 and original volume.\n 16. Activate the DR volume 2 and wait for it complete.\n 17. Create PV/PVC/Pod for the activated volume 2.\n 18. 
Validate the volume content, should be backup 1.\n \"\"\"\n std_volume_name = volume_name + \"-std\"\n data_path1 = \"/data/test1\"\n std_pod_name, std_pv_name, std_pvc_name, std_md5sum1 = \\\n prepare_pod_with_data_in_mb(\n client, core_api, csi_pv, pvc, pod_make, std_volume_name,\n data_path=data_path1, data_size_in_mb=DATA_SIZE_IN_MB_1)\n\n std_volume = client.by_id_volume(std_volume_name)\n snap1 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap1.name)\n wait_for_backup_completion(client, std_volume_name, snap1.name)\n bv, b1 = find_backup(client, std_volume_name, snap1.name)\n\n # Create DR volume 1 and 2.\n dr_volume_name = volume_name + \"-dr\"\n client.create_volume(name=dr_volume_name, size=str(1 * Gi),\n numberOfReplicas=3, fromBackup=b1.url,\n frontend=\"\", standby=True)\n wait_for_volume_creation(client, dr_volume_name)\n wait_for_volume_restoration_start(client, dr_volume_name, b1.name)\n wait_for_backup_restore_completed(client, dr_volume_name, b1.name)\n\n dr2_volume_name = volume_name + \"-dr2\"\n client.create_volume(name=dr2_volume_name, size=str(1 * Gi),\n numberOfReplicas=3, fromBackup=b1.url,\n frontend=\"\", standby=True)\n wait_for_volume_creation(client, dr2_volume_name)\n wait_for_volume_restoration_start(client, dr2_volume_name, b1.name)\n wait_for_backup_restore_completed(client, dr2_volume_name, b1.name)\n\n # Write data and create backup 2.\n data_path2 = \"/data/test2\"\n write_pod_volume_random_data(core_api, std_pod_name,\n data_path2, DATA_SIZE_IN_MB_1)\n snap2 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap2.name)\n wait_for_backup_completion(client, std_volume_name, snap2.name)\n bv, b2 = find_backup(client, std_volume_name, snap2.name)\n\n # Wait for the incremental restoration triggered then complete.\n check_volume_last_backup(client, dr_volume_name, b2.name)\n wait_for_volume_restoration_start(client, dr_volume_name, b2.name)\n 
wait_for_backup_restore_completed(client, dr_volume_name, b2.name)\n\n check_volume_last_backup(client, dr2_volume_name, b2.name)\n wait_for_volume_restoration_start(client, dr2_volume_name, b2.name)\n wait_for_backup_restore_completed(client, dr2_volume_name, b2.name)\n\n # Delete the latest backup backup 2 then check the `lastBackup` field.\n delete_backup(client, bv.name, b2.name)\n client.list_backupVolume()\n check_volume_last_backup(client, std_volume_name, b1.name)\n check_volume_last_backup(client, dr_volume_name, b1.name)\n check_volume_last_backup(client, dr2_volume_name, b1.name)\n\n # Active DR volume 1 and create PV/PVC/Pod for DR volume 1.\n activate_standby_volume(client, dr_volume_name)\n dr_volume = wait_for_volume_detached(client, dr_volume_name)\n\n dr_pod_name = dr_volume_name + \"-pod\"\n dr_pv_name = dr_volume_name + \"-pv\"\n dr_pvc_name = dr_volume_name + \"-pvc\"\n dr_pod = pod_make(name=dr_pod_name)\n create_pv_for_volume(client, core_api, dr_volume, dr_pv_name)\n create_pvc_for_volume(client, core_api, dr_volume, dr_pvc_name)\n dr_pod['spec']['volumes'] = [create_pvc_spec(dr_pvc_name)]\n create_and_wait_pod(core_api, dr_pod)\n\n # Validate the volume content.\n md5sum1 = get_pod_data_md5sum(core_api, dr_pod_name, data_path1)\n assert std_md5sum1 == md5sum1\n\n # For DR volume, the requested backup restore is backup1 and\n # the last restored backup is backup2 now. 
Since the backup2 is gone,\n # the DR volume will automatically fall back to do full restore\n # for backup1.\n\n # Delete backup 1 and check the `lastBackup` field.\n delete_backup(client, bv.name, b1.name)\n client.list_backupVolume()\n check_volume_last_backup(client, std_volume_name, \"\")\n check_volume_last_backup(client, dr_volume_name, \"\")\n check_volume_last_backup(client, dr2_volume_name, \"\")\n\n # Active DR volume 2 and create PV/PVC/Pod for DR volume 2.\n activate_standby_volume(client, dr2_volume_name)\n dr2_volume = wait_for_volume_detached(client, dr2_volume_name)\n\n dr2_pod_name = dr2_volume_name + \"-pod\"\n dr2_pv_name = dr2_volume_name + \"-pv\"\n dr2_pvc_name = dr2_volume_name + \"-pvc\"\n dr2_pod = pod_make(name=dr2_pod_name)\n create_pv_for_volume(client, core_api, dr2_volume, dr2_pv_name)\n create_pvc_for_volume(client, core_api, dr2_volume, dr2_pvc_name)\n dr2_pod['spec']['volumes'] = [create_pvc_spec(dr2_pvc_name)]\n create_and_wait_pod(core_api, dr2_pod)\n\n # Validate the volume content.\n md5sum1 = get_pod_data_md5sum(core_api, dr2_pod_name, data_path1)\n assert std_md5sum1 == md5sum1\n\n delete_and_wait_pod(core_api, std_pod_name)\n delete_and_wait_pod(core_api, dr_pod_name)\n delete_and_wait_pod(core_api, dr2_pod_name)\n client.delete(bv)\n\n\ndef test_backup_lock_deletion_during_restoration(set_random_backupstore, client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA\n \"\"\"\n Test backup locks\n Context:\n To test the locking mechanism that utilizes the backupstore,\n to prevent the following case of concurrent operations.\n - prevent backup deletion during backup restoration\n\n steps:\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Wait for the pod running and the volume healthy.\n 3. Write data to the pod volume and get the md5sum.\n 4. Take a backup.\n 5. Wait for the backup to be completed.\n 6. Start backup restoration for the backup creation.\n 7. 
Wait for restoration to be in progress.\n 8. Delete the backup from the backup store.\n 9. Wait for the restoration to be completed.\n 10. Assert the data from the restored volume with md5sum.\n 11. Assert the backup count in the backup store with 1.\n (The backup should not be deleted)\n \"\"\"\n backupstore_cleanup(client)\n std_volume_name = volume_name + \"-std\"\n restore_volume_name = volume_name + \"-restore\"\n _, _, _, std_md5sum = \\\n prepare_pod_with_data_in_mb(\n client, core_api, csi_pv, pvc, pod_make, std_volume_name,\n data_size_in_mb=DATA_SIZE_IN_MB_2)\n std_volume = client.by_id_volume(std_volume_name)\n snap1 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap1.name)\n wait_for_backup_completion(client, std_volume_name, snap1.name)\n backup_volume = client.by_id_backupVolume(std_volume_name)\n\n _, b = common.find_backup(client, std_volume_name, snap1.name)\n client.create_volume(name=restore_volume_name, fromBackup=b.url)\n wait_for_volume_restoration_start(client, restore_volume_name, b.name)\n\n backup_volume.backupDelete(name=b.name)\n\n wait_for_volume_restoration_completed(client, restore_volume_name)\n restore_volume = wait_for_volume_detached(client, restore_volume_name)\n assert len(restore_volume.replicas) == 3\n\n restore_pod_name = restore_volume_name + \"-pod\"\n restore_pv_name = restore_volume_name + \"-pv\"\n restore_pvc_name = restore_volume_name + \"-pvc\"\n restore_pod = pod_make(name=restore_pod_name)\n create_pv_for_volume(client, core_api, restore_volume, restore_pv_name)\n create_pvc_for_volume(client, core_api, restore_volume, restore_pvc_name)\n restore_pod['spec']['volumes'] = [create_pvc_spec(restore_pvc_name)]\n create_and_wait_pod(core_api, restore_pod)\n\n restore_volume = client.by_id_volume(restore_volume_name)\n assert restore_volume[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_HEALTHY\n\n md5sum = get_pod_data_md5sum(core_api, restore_pod_name, \"/data/test\")\n assert std_md5sum == 
md5sum\n\n _, b = common.find_backup(client, std_volume_name, snap1.name)\n assert b is not None\n\n\ndef test_backup_lock_deletion_during_backup(set_random_backupstore, client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA\n \"\"\"\n Test backup locks\n Context:\n To test the locking mechanism that utilizes the backupstore,\n to prevent the following case of concurrent operations.\n - prevent backup deletion while a backup is in progress\n\n steps:\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Wait for the pod running and the volume healthy.\n 3. Write data to the pod volume and get the md5sum.\n 4. Take a backup.\n 5. Wait for the backup to be completed.\n 6. Write more data into the volume and compute md5sum.\n 7. Take another backup of the volume.\n 8. While backup is in progress, delete the older backup up.\n 9. Wait for the backup creation in progress to be completed.\n 10. Check the backup store, there should be 2 backups.\n (The older backup should not be deleted)\n 11. Restore the latest backup.\n 12. Wait for the restoration to be completed. Assert md5sum from step 6.\n 13. Restore the older backup.\n 14. Wait for the restoration to be completed. 
Assert md5sum from step 3.\n \"\"\"\n backupstore_cleanup(client)\n std_volume_name = volume_name + \"-std\"\n restore_volume_name_1 = volume_name + \"-restore-1\"\n restore_volume_name_2 = volume_name + \"-restore-2\"\n\n std_pod_name, _, _, std_md5sum1 = \\\n prepare_pod_with_data_in_mb(\n client, core_api, csi_pv, pvc, pod_make, std_volume_name)\n std_volume = client.by_id_volume(std_volume_name)\n snap1 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap1.name)\n wait_for_backup_completion(client, std_volume_name, snap1.name)\n backup_volume = client.by_id_backupVolume(std_volume_name)\n _, b1 = common.find_backup(client, std_volume_name, snap1.name)\n\n write_pod_volume_random_data(core_api, std_pod_name, \"/data/test2\",\n DATA_SIZE_IN_MB_3)\n\n std_md5sum2 = get_pod_data_md5sum(core_api, std_pod_name, \"/data/test2\")\n snap2 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap2.name)\n wait_for_backup_to_start(client, std_volume_name, snap2.name)\n\n backup_volume.backupDelete(name=b1.name)\n\n wait_for_backup_completion(client, std_volume_name, snap2.name,\n retry_count=600)\n\n _, b1 = common.find_backup(client, std_volume_name, snap1.name)\n _, b2 = common.find_backup(client, std_volume_name, snap2.name)\n\n assert b1, b2 is not None\n\n client.create_volume(name=restore_volume_name_1, fromBackup=b1.url)\n\n wait_for_volume_restoration_completed(client, restore_volume_name_1)\n restore_volume_1 = wait_for_volume_detached(client, restore_volume_name_1)\n assert len(restore_volume_1.replicas) == 3\n\n restore_pod_name_1 = restore_volume_name_1 + \"-pod\"\n restore_pv_name_1 = restore_volume_name_1 + \"-pv\"\n restore_pvc_name_1 = restore_volume_name_1 + \"-pvc\"\n restore_pod_1 = pod_make(name=restore_pod_name_1)\n create_pv_for_volume(client, core_api, restore_volume_1, restore_pv_name_1)\n create_pvc_for_volume(client, core_api, restore_volume_1,\n restore_pvc_name_1)\n 
restore_pod_1['spec']['volumes'] = [create_pvc_spec(restore_pvc_name_1)]\n create_and_wait_pod(core_api, restore_pod_1)\n\n md5sum1 = get_pod_data_md5sum(core_api, restore_pod_name_1, \"/data/test\")\n\n assert std_md5sum1 == md5sum1\n\n client.create_volume(name=restore_volume_name_2, fromBackup=b2.url)\n\n wait_for_volume_restoration_completed(client, restore_volume_name_2)\n restore_volume_2 = wait_for_volume_detached(client, restore_volume_name_2)\n assert len(restore_volume_2.replicas) == 3\n\n restore_pod_name_2 = restore_volume_name_2 + \"-pod\"\n restore_pv_name_2 = restore_volume_name_2 + \"-pv\"\n restore_pvc_name_2 = restore_volume_name_2 + \"-pvc\"\n restore_pod_2 = pod_make(name=restore_pod_name_2)\n create_pv_for_volume(client, core_api, restore_volume_2, restore_pv_name_2)\n create_pvc_for_volume(client, core_api, restore_volume_2,\n restore_pvc_name_2)\n restore_pod_2['spec']['volumes'] = [create_pvc_spec(restore_pvc_name_2)]\n create_and_wait_pod(core_api, restore_pod_2)\n\n md5sum2 = get_pod_data_md5sum(core_api, restore_pod_name_2, \"/data/test2\")\n\n assert std_md5sum2 == md5sum2\n\n\ndef test_backup_lock_creation_during_deletion(set_random_backupstore, client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA\n \"\"\"\n Test backup locks\n Context:\n To test the locking mechanism that utilizes the backupstore,\n to prevent the following case of concurrent operations.\n - prevent backup creation during backup deletion\n\n steps:\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Wait for the pod running and the volume healthy.\n 3. Write data (DATA_SIZE_IN_MB_2) to the pod volume and get the md5sum.\n 4. Take a backup.\n 5. Wait for the backup to be completed.\n 6. Delete the backup.\n 7. Without waiting for the backup deletion completion, create another\n backup of the same volume.\n 8. Verify the API response of the backup creation containing the backup\n creation failure info.\n 9. 
Wait for the backup deletion and assert there is 0 backup in the backup\n store.\n \"\"\"\n backupstore_cleanup(client)\n std_volume_name = volume_name + \"-std\"\n\n std_pod_name, _, _, std_md5sum1 = \\\n prepare_pod_with_data_in_mb(\n client, core_api, csi_pv, pvc, pod_make, std_volume_name,\n data_size_in_mb=DATA_SIZE_IN_MB_2)\n std_volume = client.by_id_volume(std_volume_name)\n snap1 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap1.name)\n wait_for_backup_completion(client, std_volume_name, snap1.name)\n backup_volume = client.by_id_backupVolume(std_volume_name)\n _, b1 = common.find_backup(client, std_volume_name, snap1.name)\n\n write_pod_volume_random_data(core_api, std_pod_name,\n \"/data/test2\", DATA_SIZE_IN_MB_2)\n\n snap2 = create_snapshot(client, std_volume_name)\n\n backup_volume.backupDelete(name=b1.name)\n\n try:\n std_volume.snapshotBackup(name=snap2.name)\n except Exception as e:\n assert e.error.status == 500\n\n wait_for_backup_delete(client, volume_name, b1.name)\n try:\n _, b2 = common.find_backup(client, std_volume_name, snap2.name)\n except AssertionError:\n b2 = None\n assert b2 is None\n\n backupstore_wait_for_lock_expiration()\n\n\n@pytest.mark.skip(reason=\"This test takes more than 20 mins to run\") # NOQA\ndef test_backup_lock_restoration_during_deletion(set_random_backupstore, client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA\n \"\"\"\n Test backup locks\n Context:\n To test the locking mechanism that utilizes the backupstore,\n to prevent the following case of concurrent operations.\n - prevent backup restoration during backup deletion\n\n steps:\n 1. Create a volume, then create the corresponding PV, PVC and Pod.\n 2. Wait for the pod running and the volume healthy.\n 3. Write data to the pod volume and get the md5sum.\n 4. Take a backup.\n 5. Wait for the backup to be completed.\n 6. Write more data (1.5 Gi) to the volume and take another backup.\n 7. 
Wait for the 2nd backup to be completed.\n 8. Delete the 2nd backup.\n 9. Without waiting for the backup deletion completion, restore the 1st\n backup from the backup store.\n 10. Verify the restored volume become faulted.\n 11. Wait for the 2nd backup deletion and assert the count of the backups\n with 1 in the backup store.\n \"\"\"\n backupstore_cleanup(client)\n std_volume_name = volume_name + \"-std\"\n restore_volume_name = volume_name + \"-restore\"\n std_pod_name, _, _, std_md5sum1 = \\\n prepare_pod_with_data_in_mb(\n client, core_api, csi_pv, pvc, pod_make, std_volume_name,\n volume_size=str(3*Gi), data_size_in_mb=DATA_SIZE_IN_MB_1)\n std_volume = client.by_id_volume(std_volume_name)\n snap1 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap1.name)\n wait_for_backup_completion(client, std_volume_name, snap1.name)\n std_volume.snapshotBackup(name=snap1.name)\n backup_volume = client.by_id_backupVolume(std_volume_name)\n _, b1 = common.find_backup(client, std_volume_name, snap1.name)\n\n write_pod_volume_random_data(core_api, std_pod_name,\n \"/data/test2\", 1500)\n snap2 = create_snapshot(client, std_volume_name)\n std_volume.snapshotBackup(name=snap2.name)\n wait_for_backup_completion(client, std_volume_name, snap2.name,\n retry_count=1200)\n _, b2 = common.find_backup(client, std_volume_name, snap2.name)\n\n backup_volume.backupDelete(name=b2.name)\n\n client.create_volume(name=restore_volume_name, fromBackup=b1.url)\n wait_for_volume_detached(client, restore_volume_name)\n restore_volume = client.by_id_volume(restore_volume_name)\n assert restore_volume[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_FAULTED\n\n wait_for_backup_delete(client, volume_name, b2.name)\n\n _, b1 = common.find_backup(client, std_volume_name, snap1.name)\n assert b1 is not None\n\n try:\n _, b2 = common.find_backup(client, std_volume_name, snap2.name)\n except AssertionError:\n b2 = None\n assert b2 is None\n\n\n@pytest.mark.coretest\ndef 
test_allow_volume_creation_with_degraded_availability(client, volume_name): # NOQA\n \"\"\"\n Test Allow Volume Creation with Degraded Availability (API)\n\n Requirement:\n 1. Set `allow-volume-creation-with-degraded-availability` to true.\n 2. `node-level-soft-anti-affinity` to false.\n\n Steps:\n (degraded availablity)\n 1. Disable scheduling for node 2 and 3.\n 2. Create a volume with three replicas.\n 1. Volume should be `ready` after creation and `Scheduled` is true.\n 2. One replica schedule succeed. Two other replicas failed scheduling.\n 3. Enable the scheduling of node 2.\n 1. One additional replica of the volume will become scheduled.\n 2. The other replica is still failed to schedule.\n 3. Scheduled condition is still true.\n 4. Attach the volume.\n 1. After the volume is attached, scheduled condition become false.\n 5. Write data to the volume.\n 6. Detach the volume.\n 1. Scheduled condition should become true.\n 7. Reattach the volume to verify the data.\n 1. Scheduled condition should become false.\n 8. Enable the scheduling for the node 3.\n 9. Wait for the scheduling condition to become true.\n 10. 
Detach and reattach the volume to verify the data.\n \"\"\"\n # enable volume create with degraded availability\n degraded_availability_setting = \\\n client.by_id_setting(common.SETTING_DEGRADED_AVAILABILITY)\n client.update(degraded_availability_setting, value=\"true\")\n\n # disable node level soft anti-affinity\n replica_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(replica_soft_anti_affinity_setting, value=\"false\")\n\n nodes = client.list_node()\n node1 = nodes[0]\n node2 = nodes[1]\n node3 = nodes[2]\n\n # disable node 2 and 3 to schedule to node 1\n client.update(node2, allowScheduling=False)\n client.update(node3, allowScheduling=False)\n\n # create volume\n volume = create_and_check_volume(client, volume_name, num_of_replicas=3)\n assert volume.ready\n assert volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"True\"\n\n # check only 1 replica scheduled successfully\n common.wait_for_replica_scheduled(client, volume_name,\n to_nodes=[node1.name],\n expect_success=1, expect_fail=2,\n is_vol_healthy=False,\n is_replica_running=False)\n\n # enable node 2 to schedule to node 1 and 2\n client.update(node2, allowScheduling=True)\n\n # check 2 replicas scheduled successfully\n common.wait_for_replica_scheduled(client, volume_name,\n to_nodes=[node1.name, node2.name],\n expect_success=2, expect_fail=1,\n is_vol_healthy=False,\n is_replica_running=False)\n\n volume = client.by_id_volume(volume_name)\n assert volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"True\"\n\n # attach volume\n self_host = get_self_host_id()\n volume.attach(hostId=self_host)\n volume = common.wait_for_volume_degraded(client, volume_name)\n assert volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"False\"\n\n data = write_volume_random_data(volume, {})\n\n # detach volume\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n assert 
volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"True\"\n\n # re-attach volume to verify the data\n volume.attach(hostId=self_host)\n volume = common.wait_for_volume_degraded(client, volume_name)\n check_volume_data(volume, data)\n assert volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"False\"\n\n # enable node 3 to schedule to node 1, 2 and 3\n client.update(node3, allowScheduling=True)\n common.wait_for_volume_condition_scheduled(client, volume_name,\n \"status\", \"True\")\n\n # detach and re-attach the volume to verify the data\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=self_host)\n volume = common.wait_for_volume_degraded(client, volume_name)\n check_volume_data(volume, data)\n\n\n@pytest.mark.coretest\ndef test_allow_volume_creation_with_degraded_availability_error(\n client, volume_name): # NOQA\n \"\"\"\n Test Allow Volume Creation with Degraded Availability (API)\n\n Requirement:\n 1. Set `allow-volume-creation-with-degraded-availability` to true.\n 2. `node-level-soft-anti-affinity` to false.\n\n Steps:\n (no availability)\n 1. Disable all nodes' scheduling.\n 2. Create a volume with three replicas.\n 1. Volume should be NotReady after creation.\n 2. Scheduled condition should become false.\n 3. Attaching the volume should result in error.\n 4. Enable one node's scheduling.\n 1. Volume should become Ready soon.\n 2. Scheduled condition should become true.\n 5. Attach the volume. Write data. 
Detach and reattach to verify the data.\n \"\"\"\n # enable volume create with degraded availability\n degraded_availability_setting = \\\n client.by_id_setting(common.SETTING_DEGRADED_AVAILABILITY)\n client.update(degraded_availability_setting, value=\"true\")\n\n # disable node level soft anti-affinity\n replica_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(replica_soft_anti_affinity_setting, value=\"false\")\n\n nodes = client.list_node()\n node1 = nodes[0]\n node2 = nodes[1]\n node3 = nodes[2]\n\n # disable node 1, 2 and 3 to make 0 available node\n client.update(node1, allowScheduling=False)\n client.update(node2, allowScheduling=False)\n client.update(node3, allowScheduling=False)\n\n # create volume\n volume = create_and_check_volume(client, volume_name, num_of_replicas=3)\n assert not volume.ready\n assert volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"False\"\n\n # attach the volume\n self_host = get_self_host_id()\n with pytest.raises(Exception) as e:\n volume.attach(hostId=self_host)\n assert \"cannot be scheduled\" in str(e.value)\n\n # enable node 1\n client.update(node1, allowScheduling=True)\n\n # check only 1 replica scheduled successfully\n common.wait_for_replica_scheduled(client, volume_name,\n to_nodes=[node1.name],\n expect_success=1, expect_fail=2,\n is_vol_healthy=False,\n is_replica_running=False)\n volume = common.wait_for_volume_status(client, volume_name,\n VOLUME_FIELD_READY, True)\n assert volume.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == \"True\"\n\n # attach the volume and write some data\n volume.attach(hostId=self_host)\n volume = common.wait_for_volume_degraded(client, volume_name)\n data = write_volume_random_data(volume, {})\n\n # detach and re-attach the volume to verify the data\n volume.detach()\n volume = common.wait_for_volume_detached(client, volume_name)\n\n volume.attach(hostId=self_host)\n volume = 
common.wait_for_volume_degraded(client, volume_name)\n check_volume_data(volume, data)\n\n\n@pytest.mark.skip(reason=\"TODO\") # NOQA\ndef test_multiple_volumes_creation_with_degraded_availability():\n \"\"\"\n Goal:\n We want to verify that multiple volumes with degraded availability\n can be created, attached, detached, and deleted at the nearly the\n same time.\n\n Steps:\n 1. create StorageClass longhorn-extra with numberOfReplicas=5\n Set allow-volume-creation-with-degraded-availability to True\n 2. Deploy this StatefulSet:\n https://github.com/longhorn/longhorn/issues/2073#issuecomment-742948726\n 3. In a 1-min retry loop, Verify that all 10 volumes are healthy\n 4. Delete the StatefulSet\n 5. In a 1-min retry loop, Verify that all 10 volumes are detached\n 6. Find and delete the PVC of the 10 volumes.\n 7. In a 1-min retry loop, Verify that all 10 volumes are deleted\n 8. Make sure to delete all extra storage classes in\n common.cleanup_client()\n \"\"\"\n pass\n\n\n@pytest.mark.skip(reason=\"TODO\")\ndef test_allow_volume_creation_with_degraded_availability_restore():\n \"\"\"\n Test Allow Volume Creation with Degraded Availability (Restore)\n\n Requirement:\n 1. Set `allow-volume-creation-with-degraded-availability` to true\n 2. `node-level-soft-anti-affinity` to false\n 3. Create a backup of 800MB.\n\n Steps:\n (restore)\n 1. Disable scheduling for node 2 and 3\n 2. Restore a volume with three replicas.\n 1. Volume should be attached automatically and `Scheduled` is true\n 2. One replica schedule succeed. Two other replicas failed scheduling.\n 3. During the restore, enable scheduling for node 2.\n 1. One additional replica of the volume will become scheduled\n 2. The other replica is still failed to schedule.\n 3. Scheduled condition is still true\n 4. Wait for the restore to complete and volume detach automatically.\n 1. After the volume detached, scheduled condition become true.\n 5. Attach the volume and verify the data.\n 1. 
After the volume is attached, scheduled condition become false.\n\n (DR volume)\n 1. Disable scheduling for node 2 and 3\n 2. Create a DR volume from backup with three replicas.\n 1. Volume should be attached automatically and `Scheduled` is true\n 2. One replica schedule succeed. Two other replicas failed scheduling.\n 3. During the restore, enable scheduling for node 2.\n 1. One additional replica of the volume will become scheduled\n 2. The other replica is still failed to schedule.\n 3. Scheduled condition is still true\n 4. Wait for the restore to complete.\n 5. Enable the scheduling for node 3.\n 1. DR volume should automatically rebuild the third replica.\n 6. Activate the volume and verify the data.\n \"\"\"\n\n\ndef test_cleanup_system_generated_snapshots(client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA\n \"\"\"\n Test Cleanup System Generated Snapshots\n\n 1. Enabled 'Auto Cleanup System Generated Snapshot'.\n 2. Create a volume and attach it to a node.\n 3. Write some data to the volume and get the checksum of the data.\n 4. Delete a random replica to trigger a system generated snapshot.\n 5. Repeat Step 3 for 3 times, and make sure only one snapshot is left.\n 6. 
Check the data with the saved checksum.\n \"\"\"\n\n pod_name, _, _, md5sum1 = \\\n prepare_pod_with_data_in_mb(\n client, core_api, csi_pv, pvc, pod_make, volume_name)\n\n volume = client.by_id_volume(volume_name)\n\n for i in range(3):\n replica_name = volume[\"replicas\"][i][\"name\"]\n volume.replicaRemove(name=replica_name)\n wait_for_volume_degraded(client, volume_name)\n wait_for_volume_healthy(client, volume_name)\n\n volume = client.by_id_volume(volume_name)\n # For the below assertion, the number of snapshots is compared with 2\n # as the list of snapshot have the volume-head too.\n assert len(volume.snapshotList()) == 2\n\n read_md5sum1 = get_pod_data_md5sum(core_api, pod_name, \"/data/test\")\n assert md5sum1 == read_md5sum1\n\n\ndef test_volume_toomanysnapshots_condition(client, core_api, volume_name): # NOQA\n \"\"\"\n Test Volume TooManySnapshots Condition\n\n 1. Create a volume and attach it to a node.\n 2. Check the 'TooManySnapshots' condition is False.\n 3. Writing data to this volume and meanwhile taking 100 snapshots.\n 4. Check the 'TooManySnapshots' condition is True.\n 5. Take one more snapshot to make sure snapshots works fine.\n 6. 
Delete 2 snapshots, and check the 'TooManySnapshots' condition is\n False.\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n self_hostId = get_self_host_id()\n volume = volume.attach(hostId=self_hostId)\n volume = common.wait_for_volume_healthy(client, volume_name)\n\n snap = {}\n max_count = 100\n for i in range(max_count):\n write_volume_random_data(volume, {})\n\n count = i + 1\n snap[count] = create_snapshot(client, volume_name)\n\n if count < max_count:\n volume = client.by_id_volume(volume_name)\n assert volume.conditions.toomanysnapshots.status == \"False\"\n else:\n wait_for_volume_condition_toomanysnapshots(client, volume_name,\n \"status\", \"True\")\n\n snap[max_count + 1] = create_snapshot(client, volume_name)\n wait_for_volume_condition_toomanysnapshots(client, volume_name,\n \"status\", \"True\")\n\n volume = client.by_id_volume(volume_name)\n volume.snapshotDelete(name=snap[100].name)\n volume.snapshotDelete(name=snap[99].name)\n\n volume.snapshotPurge()\n volume = wait_for_snapshot_purge(client, volume_name,\n snap[100].name, snap[99].name)\n\n wait_for_volume_condition_toomanysnapshots(client, volume_name,\n \"status\", \"False\")\n","sub_path":"manager/integration/tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":135542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"548504849","text":"def gcd(a, b):\n if (b == 0):\n return a\n return gcd(b, a%b)\n\nN = int(input())\nfor i in range(N):\n A = input().split()\n size = int(A[0])\n A = A[1:]\n ans=0\n for k in range(0, size):\n for j in range(k+1, size):\n ans+=gcd(int(A[k]), int(A[j]))\n print(ans)\n","sub_path":"intro/9613.py","file_name":"9613.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"30973769","text":"import os\nimport sys\nimport urllib.request\n\ndef checkImg(id,secret,keys,input):\n client_id = id # 개발자센터에서 발급받은 Client ID 값\n client_secret = secret # 개발자센터에서 발급받은 Client Secret 값\n code = \"1\"\n key = keys\n value = input\n url = \"https://openapi.naver.com/v1/captcha/nkey?code=\" + code + \"&key=\" + key + \"&value=\" + value\n request = urllib.request.Request(url)\n request.add_header(\"X-Naver-Client-Id\",client_id)\n request.add_header(\"X-Naver-Client-Secret\",client_secret)\n response = urllib.request.urlopen(request)\n rescode = response.getcode()\n if(rescode==200):\n response_body = response.read()\n result = response_body.decode('utf-8')\n return result\n else:\n print(\"Error Code:\" + rescode)","sub_path":"HelloPython/capchar/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"39717340","text":"#!/usr/bin/env python2.7\n\nimport numpy as np\nimport commpy as cp\n\n__all__ = [\n 'transmitMatrix',\n 'fourierMatrix',\n 'samplingMatrix',\n 'randomQAMSymbols',\n 'gfdm_tx',\n 'gfdm_rx']\n\n# FIXME TransmitMatrix should group different subcarriers on timeslot-basis\n\n\ndef transmitMatrix(filtertype, alpha, M, K, N):\n '''\n Create Convolution Matrix for pulse shaping\n\n filtertype : (rrc,rc)\n alpha : roll-off-factor\n sampling_rate : sampling rate (in Hz)\n symbol_period : symbol period (in s)\n M : number of symbol time slots\n K : number of subcarriers\n\n h_matrix: array of impulse responses for time slot (0...M-1)\n '''\n if filtertype == \"rrc\":\n time_h, h = cp.rrcosfilter(M*K*N, alpha, N*K, 1)\n elif filtertype == \"rc\":\n time_h, h = cp.rcosfilter(M*K*N, alpha, N*K, 1)\n # Move filter cyclic\n G_tx = np.array([np.roll(h, m - (M*K*N/2)) for m in xrange(M*K*N)])\n S_mn = samplingMatrix(M, K*N)\n S_nm = samplingMatrix(K, M)\n if N > 1:\n # if oversampling is specified add zeros to samplingMatrix\n S_nm = np.insert(\n S_nm, M * K / 2, np.zeros((M * K * (N - 1), K), dtype='complex'),\n axis=0)\n W_H = fourierMatrix(M*K*N).conj().transpose()\n # Resample Filter\n G_tx_s = np.dot(G_tx, S_mn)\n # Resample FourierMatrix\n W_s = np.dot(S_nm.transpose(), W_H)\n # compute and use all elements of the main diagonal W_s.dot(G_tx_s)\n A = np.array([(np.kron(W_s.transpose()[n], G_tx_s[n]))\n for n in xrange(K*M*N)])\n\n return A\n\n\ndef fourierMatrix(N):\n i, j = np.meshgrid(np.arange(N), np.arange(N))\n omega = np.exp(- 2 * np.pi * 1j / N)\n W = np.power(omega, i * j) / np.sqrt(N)\n return W\n\n\ndef samplingMatrix(M, K):\n output = np.zeros((M*K, M), dtype='complex')\n for n in xrange(M*K):\n for m in xrange(M):\n if n == ((m)*K):\n output[n][m] = 1\n return output\n\n\ndef randomQAMSymbols(length, M):\n '''\n length: number of symbols to generate\n M: M-QAM - Order (4,16,64,...)\n '''\n n = np.sqrt(M/4)\n if np.around(n) - n > 
1e-10:\n raise Exception('M must be power of 4')\n n = int(n)\n n_M_pos = np.array([1+2*i for i in xrange(n)])\n n_M_neg = np.array([-1-2*i for i in xrange(n)])\n choices = np.concatenate((n_M_pos, n_M_neg))\n return np.array(\n [np.random.choice(choices) + 1j * np.random.choice\n (choices) for i in xrange(length)])\n\n\ndef gfdm_tx(x, filtertype, alpha, M, K, L, N):\n '''\n x: Input-Symbols (length M*K)\n filtertype: ['rrc','rc']\n alpha: rolloff-factor\n M: number of timeslots\n K: number of subcarrier\n N: oversampling-factor\n '''\n A = transmitMatrix(filtertype, alpha, M, K, N)\n A = A*M\n tx =A.dot(x)\n return tx\n\n\ndef gfdm_rx(y, filtertype, alpha, M, K, L, N, QAM, J):\n '''\n y: Transmit-Symbols (length M*K*N)\n filtertype: ['rrc','rc']\n alpha: rolloff-factor\n rx_strat: ['zf','mf']\n M: number of timeslots\n K: numbor of subcarrier\n N: oversampling-factor\n '''\n A = transmitMatrix(filtertype, alpha, M, K, N)\n #if rx_strat == \"zf\":\n # A_rx = np.linalg.pinv(A)\n #else:\n #A_rx = np.linalg.pinv(A)/M\n A_rx = A.conj().transpose()\n rx = np.array([])\n rx = A_rx.dot(y)\n return rx\n\n\ndef gfdm_tx_fft(x, filtertype, alpha, M, K):\n '''\n Realization of GFDM-Transmitter in FFT:\n Required input: x a np.array of length M*K\n FFT is applied in shifted version (zero-frequency term is centered)\n First symbol is on -freq_max and last symbol ist on freq_max\n h: Prototype-filter impulse response\n s_e[n]:[s_0[n] 0{M-1} s_1[n] .... 
s_N-1[n] 0{M-1}]\n x[n] = h (*) (IFFT(s_e[n]))\n x_gfdm = sum_M(circshift(x[n],nN))\n '''\n if filtertype == \"rrc\":\n time_h, h = cp.rrcosfilter(M*K, alpha, K, 1)\n elif filtertype == \"rc\":\n time_h, h = cp.rcosfilter(M*K, alpha, K, 1)\n # Initialization of output vector\n x_out = np.zeros(M*K, dtype='complex')\n # circulary move filter window to symbol 0\n h = np.roll(h, -(M*K/2))\n # for each gfdm-block\n # for each timeslot\n for m in xrange(M):\n # select the K next symbols\n #symbols = np.fft.ifftshift(x[(m*K):(m+1)*K])\n symbols = np.fft.ifftshift(np.array([x[k*M+m] for k in xrange(K)]))\n # transform K symbols to K carriertones in time-domain\n sym_t = np.fft.ifft(symbols)\n sym_te = np.array([])\n # Repeat result M-times in a vector\n for m2 in xrange(M):\n sym_te = np.concatenate((sym_te, sym_t))\n # multipy with transmit filter -> better convolve?\n sym_te = np.convolve(sym_te, h, mode='same')\n #sym_te = np.multiply(sym_te,h)\n # shift result m*K samples to the right and add it up to the result\n # vector\n x_out = np.add(x_out, np.roll(sym_te, m*K))\n\n return x_out\n\n\ndef gfdm_tx_fft2(x, filtertype, alpha, M, K, L, N):\n '''\n x: Input-Array (length: M*K symbols)\n filtertype: ('rrc'|'rc')\n alpha: (0,1) float\n M: number of slots\n K: number of subcarriers\n L: freq-domain length of filter\n\n Low-complexity transmitter implementation as proposed by G. 
Fettweis\n '''\n if filtertype == \"rrc\":\n time_h, h = cp.rrcosfilter(M*K, alpha, K, 1)\n elif filtertype == \"rc\":\n time_h, h = cp.rcosfilter(M*K, alpha, K, 1)\n h = np.roll(h, h.shape[-1]/2)\n H = np.fft.fft(h)\n H_sparse = np.concatenate((H[0:(M*L)/2], H[-(M*L)/2:]))\n # Sort Input subcarrierwise\n x = reshape_input(x, M, K)\n x_out = np.zeros((M*K)+(L-1)*M, dtype='complex')\n for k in xrange(K):\n # M rows and L columns with respective FFT output\n # pick symbols per subcarrier\n x_k = x[k*M:((k+1)*M)]\n # perform fft and switch to frequency domain\n x_f = np.fft.fft(x_k)\n # copy values of M-point DFT to obtain MK-point DFT\n x_f_L = np.tile(x_f, L)\n # make data-vector 'sparse'\n #x_f_L = np.concatenate((x_f_K[0:(M*L)/2], x_f_K[-(M*L)/2:]))\n # filter with sparse filter taps in frequency domain\n x_fil = np.multiply(x_f_L, H_sparse)\n # Add data-vector to correct position -max neg frequency : 0 :\n # max_pos_frequency\n x_out[k*M:(k+L)*M] = x_out[k*M:(L+k)*M] + np.fft.fftshift(x_fil)\n # Add 'oversampled' parts of first subcarrier to end and 'oversampled' parts\n # of last subcarrier to start\n x_first = x_out[0:(L-1)*M/2]\n x_last = x_out[-(L-1)*M/2:]\n x_out = x_out[(L-1)*M/2:-(L-1)*M/2]\n x_out[0:(L-1)*M/2] = x_out[0:(L-1)*M/2] + x_last\n x_out[-(L-1)*M/2:] = x_out[-(L-1)*M/2:] + x_first\n x_t = np.fft.ifft(np.fft.ifftshift(x_out))\n x_t = (1.0/K)*x_t\n return x_t\n\n\ndef gfdm_rx_fft2(y, filtertype, alpha, M, K, L, N, QAM,J):\n '''\n y: transmitted gfdm-block (length: M*K samples)\n filtertype: ('rrc'|'rc')\n alpha: (0,1) float\n M: number of slots\n K: number of subcarriers\n L: freq-domain length of filter\n Low-complexity receiver implementation as proposed by G.Fettweis\n (based on sparse frequency Domain Processing)\n\n output: demodulated samples in original order (first K samples in timeslot 1, second K ...)\n '''\n if filtertype == \"rrc\":\n time_h, h = cp.rrcosfilter(M*K, alpha, K, 1)\n h = np.roll(h, h.shape[-1]/2)\n H_rx = np.fft.fft(h)\n 
H_sparse = np.concatenate((H_rx[0:M*L/2], H_rx[-M*L/2:]))\n y_ifft = np.array([])\n y = (1.0/K)*y\n # Transfer input to frequency domain and center around 0th frequency bin\n y_f = np.fft.fftshift(np.fft.fft(y))\n # Filter and superposition in frequency domain\n Y_fs = gfdm_rx_filsup(y_f, H_sparse, M, K, L)\n # Demodulate per subcarrier\n y_ifft = gfdm_rx_demod(Y_fs, K)\n if J>0:\n y_ifft = gfdm_rx_sic(K,M,J,H_sparse,y_ifft,Y_fs,QAM)\n y_ifft = np.reshape(y_ifft,(K*M))\n # Sort output in timeslot,subcarrier order\n y_ifft = reshape_input(y_ifft, K, M)\n return y_ifft\n\ndef gfdm_rx_demod(Y_fs, K):\n '''\n Y_fs: received samples filtered and superpositioned in frequency domain (not centered) KxM-array\n K: Number of subcarriers\n\n output: demodulated samples in subcarrier order (first M samples are on subcarrier 1, second M....)\n '''\n y_ifft = np.array([])\n for k in xrange(K):\n y_ifft = np.concatenate((y_ifft, np.fft.ifft(Y_fs[k])))\n return y_ifft\n\ndef gfdm_rx_filsup(y_f, H_sparse, M, K, L):\n '''\n y_f: input samples centered in frequency domain 1xK*M-array\n H_sparse: Rx-filter per subcarrier - length (M*L)\n M: number of time slots\n K: number of subcarrier\n L: width of sparse Rx-filter in number of subcarrier\n\n output: (K,M) - array\n '''\n y_out = np.empty((K,M),dtype='complex')\n y_f = np.concatenate((y_f[-(L-1)*M/2:],y_f,y_f[0:(L-1)*M/2]))\n for k in xrange(K):\n # select kth subcarrier\n y_down = y_f[k*M:(k+L)*M]\n # 'uncenter' in Frequency domain\n y_down = np.fft.ifftshift(y_down)\n # apply filter in frequency domain (not centered)\n y_filter = np.multiply(y_down, H_sparse)\n # Superposition L samples in frequency domain\n y_out[k] = np.sum(y_filter.reshape(L, M), axis=0)\n return y_out\n\n\n\ndef gfdm_rx_sic(K,M,J,H_sparse,d_rx,Y_fs,QAM):\n '''\n K: Number of subcarriers\n M: Number of slots\n J: Number of Iterations for Interference Cancellation\n H_sparse: Rx-filter of length M*L in frequency domain\n d_rx: mapped symbols before Interference 
cancellation (sorted by subcarrier)\n Y_fs: filtered, superpositioned input samples in frequency domain (not centered) KxM-array\n QAM: QAM order\n '''\n # Receive all subcarriers in F-Domain\n # map each symbol to closest QAM - Point\n # d_rx s\n qam_mod = cp.QAMModem(QAM)\n # Calculate rising/falling flank interference coefficients\n H_rf = np.multiply((H_sparse[0:M]/K),(H_sparse[M:]/K))\n # Reshape mapped symbols into per-subcarrier array\n d_p = np.empty((K,M),dtype='complex')\n # d_p (K,M)\n for k in xrange(K):\n d_p[k] = qam_mod.mapping(d_rx[k*M:(k+1)*M],'hard')\n for j in xrange(J):\n y = np.empty((K,M),dtype='complex')\n for k in xrange(K):\n y[k] = Y_fs[k] - H_rf*np.fft.fft(d_p[(k-1) % K] + d_p[(k+1) % K])\n # Recalculate d_rx\n d_rx = gfdm_rx_demod(y,K)\n for k in xrange(K):\n d_p[k] = d_rx[k*M:(k+1)*M]\n d_p[k] = qam_mod.mapping(d_p[k], 'hard')\n return d_rx\n\n\ndef sync_symbol(filtertype, alpha, K, n_mod, N):\n '''\n Generate Schmidls Training Symbols to achieve Receiver Synchronisation\n K: should be an odd number\n Process:\n * First Symbol: Transmit PN-Sequence on all even frequencies while zeros on the odd\n frequencies. 
Constant signal energy -> Multiply every Symbol with sqrt(2)\n\n * Second Symbol: Transmit PN-Sequence on all odd frequencies and another PN-Sequence\n on the even frequencies\n\n\n '''\n pn_order = 14\n pn_seed = '00101101010010'\n pn_mask = '01001110100111'\n if int(np.floor(K/2.0)) % 2:\n n_even_freq = int(np.floor(K/2.0))\n else:\n n_even_freq = int(np.ceil(K/2.0))\n seq_length = n_even_freq*n_mod\n sym_sequence = np.zeros(K, dtype='complex')\n pn_sequence = cp.pnsequence(pn_order, pn_seed, pn_mask, seq_length)\n qam_mod = cp.modulation.QAMModem(2**n_mod)\n qam_sequence = qam_mod.modulate(pn_sequence)\n for i in xrange(len(sym_sequence)):\n if not i % 2:\n sym_sequence[i] = qam_sequence[i/2]\n ifft_sequence = np.fft.ifftshift(sym_sequence)\n output = gfdm_tx(ifft_sequence, filtertype, alpha, 1, K, N)\n # output = np.fft.ifft(np.sqrt(2)*ifft_sequence)\n\n return output\n\n\ndef sync_symbol2(filtertype, alpha, K, L, n_mod):\n pn_order = 14\n pn_seed = '01001000111011'\n pn_mask = '01001101001110'\n seq_length = K*n_mod\n pn_sequence = cp.pnsequence(pn_order, pn_seed, pn_mask, seq_length)\n qam_mod = cp.modulation.QAMModem(2**n_mod)\n qam_sequence = qam_mod.modulate(pn_sequence)\n output = gfdm_tx_fft2(np.tile(qam_sequence, 2), filtertype, alpha, 2, K, 2, 1)\n return output\n\n\ndef sync_product(x, L):\n '''\n Auto-Korrelation der ersten L Samples mit den naechsten L Samples\n '''\n return np.sum([x[i].conj()*x[i+L] for i in xrange(L)])\n\n\ndef sync_iter(x, L, cp):\n '''\n Schrittweise Iteration ueber alle Samples (zunaechst die ersten 2*L Samples)\n Danach ueber die restlichen len(x)-2*L Samples\n '''\n P_d = np.array([])\n P_d = np.append(P_d, sync_product(x, L))\n for i in xrange(len(x)-2*L):\n P_d = np.append(\n P_d, P_d[i] + (x[L + i].conj() * x[2 * L + i]) -\n (x[i].conj() * x[L + i]))\n P_d_out = P_d\n P_d = np.append(np.zeros(cp, dtype='complex'), P_d)\n # Integrate cp-samples to eliminate cp-plateau\n P_di = np.array([])\n for i in xrange(cp, 
len(x)-2*L):\n P_di = np.append(\n P_di, (1.0/(cp+1)*np.sum(np.abs(P_d[i-cp:i])**2)))\n return (P_di, P_d_out)\n\n\ndef sync_energy(x, L):\n '''\n Berechnung der Energie der zweiten Haelfte der Sync-Samples -> normieren\n '''\n R_d = np.array([])\n R_d = np.append(R_d, np.sum([np.abs(x[i+L])**2 for i in xrange(L)]))\n for i in xrange(len(x)-2*L):\n R_d = np.append(R_d, R_d[-1]+np.abs(x[2*L+i])**2-np.abs(x[L+i])**2)\n return R_d\n\n\ndef sync_perform(x, L, cp):\n (P_di, P_d) = sync_iter(x, L, cp)\n #R_d = sync_energy(x, L)\n #M_d = (np.abs(P_d)**2)/(R_d**2)\n return (P_di, P_d)\n\n\ndef sync_CFO(P_d, P_di):\n '''\n Gewinn von d (Beginn der Pilotsequenz) und d_f Frequenzoffset genormt auf 1/T.\n Kann bei nur einem Pilotsymbol nur +/- 1/T betragen.\n '''\n d = np.argmax(P_di)\n dphi = np.angle(P_d[d])\n d_f = dphi/(np.pi)\n print(\"P_d:{},df:{})\".format(P_d[d], d_f))\n\n return (d, d_f)\n\n\ndef add_cp(x, n):\n return np.append(x[-n:], x)\n\n\ndef reshape_input(x, M, K):\n '''\n 1. pick every m*Kth symbol and append to output.\n 2. Increase counter one time\n 3. perform step 1.+2. M times\n '''\n x_out = np.array([])\n for k in xrange(K):\n for m in xrange(M):\n x_out = np.append(x_out, x[(m*K)+k])\n return x_out\n","sub_path":"python/pygfdm/modulation.py","file_name":"modulation.py","file_ext":"py","file_size_in_byte":14331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"517644571","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 23 11:28:39 2020\n\n@author: Claudio Collado\n\n\"\"\"\n#Ejercicio 4.30\n\nimport csv\nimport matplotlib.pyplot as plt\n\ndef leer_arboles(nombre_archivo):\n arboleda = []\n f = open(nombre_archivo,'r',encoding = 'utf8')\n filas = csv.reader(f)\n encabezado = next(filas)\n for fila in filas:\n diccionario_arbol = dict(zip(encabezado,fila))\n arboleda.append(diccionario_arbol)\n return arboleda\n \narboleda = leer_arboles('arbolado-en-espacios-verdes.csv')\n\naltos = [float(arbol['altura_tot']) for arbol in arboleda if arbol['nombre_com'] == 'Jacarandá']\n\nplt.hist(altos,bins='auto')\n\n\n\n","sub_path":"4. Aleatoridad/4.5 Gráficos del Arbolado porteño/ejercicio_4.30.py","file_name":"ejercicio_4.30.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"276392384","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 22 09:55:49 2019\r\n\r\n@author: samde\r\n\"\"\"\r\n\r\n\"\"\"\r\nquand un des deux joueur a 2 points de différence avec l'autre il gagne le jeu\r\n\r\npour gagner un set il faut 2 jeu d'avance\r\n\r\n6 jeu = 1 set\r\n\r\npour gagner il faut 2 jeu d'avance sur l'autre\r\n\r\n6 jeu max par joueurs\r\n\r\npour gagner la partie il faut 2 set d'avance\r\n\r\n/!\\ : si le joueur arrive a 6 jeu et qui y a pas 2 jeu de diff : JEU DECISIF\r\n-> on compte les points de 1 a 7, il faut deux points de diff pour gagner un set\r\n\r\nBrouillon :\r\n \r\ndef jeu_v1():\r\n point_j1 = 0\r\n point_j2 = 0\r\n \r\n joueur_1_gagnant = None\r\n jeu_finis = False\r\n while jeu_finis != True :\r\n gagnant = int (input(\"quel joueur a gagner le point ? 1 ou 2\\n\"))\r\n if gagnant == 1 :\r\n point_j1 += 1 \r\n elif gagnant == 2:\r\n point_j2 += 1\r\n \r\n jeu_finis, joueur_1_gagnant = j1_as_win_jeu(point_j1,point_j2)\r\n if joueur_1_gagnant == True :\r\n print(\"joueur 1 a gagner\")\r\n else:\r\n print(\"joueur 2 a gagner\")\r\n\r\n#\"\"\"\r\n\r\ndef j1_as_win_jeu(score_j1,score_j2):\r\n diff_score = score_j1-score_j2\r\n j_as_win = False\r\n j1_win = None\r\n\r\n if diff_score >=2 :\r\n j1_win = True\r\n j_as_win = True\r\n elif diff_score <= -2:\r\n j1_win = False\r\n j_as_win = True\r\n return j_as_win,j1_win\r\n\r\ndef jeu_v2(liste_de_point):\r\n point_j1 = 0\r\n point_j2 = 0\r\n \r\n joueur_1_gagnant = None\r\n jeu_finis = False\r\n for g in list(liste_de_point):\r\n gagnant = int(g)\r\n if gagnant == 1 :\r\n point_j1 += 1 \r\n elif gagnant == 2:\r\n point_j2 += 1\r\n \r\n jeu_finis, joueur_1_gagnant = j1_as_win_jeu(point_j1,point_j2)\r\n \r\n if jeu_finis == True:\r\n if joueur_1_gagnant == True :\r\n print(\"joueur 1 a gagner le jeu\")\r\n else:\r\n print(\"joueur 2 a gagner le jeu\")\r\n return joueur_1_gagnant\r\n\r\ndef set_tennis():\r\n set_gagnant_j1 = 0\r\n set_gagnant_j2 = 0\r\n 
partie_finie = False\r\n set_decisif = False\r\n #tant que le score des deux joueurs est différent et strictement plus petit que 6 ou le score des deux joueurs est 6 et egal (set decisif)\r\n while partie_finie == False:\r\n jeu = input(\"entrez le score:\\n\")\r\n j1_win = jeu_v2(jeu)\r\n if j1_win == True :\r\n set_gagnant_j1 += 1\r\n else :\r\n set_gagnant_j2 += 1\r\n \r\n if abs(set_gagnant_j1-set_gagnant_j2)>=2:\r\n partie_finie = True\r\n elif set_gagnant_j1==6 and set_gagnant_j2==6 :\r\n partie_finie = True\r\n set_decisif = True\r\n \r\n j1_win = None\r\n if partie_finie == True and set_decisif == False :\r\n if set_gagnant_j1-set_gagnant_j2>0:\r\n print(\"joueur 1 a gagner le set\")\r\n j1_win = True\r\n else:\r\n print(\"joueur 2 a gagner le set\")\r\n j1_win = False\r\n else:\r\n j1_win = jeu_decisif()\r\n return j1_win\r\n\r\ndef jeu_decisif():\r\n j1_win = jeu_v2(input(\"ecrir les score du j1 et j2 : ex 1122212121211\\n\")) \r\n if j1_win == True :\r\n print(\"joueur 1 a gagner\")\r\n else:\r\n print(\"joueur 2 a gagner\")\r\n return j1_win\r\n\r\ndef match(nb_set_gagnant):\r\n set_j1 = 0\r\n set_j2 = 0\r\n while set_j1 < nb_set_gagnant and set_j2 < nb_set_gagnant :\r\n j1_win = set_tennis()\r\n if j1_win == True :\r\n set_j1 += 1\r\n else:\r\n set_j2 += 1\r\n if set_j1 == nb_set_gagnant :\r\n print(\"joueur 1 remporte le match\")\r\n else:\r\n print(\"joueur 2 remporte le match\")","sub_path":"Python Project/TD/Archive/Samuel/Tenis.py","file_name":"Tenis.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"356765811","text":"from django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.decorators import user_passes_test\nfrom .models import Leaders\nfrom user.models import Player\nfrom quiz.forms import UserAnswer\nfrom django.core.mail import send_mail\nfrom django.contrib import messages\n\nfrom django.contrib.admin.views.decorators import staff_member_required\n\n\n# Create your views here.\n@staff_member_required\ndef email_users(request) :\n player = Player.objects.all()\n return render(request, 'home/emails.html', {'players':player})\n\n\ndef not_logged_in(user):\n return not user.is_authenticated\n\n\ndef base(request):\n return render(request, 'home/base.html')\n\n\ndef home(request):\n return render(request, 'home/home.html')\n\n\ndef hello(request):\n return render(request, 'home/hello.html')\n\n\ndef rules(request):\n return render(request, 'home/rule.html')\n\n\n@staff_member_required\ndef page(request):\n \"Only After 1st Round is complete\"\n\n p = get_object_or_404(Leaders, pk=1)\n n = p.playerNum\n lst = [0, 1, 2]\n form = UserAnswer\n\n if request.method == 'GET':\n # print(n)\n j = 1\n leaders = Player.objects.order_by(\n '-score', 'last_submit')[:n]\n\n email_list = []\n\n for i in leaders:\n i.rank = j\n j += 1\n i.save()\n\n email_list.append(i.email)\n\n print(email_list)\n return render(request, 'home/page.html', {\"n\": n, \"leaders\": leaders, \"form\": form, \"lst\": lst[0]})\n\n if request.method == \"POST\": # if the admin submits the passcode\n my_form = UserAnswer(request.POST)\n\n if my_form.is_valid():\n ans = my_form.cleaned_data.get(\"answer\")\n organs = \"AlohaMoraHarryPotter\"\n\n \n\n # correct answer\n if (str(organs) == str(ans)): # if the answer is correct\n leaders = Player.objects.order_by(\n '-score', 'last_submit')[:n]\n for x in leaders:\n x.level2 = 0 \n \n x.save()\n print(x.name)\n\n with open('text_messages/login_user.txt', 'r') 
as file:\n data_email = file.read()\n\n send_mail(\n 'Congrats, you cleared round 1 !',\n str(data_email).format(x.name),\n 'ieeesbnitd@gmail.com',\n [x.email],\n fail_silently=True,\n )\n\n\n\n return render(request, 'home/page.html', {\"n\": n, \"leaders\": leaders, \"form\": form, \"lst\": lst[1]})\n\n # incorrect answer\n else: # returns the same page\n leaders = Player.objects.order_by(\n '-score', 'last_submit')[:n]\n return render(request, 'home/page.html', {\"n\": n, \"leaders\": leaders, \"form\": form, \"lst\": lst[2]})\n else:\n return HttpResponse('
Your Form Data was Invalid
')\n\n\ndef error_404(request, exception):\n data = {}\n return render(request,'home/404.html', data)\n\n\n\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"537013231","text":"import sys\nimport numpy as np\n\nsys.path.append(\"..\")\nfrom Game import Game\nfrom .Connect4Logic import *\n\n\nclass Connect4Game(Game):\n \"\"\"\n Connect4 Game class implementing the alpha-zero-general Game interface.\n \"\"\"\n\n def __init__(self, height=None, width=None, win_length=None, np_pieces=None):\n Game.__init__(self)\n board = Board(height, width, win_length, np_pieces)\n self.height = board.height\n self.width = board.width\n self.win_length = board.win_length\n self.np_pieces = np_pieces\n\n def getInitBoard(self):\n return Board(self.height, self.width, self.win_length, self.np_pieces)\n\n def getBoardSize(self):\n return (self.height, self.width)\n\n def getActionSize(self):\n return self.width\n\n def getNextState(self, board, player, action):\n \"\"\"Returns the board with updated move, original board is modified.\"\"\"\n board.add_stone(action, player)\n return board, -player\n\n def getValidMoves(self, board, player):\n \"Any zero value in top row in a valid move\"\n try:\n return board.get_valid_moves(player)\n except AttributeError:\n board = Board(self.height, self.width, self.win_length, board)\n return self.getValidMoves(board, player)\n\n def getGameEnded(self, board, player):\n try:\n winstate = board.get_win_state()\n if winstate.is_ended:\n if winstate.winner is None:\n # draw has very little value.\n return 1e-4\n elif winstate.winner == player:\n return +1\n elif winstate.winner == -player:\n return -1\n else:\n raise ValueError(\"Unexpected winstate found: \", winstate)\n else:\n # 0 used to represent unfinished game.\n return 0\n # TODO: Really, really need a better workaround\n except AttributeError:\n board = Board(self.height, self.width, self.win_length, board)\n return self.getGameEnded(board, player)\n\n def getCanonicalForm(self, board, player):\n # Flip player from 1 to -1\n return np.copy(board.np_pieces)\n\n def getSymmetries(self, board, pi):\n \"\"\"Board is left/right board 
symmetric\"\"\"\n return [(board, pi), (board[:, ::-1], pi[::-1])]\n\n def stringRepresentation(self, board):\n return board.tostring()\n\n @staticmethod\n def display(board):\n print(\" -----------------------\")\n print(\" \".join(map(str, range(len(board[0])))))\n print(board)\n print(\" -----------------------\")\n\n def getModelBoard(self, canonicalBoard, player=1):\n # TODO: Rename\n return Board(self.height, self.width, self.win_length, canonicalBoard)\n\n def getNextPlayer(self, board):\n pieces_1 = 0\n pieces_2 = 0\n for y in range(self.width):\n for x in range(self.height):\n if board[x][y] == 1:\n pieces_1 += 1\n if board[x][y] == -1:\n pieces_2 += 1\n if pieces_1 > pieces_2:\n return -1\n else:\n return 1\n\n\nclass InvisibleConnect4Game(Connect4Game):\n \n def __init__(self, height=None, width=None, win_length=None, np_pieces=None):\n super().__init__(height, width, win_length, np_pieces)\n board = InvisibleBoard(height, width, win_length, np_pieces)\n\n def getInitBoard(self):\n return InvisibleBoard(self.height, self.width, self.win_length, self.np_pieces)\n\n def getNextState(self, board, player, action):\n if board.add_stone(action, player):\n # Weird solution that'll probably work\n next_player = self.getNextPlayer(board)\n return (board, next_player)\n else:\n return (board, player)\n\n def getCanonicalForm(self, board, player):\n # Flip player from 1 to -1\n return np.copy(board.visible_pieces[player])\n\n def getModelBoard(self, canonicalBoard, player):\n # TODO: Rename\n return InvisibleBoard(self.height, self.width, self.win_length, canonicalBoard, player)\n\n @staticmethod\n def display(board):\n print(\" -----------------------\")\n print(\" \".join(map(str, range(len(board[0])))))\n print(board)\n print(board.visible_pieces[1])\n print(board.visible_pieces[-1])\n print(\" 
-----------------------\")\n","sub_path":"connect4/Connect4Game.py","file_name":"Connect4Game.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"515639851","text":"\r\n# datamodel class를 생성한다.\r\nclass DataModel:\r\n def __init__(self):\r\n print(\"데이터 모델 입니다.\")\r\n self.myLoginInfo = None\r\n self.itemList = []\r\n\r\n class Logininfo:\r\n def __init__(self, accCnt, accList, userId, userName, keyBSEC, firew, serverGubun):\r\n self.accCnt = accCnt\r\n self.accList = accList\r\n self.userId = userId\r\n self.userName = userName\r\n self.keyBSEC = keyBSEC\r\n self.firew = firew\r\n self.serverGubun = serverGubun\r\n\r\n def getServerGubun(self):\r\n if self.serverGubun == '1':\r\n return \"모의투자\"\r\n else:\r\n return \"실서버\"\r\n\r\n # 종목관리를 위한 클래스\r\n class ItemInfo:\r\n def __init__(self, itemCode, itemName):\r\n self.itemCode = itemCode\r\n self.itemName = itemName","sub_path":"dataModel.py","file_name":"dataModel.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"474031080","text":"from plug_manager_template import create_plug, ExtendedGenericManagerDescriptor\nfrom rboxfileplug import FileManager\nfrom models import RboxFileConnector, RboxFile\nfrom south.modelsinspector import add_introspection_rules\n\nclass SingleFileManagerDescriptor(ExtendedGenericManagerDescriptor):\n \"\"\"\n This class provides the functionality that makes the related-object\n managers available as attributes on a model class, for fields that have\n multiple \"remote\" values and have a GenericRelation defined in their model\n (rather than having another model pointed *at* them). In the example\n \"article.publications\", the publications attribute is a\n ReverseGenericRelatedObjectsDescriptor instance.\n \"\"\"\n \n def __init__(self, field, field_identifier, max_count=1, manager_kwargs=None):\n if max_count > 1:\n raise TypeError(\"max_count in a singlefileplug should be always 1\")\n self.field = field\n self.field_identifier = field_identifier\n self.max_count = max_count\n self.manager_kwargs = manager_kwargs\n \n\n def __get__(self, instance, instance_type=None, return_manager=False):\n manager = super(SingleFileManagerDescriptor, self).__get__(instance, instance_type=None)\n if return_manager:\n return manager\n try:\n return manager.all()[0]\n except IndexError:\n return None\n \n\n def __set__(self, instance, value):\n if instance is None:\n raise AttributeError(\"Manager must be accessed via instance\")\n \n if not isinstance(value, RboxFile):\n raise TypeError(\"Only accepts a RboxFile object\")\n \n manager = self.__get__(instance, return_manager=True) \n manager.remove(manager.all()[0])\n manager.add(value)\n\n\nRboxSingleFilePlug = create_plug(manager=FileManager, descriptor_cls=SingleFileManagerDescriptor, to=RboxFileConnector)\n\nrboxsinglefileplug_introspection_rules = [((RboxSingleFilePlug,),[],{\"field_identifier\": [\"field_identifier\",{}],},)]\nadd_introspection_rules(rboxsinglefileplug_introspection_rules, 
[\"filemanager.models.RboxSingleFilePlug\"])\n","sub_path":"rboxsinglefileplug.py","file_name":"rboxsinglefileplug.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"453915373","text":"class Solution(object):\n def removeDuplicates(self, nums):\n i=0\n j=i+1\n while j!=len(nums) and nums:\n if nums[i]==nums[j]:\n j+=1\n else:\n nums[i+1]=nums[j]\n i+=1\n j+=1\n return i+1\n'''\nRuntime: 60 ms, faster than 95.81% of Python online submissions for Remove Duplicates from Sorted Array.\nMemory Usage: 13.5 MB, less than 87.50% of Python online submissions for Remove Duplicates from Sorted Array.\n'''\n\nif __name__ == '__main__':\n nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n sol=Solution()\n ans=sol.removeDuplicates(nums)\n print('non-duplicated:',ans)\n for i in range(ans):\n print(nums[i])","sub_path":"26. Remove Duplicates from Sorted Array/doublePointer1.py","file_name":"doublePointer1.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"392476852","text":"from packages.datapoint import Datapoint\nimport csv\nimport os\nimport ntpath\nimport numpy as np\nimport random as rnd\nimport matplotlib.pyplot as plt\n\n\nclass SimulatorDatasetImporter(object):\n def __init__(self):\n self._dataset = []\n\n @property\n def dataset(self):\n return self._dataset\n\n def clear_dataset(self):\n self._dataset = []\n\n def append_dataset(self, csv_file_path, exclude_angles: list=None):\n if os.path.isfile(csv_file_path) is False:\n raise FileNotFoundError('Could not read csv in DatasetImporter.')\n\n csv_directory = ntpath.dirname(csv_file_path)\n csv_directory = os.path.join(csv_directory, 'IMG')\n with open(csv_file_path) as f:\n reader = csv.reader(f)\n for row in reader:\n\n angle = float(row[3])\n if exclude_angles is not None and angle in exclude_angles:\n continue\n\n datapoint = Datapoint(os.path.join(csv_directory, ntpath.basename(row[0])),\n os.path.join(csv_directory, ntpath.basename(row[1])),\n os.path.join(csv_directory, ntpath.basename(row[2])),\n float(row[3]),\n float(row[4]),\n float(row[5]),\n float(row[6]))\n self._dataset.append(datapoint)\n\n def harmonize_angles(self,\n epsilon=1e-1,\n exclude_angles: list=None,\n exclude_less_than=None,\n random_sample_max_to=None,\n center=False,\n show_histogram=False):\n\n # collect all angles in a dictionary\n angles_dict = {}\n for element in self._dataset:\n rounded = float(int(element.steering_angle / epsilon) * epsilon)\n if rounded not in angles_dict:\n angles_dict[rounded] = []\n # building up the histogram\n angles_dict[rounded].append(element)\n\n if show_histogram:\n self.visualize_dataset_frequencies(angles_dict, 'Non-normalized steering angles')\n\n # Random sample the maximum to x\n if random_sample_max_to is not None:\n max_entries_key = sorted([(k, len(angles_dict[k])) for k in angles_dict],\n key=lambda x: x[1],\n reverse=True)[0][0]\n rnd.shuffle(angles_dict[max_entries_key])\n angles_dict[max_entries_key] = 
angles_dict[max_entries_key][:random_sample_max_to]\n\n # Exclude some angles\n if exclude_angles is not None:\n for angle_to_ex in exclude_angles:\n angles_dict.pop(angle_to_ex)\n\n # Exclude rare occurrences\n to_pop = []\n if exclude_less_than is not None:\n for angle_k in angles_dict:\n if len(angles_dict[angle_k]) < exclude_less_than:\n to_pop.append(angle_k)\n for tp in to_pop:\n angles_dict.pop(tp)\n\n # Center the steering angles\n if center is True:\n max_angle = float(np.max([float(k) for k in angles_dict.keys()]))\n min_angle = float(np.min([float(k) for k in angles_dict.keys()]))\n\n if abs(min_angle) > max_angle:\n min_angle = -max_angle\n if max_angle > abs(min_angle):\n max_angle = abs(min_angle)\n\n to_pop = []\n for angle_k in angles_dict:\n if angle_k > max_angle or angle_k < min_angle:\n to_pop.append(angle_k)\n\n for tp in to_pop:\n angles_dict.pop(tp)\n\n # Calc the maximum count of a rounded steering angle\n angle_max_count = np.max(np.array([len(angles_dict[k]) for k in angles_dict]))\n\n # Now, fill up a new dictionary with indices\n angles_dict_harmonize = angles_dict.copy()\n\n for k in angles_dict_harmonize:\n needed_for_fill = angle_max_count - len(angles_dict[k])\n for i in range(needed_for_fill):\n angles_dict_harmonize[k].append(rnd.choice(angles_dict[k]))\n\n # Overwrite dataset with harmonized version of itself\n self._dataset = []\n for k in angles_dict_harmonize:\n for element in angles_dict_harmonize[k]:\n self._dataset.append(element)\n\n if show_histogram:\n self.visualize_dataset_frequencies(angles_dict_harmonize, 'Normalized steering angles')\n # Done\n return\n\n def visualize_dataset_frequencies(self, y, title: str):\n # count the frequencies of classes in dataset and visualize\n hist = {}\n\n for label_id in sorted(y.keys()):\n hist[label_id] = len(y[label_id])\n\n # visualize as histogram\n fig = plt.figure(figsize=(16, 12))\n sub = fig.add_subplot(1, 1, 1)\n sub.set_title(title)\n y_data = np.array([float(hist[k]) for k 
in hist])\n plt.bar(range(len(hist)), y_data, align='center')\n x_axis = np.array([k for k in hist])\n plt.xticks(range(len(hist)), x_axis, rotation='vertical', fontsize=8)\n plt.subplots_adjust(bottom=0.4)\n plt.show()\n","sub_path":"packages/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"388285014","text":"from __future__ import print_function\n\nimport requests\nfrom frinx_rest import odl_url_base, odl_headers, odl_credentials, parse_response\n\nfrom string import Template\n\nodl_url_components = odl_url_base + \"/operational/network-topology:network-topology/topology/unified/node/$id/yang-ext:mount/frinx-openconfig-platform:components\"\n\n\ndef read_components(task):\n device_id = task['inputData']['id']\n\n id_url = Template(odl_url_components).substitute({\"id\": device_id})\n\n r = requests.get(id_url, headers=odl_headers, auth=odl_credentials)\n response_code, response_json = parse_response(r)\n\n if response_code == requests.codes.ok:\n return {'status': 'COMPLETED', 'output': {'url': id_url,\n 'response_code': response_code,\n 'response_body': response_json},\n 'logs': []}\n else:\n return {'status': 'FAILED', 'output': {'url': id_url,\n 'response_code': response_code,\n 'response_body': response_json},\n 'logs': []}\n\n\ndef start(cc):\n print('Starting Platform workers')\n\n cc.start('OC-PLATFORM_read_components', read_components, False)\n","sub_path":"microservices/netinfra_utils/workers/platform_worker.py","file_name":"platform_worker.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"134301293","text":"import os\nimport random\n\nfrom flask import Flask ,render_template\n\nfrom . import db\n\ndef create_app(test_config=None):\n app = Flask(\"todolist\")\n app.config.from_mapping(\n DATABASE=os.path.join(app.instance_path,'todo.db')\n )\n if test_config is not None:\n app.config.update(test_config)\n \n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n from . import todo\n app.register_blueprint(todo.bp)\n \n from . import db\n db.init_app(app)\n \n return app","sub_path":"project/todolist/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"425660330","text":"\"\"\" Page.py module implements a page \"\"\"\nfrom .html_parser import parse_html\nfrom .log import warn\n\nfrom .compat import urljoin, parse_qsl\n\nfrom .form import Form\nfrom re import findall\n\n\nclass Page(object):\n def __init__(self, url, html, headers, status_code, blacklist=[]):\n self.html = html\n self.headers = headers\n self.url = url\n self.status_code = status_code\n self.document = parse_html(html, url)\n\n self.blacklist = blacklist\n\n @property\n def get_url_parameters(self):\n _, _, url = self.url.partition(\"?\")\n return parse_qsl(url)\n\n def get_forms(self):\n \"\"\" Generator for all forms on the page. \"\"\"\n for form in self.document.findall('.//form[@action]'):\n generated = Form(self.url, form)\n\n if any([findall(x, generated.action) for x in self.blacklist]):\n continue\n\n yield generated\n\n def get_links(self):\n \"\"\" Generator for all links on the page. \"\"\"\n for link in self.document.findall('.//a[@href]'):\n href = link.attrib.get('href')\n yield urljoin(self.url, href)\n","sub_path":"webvulnscan/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"544540923","text":"#coding=utf-8\n\n\nabc=list(map(int,input(\"input a b and c:\").split(\" \")))\n\n\nif abc[0]+abc[1]>abc[2] and abc[1]+abc[2]>abc[0] and abc[2]+abc[0]>abc[1]:\n\tp=0.5*(abc[0]+abc[1]+abc[2])\n\tprint('能构成三角形 面积是 %f' % (p*(p-abc[0])*(p-abc[1])*(p-abc[2]) )**0.5 )\nelse:\n\t print(\"不能构成三角形\")\n\n","sub_path":"Python-learning/2019course/python作业1/2_triangle.py","file_name":"2_triangle.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"166039811","text":"# 55분\n# 날짜를 넣으면 시작시간과 종료 시간을 반환하는 함수\n# 날짜는 고정되어 있으므로 시간, 분, 초만 생각하면 된다.\ndef time_log(line):\n # 2016년 9월 15일은 고정, 종료 시간, 프로세스 시간\n yymmdd, hhmmss, process = line.split()\n hh,mm,ss = hhmmss.split(':')\n hh,mm,ss = int(hh), int(mm), float(ss)\n # 소수점 연산의 경우 이진분수 표현 문제로 오류가 발생하므로 1000을 곱해 소수점 처리를 해준다.\n process= int(float(process[:-1])*1000)\n end_time = int((hh*3600 + mm*60 + ss)*1000)\n start_time = end_time - process + 1\n return start_time, end_time\n \ndef solution(lines):\n for idx in range(len(lines)):\n lines[idx] = time_log(lines[idx])\n # lines를 시작 시점을 기준으로 정렬\n lines.sort()\n max_cnt = 0\n for i in range(len(lines)):\n cnt = 0\n # 새로 시작하는 요소 앞의 1초를 탐색한다.\n target_time = lines[i][0]-1000\n for j in range(i+1):\n if lines[j][1] > target_time:\n cnt += 1\n if cnt > max_cnt:\n max_cnt = cnt\n return max_cnt\n\nprint(solution(\t[\"2016-09-15 01:00:04.002 2.0s\", \"2016-09-15 01:00:07.000 2s\"]))\n\n","sub_path":"Programmers/카카오 모의고사/2018 KAKAO BLIND RECRUITMENT/추석트래픽.py","file_name":"추석트래픽.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"644189640","text":"from .. import Parser, parser\n\nerrors = [\"virt-what: virt-what-cpuid-helper program not found in $PATH\"]\n\n\n@parser('virt-what')\nclass VirtWhat(Parser):\n\n @property\n def is_virtual(self):\n return self.generic != \"Baremetal\" and self.generic != 'Failed'\n\n @property\n def has_specific(self):\n return self.specific is not None and self.generic != self.specific\n\n def parse_content(self, content):\n if content and content[0] in errors:\n self.generic = 'Failed'\n self.specific = content[0]\n elif content:\n self.generic = content[0]\n self.specific = content[-1]\n else:\n self.generic = \"Baremetal\"\n self.specific = None\n","sub_path":"insights/parsers/virt_what.py","file_name":"virt_what.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"48408235","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n appleCount = 0\n orangeCount = 0\n fellPointApple = list(map(lambda apple: apple + a, apples))\n fellPointOrange = list(map(lambda orange: orange + b, oranges))\n\n for point in fellPointApple:\n if(s <= point <= t):\n appleCount += 1\n\n for point in fellPointOrange:\n if(s <= point <= t):\n orangeCount += 1\n\n print(appleCount)\n print(orangeCount)\n\n\nif __name__ == '__main__':\n st = input().split()\n\n s = int(st[0])\n\n t = int(st[1])\n\n ab = input().split()\n\n a = int(ab[0])\n\n b = int(ab[1])\n\n mn = input().split()\n\n m = int(mn[0])\n\n n = int(mn[1])\n\n apples = list(map(int, input().rstrip().split()))\n\n oranges = list(map(int, input().rstrip().split()))\n\n countApplesAndOranges(s, t, a, b, apples, oranges)\n","sub_path":"hackerrank/algorithms/implemantation/apple-and-orange/apple_and_orange.py","file_name":"apple_and_orange.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"352933496","text":"import copy\n\nimport lenstronomy.Util.util as util\nimport lenstronomy.Util.mask as util_maskl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.ndimage as ndimage\nfrom lenstronomy.LensModel.Profiles.external_shear import ExternalShear\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom lenstronomy.LensModel.lens_model_extensions import LensModelExtensions\nimport lenstronomy.Util.class_creator as class_creator\nfrom lenstronomy.Analysis.lens_analysis import LensAnalysis\nfrom lenstronomy.Data.coord_transforms import Coordinates\nfrom lenstronomy.Data.imaging_data import Data\n\n\ndef text_description(ax, d, text, color='w', backgroundcolor='k', flipped=False):\n if flipped:\n ax.text(d - d / 40., d - d / 15., text, color=color, fontsize=15, backgroundcolor=backgroundcolor)\n else:\n ax.text(d / 40., d - d / 15., text, color=color, fontsize=15, backgroundcolor=backgroundcolor)\n\n\ndef scale_bar(ax, d, dist=1., text='1\"', color='w', flipped=False):\n if flipped:\n p0 = d - d / 15. 
- dist\n p1 = d / 15.\n ax.plot([p0, p0 + dist], [p1, p1], linewidth=2, color=color)\n ax.text(p0 + dist / 2., p1 + 0.01 * d, text, fontsize=15, color=color, ha='center')\n else:\n p0 = d / 15.\n ax.plot([p0, p0 + dist], [p0, p0], linewidth=2, color=color)\n ax.text(p0 + dist / 2., p0 + 0.01 * d, text, fontsize=15, color=color, ha='center')\n\n\ndef coordinate_arrows(ax, d, coords, color='w', arrow_size=0.05):\n d0 = d / 8.\n p0 = d / 15.\n pt = d / 9.\n deltaPix = coords.pixel_size\n ra0, dec0 = coords.map_pix2coord((d - d0) / deltaPix, d0 / deltaPix)\n xx_, yy_ = coords.map_coord2pix(ra0, dec0)\n xx_ra, yy_ra = coords.map_coord2pix(ra0 - p0, dec0)\n xx_dec, yy_dec = coords.map_coord2pix(ra0, dec0 + p0)\n xx_ra_t, yy_ra_t = coords.map_coord2pix(ra0 - pt, dec0)\n xx_dec_t, yy_dec_t = coords.map_coord2pix(ra0, dec0 + pt)\n\n ax.arrow(xx_ * deltaPix, yy_ * deltaPix, (xx_ra - xx_) * deltaPix, (yy_ra - yy_) * deltaPix,\n head_width=arrow_size * d, head_length=arrow_size * d, fc=color, ec=color, linewidth=1)\n ax.text(xx_ra_t * deltaPix, yy_ra_t * deltaPix, \"E\", color=color, fontsize=15, ha='center')\n ax.arrow(xx_ * deltaPix, yy_ * deltaPix, (xx_dec - xx_) * deltaPix, (yy_dec - yy_) * deltaPix,\n head_width=arrow_size * d, head_length=arrow_size * d, fc\n =color, ec=color, linewidth=1)\n ax.text(xx_dec_t * deltaPix, yy_dec_t * deltaPix, \"N\", color=color, fontsize=15, ha='center')\n\n\ndef plot_line_set(ax, coords, ra_caustic_list, dec_caustic_list, color='g'):\n \"\"\"\n\n :param coords:\n :return:\n \"\"\"\n deltaPix = coords.pixel_size\n for i in range(len(ra_caustic_list)):\n x_c, y_c = coords.map_coord2pix(ra_caustic_list[i], dec_caustic_list[i])\n ax.plot((x_c + 0.5) * (deltaPix), (y_c + 0.5) * (deltaPix), color=color)\n return ax\n\n\ndef image_position_plot(ax, coords, ra_image, dec_image, color='w'):\n \"\"\"\n\n :param ax:\n :param coords:\n :param kwargs_else:\n :return:\n \"\"\"\n deltaPix = coords.pixel_size\n if len(ra_image) > 0:\n x_image, y_image = 
coords.map_coord2pix(ra_image, dec_image)\n abc_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G']\n for i in range(len(x_image)):\n x_ = (x_image[i] + 0.5) * deltaPix\n y_ = (y_image[i] + 0.5) * deltaPix\n ax.plot(x_, y_, 'or')\n ax.text(x_, y_, abc_list[i], fontsize=20, color=color)\n return ax\n\n\ndef source_position_plot(ax, coords, kwargs_source):\n \"\"\"\n\n :param ax:\n :param coords:\n :param kwargs_source:\n :return:\n \"\"\"\n deltaPix = coords.pixel_size\n x_source, y_source = coords.map_coord2pix(kwargs_source[0]['center_x'], kwargs_source[0]['center_y'])\n ax.plot((x_source + 0.5) * deltaPix, (y_source + 0.5) * deltaPix, '*', markersize=10)\n return ax\n\n\ndef lens_model_plot(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourcePos_x=0, sourcePos_y=0, point_source=False):\n \"\"\"\n plots a lens model (convergence) and the critical curves and caustics\n\n :param ax:\n :param kwargs_lens:\n :param numPix:\n :param deltaPix:\n :return:\n \"\"\"\n from lenstronomy.SimulationAPI.simulations import Simulation\n simAPI = Simulation()\n data = simAPI.data_configure(numPix, deltaPix)\n _frame_size = numPix * deltaPix\n _coords = data._coords\n x_grid, y_grid = data.coordinates\n lensModelExt = class_creator.creat_lens_model_extension(lensModel)\n ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = lensModelExt.critical_curve_caustics(\n kwargs_lens, compute_window=_frame_size, grid_scale=0.005)\n kappa_result = util.array2image(lensModel.kappa(x_grid, y_grid, kwargs_lens))\n im = ax.matshow(np.log10(kappa_result), origin='lower',\n extent=[0, _frame_size, 0, _frame_size], cmap='Greys', vmin=-1, vmax=1) #, cmap=self._cmap, vmin=v_min, vmax=v_max)\n\n plot_line_set(ax, _coords, ra_caustic_list, dec_caustic_list, color='g')\n plot_line_set(ax, _coords, ra_crit_list, dec_crit_list, color='r')\n if point_source:\n from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver\n solver = LensEquationSolver(lensModel)\n theta_x, 
theta_y = solver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens)\n mag_images = lensModel.magnification(theta_x, theta_y, kwargs_lens)\n x_image, y_image = _coords.map_coord2pix(theta_x, theta_y)\n abc_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']\n for i in range(len(x_image)):\n x_ = (x_image[i] + 0.5) * deltaPix\n y_ = (y_image[i] + 0.5) * deltaPix\n ax.plot(x_, y_, 'dk', markersize=4*(1 + np.log(np.abs(mag_images[i]))), alpha=0.5)\n #ax.text(x_, y_, abc_list[i], fontsize=20, color='k')\n x_source, y_source = _coords.map_coord2pix(sourcePos_x, sourcePos_y)\n ax.plot((x_source + 0.5) * deltaPix, (y_source + 0.5) * deltaPix, '*k', markersize=10)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n #image_position_plot(ax, _coords, self._kwargs_else)\n #source_position_plot(ax, self._coords, self._kwargs_source)\n return ax\n\n\nclass LensModelPlot(object):\n \"\"\"\n class that manages the summary plots of a lens model\n \"\"\"\n def __init__(self, kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, arrow_size=0.1, cmap_string=\"gist_heat\", high_res=5):\n \"\"\"\n\n :param kwargs_options:\n :param kwargs_data:\n :param arrow_size:\n :param cmap_string:\n \"\"\"\n self._kwargs_data = kwargs_data\n if isinstance(cmap_string, str) or isinstance(cmap_string, unicode):\n cmap = plt.get_cmap(cmap_string)\n else:\n cmap = cmap_string\n cmap.set_bad(color='k', alpha=1.)\n cmap.set_under('k')\n self._cmap = cmap\n self._arrow_size = arrow_size\n data = Data(kwargs_data)\n self._coords = data._coords\n nx, ny = np.shape(kwargs_data['image_data'])\n Mpix2coord = kwargs_data['transform_pix2angle']\n self._Mpix2coord = Mpix2coord\n\n self._deltaPix = self._coords.pixel_size\n self._frame_size = self._deltaPix * nx\n\n self._x_grid, self._y_grid = data.coordinates\n\n self._imageModel = class_creator.creat_image_model(kwargs_data, 
kwargs_psf, kwargs_numerics, kwargs_model)\n self._analysis = LensAnalysis(kwargs_model)\n self._lensModel = LensModelExtensions(lens_model_list=kwargs_model.get('lens_model_list', ['NONE']),\n z_source=kwargs_model.get('z_source', None),\n redshift_list=kwargs_model.get('redshift_list', None),\n multi_plane=kwargs_model.get('multi_plane', False))\n self._ra_crit_list, self._dec_crit_list, self._ra_caustic_list, self._dec_caustic_list = self._lensModel.critical_curve_caustics(kwargs_lens, compute_window=self._frame_size, grid_scale=0.01)\n\n model, error_map, cov_param, param = self._imageModel.image_linear_solve(kwargs_lens, kwargs_source,\n kwargs_lens_light, kwargs_ps, inv_bool=True)\n self._kwargs_lens = kwargs_lens\n self._kwargs_source = kwargs_source\n self._kwargs_lens_light = kwargs_lens_light\n self._kwargs_else = kwargs_ps\n self._model = model\n self._data = kwargs_data['image_data']\n self._cov_param = cov_param\n self._norm_residuals = self._imageModel.reduced_residuals(model, error_map=error_map)\n self._reduced_x2 = self._imageModel.reduced_chi2(model, error_map=error_map)\n log_model = np.log10(model)\n log_model[np.isnan(log_model)] = -5\n self._v_min_default = max(np.min(log_model), -5)\n self._v_max_default = min(np.max(log_model), 10)\n print(\"reduced chi^^ = \", self._reduced_x2)\n\n def data_plot(self, ax, v_min=None, v_max=None):\n \"\"\"\n\n :param ax:\n :return:\n \"\"\"\n if v_min is None:\n v_min = self._v_min_default\n if v_max is None:\n v_max = self._v_max_default\n im = ax.matshow(np.log10(self._data), origin='lower',\n extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap, vmin=v_min, vmax=v_max) # , vmin=0, vmax=2\n\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n\n scale_bar(ax, self._frame_size, dist=1, text='1\"')\n text_description(ax, self._frame_size, text=\"Observed\", color=\"w\", backgroundcolor='k')\n coordinate_arrows(ax, self._frame_size, self._coords, 
arrow_size=self._arrow_size)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'log$_{10}$ flux', fontsize=15)\n return ax\n\n def model_plot(self, ax, v_min=None, v_max=None):\n \"\"\"\n\n :param ax:\n :param model:\n :param v_min:\n :param v_max:\n :return:\n \"\"\"\n if v_min is None:\n v_min = self._v_min_default\n if v_max is None:\n v_max = self._v_max_default\n im = ax.matshow(np.log10(self._model), origin='lower', vmin=v_min, vmax=v_max,\n extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"')\n text_description(ax, self._frame_size, text=\"Reconstructed\", color=\"w\", backgroundcolor='k')\n coordinate_arrows(ax, self._frame_size, self._coords, arrow_size=self._arrow_size)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'log$_{10}$ flux', fontsize=15)\n\n plot_line_set(ax, self._coords, self._ra_caustic_list, self._dec_caustic_list, color='b')\n plot_line_set(ax, self._coords, self._ra_crit_list, self._dec_crit_list, color='r')\n ra_image, dec_image = self._imageModel.image_positions(self._kwargs_else, self._kwargs_lens)\n image_position_plot(ax, self._coords, ra_image[0], dec_image[0])\n source_position_plot(ax, self._coords, self._kwargs_source)\n\n def convergence_plot(self, ax, v_min=None, v_max=None):\n \"\"\"\n\n :param x_grid:\n :param y_grid:\n :param kwargs_lens:\n :param kwargs_else:\n :return:\n \"\"\"\n kappa_result = util.array2image(self._lensModel.kappa(self._x_grid, self._y_grid, self._kwargs_lens))\n im = ax.matshow(np.log10(kappa_result), origin='lower',\n extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap, vmin=v_min, vmax=v_max)\n ax.get_xaxis().set_visible(False)\n 
ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"', color='w')\n coordinate_arrows(ax, self._frame_size, self._coords, color='w', arrow_size=self._arrow_size)\n text_description(ax, self._frame_size, text=\"Convergence\", color=\"w\", backgroundcolor='k', flipped=False)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'log$_{10}$ $\\kappa$', fontsize=15)\n return ax\n\n def normalized_residual_plot(self, ax, v_min=-6, v_max=6):\n \"\"\"\n\n :param ax:\n :param residuals:\n :return:\n \"\"\"\n im = ax.matshow(self._norm_residuals, vmin=v_min, vmax=v_max,\n extent=[0, self._frame_size, 0, self._frame_size], cmap='bwr', origin='lower')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"', color='k')\n text_description(ax, self._frame_size, text=\"Normalized Residuals\", color=\"k\", backgroundcolor='w')\n coordinate_arrows(ax, self._frame_size, self._coords, color='k', arrow_size=self._arrow_size)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'(f$_{model}$-f$_{data}$)/$\\sigma$', fontsize=15)\n return ax\n\n def absolute_residual_plot(self, ax, v_min=-1, v_max=1):\n \"\"\"\n\n :param ax:\n :param residuals:\n :return:\n \"\"\"\n im = ax.matshow(self._model - self._data, vmin=v_min, vmax=v_max,\n extent=[0, self._frame_size, 0, self._frame_size], cmap='bwr', origin='lower')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"', color='k')\n text_description(ax, self._frame_size, text=\"Residuals\", color=\"k\", backgroundcolor='w')\n coordinate_arrows(ax, self._frame_size, self._coords, color='k', arrow_size=self._arrow_size)\n 
divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'(f$_{model}$-f$_{data}$)', fontsize=15)\n return ax\n\n def source_plot(self, ax, numPix, deltaPix_source, source_sigma=0.001, convolution=False, v_min=None, v_max=None):\n \"\"\"\n\n :param ax:\n :param coords_source:\n :param source:\n :return:\n \"\"\"\n if v_min is None:\n v_min = self._v_min_default\n if v_max is None:\n v_max = self._v_max_default\n d_s = numPix * deltaPix_source\n x_grid_source, y_grid_source = util.make_grid_transformed(numPix,\n self._Mpix2coord * deltaPix_source / self._deltaPix)\n x_center = self._kwargs_source[0]['center_x']\n y_center = self._kwargs_source[0]['center_y']\n x_grid_source += x_center\n y_grid_source += y_center\n coords_source = Coordinates(self._Mpix2coord * deltaPix_source / self._deltaPix, ra_at_xy_0=x_grid_source[0],\n dec_at_xy_0=y_grid_source[0])\n\n source = self._imageModel.SourceModel.surface_brightness(x_grid_source, y_grid_source, self._kwargs_source)\n source = util.array2image(source)\n if convolution:\n source = ndimage.filters.gaussian_filter(source, sigma=source_sigma / deltaPix_source, mode='nearest',\n truncate=20)\n\n im = ax.matshow(np.log10(source), origin='lower', extent=[0, d_s, 0, d_s],\n cmap=self._cmap, vmin=v_min, vmax=v_max) # source\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'log$_{10}$ flux', fontsize=15)\n plot_line_set(ax, coords_source, self._ra_caustic_list, self._dec_caustic_list, color='b')\n scale_bar(ax, d_s, dist=0.1, text='0.1\"', color='w', flipped=False)\n coordinate_arrows(ax, d_s, coords_source, arrow_size=self._arrow_size, color='w')\n text_description(ax, d_s, text=\"Reconstructed source\", color=\"w\", backgroundcolor='k', 
flipped=False)\n source_position_plot(ax, coords_source, self._kwargs_source)\n return ax\n\n def error_map_source_plot(self, ax, numPix, deltaPix_source, v_min=None, v_max=None):\n x_grid_source, y_grid_source = util.make_grid_transformed(numPix,\n self._Mpix2coord * deltaPix_source / self._deltaPix)\n x_center = self._kwargs_source[0]['center_x']\n y_center = self._kwargs_source[0]['center_y']\n x_grid_source += x_center\n y_grid_source += y_center\n coords_source = Coordinates(self._Mpix2coord * deltaPix_source / self._deltaPix, ra_at_xy_0=x_grid_source[0],\n dec_at_xy_0=y_grid_source[0])\n error_map_source = self._analysis.error_map_source(self._kwargs_source, x_grid_source, y_grid_source, self._cov_param)\n error_map_source = util.array2image(error_map_source)\n d_s = numPix * deltaPix_source\n im = ax.matshow(error_map_source, origin='lower', extent=[0, d_s, 0, d_s],\n cmap=self._cmap, vmin=v_min, vmax=v_max) # source\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'error variance', fontsize=15)\n plot_line_set(ax, coords_source, self._ra_caustic_list, self._dec_caustic_list, color='b')\n scale_bar(ax, d_s, dist=0.1, text='0.1\"', color='w', flipped=False)\n coordinate_arrows(ax, d_s, coords_source, arrow_size=self._arrow_size, color='w')\n text_description(ax, d_s, text=\"Error map in source\", color=\"w\", backgroundcolor='k', flipped=False)\n source_position_plot(ax, coords_source, self._kwargs_source)\n return ax\n\n def magnification_plot(self, ax, v_min=-10, v_max=10):\n \"\"\"\n\n :param ax:\n :return:\n \"\"\"\n mag_result = util.array2image(self._lensModel.magnification(self._x_grid, self._y_grid, self._kwargs_lens))\n im = ax.matshow(mag_result, origin='lower', extent=[0, self._frame_size, 0, self._frame_size],\n vmin=v_min, vmax=v_max, cmap=self._cmap, 
alpha=0.5)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"', color='k')\n coordinate_arrows(ax, self._frame_size, self._coords, color='k', arrow_size=self._arrow_size)\n text_description(ax, self._frame_size, text=\"Magnification model\", color=\"k\", backgroundcolor='w')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'det(A$^{-1}$)', fontsize=15)\n\n plot_line_set(ax, self._coords, self._ra_caustic_list, self._dec_caustic_list, color='b')\n plot_line_set(ax, self._coords, self._ra_crit_list, self._dec_crit_list, color='r')\n ra_image, dec_image = self._imageModel.image_positions(self._kwargs_else, self._kwargs_lens)\n image_position_plot(ax, self._coords, ra_image[0], dec_image[0], color='k')\n source_position_plot(ax, self._coords, self._kwargs_source)\n return ax\n\n def deflection_plot(self, ax, v_min=None, v_max=None, axis=0):\n \"\"\"\n\n :param kwargs_lens:\n :param kwargs_else:\n :return:\n \"\"\"\n\n alpha1, alpha2 = self._lensModel.alpha(self._x_grid, self._y_grid, self._kwargs_lens)\n alpha1 = util.array2image(alpha1)\n alpha2 = util.array2image(alpha2)\n if axis == 0:\n alpha = alpha1\n else:\n alpha = alpha2\n im = ax.matshow(alpha, origin='lower', extent=[0, self._frame_size, 0, self._frame_size],\n vmin=v_min, vmax=v_max, cmap=self._cmap, alpha=0.5)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"', color='k')\n coordinate_arrows(ax, self._frame_size, self._coords, color='k', arrow_size=self._arrow_size)\n text_description(ax, self._frame_size, text=\"Deflection model\", color=\"k\", backgroundcolor='w')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'arcsec', 
fontsize=15)\n\n plot_line_set(ax, self._coords, self._ra_caustic_list, self._dec_caustic_list, color='b')\n plot_line_set(ax, self._coords, self._ra_crit_list, self._dec_crit_list, color='r')\n ra_image, dec_image = self._imageModel.image_positions(self._kwargs_else, self._kwargs_lens)\n image_position_plot(ax, self._coords, ra_image[0], dec_image[0])\n source_position_plot(ax, self._coords, self._kwargs_source)\n return ax\n\n def decomposition_plot(self, ax, text='Reconstructed', v_min=None, v_max=None, unconvolved=False, point_source_add=False, source_add=False, lens_light_add=False):\n\n model = self._imageModel.image(self._kwargs_lens, self._kwargs_source, self._kwargs_lens_light,\n self._kwargs_else, unconvolved=unconvolved, source_add=source_add,\n lens_light_add=lens_light_add, point_source_add=point_source_add)\n if v_min is None:\n v_min = self._v_min_default\n if v_max is None:\n v_max = self._v_max_default\n im = ax.matshow(np.log10(model), origin='lower', vmin=v_min, vmax=v_max,\n extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"')\n text_description(ax, self._frame_size, text=text, color=\"w\", backgroundcolor='k')\n coordinate_arrows(ax, self._frame_size, self._coords, arrow_size=self._arrow_size)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'log$_{10}$ flux', fontsize=15)\n return ax\n\n def subtract_from_data_plot(self, ax, text='Subtracted', v_min=None, v_max=None, point_source_add=False, source_add=False, lens_light_add=False):\n model = self._imageModel.image(self._kwargs_lens, self._kwargs_source, self._kwargs_lens_light,\n self._kwargs_else, unconvolved=False, source_add=source_add,\n lens_light_add=lens_light_add, point_source_add=point_source_add)\n if v_min is None:\n v_min = 
self._v_min_default\n if v_max is None:\n v_max = self._v_max_default\n im = ax.matshow(np.log10(self._data - model), origin='lower', vmin=v_min, vmax=v_max,\n extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n scale_bar(ax, self._frame_size, dist=1, text='1\"')\n text_description(ax, self._frame_size, text=text, color=\"w\", backgroundcolor='k')\n coordinate_arrows(ax, self._frame_size, self._coords, arrow_size=self._arrow_size)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n cb.set_label(r'log$_{10}$ flux', fontsize=15)\n return ax\n\n\ndef plot_chain(chain, param_list):\n X2_list, pos_list, vel_list, _ = chain\n\n f, axes = plt.subplots(1, 3, figsize=(18, 6), sharex=False, sharey=False)\n ax = axes[0]\n ax.plot(np.log10(-np.array(X2_list)))\n ax.set_title('-logL')\n\n ax = axes[1]\n pos = np.array(pos_list)\n vel = np.array(vel_list)\n n_iter = len(pos)\n plt.figure()\n for i in range(0,len(pos[0])):\n ax.plot((pos[:,i]-pos[n_iter-1,i]),label=param_list[i])\n ax.set_title('particle position')\n ax.legend()\n\n ax = axes[2]\n for i in range(0,len(vel[0])):\n ax.plot(vel[:,i], label=param_list[i])\n ax.set_title('param velocity')\n ax.legend()\n return f, axes\n\n\ndef ext_shear_direction(data_class, lens_model_class, kwargs_lens, strength_multiply=10):\n \"\"\"\n\n :param kwargs_data:\n :param kwargs_psf:\n :param kwargs_options:\n :param lens_result:\n :param source_result:\n :param lens_light_result:\n :param else_result:\n :return:\n \"\"\"\n x_grid, y_grid = data_class.coordinates\n shear = ExternalShear()\n\n f_x_shear, f_y_shear = 0, 0\n for i, lens_model in enumerate(lens_model_class.lens_model_list):\n if lens_model == 'SHEAR':\n kwargs = kwargs_lens[i]\n f_x_shear, f_y_shear = shear.derivatives(x_grid, y_grid, e1=kwargs['e1'] * strength_multiply,\n 
e2=kwargs['e2'] * strength_multiply)\n x_shear = x_grid - f_x_shear\n y_shear = y_grid - f_y_shear\n\n f_x_foreground, f_y_foreground = 0, 0\n for i, lens_model in enumerate(lens_model_class.lens_model_list):\n if lens_model == 'FOREGROUND_SHEAR':\n kwargs = kwargs_lens[i]\n f_x_foreground, f_y_foreground = shear.derivatives(x_grid, y_grid, e1=kwargs['e1'] * strength_multiply,\n e2=kwargs['e2'] * strength_multiply)\n x_foreground = x_grid - f_x_foreground\n y_foreground = y_grid - f_y_foreground\n\n center_x = np.mean(x_grid)\n center_y = np.mean(y_grid)\n radius = (np.max(x_grid) - np.min(x_grid))/4\n circle_shear = util_maskl.mask_sphere(x_shear, y_shear, center_x, center_y, radius)\n circle_foreground = util_maskl.mask_sphere(x_foreground, y_foreground, center_x, center_y, radius)\n f, ax = plt.subplots(1, 1, figsize=(16, 8), sharex=False, sharey=False)\n im = ax.matshow(np.log10(data_class.data), origin='lower', alpha=0.5)\n im = ax.matshow(util.array2image(circle_shear), origin='lower', alpha=0.5, cmap=\"jet\")\n im = ax.matshow(util.array2image(circle_foreground), origin='lower', alpha=0.5)\n #f.show()\n return f, ax\n\n\ndef psf_iteration_compare(kwargs_psf):\n \"\"\"\n\n :param kwargs_psf:\n :return:\n \"\"\"\n psf_out = kwargs_psf['kernel_point_source']\n psf_in = kwargs_psf['kernel_point_source_init']\n n_kernel = len(psf_in)\n delta_x = n_kernel/20.\n delta_y = n_kernel/10.\n cmap_kernel = 'seismic'\n\n f, axes = plt.subplots(1, 3, figsize=(15, 5), sharex=False, sharey=False)\n ax = axes[0]\n im = ax.matshow(np.log10(psf_in), origin='lower', cmap=cmap_kernel)\n v_min, v_max = im.get_clim()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(delta_x, n_kernel-delta_y, \"stacked stars\", color=\"k\", fontsize=20, backgroundcolor='w')\n\n ax = axes[1]\n im = ax.matshow(np.log10(psf_out), origin='lower', 
vmin=v_min, vmax=v_max, cmap=cmap_kernel)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(delta_x, n_kernel-delta_y, \"iterative reconstruction\", color=\"k\", fontsize=20, backgroundcolor='w')\n\n ax = axes[2]\n im = ax.matshow(psf_out-psf_in, origin='lower', vmin=-10**-3, vmax=10**-3, cmap=cmap_kernel)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.text(delta_x, n_kernel-delta_y, \"difference\", color=\"k\", fontsize=20, backgroundcolor='w')\n f.tight_layout()\n return f, axes","sub_path":"lenstronomy/Plots/output_plots.py","file_name":"output_plots.py","file_ext":"py","file_size_in_byte":28820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"578949660","text":"# Exercise3 List Less than 10\n\n'''Take a list, say for example this one:\n\n a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nand write a program that prints out all the elements of the list that are less than 5.\n\nExtras:\n\nInstead of printing the elements one by one, make a new list that has all the elements less than 5 from this list in it and print out this new list.\nWrite this in one line of Python.\nAsk the user for a number and return a list that contains only elements from the original list a \nthat are smaller than that number given by the user.\n'''\n\nl = list(input(\"Please input a list here : \"))\nnum = input(\"Please input the max number : \")\nprint([ele for ele in l if ele < num])","sub_path":"level 1/exercise3_list_<_10.py","file_name":"exercise3_list_<_10.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"440291445","text":"# расчет полей пространственного заряда в любой точке\n\nclass pic:\n\n def __init__(self, h, x, y, z, E0, B0):\n self.h = h\n self.x = x\n self.y = y\n self.z = z\n self.E0 = E0\n self.B0 = B0\n\n# функция которая определяет диапазон координат, из сеточных узлов которого брать напряженности\n def rangeGrid(self, r, p):\n index = []\n for i in r:\n if abs(p - i) < self.h:\n index.append(int(i/self.h))\n return [min(index), max(index)]\n\n # функция для расчета сеточного ядра\n def R(self, r, p):\n if abs(r - p) < self.h:\n return (1 - abs(r - p)/self.h)/self.h\n else: return 0\n\n # функция для расчета полей пространственного заряда непосредственно для конкретной частицы\n def fieldEBRandomPoint(self, mas, particle):\n maxminX = self.rangeGrid(self.x, particle[0])\n maxminY = self.rangeGrid(self.y, particle[1])\n maxminZ = self.rangeGrid(self.z, particle[2])\n particle[6] = 0\n particle[7] = self.E0\n particle[8] = 0\n particle[9] = self.B0\n particle[10] = 0\n particle[11] = 0\n for i in range(maxminX[0], maxminX[1]+1):\n for j in range(maxminY[0], maxminY[1]+1):\n for k in range(maxminZ[0], maxminZ[1]+1):\n ii = i * 2\n jj = j * 2\n kk = k * 2\n # ПЗ электрического поля\n particle[6] += (mas[i+0.5][j][k])[0] * self.R(self.x[ii+1], particle[0]) * self.R(self.y[jj], particle[1]) * self.R(self.z[kk], particle[2])\n particle[7] += (mas[i][j+0.5][k])[0] * self.R(self.x[ii], particle[0]) * self.R(self.y[jj+1], particle[1]) * self.R(self.z[kk], particle[2])\n particle[8] += (mas[i][j][k+0.5])[0] * self.R(self.x[ii], particle[0]) * self.R(self.y[jj], particle[1]) * self.R(self.z[kk+1], particle[2])\n # ПЗ магнитного поля\n particle[9] += (mas[i][j+0.5][k+0.5])[1] * self.R(self.x[ii], particle[0]) * self.R(self.y[jj+1], particle[1]) * self.R(self.z[kk+1], particle[2])\n particle[10] += (mas[i+0.5][j][k+0.5])[1] * self.R(self.x[ii+1], particle[0]) * self.R(self.y[jj], particle[1]) * self.R(self.z[kk+1], particle[2])\n particle[11] += 
(mas[i+0.5][j+0.5][k])[1] * self.R(self.x[ii+1], particle[0]) * self.R(self.y[jj+1], particle[1]) * self.R(self.z[kk], particle[2])\n return particle[6], particle[7], particle[8], particle[9], particle[10], particle[11]\n","sub_path":"program/pic.py","file_name":"pic.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"524386274","text":"import random\nfrom setting import setting\n\nclass tower(setting):\n\n\tdef __init__(self):\n\t\tsetting.__init__('A tower', 'TowerDescription', 'TowerTransition')\n\n\n\tdef get_description(self):\n\t\tname = open(\"./transition.txt\") \n\t\tcase = open(\"description.txt\")\n\t\tfor number in range(0, random.randint(1,3)):\n\t\t\ttransition = name.readline()\n\t\t\tsetting = case.readline()\n\t\treturn(transition + setting)","sub_path":"settings/tower/tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"482156687","text":"\n\nimport pytest\n\nfrom problems.p18_get_binary_tree_next_node import get_next, Node\n\n\n# Tree nodes\nA, B, C = Node('a'), Node('b'), Node('c')\nD, E, F = Node('d'), Node('e'), Node('f')\nG, H, I = Node('g'), Node('h'), Node('i')\n\nA.left, A.right = B, C\nB.left, B.right, B.parent = D, E, A\nC.left, C.right, C.parent = F, G, A\nD.parent = B\nE.left, E.right, E.parent = H, I, B\nF.parent = C\nG.parent = C\nH.parent = E\nI.parent = E\n\n# In-order 中序遍历获得的序列\nin_order = [D, B, H, E, I, A, F, C, G]\n\n# 输入节点对应的下个节点\ntest_params = [\n (A, F),\n (D, B),\n (C, G),\n (E, I)\n]\n\n\n@pytest.mark.parametrize('node, expected', test_params)\ndef test_get_next_node(node, expected):\n assert get_next(node) == expected\n","sub_path":"src/problems/tests/test_get_next_node_of_binary_tree.py","file_name":"test_get_next_node_of_binary_tree.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"551520841","text":"import pytest\nfrom traitlets import Float, TraitError\n\nfrom ctapipe.core import Component\nfrom abc import abstractmethod\n\n\ndef test_component_is_abstract():\n\n class AbstractComponent(Component):\n @abstractmethod\n def test(self):\n pass\n\n with pytest.raises(TypeError):\n AbstractComponent()\n\n\ndef test_component_simple():\n \"\"\"\n very basic test to construct a component and check\n that it's traits work correctly\n \"\"\"\n\n class ExampleComponent(Component):\n description = \"this is a test\"\n param = Float(default_value=1.0,\n help=\"float parameter\").tag(config=True)\n\n comp = ExampleComponent()\n\n assert comp.has_trait('param') is True\n comp.param = 1.2\n\n with pytest.raises(TraitError):\n comp.param = \"badvalue\"\n\n\ndef test_component_kwarg_setting():\n class ExampleComponent(Component):\n description = \"this is a test\"\n param = Float(default_value=1.0,\n help=\"float parameter\").tag(config=True)\n\n comp = ExampleComponent(param=3)\n assert comp.param == 3\n\n # Invalid type\n with pytest.raises(TraitError):\n comp = ExampleComponent(param=\"badvalue\")\n\n # Invalid traitlet\n with pytest.raises(TraitError):\n comp = ExampleComponent(incorrect=\"wrong\")\n","sub_path":"ctapipe/core/tests/test_component.py","file_name":"test_component.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"411569591","text":"\"\"\" Compiled: 2020-09-18 10:38:55 \"\"\"\n\n#__src_file__ = \"extensions/SecuritiesLending/etc/FSecLendOrdersMenuItems.py\"\nfrom __future__ import print_function\n\"\"\"--------------------------------------------------------------------------\nMODULE\n FSecLendOrdersMenuItems\n\n (c) Copyright 2017 FIS FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n Order Manager - Menu items.\n\n---------------------------------------------------------------------------\"\"\"\nimport acm\n\nimport FSecLendHooks\nimport FSecLendUtils\nimport FSecLendHoldTrade\nfrom ACMPyUtils import Transaction\nfrom FClipboardUtilities import SetClipboardText # @UnresolvedImport pylint: disable=import-error\nfrom FSecLendHooks import ClipBoardTextHookFromTrades\nfrom FSecLendMenuItem import SecLendMenuItemBase\nfrom FSecLendOrderCapturePanelBase import StreamBufferDialog\nfrom FSecLendWorkflow import SecLendWorkflow\nfrom GenerateOrderReportAPI import GetReportParameters\nfrom SecLendingReportingATS import SEC_LEND_REPORT_TO_DESTINATIONS\nfrom FWorkflowMenuItem import MultiWorkflowMenuItem\nfrom FParameterSettings import ParameterSettingsCreator\nfrom datetime import datetime, timedelta\n\n_SETTINGS = ParameterSettingsCreator.FromRootParameter('SecLendSettings')\n\nclass SecLendingWorkflowMenuItem(MultiWorkflowMenuItem, SecLendMenuItemBase):\n EVENT = None\n BP_CACHE = {}\n\n def __init__(self, extObj):\n super(SecLendingWorkflowMenuItem, self).__init__(extObj, SecLendWorkflow, self.EVENT)\n\n\n def _BusinessProcess(self, trade):\n try:\n if not trade in self.BP_CACHE:\n bp = acm.BusinessProcess.FindBySubjectAndStateChart(trade, self._workflow.StateChart())[0]\n self.BP_CACHE[trade] = bp\n return self.BP_CACHE[trade]\n except IndexError:\n return None\n\n def _SelectedTrades(self, maxNbrOfTrades=1000):\n return [trade for trade in self._frame.ActiveSheet().Selection().SelectedTrades()[0:maxNbrOfTrades] \n if not trade.IsDeleted()]\n\n def 
BusinessProcesses(self):\n trades = self._SelectedTrades(100) # Cut off because of performance\n if not trades:\n return None\n return filter(lambda x: bool(x), [self._BusinessProcess(trade) for trade in trades])\n\n def Applicable(self):\n return True\n\n def _HandleEvent(self, businessProcess, parameters=None, notes=None):\n MIFIDParams = FSecLendUtils.ColumnValuesFromExtensionattribute(businessProcess.Subject(),\n \"_SecurityLoan_Reporting_Columns\")\n if parameters:\n parameters.update(MIFIDParams)\n self.Workflow()(businessProcess)._HandleEvent(self.Event(), parameters, notes)\n\n def _IsValidUserForAction(self):\n validUser = []\n trades = self._SelectedTrades()\n if trades:\n return FSecLendHooks.IsValidUserForRibbon(trades)\n else:\n return False\n \n def _IsValidEvent(self):\n if self.EVENT:\n return MultiWorkflowMenuItem.Enabled(self)\n else:\n return True\n \n def Enabled(self):\n return bool(self._SelectedTrades()) and self._IsValidUserForAction() and SecLendMenuItemBase.Enabled(self) and self._IsValidEvent()\n \n\nclass ReCheckMenuItem(SecLendingWorkflowMenuItem):\n EVENT = 'Re-check'\n\n def Invoke(self, eii):\n \"\"\" Force error state if state is different from \n or handle event to the previous state. 
Apply all changes if there are any..\"\"\"\n notes, parameters = self._NotesAndParameters()\n for bp in self.BusinessProcesses():\n if bp.CurrentStep().IsInErrorState():\n bp.HandleEvent(\"Revert\")\n bp.Commit()\n elif self._SatisfiesCondition(bp, parameters):\n self._HandleEvent(bp, parameters, notes)\n\n\nclass ManualApproveMenuItem(SecLendingWorkflowMenuItem):\n EVENT = 'Manual approve'\n\n\nclass SecLendingWorkflowReplyingMenuItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n if not self.VerifyValidCollection(eii):\n return\n notes, parameters = self._NotesAndParameters()\n if not self.VerifyOrderReportMapping(eii, parameters):\n return\n if not self.SetClipboardTextAndShowBuffer(eii):\n return\n self.SetRespondTrades(parameters)\n for bp in self.BusinessProcesses():\n trade = bp.Subject()\n self.SetRespondSource(eii, parameters, trade)\n if self._SatisfiesCondition(bp, parameters):\n self._HandleEvent(bp, parameters, notes)\n \n def SelectedTradesString(self):\n return ','.join([str(trade.Oid()) for trade in self._SelectedTrades(100)])\n \n def VerifyOrderReportMapping(self, eii, parameters = {}):\n destination = self.GetDestination(eii)\n if destination:\n if destination in SEC_LEND_REPORT_TO_DESTINATIONS:\n sheet = eii.Parameter('sheet') or eii.ExtensionObject().ActiveSheet()\n trade = sheet.Selection().SelectedCell().RowObject().Trade()\n try:\n GetReportParameters(destination, trade.Counterparty())\n except Exception as e:\n dialogMsg = \"%s. Do you want to %s it anyway without a confirmation?\" % (e, self.EVENT)\n if acm.UX().Dialogs().MessageBoxYesNo(self._frame.Shell(), \"Warning\", dialogMsg) == 'Button1':\n parameters.update({\"Response\":\"NO [%s]\" % e})\n return True\n return False\n elif destination not in ('Clipboard', \"Manual\"):\n dialogMsg = \"No confirmation can be sent to %s. 
Do you want to %s it anyway without a confirmation?\" % (destination, self.EVENT)\n return acm.UX().Dialogs().MessageBoxYesNo(self._frame.Shell(), \"Warning\", dialogMsg ) == 'Button1'\n \n return True\n \n def VerifyValidCollection(self, eii):\n destination = eii.MenuExtension().GetString('Destination')\n if destination == '*SOURCE*':\n sources = set(trade.Market() and trade.Market().Name() for trade in self._SelectedTrades())\n if len(sources) == 1:\n destination = sources.pop()\n elif set(SEC_LEND_REPORT_TO_DESTINATIONS) & sources:\n msg = 'Trades from multiple sources %s cannot be sent in the same message' % list(sources)\n acm.UX().Dialogs().MessageBox(self._frame.Shell(), \"Error\", msg, 'Ok', None, None, 'Button1', 'Button3' )\n return False\n else:\n #Multiple sources but no source is a reporting source\n destination = None\n \n if destination and destination in SEC_LEND_REPORT_TO_DESTINATIONS:\n source = '_NO_VALUE_'\n counterparty = '_NO_VALUE_'\n error = ''\n for trade in self._SelectedTrades():\n s = trade.AddInfoValue('SBL_TradeOriginId')\n if source != '_NO_VALUE_' and s != source:\n error = 'source objects'\n cp = trade.Counterparty()\n if counterparty != '_NO_VALUE_' and cp != counterparty:\n error = 'counterparties'\n if error:\n msg = 'Trades from multiple %s cannot be sent in the same message' % error\n acm.UX().Dialogs().MessageBox(self._frame.Shell(), \"Error\", msg, 'Ok', None, None, 'Button1', 'Button3' )\n return False\n source = s\n counterparty = cp\n nbrOfTrades = len(self._SelectedTrades())\n\n if source:\n sourceTrades = acm.FAdditionalInfo.Select(\"addInf = 'SBL_TradeOriginId' and fieldValue = '%s'\" % source)\n if sourceTrades.Size() > nbrOfTrades:\n msg = 'Only %d of %d orders from the import where selected, do you want to send the reply anyway?' 
% \\\n (nbrOfTrades, sourceTrades.Size())\n if acm.UX().Dialogs().MessageBoxYesNo(self._frame.Shell(), \"Warning\", msg) == 'Button1':\n return True\n return False\n if sourceTrades.Size() < nbrOfTrades:\n print(\"ERROR - shouldn't end up here, there has to be multiple sources, %d, %d\" %\\\n (sourceTrades.Size(), nbrOfTrades))\n return False\n return True\n \n def GetDestination(self, eii, defaultValue = ''):\n destination = eii.MenuExtension().GetString('Destination', defaultValue)\n if destination == '*SOURCE*':\n sheet = eii.Parameter('sheet') or eii.ExtensionObject().ActiveSheet()\n trade = sheet.Selection().SelectedCell().RowObject().Trade()\n destination = trade.Market() and trade.Market().Name()\n return destination\n\n def SetClipboardTextAndShowBuffer(self, eii):\n destination = self.GetDestination(eii, eii.MenuExtension().Name().AsString())\n try:\n text = ClipBoardTextHookFromTrades(self._SelectedTrades(), self.Event())\n SetClipboardText(text)\n if destination == \"Manual\" and _SETTINGS.ShowRespondBuffer():\n buffer_dialog = StreamBufferDialog(text, 'Text To Clipboard', False)\n return acm.UX().Dialogs().ShowCustomDialogModal(self._frame.Shell(),\n buffer_dialog.CreateLayout(), buffer_dialog)\n except Exception as e:\n print('Failed to set content to clipboard', e)\n return True\n \n def SetRespondTrades(self, parameters):\n parameters.update({\"TargetTrades\":self.SelectedTradesString()})\n \n def SetRespondSource(self, eii, parameters, trade):\n buttonName = eii.MenuExtension().Name().AsString()\n if buttonName == self.EVENT:\n parameters.update({\"TargetSource\":trade.Market() and trade.Market().Name()})\n else:\n assert acm.FMarketPlace[buttonName] is not None, \\\n \"No FMarketPlace named '{}' to route orders to.\" \\\n .format(buttonName)\n parameters.update({\"TargetSource\":buttonName})\n \n def Enabled(self):\n return SecLendingWorkflowMenuItem.Enabled(self) and \\\n FSecLendHooks.IsCompliantToSourceMapping(self._SelectedTrades())\n\n\nclass 
RespondMenuItem(SecLendingWorkflowReplyingMenuItem):\n EVENT = 'Respond'\n \n def SelectedTradesAreNotReturns(self):\n for t in self._SelectedTrades():\n if t.Type() == 'Closing':\n return False\n return True\n\n def Enabled(self):\n return self.SelectedTradesAreNotReturns() and super(RespondMenuItem, self).Enabled()\n\nclass RejectMenuItem(SecLendingWorkflowReplyingMenuItem):\n EVENT = 'Reject'\n \n def SetRespondSource(self, eii, parameters, trade):\n destination = self.GetDestination(eii)\n if destination:\n assert acm.FMarketPlace[destination] is not None, \\\n \"No FMarketPlace named '{}' to route orders to. The destination is specified in the menu extension '{}'\" \\\n .format(destination, eii.MenuExtension().Name())\n parameters.update({\"TargetSource\":destination})\n else:\n parameters.update({\"TargetSource\":trade.Market().Name(),\n \"Response\":\"NO\"})\n\n\nclass BookMenuItem(SecLendingWorkflowReplyingMenuItem):\n EVENT = 'Book'\n\n def Invoke(self, eii):\n if not all(FSecLendHooks.IsValidForProcessing(trade) for trade in self._SelectedTrades()):\n acm.GetFunction('msgBox', 3)('Security Lending', \"One or more trades is not suitable for booking\", 0)\n return\n isOverClosing, instrument = FSecLendHooks.IsOverClosingPosition([trade for trade in self._SelectedTrades() \\\n if trade.Type() == 'Closing'])\n if isOverClosing:\n acm.GetFunction('msgBox', 3)('Closing Position', \"To high quantity to return for order(s) in \" \\\n \"instrument %s. Adjust the quantity of the order(s).\" %(instrument), 0)\n return \n super(BookMenuItem, self).Invoke(eii)\n\n def SetRespondSource(self, eii, parameters, trade):\n destination = self.GetDestination(eii)\n if destination:\n assert acm.FMarketPlace[destination] is not None, \\\n \"No FMarketPlace named '{}' to route orders to. 
The destination is specified in the menu extension '{}'\" \\\n .format(destination, eii.MenuExtension().Name())\n parameters.update({\"TargetSource\":destination})\n else:\n parameters.update({\"TargetSource\":trade.Market().Name(),\n \"Response\":\"NO\"})\n\n\nclass InspectWorkflowMenuItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n businessProcesses = self.BusinessProcesses()\n if len(businessProcesses) > 1:\n acm.StartApplication(\"Operations Manager\", businessProcesses)\n elif len(businessProcesses) == 1:\n acm.StartApplication(\"Business Process Details\", businessProcesses[0])\n\n def Enabled(self):\n return bool(self.BusinessProcesses())\n\n\nclass DeleteTradeMenuItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n res = acm.UX.Dialogs().MessageBoxYesNo(self._frame.Shell(), 'Question',\n 'Are you sure you want to delete the selected trades?')\n if res == 'Button1': # Yes\n trades = self._SelectedTrades()\n instruments = [t.Instrument() for t in trades]\n for bp in self.BusinessProcesses():\n bp.Delete()\n for trade in trades:\n trade.Delete()\n for ins in set(instruments):\n if not ins.Trades():\n ins.Delete()\n\n\nclass AssignMenuItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n editedTrades = []\n assignedTrades = {}\n for trade in self._SelectedTrades():\n assignedTrades.setdefault(trade.Trader(), []).append(trade)\n with Transaction():\n for trade in self._SelectedTrades():\n if trade.StorageId() == trade.Originator().StorageId():\n trade = trade.StorageImage()\n trade.Trader(acm.User())\n trade.AddInfoValue('SBL_ToDoList', eii.MenuExtension().GetString('ToDoList'))\n trade.Commit()\n else:\n editedTrades.append(trade)\n # Edited trades need to be handled separetly.\n # They shouldn't be commited as they're already an image of the original\n # They can't be changed within a transaction as that will mute notifications\n # The should only be changed it the transaction succeeds\n for trade in editedTrades:\n 
trade.Trader(acm.User())\n \n for trader, trades in assignedTrades.items():\n if trader != acm.User():\n acm.SendUserMessage([trader], \"Assigned orders\", \"%s has assigned himself to the attached orders\" % acm.User().Name(), trades)\n\n def Enabled(self):\n return bool(self._SelectedTrades())\n\n\nclass HoldMenuItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n self.HoldTrade()\n \n def SetHoldTime(self, trade, time, hold):\n FSecLendHoldTrade.HoldTrade(trade, time) if hold else FSecLendHoldTrade.UnholdTrade(trade)\n\n def HoldTrade(self, time=None, hold=True):\n editedTrades = []\n with Transaction():\n for trade in self._SelectedTrades():\n if trade.StorageId() == trade.Originator().StorageId():\n self.SetHoldTime(trade, time, hold)\n trade.Commit()\n else:\n editedTrades.append(trade)\n # Edited trades need to be handled separetly.\n # They shouldn't be commited as they're already an image of the original\n # They can't be changed within a transaction as that will mute notifications\n # The should only be changed it the transaction succeeds\n for trade in editedTrades:\n self.SetHoldTime(trade, time, hold)\n\nclass HoldOneHourMenuItem(HoldMenuItem):\n\n def Invoke(self, eii):\n timeOneHour = datetime.now()+timedelta(hours=1)\n self.HoldTrade(timeOneHour)\n\n\nclass HoldThreeHoursMenuItem(HoldMenuItem):\n\n def Invoke(self, eii):\n timeThreeHours = datetime.now()+timedelta(hours=3)\n self.HoldTrade(time=timeThreeHours)\n\n\nclass HoldEndOfDayMenuItem(HoldMenuItem):\n\n def Invoke(self, eii):\n if _SETTINGS.EndOfBusinessDay() is not None:\n hour, min = _SETTINGS.EndOfBusinessDay().split(':')\n timeEndOfDay = datetime.today().replace(hour=int(hour), minute=int(min), second=0, microsecond=0)\n self.HoldTrade(timeEndOfDay)\n else:\n timeEndOfDay = datetime.today().replace(hour=18, minute=0, second=0, microsecond=0)\n self.HoldTrade(timeEndOfDay)\n\n\nclass RemoveHoldMenuItem(HoldMenuItem):\n \n def Invoke(self, eii):\n 
self.HoldTrade(hold=False)\n\n\nclass TradeOrigin(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n trade = self._SelectedTrades()[0]\n info = FSecLendHooks.GetTradeOriginInfo(trade)\n dlg = FSecLendUtils.InformationDialog(\"Trade Origin\", info, 500, 500)\n acm.UX().Dialogs().ShowCustomDialog(self._frame.Shell(), dlg.CreateLayout(), dlg)\n\n def Enabled(self):\n enabled = False\n if len(self._SelectedTrades()) == 1:\n enabled = bool(self._SelectedTrades()[0].AddInfoValue('SBL_TradeOriginId'))\n return enabled\n\n\nclass FlipSignMenuItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n editedTrades = []\n with Transaction():\n for trade in self._SelectedTrades():\n if trade.StorageId() == trade.Originator().StorageId():\n trade.Quantity(-trade.Quantity())\n trade.Commit()\n else:\n editedTrades.append(trade)\n # Edited trades need to be handled separetly.\n # They shouldn't be commited as they're already an image of the original\n # They can't be changed within a transaction as that will mute notifications\n # The should only be changed it the transaction succeeds\n for trade in editedTrades:\n trade.Quantity(-trade.Quantity())\n\n\nclass ApplySuggestedFeeItem(SecLendingWorkflowMenuItem):\n\n def Invoke(self, eii):\n editedTrades = []\n with Transaction():\n for trade in self._SelectedTrades():\n if trade.StorageId() == trade.Originator().StorageId() and trade.StorageId() > 0:\n FSecLendUtils.SetDefaultRate(trade) # Only changes the instrument\n trade.Instrument().Commit()\n else:\n editedTrades.append(trade)\n # Edited trades need to be handled separetly.\n # They shouldn't be commited as they're already an image of the original\n # They can't be changed within a transaction as that will mute notifications\n # The should only be changed it the transaction succeeds\n for trade in editedTrades:\n FSecLendUtils.SetDefaultRate(trade)\n\n\ndef FlipSign(eii):\n return FlipSignMenuItem(eii)\n\n\ndef Assign(eii):\n return AssignMenuItem(eii)\n\n \ndef 
Hold(eii):\n return HoldMenuItem(eii)\n\n\ndef HoldOneHour(eii):\n return HoldOneHourMenuItem(eii)\n\n\ndef HoldThreeHours(eii):\n return HoldThreeHoursMenuItem(eii)\n\n\ndef RemoveHold(eii):\n return RemoveHoldMenuItem(eii)\n \n\ndef HoldEndOfDay(eii):\n return HoldEndOfDayMenuItem(eii)\n\n\ndef InspectWorkflow(eii):\n return InspectWorkflowMenuItem(eii)\n\n\ndef Book(eii):\n return BookMenuItem(eii)\n\n\ndef ReCheck(eii):\n return ReCheckMenuItem(eii)\n\n\ndef Respond(eii):\n return RespondMenuItem(eii)\n\n\ndef Reject(eii):\n return RejectMenuItem(eii)\n\n\ndef ManualApprove(eii):\n return ManualApproveMenuItem(eii)\n\n\ndef DeleteTrade(eii):\n return DeleteTradeMenuItem(eii)\n\n\ndef ApplySuggestedFee(eii):\n return ApplySuggestedFeeItem(eii)\n \n","sub_path":"Extensions/_securities_lending_py/FPythonCode/FSecLendOrdersMenuItems.py","file_name":"FSecLendOrdersMenuItems.py","file_ext":"py","file_size_in_byte":20385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"612827159","text":"import logging\nimport threading\n\nfrom google.appengine.api import datastore\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.signals import request_finished, request_started\nfrom django.dispatch import receiver\nfrom djangae.db import utils\nfrom djangae.db.unique_utils import unique_identifiers_from_entity, _format_value_for_identifier\nfrom djangae.db.backends.appengine.context import ContextStack\n\nlogger = logging.getLogger(\"djangae\")\n\n_context = threading.local()\n\nCACHE_TIMEOUT_SECONDS = getattr(settings, \"DJANGAE_CACHE_TIMEOUT_SECONDS\", 60 * 60)\nCACHE_ENABLED = getattr(settings, \"DJANGAE_CACHE_ENABLED\", True)\n\n\nclass CachingSituation:\n DATASTORE_GET = 0\n DATASTORE_PUT = 1\n DATASTORE_GET_PUT = 2 # When we are doing an update\n\n\ndef ensure_context():\n _context.memcache_enabled = getattr(_context, \"memcache_enabled\", True)\n _context.context_enabled = getattr(_context, \"context_enabled\", True)\n _context.stack = _context.stack if hasattr(_context, \"stack\") else ContextStack()\n\n\ndef _add_entity_to_memcache(model, entity, identifiers):\n cache.set_many({ x: entity for x in identifiers}, timeout=CACHE_TIMEOUT_SECONDS)\n\n\ndef _get_cache_key_and_model_from_datastore_key(key):\n model = utils.get_model_from_db_table(key.kind())\n\n if not model:\n # This should never happen.. 
if it does then we can edit get_model_from_db_table to pass\n # include_deferred=True/included_swapped=True to get_models, whichever makes it better\n raise AssertionError(\"Unable to locate model for db_table '{}' - item won't be evicted from the cache\".format(key.kind()))\n\n # We build the cache key for the ID of the instance\n cache_key = \"|\".join([key.kind(), \"{}:{}\".format(model._meta.pk.column, _format_value_for_identifier(key.id_or_name()))])\n\n return (cache_key, model)\n\n\ndef _remove_entity_from_memcache_by_key(key):\n \"\"\"\n Note, if the key of the entity got evicted from the cache, it's possible that stale cache\n entries would be left behind. Remember if you need pure atomicity then use disable_cache() or a\n transaction.\n \"\"\"\n\n cache_key, model = _get_cache_key_and_model_from_datastore_key(key)\n entity = cache.get(cache_key)\n\n if entity:\n identifiers = unique_identifiers_from_entity(model, entity)\n cache.delete_many(identifiers)\n\n\ndef _get_entity_from_memcache(identifier):\n return cache.get(identifier)\n\n\ndef _get_entity_from_memcache_by_key(key):\n # We build the cache key for the ID of the instance\n cache_key, _ = _get_cache_key_and_model_from_datastore_key(key)\n return cache.get(cache_key)\n\n\ndef add_entity_to_cache(model, entity, situation):\n ensure_context()\n\n identifiers = unique_identifiers_from_entity(model, entity)\n\n # Don't cache on Get if we are inside a transaction, even in the context\n # This is because transactions don't see the current state of the datastore\n # We can still cache in the context on Put() but not in memcache\n if situation == CachingSituation.DATASTORE_GET and datastore.IsInTransaction():\n return\n\n if situation in (CachingSituation.DATASTORE_PUT, CachingSituation.DATASTORE_GET_PUT) and datastore.IsInTransaction():\n # We have to wipe the entity from memcache\n if entity.key():\n _remove_entity_from_memcache_by_key(entity.key())\n\n _context.stack.top.cache_entity(identifiers, 
entity, situation)\n\n # Only cache in memcache of we are doing a GET (outside a transaction) or PUT (outside a transaction)\n # the exception is GET_PUT - which we do in our own transaction so we have to ignore that!\n if (not datastore.IsInTransaction() and situation in (CachingSituation.DATASTORE_GET, CachingSituation.DATASTORE_PUT)) or \\\n situation == CachingSituation.DATASTORE_GET_PUT:\n\n _add_entity_to_memcache(model, entity, identifiers)\n\n\ndef remove_entity_from_cache(entity):\n key = entity.key()\n remove_entity_from_cache_by_key(key)\n\n\ndef remove_entity_from_cache_by_key(key, memcache_only=False):\n \"\"\"\n Removes an entity from all caches (both context and memcache)\n or just memcache if specified\n \"\"\"\n ensure_context()\n\n if not memcache_only:\n for identifier in _context.stack.top.reverse_cache.get(key, []):\n if identifier in _context.stack.top.cache:\n del _context.stack.top.cache[identifier]\n\n _remove_entity_from_memcache_by_key(key)\n\n\ndef get_from_cache_by_key(key):\n \"\"\"\n Return an entity from the context cache, falling back to memcache when possible\n \"\"\"\n\n ensure_context()\n\n if not CACHE_ENABLED:\n return None\n\n ret = None\n if _context.context_enabled:\n # It's safe to hit the context cache, because a new one was pushed on the stack at the start of the transaction\n ret = _context.stack.top.get_entity_by_key(key)\n if ret is None and not datastore.IsInTransaction():\n if _context.memcache_enabled:\n ret = _get_entity_from_memcache_by_key(key)\n elif _context.memcache_enabled and not datastore.IsInTransaction():\n ret = _get_entity_from_memcache_by_key(key)\n\n return ret\n\n\ndef get_from_cache(unique_identifier):\n \"\"\"\n Return an entity from the context cache, falling back to memcache when possible\n \"\"\"\n\n ensure_context()\n\n if not CACHE_ENABLED:\n return None\n\n ret = None\n if _context.context_enabled:\n # It's safe to hit the context cache, because a new one was pushed on the stack at the start 
of the transaction\n ret = _context.stack.top.get_entity(unique_identifier)\n if ret is None and not datastore.IsInTransaction():\n if _context.memcache_enabled:\n ret = _get_entity_from_memcache(unique_identifier)\n elif _context.memcache_enabled and not datastore.IsInTransaction():\n ret = _get_entity_from_memcache(unique_identifier)\n\n return ret\n\n\n@receiver(request_finished)\n@receiver(request_started)\ndef reset_context(keep_disabled_flags=False, *args, **kwargs):\n \"\"\"\n Called at the beginning and end of each request, resets the thread local\n context. If you pass keep_disabled_flags=True the memcache_enabled and context_enabled\n flags will be preserved, this is really only useful for testing.\n \"\"\"\n\n memcache_enabled = getattr(_context, \"memcache_enabled\", True)\n context_enabled = getattr(_context, \"context_enabled\", True)\n\n for attr in (\"stack\", \"memcache_enabled\", \"context_enabled\"):\n if hasattr(_context, attr):\n delattr(_context, attr)\n\n ensure_context()\n\n if keep_disabled_flags:\n _context.memcache_enabled = memcache_enabled\n _context.context_enabled = context_enabled\n","sub_path":"djangae/db/backends/appengine/caching.py","file_name":"caching.py","file_ext":"py","file_size_in_byte":6778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"513090863","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('release', '0005_release_comments'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='release',\n name='name',\n field=models.ForeignKey(related_name='project_release', to='release.projectinfo'),\n ),\n ]\n","sub_path":"release/migrations/0006_auto_20151013_0320.py","file_name":"0006_auto_20151013_0320.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"314655992","text":"# -*- coding: utf-8 -*-\n\nfrom .context import scripts\n\nimport unittest\n\nfrom scripts import get_welcome, get_dt, get_rules\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_absolute_truth_and_meaning(self):\n assert True\n\n def test_getWelcome(self):\n self.assertEquals(get_welcome(), 'RPSLS - Rock, Paper, Scissor, Lizard, Spock' \\\n '\\nPlease choose from the list below:', 'Should get the welcome message')\n\n def test_shouldGetDT(self):\n self.assertNotEquals(get_dt(), 'Not dt', 'It should not get the dt')\n\n def test_getRules(self):\n self.assertEqual(get_rules(),\n '1. Scissors cuts paper'\n '2. Paper covers rock'\n '3. Rock crushes lizard'\n '4. Lizard poisons Spock'\n '5. Spock smashes scissors'\n '6. Scissors decapitates lizard'\n '7. Lizard eats paper'\n '8. Paper disproves Spock'\n '9. Spock vaporizes rock'\n '10. Rock crushes scissors', 'Should get rules of game')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"129475767","text":"from datetime import datetime, timedelta\n\nimport pytest\nfrom asgiref.sync import sync_to_async\nfrom channels.testing import WebsocketCommunicator\nfrom rest_framework.reverse import reverse\nfrom rest_framework.status import HTTP_201_CREATED\nfrom thenewboston.utils.signed_requests import generate_signed_request\n\nfrom v1.notifications.constants import VALIDATOR_CONFIRMATION_SERVICE_NOTIFICATION\nfrom ..consumers.validator_confirmation_service import ValidatorConfirmationServiceConsumer\n\n\n@pytest.mark.asyncio\nasync def test_validator_confirmation_service_post_async(\n client, validator, signing_key\n):\n communicator = WebsocketCommunicator(\n ValidatorConfirmationServiceConsumer,\n 'ws/validator_confirmation_services'\n )\n connected, subprotocol = await communicator.connect()\n assert connected\n\n start = datetime.now().isoformat()\n end = (datetime.now() + timedelta(days=2)).isoformat()\n\n payload = generate_signed_request(\n data={\n 'start': start,\n 'end': end\n },\n nid_signing_key=signing_key\n )\n\n response = await sync_to_async(\n client.post_json\n )(\n reverse('validatorconfirmationservice-list'),\n payload,\n expected=HTTP_201_CREATED\n )\n communicator_response = await communicator.receive_json_from()\n\n assert response['end'][:-1] == end\n assert response['start'][:-1] == start\n assert response['validator'] == str(validator.pk)\n\n assert communicator_response == {\n 'notification_type': VALIDATOR_CONFIRMATION_SERVICE_NOTIFICATION,\n 'payload': {\n 'bank_node_identifier': validator.node_identifier,\n 'validator_confirmation_service': response\n }\n }\n\n await communicator.disconnect()\n","sub_path":"v1/validator_confirmation_services/tests/validator_confirmation_service_async.py","file_name":"validator_confirmation_service_async.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"283047284","text":"\"\"\"empty message\n\nRevision ID: 9b53a6374401\nRevises: ec07f067b572\nCreate Date: 2019-11-04 12:49:36.086719\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9b53a6374401'\ndown_revision = 'ec07f067b572'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('soft',\n sa.Column('soft_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=50), nullable=True),\n sa.Column('version', sa.String(length=30), nullable=True),\n sa.Column('cost', sa.Integer(), nullable=True),\n sa.Column('creation_date', sa.Date(), nullable=True),\n sa.PrimaryKeyConstraint('soft_id')\n )\n op.add_column('repo', sa.Column('soft_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'repo', 'soft', ['soft_id'], ['soft_id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'repo', type_='foreignkey')\n op.drop_column('repo', 'soft_id')\n op.drop_table('soft')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/9b53a6374401_.py","file_name":"9b53a6374401_.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"324468957","text":"import thread\nimport time\nimport random\nimport threading\nfrom scapy.all import *\n\ndef keep_query():\n\tno_such_domain = 'no-such-domain.com'\n\tdns_query = IP(dst='127.0.0.1')/UDP(dport=53)/DNS(rd=1,qd=DNSQR(qname=no_such_domain, qtype=2))\n\tDNS_RCODE_NXDOMAIN = 3\n\twhile True:\n\t\tprint('query')\n\t\tresponse = sr1(dns_query)\n\t\tresponse.show()\n\t\tif response.rcode != DNS_RCODE_NXDOMAIN:\n\t\t\tprint(response.rdata)\n\t\t\texit(0)\n\t\ttime.sleep(0.5)\n\ndef keep_answer():\n\twhile True:\n\t\tprint('answer')\n\t\tdns_response = DNS(\n\t\t\t\tqr=1, # response packet\n\t\t\t\trd=1,\n\t\t\t\tid=random.randint(1, 65535),\n\t\t\t\tqd=DNSQR(qname='no-such-domain.com', qtype=2),\n\t\t\t\tan=DNSRR(\n\t\t\t\t\t\trrname='no-such-domain.com',\n\t\t\t\t\t\ttype=1,\n\t\t\t\t\t\trclass=1,\n\t\t\t\t\t\tttl=512,\n\t\t\t\t\t\trdata='192.168.0.1'\n\t\t\t\t\t)\n\t\t\t)\n\t\tfake_response = IP(dst='127.0.0.1')/UDP(dport=8000)/dns_response\n\t\tsend(fake_response)\n\t\ttime.sleep(0.2)\n\nif __name__ == '__main__':\n\tno_such_domain = 'no-such-domain.com'\n\tdns_query = IP(dst='127.0.0.1')/UDP()/DNS(rd=1,qd=DNSQR(qname=no_such_domain, qtype=1))\n\tdns_query.show()\n\tsend(dns_query)\n\t# query_thread = threading.Thread(target=keep_query)\n\t# answer_thread = threading.Thread(target=keep_answer)\n\t# answer_thread.setDaemon(True)\n\t# try:\n\t# \tquery_thread.start()\n\t# \tanswer_thread.start()\n\t# except KeyboardInterrupt:\n\t# \texit()\n\t\n","sub_path":"homework/KaminskyAttack.py","file_name":"KaminskyAttack.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"161146581","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom typing import Union\n\nimport numpy as np\n\n\ndef gen_batch_vectors(\n n_batches: int,\n n_features: int,\n batch_scale: float,\n bio_batch_angle: Union[float, None] = None,\n projection_to_bio: Union[np.ndarray, None] = None,\n) -> np.ndarray:\n \"\"\"Generates a batch-effect vector for each batch, optionally with some\n relation to the biological space\n\n :param n_batches: number of batches\n :param n_features: number of features\n :param batch_scale: size of batch effect relative to data\n :param bio_batch_angle: angle of batch effect w/ bio subspace\n :param projection_to_bio: projection from latent into gene space\n :return: array of shape (n_batches, n_features)\n \"\"\"\n\n norm = lambda X: np.linalg.norm(X, axis=1, keepdims=True)\n\n batch_vectors = np.random.randn(n_batches, n_features)\n batch_vectors = (\n batch_vectors / norm(batch_vectors) * np.mean(norm(expression)) * batch_scale\n )\n\n if bio_batch_angle is not None:\n v_projected = np.dot(batch_vectors, projection_to_bio)\n v_complement = batch_vectors - v_projected\n\n batch_vectors = norm(batch_vectors) * (\n np.sin(bio_batch_angle) * v_complement / norm(v_complement)\n + np.cos(bio_batch_angle) * v_projected / norm(v_projected)\n )\n\n return batch_vectors\n\n\ndef add_batch_vectors(\n expression: np.ndarray,\n batch: np.ndarray,\n batch_scale: Union[int, float],\n bio_batch_angle: Union[float, None],\n projection_to_bio: Union[np.ndarray, None],\n copy: bool = True,\n) -> np.ndarray:\n \"\"\"Generate batch-effect vectors and apply them to the expression data\n\n :param expression: array of true expression, in latent space\n :param batch: indicator of which obs belongs to which batch\n :param batch_scale: batch effect relative to data\n :param bio_batch_angle: angle of batch effect w/ bio subspace\n :param projection_to_bio: projection from latent into gene space\n :param copy: return a copy of the 
expression array or modify in-place\n :return: expression matrix with batch effect\n \"\"\"\n if copy:\n expression = expression.copy()\n\n n_batches = len(np.unique(batch))\n\n # add batch vector\n batch_vectors = gen_batch_vectors(\n n_batches, expression.shape[1], batch_scale, bio_batch_angle, projection_to_bio\n )\n\n for i in range(n_batches):\n expression[batch == i, :] += batch_vectors[i, :]\n\n return expression\n","sub_path":"src/simscity/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"176405158","text":"import json\nimport logging\nimport re\nfrom pathlib import Path\nfrom shutil import copy\nfrom typing import AnyStr, List, Optional, Dict, Any\n\nfrom src.python.review.common.file_system import new_temp_dir\nfrom src.python.review.common.subprocess_runner import run_in_subprocess\nfrom src.python.review.inspectors.base_inspector import BaseInspector\nfrom src.python.review.inspectors.inspector_type import InspectorType\nfrom src.python.review.inspectors.issue import BaseIssue, ChildrenNumberIssue, ClassResponseIssue, CodeIssue, \\\n CohesionIssue, \\\n CouplingIssue, InheritanceIssue, IssueType, MethodNumberIssue, WeightedMethodIssue, IssueData\nfrom src.python.review.inspectors.tips import get_child_number_tip, get_class_coupling_tip, get_class_response_tip, \\\n get_cohesion_tip, get_inheritance_depth_tip, get_method_number_tip, get_weighted_method_tip\n\nPATH_TOOLS_SPRINGLINT_FILES = Path(__file__).parent / 'files'\nPATH_SPRINGLINT_JAR = PATH_TOOLS_SPRINGLINT_FILES / 'springlint-0.6.jar'\nSPRINGLINT_OUTPUT_NAME = 'springlint-result.html'\n\nlogger = logging.getLogger(__name__)\n\n\nclass SpringlintInspector(BaseInspector):\n inspector_type = InspectorType.SPRINGLINT\n\n metric_name_to_property = {\n 'dit': 'inheritance_tree_depth',\n 'noc': 'children_number',\n 'wmc': 'weighted_method',\n 'cbo': 'class_objects_coupling',\n 'lcom': 'cohesion_lack',\n 'rfc': 'class_response',\n 'nom': 'method_number'\n }\n\n metric_name_to_description = {\n 'dit': get_inheritance_depth_tip(),\n 'noc': get_child_number_tip(),\n 'wmc': get_weighted_method_tip(),\n 'cbo': get_class_coupling_tip(),\n 'lcom': get_cohesion_tip(),\n 'rfc': get_class_response_tip(),\n 'nom': get_method_number_tip()\n }\n\n metric_name_to_issue_type = {\n 'dit': IssueType.INHERITANCE_DEPTH,\n 'noc': IssueType.CHILDREN_NUMBER,\n 'wmc': IssueType.WEIGHTED_METHOD,\n 'cbo': IssueType.COUPLING,\n 'lcom': IssueType.COHESION,\n 'rfc': IssueType.CLASS_RESPONSE,\n 'nom': 
IssueType.METHOD_NUMBER\n }\n\n @classmethod\n def _create_command(cls, path: Path, output_path: Path) -> List[str]:\n return [\n 'java', '-jar',\n PATH_SPRINGLINT_JAR,\n '--output', str(output_path),\n '-otype', 'html',\n '--project', str(path)\n ]\n\n def inspect(self, path: Path, config: dict) -> List[BaseIssue]:\n with new_temp_dir() as temp_dir:\n if path.is_file():\n return self._inspect_file(path, temp_dir)\n else:\n return self._inspect_project(path, temp_dir)\n\n @classmethod\n def _inspect_project(cls, path: Path, temp_dir: Path) -> List[BaseIssue]:\n output_path = temp_dir / SPRINGLINT_OUTPUT_NAME\n command = cls._create_command(path, temp_dir)\n run_in_subprocess(command)\n return cls._parse(output_path)\n\n @classmethod\n def _inspect_file(cls, path: Path, temp_dir: Path) -> List[BaseIssue]:\n output_path = temp_dir / SPRINGLINT_OUTPUT_NAME\n copy(str(path), str(temp_dir))\n command = cls._create_command(temp_dir, temp_dir)\n run_in_subprocess(command)\n return cls._parse(output_path, str(path))\n\n @classmethod\n def _parse(cls, output_path: Path, origin_path: str = '') -> List[BaseIssue]:\n if not output_path.is_file():\n logger.error('%s: error - no output file' % cls.inspector_type.value)\n return []\n\n with open(str(output_path)) as out_file:\n file_content = out_file.read()\n issues: List[BaseIssue] = cls._parse_smells(file_content, origin_path)\n issues.extend(cls._parse_metrics(file_content, origin_path))\n return issues\n\n @classmethod\n def _parse_smells(cls, file_content: AnyStr, origin_path: str = '') -> List[BaseIssue]:\n smells_re = re.compile(r'var smells=([^;]*);', re.S)\n smells_string = smells_re.findall(file_content)[0]\n smells = json.JSONDecoder().decode(smells_string)\n\n issues: List[BaseIssue] = []\n for file_smell in smells:\n if origin_path:\n file_path = origin_path\n else:\n file_path = file_smell['file']\n issues.extend([CodeIssue(\n file_path=Path(file_path),\n line_no=1,\n column_no=1,\n origin_class=smell['name'],\n 
inspector_type=cls.inspector_type,\n type=IssueType.ARCHITECTURE,\n description=smell['description']\n ) for smell in file_smell['smells']])\n\n return issues\n\n @classmethod\n def _parse_metrics(cls, file_content: AnyStr, origin_path: str = '') -> List[BaseIssue]:\n metrics_re = re.compile(r'var classes =([^;]*);', re.S)\n metrics_string = metrics_re.findall(file_content)[0]\n type_metrics_list = json.loads(metrics_string).items()\n\n issues: List[BaseIssue] = []\n for metrics_list in type_metrics_list:\n for metrics in metrics_list[1]:\n for metric_name in metrics:\n if metric_name not in cls.metric_name_to_property:\n continue\n if origin_path:\n file_path = origin_path\n else:\n file_path = metrics['file']\n issues.append(cls._create_issue(metric_name,\n metrics[metric_name],\n Path(file_path)))\n return issues\n\n @classmethod\n def _create_issue(cls, metric_name: str,\n metric_value: int, path: Path) -> Optional[BaseIssue]:\n property_name = cls.metric_name_to_property[metric_name]\n issue_data = cls._get_common_issue_data(path)\n issue_data[property_name] = metric_value\n issue_data['description'] = cls.metric_name_to_description[metric_name]\n issue_data['type'] = cls.metric_name_to_issue_type[metric_name]\n\n if metric_name == 'dit':\n return InheritanceIssue(**issue_data)\n if metric_name == 'noc':\n return ChildrenNumberIssue(**issue_data)\n if metric_name == 'wmc':\n return WeightedMethodIssue(**issue_data)\n if metric_name == 'cbo':\n return CouplingIssue(**issue_data)\n if metric_name == 'lcom':\n return CohesionIssue(**issue_data)\n if metric_name == 'rfc':\n return ClassResponseIssue(**issue_data)\n if metric_name == 'nom':\n return MethodNumberIssue(**issue_data)\n\n return None\n\n @classmethod\n def _get_common_issue_data(cls, file: Path) -> Dict[str, Any]:\n return IssueData.get_base_issue_data_dict(file, 
cls.inspector_type)\n","sub_path":"src/python/review/inspectors/springlint/springlint.py","file_name":"springlint.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"641239280","text":"\n# encoding=utf-8\n\nfrom .models import Question, Choice\n\n\nQUESTIONS = [\n {\n 'text': u'西西里什么景点最好玩',\n 'choices': [u'海岛', u'山洞', u'卫城', u'小镇']\n }, {\n 'text': u'西西里在哪个国家',\n 'choices': [u'意大利', u'西班牙', u'法国', u'希腊']\n }, {\n 'text': u'西西里是什么气候',\n 'choices': [u'地中海气候', u'海洋性气候', u'季风气候', u'大陆性气候']\n }, {\n 'text': u'西西里和科西嘉是同一个地方么',\n 'choices': [u'一直不是', u'以前是现在不是', u'以前不是现在是', u'一直是']\n }, {\n 'text': u'西西里的有什么好吃的特产',\n 'choices': [u'香肠', u'匹萨', u'汉堡', u'牛肉']\n }, {\n 'text': u'下面哪个是法国著名球星',\n 'choices': [u'里贝里', u'外贝外', u'上贝上', u'下贝下']\n }, {\n 'text': u'下面哪个难题是NP完全问题',\n 'choices': [u'汉密尔顿回路', u'欧拉回路', u'最小生成树', u'单源最短路径']\n }, {\n 'text': u'邓布利多和甘道夫是同一个演员吗',\n 'choices': [u'不一直是', u'一直是', u'一直不是', u'选我肯定是错的']\n }, {\n 'text': u'坎昆和以下哪个城市最近',\n 'choices': [u'佛罗里达', u'纽约', u'莫斯科', u'伦敦']\n }, {\n 'text': u'以下哪支国家队没有出现在2008年欧洲杯中',\n 'choices': [u'英格兰', u'意大利', u'荷兰', u'德国']\n }\n]\n\n\ndef random_questions(city):\n for q in QUESTIONS:\n question = Question.objects.create(city = city,\n text = q['text'])\n choices = [Choice(question = question,\n text = text,\n right = (index == 0))\n for index, text in enumerate(q['choices'])]\n Choice.objects.bulk_create(choices)\n\n\nimport os\ndef insert_questions():\n f = open(os.path.join(os.path.dirname(__file__), 'questions.txt'))\n lines = f.readlines()\n c = 1\n q = 1\n for i in range(0, len(lines), 5):\n margin = 3 if q < 10 else 4\n question = Question.objects.create(city_id = c, text = lines[i][margin:])\n choices = [Choice(question = question, text = lines[i+j], right = False)\n for j in range(1, 5)]\n Choice.objects.bulk_create(choices)\n q += 1\n if q > 10:\n q = 1\n c += 1\n","sub_path":"polls/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"71701451","text":"\"\"\"\nMIT License\n\nCopyright (c) 2018 Edison Neto\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport datetime as dt\nimport sys\n\nimport pandas as pd\n\n\nclass NoaaReport(object):\n \"\"\"Reads noaa report.\n\n Reads the last active region on the file from the previous day,\n and compares to the first one.\n\n Arguments:\n year {str or int} -- The report's year.\n month {str or int} -- The report's month.\n day {str or int} -- The report's day.\n \"\"\"\n\n def __init__(self, year, month, day, path):\n self._year = str(year)\n self._month = str(month)\n self._day = str(day)\n self._path = path\n self._filename = self.__set_filename()\n self._data = []\n self.df = None\n\n def __set_filename(self):\n \"\"\"Creates the file name, given the year, month and day.\n\n Returns:\n {str} -- The name of the file.\n \"\"\"\n\n if len(self._month) == 1:\n self._month = \"0\" + self._month\n if len(self._day) 
== 1:\n self._day = \"0\" + self._day\n\n filename = self._year + self._month + self._day + \"events.txt\"\n filename = self._path + filename\n return filename\n\n def __check_data(self):\n \"\"\"Checks if the data has already been saved.\n\n Returns:\n {bool} -- True if data has alredy been read.\n \"\"\"\n\n if len(self._data):\n return True\n\n self._read_data()\n\n def _read_data(self):\n \"\"\"Reads the file.\n \"\"\"\n\n with open(self._filename) as _file:\n for line in _file.readlines():\n sep = line.split()\n\n try:\n if (not sep[0].startswith(\":\") and\n not sep[0].startswith(\"#\")):\n self._data.append(sep)\n except IndexError:\n pass\n\n for event in self._data:\n if event[1] == \"+\":\n event[0] += \" +\"\n del event[1]\n\n def set_Qs(self):\n \"\"\"Sets the Q column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n Qs = []\n for info in self._data:\n if len(info[5]) == 1:\n Qs.append(info[5])\n else:\n Qs.append(None)\n return Qs\n\n def set_observatories(self):\n \"\"\"Set the obs column, and deletes the line (not on the actual file)\n that doesn't contain it.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n index = 0\n observatories = []\n while index < len(self._data):\n if len(self._data[index][4]) == 3:\n observatories.append(self._data[index][4])\n index += 1\n else:\n del self._data[index]\n\n return observatories\n\n def set_particulars(self):\n \"\"\"I don't know how i made this work. 
But, \"it just works\"\n - Todd Howard.\n\n Returns:\n {list} -- Contains all the particulars and None if there was\n nothing registered at that moment (I guess that never)\n happens.\n \"\"\"\n\n self.__check_data()\n particulars = []\n index = 0\n regs = self.set_regions()\n while index < len(self._data):\n try:\n last_index = len(self._data[index]) - 1\n last_reg = \"\"\n for reg in regs:\n if reg is not None:\n last_reg = reg\n break\n\n # If the last thing in a row is a 4 digit number.\n if (self._data[index][last_index].isdigit()\n and len(self._data[index][last_index]) == 4):\n # If there are more than 10 things in a row.\n if len(self._data[index]) > 10:\n particular = (self._data[index][last_index - 2] + \" \" +\n self._data[index][last_index - 1])\n elif (int(self._data[index][last_index])+25 <= int(last_reg)\n and int(self._data[index][last_index])-25 >= int(last_reg)):\n particular = self._data[index][last_index]\n else:\n particular = self._data[index][last_index - 1]\n else:\n if len(self._data[index]) > 9:\n particular = (self._data[index][last_index - 1] + \" \" +\n self._data[index][last_index])\n else:\n particular = self._data[index][last_index]\n\n particulars.append(particular)\n except IndexError:\n particulars.append(None)\n\n index += 1\n\n return particulars\n\n def set_regions(self, valid_regions_day_before=None):\n \"\"\"Get the regions from the file.\n The region to be valid must be a 4 digit number.\n There's a range of 25 to check if the other number will be a region,\n or not.\n The function gets the active regions from the other day to compare and\n check if the number is truly and active region.\n\n Returns:\n {list} -- A list containing the regions and None if there is no\n region at that time.\n \"\"\"\n\n self.__check_data()\n reg = []\n valid_regions = []\n for info in self._data:\n try:\n last_index = len(info) - 1\n if info[last_index].isdigit() and len(info[last_index]) == 4:\n if len(valid_regions) == 0 and info[last_index] 
!= \"0000\":\n if valid_regions_day_before is not None:\n if (int(info[last_index]) >= int(valid_regions_day_before[-1])-25\n and int(info[last_index]) <= int(valid_regions_day_before[-1])+25):\n reg.append(info[last_index])\n valid_regions.append(info[last_index])\n else:\n reg.append(None)\n else:\n reg.append(info[last_index])\n valid_regions.append(info[last_index])\n elif (int(info[last_index]) >= int(valid_regions[-1]) - 25\n and int(info[last_index]) <= int(valid_regions[-1]) + 25\n and info[last_index] != \"0000\"):\n reg.append(info[last_index])\n valid_regions.append(info[last_index])\n else:\n reg.append(None)\n else:\n reg.append(None)\n except IndexError:\n reg.append(None)\n\n return reg\n\n def set_event(self):\n \"\"\"Sets the event column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n return [i[0] for i in self._data]\n\n def set_begin(self):\n \"\"\"Sets the begin column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n return [i[1] for i in self._data]\n\n def set_max(self):\n \"\"\"Sets the max column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n return [i[2] for i in self._data]\n\n def set_end(self):\n \"\"\"Sets the end column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n return [i[3] for i in self._data]\n\n def set_type(self):\n \"\"\"Sets the type column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n return [i[6] for i in self._data]\n\n def set_freq(self):\n \"\"\"Sets the loc/freq column.\n\n Returns:\n {list} -- Contains the value for each line for the column.\n \"\"\"\n\n self.__check_data()\n return [i[7] for i in self._data]\n\n @classmethod\n def get_regions_from_other_day(cls, year, month, day, path):\n \"\"\"Gets all the 
not None regions from the day before the one\n being read.\n\n Arguments:\n year {str or int} -- The yesr being read.\n month {str or int} -- The month being read.\n day {str or int} -- The day being read.\n path {str} -- File's path.\n\n Returns:\n {list} -- All the not None active regions from the day before.\n \"\"\"\n\n date = dt.date(int(year), int(month), int(day))\n day_before = date - dt.timedelta(days=1)\n\n report = cls(day_before.year, day_before.month, day_before.day, path)\n regs = report.set_regions()\n regs = [x for x in regs if x is not None]\n return regs\n\n def set_final_data(self):\n \"\"\"Stores all the data in a dataframe.\n\n Returns:\n {pd.DataFrame} - A DataFrame with the data.\n \"\"\"\n\n self.__check_data()\n\n regs = NoaaReport.get_regions_from_other_day(self._year, self._month,\n self._day, self._path)\n\n # observatories must be declared first, because it changes the\n # data list.\n final_data = {\n \"obs\": self.set_observatories(),\n \"event\": self.set_event(),\n \"begin\": self.set_begin(),\n \"max\": self.set_max(),\n \"end\": self.set_end(),\n \"Q\": self.set_Qs(),\n \"type\": self.set_type(),\n \"loc/freq\": self.set_freq(),\n \"particulars\": self.set_particulars(),\n \"reg\": self.set_regions(regs)\n }\n\n columns = [\"event\", \"begin\", \"max\",\n \"end\", \"obs\", \"Q\", \"type\",\n \"loc/freq\", \"particulars\", \"reg\"]\n\n self.df = pd.DataFrame(final_data, columns=columns)\n\n return self.df\n\n def get_active_region(self, start_time, end_time):\n \"\"\"Returns registered active region of a certain time range.\n\n Arguments:\n start_time {str} -- event's start time.\n end_time {str} -- event's end time.\n\n Returns:\n {list} -- All the not None active regions.\n \"\"\"\n\n start_time = str(start_time)\n end_time = str(end_time)\n start_time = start_time[11:16].replace(\":\", \"\")\n start_time = dt.timedelta(hours=int(start_time[0:2]),\n minutes=int(start_time[2:]))\n end_time = end_time[11:16].replace(\":\", \"\")\n 
end_time = dt.timedelta(hours=int(end_time[0:2]),\n minutes=int(end_time[2:]))\n ar = []\n\n for i in range(0, len(self.df)):\n\n if not self.df[\"begin\"][i][0].isnumeric():\n self.df[\"begin\"][i] = self.df[\"begin\"][i][1:]\n if not self.df[\"max\"][i][0].isnumeric():\n self.df[\"max\"][i] = self.df[\"max\"][i][1:]\n if not self.df[\"end\"][i][0].isnumeric():\n self.df[\"end\"][i] = self.df[\"end\"][i][1:]\n\n event_begin = dt.timedelta(hours=int(self.df[\"begin\"][i][0:2]),\n minutes=int(self.df[\"begin\"][i][2:]))\n\n event_end = dt.timedelta(hours=int(self.df[\"end\"][i][0:2]),\n minutes=int(self.df[\"end\"][i][2:]))\n\n eleven_oclock = dt.timedelta(hours=23, minutes=00)\n fifteen_minutes = dt.timedelta(minutes=15)\n if event_begin >= eleven_oclock:\n continue\n\n if event_begin >= start_time and event_end <= end_time + fifteen_minutes:\n print(\"\\nBegin: {}\".format(self.df[\"begin\"][i]))\n print(\"Max: {}\".format(self.df[\"max\"][i]))\n print(\"End: {}\".format(self.df[\"end\"][i]))\n print(\"Type: {}\".format(self.df[\"type\"][i]))\n print(\"Loc/Freq: {}\".format(self.df[\"loc/freq\"][i]))\n print(\"Region: {}\".format(self.df[\"reg\"][i]))\n\n ar.append(self.df[\"reg\"][i])\n\n ar = [x for x in ar if x is not None]\n if len(ar) == 0:\n print(\"No regions identified.\")\n\n return ar\n\n def stuff(self):\n saves = []\n for i in range(0, len(self.df)):\n if (self.df[\"type\"][i] == \"XRA\" and (\n self.df[\"particulars\"][i].startswith(\"M\")\n or self.df[\"particulars\"][i].startswith(\"X\"))):\n if (int(self.df[\"begin\"][i]) < 800\n or int(self.df[\"begin\"][i]) > 1800):\n continue\n saves.append(i)\n\n for sav in saves:\n if sav+5 > len(self.df[\"type\"]):\n df_max = (len(self.df[\"type\"])-1)\n for i in range(sav-5, df_max):\n if self.df[\"type\"][i] == \"RBR\":\n print(\"\\nBegin: {}\".format(self.df[\"begin\"][i]))\n print(\"Freq: {}\".format(self.df[\"loc/freq\"][i]))\n print(\"Particulars: {}\".format(self.df[\"particulars\"][i]))\n 
print(\"Index: {}\".format(i))\n\n if sav > 5:\n for i in range(sav-5, sav+5):\n if self.df[\"type\"][i] == \"RBR\":\n print(\"\\nBegin: {}\".format(self.df[\"begin\"][i]))\n print(\"Freq: {}\".format(self.df[\"loc/freq\"][i]))\n print(\"Particulars: {}\".format(self.df[\"particulars\"][i]))\n print(\"Index: {}\".format(i))\n else:\n for i in range(0, sav+5):\n if self.df[\"type\"][i] == \"RBR\":\n print(\"\\nBegin: {}\".format(self.df[\"begin\"][i]))\n print(\"Freq: {}\".format(self.df[\"loc/freq\"][i]))\n print(\"Particulars: {}\".format(self.df[\"particulars\"][i]))\n print(\"Index: {}\".format(i))\n\n\nif __name__ == \"__main__\":\n report = NoaaReport(sys.argv[1], sys.argv[2], sys.argv[3],\n sys.argv[4])\n report.set_final_data()\n if len(sys.argv) > 5:\n ars = report.get_active_region(sys.argv[5], sys.argv[6])\n print(ars)\n","sub_path":"noaareport/noaareport.py","file_name":"noaareport.py","file_ext":"py","file_size_in_byte":15616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"130982750","text":"# The MIT License (MIT)\n# \n# Copyright (c) 2015 addfor s.r.l.\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nColor and color palette management helper functions.\n\nThis modules provides some simple functions to help with the management and\nuse of colors and color palettes. 
Although it was written to be used\nwith Bokeh, it doesn't really have any dependency, and can be used\nanywhere else it could be useful.\n\nFunctions:\n linear_map - map (linearly) a sequence of real values to the given palette\n sample_mpl_cmap - convert a Matplotlib-like colormap to a simple array of colors\n to_rgb_bytes - converts a color expressed as an RGB [0.0, 1.0]-ranged tuple to a RGB bytes (int 0-255) tuple\n to_hex - converts a color expressed as an RGB [0.0, 1.0]-ranged tuple to a hex representation #aabbcc\n\nVariables:\n mpl_cmap_jet - Colormap from Matplotlib: jet (deprecated)\n mpl_cmap_hot - Colormap from Matplotlib: hot\n\n jet_hex, hot_hex, jet_bytes, hot_bytes -\n *_hex: matplotlib colormap converted to hex representation\n *_bytes: matplotlib colormap converted to bytes (int 0-255) tuple\n\"\"\"\n\nmpl_cmap_jet = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),\n (1, 0.5, 0.5)),\n 'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),\n (0.91, 0, 0), (1, 0, 0)),\n 'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1),\n (0.65, 0, 0), (1, 0, 0))}\n\nmpl_cmap_hot = {'red': ((0. , 0.0416, 0.0416),\n (0.365079 , 1.000000, 1.000000),\n (1.0 , 1.0, 1.0)),\n 'green': ((0. , 0., 0.),\n (0.365079 , 0.000000, 0.000000),\n (0.746032 , 1.000000, 1.000000),\n (1.0 , 1.0, 1.0)),\n 'blue': ((0. 
, 0., 0.),\n (0.746032 , 0.000000, 0.000000),\n (1.0 , 1.0, 1.0))}\n\ndef sample(channel, pos):\n try:\n idx_b = next((idx for idx, it in enumerate(channel) if it[0] >= pos))\n except StopIteration:\n return channel[-1][1]\n\n idx_a = max(0, idx_b - 1)\n if idx_a == idx_b:\n return channel[idx_a][1]\n \n pos_a, val_a, _ = channel[idx_a]\n pos_b, val_b, _ = channel[idx_b]\n dx = (pos - pos_a) / (pos_b - pos_a)\n return val_a + dx * (val_b - val_a)\n\ndef sample_mpl_cmap(cmap, nsamples):\n #channels = map(list, [ cmap['red'], cmap['green'], cmap['blue'] ])\n channels = [list(b) for b in [ cmap['red'], cmap['green'], cmap['blue'] ] ]\n\n\n for chan in channels:\n # Sort stops by position\n chan.sort(key=lambda stop: stop[0])\n\n positions = [ 1.0 / nsamples * i for i in range(nsamples+1) ]\n \n samples = []\n for pos in positions:\n r, g, b = map(lambda chan: sample(chan, pos), channels)\n samples.append((r,g,b))\n \n return samples\n\njet = sample_mpl_cmap(mpl_cmap_jet, 80)\nhot = sample_mpl_cmap(mpl_cmap_hot, 80)\n\ndef to_rgb_bytes(rgb):\n r, g, b = rgb[:3]\n r = int(min(1, r) * 255)\n g = int(min(1, g) * 255)\n b = int(min(1, b) * 255)\n return (r,g,b)\n\njet_rgb = map(to_rgb_bytes, jet)\nhot_rgb = map(to_rgb_bytes, hot)\n\ndef to_hex(rgb):\n return \"#%02x%02x%02x\" % to_rgb_bytes(rgb)\n\njet_hex = map(to_hex, jet)\nhot_hex = map(to_hex, hot)\n\ndef linear_map(xs, palette, low=None, high=None):\n \"\"\"Map (linearly) a sequence of real values to the given palette.\n \n Parameters:\n xs - A list of numbers, in the range [low, high]\n palette - A list of colors\n\n Returns:\n A list of the same size of xs, with the color of each sample\n \"\"\"\n \n if xs == []: return []\n \n if low == None:\n low = min(xs)\n if high == None:\n high = max(xs)\n\n idx = lambda x: int( (float(x) - low)\n / (high - low)\n * (len(palette)-1) )\n clamped = [ max(low, min(high, x)) for x in xs ]\n return [ palette[ idx(x) ] for x in clamped 
]\n\n","sub_path":"addutils/palette.py","file_name":"palette.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"547875770","text":"# AsynQueue:\n# Asynchronous task queueing based on the Twisted framework, with task\n# prioritization and a powerful worker/manager interface.\n#\n# Copyright (C) 2006-2007, 2015 by Edwin A. Suominen,\n# http://edsuom.com/AsynQueue\n#\n# See edsuom.com for API documentation as well as information about\n# Ed's background and other projects, software and otherwise.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS\n# IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nUnit tests for mcmandelbrot.runner\n\"\"\"\n\nimport png\n\nfrom twisted.internet import defer\n\nfrom mcmandelbrot import main\nfrom mcmandelbrot.test.testbase import TestCase\n\n \nclass TestRun(TestCase):\n verbose = True\n \n @defer.inlineCallbacks\n def test_basic(self):\n N = 100\n filePath = \"image.png\"\n runInfo = yield self.checkProducesFile(\n filePath, main.run,\n \"-o\", filePath, 100, -0.630, 0, 1.4,\n ignoreReactor=True)\n self.assertEqual(runInfo[1], N*N)\n\n","sub_path":"mcmandelbrot/test/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"445818289","text":"from selenium import webdriver\nfrom fixture.session import SessionHelper\n\n\n# Класс-менеджер, который инициализирует всех помощников\nclass Application:\n\n # создание фикстуры, инициализация драйвера\n def __init__(self, browser, base_url):\n if browser == \"firefox\":\n self.wd = webdriver.Firefox(capabilities={\"marionette\": False})\n elif browser == \"chrome\":\n self.wd = webdriver.Chrome()\n elif browser == \"ie\":\n self.wd = webdriver.Ie\n# self.wd.implicitly_wait(60)\n else:\n raise ValueError(\"Unrecognized browser %s\", browser)\n # конструирование помощников, передаем ссылку на саму фикстуру\n self.session = SessionHelper(self)# помощник сесссий получает ссылку на объект класса Application\n self.base_url = base_url\n\n def open_home_page(self): # метод навигации, кандидат на перенос в соответ.помощник\n wd = self.wd\n wd.get(self.base_url)\n\n # метод разрушает фикстуру, останавливает браузер\n def destroy(self):\n self.wd.quit()\n\n # метод проверяет валидность фикстуры\n def is_valid(self):\n try:\n self.wd.current_url\n return True\n except:\n return False\n\n\n","sub_path":"fixture/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"276187426","text":"\nclass readMKP:\n\n def read_MkpProblems():\n problems = dict();\n problems['data'] = []\n problems['capacidad'] = []\n problems['beneficio'] = []\n problems['pesos'] = []\n problems['optimo'] = []\n f = open(\"./mkp_problems.txt\", \"r\")\n lines = f.readlines()\n for line in lines:\n if '#' in line:\n data = lines[lines.index(line)+1].split()\n data = list(map(int, data))\n nmochilas = data[1]\n capacidad = lines[lines.index(line)+2].split()\n capacidad = list(map(float, capacidad))\n beneficio = lines[lines.index(line)+3].split()\n beneficio = list(map(float, beneficio))\n optimo = float(lines[lines.index(line)+4+nmochilas])\n pesos = []\n #print(data)\n problems['data'].append(data)\n #print(capacidad)\n problems['capacidad'].append(capacidad) \n #print(beneficio)\n problems['beneficio'].append(beneficio)\n for knapsack in range(lines.index(line)+4,lines.index(line)+4+nmochilas):\n mochila_pesos = lines[knapsack].split()\n mochila_pesos = list(map(float, mochila_pesos))\n pesos.append(mochila_pesos)\n #print(pesos)\n problems['pesos'].append(pesos)\n #print(optimo)\n problems['optimo'].append(optimo)\n\n f.close()\n return problems\n","sub_path":"repo/readMKP.py","file_name":"readMKP.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"136732847","text":"from django.shortcuts import render_to_response\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nimport forms\nimport django.forms as dforms\nfrom base import models\nfrom base.templatetags import diagram\nfrom browse import view_curation\n\n@user_passes_test(lambda u: u.is_superuser)\ndef home(request):\n \"\"\"Entry point for validate curation\"\"\"\n template_file = \"validate_curation_main.html\"\n return render_to_response(template_file, {}, context_instance=RequestContext(request))\n\ndef view_edit_validate(request):\n \"\"\"View main page for edit/validate curations\"\"\"\n template_file = \"validate_edit_or_validate.html\"\n # get all not validated curations\n curations = models.Curation.objects.\\\n filter(validated_by=None).\\\n order_by('created').all()\n template = {'curations': curations}\n return render_to_response(template_file, template,\n context_instance=RequestContext(request))\n\ndef view_validated_curations(request):\n \"\"\"View page to view validated curations.\"\"\"\n template_file = \"validate_view_curations.html\"\n # get all validated curation objects\n curations = models.Curation.objects.\\\n exclude(validated_by=None).\\\n order_by('created').all()\n template = {'curations': curations}\n return render_to_response(template_file, template,\n context_instance=RequestContext(request))\n\n@user_passes_test(lambda u: u.is_superuser)\ndef edit_curation(request, curation_id):\n return edit_curation.edit_curation(request, curation_id)\n\n\n@user_passes_test(lambda u: u.is_superuser)\ndef validate_curation(request, curation_id):\n \"\"\"Validate curation handler.\"\"\"\n if request.method == 'POST':\n curation = models.Curation.objects.get(pk=request.POST[\"curation_id\"])\n form = 
forms.EditCurationForm(request.POST, curation=curation)\n \n if form.is_valid():\n validate_form_done(request, form, curation)\n messages.add_message(request, messages.SUCCESS, \"Curation was modified/validated successfully.\")\n return HttpResponseRedirect(reverse(browseapp.view_curation.view_curation, kwargs={'cid': curation.pk}))\n else:\n curation = models.Curation.objects.get(pk=curation_id)\n # initial data preparation functions\n form = validate_get_form(curation)\n template = {'form': form,\n 'curation': curation}\n \n return render(request, \"validate_curation.html\",\n {'form': form, 'curation': curation},\n context_instance=RequestContext(request))\n\n@user_passes_test(lambda u: u.is_superuser)\ndef edit_validated_curation(request, curation_id):\n if request.method == 'POST':\n curation = models.Curation.objects.get(pk=request.POST[\"curation_id\"])\n form = forms.EditCurationForm(request.POST, curation=curation)\n if form.is_valid():\n validate_form_done(request, form, curation)\n messages.add_message(request, messages.SUCCESS, \"Curation was modified/validated successfully.\")\n return HttpResponseRedirect(reverse(browseapp.view_curation.view_curation, kwargs={'cid': curation.pk}))\n else:\n curation = models.Curation.objects.get(pk=curation_id)\n form = validate_get_form(curation)\n # check if any of the sites has been submitted to NCBI.\n ncbi_submitted = False\n csis = curation.curation_siteinstance_set.all()\n if models.NCBISubmission.objects.filter(curation_site_instance__in=csis):\n ncbi_submitted = True\n\n template = {'form': form,\n 'curation': curation,\n 'ncbi_submitted': ncbi_submitted,}\n \n return render(request, \"validate_curation.html\",\n template,\n context_instance=RequestContext(request))\n\n\ndef validate_get_form(curation):\n def get_genome_accession():\n if curation.site_instances.all():\n return curation.site_instances.all()[0].genome.genome_accession\n return \"\"\n def get_used_techniques():\n ts = 
curation.experimental_techniques.all()\n return [str(t.technique_id) for t in ts]\n def get_external_db():\n try:\n external_db = models.Curation_ExternalDatabase.objects.get(curation=curation)\n except models.Curation_ExternalDatabase.DoesNotExist:\n external_db = None\n return external_db\n\n def populate_site_instances(form):\n for csi in curation.curation_siteinstance_set.all():\n site_instance = csi.site_instance\n seq = csi.site_instance.seq\n strand = '+' if site_instance.strand == 1 else '-'\n loc = '[%d,%d]' % (site_instance.start+1, site_instance.end+1)\n label = pretty_print.site2label(csi.pk, seq+' '+strand+loc)\n help_text = gene_diagram.regulation_diagram(csi.regulation_set.all(), csi.site_instance)\n form.fields[\"site_instance_%d\"%csi.pk] = dforms.BooleanField(label=label,\n help_text=help_text,\n required=False)\n form.fields[\"site_instance_%d\"%csi.pk].initial = True\n \n external_db = get_external_db()\n data = dict(\n # genome/TF initialization\n TF = curation.TF,\n TF_type = curation.TF_type,\n genome_accession = get_genome_accession(),\n TF_accession = curation.TF_instance.protein_accession,\n TF_species = curation.TF_species,\n site_species = curation.site_species,\n # techniques initialization\n techniques = get_used_techniques(),\n experimental_process = curation.experimental_process,\n forms_complex = curation.forms_complex,\n complex_notes = curation.complex_notes,\n external_db_type = (external_db.external_database.ext_database_id\n if external_db else None),\n external_db_accession = (external_db.accession_number\n if external_db else \"\"),\n # curation review initialization\n revision_reasons = curation.requires_revision,\n confidence = curation.confidence,\n paper_complete = curation.publication.curation_complete,\n NCBI_submission_ready = curation.NCBI_submission_ready,\n notes = curation.notes,\n )\n #form = forms.EditCurationForm(data)\n # add sites\n #populate_site_instances(form)\n kwargs = {'curation': curation}\n form = 
forms.EditCurationForm(data, **kwargs)\n return form\n","sub_path":"curate/validate_curation.py","file_name":"validate_curation.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"67242663","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nIntakes the sequence or fasta string of a protein using the standard, single letter alphabet,\ngets the orthologs from an external (online) database, the OMA browser. It then generates a\nmultiple sequence alignment (MSA) using T-Coffee, and calculates the conservation score of each\namino acid at each position using Rate4Site, with respect to the entered sequence.\n\nCitations\nOMA database:\nAltenhoff A et al., The OMA orthology database in 2018: retrieving evolutionary relationships among\nall domains of life through richer web and programmatic interfaces Nucleic Acids Research, 2018,\n46 (D1): D477-D485 (doi:10.1093/nar/gkx1019).\n\nT-Coffee:\nT-Coffee: A novel method for multiple sequence alignments.\nNotredame,Higgins,Heringa,JMB,302(205-217)2000\n\nRate4Site:\nMayrose, I., Graur, D., Ben-Tal, N., and Pupko, T. 2004. Comparison of site-specific rate-inference methods:\nBayesian methods are superior. Mol Biol Evol 21: 1781-1791.\n\"\"\"\n\nimport oma\nimport aminoCons\nimport argparse\nimport os\n\nOLD_DIR = os.getcwd()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"sequence\", help=\"Input the sequence of the protein of interest\")\nparser.add_argument(\"--hogs\", action=\"store_true\", help=\"When specified, the Hierarchical Orthologous Group (HOGs) of the sequence is returned\")\nparser.add_argument(\"--name\", default=\"Protein_sequence\", help=\"The name of the protein. All files generated will be based on this name\")\nparser.add_argument(\"--identity\", action=\"store_true\", help=\"The identity of the amino acid (Single letter code) at each position\")\nparser.add_argument(\"--score\", action=\"store_true\", help=\"The conservation scores. lower value = higher conservation\")\nparser.add_argument(\"--qqint\", action=\"store_true\", help=\"QQ-INTERVAL, the confidence interval for the rate estimates. 
The default interval is 25-75 percentiles\")\nparser.add_argument(\"--std\", action=\"store_true\", help=\"The standard deviation of the posterior rate distribution\")\nparser.add_argument(\"--gapped\", action=\"store_true\", help=\"MSA DATA, the number of aligned sequences having an amino acid (non-gapped) from the overall number of sequences at each position\")\nargs = parser.parse_args()\n\nwith open(args.sequence, 'r') as prot_file:\n prot_seq = prot_file.read()\n\nif args.hogs:\n seq2ortho = oma.OrthologFinder(prot_seq)\n orthologs = seq2ortho.get_HOGs()\nelse:\n seq2ortho = oma.OrthologFinder(prot_seq)\n orthologs = seq2ortho.get_orthologs()\n\nwith open(\"%s.txt\" %(args.name), 'w') as seq_file:\n seq_file.write(orthologs)\n\nalignment = aminoCons.build_alignment(os.getcwd() + os.sep + \"%s.txt\"%(args.name))\n\ncons_dict = aminoCons.Rate4Site(msa= (alignment + os.sep+ \"%s.aln\"%(args.name)), identity=args.identity,\n score=args.score, qqint=args.qqint,std=args.std, gapped=args.gapped)\ncons_dict = cons_dict.run()\n\n\nwith open(\"%s data\"%(args.name), 'w') as mat_file:\n output = str(cons_dict)\n mat_file.write(output)\n","sub_path":"ortho_to_cons_score.py","file_name":"ortho_to_cons_score.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"298545689","text":"from flask import jsonify\n# from lin import route_meta, group_required, login_required\n# from lin.exception import Success\nfrom lin.redprint import Redprint\nfrom lin.util import paginate\n\nfrom app.models.article import Article\n\narticle_api = Redprint('article')\n\n\n@article_api.route('/', methods=['GET'])\ndef get_all():\n start, count = paginate()\n res = Article.get_all(start, count)\n for val in res:\n try:\n val.logo = \"https://qnhszm.obs.cn-north-1.myhuaweicloud.com/images/\" + val.logo.split('/')[-1]\n val.img1 = \"https://qnhszm.obs.cn-north-1.myhuaweicloud.com/images/\" + val.img1.split('/')[-1]\n val.img2 = \"https://qnhszm.obs.cn-north-1.myhuaweicloud.com/images/\" + val.img2.split('/')[-1]\n val.img3 = \"https://qnhszm.obs.cn-north-1.myhuaweicloud.com/images/\" + val.img3.split('/')[-1]\n val.img4 = \"https://qnhszm.obs.cn-north-1.myhuaweicloud.com/images/\" + val.img4.split('/')[-1]\n val.img5 = \"https://qnhszm.obs.cn-north-1.myhuaweicloud.com/images/\" + val.img5.split('/')[-1]\n except:\n continue\n return jsonify(res)\n\n\n@article_api.route('/', methods=['GET'])\ndef get_article(aid):\n res = Article.get_article(aid)\n return jsonify(res)\n","sub_path":"app/api/v1/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"168279473","text":"# Python Tkinter Build A text Editor V - Undo Redo and Horizontal Scrollbar\n# Python Tkinter Build A editor de texto\n\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import font\n\nroot = Tk()\nroot.title('Python Tkinter Build A text Editor V')\nroot.iconbitmap('Python Tkinter Build A text Editor V/icons/document.ico')\nroot.geometry(\"1000x680\")\n\n# Set Variable for opne file name\nglobal open_status_name\nopen_status_name = False\n\nglobal selected\nselected = False\n\n# Create New File Function\ndef new_file():\n # Delete previos text\n my_text.delete(\"1.0\", END)\n # Update status bars\n root.title(\"New File - TextPad!\")\n status_bar.config(text=\"New File \")\n\n global open_status_name\n open_status_name = False\n\n# Open Files\ndef open_file():\n # Delete Precios Text\n my_text.delete(\"1.0\", END)\n\n # Grab Filename\n text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Build A text Editor V/documents/\", title=\"Open File\", filetypes=((\"Text Files\", \"*.txt\"), (\"HTML Files\", \"*.html\"), (\"Python Files\", \"*.py\"), (\"All Files\", \"*.*\")))\n \n # Check to see if there is a file name\n if text_file:\n # Make filename global so we can access it later\n global open_status_name\n open_status_name = text_file\n\n # Updaet status bars\n name = text_file\n status_bar.config(text=f'{name} ')\n name = name.replace(\"C:/Users/brian/Documents/Python-Course/Python Tkinter Build A text Editor V/documents/\", \"\")\n root.title(f'{name} - TextPad!')\n\n # Open the File\n text_file = open(text_file, 'r')\n stuff = text_file.read()\n\n # Close the opened file\n text_file.close()\n\n # Add File textbox\n my_text.insert(END, stuff)\n\n#Save as file\ndef save_as_file():\n text_file = filedialog.asksaveasfilename(defaultextension=\".*\", initialdir=\"C:/Users/brian/Documents/Python-Course/Python Tkinter Build A text Editor V/documents/\", title=\"Save File\", filetypes=((\"Text Files\", 
\"*.txt\"), (\"HTML Files\", \"*.html\"), (\"Python Files\", \"*.py\"), (\"All Files\", \"*.*\")))\n if text_file:\n # Updates Status Bars\n name = text_file\n status_bar.config(text=f'{name} ')\n name = name.replace(\"C:/Users/brian/Documents/Python-Course/Python Tkinter Build A text Editor V/documents/\", \"\")\n root.title(f'{name} - TextPad!')\n \n # Save File\n text_file = open(text_file, \"w\")\n text_file.write(my_text.get(1.0, END))\n #close the file\n text_file.close()\n\n# Save File\ndef save_file():\n global open_status_name\n if open_status_name:\n # Save File\n text_file = open(open_status_name, \"w\")\n text_file.write(my_text.get(1.0, END))\n #close the file\n text_file.close()\n # put status\n status_bar.config(text=f'{open_status_name} ')\n else:\n save_as_file()\n\n# cut Text\ndef cut_text(e):\n global selected\n \n if e:\n selected = root.clipboard_get()\n\n else:\n if my_text.selection_get():\n selected = my_text.selection_get()\n # Delected selected text from box\n my_text.delete(\"sel.first\", \"sel.last\")\n #clear the clipboard then append\n root.clipboard_clear()\n root.clipboard_append(selected)\n\n# copy Text\ndef copy_text(e):\n global selected\n #CHECK TO SEE IF WE USED KEYBOARD SHORTCUTS\n if e:\n selected = root.clipboard_get()\n\n if my_text.selection_get():\n # Grab selected text from text box\n selected = my_text.selection_get()\n root.clipboard_clear()\n root.clipboard_append(selected)\n\n# paste Text\ndef paste_text(e):\n global selected\n if e:\n selected = root.clipboard_get()\n else:\n if selected:\n position = my_text.index(INSERT)\n my_text.insert(position, selected)\n\n\n# Creare Main Frame\nmy_frame = Frame(root)\nmy_frame.pack(pady=5)\n\n# Create our Scrollbar For the Text Box\ntext_scroll = Scrollbar(my_frame)\ntext_scroll.pack(side=RIGHT, fill=Y)\n\n# Horizontal Scrollbar\nhor_scroll = Scrollbar(my_frame, orient='horizontal')\nhor_scroll.pack(side=BOTTOM, fill=X)\n\n# Create Text Box\nmy_text = Text(my_frame, width=97, 
height=25, font=(\"Helvetica\", 16), selectbackground=\"#4FDECD\", selectforeground=\"black\", undo=True, yscrollcommand=text_scroll.set, wrap=\"none\", xscrollcommand=hor_scroll.set)\nmy_text.pack()\n\n# Configure our Scroolbar\ntext_scroll.config(command=my_text.yview)\nhor_scroll.config(command=my_text.xview)\n\n# Create Menu\nmy_menu = Menu(root)\nroot.config(menu=my_menu)\n\n#A Add File Menu\nfile_menu = Menu(my_menu, tearoff=False)\nmy_menu.add_cascade(label=\"File\", menu=file_menu)\nfile_menu.add_command(label=\"New\", command=new_file)\nfile_menu.add_command(label=\"Open\", command=open_file)\nfile_menu.add_command(label=\"Save\", command=save_file)\nfile_menu.add_command(label=\"Save As\", command=save_as_file)\nfile_menu.add_separator()\nfile_menu.add_command(label=\"Exit\", command=root.quit)\n\n# Add Edit Menu\nedit_menu = Menu(my_menu, tearoff=False)\nmy_menu.add_cascade(label=\"Edit\", menu=edit_menu)\nedit_menu.add_command(label=\"Cut\", command=lambda: cut_text(False), accelerator=\"(Ctrl+x)\")\nedit_menu.add_command(label=\"Copy\", command=lambda: copy_text(False), accelerator=\"(Ctrl+c)\")\nedit_menu.add_command(label=\"Paste\", command=lambda: paste_text(False), accelerator=\"(Ctrl+v)\")\nedit_menu.add_separator()\nedit_menu.add_command(label=\"Undo\", command=my_text.edit_undo, accelerator=\"(Ctrl+z)\")\nedit_menu.add_command(label=\"Redo\", command=my_text.edit_redo, accelerator=\"(Ctrl+y)\")\n\n# Add Status Bar To Botton of App\nstatus_bar = Label(root, text=\"Ready \", anchor=E)\nstatus_bar.pack(fill=X, side=BOTTOM, ipady=15)\n\n# Edit Bindings\nroot.bind('', cut_text)\nroot.bind('', copy_text)\nroot.bind('', paste_text)\n\nroot.mainloop()\n","sub_path":"Python Tkinter Build A text Editor V/BuildTextEditorV.py","file_name":"BuildTextEditorV.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"394026522","text":"from __future__ import print_function\nimport nltk\nimport random\nimport string\nimport numpy as np\nfrom time import time\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import svm, preprocessing\nfrom nltk import word_tokenize,sent_tokenize\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import movie_reviews\nfrom nltk.tokenize import RegexpTokenizer\n\nfrom nmf_kl import KLdivNMF\nfrom rlx_nmf_kl import RlxKLdivNMF\n\n\n\nrandom.seed(0)\ntoken_dict = {}\n\ni=0\nfor category in movie_reviews.categories():\n\tfor fileid in movie_reviews.fileids(category):\n\t\ttoken_dict[i] = movie_reviews.raw(fileid)\n\t\ti = i+1\n\ndef stem_tokens(tokens, stemmer):\n stemmed = []\n for item in tokens:\n stemmed.append(stemmer.stem(item))\n return stemmed\n\ndef tokenize(text):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(text)\n\t#stems = stem_tokens(tokens, stemmer)\n\treturn tokens\n\ndef print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n print(\"Topic #%d:\" % topic_idx)\n print(\" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]))\n print()\n\n#print token_dict.values()\n\nn_features = 5000\nn_topics = 80\nn_top_words = 20\nmax_iter = 300\n\n\ncountvec = CountVectorizer(tokenizer=tokenize)\nraw_tdmatrix = countvec.fit_transform(token_dict.values())\nraw_vocab = countvec.vocabulary_\nfeature_names = countvec.get_feature_names()\n\nsorted_idx = raw_tdmatrix.sum(0).argsort().tolist()[0]\nvocab_idx = sorted_idx[-5050:-50]\n\nvocab = []\nfor idx in vocab_idx:\n\tvocab.append(feature_names[idx])\n\ncountvec = CountVectorizer(tokenizer=tokenize, vocabulary=vocab)\ntdmatrix = countvec.fit_transform(token_dict.values())\nfeature_names = 
countvec.get_feature_names()\n\n# Fit the Relaxed NMF model\nprint(\"Fitting the Relaxed NMF model\")\nt0 = time()\nrlx_nmf = RlxKLdivNMF(n_components=n_topics, random_state=1, max_iter=max_iter, init='random', rho=500.0)\nrlx_nmf.fit(tdmatrix)\nprint(\"done in %0.3fs.\" % (time() - t0))\n#print(\"\\nTopics in L2-NMF model:\")\n#print_top_words(rlx_nmf, feature_names, n_top_words)\n\n\n# Fit the L2-NMF model\nprint(\"Fitting the L2-NMF model\")\nt0 = time()\nnmf = NMF(n_components=n_topics, random_state=1, max_iter=max_iter, init='nndsvd', solver='cd')\nnmf.fit(tdmatrix)\nprint(\"done in %0.3fs.\" % (time() - t0))\n#print(\"\\nTopics in L2-NMF model:\")\n#print_top_words(nmf, feature_names, n_top_words)\n\n# Fit the NMF model\nprint(\"Fitting the KL-NMF model\")\nt0 = time()\nkl_nmf = KLdivNMF(n_components=n_topics, random_state=1, max_iter=max_iter, init='nndsvd')\nkl_nmf.fit(tdmatrix)\nprint(\"done in %0.3fs.\" % (time() - t0))\n#print(\"\\nTopics in KL-NMF model:\")\n#print_top_words(nmf, feature_names, n_top_words)\n\n# Fit the LDA model\nt0 = time()\nprint(\"Fitting LDA models\")\nlda = LatentDirichletAllocation(n_topics=n_topics, max_iter=max_iter,\n learning_method='batch', n_jobs=1,\n evaluate_every=5, random_state=0)\nlda.fit(tdmatrix)\nprint(\"done in %0.3fs.\" % (time() - t0))\n\n#print(\"\\nTopics in LDA model:\")\n#print_top_words(lda, feature_names, n_top_words)\n\n\n# extract features\npermute = random.sample(range(2000), 2000)\nlabels = np.ones(2000)\nlabels[1000:] = -1\nraw_rlx_nmf_features = rlx_nmf.transform(tdmatrix)\nraw_nmf_features = nmf.transform(tdmatrix)\nraw_kl_nmf_features = kl_nmf.transform(tdmatrix)\nraw_lda_features = lda.transform(tdmatrix)\n\n# scale the raw features\nmin_max_scaler = preprocessing.MinMaxScaler()\n\nlabels = labels[permute];\nrlx_nmf_features = min_max_scaler.fit_transform(raw_rlx_nmf_features[permute])\nnmf_features = min_max_scaler.fit_transform(raw_nmf_features[permute])\nkl_nmf_features = 
min_max_scaler.fit_transform(raw_kl_nmf_features[permute])\nlda_features = raw_lda_features[permute]\n\n# train svms on scaled features\nprint(\"10-fold cross-validation acc of Relaxed NMF:\")\nclf = svm.SVC(kernel='linear', C=1)\nscores = cross_val_score(clf, rlx_nmf_features, labels, cv=10)\nprint(np.mean(scores))\n\nprint(\"10-fold cross-validation acc of L2-NMF:\")\nclf = svm.SVC(kernel='linear', C=1)\nscores = cross_val_score(clf, nmf_features, labels, cv=10)\nprint(np.mean(scores))\n\nprint(\"10-fold cross-validation acc of KL-NMF:\")\nclf = svm.SVC(kernel='linear', C=1)\nscores = cross_val_score(clf, kl_nmf_features, labels, cv=10)\nprint(np.mean(scores))\n\nprint(\"10-fold cross-validation acc of LDA:\")\nclf = svm.SVC(kernel='linear', C=1)\nscores = cross_val_score(clf, lda_features, labels, cv=10)\nprint(np.mean(scores))\n\n","sub_path":"non-negative-matrix-factorization/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"454027330","text":"\n# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or (at your option)\n# any later version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n# more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# imports\nimport bpy\nfrom bpy.types import PropertyGroup\nfrom bpy.props import EnumProperty, BoolProperty, StringProperty, IntProperty\n\n###########\n## LISTS ##\n###########\n\n# list\nclass menuList:\n '''\n Contains Lists;\n objects\n modifiers\n constraints\n '''\n # object\n objects = [\n ('ALL', 'All Objects', '', 'OBJECT_DATA', 0),\n ('MESH', 'Mesh', '', 'OUTLINER_OB_MESH', 1),\n ('CURVE', 'Curve', '', 'OUTLINER_OB_CURVE', 2),\n ('SURFACE', 'Surface', '', 'OUTLINER_OB_SURFACE', 3),\n ('META', 'Meta', '', 'OUTLINER_OB_META', 4),\n ('FONT', 'Text', '', 'OUTLINER_OB_FONT', 5),\n ('ARMATURE', 'Armature', '', 'OUTLINER_OB_ARMATURE', 6),\n ('LATTICE', 'Lattice', '', 'OUTLINER_OB_LATTICE', 7),\n ('EMPTY', 'Empty', '', 'OUTLINER_OB_EMPTY', 8),\n ('SPEAKER', 'Speaker', '', 'OUTLINER_OB_SPEAKER', 9),\n ('CAMERA', 'Camera', '', 'OUTLINER_OB_CAMERA', 10),\n ('LAMP', 'Lamp', '', 'OUTLINER_OB_LAMP', 11)\n ]\n\n # constraint\n constraints = [\n ('ALL', 'All Constraints', '', 'CONSTRAINT', 0),\n\n # motion tracking\n ('CAMERA_SOLVER', 'Camera Solver', '', 'CONSTRAINT_DATA', 1),\n ('FOLLOW_TRACK', 'Follow Track', '', 'CONSTRAINT_DATA', 2),\n 
('OBJECT_SOLVER', 'Object Solver', '', 'CONSTRAINT_DATA', 3),\n\n # transform\n ('COPY_LOCATION', 'Copy Location', '', 'CONSTRAINT_DATA', 4),\n ('COPY_ROTATION', 'Copy Rotation', '', 'CONSTRAINT_DATA', 5),\n ('COPY_SCALE', 'Copy Scale', '', 'CONSTRAINT_DATA', 6),\n ('COPY_TRANSFORMS', 'Copy Transforms', '', 'CONSTRAINT_DATA', 7),\n ('LIMIT_DISTANCE', 'Limit Distance', '', 'CONSTRAINT_DATA', 8),\n ('LIMIT_LOCATION', 'Limit Location', '', 'CONSTRAINT_DATA', 9),\n ('LIMIT_ROTATION', 'Limit Rotation', '', 'CONSTRAINT_DATA', 10),\n ('LIMIT_SCALE', 'Limit Scale', '', 'CONSTRAINT_DATA', 11),\n ('MAINTAIN_VOLUME', 'Maintain Volume', '', 'CONSTRAINT_DATA', 12),\n ('TRANSFORM', 'Transformation', '', 'CONSTRAINT_DATA', 13),\n\n # tracking\n ('CLAMP_TO', 'Clamp To', '', 'CONSTRAINT_DATA', 14),\n ('DAMPED_TRACK', 'Damped Track', '', 'CONSTRAINT_DATA', 15),\n ('IK', 'Inverse Kinematics', '', 'CONSTRAINT_DATA', 16),\n ('LOCKED_TRACK', 'Locked Track', '', 'CONSTRAINT_DATA', 17),\n ('SPLINE_IK', 'Spline IK', '', 'CONSTRAINT_DATA', 18),\n ('STRETCH_TO', 'Stretch To', '', 'CONSTRAINT_DATA', 19),\n ('TRACK_TO', 'Track To', '', 'CONSTRAINT_DATA', 20),\n\n # relationship\n ('ACTION', 'Action', '', 'CONSTRAINT_DATA', 21),\n ('CHILD_OF', 'Child Of', '', 'CONSTRAINT_DATA', 22),\n ('FLOOR', 'Floor', '', 'CONSTRAINT_DATA', 23),\n ('FOLLOW_PATH', 'Follow Path', '', 'CONSTRAINT_DATA', 24),\n ('PIVOT', 'Pivot', '', 'CONSTRAINT_DATA', 25),\n ('RIGID_BODY_JOINT', 'Rigid Body Joint', '', 'CONSTRAINT_DATA', 26),\n ('SHRINKWRAP', 'Shrinkwrap', '', 'CONSTRAINT_DATA', 27)\n ]\n\n # modifier\n modifiers = [\n ('ALL', 'All Modifiers', '', 'MODIFIER', 0),\n\n # modify\n ('DATA_TRANSFER', 'Data Transfer', '', 'MOD_DATA_TRANSFER', 1),\n ('MESH_CACHE', 'Mesh Cache', '', 'MOD_MESHDEFORM', 2),\n ('NORMAL_EDIT', 'Normal Edit', '', 'MOD_NORMALEDIT', 3),\n ('UV_PROJECT', 'UV Project', '', 'MOD_UVPROJECT', 4),\n ('UV_WARP', 'UV Warp', '', 'MOD_UVPROJECT', 5),\n ('VERTEX_WEIGHT_EDIT', 'Vertex Weight Edit', '', 
'MOD_VERTEX_WEIGHT', 6),\n ('VERTEX_WEIGHT_MIX', 'Vertex Weight Mix', '', 'MOD_VERTEX_WEIGHT', 7),\n ('VERTEX_WEIGHT_PROXIMITY', 'Vertex Weight Proximity', '', 'MOD_VERTEX_WEIGHT', 8),\n\n # generate\n ('ARRAY', 'Array', '', 'MOD_ARRAY', 9),\n ('BEVEL', 'Bevel', '', 'MOD_BEVEL', 10),\n ('BOOLEAN', 'Boolean', '', 'MOD_BOOLEAN', 11),\n ('BUILD', 'Build', '', 'MOD_BUILD', 12),\n ('DECIMATE', 'Decimate', '', 'MOD_DECIM', 13),\n ('EDGE_SPLIT', 'Edge Split', '', 'MOD_EDGESPLIT', 14),\n ('MASK', 'Mask', '', 'MOD_MASK', 15),\n ('MIRROR', 'Mirror', '', 'MOD_MIRROR', 16),\n ('MULTIRES', 'Multiresolution', '', 'MOD_MULTIRES', 17),\n ('REMESH', 'Remesh', '', 'MOD_REMESH', 18),\n ('SCREW', 'Screw', '', 'MOD_SCREW', 19),\n ('SKIN', 'Skin', '', 'MOD_SKIN', 20),\n ('SOLIDIFY', 'Solidify', '', 'MOD_SOLIDIFY', 21),\n ('SUBSURF', 'Subdivision Surface', '', 'MOD_SUBSURF', 22),\n ('TRIANGULATE', 'Triangulate', '', 'MOD_TRIANGULATE', 23),\n ('WIREFRAME', 'Wireframe', '', 'MOD_WIREFRAME', 24),\n\n # deform\n ('ARMATURE', 'Armature', '', 'MOD_ARMATURE', 25),\n ('CAST', 'Cast', '', 'MOD_CAST', 26),\n ('CORRECTIVE_SMOOTH', 'Corrective Smooth', '', 'MOD_SMOOTH', 27),\n ('CURVE', 'Curve', '', 'MOD_CURVE', 28),\n ('DISPLACE', 'Displace', '', 'MOD_DISPLACE', 29),\n ('HOOK', 'Hook', '', 'HOOK', 30),\n ('LAPLACIANSMOOTH', 'Laplacian Smooth', '', 'MOD_SMOOTH', 31),\n ('LAPLACIANDEFORM', 'Laplacian Deform', '', 'MOD_MESHDEFORM', 32),\n ('LATTICE', 'Lattice', '', 'MOD_LATTICE', 33),\n ('MESH_DEFORM', 'Mesh Deform', '', 'MOD_MESHDEFORM', 34),\n ('SHRINKWRAP', 'Shrinkwrap', '', 'MOD_SHRINKWRAP', 35),\n ('SIMPLE_DEFORM', 'Simple Deform', '', 'MOD_SIMPLEDEFORM', 36),\n ('SMOOTH', 'Smooth', '', 'MOD_SMOOTH', 37),\n ('WARP', 'Warp', '', 'MOD_WARP', 38),\n ('WAVE', 'Wave', '', 'MOD_WAVE', 39),\n\n # simulate\n ('CLOTH', 'Cloth', '', 'MOD_CLOTH', 40),\n ('COLLISION', 'Collision', '', 'MOD_PHYSICS', 41),\n ('DYNAMIC_PAINT', 'Dynamic Paint', '', 'MOD_DYNAMICPAINT', 42),\n ('EXPLODE', 'Explode', '', 
'MOD_EXPLODE', 43),\n ('FLUID_SIMULATION', 'Fluid Simulation', '', 'MOD_FLUIDSIM', 44),\n ('OCEAN', 'Ocean', '', 'MOD_OCEAN', 45),\n ('PARTICLE_INSTANCE', 'Particle Instance', '', 'MOD_PARTICLES', 46),\n ('PARTICLE_SYSTEM', 'Particle System', '', 'MOD_PARTICLES', 47),\n ('SMOKE', 'Smoke', '', 'MOD_SMOKE', 48),\n ('SOFT_BODY', 'Soft Body', '', 'MOD_SOFT', 49)\n ]\n\n#####################\n## PROPERTY GROUPS ##\n#####################\n\n# panel\nclass panel(PropertyGroup):\n '''\n Properties that effect how item panel displays the datablocks within the users current selection.\n '''\n\n # filters\n filters = BoolProperty(\n name = 'Filters',\n description = 'Show options for whether datablock names are displayed.',\n default = False\n )\n\n # options\n options = BoolProperty(\n name = 'Options',\n description = 'Show shortcut options next to datablock names.',\n default = False\n )\n\n # selected\n selected = BoolProperty(\n name = 'Selected',\n description = 'Display all possible object related datablock names within your current selection inside the item panel.',\n default = False\n )\n\n # groups\n groups = BoolProperty(\n name = 'Groups',\n description = 'Display group name.',\n default = False\n )\n\n # action\n action = BoolProperty(\n name = 'Action',\n description = 'Display action name.',\n default = False\n )\n\n # grease pencil\n greasePencil = BoolProperty(\n name = 'Grease Pencil',\n description = 'Display grease pencil and layer names',\n default = False\n )\n\n # constraints\n constraints = BoolProperty(\n name = 'Constraints',\n description = 'Display constraint names.',\n default = False\n )\n\n # modifiers\n modifiers = BoolProperty(\n name = 'Modifiers',\n description = 'Display modifier names.',\n default = False\n )\n\n # bone groups\n boneGroups = BoolProperty(\n name = 'Bone Groups',\n description = 'Display bone group names.',\n default = False\n )\n\n # bone constraints\n boneConstraints = BoolProperty(\n name = 'Bone Constraints',\n 
description = 'Display bone constraint names.',\n default = False\n )\n\n # vertex groups\n vertexGroups = BoolProperty(\n name = 'Vertex Groups',\n description = 'Display vertex group names.',\n default = False\n )\n\n # shapekeys\n shapekeys = BoolProperty(\n name = 'Shapekeys',\n description = 'Display shapekey names.',\n default = False\n )\n\n # uvs\n uvs = BoolProperty(\n name = 'UV\\'s',\n description = 'Display uv names.',\n default = False\n )\n\n # vertex color\n vertexColors = BoolProperty(\n name = 'Vertex Colors',\n description = 'Display vertex color names.',\n default = False\n )\n\n # materials\n materials = BoolProperty(\n name = 'Materials',\n description = 'Display material names.',\n default = False\n )\n\n # textures\n textures = BoolProperty(\n name = 'Textures.',\n description = 'Display material texture names.',\n default = False\n )\n\n # particle systems\n particleSystems = BoolProperty(\n name = 'Particle Systems',\n description = 'Display the particle system and setting names. 
(Modifier filter must be active)',\n default = False\n )\n\n # selected bones\n selectedBones = BoolProperty(\n name = 'Selected',\n description = 'Display selected bone names.',\n default = False\n )\n\nclass batch:\n '''\n Contains Classes;\n auto\n name (PropertyGroup)\n copy (PropertyGroup)\n '''\n # auto\n class auto:\n '''\n Contains Classes;\n name (PropertyGroup)\n objects (PropertyGroup)\n constraints (PropertyGroup)\n modifiers (PropertyGroup)\n objectData (PropertyGroup)\n '''\n # options\n class name(PropertyGroup):\n '''\n Main properties that effect how the batch auto name operator is performed.\n '''\n\n # batch type\n batchType = EnumProperty(\n name = 'Batch Type',\n description = '',\n items = [\n ('SELECTED', 'Selected', 'Batch auto name will only effect the object related datablock names within the current selection.'),\n ('OBJECTS', 'All Objects', 'Batch auto name will effect all object related datablock names in the file.')\n ],\n default = 'SELECTED'\n )\n\n # objects\n objects = BoolProperty(\n name = 'Objects',\n description = 'Name objects.',\n default = False\n )\n\n # constraints\n constraints = BoolProperty(\n name = 'Constraints',\n description = 'Name constraints.',\n default = False\n )\n\n # modifiers\n modifiers = BoolProperty(\n name = 'Modifiers',\n description = 'Name modifiers.',\n default = False\n )\n\n # objectData\n objectData = BoolProperty(\n name = 'Object Data',\n description = 'Name object data.',\n default = False\n )\n\n # bone Constraints\n boneConstraints = BoolProperty(\n name = 'Bone Constraints',\n description = 'Name bone constraints.'\n )\n\n # object type\n objectType = EnumProperty(\n name = 'Object Type',\n description = 'Type of objects to be effected.',\n items = menuList.objects,\n default = 'ALL'\n )\n\n # constraint type\n constraintType = EnumProperty(\n name = 'Constraint Type',\n description = 'Type of constraints to be effected.',\n items = menuList.constraints,\n default = 'ALL'\n )\n\n # modifier 
type\n modifierType = EnumProperty(\n name = 'Modifier Type',\n description = 'Type of modifiers to be effected.',\n items = menuList.modifiers,\n default = 'ALL'\n )\n\n # object\n class objects(PropertyGroup):\n '''\n Properties that effect the names used when auto naming objects.\n '''\n # mesh\n mesh = StringProperty(\n name = 'Mesh',\n description = 'Name used for mesh objects.',\n default = 'Mesh'\n )\n\n # curve\n curve = StringProperty(\n name = 'Curve',\n description = 'Name used for curve objects.',\n default = 'Curve'\n )\n\n # surface\n surface = StringProperty(\n name = 'Surface',\n description = 'Name used for surface objects.',\n default = 'Surface'\n )\n\n # meta\n meta = StringProperty(\n name = 'Meta',\n description = 'Name used for meta objects.',\n default = 'Meta'\n )\n\n # font\n font = StringProperty(\n name = 'Text',\n description = 'Name used for font objects.',\n default = 'Text'\n )\n\n # armature\n armature = StringProperty(\n name = 'Armature',\n description = 'Name used for armature objects.',\n default = 'Armature'\n )\n\n # lattice\n lattice = StringProperty(\n name = 'Lattice',\n description = 'Name used for lattice objects.',\n default = 'Lattice'\n )\n\n # empty\n empty = StringProperty(\n name = 'Empty',\n description = 'Name used for empty objects.',\n default = 'Empty'\n )\n\n # speaker\n speaker = StringProperty(\n name = 'Speaker',\n description = 'Name used for speaker objects.',\n default = 'Speaker'\n )\n\n # camera\n camera = StringProperty(\n name = 'Camera',\n description = 'Name used for camera objects.',\n default = 'Camera'\n )\n\n # lamp\n lamp = StringProperty(\n name = 'Lamp',\n description = 'Name used for lamp objects.',\n default = 'Lamp'\n )\n\n # constraint\n class constraints(PropertyGroup):\n '''\n Properties that effect the names used when auto naming constraints.\n '''\n\n # camera solver\n cameraSolver = StringProperty(\n name = 'Camera Solver',\n description = 'Name used for camera solver 
constraints.',\n default = 'Camera Solver'\n )\n\n # follow track\n followTrack = StringProperty(\n name = 'Follow Track',\n description = 'Name used for follow track constraints.',\n default = 'Follow Track'\n )\n\n # object solver\n objectSolver = StringProperty(\n name = 'Object Solver',\n description = 'Name used for object solver constraints.',\n default = 'Object Solver'\n )\n\n # copy location\n copyLocation = StringProperty(\n name = 'Copy Location',\n description = 'Name used for copy location constraints.',\n default = 'Copy Location'\n )\n\n # copy rotation\n copyRotation = StringProperty(\n name = 'Copy Rotation',\n description = 'Name used for copy rotation constraints.',\n default = 'Copy Rotation'\n )\n\n # copy scale\n copyScale = StringProperty(\n name = 'Copy Scale',\n description = 'Name used for copy scale constraints.',\n default = 'Copy Scale'\n )\n\n # copy transforms\n copyTransforms = StringProperty(\n name = 'Copy Transforms',\n description = 'Name used for copy transforms constraints.',\n default = 'Copy Transforms'\n )\n\n # limit distance\n limitDistance = StringProperty(\n name = 'Limit Distance',\n description = 'Name used for limit distance constraints.',\n default = 'Limit Distance'\n )\n\n # limit location\n limitLocation = StringProperty(\n name = 'Limit Location',\n description = 'Name used for limit location constraints.',\n default = 'Limit Location'\n )\n\n # limit rotation\n limitRotation = StringProperty(\n name = 'Limit Rotation',\n description = 'Name used for limit rotation constraints.',\n default = 'Limit Rotation'\n )\n\n # limit scale\n limitScale = StringProperty(\n name = 'Limit Scale',\n description = 'Name used for limit scale constraints.',\n default = 'Limit Scale'\n )\n\n # maintain volume\n maintainVolume = StringProperty(\n name = 'Maintain Volume',\n description = 'Name used for maintain volume constraints.',\n default = 'Maintain Volume'\n )\n\n # transform\n transform = StringProperty(\n name = 
'Transform',\n description = 'Name used for transform constraints.',\n default = 'Transform'\n )\n\n # clamp to\n clampTo = StringProperty(\n name = 'Clamp To',\n description = 'Name used for clamp to constraints.',\n default = 'Clamp To'\n )\n\n # damped track\n dampedTrack = StringProperty(\n name = 'Damped Track',\n description = 'Name used for damped track constraints.',\n default = 'Damped Track'\n )\n\n # inverse kinematics\n inverseKinematics = StringProperty(\n name = 'Inverse Kinematics',\n description = 'Name used for inverse kinematics constraints.',\n default = 'Inverse Kinematics'\n )\n\n # locked track\n lockedTrack = StringProperty(\n name = 'Locked Track',\n description = 'Name used for locked track constraints.',\n default = 'Locked Track'\n )\n\n # spline inverse kinematics\n splineInverseKinematics = StringProperty(\n name = 'Spline Inverse Kinematics',\n description = 'Name used for spline inverse kinematics constraints.',\n default = 'Spline Inverse Kinematics'\n )\n\n # stretch to\n stretchTo = StringProperty(\n name = 'Stretch To',\n description = 'Name used for stretch to constraints.',\n default = 'Stretch To'\n )\n\n # track to\n trackTo = StringProperty(\n name = 'Track To',\n description = 'Name used for track to constraints.',\n default = 'Track To'\n )\n\n # action\n action = StringProperty(\n name = 'Action',\n description = 'Name used for action constraints.',\n default = 'Action'\n )\n\n # child of\n childOf = StringProperty(\n name = 'Child Of',\n description = 'Name used for child of constraints.',\n default = 'Child Of'\n )\n\n # floor\n floor = StringProperty(\n name = 'Floor',\n description = 'Name used for floor constraints.',\n default = 'Floor'\n )\n\n # follow path\n followPath = StringProperty(\n name = 'Follow Path',\n description = 'Name used for follow path constraints.',\n default = 'Follow Path'\n )\n\n # pivot\n pivot = StringProperty(\n name = 'Pivot',\n description = 'Name used for pivot constraints.',\n default = 
'Pivot'\n )\n\n # rigid body joint\n rigidBodyJoint = StringProperty(\n name = 'Rigid Body Joint',\n description = 'Name used for rigid body joint constraints.',\n default = 'Rigid Body Joint'\n )\n\n # shrinkwrap\n shrinkwrap = StringProperty(\n name = 'Shrinkwrap',\n description = 'Name used for shrinkwrap constraints.',\n default = 'Shrinkwrap'\n )\n\n # modifier\n class modifiers(PropertyGroup):\n '''\n Properties that effect the names used when auto naming modifiers.\n '''\n # data transfer\n dataTransfer = StringProperty(\n name = 'Data Transfer',\n description = 'Name used for data transfer modifiers.',\n default = 'Data Transfer'\n )\n\n # mesh cache\n meshCache = StringProperty(\n name = 'Mesh Cache',\n description = 'Name used for mesh cache modifiers.',\n default = 'Mesh Cache'\n )\n\n # normal edit\n normalEdit = StringProperty(\n name = 'Normal Edit',\n description = 'Name used for normal edit modifiers.',\n default = 'Normal Edit'\n )\n\n # uv project\n uvProject = StringProperty(\n name = 'UV Project',\n description = 'Name used for uv project modifiers.',\n default = 'UV Project'\n )\n\n # uv warp\n uvWarp = StringProperty(\n name = 'UV Warp',\n description = 'Name used for uv warp modifiers.',\n default = 'UV Warp'\n )\n\n # vertex weight edit\n vertexWeightEdit = StringProperty(\n name = 'Vertex Weight Edit',\n description = 'Name used for vertex weight edit modifiers.',\n default = 'Vertex Weight Edit'\n )\n\n # vertex weight mix\n vertexWeightMix = StringProperty(\n name = 'Vertex Weight Mix',\n description = 'Name used for vertex weight mix modifiers.',\n default = 'Vertex Weight Mix'\n )\n\n # vertex weight proximity\n vertexWeightProximity = StringProperty(\n name = 'Vertex Weight Proximity',\n description = 'Name used for vertex weight proximity modifiers.',\n default = 'Vertex Weight Proximity'\n )\n\n # array\n array = StringProperty(\n name = 'Array',\n description = 'Name used for array modifiers.',\n default = 'Array'\n )\n\n # bevel\n 
bevel = StringProperty(\n name = 'Bevel',\n description = 'Name used for bevel modifiers.',\n default = 'Bevel'\n )\n\n # boolean\n boolean = StringProperty(\n name = 'Boolean',\n description = 'Name used for boolean modifiers.',\n default = 'Boolean'\n )\n\n # build\n build = StringProperty(\n name = 'Build',\n description = 'Name used for build modifiers.',\n default = 'Build'\n )\n\n # decimate\n decimate = StringProperty(\n name = 'Decimate',\n description = 'Name used for decimate modifiers.',\n default = 'Decimate'\n )\n\n # edge split\n edgeSplit = StringProperty(\n name = 'Edge Split',\n description = 'Name used for edge split modifiers.',\n default = 'Edge Split'\n )\n\n # mask\n mask = StringProperty(\n name = 'Mask',\n description = 'Name used for mask modifiers.',\n default = 'Mask'\n )\n\n # mirror\n mirror = StringProperty(\n name = 'Mirror',\n description = 'Name used for mirror modifiers.',\n default = 'Mirror'\n )\n\n # multiresolution\n multiresolution = StringProperty(\n name = 'Multiresolution',\n description = 'Name used for multiresolution modifiers.',\n default = 'Multiresolution'\n )\n\n # remesh\n remesh = StringProperty(\n name = 'Remesh',\n description = 'Name used for remesh modifiers.',\n default = 'Remesh'\n )\n\n # screw\n screw = StringProperty(\n name = 'Screw',\n description = 'Name used for screw modifiers.',\n default = 'Screw'\n )\n\n # skin\n skin = StringProperty(\n name = 'Skin',\n description = 'Name used for skin modifiers.',\n default = 'Skin'\n )\n\n # solidify\n solidify = StringProperty(\n name = 'Solidify',\n description = 'Name used for solidify modifiers.',\n default = 'Solidify'\n )\n\n # subdivision surface\n subdivisionSurface = StringProperty(\n name = 'Subdivision Surface',\n description = 'Name used for subdivision surface modifiers.',\n default = 'Subdivision Surface'\n )\n\n # triangulate\n triangulate = StringProperty(\n name = 'Triangulate',\n description = 'Name used for triangulate modifiers.',\n default 
= 'Triangulate'\n )\n\n # wireframe\n wireframe = StringProperty(\n name = 'Wireframe',\n description = 'Name used for wireframe modifiers.',\n default = 'Wireframe'\n )\n\n # armature\n armature = StringProperty(\n name = 'Armature',\n description = 'Name used for armature modifiers.',\n default = 'Armature'\n )\n\n # cast\n cast = StringProperty(\n name = 'Cast',\n description = 'Name used for cast modifiers.',\n default = 'Cast'\n )\n\n # corrective smooth\n correctiveSmooth = StringProperty(\n name = 'Corrective Smooth',\n description = 'Name used for corrective smooth modifiers.',\n default = 'Corrective Smooth'\n )\n\n # curve\n curve = StringProperty(\n name = 'Curve',\n description = 'Name used for curve modifiers.',\n default = 'Curve'\n )\n\n # displace\n displace = StringProperty(\n name = 'Displace',\n description = 'Name used for displace modifiers.',\n default = 'Displace'\n )\n\n # hook\n hook = StringProperty(\n name = 'Hook',\n description = 'Name used for hook modifiers.',\n default = 'Hook'\n )\n\n # laplacian smooth\n laplacianSmooth = StringProperty(\n name = 'Laplacian Smooth',\n description = 'Name used for laplacian smooth modifiers.',\n default = 'Laplacian Smooth'\n )\n\n # laplacian deform\n laplacianDeform = StringProperty(\n name = 'Laplacian Deform',\n description = 'Name used for laplacian deform modifiers.',\n default = 'Laplacian Deform'\n )\n\n # lattice\n lattice = StringProperty(\n name = 'Lattice',\n description = 'Name used for lattice modifiers.',\n default = 'Lattice'\n )\n\n # mesh deform\n meshDeform = StringProperty(\n name = 'Mesh Deform',\n description = 'Name used for mesh deform modifiers.',\n default = 'Mesh Deform'\n )\n\n # shrinkwrap\n shrinkwrap = StringProperty(\n name = 'Shrinkwrap',\n description = 'Name used for shrinkwrap modifiers.',\n default = 'Shrinkwrap'\n )\n\n # simple deform\n simpleDeform = StringProperty(\n name = 'Simple Deform',\n description = 'Name used for simple deform modifiers.',\n default = 
'Simple Deform'\n )\n\n # smooth\n smooth = StringProperty(\n name = 'Smooth',\n description = 'Name used for smooth modifiers.',\n default = 'Smooth'\n )\n\n # warp\n warp = StringProperty(\n name = 'Warp',\n description = 'Name used for warp modifiers.',\n default = 'Warp'\n )\n\n # wave\n wave = StringProperty(\n name = 'Wave',\n description = 'Name used for wave modifiers.',\n default = 'Wave'\n )\n\n # cloth\n cloth = StringProperty(\n name = 'Cloth',\n description = 'Name used for cloth modifiers.',\n default = 'Cloth'\n )\n\n # collision\n collision = StringProperty(\n name = 'Collision',\n description = 'Name used for collision modifiers.',\n default = 'Collision'\n )\n\n # dynamic paint\n dynamicPaint = StringProperty(\n name = 'Dynamic Paint',\n description = 'Name used for dynamic paint modifiers.',\n default = 'Dynamic Paint'\n )\n\n # explode\n explode = StringProperty(\n name = 'Explode',\n description = 'Name used for explode modifiers.',\n default = 'Explode'\n )\n\n # fluid simulation\n fluidSimulation = StringProperty(\n name = 'Fluid Simulation',\n description = 'Name used for fluid simulation modifiers.',\n default = 'Fluid Simulation'\n )\n\n # ocean\n ocean = StringProperty(\n name = 'Ocean',\n description = 'Name used for ocean modifiers.',\n default = 'Ocean'\n )\n\n # particle instance\n particleInstance = StringProperty(\n name = 'Particle Instance',\n description = 'Name used for particle instance modifiers.',\n default = 'Particle Instance'\n )\n\n # particle system\n particleSystem = StringProperty(\n name = 'Particle System',\n description = 'Name used for particle system modifiers.',\n default = 'Particle System'\n )\n\n # smoke\n smoke = StringProperty(\n name = 'Smoke',\n description = 'Name used for smoke modifiers.',\n default = 'Smoke'\n )\n\n # soft body\n softBody = StringProperty(\n name = 'Soft Body',\n description = 'Name used for soft body modifiers.',\n default = 'Soft Body'\n )\n\n # object data\n class 
objectData(PropertyGroup):\n '''\n Properties that effect the names used when auto naming objects.\n '''\n # mesh\n mesh = StringProperty(\n name = 'Mesh',\n description = 'Name used for mesh objects.',\n default = 'Mesh'\n )\n\n # curve\n curve = StringProperty(\n name = 'Curve',\n description = 'Name used for curve objects.',\n default = 'Curve'\n )\n\n # surface\n surface = StringProperty(\n name = 'Surface',\n description = 'Name used for surface objects.',\n default = 'Surface'\n )\n\n # meta\n meta = StringProperty(\n name = 'Meta',\n description = 'Name used for meta objects.',\n default = 'Meta'\n )\n\n # font\n font = StringProperty(\n name = 'Text',\n description = 'Name used for font objects.',\n default = 'Text'\n )\n\n # armature\n armature = StringProperty(\n name = 'Armature',\n description = 'Name used for armature objects.',\n default = 'Armature'\n )\n\n # lattice\n lattice = StringProperty(\n name = 'Lattice',\n description = 'Name used for lattice objects.',\n default = 'Lattice'\n )\n\n # empty\n empty = StringProperty(\n name = 'Empty',\n description = 'Name used for empty objects.',\n default = 'Empty'\n )\n\n # speaker\n speaker = StringProperty(\n name = 'Speaker',\n description = 'Name used for speaker objects.',\n default = 'Speaker'\n )\n\n # camera\n camera = StringProperty(\n name = 'Camera',\n description = 'Name used for camera objects.',\n default = 'Camera'\n )\n\n # lamp\n lamp = StringProperty(\n name = 'Lamp',\n description = 'Name used for lamp objects.',\n default = 'Lamp'\n )\n\n # name\n class name(PropertyGroup):\n '''\n Properties that effect how the batch name operation is performed.\n '''\n\n # tag\n tag = BoolProperty(\n name = 'Tag',\n description = 'Used by batch name internally. 
(keep it off)',\n default = False\n )\n\n # batch type\n batchType = EnumProperty(\n name = 'Batch Type',\n description = '',\n items = [\n ('SELECTED', 'Selected', 'Batch name will only effect the object related datablock names within the current selection.'),\n ('OBJECTS', 'All Objects', 'Batch name will effect all object related datablock names in the file.'),\n ('GLOBAL', 'Global', 'Batch name will effect all datablocks in the file. (Disables type filter menus.)')\n ],\n default = 'SELECTED'\n )\n\n # objects\n objects = BoolProperty(\n name = 'Objects',\n description = 'Name objects.',\n default = False\n )\n\n # groups\n groups = BoolProperty(\n name = 'Groups',\n description = 'Name groups.',\n default = False\n )\n\n # actions\n actions = BoolProperty(\n name = 'Actions',\n description = 'Name actions.',\n default = False\n )\n\n # grease pencil\n greasePencil = BoolProperty(\n name = 'Grease Pencil',\n description = 'Name grease pencils and layers.',\n default = False\n )\n\n # constraints\n constraints = BoolProperty(\n name = 'Object Constraints',\n description = 'Name constraints.',\n default = False\n )\n\n # modifiers\n modifiers = BoolProperty(\n name = 'Modifiers',\n description = 'Name modifiers.',\n default = False\n )\n\n # object data\n objectData = BoolProperty(\n name = 'Object Data',\n description = 'Name object data.',\n default = False\n )\n\n # bone groups\n boneGroups = BoolProperty(\n name = 'Bone Groups',\n description = 'Name bone groups.',\n default = False\n )\n\n # bones\n bones = BoolProperty(\n name = 'Bones',\n description = 'Name bones.',\n default = False\n )\n\n # bone constraints\n boneConstraints = BoolProperty(\n name = 'Bone Constraints',\n description = 'Name bone constraints.',\n default = False\n )\n\n # vertex groups\n vertexGroups = BoolProperty(\n name = 'Vertex Groups',\n description = 'Name vertex groups.',\n default = False\n )\n\n # shapekeys\n shapekeys = BoolProperty(\n name = 'Shapekeys',\n description = 'Name 
shapekeys.',\n default = False\n )\n\n # uvs\n uvs = BoolProperty(\n name = 'UV Maps',\n description = 'Name uv maps.',\n default = False\n )\n\n # vertex colors\n vertexColors = BoolProperty(\n name = 'Vertex Colors',\n description = 'Name vertex colors.',\n default = False\n )\n\n # materials\n materials = BoolProperty(\n name = 'Materials',\n description = 'Name materials.',\n default = False\n )\n\n # textures\n textures = BoolProperty(\n name = 'Textures',\n description = 'Name material textures.',\n default = False\n )\n\n # particle systems\n particleSystems = BoolProperty(\n name = 'Particle Systems',\n description = 'Name particle systems.',\n default = False\n )\n\n # particle settings\n particleSettings = BoolProperty(\n name = 'Particle Settings',\n description = 'Name particle settings.',\n default = False\n )\n\n # object type\n objectType = EnumProperty(\n name = 'Object Type',\n description = 'Type of objects to be effected.',\n items = menuList.objects,\n default = 'ALL'\n )\n\n # constraint type\n constraintType = EnumProperty(\n name = 'Constraint Type',\n description = 'Type of constraints to be effected.',\n items = menuList.constraints,\n default = 'ALL'\n )\n\n # modifier type\n modifierType = EnumProperty(\n name = 'Modifier Type',\n description = 'Type of modifiers to be effected.',\n items = menuList.modifiers,\n default = 'ALL'\n )\n\n # scenes\n scenes = BoolProperty(\n name = 'scenes',\n description = 'Name scenes. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # render layers\n renderLayers = BoolProperty(\n name = 'Render Layers',\n description = 'Name render layers. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # worlds\n worlds = BoolProperty(\n name = 'Worlds',\n description = 'Name worlds. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # libraries\n libraries = BoolProperty(\n name = 'Libraries',\n description = 'Name libraries. 
(Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # images\n images = BoolProperty(\n name = 'Images',\n description = 'Name images. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # masks\n masks = BoolProperty(\n name = 'Masks',\n description = 'Name masks. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # sequences\n sequences = BoolProperty(\n name = 'Sequences',\n description = 'Name sequences. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # movie clips\n movieClips = BoolProperty(\n name = 'Movie Clips',\n description = 'Name movie clips. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # sounds\n sounds = BoolProperty(\n name = 'Sounds',\n description = 'Name sounds. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # screens\n screens = BoolProperty(\n name = 'Screens',\n description = 'Name screens. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # keying sets\n keyingSets = BoolProperty(\n name = 'Keying Sets',\n description = 'Name keying sets. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # palettes\n palettes = BoolProperty(\n name = 'Palettes',\n description = 'Name color palettes. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # brushes\n brushes = BoolProperty(\n name = 'Brushes',\n description = 'Name brushes. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # linestyles\n linestyles = BoolProperty(\n name = 'Linestyles',\n description = 'Name linestyles. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # nodes\n nodes = BoolProperty(\n name = 'Nodes',\n description = 'Name nodes. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # node labels\n nodeLabels = BoolProperty(\n name = 'Node Labels',\n description = 'Name node labels. 
(Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # node groups\n nodeGroups = BoolProperty(\n name = 'Node Groups',\n description = 'Name node groups. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # texts\n texts = BoolProperty(\n name = 'Texts',\n description = 'Name text documents. (Must use \\'Global\\' batch type option)',\n default = False\n )\n\n # custom name\n customName = StringProperty(\n name = 'Custom Name',\n description = 'Designate a new name.'\n )\n\n # find\n find = StringProperty(\n name = 'Find',\n description = 'Find this string in the datablock name and remove it.'\n )\n\n # regex\n regex = BoolProperty(\n name = 'Regular Expressions',\n description = 'Use regular expressions.',\n default = False\n )\n\n # replace\n replace = StringProperty(\n name = 'Replace',\n description = 'Replace found string with the string entered here.'\n )\n\n # prefix\n prefix = StringProperty(\n name = 'Prefix',\n description = 'Place this string at the beginning of the name.'\n )\n\n # suffix\n suffix = StringProperty(\n name = 'Suffix',\n description = 'Place this string at the end of the name.'\n )\n\n # trim start\n trimStart = IntProperty(\n name = 'Trim Start',\n description = 'Trim the beginning of the name.',\n min = 0,\n max = 50,\n default = 0\n )\n\n # trim end\n trimEnd = IntProperty(\n name = 'Trim End',\n description = 'Trim the ending of the name.',\n min = 0,\n max = 50,\n default = 0\n )\n\n # copy\n class copy(PropertyGroup):\n '''\n Properties that effect how the batch copy name operation is performed.\n '''\n\n # batch type\n batchType = EnumProperty(\n name = 'Batch Type',\n description = '',\n items = [\n ('SELECTED', 'Selected', 'Batch name copy will only effect the object related datablock names within the current selection.'),\n ('OBJECTS', 'All Objects', 'Batch name copy will effect all object related datablock names in the file.')\n ],\n default = 'SELECTED'\n )\n\n # source\n source = 
EnumProperty(\n name = 'Copy',\n description = 'Type of datablock to copy the name from.',\n items = [\n ('OBJECT', 'Object', 'Use the name from the object.', 'OBJECT_DATA', 0),\n ('DATA', 'Object Data', 'Use the name from the object\\'s data.', 'MESH_DATA', 1),\n ('MATERIAL', 'Material', 'Use the name from the active material of the object.', 'MATERIAL', 2),\n ('TEXTURE', 'Texture', 'Use the name from the active material\\'s active texture of the object.', 'TEXTURE', 3),\n ('PARTICLE_SYSTEM', 'Particle System', 'Use the name from the active particle system of the object.', 'PARTICLES', 4),\n ('PARTICLE_SETTINGS', 'Particle Settings', 'Use the name from the active particle system\\'s settings of the object.', 'MOD_PARTICLES', 5)\n ],\n default = 'OBJECT'\n )\n\n # objects\n objects = BoolProperty(\n name = 'Object',\n description = 'Paste to objects.',\n default = False\n )\n\n # object data\n objectData = BoolProperty(\n name = 'Object Data',\n description = 'Paste to object data.',\n default = False\n )\n\n # materials\n materials = BoolProperty(\n name = 'Material',\n description = 'Paste to materials.',\n default = False\n )\n\n # textures\n textures = BoolProperty(\n name = 'Texture',\n description = 'Paste to textures.',\n default = False\n )\n\n # particle systems\n particleSystems = BoolProperty(\n name = 'Particle System',\n description = 'Paste to particle systems.',\n default = False\n )\n\n # particle settings\n particleSettings = BoolProperty(\n name = 'Particle Settings',\n description = 'Paste to particle settings.',\n default = False\n )\n\n # use active object\n useActiveObject = BoolProperty(\n name = 'Use active object',\n description = 'Use the names available from the active object to paste to the other datablock names.',\n default = False\n 
)\n","sub_path":"scripts/addons_extern/item_panel/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":40988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"590158359","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 23 16:31:43 2015\n\n@author: adler\n\"\"\"\n\nfrom os.path import isfile as isfile\nfrom os.path import isdir as isdir\nimport sys\nfrom os import popen as system\n\nanimeinfofilelocation='animeInfo.txt'\n\ndef parseInfo(string):\n l=[k.split(': ')[1] for k in string.strip().strip('\\n').strip().split('\\n')]\n l[2]=int(l[2])\n l[3]=int(l[3])\n return l\n\ndef readAllFile(loc):\n f=open(loc,'rt')\n a=f.read()\n f.close()\n return a\n\ndef readList(loc):\n return [a for a in readAllFile(loc).strip('\\n').strip(' ').strip('\\n').split('\\n')]\n\ndef run(cmd):\n return system('bash -c \"'+cmd+'\"').read()\n\ndef cp(f,p):\n run('cp '+f+' '+p)\n\ndef mkdir_ondemand(a):\n if not isdir(a):\n run('mkdir '+'./%s/'%a)\n\ndef main():\n downfoldername='Downloads'\n downfolder='./%s/'%downfoldername\n archfn='archieved'\n sys.stdout.flush()\n cont=readAllFile(animeinfofilelocation)\n infoParsed=parseInfo(cont)\n fnList=readList(infoParsed[7]+'-vid-fns.txt')\n finalLoc=[downfolder+a for a in fnList]\n aafn=archfn+'/'+infoParsed[7]\n adafn=aafn+'/'+downfoldername\n mkdir_ondemand(archfn)\n mkdir_ondemand(aafn)\n mkdir_ondemand(adafn)\n print('Starting file movimentation...')\n sys.stdout.flush()\n cp('animeInfo.txt',aafn)\n mv(infoParsed[7]+'.txt',aafn)\n mv(infoParsed[7]+'-pag-url.txt',aafn)\n mv(infoParsed[7]+'-vid-url.txt',aafn)\n mv(infoParsed[7]+'-vid-fns.txt',aafn)\n [mv(f,adafn) for f in finalLoc]\n print('This script done its duty.')\n sys.stdout.flush()\n pass\n\nif __name__=='__main__':\n main()\n","sub_path":"simpleAnimeFetch_moveToArchieve.py","file_name":"simpleAnimeFetch_moveToArchieve.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"584743605","text":"import os\n\nfrom rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER\nfrom rlbot.agents.base_independent_agent import BaseIndependentAgent\nfrom rlbot.botmanager.helper_process_request import HelperProcessRequest\nfrom rlbot.parsing.custom_config import ConfigObject\n\n\nclass ScratchBot(BaseIndependentAgent):\n\n def __init__(self, name, team, index):\n super().__init__(name, team, index)\n self.port: int = None\n self.spawn_browser: bool = False\n self.sb3file: str = None\n self.headless: bool = False\n\n def get_helper_process_request(self):\n file = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'scratch_manager.py'))\n self.logger.info(self.sb3file)\n key = 'scratch_helper' + (self.sb3file or '') + str(self.port)\n options = {\n 'port': self.port,\n 'spawn_browser': self.spawn_browser,\n 'sb3-file': self.sb3file,\n 'headless': self.headless\n }\n return HelperProcessRequest(file, key, options=options)\n\n def run_independently(self, terminate_request_event):\n pass\n\n def load_config(self, config_header):\n self.port = config_header.getint('port')\n self.spawn_browser = config_header.getint('spawn_browser')\n self.sb3file = config_header.getpath('sb3file')\n self.headless = config_header.getboolean('headless')\n\n @staticmethod\n def create_agent_configurations(config: ConfigObject):\n params = config.get_header(BOT_CONFIG_AGENT_HEADER)\n params.add_value('port', int, default=42008,\n description='Port to use for websocket communication')\n params.add_value('spawn_browser', bool, default=False,\n description='True if we should automatically open google chrome to the scratch page.')\n params.add_value('sb3file', str, default=None,\n description='Location of the scratch .sb3 file to load automatically')\n params.add_value('headless', bool, default=False,\n description='If true, bot will run automatically with no visible web 
browser')\n","sub_path":"RLBotPack/PacificScienceScratcher/scratch_bot.py","file_name":"scratch_bot.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"452831211","text":"import sys\n\nsys.path.append(\"..\")\n\nimport os\nimport pickle\nimport time\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.keras.models import Sequential, load_model, Model\nfrom evaluate.helper_functions import layerwise_activations\nfrom evaluate.plot import plot_confusion_matrix\nfrom tensorflow.keras.layers import Dropout, Dense, Activation\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras import backend as K\nfrom lstm.lstm_helper_functions import *\nfrom sklearn.metrics import confusion_matrix\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nFORWARD_PATH = '/home/jonas/Desktop/testing/convnet/stateful/5length_64size_1538406857/best_model'\nREVERSE_PATH = '/home/jonas/Desktop/testing/convnet/stateful/5length_64size_1538407083_Reverse/best_model'\n\n# set paramters\nNUM_TIME_STEPS = 5\nBATCH_SIZE = 32\nLSTM_SIZE = 64\nWINDOW_LENGTH = 30\nSOFTMAX_DROPOUT = 0.5\n\nSAVE_PATH = \"/home/jonas/Desktop/testing/convnet\"\n\n# set FEATURE_MODEL to None if no keras model is used\nFEATURE_MODEL = \"/home/jonas/Desktop/testing/convnet/test1/Model.hdf5\"\n# LAYER_NAME can be obtained from calling model.summary()\nLAYER_NAME = \"global_average_pooling1d\"\nCONV_NET = True\n\n# set DBN_MODEL to None if no dbn is used\nDBN_MODEL = None\n# LAYER_INDEX is only relevant for DBN models\nLAYER_INDEX = -1 # -1 for last layer\n\nif FEATURE_MODEL is not None and DBN_MODEL is not None:\n raise AttributeError(\"Keras model and DBN model given, set one or both to None!\")\n\n# get training and validation files\nLOAD_PATH = \"/home/jonas/Desktop/testing/raw_data_30s_intervals/\"\nKEYS = [\"sample\", \"one_hot_label\"]\nDATA_TYPES = [\"float32\", \"int32\"]\n\ntraining_files = []\nvalidation_files = []\n\n# only works if there is now dataset number higher than 50!\nfor file in os.listdir(LOAD_PATH):\n if \"dataset5\" in file:\n validation_files.append(LOAD_PATH + 
file)\n elif \"dataset10\" in file:\n validation_files.append(LOAD_PATH + file)\n elif \"dataset15\" in file:\n validation_files.append(LOAD_PATH + file)\n elif \"dataset20\" in file:\n validation_files.append(LOAD_PATH + file)\n elif \"dataset25\" in file:\n validation_files.append(LOAD_PATH + file)\n else:\n training_files.append(LOAD_PATH + file)\n\ntraining_files = sorted(training_files)\nvalidation_files = sorted(validation_files)\nprint(\"Num training files: \", len(training_files))\nprint(\"Num validation files: \", len(validation_files))\n\n# extract samples from files\nall_train_samples, all_train_labels, all_val_samples, all_val_labels = extract_samples(training_files,\n validation_files,\n KEYS,\n DATA_TYPES,\n False)\nif CONV_NET and FEATURE_MODEL is not None:\n for d, train_samples in enumerate(all_train_samples):\n all_train_samples[d] = train_samples.reshape([-1, train_samples.shape[-1] // 3, 3])\n\n for d, val_samples in enumerate(all_val_samples):\n all_val_samples[d] = val_samples.reshape([-1, val_samples.shape[-1] // 3, 3])\n\nprint(\"after file reading:\\n___________________\")\nfor s, l in zip(all_train_samples, all_train_labels):\n print(s.shape, l.shape)\n\nprint(\"val files: \")\nfor s, l in zip(all_val_samples, all_val_labels):\n print(s.shape, l.shape)\n\n# get outputs from model if required\nif FEATURE_MODEL is not None:\n model = load_model(FEATURE_MODEL)\n model.summary()\n last_layer_model = Model(inputs=model.input, outputs=model.get_layer(LAYER_NAME).output)\n\n for d, train_samples in enumerate(all_train_samples):\n all_train_samples[d] = last_layer_model.predict(train_samples)\n\n for d, val_samples in enumerate(all_val_samples):\n all_val_samples[d] = last_layer_model.predict(val_samples)\n K.clear_session()\n\nif DBN_MODEL is not None:\n pickle_in = open(DBN_MODEL, \"rb\")\n dbn = pickle.load(pickle_in)\n pickle_in.close()\n\n # calculate layerwise activations\n for d, train_samples in enumerate(all_train_samples):\n 
all_train_samples[d] = layerwise_activations(dbn, train_samples, num_activations=1)[LAYER_INDEX]\n\n for d, val_samples in enumerate(all_val_samples):\n all_val_samples[d] = layerwise_activations(dbn, val_samples, num_activations=1)[LAYER_INDEX]\n\n# cut all datasets to have a (whole number * BATCH_SIZE * time_steps) samples:\nnum_samples_batch = BATCH_SIZE * NUM_TIME_STEPS\n\nall_train_samples = [train_samples[:train_samples.shape[0] // num_samples_batch * num_samples_batch, :] for\n train_samples in all_train_samples]\n\nall_train_labels = [train_labels[:train_labels.shape[0] // num_samples_batch * num_samples_batch, :] for\n train_labels in all_train_labels]\n\nall_val_samples = [val_samples[:val_samples.shape[0] // num_samples_batch * num_samples_batch, :] for\n val_samples in all_val_samples]\n\nall_val_labels = [val_labels[:val_labels.shape[0] // num_samples_batch * num_samples_batch, :] for\n val_labels in all_val_labels]\n\nprint(\"after cutting of unusable samples:\\n___________________\")\nfor s, l in zip(all_train_samples, all_train_labels):\n print(s.shape, l.shape)\n\nprint(\"val files: \")\nfor s, l in zip(all_val_samples, all_val_labels):\n print(s.shape, l.shape)\n\nnum_x_signals = all_train_samples[0].shape[1]\nnum_labels = all_train_labels[0].shape[1]\n\n# get forward outputs:\n\n# reshape to match shape (num_series, num_time_steps, num_x_signals):\nall_train_samples_forward = [\n np.reshape(train_samples, [train_samples.shape[0] // NUM_TIME_STEPS, NUM_TIME_STEPS, num_x_signals]) for\n train_samples in all_train_samples]\n\nall_val_samples_forward = [\n np.reshape(val_samples, [val_samples.shape[0] // NUM_TIME_STEPS, NUM_TIME_STEPS, num_x_signals]) for\n val_samples in all_val_samples]\n\nfor counter, train_samples in enumerate(all_train_samples_forward):\n batches = make_batches(train_samples, BATCH_SIZE)\n train_samples = np.concatenate(batches)\n all_train_samples_forward[counter] = train_samples\n\nfor counter, val_samples in 
enumerate(all_val_samples_forward):\n batches = make_batches(val_samples, BATCH_SIZE)\n val_samples = np.concatenate(batches)\n all_val_samples_forward[counter] = val_samples\n\nlength_train_datasets = [d.shape[0] for d in all_train_samples_forward]\nlength_val_datasets = [d.shape[0] for d in all_val_samples_forward]\n\nmodel = load_model(FORWARD_PATH)\nmodel.summary()\nlayer_name = 'lstm'\nlast_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n\nall_train_outputs = []\nfor d, length in enumerate(length_train_datasets):\n train_samples = all_train_samples_forward[d]\n num_samples = length // BATCH_SIZE\n train_outputs = []\n for i in range(num_samples):\n x_batch = train_samples[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]\n outputs = last_layer_model.predict_on_batch(x_batch)\n train_outputs.append(outputs)\n train_outputs = np.concatenate(train_outputs, axis=1)\n train_outputs = train_outputs.reshape([-1, LSTM_SIZE])\n last_layer_model.reset_states()\n all_train_outputs.append(train_outputs)\n\nall_val_outputs = []\nfor d, length in enumerate(length_val_datasets):\n val_samples = all_val_samples_forward[d]\n num_samples = length // BATCH_SIZE\n val_outputs = []\n for i in range(num_samples):\n x_batch = val_samples[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]\n outputs = last_layer_model.predict_on_batch(x_batch)\n val_outputs.append(outputs)\n val_outputs = np.concatenate(val_outputs, axis=1)\n val_outputs = val_outputs.reshape([-1, LSTM_SIZE])\n last_layer_model.reset_states()\n all_val_outputs.append(val_outputs)\nK.clear_session()\n\nforward_train_outputs = all_train_outputs\nforward_val_outputs = all_val_outputs\n\n# get reverse outputs:\n\n# flip datasets:\nall_train_samples_reverse = [np.flip(train_samples, axis=0) for train_samples in all_train_samples]\nall_val_samples_reverse = [np.flip(val_samples, axis=0) for val_samples in all_val_samples]\n\n# reshape to match shape (num_series, num_time_steps, 
num_x_signals):\nall_train_samples_reverse = [\n np.reshape(train_samples, [train_samples.shape[0] // NUM_TIME_STEPS, NUM_TIME_STEPS, num_x_signals]) for\n train_samples in all_train_samples_reverse]\n\nall_val_samples_reverse = [\n np.reshape(val_samples, [val_samples.shape[0] // NUM_TIME_STEPS, NUM_TIME_STEPS, num_x_signals]) for\n val_samples in all_val_samples_reverse]\n\nfor counter, train_samples in enumerate(all_train_samples_reverse):\n batches = make_batches(train_samples, BATCH_SIZE)\n train_samples = np.concatenate(batches)\n all_train_samples_reverse[counter] = train_samples\n\nfor counter, val_samples in enumerate(all_val_samples_reverse):\n batches = make_batches(val_samples, BATCH_SIZE)\n val_samples = np.concatenate(batches)\n all_val_samples_reverse[counter] = val_samples\n\nlength_train_datasets = [d.shape[0] for d in all_train_samples_reverse]\nlength_val_datasets = [d.shape[0] for d in all_val_samples_reverse]\n\nmodel = load_model(REVERSE_PATH)\nmodel.summary()\nlayer_name = 'lstm'\nlast_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n\nall_train_outputs = []\nfor d, length in enumerate(length_train_datasets):\n train_samples = all_train_samples_reverse[d]\n num_samples = length // BATCH_SIZE\n train_outputs = []\n for i in range(num_samples):\n x_batch = train_samples[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]\n outputs = last_layer_model.predict_on_batch(x_batch)\n train_outputs.append(outputs)\n train_outputs = np.concatenate(train_outputs, axis=1)\n train_outputs = train_outputs.reshape([-1, LSTM_SIZE])\n last_layer_model.reset_states()\n all_train_outputs.append(train_outputs)\n\nall_val_outputs = []\nfor d, length in enumerate(length_val_datasets):\n val_samples = all_val_samples_reverse[d]\n num_samples = length // BATCH_SIZE\n val_outputs = []\n for i in range(num_samples):\n x_batch = val_samples[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]\n outputs = last_layer_model.predict_on_batch(x_batch)\n 
val_outputs.append(outputs)\n val_outputs = np.concatenate(val_outputs, axis=1)\n val_outputs = val_outputs.reshape([-1, LSTM_SIZE])\n last_layer_model.reset_states()\n all_val_outputs.append(val_outputs)\nK.clear_session()\n\n# reflip\nreverse_train_outputs = [np.flip(train_samples, axis=0) for train_samples in all_train_outputs]\nreverse_val_outputs = [np.flip(val_samples, axis=0) for val_samples in all_val_outputs]\n\n# train combined softmax layer:\n\n# combine outputs\nall_train_outputs_combined = [np.concatenate([x, y], axis=1) for x, y in\n zip(forward_train_outputs, reverse_train_outputs)]\nfor train_outputs in all_train_outputs_combined:\n print(train_outputs.shape)\n\nall_val_outputs_combined = [np.concatenate([x, y], axis=1) for x, y in zip(forward_val_outputs, reverse_val_outputs)]\nfor val_outputs in all_val_outputs_combined:\n print(val_outputs.shape)\n\nmodel = Sequential()\nmodel.add(Dropout(SOFTMAX_DROPOUT))\nmodel.add(Dense(num_labels, input_shape=(2 * LSTM_SIZE,)))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\nmodel.fit(x=np.concatenate(all_train_outputs_combined, axis=0),\n y=np.concatenate(all_train_labels, axis=0),\n epochs=10,\n batch_size=64,\n validation_data=(np.concatenate(all_val_outputs_combined, axis=0), np.concatenate(all_val_labels, axis=0)),\n verbose=1, callbacks=[EarlyStopping()])\n\nall_outputs = []\nfor d, val_samples in enumerate(all_val_outputs_combined):\n all_outputs.append(model.predict(val_samples))\n print(f'Shape outputs dataset {d}', all_outputs[d].shape)\nK.clear_session()\n\nall_output_classes_reduced, all_true_classes_reduced = get_accuracies_and_plot_labels(all_outputs,\n all_val_labels,\n time_window_length=WINDOW_LENGTH,\n save_path=SAVE_PATH)\n\ncm = confusion_matrix(np.concatenate(all_true_classes_reduced), np.concatenate(all_output_classes_reduced))\nnp.set_printoptions(precision=2)\n\n# Plot normalized confusion matrix\nclass_names 
= ['awake', 'N1', 'N2', 'N3', 'REM']\nplt.figure(figsize=(5.79, 5.79))\nplot_confusion_matrix(cm, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\nif SAVE_PATH is not None:\n plt.savefig(os.path.join(SAVE_PATH, \"confusion_matrix.pdf\"), format='pdf', bbox_inches='tight')\nplt.show()\n","sub_path":"lstm/combine_forward_and_reverse.py","file_name":"combine_forward_and_reverse.py","file_ext":"py","file_size_in_byte":13000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"562965356","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\nprocess = cms.Process(\"FakeSyncExercise\")\noptions = VarParsing.VarParsing ('analysis')\n\n#set default arguments\noptions.inputFiles= 'file:/nfs-3/userdata/jgran/qcd_mu_file1.root, file:/nfs-3/userdata/jgran/qcd_mu_file2.root'\noptions.maxEvents = -1 # -1 means all events\n#options.maxEvents = 100 \n\n# get and parse the command line arguments\noptions.parseArguments()\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(options.inputFiles),\n)\n\nprocess.out = cms.OutputModule(\n \"PoolOutputModule\",\n fileName = cms.untracked.string('ntuple.root'),\n)\n\nprocess.outpath = cms.EndPath(process.out)\nprocess.out.outputCommands = cms.untracked.vstring( 'drop *' )\nprocess.out.outputCommands.extend(cms.untracked.vstring('keep *_*fakeSync*_*_*'))\n\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"Configuration.Geometry.GeometryIdeal_cff\")\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.GlobalTag.globaltag = \"START53_V7::All\"\n\nprocess.load(\"SUSYFakes.Base.bTaggingSequence_cfi\")\nprocess.load(\"SUSYFakes.Base.fakeSync_cfi\")\n\nprocess.p = cms.Path(process.CSVSequence*process.fakeSync)\n","sub_path":"Base/python/run_cfg.py","file_name":"run_cfg.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"370117275","text":"import re\nfrom Metrics.BaseMetric import BaseMetric\n\n\nclass JilbaMetric(BaseMetric):\n\n __ifs = []\n __fores = []\n __whiles = []\n __operators = 0\n __max_depth = 0\n\n def __init__(self, source_code, operators):\n self.__operators = [operator.rstrip('\\n') for operator in operators if operator != '\\n']\n super().__init__(source_code)\n\n def __find_if_count(self):\n self.__ifs += re.findall(r'if ', self.source_code)\n #self.__ifs += re.findall(r'else', ''.join(self.__source_code).rstrip('\\n'))\n\n def __find_while_count(self):\n self.__whiles += re.findall(r'while', self.source_code)\n\n def __find_for_count(self):\n self.__fores += re.findall(r'for', self.source_code)\n\n def __get_all_operators_count(self):\n words = ''.join(self.source_code.split(' '))\n self.__using_operators = [word for word in words if word in self.__operators]\n\n @classmethod\n def get_first_spaces_count(cls, line):\n spaces_count = 0\n for symbol in line:\n if symbol == ' ':\n spaces_count += 1\n else:\n return spaces_count\n\n def __get_max_depth(self):\n all_depths = []\n for line in self.source_code:\n if re.findall(r'if', line) or re.findall(r'else', line) or re.findall(r'while', line) \\\n or re.findall(r'for', line):\n all_depths.append(JilbaMetric.get_first_spaces_count(line))\n\n all_depths = list(filter(lambda element: element != 0, all_depths))\n all_depths = list(filter(lambda element: element / 2, all_depths))\n print(all_depths)\n if len(all_depths) == 0:\n self.__max_depth = 0\n return\n max_spaces_count = max(all_depths)\n min_spaces_count = min(all_depths)\n print('max = {}, min = {}'.format(max_spaces_count, min_spaces_count))\n self.__max_depth = max_spaces_count/min_spaces_count\n\n def __prepare_result(self):\n return {\n 'if count: ': self.__ifs,\n #'while count: ': len(self.__whiles),\n #'for count: ': len(self.__fores),\n #'all_operators: ': self.__using_operators,\n #'all_operators_count: ': len(self.__using_operators),\n 'CL: 
': (len(self.__ifs) + len(self.__fores) + len(self.__whiles)),\n 'cl: ': (len(self.__ifs) + len(self.__fores) + len(self.__whiles)) / len(self.__using_operators),\n 'max_depth: ': self.__max_depth\n }\n\n def prepare_to_metric(self):\n self.__find_for_count()\n self.__find_if_count()\n self.__find_while_count()\n self.__get_all_operators_count()\n self.__get_max_depth()\n return self.__prepare_result()\n\n def get_metric(self):\n return self.prepare_to_metric()\n","sub_path":"Metrics/JilbaMetric.py","file_name":"JilbaMetric.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"218619635","text":"import unittest\nimport json\nfrom data.environment import Environment\n\n\nclass Test_Environment(unittest.TestCase):\n\n def setUp(self):\n self.env = Environment()\n\n #def tearDown(self):\n #self.widget.dispose()\n\n def test_create_environment(self): \n \n self.assertNotEqual(self.env, None)\n\n def test_get_data(self): \n with open('config.json') as f:\n config=json.load(f)\n start_date = config['session']['start_date']\n end_date = config['session']['end_date']\n codes_num = config['session']['codes']\n market = config['session']['market_types']\n features = config['session']['features']\n train_start_date, train_end_date, test_start_date, test_end_date, codes = self.env.get_repo(start_date, end_date,\n codes_num, market)\n window_length=10\n self.env.get_data(train_start_date, train_end_date, features, window_length, market, codes) \n self.assertTrue(len(self.env.states)>0) # states has shape (1,6,10,2) 1,codes_num+1,window_length, features\n self.assertTrue(len(self.env.price_history)>0) #price_history has shape (6,1) codes_num + 1 ; \n #First element in price_history is always 1, means cash\n #print (self.env.states[0].shape)\n #print (self.env.price_history[0].shape)\n #print (self.env.price_history[0])\n\n def test_get_repo(self):\n with open('config.json') as f:\n config=json.load(f)\n start_date = config['session']['start_date']\n end_date = config['session']['end_date']\n codes_num = config['session']['codes']\n market = config['session']['market_types']\n self.train_start_date, self.train_end_date, test_start_date, test_end_date, self.codes = self.env.get_repo(start_date, end_date,\n codes_num, market)\n self.assertTrue(len(self.env.data)>0)\n self.assertTrue(len(self.env.date_set)>0)\n\n # step requires get_data to have been called first to fill the environment.\n def test_step(self):\n self.test_get_data()\n self.env.reset()\n noise_flag = False\n info = self.env.step(None,None,noise_flag)\n # dict_keys(['reward', 
'continue', 'next state', 'weight vector', 'price', 'risk'])\n #print (info.keys())\n #print (info['reward']) # Reward is an integer\n #print (info['continue']) # continue is True/False\n #print (info['next state'].shape) # Shape for next state is (1,6,10,2)\n #print (info['weight vector'].shape) # Shape for weight vector is (1,6)\n #print (info['risk']) #Risk is an integer\n #print (info['price'].shape) #Shape for price is 6,1)\n self.assertEqual(len(info.keys(),6))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/test_environment.py","file_name":"test_environment.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"631320663","text":"# =============================================================================\n# pre_migrate.py - plugin for preparing for migrating classic XR to eXR/fleXR\n#\n# Copyright (c) 2013, Cisco Systems\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# =============================================================================\n\nimport csv\nimport os\nimport re\nimport subprocess\n\nimport pexpect\n\nfrom csmpe.plugins import CSMPlugin\nfrom csmpe.core_plugins.csm_install_operations.utils import ServerType, is_empty, concatenate_dirs\nfrom simple_server_helper import TFTPServer, FTPServer, SFTPServer\nfrom hardware_audit import Plugin as HardwareAuditPlugin\nfrom migration_lib import log_and_post_status, compare_version_numbers\nfrom csmpe.core_plugins.csm_get_inventory.ios_xr.plugin import get_package, get_inventory\n\nMINIMUM_RELEASE_VERSION_FOR_MIGRATION = \"6.1.3\"\n\nNOX_FOR_MAC = \"nox-mac64.bin\"\nNOX_64_BINARY = \"nox-linux-64.bin\"\n\nTIMEOUT_FOR_COPY_CONFIG = 36000\nTIMEOUT_FOR_COPY_IMAGE = 36000\nTIMEOUT_FOR_FPD_UPGRADE = 36000\n\nIMAGE_LOCATION = \"harddisk:/\"\nCONFIG_LOCATION = \"harddiskb:/\"\n\nXR_CONFIG_IN_CSM = \"xr.cfg\"\nADMIN_CONFIG_IN_CSM = \"admin.cfg\"\n\nCONVERTED_XR_CONFIG_IN_CSM = \"xr.iox\"\nCONVERTED_ADMIN_CAL_CONFIG_IN_CSM = \"admin.cal\"\nCONVERTED_ADMIN_XR_CONFIG_IN_CSM = \"admin.iox\"\n\nFINAL_CAL_CONFIG = \"cXR_admin_plane_converted_eXR.cfg\"\nFINAL_XR_CONFIG = \"cXR_xr_plane_converted_eXR.cfg\"\n\nCRYPTO_KEY_FILENAME = \"crypto_auto_key_gen.txt\"\n\n# XR_CONFIG_ON_DEVICE = \"iosxr.cfg\"\n# ADMIN_CAL_CONFIG_ON_DEVICE = \"admin_calvados.cfg\"\n# ADMIN_XR_CONFIG_ON_DEVICE = \"admin_iosxr.cfg\"\n\n\nclass Plugin(CSMPlugin):\n \"\"\"\n A plugin for 
preparing device for migration from\n ASR9K IOS-XR (a.k.a. XR) to ASR9K IOS-XR 64 bit (a.k.a. eXR)\n\n This plugin does the following:\n 1. Check several pre-requisites\n 2. Resize the eUSB partition(/harddiskb:/ on XR)\n 3. Migrate the configurations with NoX and upload them to device\n 4. Copy the eXR image to /harddiskb:/\n 5. Upgrade some FPD's if needed.\n\n Console access is needed.\n \"\"\"\n name = \"Pre-Migrate Plugin\"\n platforms = {'ASR9K'}\n phases = {'Pre-Migrate'}\n os = {'XR'}\n\n node_pattern = re.compile(r\"^\\d+(/\\w+)+$\")\n repo_ip_search_pattern = re.compile(r\"[/@](\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})(;.*?)?/\")\n\n def _save_show_platform(self):\n \"\"\"Save the output of 'show platform' to session log\"\"\"\n\n cmd = \"show platform\"\n # show platform can take more than 1 minute after router reload. Issue No. 47\n output = self.ctx.send(cmd, timeout=600)\n file_name = self.ctx.save_to_file(cmd, output)\n if file_name is None:\n self.ctx.warning(\"Unable to save '{}' output to file: {}\".format(cmd, file_name))\n\n def _ping_repository_check(self, repo_url):\n \"\"\"Test ping server repository ip from device\"\"\"\n repo_ip = re.search(self.repo_ip_search_pattern, repo_url)\n if not repo_ip:\n self.ctx.error(\"Bad hostname for server repository. Please check the settings in CSM.\")\n\n if not repo_ip.group(2):\n vrf = ''\n else:\n vrf = repo_ip.group(2)[1:]\n\n if vrf:\n output = self.ctx.send(\"ping vrf {} {}\".format(vrf, repo_ip.group(1)))\n else:\n output = self.ctx.send(\"ping {}\".format(repo_ip.group(1)))\n\n if \"100 percent\" not in output:\n self.ctx.error(\"Failed to ping server repository {} on device.\".format(repo_ip.group(1)) +\n \"Please check session.log.\")\n\n def _all_configs_supported(self, nox_output):\n \"\"\"Check text output from running NoX on system. 
Only return True if all configs are supported by eXR.\"\"\"\n pattern = r\"Filename[\\sA-Za-z\\n]*[-\\s]*\\S*\\s+\\d*\\s+\\d*\\(\\s*\\d*%\\)\\s+\\d*\\(\\s*\\d*%\\)\\s+\\d*\\(\\s*\\d*%\\)\\s+(\\d*)\"\n match = re.search(pattern, nox_output)\n\n if match:\n if match.group(1) != \"0\":\n return False\n\n return True\n\n def _upload_files_to_server_repository(self, sourcefiles, server, destfilenames):\n \"\"\"\n Upload files from their locations in the host linux system to the FTP/TFTP/SFTP server repository.\n\n Arguments:\n :param sourcefiles: a list of string file paths that each points to a file on the system where CSM is hosted.\n The paths are all relative to csm/csmserver/.\n For example, if the source file is in csm_data/migration/filename,\n sourcefiles = [\"../../csm_data/migration/filename\"]\n :param server: the associated server repository object stored in CSM database\n :param destfilenames: a list of string filenames that the source files should be named after being copied to\n the designated directory in the server repository. 
i.e., [\"thenewfilename\"]\n :return: True if no error occurred.\n \"\"\"\n\n server_type = server.server_type\n selected_server_directory = self.ctx._csm.install_job.server_directory\n if server_type == ServerType.TFTP_SERVER:\n tftp_server = TFTPServer(server)\n for x in range(0, len(sourcefiles)):\n log_and_post_status(self.ctx, \"Copying file {} to {}/{}/{}.\".format(sourcefiles[x],\n server.server_directory,\n selected_server_directory,\n destfilenames[x]))\n try:\n tftp_server.upload_file(sourcefiles[x], destfilenames[x],\n sub_directory=selected_server_directory)\n except:\n self.ctx.error(\"Exception was thrown while \" +\n \"copying file {} to {}/{}/{}.\".format(sourcefiles[x],\n server.server_directory,\n selected_server_directory,\n destfilenames[x]))\n\n elif server_type == ServerType.FTP_SERVER:\n ftp_server = FTPServer(server)\n for x in range(0, len(sourcefiles)):\n log_and_post_status(self.ctx, \"Copying file {} to {}/{}/{}.\".format(sourcefiles[x],\n server.server_directory,\n selected_server_directory,\n destfilenames[x]))\n try:\n ftp_server.upload_file(sourcefiles[x], destfilenames[x],\n sub_directory=selected_server_directory)\n except:\n self.ctx.error(\"Exception was thrown while \" +\n \"copying file {} to {}/{}/{}.\".format(sourcefiles[x],\n server.server_directory,\n selected_server_directory,\n destfilenames[x]))\n elif server_type == ServerType.SFTP_SERVER:\n sftp_server = SFTPServer(server)\n for x in range(0, len(sourcefiles)):\n log_and_post_status(self.ctx, \"Copying file {} to {}/{}/{}.\".format(sourcefiles[x],\n server.server_directory,\n selected_server_directory,\n destfilenames[x]))\n try:\n sftp_server.upload_file(sourcefiles[x], destfilenames[x],\n sub_directory=selected_server_directory)\n except:\n self.ctx.error(\"Exception was thrown while \" +\n \"copying file {} to {}/{}/{}.\".format(sourcefiles[x],\n server.server_directory,\n selected_server_directory,\n destfilenames[x]))\n else:\n self.ctx.error(\"Pre-Migrate does 
not support {} server repository.\".format(server_type))\n\n return True\n\n def _copy_files_to_device(self, server, repository, source_filenames, dest_files, timeout=3600):\n \"\"\"\n Copy files from their locations in the user selected server directory in the FTP/TFTP/SFTP server repository\n to locations on device.\n\n Arguments:\n :param server: the server object fetched from database\n :param repository: the string url link that points to the location of files in the SFTP server repository\n :param source_filenames: a list of string filenames in the designated directory in the server repository.\n :param dest_files: a list of string file paths that each points to a file to be created on device.\n i.e., [\"harddiskb:/asr9k-mini-x64.tar\"]\n :param timeout: the timeout for the sftp copy operation on device. The default is 10 minutes.\n :return: None if no error occurred.\n \"\"\"\n\n if server.server_type == ServerType.FTP_SERVER or server.server_type == ServerType.TFTP_SERVER:\n self._copy_files_from_ftp_tftp_to_device(repository, source_filenames, dest_files, timeout=timeout)\n\n elif server.server_type == ServerType.SFTP_SERVER:\n self._copy_files_from_sftp_to_device(server, source_filenames, dest_files, timeout=timeout)\n\n else:\n self.ctx.error(\"Pre-Migrate does not support {} server repository.\".format(server.server_type))\n\n def _copy_files_from_ftp_tftp_to_device(self, repository, source_filenames, dest_files, timeout=3600):\n \"\"\"\n Copy files from their locations in the user selected server directory in the FTP or TFTP server repository\n to locations on device.\n\n Arguments:\n :param repository: the string url link that points to the location of files in the FTP/TFTP server repository,\n with no extra '/' in the end. 
i.e., tftp://223.255.254.245/tftpboot\n :param source_filenames: a list of string filenames in the designated directory in the server repository.\n :param dest_files: a list of string file paths that each points to a file to be created on device.\n i.e., [\"harddiskb:/asr9k-mini-x64.tar\"]\n :param timeout: the timeout for the 'copy' CLI command on device. The default is 10 minutes.\n :return: None if no error occurred.\n \"\"\"\n\n def send_repo_ip(ctx):\n repo_ip = re.search(self.repo_ip_search_pattern, repository)\n ctx.ctrl.sendline(repo_ip.group(1))\n return True\n\n def send_newline(ctx):\n ctx.ctrl.sendline()\n return True\n\n def error(ctx):\n ctx.message = \"Error copying file.\"\n return False\n\n for x in range(0, len(source_filenames)):\n\n command = \"copy {}/{} {}\".format(repository, source_filenames[x], dest_files[x])\n\n CONFIRM_HOST = re.compile(r\"Address or name of remote host\")\n CONFIRM_FILENAME = re.compile(r\"Destination filename.*\\?\")\n CONFIRM_OVERWRITE = re.compile(r\"Copy : Destination exists, overwrite \\?\\[confirm\\]\")\n COPIED = re.compile(r\".+bytes copied in.+ sec\")\n COPYING = re.compile(r\"C\" * 50)\n NO_SUCH_FILE = re.compile(r\"%Error copying.*\\(Error opening source file\\): No such file or directory\")\n ERROR_COPYING = re.compile(r\"%Error copying\")\n\n PROMPT = self.ctx.prompt\n TIMEOUT = self.ctx.TIMEOUT\n\n events = [PROMPT, CONFIRM_HOST, CONFIRM_FILENAME, CONFIRM_OVERWRITE, COPIED, COPYING,\n TIMEOUT, NO_SUCH_FILE, ERROR_COPYING]\n transitions = [\n (CONFIRM_HOST, [0], 0, send_repo_ip, 120),\n (CONFIRM_FILENAME, [0], 1, send_newline, 120),\n (CONFIRM_OVERWRITE, [1], 2, send_newline, timeout),\n (COPIED, [0, 1, 2], 3, None, 60),\n (COPYING, [0, 1, 2], 2, send_newline, timeout),\n (PROMPT, [3], -1, None, 0),\n (TIMEOUT, [0, 1, 2, 3], -1, error, 0),\n (NO_SUCH_FILE, [0, 1, 2, 3], -1, error, 0),\n (ERROR_COPYING, [0, 1, 2, 3], -1, error, 0),\n ]\n\n log_and_post_status(self.ctx, \"Copying {}/{} to {} on 
device\".format(repository,\n source_filenames[x],\n dest_files[x]))\n\n if not self.ctx.run_fsm(\"Copy file from tftp/ftp to device\", command, events, transitions,\n timeout=80, max_transitions=200):\n self.ctx.error(\"Error copying {}/{} to {} on device\".format(repository,\n source_filenames[x],\n dest_files[x]))\n\n output = self.ctx.send(\"dir {}\".format(dest_files[x]))\n if \"No such file\" in output:\n self.ctx.error(\"Failed to copy {}/{} to {} on device\".format(repository,\n source_filenames[x],\n dest_files[x]))\n\n def _copy_files_from_sftp_to_device(self, server, source_filenames, dest_files, timeout=3600):\n \"\"\"\n Copy files from their locations in the user selected server directory in the SFTP server repository\n to locations on device.\n\n Arguments:\n :param server: the sftp server object\n :param source_filenames: a list of string filenames in the designated directory in the server repository.\n :param dest_files: a list of string file paths that each points to a file to be created on device.\n i.e., [\"harddiskb:/asr9k-mini-x64.tar\"]\n :param timeout: the timeout for the sftp copy operation on device. 
The default is 10 minutes.\n :return: None if no error occurred.\n \"\"\"\n source_path = server.server_url\n\n remote_directory = concatenate_dirs(server.server_directory, self.ctx._csm.install_job.server_directory)\n if not is_empty(remote_directory):\n source_path += \":{}\".format(remote_directory)\n\n def send_password(ctx):\n ctx.ctrl.sendline(server.password)\n \"\"\"\n This was made necessary because during sftp download, when file is large,\n the number of transferred bytes keeps changing and session log takes so much\n time in reading and writing the changing number that it is still doing that\n long after the operation is complete.\n \"\"\"\n self.ctx.pause_session_logging()\n return True\n\n def send_yes(ctx):\n ctx.ctrl.sendline(\"yes\")\n self.ctx.pause_session_logging()\n return True\n\n def reinstall_logfile(ctx):\n self.ctx.resume_session_logging()\n return True\n\n def timeout_error(ctx):\n reinstall_logfile(ctx)\n ctx.message = \"Timed out while copying file from sftp.\"\n return False\n\n def no_such_file_error(ctx):\n reinstall_logfile(ctx)\n ctx.message = \"Copying the file from sftp failed because it is not found in the specified path.\"\n return False\n\n def download_abort_error(ctx):\n reinstall_logfile(ctx)\n ctx.message = \"Copying the file from sftp failed. 
Download was aborted.\"\n return False\n\n for x in range(0, len(source_filenames)):\n if is_empty(server.vrf):\n command = \"sftp {}@{}/{} {}\".format(server.username, source_path, source_filenames[x], dest_files[x])\n else:\n command = \"sftp {}@{}/{} {} vrf {}\".format(server.username, source_path, source_filenames[x],\n dest_files[x], server.vrf)\n\n PASSWORD = re.compile(r\"Password:\")\n CONFIRM_OVERWRITE = re.compile(r\"Overwrite.*\\[yes/no\\]\\:\")\n COPIED = re.compile(r\"bytes copied in\", re.MULTILINE)\n NO_SUCH_FILE = re.compile(r\"src.*does not exist\")\n DOWNLOAD_ABORTED = re.compile(r\"Download aborted.\")\n\n PROMPT = self.ctx.prompt\n TIMEOUT = self.ctx.TIMEOUT\n\n events = [PROMPT, PASSWORD, CONFIRM_OVERWRITE, COPIED, TIMEOUT, NO_SUCH_FILE, DOWNLOAD_ABORTED]\n transitions = [\n (PASSWORD, [0], 1, send_password, timeout),\n (CONFIRM_OVERWRITE, [1], 2, send_yes, timeout),\n (COPIED, [1, 2], -1, reinstall_logfile, 0),\n (PROMPT, [1, 2], -1, reinstall_logfile, 0),\n (TIMEOUT, [0, 1, 2], -1, timeout_error, 0),\n (NO_SUCH_FILE, [0, 1, 2], -1, no_such_file_error, 0),\n (DOWNLOAD_ABORTED, [0, 1, 2], -1, download_abort_error, 0),\n ]\n\n log_and_post_status(self.ctx, \"Copying {}/{} to {} on device\".format(source_path,\n source_filenames[x],\n dest_files[x]))\n\n if not self.ctx.run_fsm(\"Copy file from sftp to device\", command, events, transitions, timeout=80):\n self.ctx.error(\"Error copying {}/{} to {} on device\".format(source_path,\n source_filenames[x],\n dest_files[x]))\n\n output = self.ctx.send(\"dir {}\".format(dest_files[x]))\n if \"No such file\" in output:\n self.ctx.error(\"Failed to copy {}/{} to {} on device\".format(source_path,\n source_filenames[x],\n dest_files[x]))\n\n def _run_migration_on_config(self, fileloc, filename, nox_to_use, hostname):\n \"\"\"\n Run the migration tool - NoX - on the configurations copied out from device.\n\n The conversion/migration is successful if the number under 'Total' equals to\n the number under 
'Known' in the text output.\n\n If it's successful, but not all existing configs are supported by eXR, create two\n new log files for the supported and unsupported configs in session log directory.\n The unsupported configs will not appear on the converted configuration files.\n Log a warning about the removal of unsupported configs, but this is not considered\n as error.\n\n If it's not successful, meaning that there are some configurations not known to\n the NoX tool, in this case, create two new log files for the supported and unsupported\n configs in session log directory. After that, error out.\n\n :param fileloc: string location where the config needs to be converted/migrated is,\n without the '/' in the end. This location is relative to csm/csmserver/\n :param filename: string filename of the config\n :param nox_to_use: string name of NoX binary executable.\n :param hostname: hostname of device, as recorded on CSM.\n :return: None if no error occurred.\n \"\"\"\n\n try:\n commands = [subprocess.Popen([\"chmod\", \"+x\", nox_to_use]),\n subprocess.Popen([nox_to_use, \"-f\", os.path.join(fileloc, filename)],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n ]\n\n nox_output, nox_error = commands[1].communicate()\n except OSError:\n self.ctx.error(\"Failed to run the configuration migration tool {} on config file {} - OSError.\".format(\n nox_to_use,\n os.path.join(fileloc, filename))\n )\n\n if nox_error:\n self.ctx.error(\"Failed to run the configuration migration tool on the admin configuration \" +\n \"we retrieved from device - {}.\".format(nox_error))\n\n if filename.split('.')[0] == 'admin':\n if (not os.path.isfile(os.path.join(fileloc, CONVERTED_ADMIN_CAL_CONFIG_IN_CSM))) or \\\n (not os.path.isfile(os.path.join(fileloc, CONVERTED_ADMIN_XR_CONFIG_IN_CSM))):\n self.ctx.error(\"Failed to convert the ASR9K admin configuration with NoX tool.\")\n\n elif not os.path.isfile(os.path.join(fileloc, CONVERTED_XR_CONFIG_IN_CSM)):\n self.ctx.error(\"Failed to 
convert the ASR9K IOS-XR configuration with NoX tool.\")\n\n conversion_success = False\n\n match = re.search(r\"Filename[\\sA-Za-z\\n]*[-\\s]*\\S*\\s+(\\d*)\\s+\\d*\\(\\s*\\d*%\\)\\s+\\d*\\(\\s*\\d*%\\)\\s+(\\d*)\",\n nox_output)\n\n if match:\n if match.group(1) == match.group(2):\n conversion_success = True\n\n if filename == ADMIN_CONFIG_IN_CSM:\n supported_log_name = \"supported_config_in_admin_configuration\"\n unsupported_log_name = \"unsupported_config_in_admin_configuration\"\n else:\n supported_log_name = \"supported_config_in_xr_configuration\"\n unsupported_log_name = \"unsupported_config_in_xr_configuration\"\n\n if conversion_success:\n\n if self._all_configs_supported(nox_output):\n log_and_post_status(self.ctx, \"Configuration {} was migrated successfully. \".format(filename) +\n \"No unsupported configurations found.\")\n else:\n self._create_config_logs(os.path.join(fileloc, filename.split(\".\")[0] + \".csv\"),\n supported_log_name, unsupported_log_name,\n hostname, filename)\n\n log_and_post_status(self.ctx, \"Configurations that are unsupported in eXR were removed in \" +\n \"{}. Please look into {} and {}.\".format(filename,\n unsupported_log_name,\n supported_log_name))\n else:\n self._create_config_logs(os.path.join(fileloc, filename.split(\".\")[0] + \".csv\"),\n supported_log_name, unsupported_log_name, hostname, filename)\n\n self.ctx.error(\"Unknown configurations found. Please look into {} \".format(unsupported_log_name) +\n \"for unprocessed configurations, and {} for known/supported configurations\".format(\n unsupported_log_name, supported_log_name)\n )\n\n def _resize_eusb(self):\n \"\"\"Resize the eUSB partition on device - Run the /pkg/bin/resize_eusb script on device(from ksh).\"\"\"\n\n output = self.ctx.send(\"run /pkg/bin/resize_eusb\")\n\n if \"Pre-Migration Operation Completed.\" not in output:\n self.ctx.error(\"Pre-Migrate partition check failed. 
Please check session.log.\")\n # output = self.ctx.send(\"show media\")\n\n # eusb_size = re.search(\"/harddiskb:.* ([.\\d]+)G\", output)\n\n # if not eusb_size:\n # self.ctx.error(\"Unexpected output from CLI 'show media'.\")\n\n # if eusb_size.group(1) < \"1.0\":\n # self.ctx.error(\"/harddiskb:/ is smaller than 1 GB after running /pkg/bin/resize_eusb. \" +\n # \"Please make sure that the device has either RP2 or RSP4.\")\n\n def _check_fpd(self, fpd_relevant_nodes):\n \"\"\"\n Check the versions of migration related FPD's on device. Return a dictionary\n that tells which FPD's on which nodes require successful FPD upgrade later on.\n\n :param fpd_relevant_nodes: a dictionary. Keys are strings representing all node locations\n on device parsed from output of \"admin show platform\".\n Values are integers. Value can either be 0 or 1.\n value 1 means that we actually will need to make sure that the\n FPD upgrade later on for this node location completes successfully,\n value 0 means that we don't need to check if the\n FPD upgrade later on for this node location is successful or not.\n :return: a dictionary with string FPD type as key, and a set of the string names of\n node locations as value.\n \"\"\"\n fpdtable = self.ctx.send(\"show hw-module fpd location all\")\n\n subtype_to_locations_need_upgrade = {}\n\n last_location = None\n for line in fpdtable.split('\\n'):\n\n first_word = line.split(' ', 1)[0]\n\n if self.node_pattern.match(first_word):\n # since fpd_relevant_nodes is loaded from db, the keys are\n # unicode instead of byte strings\n indicator = fpd_relevant_nodes.get(unicode(first_word, encoding=\"latin1\"))\n # indicator is 1:\n # Detect a new node(RSP/RP/LC/FC) of which fpds we'll need to check\n # if upgrade goes successful\n # indicator is None:\n # Detect node that is not found in output of \"admin show platform\"\n # we need to check if FPD upgrade goes successful in this case\n if indicator == 1 or indicator is None:\n last_location = 
first_word\n # indicator is 0:\n # Detect node to be PEM/FAN or some other unsupported hardware in eXR.\n # we don't care if the FPD upgrade for these is successful or not\n # so we update last_location to None\n else:\n last_location = None\n\n # Found some fpd that needs upgrade\n if last_location and len(line) >= 79 and line[76:79] == \"Yes\":\n fpdtype_end_idx = 51\n while line[fpdtype_end_idx] != ' ':\n fpdtype_end_idx += 1\n\n fpdtype = line[51:fpdtype_end_idx]\n\n if fpdtype not in subtype_to_locations_need_upgrade:\n # it is possible to have duplicates, so using set here\n subtype_to_locations_need_upgrade[fpdtype] = set()\n subtype_to_locations_need_upgrade[fpdtype].add(last_location)\n\n return subtype_to_locations_need_upgrade\n\n def _check_if_fpd_package_installed(self):\n \"\"\"\n Check if the FPD package is already active on device.\n Error out if not.\n\n :return: None if FPD package is active, error out if not.\n \"\"\"\n active_packages = self.ctx.send(\"show install active summary\")\n\n match = re.search(\"fpd\", active_packages)\n\n if not match:\n self.ctx.error(\"No FPD package is active on device. Please install the FPD package on device first.\")\n\n return\n\n def _ensure_updated_fpd(self, fpd_relevant_nodes):\n \"\"\"\n Upgrade FPD's if needed.\n Steps:\n 1. Check version of the migration related FPD's. Get the dictionary\n of FPD types mapped to locations for which we need to check for\n upgrade successs.\n 2. Force install the FPD types that need upgrade on all locations.\n Check FPD related sys log to make sure all necessary upgrades\n defined by the dictionary complete successfully.\n\n :param fpd_relevant_nodes: a dictionary. Keys are strings representing all node locations\n on device parsed from output of \"admin show platform\".\n Values are integers. 
Value can either be 0 or 1.\n value 1 means that we actually will need to make sure that the\n FPD upgrade later on for this node location completes successfully,\n value 0 means that we don't need to check if the\n FPD upgrade later on for this node location is successful or not.\n\n :return: True if no error occurred.\n \"\"\"\n # check for the FPD version, if FPD needs upgrade,\n log_and_post_status(self.ctx, \"Checking FPD versions...\")\n subtype_to_locations_need_upgrade = self._check_fpd(fpd_relevant_nodes)\n\n if subtype_to_locations_need_upgrade:\n\n # Force upgrade all FPD's in RP and Line card that need upgrade, with the FPD pie or both the FPD\n # pie and FPD SMU depending on release version\n self._upgrade_all_fpds(subtype_to_locations_need_upgrade)\n\n return True\n\n def _upgrade_all_fpds(self, subtype_to_locations_need_upgrade):\n \"\"\"Force upgrade certain FPD's on all locations. Check for success. \"\"\"\n def send_newline(ctx):\n ctx.ctrl.sendline()\n return True\n\n def send_yes(ctx):\n ctx.ctrl.sendline(\"yes\")\n return True\n\n def error(ctx):\n ctx.message = \"Error upgrading FPD.\"\n return False\n\n def timeout(ctx):\n ctx.message = \"Timeout upgrading FPD.\"\n return False\n\n for fpdtype in subtype_to_locations_need_upgrade:\n\n log_and_post_status(self.ctx, \"FPD upgrade - start to upgrade FPD {} on all locations\".format(fpdtype))\n\n CONFIRM_CONTINUE = re.compile(r\"Continue\\? \\[confirm\\]\")\n CONFIRM_SECOND_TIME = re.compile(r\"Continue \\? 
\\[no\\]:\")\n UPGRADE_END = re.compile(r\"FPD upgrade has ended.\")\n\n PROMPT = self.ctx.prompt\n TIMEOUT = self.ctx.TIMEOUT\n\n events = [PROMPT, CONFIRM_CONTINUE, CONFIRM_SECOND_TIME, UPGRADE_END, TIMEOUT]\n transitions = [\n (CONFIRM_CONTINUE, [0], 1, send_newline, TIMEOUT_FOR_FPD_UPGRADE),\n (CONFIRM_SECOND_TIME, [0, 1], 2, send_yes, TIMEOUT_FOR_FPD_UPGRADE),\n (UPGRADE_END, [1, 2], 3, None, 120),\n (PROMPT, [3], -1, None, 0),\n (PROMPT, [1, 2], -1, error, 0),\n (TIMEOUT, [0, 1, 2], -1, timeout, 0),\n\n ]\n\n if not self.ctx.run_fsm(\"Upgrade FPD\",\n \"admin upgrade hw-module fpd {} location all\".format(fpdtype),\n events, transitions, timeout=80):\n self.ctx.error(\"Error while upgrading FPD subtype {}. Please check session.log\".format(fpdtype))\n\n fpd_log = self.ctx.send(\"show log | include fpd\", timeout=1800)\n\n for location in subtype_to_locations_need_upgrade[fpdtype]:\n\n pattern = r\"Successfully\\s*(?:downgrade|upgrade)\\s*{}.*location\\s*{}\".format(fpdtype, location)\n fpd_upgrade_success = re.search(pattern, fpd_log)\n\n if not fpd_upgrade_success:\n self.ctx.error(\"Failed to upgrade FPD subtype {} on location {}. 
\".format(fpdtype, location) +\n \"Please check session.log.\")\n return True\n\n def _create_config_logs(self, csvfile, supported_log_name, unsupported_log_name, hostname, filename):\n \"\"\"\n Create two logs for migrated configs that are unsupported and supported by eXR.\n They are stored in the same directory as session log, for user to view.\n\n :param csvfile: the string csv filename generated by running NoX on original config.\n :param supported_log_name: the string filename for the supported configs log\n :param unsupported_log_name: the string filename for the unsupported configs log\n :param hostname: string hostname of device, as recorded on CSM.\n :param filename: string filename of original config\n :return: None if no error occurred\n \"\"\"\n\n if not os.path.isfile(os.path.join(csvfile)):\n self.ctx.error(\"Missing the csv file {} that should have been generated by the NoX tool\".format(csvfile) +\n \" during the configuration conversion. Failed to write diagnostic files.\")\n supported_config_log = os.path.join(self.ctx.log_directory, supported_log_name)\n unsupported_config_log = os.path.join(self.ctx.log_directory, unsupported_log_name)\n try:\n with open(supported_config_log, 'w') as supp_log:\n with open(unsupported_config_log, 'w') as unsupp_log:\n supp_log.write('Configurations Known and Supported to the NoX Conversion Tool \\n \\n')\n\n unsupp_log.write('Configurations Unprocessed by the NoX Conversion Tool (Comments, Markers,' +\n ' or Unknown/Unsupported Configurations) \\n \\n')\n\n supp_log.write('{0[0]:<8} {0[1]:^20} \\n'.format((\"Line No.\", \"Configuration\")))\n unsupp_log.write('{0[0]:<8} {0[1]:^20} \\n'.format((\"Line No.\", \"Configuration\")))\n with open(csvfile, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) >= 3 and row[1].strip() == \"KNOWN_SUPPORTED\":\n supp_log.write('{0[0]:<8} {0[1]:<} \\n'.format((row[0], row[2])))\n elif len(row) >= 3:\n unsupp_log.write('{0[0]:<8} {0[1]:<} 
\\n'.format((row[0], row[2])))\n\n msg = \"\\n \\nPlease find original configuration in csm_data/migration/{}/{} \\n\".format(hostname,\n filename)\n supp_log.write(msg)\n unsupp_log.write(msg)\n if filename.split('.')[0] == 'admin':\n msg2 = \"The final converted configuration is in csm_data/migration/\" + \\\n hostname + \"/\" + CONVERTED_ADMIN_CAL_CONFIG_IN_CSM + \\\n \" and csm_data/migration/\" + hostname + \"/\" + CONVERTED_ADMIN_XR_CONFIG_IN_CSM\n else:\n msg2 = \"The final converted configuration is in csm_data/migration/\" + \\\n hostname + \"/\" + CONVERTED_XR_CONFIG_IN_CSM\n supp_log.write(msg2)\n unsupp_log.write(msg2)\n csvfile.close()\n unsupp_log.close()\n supp_log.close()\n except:\n self.ctx.error(\"Error writing diagnostic files - in \" + self.ctx.log_directory +\n \" during configuration migration.\")\n\n def _filter_server_repository(self, server):\n \"\"\"Filter out LOCAL server repositories and only keep TFTP, FTP and SFTP\"\"\"\n if not server:\n self.ctx.error(\"Pre-Migrate missing server repository object.\")\n if server.server_type != ServerType.FTP_SERVER and server.server_type != ServerType.TFTP_SERVER and \\\n server.server_type != ServerType.SFTP_SERVER:\n self.ctx.error(\"Pre-Migrate does not support \" + server.server_type + \" server repository.\")\n\n def _save_config_to_csm_data(self, files, admin=False):\n \"\"\"\n Copy the admin configuration or IOS-XR configuration\n from device to csm_data.\n\n :param files: the full local file paths for configs.\n :param admin: True if asking for admin config, False otherwise.\n :return: None\n \"\"\"\n\n try:\n cmd = \"admin show run\" if admin else \"show run\"\n output = self.ctx.send(cmd, timeout=TIMEOUT_FOR_COPY_CONFIG)\n init_line = 'Building configuration...'\n ind = output.rfind(init_line)\n\n except pexpect.TIMEOUT:\n self.ctx.error(\"CLI '{}' timed out after 1 hour.\".format(cmd))\n\n for file_path in files:\n # file = '../../csm_data/migration/' + filename\n file_to_write = 
open(file_path, 'w+')\n if ind >= 0:\n file_to_write.write(output[(ind + len(init_line)):])\n else:\n file_to_write.write(output)\n file_to_write.close()\n\n def _handle_configs(self, hostname, server, repo_url, fileloc, nox_to_use, config_filename):\n \"\"\"\n 1. Copy admin and XR configs from device to TFTP/FTP/SFTP server repository.\n 2. Copy admin and XR configs from server repository to csm_data/migration//\n 3. Copy admin and XR configs from server repository to session log directory as\n show-running-config.txt and admin-show-running-config.txt for comparisons\n after Migrate or Post-Migrate. (Diff will be generated.)\n 4. Run NoX on admin config first. This run generates 1) eXR admin/calvados config\n and POSSIBLY 2) eXR XR config.\n 5. Run NoX on XR config if no custom eXR config has been selected by user when\n Pre-Migrate is scheduled. This run generates eXR XR config.\n 6. Merge either the selected eXR custom config or the converted XR config with the converted\n eXR XR config to form a new file - cXR_xr_plane_converted_eXR.cfg\n 7. Copy the eXR admin/calvados config and the cXR_xr_plane_converted_eXR.cfg to the server\n repository and then from there to device.\n Note if user selected custom eXR XR config, that will be uploaded instead of\n the NoX migrated original XR config.\n\n :param hostname: string hostname of device, as recorded on CSM.\n :param repo_url: the URL of the selected server repository. i.e., tftp://223.255.254.245/tftpboot\n :param fileloc: the string path ../../csm_data/migration/