diff --git "a/6608.jsonl" "b/6608.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6608.jsonl"
@@ -0,0 +1,222 @@
+{"seq_id":"3643797325","text":"from random import randint\nfrom time import sleep\nfrom operator import itemgetter\njogos = dict()\njogos['jogador1'] = randint(1, 6)\njogos['jogador2'] = randint(1, 6)\njogos['jogador3'] = randint(1, 6)\njogos['jogador4'] = randint(1, 6)\nranking = list()\nfor k, v in jogos.items():\n    print(f'o {k} tirou {v}')\n    sleep(0.75)\nranking = sorted(jogos.items(), key=itemgetter(1), reverse=True)\nprint(' == RANKING ==')\nfor i, v in enumerate(ranking):\n    print(f'{i + 1} lugar: {v[0]} com {v[1]}')\n","repo_name":"josefcaique/ProjetoEstudosPython","sub_path":"Dicionario/ex 91.py","file_name":"ex 91.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"27967038316","text":"\"\"\"Integração Numérica\"\"\"\nfrom math import sin, pi\n\n#Cada subintervalo é aproximado com um trapézio `area = ½dx[f(a)+f(b)]`\n\ndef trapezoidal(f, x0, xf, dx):\n    \"\"\"\n    f: função a integrar\n    x0: inicio do intervalo de integração\n    xf: fim do interval de integração\n    dx: tamanho dos subintervalos de integração\n    \"\"\"\n\n    # A soma das sub-áreas\n    soma = 0\n    \n    # Precisamos de dois ponto no intervalo dx\n    x1 = x0\n    x2 = x0 + dx\n    \n    while x1<=xf: \n        soma += 0.5*dx*( f(x1) + f(x2) ) \n        x1 += dx\n        x2 += dx\n    \n    return soma\n\n# Testando com f=sin e dx=0.1 no intervalo [0, pi/2]\n# O resultado esperado é 1\n\ntrapezoidal(sin, 0, pi/2, 0.1)\ntrapezoidal(sin, 0, pi/2, 0.1)","repo_name":"dwbewiahn/DiscreteMath","sub_path":"MatDiscreta/trapezoidal.py","file_name":"trapezoidal.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"75125750882","text":"import unittest\n\nfrom dateutil.parser import parse as parse_dt\nfrom unittest.mock import patch\n\nfrom bot.dataset_parser import Parser\nfrom bot.exceptions import RequestError, ResponseError\nfrom bot.utiles import tf_to_minutes\n\n\nclass Test(unittest.TestCase):\n    def __mocked_response(*args, **kwargs):\n        \"\"\"Function to mock requests\"\"\"\n        class MockResponse:\n            def __init__(self, json_data, status_code):\n                self.json_data = json_data\n                self.status_code = status_code\n\n            def json(self):\n                return self.json_data\n\n        if 'https://api.binance.com' in args[0]:\n            # fail of server\n            if kwargs[\"params\"][\"symbol\"] == \"fail_500\":\n                return MockResponse([], 500)\n            # fail of request\n            if kwargs[\"params\"][\"symbol\"] == \"fail_300\":\n                return MockResponse([], 300)\n            return MockResponse(\n                # filling with random values\n                [\n                    [\n                        1499040000000,  # Kline open time\n                        \"0.01634790\",  # Open price\n                        \"0.80000000\",  # High price\n                        \"0.01575800\",  # Low price\n                        \"0.01577100\",  # Close price\n                        \"148976.11427815\",  # Volume\n                        1499644799999,  # Kline Close time\n                        \"2434.19055334\",  # Quote asset volume\n                        308,  # Number of trades\n                        \"1756.87402397\",  # Taker buy base asset volume\n                        \"28.46694368\",  # Taker buy quote asset volume\n                        \"0\"  # Unused field, ignore.\n                    ]\n                ],\n                200\n            )\n\n        return MockResponse(None, 404)\n\n    @patch('bot.dataset_parser.sleep')\n    @patch('bot.dataset_parser.datetime')\n    @patch('bot.dataset_parser.requests.get', side_effect=__mocked_response)\n    def test_get_table(self, send_request_mock, now_mock, _):\n        \"\"\"Checks correct get_table request\"\"\"\n        now_mock.now.return_value = parse_dt(\"2023-01-20T00:00:00\")\n\n        for tf in ['1m', 
'5m', '1h']:\n parser = Parser('SOLUSDT', tf, ignore_gaps=True)\n for args in [\n [\"2023-01-15T00:00:00\", \"2023-01-20T00:00:00\"],\n [5 * 24 * 60 // tf_to_minutes(tf)]\n ]:\n table = parser.get_table(*args)\n self.assertEqual(\n table.shape[0],\n len(send_request_mock.call_args_list),\n f\"{tf}, {args}\"\n )\n\n sum_limit = sum(\n int(call[1]['params']['limit'])\n for call in send_request_mock.call_args_list\n )\n self.assertEqual(\n sum_limit,\n 5 * 24 * 60 // tf_to_minutes(tf),\n f\"{tf}, {args}\"\n )\n\n priv_time = \"1673740800000\"\n for call in send_request_mock.call_args_list:\n self.assertEqual(\n priv_time,\n call[1]['params']['startTime'],\n f\"{tf}, {args}\"\n )\n priv_time = call[1]['params']['endTime']\n self.assertEqual(priv_time, \"1674172800000\", f\"{tf}, {args}\")\n\n send_request_mock.call_args_list = []\n\n @patch('bot.dataset_parser.sleep')\n @patch('bot.dataset_parser.requests.get', side_effect=__mocked_response)\n def test_exc(self, send_request_mock, _):\n \"\"\"checks fail get_table request\"\"\"\n # no response\n parser = Parser('fail_500', '1m', ignore_gaps=True)\n try:\n _ = parser.get_table(\"2023-01-15T00:00:00\", \"2023-01-15T00:01:00\")\n except ResponseError:\n pass\n else:\n raise \"No crush\"\n self.assertEqual(len(send_request_mock.call_args_list), 5)\n\n # dont ignore gaps\n parser = Parser('SOLUSDT', '1m', ignore_gaps=False)\n try:\n _ = parser.get_table(\"2023-01-15T00:00:00\", 123)\n except RequestError:\n pass\n else:\n raise \"Gaps ignored\"\n\n # wrong request\n parser = Parser('fail_300', '1m', ignore_gaps=True)\n try:\n _ = parser.get_table(10)\n except RequestError:\n pass\n else:\n raise \"No crush\"\n","repo_name":"ifrair/Trading-bot","sub_path":"tests/test_dataset_parser.py","file_name":"test_dataset_parser.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71265569441","text":"import os\n\nfrom langchain.document_loaders import TextLoader\nfrom langchain.text_splitter import (\n RecursiveCharacterTextSplitter,\n Language,\n)\n\n\nclass JavaSourceProcessor:\n\n def __init__(self):\n self.exclusions = ['.git', '.jcheck', '.github', 'make', 'build', 'out', 'target', 'bin', 'classes', 'lib',\n 'test', 'doc', 'demo']\n\n def load_source(self, source_dir: str):\n docs = []\n for root, dirs, files in os.walk(source_dir, topdown=True):\n dirs[:] = [d for d in dirs if d not in self.exclusions]\n for file in files:\n if file.endswith('.java'):\n text_loader = TextLoader(os.path.join(root, file), encoding='utf-8')\n docs.extend(text_loader.load_and_split())\n return docs\n\n def split_java_files(self, source_docs):\n java_code_splitter = RecursiveCharacterTextSplitter.from_language(\n language=Language.JAVA, chunk_size=100, chunk_overlap=0\n )\n java_docs = java_code_splitter.split_documents(source_docs)\n return java_docs\n\n\nif __name__ == '__main__':\n java_source_loader = JavaSourceProcessor()\n docs = java_source_loader.load_source(\"TODO\")\n print(len(docs))\n print(docs[0])\n java_source_loader.split_java_files(docs)\n\n","repo_name":"lranasingha/langchain-agent","sub_path":"dataloader/JavaSourceProcessor.py","file_name":"JavaSourceProcessor.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11669404314","text":"from random import sample, seed\nseed()\n\ndef lista_ordenada(l):\n estaordenada = False\n aux = l[:]\n aux.sort()\n if 
(aux == l):\n estaordenada = True\n return estaordenada\n\ndef sa(a,b):\n if len(a) > 1: a[0],a[1] = a[1],a[0]\n return a,b\ndef sb(a,b):\n if len(b) > 1: b[0],b[1] = b[1],b[0]\n return a,b\ndef ss(a,b):\n sa(a,b)\n sb(a,b)\n return a,b\ndef pa(a,b):\n if len(b) > 0:\n a.insert(0, b[0])\n b.pop(0)\n return a,b\ndef pb(a,b):\n if len(a) > 0:\n b.insert(0, a[0])\n a.pop(0)\n return a,b\ndef ra(a,b):\n if len(a) > 1: a.append(a.pop(0))\n return a,b\ndef rb(a,b):\n if len(b) > 1: b.append(b.pop(0))\n return a,b\ndef rr(a,b):\n ra(a,b)\n rb(a,b)\n return a,b\ndef rra(a,b):\n if len(a) > 1: a.insert(0, a.pop())\n return a,b\ndef rrb(a,b):\n if len(b) > 1: b.insert(0, b.pop())\n return a,b\ndef rrr(a,b):\n rra(a,b)\n rrb(a,b)\n return a,b\n\nif __name__ == \"__main__\":\n # Generación de la pila a\n n = 5 # número de elementos de la pila\n a = sample(range(1, n+1), n)\n #a = [17, 5, 13, 20, 6, 12, 4, 3, 7, 18, 1, 14, 8, 10, 16, 2, 11, 15, 19, 9] # valores de ejemplo\n a_original = a[:]\n b = []\n #print('a:', a)\n #print('b:', b)\n contador = 0\n while len(a)>1: # se repite hasta que solo quede un elemento en la pila a\n # buscamos el mínimo y vamos haciendo ra hasta que el mínimo esté el primero\n while min(a) != a[0]:\n ra(a,b); contador += 1\n pb(a,b); contador += 1 # luego hacemos pb\n # luego se hace pa pa pa pa hasta que no quede nadie en la pila b\n while len(b)>0:\n pa(a,b); contador += 1\n \n #print('a:', a)\n #print('b:', b)\n print(\"contador:\", contador)\n\n ### PROCEDIMIENTO MEJORADO\n contador = 0\n a = a_original[:]\n print(a)\n while len(a) > 1 and not lista_ordenada(a): # se repite hasta que solo quede un elemento en la pila a\n # buscamos el mínimo y vamos haciendo ra o rra hasta que el mínimo esté el primero\n while min(a) != a[0]:\n #print(\"min(a)=\", min(a))\n if a.index(min(a)) <= int(len(a)/2):\n #print(min(a))\n #print(\"index del min y mitad:\", a.index(min(a)), int(len(a)/2))\n ra(a,b)\n else:\n rra(a,b)\n contador += 1\n print(a,b)\n if not lista_ordenada(a):\n pb(a,b); contador += 1; print(a,b) # luego hacemos pb\n # luego se hace pa pa pa pa hasta que no quede nadie en la pila b\n while len(b) > 0:\n pa(a,b); contador += 1; print(a,b)\n print(\"contador:\", contador)\n","repo_name":"financieras/campus","sub_path":"pilas/pilas05.py","file_name":"pilas05.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38478452660","text":"import collections\nimport time\nimport pytest\nimport unittest\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__),\"..\",\"..\"))\nimport json\nfrom colorama import Fore, Back, Style\nimport HtmlTestRunner\nfrom Utils import utils as utils\n\nfrom Utils.BaseClass import BaseClass\nfrom POM.HomePage import HomePage\nfrom POM.DynamicContentPage import DynamicContentPage\nfrom selenium.webdriver import ActionChains\n\n@pytest.mark.usefixtures(\"test_setup\")\nclass TestDynamicContent(BaseClass):\n\n def test_DynamicContent(self):\n log = self.get_Logger()\n driver = self.driver\n hp = HomePage(driver)\n hp.clickDynamicContentLink()\n dc = DynamicContentPage(driver)\n #first_paragraph = dc.returnTitleFirstParagraph()\n #second_paragraph = dc.returnTitleSecondParagraph()\n #third_paragraph = dc.returnTitleThirdParagraph\n aux1 = dc.returnAllParagraphs()\n list1 = []\n n = 1\n for i in aux1:\n if n == 1:\n #print(\"No sumarlo\")\n n = n+1\n\n else:\n list1.append(i.text)\n n = n+1\n print(list1)\n time.sleep(3)\n 
dc.clickHereLink()\n time.sleep(1)\n aux2 = dc.returnAllParagraphs()\n list2 = []\n m = 1\n for i in aux2:\n if m == 1:\n #print(\"No sumarlo\")\n m = m+1\n\n else:\n list2.append(i.text)\n m = m+1\n print(list2)\n\n # esta es una forma de comparar dos listas y ver si son iguales entre ellas\n #if collections.Counter(list1) == collections.Counter(list2):\n # print(\"Las listas son iguales\")\n #else:\n # print(\"Las listas no son iguales, los segundos párrafos son distintos a los primeros, estamos ante contenido dinámico\")\n\n # esta es otra forma de comparar dos listas y ver si son iguales entre ellas\n if set(list1) == set(list2):\n print(\"Las listas son iguales\")\n else:\n print(\"Las listas no son iguales, los segundos párrafos son distintos a los primeros, estamos ante contenido dinámico\")\n url = driver.current_url\n assert url == \"https://the-internet.herokuapp.com/dynamic_content?with_content=static\"\n time.sleep(3)\n","repo_name":"gonzamol17/QuintoProyectoPython-heroKuapp","sub_path":"Test/test_Dynamic_Content.py","file_name":"test_Dynamic_Content.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16815678224","text":"import math\nimport numpy as np\n\n# https://kr.mathworks.com/help/matlab/ref/acos.html\npi = 2.0 * math.acos(0)\n\nglobal y, x, r, ranges\n\ndef init():\n return np.full(100, 0.0), np.full(100, 0.0), np.full(100, 0.0), []\n\ndef convertToRange():\n for i in range(0, n):\n loc = math.fmod(2 * pi + math.atan2(y[i], x[i]), 2 * pi)\n reach = 2.0 * math.asin(r[i] / 2.0 / 8.0)\n ranges.append((loc - reach, loc + reach))\n\n ranges.sort()\n\nINF = 987654321\n\ndef solveLinear(begin, end):\n used = 0\n idx = 0\n\n while(begin < end):\n maxCover = -1\n while (idx < n and ranges[idx][0] <= begin):\n maxCover = max(maxCover, ranges[idx][1])\n idx += 1\n\n if maxCover <= begin:\n return INF\n begin = maxCover\n used += 1\n\n return used\n\ndef solveCircular():\n ranges.sort()\n\n ret = INF\n for i in range(0, n):\n (first, second) = ranges[i]\n if first <= 0 or second >= 2 * pi:\n begin = math.fmod(second, 2*pi)\n end = math.fmod(first + 2*pi, 2*pi)\n ret = min(ret, 1 + solveLinear(begin, end))\n\n if ret == INF:\n return 'IMPOSSIBLE'\n else:\n return ret\n\nn = 10\ny, x, r, ranges = init()\ny[0], x[0], r[0] = 7.02066050, -3.83540431, 4.0\ny[1], x[1], r[1] = -7.23257714, -3.41903904, 2.0\ny[2], x[2], r[2] = 0.0, -8.0, 8.0\ny[3], x[3], r[3] = -8.0, -0.0, 4.8\ny[4], x[4], r[4] = -6.47213595, 4.70228202, 3.2\ny[5], x[5], r[5] = -4.70228202, 6.47213595, 4.8\ny[6], x[6], r[6] = 7.60845213, -2.48213595, 1.6\ny[7], x[7], r[7] = -2.47213595, -7.60845213, 8.8\ny[8], x[8], r[8] = 6.47213595, 4.70228202, 7.6\ny[9], x[9], r[9] = -0.0, 8.0, 4.8\n\nconvertToRange()\nprint('problem1:', solveCircular())\n\nn = 4\ny, x, r, ranges = init()\ny[0], x[0], r[0] = 8.0, 0.0, 8.0\ny[1], x[1], r[1] = 0.0, -8.0, 8.0\ny[2], x[2], r[2] = -8.0, -0.0, 8.0\ny[3], x[3], r[3] = 1.25147572, 7.90150672, 5.4\n\nconvertToRange()\nprint('problem2:', solveCircular())\n\nn = 1\ny, x, r, ranges = init()\ny[0], x[0], r[0] = 8.0, 0.0, 15.99\n\nconvertToRange()\nprint('problem3:', solveCircular())\n","repo_name":"openmindedcode/diveintoalgo","sub_path":"chap10/kn/p388 10.6문제 미나스 아노르.py","file_name":"p388 10.6문제 미나스 아노르.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70305102561","text":"# 55. 
Jump Game\n\n# Approach 1: O(n) but after writing this, I realised it can be done in a single loop instead of 2 loops\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n zero_count = 0\n zero_list = list()\n start_index = 0\n end_index = 0\n \n if nums[0] == 0:\n return True if len(nums) == 1 else False\n \n # if last element is zero, change it to something else because unlike others we have to \"just reach\" the last element, not cross it\n nums[-1] = 1\n \n for i in range(len(nums)):\n if nums[i] == 0:\n start_index = i if zero_count == 0 else start_index\n zero_count += 1\n elif zero_count != 0:\n end_index = i - 1\n zero_list.append([start_index, end_index, zero_count])\n \n zero_count = 0\n\n if not zero_list:\n return True\n \n total_zeroes_to_cross = 0\n \n while zero_list:\n curr_start, _, curr_zeroes = zero_list.pop()\n \n if zero_list:\n prev_end = zero_list[-1][1]\n else:\n prev_end = -1\n \n total_zeroes_to_cross += curr_zeroes\n \n for i in range(curr_start-1, prev_end, -1):\n if nums[i] > total_zeroes_to_cross:\n total_zeroes_to_cross = 0\n break\n total_zeroes_to_cross += 1\n \n return True if total_zeroes_to_cross == 0 else False\n\n# Approach 2: Similar to first but in one pass\n# Traverse from right to left and handle 2 cases:\n# 1. If you get zeroes, increment 'zeroes' var and continue\n# 2. If you get a non-zero number, check if it is greater than the zeroes you have counted, if not, increment the 'zeroes' var further because the next good index will have to cross this index too.\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n if nums[0] == 0:\n return True if len(nums) == 1 else False\n\n nums[-1] = 1\n zeroes = 0\n\n for i in range(len(nums)-1, -1, -1):\n if nums[i] > zeroes:\n zeroes = 0\n elif nums[i] == 0 or zeroes:\n zeroes += 1\n\n return False if zeroes else True","repo_name":"AnirudhGoel/CompetitiveProgramming","sub_path":"LeetCode/55. Jump Game.py","file_name":"55. 
Jump Game.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14575841775","text":"import numpy as np\nfrom tqdm import tqdm\n\n\ndef angle_to_state(angle):\n return int(30 * ((angle + np.pi) / (2 * np.pi) % 1)) # Discretization of the angle space\n\n\ndef vel(theta, theta_0=0, theta_dead=np.pi / 12):\n return 1 - np.exp(-(theta - theta_0) ** 2 / theta_dead)\n\n\ndef rew(theta, theta_0=0, theta_dead=np.pi / 12):\n return vel(theta, theta_0, theta_dead) * np.cos(theta)\n\nrandom_ys = []\nfor episode in tqdm(range(100), desc='Running: Random agent on open sea task'): # run for 500 episodes\n angle = 0 # always start with angle 0\n y = 0\n for i in range(200):\n a = np.random.choice(range(2)) # Sample a random action\n\n out = [-0.1, 0.1][a] # Get the change in angle as a result of the selected angle\n\n y += rew(angle + out)\n\n # Update the angle\n angle += out\n\n random_ys.append(y)\n\nQ = np.zeros((30, 2)) # Initialization of the Q-values with zeros\n# There are 30 angle states and 2 actions\n\nrho = 0 # Initialize the average reward to 0\ntd_ys = []\nfor episode in tqdm(range(500), desc='Running: Train agent on open sea task'): # run for 500 episodes\n angle = 0 # always start with angle 0\n y = 0\n for i in range(200):\n state = angle_to_state(angle)\n\n p = np.exp(Q[state]) / np.sum(np.exp(Q[state])) # Action selection using softmax\n a = np.random.choice(range(2), p=p) # Sample the action from the softmax distribution\n\n out = [-0.1, 0.1][a] # Get the change in angle as a result of the selected angle\n\n new_state = angle_to_state(angle + out)\n\n y += rew(angle + out)\n\n # Calculate the prediction error\n delta = rew(angle + out) - rho + Q[new_state].max() - Q[state, a]\n\n # Update the average reward\n rho += 0.1 * (rew(angle + out) - rho)\n\n # Update the Q-value\n Q[state, a] += 0.1 * delta\n\n # Update the angle\n angle += out\n\n td_ys.append(y)\n\nrandom_mean = np.mean(random_ys[-100:])\nrandom_std = np.std(random_ys[-100:])\n\ntd_mean = np.mean(td_ys[-100:])\ntd_stq = np.std(td_ys[-100:])\n\nprint('Results from last 100 episodes')\nprint('| ===== agent ===== | ===== mean ===== | ===== std ===== |')\nprint(f'{\"| Random\":<20}| {random_mean:<17.2f}| {random_std:<16.2f}|')\nprint(f'{\"| Trained\":<20}| {td_mean:<17.2f}| {td_stq:<16.2f}|')\n","repo_name":"PPierzc/ai-learns-to-sail","sub_path":"tasks/open_sea.py","file_name":"open_sea.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"16029603727","text":"# std\nfrom datetime import datetime\n\n# external\nfrom sqlalchemy.orm import Session\n\n# molar\nfrom molar.backend.schemas.eventstore import (\n EventStore,\n EventStoreCreate,\n EventStoreDelete,\n EventStoreUpdate,\n EventTypes,\n)\n\nfrom .base import CRUDBase, ModelType\n\n\nclass CRUDEventStore(CRUDBase[ModelType, EventStoreCreate, EventStoreUpdate]):\n def get_all(self, db: Session):\n return db.query(self.model).all()\n\n def create(self, db: Session, *, obj_in: EventStoreCreate, user_id: int):\n db_obj = self.model(\n event=\"create\", type=obj_in.type, data=obj_in.data, user_id=user_id\n )\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def update(self, db: Session, *, obj_in: EventStoreUpdate, user_id: int):\n db_obj = self.model(\n event=\"update\",\n uuid=str(obj_in.uuid),\n type=obj_in.type,\n data=obj_in.data,\n user_id=user_id,\n 
)\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def delete(self, db: Session, *, obj_in: EventStoreDelete, user_id: int):\n db_obj = self.model(\n event=\"delete\", type=obj_in.type, uuid=str(obj_in.uuid), user_id=user_id\n )\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def rollback(self, db: Session, *, before: datetime, user_id: int):\n db_obj = self.model(\n event=\"rollback\", data={\"before\": str(before)}, user_id=user_id\n )\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n","repo_name":"aspuru-guzik-group/molar","sub_path":"molar/backend/crud/crud_eventstore.py","file_name":"crud_eventstore.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"21770682557","text":"from collections import defaultdict\n\nimport pytorch_lightning as pl\nfrom optuna.integration import PyTorchLightningPruningCallback\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\nfrom data.MoNuSeg.data_module import MoNuSegDataModule\nfrom models.al_net import ALNet\nfrom models.net_module import NetModule\nfrom models.reu_net import REUNet\n\n\ndef suggest_train_params(trial):\n train_params = defaultdict()\n train_params[\"max_epochs\"] = 200\n train_params[\"batch_size\"] = 8\n\n train_params[\"lr_schedule\"] = trial.suggest_categorical(\"lr_schedule\", [\"none\", \"plateau\", \"warmup_cos\",\n \"cos_warm_restart\"])\n if train_params[\"lr_schedule\"] == \"none\":\n train_params[\"lr\"] = trial.suggest_float(\"lr_none\", 1e-4, 1e-3, log=False)\n\n elif train_params[\"lr_schedule\"] == \"plateau\":\n train_params[\"lr\"] = trial.suggest_float(\"lr_plateau\", 1e-4, 1e-3, log=False)\n train_params[\"min_lr\"] = trial.suggest_float(\"min_lr_plateau\", 1e-8, 1e-4, log=False)\n train_params[\"reduction_factor\"] = trial.suggest_categorical(\"reduction_factor\", [0.5, 0.25, 0.1])\n train_params[\"patience\"] = trial.suggest_categorical(\"patience\", [5, 10, 15])\n\n elif train_params[\"lr_schedule\"] == \"warmup_cos\":\n train_params[\"lr\"] = trial.suggest_float(\"lr_cos\", 1e-4, 1e-3, log=False)\n train_params[\"min_lr\"] = trial.suggest_float(\"min_lr_cos\", 0, 1e-4, log=False)\n train_params[\"warmup_epochs\"] = trial.suggest_int(\"warmup_epochs\", 1, 5)\n\n elif train_params[\"lr_schedule\"] == \"cos_warm_restart\":\n train_params[\"lr\"] = trial.suggest_float(\"lr_restart\", 1e-4, 1e-3, log=False)\n train_params[\"min_lr\"] = trial.suggest_float(\"min_lr_restart\", 0, 1e-4, log=False)\n train_params[\"period_mult\"] = trial.suggest_int(\"period_mult\", 1, 4)\n period_mult_as_str = str(train_params[\"period_mult\"])\n period_initial = {\"1\": [100, 50, 25], \"2\": [67, 29, 13], \"3\": [50, 15], \"4\": [40, 10]}\n train_params[\"period_initial\"] = trial.suggest_categorical(\"period_initial\", period_initial[period_mult_as_str])\n\n return train_params\n\n\ndef tune_train_params(trial):\n mode = \"naylor\"\n net = ALNet(mode=mode)\n\n pl.seed_everything(42, workers=True) # Seed for torch, numpy and python.random\n train_params = suggest_train_params(trial)\n log_name = str(train_params)\n model = NetModule(net=net, train_params=train_params, pprocess_params=None)\n trainer = pl.Trainer(\n accelerator=\"gpu\", devices=1, max_epochs=model.train_params[\"max_epochs\"], max_time=\"0:3:0:0\",\n fast_dev_run=False, log_every_n_steps=1, check_val_every_n_epoch=1, enable_model_summary=False,\n enable_checkpointing=False, 
callbacks=[PyTorchLightningPruningCallback(trial, monitor=\"val_loss\")],\n logger=TensorBoardLogger(save_dir=\"lightning_logs/tuning\", name=log_name, default_hp_metric=False),\n )\n\n data_module = MoNuSegDataModule.default_mode(mode=model.mode, auxiliary_task=model.auxiliary_task,\n batch_size=model.train_params[\"batch_size\"])\n trainer.fit(model, data_module)\n return trainer.logged_metrics[\"val_loss\"]\n\n\ndef tune_net_params(trial):\n mode = \"contour\"\n\n pl.seed_everything(42, workers=True) # Seed for torch, numpy and python.random\n net_params = {\n \"inter_channels_equal_out_channels\": True,\n \"aspp_inter_channels\": trial.suggest_categorical(\"aspp_inter_channels\", [1, 4, 8, 12, 16]),\n \"norm_actv\": trial.suggest_categorical(\"norm_actv\", [True, False]),\n \"down_mode\": trial.suggest_categorical(\"down_mode\", [\"max\", \"avg\", \"conv\"])\n }\n log_name = f\"{mode}_reu_net_params_assp{net_params['aspp_inter_channels']}_dmode{net_params['down_mode']}_notv{net_params['norm_actv']}\"\n model = NetModule(net=REUNet(mode=mode, net_params=net_params))\n log_folder = f\"{mode}_reu_net_params\"\n trainer = pl.Trainer(\n accelerator=\"gpu\", devices=1, max_epochs=model.train_params[\"max_epochs\"], max_time=\"0:8:0:0\",\n fast_dev_run=True, log_every_n_steps=1, check_val_every_n_epoch=1, enable_model_summary=False,\n enable_checkpointing=False, callbacks=[PyTorchLightningPruningCallback(trial, monitor=\"val_loss\")],\n logger=TensorBoardLogger(save_dir=f\"lightning_logs/tuning/{log_folder}\", name=log_name,\n default_hp_metric=False),\n )\n data_module = MoNuSegDataModule.default_mode(mode=model.mode, auxiliary_task=model.auxiliary_task,\n batch_size=model.train_params[\"batch_size\"])\n trainer.fit(model, data_module)\n return trainer.logged_metrics[\"val_loss\"]\n\n\nif __name__ == '__main__':\n pass\n # study = optuna.create_study(direction=\"maximize\")\n # study.optimize(tune_pprocess_params, n_trials=100)\n # # study = optuna.create_study(direction=\"minimize\", pruner=MedianPruner())\n # # study.optimize(tune_train_params, n_trials=100)\n #\n # print(\"Number of finished trials: \", len(study.trials))\n # print(\"Best trial:\")\n # trial = study.best_trial\n #\n # print(\"Value: \", trial.value)\n # print(\"Params: \")\n # for key, value in trial.params.items():\n # print(f\" {key}: {value}\")\n","repo_name":"emerbitz/NucInstSeg","sub_path":"misc/tuning.py","file_name":"tuning.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6523229878","text":"def miniMaxSum(arr):\n max_min = []\n for i in arr:\n max_min.append(sum(arr) - i)\n print(f'{min(max_min)} {max(max_min)}')\nif __name__ == '__main__':\n\n arr = list(map(int, input().rstrip().split()))\n\n miniMaxSum(arr)\n \n \n","repo_name":"muhsinan/hackerrank","sub_path":"problem_solving/mini-max_sum.py","file_name":"mini-max_sum.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25660898624","text":"contador = totHomens = totMulheres20 = 0\nwhile True:\n print('-'*20)\n print('CADASTRE UMA PESSOA')\n print('-'*20)\n idade = int(input('Idade: '))\n sexo = ' '\n while sexo not in 'MF': #Enquanto o sexo não for masculino ou feminino, ele sempre vai repetir a pergunta.\n sexo = str(input('Sexo: [M/F]')).strip().upper()[0]\n escolha = ' ' \n while escolha not in 'SN': #Enquanto a escolha não for sim ou não, ele vai 
sempre repetir a pergunta\n escolha = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if idade >= 18:\n contador += 1 #Adicionar +1 a toda pessoa com +18 de idade\n if sexo == 'M':\n totHomens += 1 #adicionar +1 a toda pessoa que tenha o sexo masculino\n if sexo == 'F' and idade <20:\n totMulheres20 += 1 #Adicionar +1 a toda pessoa que seja mulher e que tenha menos de 20 anos.\n if escolha == 'N': \n break #parar quando a escolha for igual a 'N'\n \nprint(f'\\033[32mTOTAL DE PESSOAS COM MAIS DE 18 ANOS: {contador}')\nprint(f'AO TODO TEMOS {totHomens} HOMENS CADASTRADOS')\nprint(f'TEMOS {totMulheres20} MULHERES COM MENOS DE 20 ANOS\\033[m')\n\n\n ","repo_name":"RonalddMatias/Curso-Completo-Python","sub_path":"Curso de Python/Python-Exercícios/ex069.py","file_name":"ex069.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10988228997","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 4 22:21:06 2022\r\n\r\n@author: Jeffrey\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport datetime\r\n\r\ndf = pd.read_csv('Merge.csv')\r\n\r\n# Sanity check\r\ndf.info()\r\ndf.describe()\r\ndf['Resolution'].unique()\r\n\r\nconsolidatedData = df.copy()\r\nconsolidatedData['Datetime'] = pd.to_datetime(consolidatedData['Datetime'])\r\nconsolidatedData['Date'] = consolidatedData['Datetime'].apply(lambda x:x.date())\r\nconsolidatedData['Datetime+1'] = consolidatedData['Datetime'] + datetime.timedelta(hours=1)\r\nconsolidatedData.set_index('Datetime+1', inplace=True)\r\n\r\n\r\ndef avg2HoursPricce (DF,freq):\r\n df = DF.copy()\r\n df = df.between_time('07:00', '17:00')\r\n df = df[df['Resolution'] == freq].resample('2H').mean()\r\n df.reset_index(inplace=True)\r\n df['Datetime'] = df['Datetime+1'] - datetime.timedelta(hours=1)\r\n df.dropna(inplace=True)\r\n df.rename(columns={'Price':freq},inplace=True)\r\n df.set_index('Datetime',inplace=True)\r\n df.drop('Datetime+1',axis=1,inplace=True)\r\n return df\r\n\r\ntenMin = avg2HoursPricce(consolidatedData,'10MIN')\r\nsixtyMin = avg2HoursPricce(consolidatedData,'1H')\r\n\r\nexp = tenMin.merge(sixtyMin,left_index=True,right_index=True,how='left')\r\nexp.reset_index(inplace=True)\r\nexp['Date'] = exp['Datetime'].apply(lambda x:x.date())\r\n\r\nexp = exp.merge(consolidatedData[consolidatedData['Resolution'] == 'D'][['Date','Price']],on='Date',validate='many_to_one')\r\n\r\nexp.set_index('Datetime',inplace=True)\r\nexp.drop('Date',axis=1,inplace=True)\r\nexp.rename(columns={'10MIN':'10-mins', '1H':'60-mins', 'Price':'1-day'},inplace=True)\r\n\r\n# exp is the solution DataFrame for Question 1\r\n","repo_name":"MrT0011/Petroineos","sub_path":"Jeffrey-Question1.py","file_name":"Jeffrey-Question1.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13495919690","text":"import json\nimport os\nimport pkg_resources\n\n\ndef get_safety_words():\n \"\"\"\n Extract the set of safety words from safety file\n and return\n \"\"\"\n safety_words = set()\n safety_words_path = \"{}/{}\".format(\n pkg_resources.resource_filename(\"droidlet.documents\", \"internal\"), \"safety.txt\"\n )\n if os.path.isfile(safety_words_path):\n \"\"\"Read a set of safety words to prevent abuse.\"\"\"\n with open(safety_words_path) as f:\n for l in f.readlines():\n w = l.strip(\"\\n\").lower()\n if w != \"\" and w[0] != \"<\" and w[0] != \"#\":\n safety_words.add(w)\n return 
safety_words\n\n\ndef get_greetings(ground_truth_data_dir):\n # Load greetings\n greetings_path = ground_truth_data_dir + \"greetings.json\"\n greetings_map = {\"hello\": [\"hi\", \"hello\", \"hey\"], \"goodbye\": [\"bye\"]}\n if os.path.isfile(greetings_path):\n with open(greetings_path) as fd:\n greetings_map = json.load(fd)\n return greetings_map\n","repo_name":"facebookresearch/fairo","sub_path":"droidlet/dialog/load_datasets.py","file_name":"load_datasets.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":826,"dataset":"github-code","pt":"54"} +{"seq_id":"24197936805","text":"from __future__ import absolute_import\n\nimport pytest\n\nimport MDAnalysis as mda\nimport numpy as np\nfrom pmda.rdf import InterRDF\nfrom MDAnalysis.analysis import rdf\n\nfrom numpy.testing import assert_almost_equal\n\nfrom MDAnalysisTests.datafiles import GRO_MEMPROT, XTC_MEMPROT\n\n\n@pytest.fixture(scope='module')\ndef u():\n return mda.Universe(GRO_MEMPROT, XTC_MEMPROT)\n\n\n@pytest.fixture(scope='module')\ndef sels(u):\n s1 = u.select_atoms('name OD1 and resname ASP')\n s2 = u.select_atoms('name OD2 and resname ASP')\n return s1, s2\n\n\ndef test_nbins(u):\n s1 = u.atoms[:3]\n s2 = u.atoms[3:]\n rdf = InterRDF(s1, s2, nbins=412).run()\n\n assert len(rdf.bins) == 412\n\n\ndef test_range(u):\n s1 = u.atoms[:3]\n s2 = u.atoms[3:]\n rmin, rmax = 1.0, 13.0\n rdf = InterRDF(s1, s2, range=(rmin, rmax)).run()\n\n assert rdf.edges[0] == rmin\n assert rdf.edges[-1] == rmax\n\n\ndef test_count_sum(sels, scheduler):\n # OD1 vs OD2\n # should see 577 comparisons in count\n s1, s2 = sels\n rdf = InterRDF(s1, s2).run()\n assert rdf.count.sum() == 577\n\n\ndef test_count(sels):\n # should see two distances with 7 counts each\n s1, s2 = sels\n rdf = InterRDF(s1, s2).run()\n assert len(rdf.count[rdf.count == 3]) == 7\n\n\ndef test_double_run(sels):\n # running rdf twice should give the same result\n s1, s2 = sels\n rdf = InterRDF(s1, s2).run()\n rdf.run()\n assert len(rdf.count[rdf.count == 3]) == 7\n\n\n@pytest.mark.parametrize(\"n_blocks\", [1, 2, 3, 4])\ndef test_same_result(sels, n_blocks):\n # should see same results from analysis.rdf and pmda.rdf\n s1, s2 = sels\n nrdf = rdf.InterRDF(s1, s2).run()\n prdf = InterRDF(s1, s2).run(n_blocks=n_blocks)\n assert_almost_equal(nrdf.count, prdf.count)\n assert_almost_equal(nrdf.rdf, prdf.rdf)\n\n\n@pytest.mark.parametrize(\"step\", [1, 2, 3])\ndef test_trj_len(sels, step):\n # should see same results from analysis.rdf and pmda.rdf\n s1, s2 = sels\n nrdf = rdf.InterRDF(s1, s2).run(step=step)\n prdf = InterRDF(s1, s2).run(step=step)\n assert_almost_equal(nrdf.n_frames, prdf.n_frames)\n assert_almost_equal(nrdf.rdf, prdf.rdf)\n\n\ndef test_cdf(sels):\n s1, s2 = sels\n rdf = InterRDF(s1, s2).run()\n cdf = np.cumsum(rdf.count) / rdf.n_frames\n assert_almost_equal(rdf.cdf[-1], rdf.count.sum()/rdf.n_frames)\n assert_almost_equal(rdf.cdf, cdf)\n\n\ndef test_reduce(sels):\n # should see numpy.array addtion\n s1, s2 = sels\n rdf = InterRDF(s1, s2)\n res = []\n single_frame = np.array([np.array([1, 2]), np.array([3])])\n res = rdf._reduce(res, single_frame)\n res = rdf._reduce(res, single_frame)\n assert_almost_equal(res[0], np.array([2, 4]))\n assert_almost_equal(res[1], np.array([6]))\n\n\n@pytest.mark.parametrize('exclusion_block, value', [\n (None, 577),\n ((1, 1), 397)])\ndef test_exclusion(sels, exclusion_block, value):\n # should see 397 comparisons in count when given exclusion_block\n # should see 577 comparisons in count 
when exclusion_block is none\n s1, s2 = sels\n rdf = InterRDF(s1, s2, exclusion_block=exclusion_block).run()\n assert rdf.count.sum() == value\n","repo_name":"MDAnalysis/pmda","sub_path":"pmda/test/test_rdf.py","file_name":"test_rdf.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"37773202041","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='Array', Difficult='Medium')\n\n\ndef nextGreaterEle(nums):\n stack = [nums[0]]\n ele = 0\n nxt = 0\n n = len(nums)\n for i in range(1, n):\n nxt = nums[i]\n if stack:\n ele = stack.pop()\n while ele < nxt:\n print(f'{ele} -- > {nxt}')\n if not stack:\n break\n ele = stack.pop()\n if ele > nxt:\n stack.append(ele)\n stack.append(nxt)\n while len(stack) != 0:\n ele = stack.pop()\n nxt = -1\n print(f'{ele} -- > {nxt}')\n\n\nnums = [4, 3, 2, 1, 5, 2, 25]\nnextGreaterEle(nums)\n","repo_name":"Omkar02/FAANG","sub_path":"MN_Round_two_5.py","file_name":"MN_Round_two_5.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23474584920","text":"#!/usr/bin/python3\n\nimport os\nimport json\nimport sys\nimport sklearn\nfrom sklearn import cross_validation, grid_search\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.svm import SVC\nfrom sklearn.externals import joblib\n\n# Code credit to https://code.oursky.com/tensorflow-svm-image-classifications-engine/\n\ndef train_svm_classifer(features, labels, model_output_path):\n \"\"\"\n train_svm_classifer will train a SVM, saved the trained and SVM model and\n report the classification performance\n\n features: 2D array of each input feature for each sample\n labels: array of string labels classifying each sample\n model_output_path: path for storing the trained svm model\n \"\"\"\n # save 20% of data for performance evaluation\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, labels, test_size=0.2)\n\n param = [\n {\n \"kernel\": [\"linear\"],\n \"C\": [1, 10, 100, 1000]\n },\n {\n \"kernel\": [\"rbf\"],\n \"C\": [1, 10, 100, 1000],\n \"gamma\": [1e-2, 1e-3, 1e-4, 1e-5]\n }\n ]\n\n # request probability estimation\n svm = SVC(probability=True)\n\n # 10-fold cross validation, use 4 thread as each fold and each parameter set can be train in parallel\n clf = grid_search.GridSearchCV(svm, param,\n cv=10, n_jobs=20, verbose=3)\n\n clf.fit(X_train, y_train)\n\n if os.path.exists(model_output_path):\n joblib.dump(clf.best_estimator_, model_output_path)\n else:\n print(\"Cannot save trained svm model to {0}.\".format(model_output_path))\n\n print(\"\\nBest parameters set:\")\n print(clf.best_params_)\n\n y_predict=clf.predict(X_test)\n\n labels=sorted(list(set(labels)))\n print(\"\\nConfusion matrix:\")\n print(\"Labels: {0}\\n\".format(\",\".join(labels)))\n print(confusion_matrix(y_test, y_predict, labels=labels))\n\n print(\"\\nClassification report:\")\n print(classification_report(y_test, y_predict))\n\ndef main():\n # load the feature data from a file\n with open(sys.argv[1]) as infile:\n dataset = json.load(infile)\n app_names = list(dataset['apps'].keys())\n feature_vectors = [dataset['apps'][app]['vector'] for app in app_names]\n labels = ['1' if dataset['apps'][app]['malicious'] == [1,0] else 
'0' for app in app_names]\n train_svm_classifer(feature_vectors, labels, 'model.out')\n\nif __name__=='__main__':\n main()\n","repo_name":"mwleeds/android-malware-analysis","sub_path":"sklearn_svm.py","file_name":"sklearn_svm.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"54"} +{"seq_id":"16386954220","text":"\r\nfrom twisted.words.xish import domish\r\nfrom twisted.words.protocols.jabber import xmlstream\r\nfrom twisted.python import log\r\nfrom wokkel.subprotocols import XMPPHandler\r\nimport xmlrpclib, xml.dom.minidom, logging\r\n\r\nfrom common import CommonClientManager\r\n\r\nNS_RPC = 'jabber:iq:rpc'\r\n\r\ndef removeWhitespaceNodes(dom):\r\n \"\"\"\r\n This method is called recursively for each element within the submitted dom\r\n and is used to remove empty whitespace elements\r\n \"\"\"\r\n for child in list( dom.childNodes ):\r\n if child.nodeType == child.TEXT_NODE and child.data.strip() == '':\r\n dom.removeChild(child)\r\n else:\r\n removeWhitespaceNodes(child)\r\n\r\nclass RPCProtocolHandler( XMPPHandler ):\r\n subscribed_methods = {}\r\n\r\n def __init__( self, client ):\r\n super( RPCProtocolHandler, self ).__init__()\r\n\r\n self.my_client = client\r\n\r\n def connectionInitialized(self):\r\n RPC_SET = \"/iq[@type='set']/query[@xmlns='%s']\" % NS_RPC\r\n self.xmlstream.addObserver(RPC_SET, self.onMethodCall)\r\n\r\n def onMethodCall(self, iq):\r\n log.msg( 'onMethodCall', level = logging.DEBUG )\r\n\r\n method_name = str( iq.query.methodName )\r\n\r\n if not method_name in self.subscribed_methods:\r\n # send error response\r\n err_iq = FaultResponse( self.xmlstream, 1, 'method not implemented' )\r\n err_iq['id'] = iq.getAttribute('id')\r\n err_iq['to'] = iq.getAttribute('from')\r\n err_iq.send()\r\n return\r\n\r\n log.msg( 'method found', level = logging.DEBUG )\r\n target_method = self.subscribed_methods[ method_name ]\r\n converted_data = xmlrpclib.loads( iq.query.params.toXml() )[0]\r\n\r\n log.msg( 'converted_data', level = logging.DEBUG )\r\n log.msg( iq.query.params.toXml(), level = logging.DEBUG )\r\n log.msg( converted_data, level = logging.DEBUG )\r\n method_result = target_method( iq, *converted_data )\r\n\r\n if isinstance( method_result, RPCFault ):\r\n response_iq = FaultResponse( self.xmlstream, method_result.error_code, method_result.error_string )\r\n else:\r\n response_iq = MethodResponse( self.xmlstream, method_result )\r\n\r\n response_iq['id'] = iq.getAttribute('id')\r\n response_iq['to'] = iq.getAttribute('from')\r\n response_iq.send()\r\n\r\n def callMethod(self, recipient, method_name, params ):\r\n\r\n new_iq = xmlstream.IQ( self.xmlstream, 'set' )\r\n new_iq['to'] = recipient.full()\r\n\r\n q = new_iq.addElement( ( NS_RPC, 'query') )\r\n q.addElement( (None, 'methodName' ), content = str( method_name ) )\r\n\r\n params_dom = xml.dom.minidom.parseString( xmlrpclib.dumps( tuple( params ), allow_none = True ) )\r\n removeWhitespaceNodes( params_dom )\r\n q.addRawXml( params_dom.documentElement.toxml( 'utf-8' ) )\r\n\r\n log.msg( 'callMethod', level = logging.DEBUG )\r\n log.msg( tuple( params ), level = logging.DEBUG )\r\n log.msg( xmlrpclib.dumps( tuple( params ), allow_none = True ), level = logging.DEBUG )\r\n\r\n return new_iq.send()\r\n\r\n def subscribeMethod(self, method_name, method ):\r\n self.subscribed_methods[ method_name ] = method\r\n\r\n def unsubscribeMethod(self, method_name):\r\n if method_name in self.subscribed_methods:\r\n del 
self.subscribed_methods[ method_name ]\r\n\r\nclass RPCFault( object ):\r\n\r\n def __init__(self, error_code, error_string):\r\n self.error_code = error_code\r\n self.error_string = error_string\r\n\r\nclass FaultResponse( xmlstream.IQ ):\r\n\r\n def __init__(self, stream, error_code, error_string ):\r\n xmlstream.IQ.__init__(self, stream, 'result')\r\n\r\n q = self.addElement( ( NS_RPC, 'query') )\r\n st = q.addElement( ( None, 'methodResponse') ).addElement( ( None, 'fault' ) ).addElement( ( None, 'value' ) ).addElement( ( None, 'struct' ) )\r\n\r\n cm = st.addElement( (None, 'member' ) )\r\n cm.addElement( (None, 'name'), content = 'faultCode' )\r\n cm.addElement( (None, 'value') ).addElement( (None, 'int'), content = str( error_code ) )\r\n\r\n sm = st.addElement( (None, 'member' ) )\r\n sm.addElement( (None, 'name'), content = 'faultString' )\r\n sm.addElement( (None, 'value') ).addElement( (None, 'string'), content = str( error_string ) )\r\n\r\nclass MethodResponse( xmlstream.IQ ):\r\n\r\n def __init__(self, stream, params ):\r\n xmlstream.IQ.__init__(self, stream, 'result')\r\n\r\n q = self.addElement( ( NS_RPC, 'query') )\r\n q.addElement( ( None, 'methodResponse') )\r\n\r\n params_dom = xml.dom.minidom.parseString( xmlrpclib.dumps( ( params, ), allow_none = True ) )\r\n removeWhitespaceNodes( params_dom )\r\n q.addRawXml( params_dom.documentElement.toxml( 'utf-8' ) )\r\n\r\ndef objectToElement( item ):\r\n\r\n if isinstance( item, type( () ) ) or isinstance( item, type( [] ) ) and len( item ):\r\n new_ele = domish.Element( (None, 'array') )\r\n d = new_ele.addElement( (None, 'data') )\r\n for x in item:\r\n d.addElement( (None, 'value') ).addChild( objectToElement( x ) )\r\n elif isinstance( item, type( {} ) ):\r\n new_ele = domish.Element( (None, 'struct') )\r\n for name, value in item.items():\r\n if not isinstance( name, type( str() ) ):\r\n continue\r\n m = new_ele.addElement( (None, 'member') )\r\n m.addElement( (None, 'name'), content = str( name ) )\r\n m.addElement( (None, 'value') ).addChild( objectToElement( value ) )\r\n elif item is True or item is False:\r\n new_ele = domish.Element( (None, 'boolean') )\r\n new_ele.addContent( ( item and '1' ) or '0' )\r\n elif isinstance( item, type( int() ) ):\r\n new_ele = domish.Element( (None, 'int') )\r\n new_ele.addContent( str( item ) )\r\n elif isinstance( item, type( float() ) ):\r\n new_ele = domish.Element( (None, 'double') )\r\n new_ele.addContent( str( item ) )\r\n else:\r\n new_ele = domish.Element( (None, 'string') )\r\n new_ele.addContent( str( item ) )\r\n\r\n return new_ele\r\n\r\nCommonClientManager.addHandler( 'rpc', RPCProtocolHandler )\r\n","repo_name":"bogobog/steerage","sub_path":"plugins/rpc_handler.py","file_name":"rpc_handler.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"642413807","text":"class Employee:\r\n company = \"Google\"\r\n def getSalary(self):\r\n print(\"salary is 1000k\")\r\n \r\n @staticmethod # if we want to run the below defined funcion without using self so we can do that by this \"static method\"\r\n def greet():\r\n print(\"Good morning sir\")\r\n\r\nkartik = Employee()\r\nkartik.getSalary() # This will be interpreted as Employee.getSalary(kartik) just because we used self while defining the getSalary function \r\nkartik.greet()","repo_name":"kartikver15gr8/Python-programming","sub_path":"Object Oriented Programming & 
Methodology/oops_my_python/self.py","file_name":"self.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37296232633","text":"import logging\nimport io\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom . import settings, dataloader\nfrom .. optimization import utils\n\nlog = logging.getLogger(__name__)\n\n\ndef main(args, cfg, log): \n chromosome, start_bin, end_bin = utils.split_chromosome_input(args.region, args.resolution)\n hic_filename = os.path.join('output/data', args.samplename, chromosome + '.txt.gz')\n\n\n plotter = settings.Plot(hic_filename,\n chromosome, \n \t\t\t\t\t\t\tstart_bin,\n end_bin,\n \t\t\t\t\t\t\targs.resolution)\n\n plotter.plotHiC(cfg['hic_text'], cfg['cmap'], int(cfg['nticks']))\n\n tads = dataloader.get_domains(args.samplename, chromosome)\n plotter.plotTAD(tads, float(cfg['tad_linewidth']), cfg['tad_linestyle'])\n\n if args.chipseq != False:\n chromsize = plotter.get_chromsize()\n\n chipseq = dataloader.get_chipseq(args.chipseq, chromosome, chromsize, args.resolution, args.log2_chip, args.zscore_chip)\n plotter.plotTrack(chipseq, cfg['chip_text'], \n float(cfg['vline_linewidth']), \n cfg['vline_linestyle'], \n int(cfg['fontsize']))\n\n if args.rnaseq != 'False':\n rnaseq = dataloader.get_rnaseq(args.rnaseq, chromosome, start_bin, end_bin)\n plotter.plotTrack(rnaseq, cfg['rnaseq_text'], \n float(cfg['vline_linewidth']), \n cfg['vline_linestyle'], \n int(cfg['fontsize']))\n\n\n plotter.show()\n plotter.saveplot(cfg['filename'], int(cfg['dpi']))\n log.info('Done!')\n\n","repo_name":"cosmoskaluga/optimalTAD","sub_path":"optimalTAD/visualization/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"6274391421","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Load the images\nimage_filenames=[]\nfor i in range(1,63):\n img = f\"D:/computer vision/puzzles/puzzle_affine_10/pieces/piece_{i}.jpg\"\n image_filenames.append(img)\n\nimages = [cv2.imread(filename) for filename in image_filenames]\n\n# Create a SIFT object\nsift = cv2.SIFT_create()\n\n# Detect keypoints and compute descriptors for each image\nkeypoints_list = [sift.detectAndCompute(image, None) for image in images]\n\n# Create a feature matcher\nmatcher = cv2.BFMatcher()\n\n# Match keypoints and descriptors between adjacent images\nmatches_list = []\nfor i in range(len(images)-1):\n matches = matcher.match(keypoints_list[i][1], keypoints_list[i+1][1])\n matches_list.append(matches)\n# matches_list = []\n# for i in range(len(images)-1):\n# matches = matcher.knnMatch(descriptors_list[i], descriptors_list[i+1], k=2)\n# good_matches = []\n# for m, n in matches:\n# if m.distance < 0.9 * n.distance:\n# good_matches.append(m)\n# matches_list.append(good_matches)\n\n# Apply RANSAC to estimate the affine transformation between matched keypoints\naffine_list = []\nfor matches, keypoints1, keypoints2 in zip(matches_list, keypoints_list[:-1], keypoints_list[1:]):\n src_pts = np.float32([keypoints1[0][m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([keypoints2[0][m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n M, mask = cv2.estimateAffine2D(src_pts, dst_pts)\n affine_list.append(M)\n\n# Stitch the images together using OpenCV's stitcher\nstitcher = 
cv2.Stitcher_create()\n(status, stitched) = stitcher.stitch(images)\n\n# Display the stitched image\nplt.imshow(cv2.cvtColor(stitched, cv2.COLOR_BGR2RGB))\nplt.show()\n\n# Save the stitched image\ncv2.imwrite('panorama_planar.jpeg', stitched)\n\n","repo_name":"NourAbdi/computer-vision","sub_path":"ex1/Untitled25.py","file_name":"Untitled25.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1009023086","text":"import argparse\nimport collections\nimport csv\nfrom dataclasses import dataclass\nimport json\nimport os\nimport re\n\nfrom typing import List\n\n\n@dataclass\nclass Mention:\n entity_id: str\n start: int = None\n end: int = None\n\n\n@dataclass\nclass Document:\n docid: str\n text: str\n mentions: List[Mention]\n\n @classmethod\n def from_lines(cls, lines):\n header = lines.popleft()\n match = re.search(f'(?<=\\().*(?=\\))', header)\n if match:\n docid = match.group(0)\n print(docid)\n else:\n raise RuntimeError('No DocID identified')\n text = ''\n mentions = []\n while lines:\n line = lines.popleft().strip()\n split = line.split('\\t')\n if len(split) == 1:\n token = split[0]\n bio_tag = None\n yago2_id = None\n else:\n token, bio_tag, _, yago2_id, *_ = split\n if bio_tag == 'B':\n mentions.append(Mention(yago2_id, start=len(text)))\n text += token\n if bio_tag in ('B', 'I'):\n mentions[-1].end = len(text)\n if len(lines) > 1:\n text += ' '\n return Document(docid, text, mentions)\n\n\ndef parse(f):\n instances = []\n lines = collections.deque()\n lines.append(next(f))\n for line in f:\n if '-DOCSTART-' in line:\n yield Document.from_lines(lines)\n lines.append(line)\n # One last document\n yield Document.from_lines(lines)\n\n\ndef main(args):\n if not os.path.exists(args.prefix):\n os.makedirs(args.prefix)\n\n g_train = open(os.path.join(args.prefix, 'train.jsonl'), 'w')\n g_dev = open(os.path.join(args.prefix, 'dev.jsonl'), 'w')\n g_test = open(os.path.join(args.prefix, 'test.jsonl'), 'w')\n g_entity = open(os.path.join(args.prefix, 'entity_vocab.txt'), 'w')\n\n splits = {\n '1 EU': ('train', g_train),\n '947testa CRICKET': ('dev', g_dev),\n '1163testb SOCCER': ('test', g_test),\n }\n\n entity_ids = collections.Counter()\n with open(args.input, 'r') as f:\n for document in parse(f):\n if document.docid in splits:\n mention_index = 0 # Reset at start of each section\n active_split, g = splits[document.docid]\n for mention in document.mentions:\n if mention.entity_id == '--NME--':\n continue\n obj = {\n 'left_context': document.text[:mention.start],\n 'mention': document.text[mention.start:mention.end],\n 'right_context': document.text[mention.end:],\n 'entity_id': mention.entity_id,\n 'document_id': document.docid,\n 'mention_index': mention_index\n }\n if active_split == 'train':\n entity_ids[mention.entity_id] += 1\n print(json.dumps(obj), file=g)\n mention_index += 1\n writer = csv.writer(g_entity)\n writer.writerow(('[PAD]', 0))\n for entity_id, count in sorted(entity_ids.items(), key=lambda x: x[1], reverse=True):\n writer.writerow((entity_id, count))\n\n g_train.close()\n g_dev.close()\n g_test.close()\n g_entity.close()\n\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', type=str)\n parser.add_argument('--prefix', type=str)\n args = parser.parse_args()\n\n 
main(args)\n\n","repo_name":"rloganiv/streaming-cdc","sub_path":"scripts/preprocess_aida_yago2.py","file_name":"preprocess_aida_yago2.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"4104379246","text":"from typing import Any, Dict, List, Type, TypeVar\n\nfrom attrs import define as _attrs_define\nfrom attrs import field as _attrs_field\n\nT = TypeVar(\"T\", bound=\"SearchAssetDto\")\n\n\n@_attrs_define\nclass SearchAssetDto:\n \"\"\"\n Attributes:\n search_term (str):\n \"\"\"\n\n search_term: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n search_term = self.search_term\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"searchTerm\": search_term,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n search_term = d.pop(\"searchTerm\")\n\n search_asset_dto = cls(\n search_term=search_term,\n )\n\n search_asset_dto.additional_properties = d\n return search_asset_dto\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"daniele-athome/immich-carddav-sync","sub_path":"immich_carddav_sync/immich_client/models/search_asset_dto.py","file_name":"search_asset_dto.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40363179637","text":"import requests, json\nimport os\nfrom time import sleep\n\ndef main():\n\n\ttry:\n\t\tapi_key = 'AIzaSyChq3CxzQwiXbCmqXHvO4cuMjQKXiwoutg'\n\t\turl = \"https://maps.googleapis.com/maps/api/place/textsearch/json?\"\n\t\tquery = input('\\n\\033[1;35m[+]\\033[1;36mCari tempat:\\033[1;32m ')\n\t\tprint (\"\\033[1;35m[+]\\033[1;36mMembuka google maps\\n\")\n\t\tsleep(3)\n\t\tr = requests.get(url + 'query=' + query +\n \t\t '&key=' + api_key)\n\t\tx = r.json()\n\t\ty = x['results']\n\t\tfor i in range(len(y)):\n\t\t print(\"\\033[1;31m[+]\\033[1;33mResult:\\033[1;36m \", y[i]['name'])\n\n\n\t\twhile True:\n\t\t\tul = input(\"\\n\\033[1;34m[?]\\033[1;31mMau nyari lagi?(y/n):\\033[1;33m \")\n\t\t\tif ul ==\"y\":\n\t\t\t\tmain()\n\t\t\telif ul ==\"n\":\n\t\t\t\texit(\"GoodBye\")\n\n\texcept:\n\t\tpass\n\nif __name__ == \"__main__\":\n\t#banner\n\tos.system('cls' if os.name == \"nt\" else 'clear')\n\tprint (\"\\033[1;32m\t╔═╗┌─┐┬─┐┬ ╔╦╗┌─┐┌┬┐┌─┐┌─┐┌┬┐ \")\n\tprint (\"\\033[1;35m\t║ ├─┤├┬┘│ ║ ├┤ │││├─┘├─┤ │ \")\n\tprint (\"\\033[1;32m\t╚═╝┴ ┴┴└─┴ ╩ └─┘┴ ┴┴ ┴ ┴ ┴ \")\n\tprint (\"\\033[1;36m\t Cari tempat nongkrong yuks \")\n\tprint (\"\\033[1;34m\t [+]\\033[1;33mCoded By Khazul\\033[1;34m[+]\")\n\n\n\tmain()\n","repo_name":"khazulys/caritempat","sub_path":"se.py","file_name":"se.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37569498974","text":"import requests\nimport json\n\n\n\nurl = \"http://games-global.wanyol.com\"\npath = 
\"/games/community/v1/review/delete\"\n\nrequest_url = url + path\nheaders = {\n \"id\": \"//869372030027653\",\n \"User-Agent\":\"b/c/d/e/f/31001//\",\n \"locale\": \"zh_CN;IN;TT\",\n \"UT\":\"TOKEN_m4xsyi29e2Ywa93EtFMFAuHoWKe3tYSrsY2u9m/HhkEplIZkPHXFbY7KtoGOJGVkt0bnl/a6CXhnH8QGwOPVLjm4hgyIwMI3hPk4KlZeW7U=\",\n \"Content-Type\": \"application/JSON; charset=UTF-8\",\n \"Accept\":\"application/JSON; charset=UTF-8\"\n }\nparam = {\"rid\":2149245556681110528}\nresult = requests.get(url=request_url,headers=headers,params=param)\ntry:\n print(json.dumps(result.json()))\nexcept:\n print(result.text)\n#assert result.status_code == 200","repo_name":"HoldCarry/GaamesInterface","sub_path":"GaamesInterface/data/request_get.py","file_name":"request_get.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"603629393","text":"from collections import deque\n\norbits = {}\nwith open( \"input\", \"r\" ) as f:\n for line in f:\n a, b = line.strip().split(')')\n if a in orbits:\n orbits[a].append(b)\n else:\n orbits[a] = [b]\n\ntotal = 0\nseq = deque( [(\"COM\",0)] )\nwhile len(seq) > 0:\n v, n = seq.popleft()\n total += n\n if v in orbits:\n seq.extend( [ (x,n+1) for x in orbits[v] ] )\nprint(total)\n \n","repo_name":"Factoid/AdventOfCode2019","sub_path":"6/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"34502188936","text":"import io\nimport logging\nimport queue\nimport threading\nimport time\nfrom typing import Optional, BinaryIO, Callable, Union\n\nimport sounddevice as sd\nimport soundfile\nimport soundfile as sf\nimport requests\nimport os\n\napi_endpoint = \"https://api.elevenlabs.io/v1\"\ndefault_headers = {'accept': '*/*'}\n\ndef _api_call_v2(requestMethod, argsDict) -> requests.Response:\n path = argsDict[\"path\"]\n if path[0] != \"/\":\n path = \"/\"+path\n argsDict[\"url\"] = api_endpoint + path\n argsDict.pop(\"path\")\n\n response:requests.Response = requestMethod(**argsDict)\n try:\n response.raise_for_status()\n return response\n except requests.exceptions.RequestException as e:\n _pretty_print_POST(response)\n raise e\n\ndef _api_get(path, headers, stream=False, params=None) -> requests.Response:\n args = {\n \"path\":path,\n \"headers\":headers,\n \"stream\":stream\n }\n if params is not None:\n args[\"params\"] = params\n return _api_call_v2(requests.get, args)\ndef _api_del(path, headers) -> requests.Response:\n args = {\n \"path\": path,\n \"headers\": headers\n }\n return _api_call_v2(requests.delete, args)\ndef _api_json(path, headers, jsonData, stream=False, params=None) -> requests.Response:\n args = {\n \"path\":path,\n \"headers\":headers,\n \"json\":jsonData,\n \"stream\":stream\n }\n if params is not None:\n args[\"params\"] = params\n return _api_call_v2(requests.post, args)\n\ndef _api_multipart(path, headers, data, filesData=None, stream=False, params=None):\n args = {\n \"path\":path,\n \"headers\":headers,\n \"stream\":stream,\n \"data\":data\n }\n if filesData is not None:\n args[\"files\"] = filesData\n if params is not None:\n args[\"params\"] = params\n\n return _api_call_v2(requests.post, args)\n\ndef _pretty_print_POST(res:requests.Response):\n req = res.request\n logging.debug(f\"RESPONSE DATA: {res.text}\")\n logging.debug('REQUEST THAT CAUSED THE ERROR:\\n{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\n '-----------START-----------',\n 
req.method + ' ' + req.url,\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))\n\n\ndef run_ai_speech_classifier(audioBytes:bytes):\n \"\"\"\n Runs Elevenlabs' AI speech classifier on the provided audio data.\n Parameters:\n audioBytes: The bytes of the audio file (mp3, wav, most formats should work) you want to analyze\n\n Returns:\n Dict containing all the information returned by the tool (usually just the probability of it being AI generated)\n \"\"\"\n data = io.BytesIO(audioBytes)\n files = {'file': ('audioSample.mp3', data, 'audio/mpeg')}\n response = _api_multipart(\"/moderation/ai-speech-classification\", headers=None, data=None, filesData=files)\n return response.json()\n\ndef play_audio_bytes(audioData:bytes, playInBackground:bool, portaudioDeviceID:Optional[int] = None,\n onPlaybackStart:Callable=lambda: None, onPlaybackEnd:Callable=lambda: None) -> sd.OutputStream:\n \"\"\"\n Plays the given audio and calls the given functions.\n \n Parameters:\n onPlaybackStart: Function to call once the playback begins\n onPlaybackEnd: Function to call once the playback ends\n audioData: The audio to play\n playInBackground: Whether to play it in the background\n portaudioDeviceID: The ID of the portaudioDevice to play it back on (Optional)\n\n Returns:\n The playback OutputStream (useful for stopping background playback)\n \"\"\"\n\n if portaudioDeviceID is None:\n portaudioDeviceID = sd.default.device[1]\n\n #Let's make sure the user didn't just forward a tuple from one of the other functions...\n if isinstance(audioData, tuple):\n for item in audioData:\n if isinstance(item,bytes):\n audioData = item\n\n\n playbackWrapper = _SDPlaybackWrapper(audioData, portaudioDeviceID, onPlaybackStart, onPlaybackEnd)\n\n if not playInBackground:\n with playbackWrapper.stream:\n playbackWrapper.endPlaybackEvent.wait()\n else:\n playbackWrapper.stream.start()\n return playbackWrapper.stream\n\ndef save_audio_bytes(audioData:bytes, saveLocation:Union[BinaryIO,str], outputFormat) -> None:\n \"\"\"\n This function saves the audio data to the specified location OR file-like object.\n soundfile is used for the conversion, so it supports any format it does.\n\n Parameters:\n audioData: The audio data.\n saveLocation: The path (or file-like object) where the data will be saved.\n outputFormat: The format in which the audio will be saved\n \"\"\"\n\n # Let's make sure the user didn't just forward a tuple from one of the other functions...\n if isinstance(audioData, tuple):\n for item in audioData:\n if isinstance(item, bytes):\n audioData = item\n\n tempSoundFile = soundfile.SoundFile(io.BytesIO(audioData))\n\n if isinstance(saveLocation, str):\n with open(saveLocation, \"wb\") as fp:\n sf.write(fp, tempSoundFile.read(), tempSoundFile.samplerate, format=outputFormat)\n else:\n sf.write(saveLocation, tempSoundFile.read(), tempSoundFile.samplerate, format=outputFormat)\n if callable(getattr(saveLocation, \"flush\", None)):\n saveLocation.flush()\n\n#This class just helps with the callback stuff.\nclass _SDPlaybackWrapper:\n def __init__(self, audioData, deviceID, onPlaybackStart:Callable=lambda: None, onPlaybackEnd:Callable=lambda: None):\n soundFile = sf.SoundFile(io.BytesIO(audioData))\n soundFile.seek(0)\n self.onPlaybackStart = onPlaybackStart\n self.onPlaybackEnd = onPlaybackEnd\n self.startPlaybackEvent = threading.Event()\n self.endPlaybackEvent = threading.Event()\n self.data = soundFile.read(always_2d=True)\n self.currentFrame = 0\n self.stream = sd.OutputStream(channels=soundFile.channels,\n callback=self.callback,\n 
samplerate=soundFile.samplerate,\n device=deviceID,\n finished_callback=self.end_playback)\n\n def callback(self, outdata, frames, time, status):\n if status:\n print(status)\n\n if not self.startPlaybackEvent.is_set(): #Ensure this is only fired once\n self.startPlaybackEvent.set()\n self.onPlaybackStart()\n\n chunksize = min(len(self.data) - self.currentFrame, frames)\n outdata[:chunksize] = self.data[self.currentFrame:self.currentFrame + chunksize]\n if chunksize < frames:\n outdata[chunksize:] = 0\n raise sd.CallbackStop()\n self.currentFrame += chunksize\n def end_playback(self):\n self.onPlaybackEnd()\n self.endPlaybackEvent.set()\n\nclass PeekQueue(queue.Queue):\n def peek(self):\n with self.mutex:\n return list(self.queue)[0]\n\n def snapshot(self):\n with self.mutex:\n return list(self.queue)\n\n\ndef _api_tts_with_concurrency(requestFunction:callable, generationID:str, generationQueue:PeekQueue) -> requests.Response:\n #Just a helper function which does all the concurrency stupidity for TTS calls.\n waitMultiplier = 1\n try:\n response = requestFunction()\n response.raise_for_status() #Just in case the callable isn't a function that already does this.\n except requests.exceptions.RequestException as e:\n if e.response.json()[\"detail\"][\"status\"] == \"too_many_concurrent_requests\":\n logging.warning(f\"{generationID} - broke concurrency limits, handling the cooldown...\")\n # Insert this in the user's \"waiting to be generated\" queue.\n generationQueue.put(generationID)\n response = None\n else:\n raise e\n\n if response is None:\n while True:\n try:\n peeked = generationQueue.peek()\n if peeked == generationID:\n response = requestFunction()\n response.raise_for_status()\n generationQueue.get()\n break\n else:\n logging.debug(f\"\\nCurrent first is {peeked}, we are {generationID}\\n\")\n logging.debug(f\"\\nOther items are first in queue, waiting for 0.5s\\n\")\n time.sleep(0.5) # The time to peek at the queue is constant.\n except requests.exceptions.RequestException as e:\n if e.response.json()[\"detail\"][\"status\"] == \"too_many_concurrent_requests\":\n logging.debug(f\"\\nWaiting for {0.5 * waitMultiplier}s\\n\")\n time.sleep(0.5 * waitMultiplier) # Just wait a moment and try again.\n waitMultiplier += 1\n continue\n raise e\n\n return response\n","repo_name":"hitech777/elevenlabslib","sub_path":"elevenlabslib/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":8799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"71506662883","text":"# takes a supplementary regulatory region file and returns a dataframe for each region with overlaps resolved as one longer region\n# so that overlapping positions are not counted multiple times\n# run this code for eRNA, lncRNA, L.var, arenas_mena_lit review\n# a separate code was run to process the gff file\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nnames=[\"chr\", \"start\", \"stop\", \"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\", \"n7\", \"n8\", \"n9\"]\nlncRNA=pd.read_csv(\"sp4.lncRNAs.bed\", skiprows=0, sep=\"\\t\", names=names)\natac1=lncRNA[[\"chr\",\"start\",\"stop\"]]\n\natac1=atac1.sort_values(by=[\"chr\",'start'])\nall_chrs=atac1.chr.unique()\n\ndef clean_segments(src):\n if len(src) <= 1:\n return src\n\n a = src[0]\n dest = []\n for b in src[1:]:\n # print(a,b, check_overlap(a,b))\n if check_overlap(a,b):\n #print(\"OVERLAP\")\n #print(src)\n a = (min(a[0],b[0]),max(a[1],b[1]))\n else:\n dest.append(a)\n a = b\n dest.append(a)\n return 
dest\n\n#returns TRUE if there is overlap\ndef check_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0])) > 0\ndef get_segs(df, chromosome):\n return [(start,stop) for _,(_,start,stop) in df[df['chr']==chromosome].iterrows()]\n\nresults = pd.DataFrame()\ni=1\nfor ch in tqdm(range(len(all_chrs))):\n #print(i)\n src = get_segs(atac1,all_chrs[ch])\n tmp = clean_segments(src)\n for t in tmp:\n results = results.append({'chr' : all_chrs[ch], 'start' : t[0], 'stop' : t[1]}, ignore_index = True)\n i=i+1\n\nresults.to_csv(\"sp4.lncRNAs_overlap_processed.csv\")\n","repo_name":"PespeniLab/urchin_local_adapt_WGS","sub_path":"code/check_overlap_within_df.py","file_name":"check_overlap_within_df.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5919029041","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 28 17:07:08 2019\n\n@author: jmcleod\n\"\"\"\n\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport pylab\n#from sklearn.metrics.pairwise import cosine_similarity\n#from scipy.spatial.distance import cosine\ndef done_message():\n print()\n print('DDDD OOOO NN NN EEEEEE')\n print('DD DD OO OO NNN NN EE')\n print('DD DD OO OO NNNNNN EEEE')\n print('DD DD OO OO NN NNN EE')\n print('DDDDD OOOO NN NN EEEEEE')\n#%%\n\ndata = pd.read_csv('cards_by_deck.csv')\ndata2 = pd.read_csv('long_form.csv')\ndata3 = pd.read_csv('card_data.csv')\n\nprint(list(data2))\nprint()\nprint(list(data))\nprint()\nprint(list(data3))\ndata.fillna(0, inplace=True)\n\ndeck_names= list(data)\ndeck_names.pop(0)\ncard_names = []\nfor i in data[\"Card Name\"]:\n card_names.append(i)\n\ndata = data.drop(columns=[\"Card Name\"])\n\n#%%\nprint(data3.iloc[3][\"Legendary\"])\n\n#%%\n\ndef calc_same_diff(a,b):\n if a == b:\n same = (a + b)/2\n diff = 0\n else: \n #card_index.append(i)\n c = max(a,b)\n d = min(a,b)\n same = d\n diff = c-d\n union = a+b\n return(same,diff,union)\n\n\n\ndef measurement(deck_a,deck_b,card_type):\n diff = 0\n same = 0\n union = 0\n# card_index = []\n for i in range(0,len(data[\"Affinity\"])):\n a = data[deck_a][i]\n b = data[deck_b][i]\n# card = card_names[i]\n# print(card)\n value_ind = data3.iloc[i][\"Over $100\"] # also change line 120\n #value_ind=1\n if card_type == \"All\":\n same_update,diff_update,union_update = calc_same_diff(a,b)\n same += (same_update*value_ind)\n diff += (diff_update*value_ind)\n union += (union_update*value_ind)\n else:\n in_type = data3.iloc[i][card_type] # Binary value for card of type\n if in_type ==1:\n same_update,diff_update,union_update = calc_same_diff(a,b)\n same += (same_update*value_ind)\n diff += (diff_update*value_ind)\n union+=(union_update*value_ind)\n return(diff,same,union)\n\ndiff,same,union = measurement(\"UR Delver\",\"Grixis Delver\",\"All\")\nprint(diff,same,union)\n \n#%%\ndef generate_long_form(type_card):\n counter=0\n long_form_adj = [[\"Deck A\",\"Deck B\",\"Different\",\"Same\",\"Jaccard\"]]\n for i in range(0,len(deck_names)):\n for j in range(0,len(deck_names)):\n if j > i:\n counter+=1\n a=deck_names[i]\n b=deck_names[j]\n #print(type_card,counter,a,\"vs\",b)\n diff,same,union = measurement(a,b,type_card)\n if same >= 1:\n jaccard = same/(union-same)\n temp_string1 = str('{\"source\": \"'+a+'\", \"target\": \"'+b+'\", \"value\":'+str(jaccard)+'},')\n temp_string2 = str('{\"source\": \"'+a+'\", \"target\": \"'+b+'\", \"value\":'+str(jaccard**2)+'},')\n temp_list = 
[a,b,diff,same,jaccard,temp_string1,temp_string2]\n long_form_adj.append(temp_list)\n return(long_form_adj)\n\n#%%\ntypes = [\"All\",\"Land\",\"Creature\",\"Artifact\",\"Enchantment\",\"Planeswalker\",\"Instant\",\"Sorcery\"]\n\n\n\nfor i in types:\n print('starting',i)\n long_form_adj = generate_long_form(i)\n output_name = \"Output_adjacency_over100_\"+i +\".csv\" # also change line73\n# output_name = \"Output_adjacency_\"+i +\".csv\"\n out_df = pd.DataFrame(long_form_adj)\n out_df.head()\n out_df.to_csv(output_name,sep=\",\",index=False)\n print(\"completed\",i)\n\ndone_message()\n\n#%%\n\n# Network\nG = nx.Graph() # create the graph here; calling G.clear() before G exists raised a NameError on the first run\nfor i in deck_names:\n G.add_node(i)\n\ntype_card = \"Planeswalker\"\nfor i in range(0,len(deck_names)):\n for j in range(0,len(deck_names)):\n if j > i:\n a=deck_names[i]\n b=deck_names[j]\n #print(type_card,counter,a,\"vs\",b)\n diff,same,union = measurement(a,b,type_card)\n if same > 0:\n jaccard = same/(union-same)\n G.add_edge(a, b, weight= jaccard)\n \n \n#%%\nG2 = nx.Graph() # a single construction is enough; the old clear/re-create sequence was redundant\nfor a, b, data in sorted(G.edges(data=True), key=lambda x: x[2]['weight']):\n print('{a}, {b}, {w}'.format(a=a, b=b, w=data['weight']))\n G2.add_edge(a, b, weight=data['weight'])\n\nfor edge in G2.edges.data():\n print(edge)\nfor node in G2.nodes.data():\n print(node)\n \n#%%\n\npos=nx.circular_layout(G2)\npylab.figure(1)\nnx.draw(G2,pos)\npylab.figure(2)\nnx.draw(G2,pos,node_size=60,font_size=8)\npylab.figure(3,figsize=(12,12))\nnx.draw(G2,pos)\npylab.show()\n\n#%%\n\n\nprint(nx.topological_sort(G2))\n","repo_name":"jericho85/CSI-Coursework","sub_path":"CSI_703/Project Files/mtg_data.py","file_name":"mtg_data.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2067721004","text":"#!/usr/bin/python3\n\"\"\" Almost a Circle \"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" Inherits from Base \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Initializes class \"\"\"\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n \"\"\"int: Assigns value to width\"\"\"\n return self.__width\n\n @property\n def height(self):\n \"\"\"int: Assigns value to height\"\"\"\n return self.__height\n\n @property\n def x(self):\n \"\"\"int: Assigns value to x\"\"\"\n return self.__x\n\n @property\n def y(self):\n \"\"\"int: Assigns value to y\"\"\"\n return self.__y\n\n @width.setter\n def width(self, value):\n if type(value) is not int:\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @height.setter\n def height(self, value):\n if type(value) is not int:\n raise TypeError(\"height must be an integer\")\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @x.setter\n def x(self, value):\n if type(value) is not int:\n raise TypeError(\"x must be an integer\")\n if value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @y.setter\n def y(self, value):\n if type(value) is not int:\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def area(self):\n \"\"\" Returns area of Rectangle \"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\" Prints Rectangle to stdout \"\"\"\n for y in range(self.y):\n print()\n for i in range(self.height):\n 
for x in range(self.x):\n print(\" \", end=\"\")\n for j in range(self.width):\n print('#', end=\"\")\n print()\n\n def __str__(self):\n \"\"\" Creates new string \"\"\"\n return \"[Rectangle] ({}) {}/{} - {}/{}\".format(\n self.id, self.__x, self.__y, self.__width, self.__height)\n\n def update(self, *args, **kwargs):\n \"\"\" Assigns an argument to each attribute \"\"\"\n attributes = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n if args and len(args) != 0:\n for i in range(len(args)):\n setattr(self, attributes[i], args[i])\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of Rectangle \"\"\"\n return {\n \"x\": self.x,\n \"y\": self.y,\n \"id\": self.id,\n \"height\": self.height,\n \"width\": self.width\n }\n","repo_name":"amberwagoner/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6961807408","text":"\"\"\"\n 作者:陈思\n 功能:五角星的绘制\n 版本:1.0\n 日期:25/03/2019\n\"\"\"\n\nimport turtle\n\ndef main():\n \"\"\"\n 主函数\n \"\"\"\n\n #计数\n count = 1\n while count <= 5:\n\n # 绘制边\n turtle.forward(100)\n turtle.right(144)\n count = count + 1\n\n turtle.exitonclick()\n\nif __name__ == '__main__':\n main()","repo_name":"chensi06lj/Python_project","sub_path":"lec02图像绘制/pentagram_1.0.py","file_name":"pentagram_1.0.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30064073607","text":"# This files contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these action:\n# https://rasa.com/docs/rasa/core/actions/#custom-actions/\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\nfrom typing import Any, Text, Dict, List\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom datetime import datetime\n\n\nclass ActionGiveCleanTime(Action):\n\n def name(self) -> Text:\n return \"action_schedule_cleaning\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n \n after = tracker.get_slot(\"hours\")\n t=datetime.now()\n newH=t.hour + after\n if newH>24:\n newH= newH%24\n s=\"Sure, I have scheduled a cleaning for \"+str(newH)+\":\"+str(t.minute)+\" am today.\"\n if newH>12:\n newH= newH%12\n s=\"Sure, I have scheduled a cleaning for \"+str(newH)+\":\"+str(t.minute)+\" pm today.\"\n else:\n s=\"Sure, I have scheduled a cleaning for \"+str(newH)+\":\"+str(t.minute)+\" am today.\"\n dispatcher.utter_message(text=s)\n\n return []\n","repo_name":"AdityaVashista30/Hotel-Managment-Bot-Using-Rasa","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41615826626","text":"\nfrom URLExtraction import ExtractURLs\nfrom URLExtraction import URLsResultsProcessing\n#from URLExtraction import CategoryAssign\nfrom URLExtraction import URLsResultsProcessing\nimport os\n#\ndef web_mining(company_name_file, keywords_file):\n csvfile = ExtractURLs.extract_urls(keywords_file, company_name_file)\n\n# filename = 
ExtractURLs.convert_to_excel(csvfile, companies_list)\n# result = URLsResultsProcessing.urls_processing(filename)\n# CategoryAssign.assign_category(result, '/Users/keleigong/Dropbox/Python/AUTO_Rating/Final_keywords.xlsx')\n\nif __name__ == \"__main__\":\n # Setting the base working path, and required file names\n base_path = \"606\"\n keywords_file = 'keywords.txt'\n company_name_file = 'xxx-xxx'\n keywords_file_path = os.path.join(base_path, keywords_file)\n company_name_file_path = os.path.join(base_path, company_name_file)\n # proxy_file = \"/home/scrc/program/AUTO_Rating/URLExtraction/ProxyProvider/proxy.txt\"\n # Collecting URLs using Google search, it will generate a json file and a db file to store the results.\n result_file = ExtractURLs.extract_urls(keywords_file_path, company_name_file_path)\n\n print(\"#################################################\")\n print(\"# #\")\n print(\"# Collecting URLs part is done #\")\n print(\"# #\")\n print(\"#################################################\")\n # Processing the collected results.\n company_json = URLsResultsProcessing.get_company_query_from_db(company_name_file_path)\n company_querys = URLsResultsProcessing.query_processing(company_json)\n company_all_urls = URLsResultsProcessing.remove_irrelevant_urls(company_querys)\n print(len(company_all_urls))\n URLsResultsProcessing.write_to_xlsx(company_all_urls, company_name_file_path+'.xlsx')","repo_name":"KeleiAzz/AUTO_Rating","sub_path":"URLExtraction/WebMining.py","file_name":"WebMining.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23646112774","text":"import pygame\r\nfrom random import choice\r\nWINDOW_SIZE = (800, 500)\r\n\r\nclass Ball(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.image.load(\"graphics/ball/ball.png\")\r\n self.image = pygame.transform.rotozoom(self.image, 0, 0.15)\r\n self.rect = self.image.get_rect(center = (WINDOW_SIZE[0]/2, 450))\r\n self.moveX = -1\r\n self.moveY = -1\r\n self.collision_with_plate = False\r\n def move_x(self):\r\n # reverse horizontal movement direction (was self.rect.x *= -1, which teleported the ball off screen)\r\n self.moveX *= -1\r\n def move_y(self):\r\n # reverse vertical movement direction (was self.rect.y *= -1)\r\n self.moveY *= -1\r\n def move(self):\r\n self.rect.y += self.moveY\r\n self.rect.x += self.moveX\r\n\r\n def check_move(self):\r\n for brick_group in brick_groups:\r\n if pygame.sprite.spritecollide(self, brick_group, True):\r\n self.moveY *= -1\r\n self.moveX *= -1\r\n else:\r\n self.move()\r\n # collision between the ball and the plate\r\n if pygame.sprite.spritecollide(self, plate_group, False):\r\n if self.rect.y >=420 and self.collision_with_plate == False:\r\n self.collision_with_plate = True\r\n self.moveY *= -1\r\n else:\r\n self.collision_with_plate = False\r\n\r\n\r\n # collision between ball and the window\r\n if self.rect.top <= 0:\r\n self.moveY *= -1\r\n if self.rect.left <=0 or self.rect.right >= WINDOW_SIZE[0]:\r\n self.moveX *= -1\r\n\r\n\r\n def update(self):\r\n self.check_move()\r\n\r\n\r\n\r\n# Plate class\r\nclass Plate(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.image.load(\"graphics/plate/plate.png\")\r\n self.image = pygame.transform.rotozoom(self.image, 0, 0.2)\r\n self.rect = self.image.get_rect(center=(WINDOW_SIZE[0] / 2, 470))\r\n\r\n def move(self):\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_LEFT]:\r\n self.rect.x -= 7\r\n elif keys[pygame.K_RIGHT]:\r\n self.rect.x += 7\r\n\r\n def update(self):\r\n self.move()\r\n\r\nclass 
Bricks(pygame.sprite.Sprite):\r\n def __init__(self, x , y, brick):\r\n super().__init__()\r\n self.image = pygame.image.load(brick)\r\n self.image = pygame.transform.rotozoom(self.image, 0 , 0.17)\r\n self.rect = self.image.get_rect(topleft = (x,y))\r\n\r\n\r\n# plate group and object\r\nplate_group = pygame.sprite.GroupSingle()\r\nplate_group.add(Plate())\r\n\r\n\r\n# array of bricks, brick group and brick object\r\nbricks = [f\"graphics/bricks/breakout_brick{i}.png\" for i in range(1,11)]\r\n\r\nbrick_groups = [pygame.sprite.Group() for i in range(5)]\r\namount_of_bricks = 12\r\nmargin = 10\r\nfor j in range(5):\r\n for i in range(amount_of_bricks):\r\n new_brick = Bricks(65 * i + margin, j * 22, choice(bricks))\r\n brick_groups[j].add(new_brick)\r\n amount_of_bricks -= 2\r\n margin += 65\r\n\r\n\r\n# ball groups and object\r\nball_group = pygame.sprite.GroupSingle()\r\nball_group.add(Ball())\r\n","repo_name":"abey-asmare/breakout-game","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11226199914","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[83]:\n\n\nimport ipynb\n\n\n# In[71]:\n\n\nimport gridstatusio as gs\nimport gridstatus as g\nimport pandas as pd\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport datetime\n\n\n# In[87]:\n\n\ndef hourly_data(df):\n df[\"Hour\"] = df[\"Time\"].dt.hour\n cols_dict = {}\n \n for c in range(len(df.dtypes)):\n if df.dtypes[c] in ['int64','float64']:\n cols_dict[df.columns[c]] = 'mean'\n \n df2 = df.groupby(['Hour'], as_index=False).agg(cols_dict)\n return df2\n\n\n# In[5]:\n\n\ndef hourly_daily_data(df):\n fuel_mix_hourly = df.set_index(\"Time\").resample(\"H\").mean().reset_index()\n return fuel_mix_hourly\n\n# def hourly_data_by_day(df):\n# #fuel_mix_hourly = df.set_index(\"Time\").resample(\"H\").mean().reset_index could be more effective\n# df[\"Hour\"] = df[\"Time\"].dt.hour\n# df[\"Date\"] = df[\"Time\"].dt.date\n# cols_dict = {}\n \n# for c in range(len(df.dtypes)):\n# if df.dtypes[c] != 'object':\n# cols_dict[df.columns[c]] = 'mean'\n \n# df2 = df.groupby(['Date','Hour'], as_index=False).agg(cols_dict) \n# df2[\"Date\"] = pd.to_datetime(df2[\"Date\"])\n# return df2\n\n\n# In[ ]:\n\n\ndef line_single(df, num_params): \n \n #if we enter the whole name of the df and its colname this should work with multiple dfs, actually it won't\n \n #x axis column\n x_axis = input('Enter x-axis colname: ')\n while x_axis not in df.columns:\n print('***Invalid colname***')\n x_axis = input('Enter x-axis colname: ')\n \n #enter columns to plot\n params = []\n for i in range(num_params):\n col = input('Enter y-axis colname: ') \n while col not in df.columns:\n print('***Invalid colname***')\n col = input('Enter colname: ')\n params.append(col)\n \n #set up figure\n fig, ax = plt.subplots(figsize=(10,6))\n ax.set_title(input('Enter title: '))\n ax.set_ylabel(input('Enter y-axis label: '))\n ax.set_xlabel(input('Enter x-axis label: '))\n colors = list(mcolors.TABLEAU_COLORS.keys())\n \n labs = []\n lns_list = []\n count = 0\n for p in params: #set a parameter for xticks for date ranges +- longer than a day\n #if len(df[x_axis]) > 24:\n lns_var = ax.plot(df[x_axis] ,df[p], label = p, color=colors[count])\n lns_list.append(lns_var)\n labs.append(p)\n count += 1\n\n #set up legend\n lns = []\n for l in lns_list:\n for m in l:\n lns.append(m)\n \n legend_loc = 
input('Legend in or out of plot? ')\n if legend_loc == 'in':\n loc = int(input('Enter legend location: '))\n ax.legend(lns, labs, loc=loc) #loc=loc, #put the legend to the right side every time\n elif legend_loc == 'out':\n plt.legend(bbox_to_anchor=(1.05, .5), loc='center left')\n plt.tight_layout()\n \n plt.show()\n \n#Add annotation feature\n\n\n# In[ ]:\n\n\ndef line_dual(df, num_params): #num dfs\n \n #if we enter the whole name of the df and its colname this should work with multiple dfs, actually it won't\n \n #x axis column\n x_axis = input('Enter x-axis colname: ')\n while x_axis not in df.columns:\n print('***Invalid colname***')\n x_axis = input('Enter x-axis colname: ')\n \n #enter columns to plot\n params = {}\n for i in range(num_params):\n col = input('Enter y-axis colname: ') \n while col not in df.columns:\n print('***Invalid colname***')\n col = input('Enter y-axis colname: ')\n axis = input('Which axis? (left or right): ')\n while axis not in ['left','right']:\n print('***Invalid axis***')\n axis = input('Which axis? (left or right): ')\n params[col] = axis\n \n #print(params)\n \n #set up figure\n fig, ax = plt.subplots(figsize=(10,6))\n ax.set_title(input('Enter title: '))\n ax.set_ylabel(input('Enter left y-axis label: '))\n ax.set_xlabel(input('Enter x-axis label: '))\n colors = list(mcolors.TABLEAU_COLORS.keys())\n \n #for dual axis\n ax2 = ax.twinx()\n ax2.set_ylabel(input('Enter right y-axis label: '))\n \n lns_list = []\n count = 0\n for p,a in params.items():\n if a == 'left':\n ax.plot(df[x_axis] ,df[p], label = p)\n lns_list.append(ax.plot(df[x_axis] ,df[p], label = p, color = colors[count]))\n else:\n ax2.plot(df[x_axis] ,df[p], label = p)\n lns_list.append(ax2.plot(df[x_axis] ,df[p], label = p, color = colors[count]))\n count += 1\n\n #set up legend\n lns = []\n for l in lns_list:\n for m in l:\n lns.append(m)\n labs = [l.get_label() for l in lns]\n \n legend_loc = input('Legend in or out of plot? 
')\n if legend_loc == 'in':\n loc = int(input('Enter legend location: '))\n ax.legend(lns, labs, loc=loc) #loc=loc, #put the legend to the right side every time\n elif legend_loc == 'out':\n plt.legend(bbox_to_anchor=(1.05, .5), loc='center left')\n plt.tight_layout()\n \n plt.show()\n\n\n# In[91]:\n\n\ndef basics(df):\n if 'Time' in df.columns:\n df['Time'] = pd.to_datetime(df['Time'])\n print(df.info(), '\\n\\n', df.describe(), '\\n\\n', df.isna().sum(), '\\n\\n')\n return df.head()\n\n\n# In[ ]:\n\n\n# def multi_df(*args):\n \n# #set up figure\n# fig, ax = plt.subplots(figsize=(10,6))\n# ax.set_title(input('Enter title: '))\n# ax.set_ylabel(input('Enter y-axis label: '))\n# ax.set_xlabel(input('Enter x-axis label: '))\n# colors = list(mcolors.TABLEAU_COLORS.keys())\n \n# for c in args:\n# c.columns\n\n\n# In[ ]:\n\n\n#def binned(df,bins,labels): #binning function\n\n\n# In[ ]:\n\n\n#def bar_hour(df):\n\n\n# In[46]:\n\n\n# caiso = g.CAISO()\n# mix_df = caiso.get_fuel_mix(\"2023-03-16\")\n# load_df = caiso.get_load(\"2023-03-16\")\n# as_df = caiso.get_as_prices(\"2023-03-16\")\n# full_df = hourly_mix_load_as(mix_df,load_df,as_df)\n\n\n# In[33]:\n\n\n# mix_df2023 = caiso.get_fuel_mix(start='2023-01-01',end='today')\n\n","repo_name":"larsenburack/portfolio","sub_path":"Grid_Status_Functions.py","file_name":"Grid_Status_Functions.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6707336160","text":"# EXAMPLE of a MASTER RETURNER: SSC : /usr/lib/python3.6/site-packages/sseape/returners/sseapi_return.py\n# Salt Open /usr/lib/python3.6/site-packages/salt/returners/cassandra_cql_return.py\n#\n# MASTER RETURNER\n# v 1.0\n# Alexandre Hugla\n# 12 Dec 2021\n# alex_event_returner1.py\n#\n# USAGE :\n# -----\n# Requires LogInsight config to be set:\n# A. On Salt Master Open Source\n# Put in '/etc/salt/master.d/event_return.conf':\n# vrli_return:\n# address: vrli.cpod-vrealize.az-fkd.cloud-garage.net\n# port: 9000\n# B. 
On Saltconfig (SSC)\n# Put in '/etc/salt/master.d/vrli.conf':\n# vrli_return:\n# address: vrli.cpod-vrealize.az-fkd.cloud-garage.net\n# port: 9000\n#\n\n\n\nimport json\nimport requests\n\n\ndef event_return(events): # events is a \"list\"\n PythonLogfile = \"/tmp/event_returnerLog.log\"\n ReturnerKind = \"Master_Returner\"\n\n # Open local log file and log parameters\n f = open(PythonLogfile, \"a\")\n\n # retrieve options and get masterFQDN, vRLI_SERVER and vRLI_PORT\n opts=json.dumps(__opts__)\n #f.write(str(opts)) # => lets you see it in json format (with \"\")\n jsonopts=json.loads(opts)\n grains=jsonopts[\"grains\"]\n #f.write(str(grains))\n masterFQDN=grains[\"fqdn\"]\n VRLIsettings=jsonopts[\"vrli_return\"]\n vRLI_SERVER=VRLIsettings[\"address\"]\n vRLI_PORT=VRLIsettings[\"port\"]\n\n # For each event in the list\n for event in events: # event is a \"dict\"\n tag = event.get(\"tag\", \"\")\n data = event.get(\"data\", \"\")\n minion_name = data.get(\"id\")\n Function = data.get(\"fun\")\n isSuccess = data.get(\"success\")\n target = data.get(\"tgt\")\n '''\n #purely log\n if (tag.find('salt/auth') != 0) and (tag.find('salt/raas_master/iteration') != 0):\n f.write(\"all tag : %s\\n\" % (tag))\n #f.write(\"all data : %s\\n\" % (data))\n '''\n\n # Find if event is a return of a job execution\n # Do not log to vRLI if Function = 'saltutil.find_job'\n # The function must also exist, otherwise we end up with Function=None logs\n #UNNECESSARY : if (tag.find('ret') != -1) and (tag.find('salt/auth') != 0) and (Function != \"saltutil.find_job\") and (Function):\n if (tag.find('ret') != -1) and (Function != \"saltutil.find_job\") and (Function):\n f.write(\"This is an execution return\\n\")\n f.write(\"tag : %s\\n\" % (tag))\n #f.write(\"data : %s\\n\" % (data))\n '''\n f.write(\"minion_name : %s\\n\" % (minion_name))\n f.write(\"target : %s\\n\" % (target))\n f.write(\"ReturnerKind : %s\\n\" % (ReturnerKind))\n f.write(\"Function : %s\\n\" % (Function))\n f.write(\"isSuccess : %s\\n\" % (isSuccess))\n '''\n # Get State File if exist, N/A if not\n statefile = func_GET_STATE_FILE(Function,data,\"fun_args\")\n\n\n # Send to Log Insight\n func_VRLI_CREATE_AND_SEND(f,masterFQDN,Function,statefile,minion_name,isSuccess,vRLI_SERVER,vRLI_PORT)\n\n\n else:\n #f.write(\"This is not an execution return\\n\")\n # Handle the minion timeout case in SSC (tag..../complete). 
No timeout handling on the Salt Master Open Source\n if (tag.find('complete') != -1):\n f.write(\"SSC timeout\\n\")\n isSuccess=\"TIMEOUT\"\n all_minion_name=data[\"missing\"] # => ['vra-006613.cpod-vrealize.az-fkd.cloud-garage.net', 'vra-006612.cpod-vrealize.az-fkd.cloud-garage.net']\n statefile = func_GET_STATE_FILE(Function,data,\"arg\")\n for minion_name in all_minion_name:\n func_VRLI_CREATE_AND_SEND(f,masterFQDN,Function,statefile,minion_name,isSuccess,vRLI_SERVER,vRLI_PORT)\n\n\n f.close()\n\n\n# Create message and send it to Log Insight\ndef func_VRLI_CREATE_AND_SEND(f,masterFQDN,Function,statefile,minion_name,isSuccess,vRLI_SERVER,vRLI_PORT):\n # Create Message\n messageLog = \"Salt Execution, ParentMaster: %s, ReturnerKind: Master_Returner, Function: %s, StateFile: %s, Minion: %s, IsSuccess: %s\" % (masterFQDN,Function,statefile,minion_name,isSuccess)\n messagevRLI= '{\"messages\":[{\"text\":\"%s\"}]}' % (messageLog)\n f.write(\"PAYLOAD: %s\\n\" % (messagevRLI))\n # Send to Log Insight\n URL = \"http://%s:%s/api/v1/messages/ingest/1\" % (vRLI_SERVER, vRLI_PORT)\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n messagevRLI_JSON = json.loads(messagevRLI)\n resp = requests.post(URL, verify=False, headers=headers, json=messagevRLI_JSON)\n if resp.status_code != 200:\n f.write(\" => ERROR : Event NOT sent to Log Insight\\n\")\n else:\n f.write(\" => SUCCESS : Event sent to Log Insight\\n\")\n retour = \"func_VRLI_CREATE_AND_SEND finished\"\n return retour\n\n\ndef func_GET_STATE_FILE(Function,data,argname):\n # Get State File if it exists, N/A if not\n # argname is \"fun_args\" (common return) or \"arg\" (tag 'complete' in SSC)\n # 'fun_args' or 'arg' is interesting only if the Function is 'state.apply'\n statefile = \"N/A\"\n if (Function == \"state.apply\"):\n argStr=\"\"\n argnames=data[argname] # is a \"list\"\n for argname in argnames: # is a \"str\"\n argStr = \"%s-%s\" % (argStr,argname) # concatenate all arg and add '-' between them\n statefile = argStr[1:] # remove first '-' if present\n statefile = func_CLEAN_STATE_FILE(statefile) # if secops we clean the state file\n return statefile\n\n\n# state files are complex in secops so we clean them\n# if Vulnerability, statefile = vman.policy_assess_u-{'queue': True, 'pillar': {'policy_filename': '64766296-9b81-4558-bc25-c0f3712cd9d1_20'}\n# if Compliance, statefile = locke.policy_assess_u-{'queue': True, 'pillar': {'policy_uuid': 'b98c8ceb-1d41-4f69-86f2-3422e19137f9', 'policy_filename': 'b9'}\ndef func_CLEAN_STATE_FILE(statefile_in):\n statefile_out = statefile_in\n if (statefile_in.find(\"vman.policy_assess\") != -1):\n statefile_out = \"vman.policy_assess\"\n if (statefile_in.find(\"locke.policy_assess\") != -1):\n statefile_out = \"locke.policy_assess\"\n return statefile_out\n\n\n\n# returns are identified by : tag : salt/job/20211203122055473529/ret/vra-001517.cpod-vrealize.az-fkd.cloud-garage.net\n\n# \"salt/auth\" tags should be avoided to limit logging (certificates)\n# they only exist in SSC\n\n# If the minion does not return (for example because it is absent):\n #In SSC:\n #tag : salt/job/20211206132826022514/complete\n #data for test.ping : {'returned': [], 'missing': ['vra-006605'], 'fun': 'test.ping', 'arg': [], '_stamp': '2021-12-06T13:29:00.063231', '_master_path': ['saltstack_enterprise_installer']}\n #data for state.apply: {'returned': [], 'missing': ['vra-006605'], 'fun': 'state.apply', 'arg': ['alex_vim'], '_stamp': '2021-12-07T16:48:07.576132', '_master_path': 
['saltstack_enterprise_installer']}\n\n #In Salt Open Source (master only):\n # NOTHING is reported !!\n\n#SECOPS:\n# if Vulnerability, statefile = vman.policy_assess_u-{'queue': True, 'pillar': {'policy_filename': '64766296-9b81-4558-bc25-c0f3712cd9d1_20'}\n# if Compliance, statefile = locke.policy_assess_u-{'queue': True, 'pillar': {'policy_uuid': 'b98c8ceb-1d41-4f69-86f2-3422e19137f9', 'policy_filename': 'b9'}\n# the events carry no statistics about criticality or standards => we cannot do it in returners\n","repo_name":"ahugla/Salt","sub_path":"returners/master returners/Master_Returner_to_vRLI_v1.py","file_name":"Master_Returner_to_vRLI_v1.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34974776145","text":"def xml_tree_equivalence(e1, e2):\n \"\"\"\n Rough XML comparison function based on https://stackoverflow.com/a/24349916/1294458.\n This is necessary to provide some sort of structural equivalence of a generated XML\n tree; however there is no XML deserialisation implementation yet. A naive text comparison\n fails because it seems it enforces ordering, which seems to vary between python versions\n etc. Strictly speaking, I think, only the *leaf-list* element mandates ordering.. this\n function uses simple sorting on tag name, which I think, should maintain the relative\n order of these elements.\n \"\"\"\n if e1.tag != e2.tag:\n return False\n if e1.text != e2.text:\n return False\n if e1.tail != e2.tail:\n return False\n if e1.attrib != e2.attrib:\n return False\n if len(e1) != len(e2):\n return False\n e1_children = sorted(e1.getchildren(), key=lambda x: x.tag)\n e2_children = sorted(e2.getchildren(), key=lambda x: x.tag)\n if len(e1_children) != len(e2_children):\n return False\n return all(xml_tree_equivalence(c1, c2) for c1, c2 in zip(e1_children, e2_children))\n","repo_name":"robshakir/pyangbind","sub_path":"tests/serialise/xml_utils.py","file_name":"xml_utils.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"54"} +{"seq_id":"34511568902","text":"import os\nimport os.path as osp\nimport time\n\nimport matplotlib.pyplot as plt\nimport mmcv\nfrom mmcv.runner import get_dist_info\nimport numpy as np\nimport open3d as o3d\nfrom open3d.geometry import TriangleMesh as T\nimport torch\n\nfrom test_utils import visualize_novel_views, create_novel_views_dir\n\n# NOVEL_IMG_SIZE = (800, 300)\nNOVEL_IMG_SIZE = (800, 300)\nFOV_H, FOV_V = np.pi * 60 / 180, np.pi * 20 / 180 # 60 degree horizontal FOV, 20 degree vertical FOV (values below)\nNOVEL_CAM_K = np.array([\n [NOVEL_IMG_SIZE[0] / 2 / np.sqrt(np.tan(FOV_H / 2)), 0, NOVEL_IMG_SIZE[0] / 2],\n [0, NOVEL_IMG_SIZE[1] / 2 / np.sqrt(np.tan(FOV_V / 2)), NOVEL_IMG_SIZE[1] / 2],\n [0, 0, 1]]) \n\n\ndef sample_novel_cam_poses():\n lidar2front_cam = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, -1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n lidar2back_cam = np.array([\n [-1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, -1.0, 0.0],\n [0.0, -1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n \n forward_trans = 3.0\n rightward_trans = [-3.0, 0, 3.0]\n front_rads = [-np.pi / 6, 0, np.pi / 6]\n back_rads = [np.pi / 6, 0, -np.pi / 6]\n\n def _generate_cam_poses(forward_trans, lidar2cam, rads):\n T_base = np.eye(4)\n T_base[1, 3] = forward_trans\n poses = []\n for i in range(3):\n T_rightward = np.eye(4)\n T_rightward[0, 3] = rightward_trans[i]\n T_rightward[:3, :3] 
= T.get_rotation_matrix_from_xyz((0, 0, rads[i]))\n cam_pose = T_base @ T_rightward @ lidar2cam\n poses.append(cam_pose)\n return poses\n\n forward_front_cams = _generate_cam_poses(forward_trans, lidar2front_cam, front_rads)\n center_front_cams = _generate_cam_poses(0.5, lidar2front_cam, front_rads)\n center_back_cams = _generate_cam_poses(-0.5, lidar2back_cam, back_rads)\n backward_back_cams = _generate_cam_poses(-forward_trans, lidar2back_cam, back_rads)\n # cam_poses = forward_front_cams + center_front_cams + center_back_cams + backward_back_cams\n # cam_poses = forward_front_cams + backward_back_cams\n cam_poses = center_front_cams + center_back_cams\n cam_poses = np.stack(cam_poses)\n return cam_poses\n\n\ndef visualize_cam_poses(cam_poses, cam_K=None, img_size=None):\n import pytransform3d.camera as pc\n import pytransform3d.visualizer as pv\n lidar_frame = T.create_coordinate_frame(size=2.0, origin=[0, 0, 0])\n frames = [lidar_frame]\n for cam_pose in cam_poses:\n cam_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])\n cam_frame.transform(cam_pose)\n frames.append(cam_frame)\n # o3d.visualization.draw_geometries(frames)\n\n if cam_K is None:\n cam_K = NOVEL_CAM_K\n \n if img_size is None:\n img_size = NOVEL_IMG_SIZE\n\n fig = pv.figure()\n # ax = None\n for pose in cam_poses:\n fig.plot_transform(A2B=pose, s=0.5)\n fig.plot_camera(M=cam_K, cam2world=pose, virtual_image_distance=1, sensor_size=img_size)\n # ax = pc.plot_camera(ax=ax, M=cam_K, cam2world=pose, virtual_image_distance=1, sensor_size=img_size, ax_s=0.5, color='red')\n # plt.show()\n fig.show()\n\n\ndef generate_novel_depth(model, data_loader, save_root):\n model.eval()\n model.module.set_inference_mode(True)\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n time.sleep(2) # This line can prevent deadlock problem in some cases.\n save_dir = osp.join(save_root, 'novel_views')\n create_novel_views_dir(save_dir)\n\n # novel_cam_poses = torch.tensor(sample_novel_cam_poses()).float().cuda()\n novel_cam_K = torch.tensor(NOVEL_CAM_K).float().cuda()\n for i, data in enumerate(data_loader):\n novel_cam_poses = torch.inverse(data['lidar2camera'].data[0][0]).cuda()\n data['novel_cam_intrinsics'] = novel_cam_K\n data['novel_img_size'] = NOVEL_IMG_SIZE\n data['novel_cam_poses'] = novel_cam_poses\n with torch.no_grad():\n result = model(**data)\n\n visualize_novel_views(data, result, save_dir, i * world_size + rank)\n\n results.extend(result)\n\n if rank == 0:\n batch_size = data['img'].data[0].shape[0]\n for _ in range(batch_size * world_size):\n prog_bar.update()\n return results\n\n\nif __name__ == '__main__':\n cam_poses = sample_novel_cam_poses()\n visualize_cam_poses(cam_poses)\n","repo_name":"TroyeFun/BEV-RF","sub_path":"tools/data_converter/generate_novel_depth.py","file_name":"generate_novel_depth.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"71378848483","text":"import json\nimport os\nimport socket\nfrom collections import namedtuple\nfrom datetime import timedelta, datetime\nfrom typing import NamedTuple, Pattern, Callable, Tuple, List\n\nimport requests\n\nfrom cpt.common import LOG_TIMESTAMP_RE, SCRIPT_STARTED_RE, START_ITER, START_TRX, \\\n END_TRX, END_TRX_PASSED_THINK_TIME, END_TRX_PASSED, OPERATION_NAME_START_RE\n\nSTATUS_FAIL = 'Fail'\nSTATUS_PASS = 'Pass'\nTransactionTimes = 
namedtuple('TransactionTimes', 'duration, think, wasted')\n\nMMM_BUILD_INFO_GQL = {\n \"query\": \"query getBuildInfo {\\n _buildInfo {\\n branch\\n buildHost\\n buildUserName\\n buildVersion\\n \"\n \" commitId\\n commitIdAbbrev\\n commitTime\\n commitUserName\\n totalCommitCount\\n }\\n}\\n\",\n \"operationName\": \"getBuildInfo\"}\n\n\ndef build_info(be_url):\n if be_url is None:\n return None\n r = requests.post(be_url, auth=('admin', 'admin'), verify=False, json=MMM_BUILD_INFO_GQL)\n if r.status_code != 200:\n return None\n ret = r.json()['data']['_buildInfo']\n if ret:\n return ret\n else:\n return None\n\n\nclass ELKEvent:\n def __init__(self, event_type: str, event: dict, timestamp):\n self.eventType = event_type\n self.event = event\n self.timestamp = timestamp\n self.eventBuildInfo = None\n self.eventSourceHost = None\n\n\nclass FeTransactionTestResultEvent:\n # only OS envs in constructor\n # field names according to com.ataccama.one.performance.model.TestResultEvent\n def __init__(self, test_env: str = 'test_env', cpt_branch: str = 'cpt_branch',\n cpt_head_commit: str = 'cpt_head_commit'):\n self.testEnv = test_env\n self.cptBranch = cpt_branch\n self.cptHeadCommit = cpt_head_commit\n self.scriptName = None\n self.trxName = None\n self.result = None\n self.duration = -1\n self.testError = None\n # from build info POST to https://backend-toba-perf.build.atc/graphql\n self.appBranch = None\n self.appHeadCommit = None\n self.sqlServiceType = None\n self.gqlCount = -1\n self.iteration = 0\n\n\nclass FeTransaction2Gql:\n def __init__(self, trx_name: str, gql: dict, iteration: int):\n self.trx_name = trx_name\n self.gql: dict = gql\n self.iteration = iteration\n\n\nclass FeTransaction:\n def __init__(self, name: str, status: str,\n end_time: str = '', error: str = '',\n iteration: int = 0, duration: float = 0,\n wasted_time: float = 0, think_time: float = 0):\n self.trx_name = name\n self.status = status\n self.duration = duration\n self.think_time = think_time\n self.wasted_time = wasted_time\n self.trx_time = round((duration - wasted_time), 3)\n self.error = error\n self.iteration = iteration\n self.end_time = end_time\n\n\nclass FeTransactionGqlCount:\n def __init__(self, trx_name, gql_count, iteration):\n self.trx_name = trx_name\n self.gql_count = gql_count\n self.iteration = iteration\n\n def __str__(self):\n if self.trx_name is not None:\n return str(self.iteration) + \".\" + self.trx_name + \" : \" + str(self.gql_count) + \" GQLs\"\n else:\n return str(self.iteration) + \".Unassigned : \" + str(self.gql_count) + \" GQLs\"\n\n\nclass ParseResults:\n def __init__(self):\n self.script_start_time = None\n self.current_iteration: int = 0\n self.ended_fe_transaction: List[FeTransaction] = []\n self.current_transaction: str = ''\n self.opened_transaction: List[str] = []\n # remember previous line with relative timestamp for error and end time\n self.previous_line: str = ''\n self.line: str = ''\n self.rule_result: List[str] = []\n # table [fe_trans, iteration, gql] created during parsing\n self.fet_iter_gql: List[FeTransaction2Gql] = []\n # table [fe_trans, iteration, n_gqls]\n self.fet_iter_n_gqls: List[FeTransactionGqlCount] = []\n self.fe_elk_events: List[dict] = []\n self.build_info = build_info(os.getenv('APP_GQL_URL'))\n\n def process_gql(self, rule_result: dict):\n gql = rule_result\n last_opened_fe_trx = self.opened_transaction[-1] if len(self.opened_transaction) > 0 else None\n trx_gql = FeTransaction2Gql(last_opened_fe_trx, gql, self.current_iteration)\n 
self.fet_iter_gql.append(trx_gql)\n\n def fe_trx_end_time(self) -> str:\n \"\"\" ISO time for transaction end\"\"\"\n match = LOG_TIMESTAMP_RE.match(self.previous_line)\n if match:\n t = int(match.group(1))\n end_time = self.script_start_time + timedelta(milliseconds=t)\n return end_time.isoformat()\n return ''\n\n def process_end_transaction(self, rule_result):\n # event_result = name , status, times (for Passed trx)\n name = rule_result[0]\n status = rule_result[1]\n if status == STATUS_PASS:\n t_times: TransactionTimes = transaction_times(self)\n # trx_time is calculated during initialization\n fet = FeTransaction(name=name, status=status,\n duration=t_times.duration,\n wasted_time=t_times.wasted,\n think_time=t_times.think,\n end_time=self.fe_trx_end_time(),\n iteration=self.current_iteration)\n self.ended_fe_transaction.append(fet)\n if status == STATUS_FAIL:\n fet = FeTransaction(name=name, status=status,\n end_time=self.fe_trx_end_time(),\n error=self.previous_line,\n iteration=self.current_iteration)\n self.ended_fe_transaction.append(fet)\n self.update_opened_fe_transaction(name)\n\n def update_opened_fe_transaction(self, ended_trans: str):\n try:\n self.opened_transaction.remove(ended_trans)\n except ValueError as e:\n pass\n\n def get_gql_count(self, fet_name, iteration):\n ret = [x.gql_count for x in self.fet_iter_n_gqls\n if x.trx_name == fet_name and x.iteration == iteration]\n return int(ret[0]) if len(ret) == 1 else None\n\n def set_gql_counts(self):\n # creates set\n fe_transactions = {trx_gql.trx_name for trx_gql in self.fet_iter_gql}\n fe_iterations = {trx_gql.iteration for trx_gql in self.fet_iter_gql}\n for i in sorted(fe_iterations):\n for fe_trx in fe_transactions:\n trx_gql_count = [x for x in self.fet_iter_gql\n if (x.trx_name == fe_trx and int(x.iteration) == int(i))]\n gql_count = len(trx_gql_count)\n item = FeTransactionGqlCount(fe_trx, gql_count, i)\n self.fet_iter_n_gqls.append(item)\n\n def map_to_elk(self, source: FeTransaction) -> FeTransactionTestResultEvent:\n ret = FeTransactionTestResultEvent(os.getenv('TEST_ENV'),\n os.getenv('GIT_BRANCH'),\n os.getenv('GIT_COMMIT'))\n ret.scriptName = os.getenv('LR_VUGEN_SCRIPT')\n ret.appBranch = self.build_info['branch'] if self.build_info else None\n ret.appHeadCommit = self.build_info['commitId'] if self.build_info else None\n ret.sqlServiceType = os.getenv('SQL_SERVICE_TYPE')\n ret.iteration = source.iteration\n ret.duration = source.duration\n ret.trxName = source.trx_name\n ret.result = source.status\n ret.testError = source.error\n ret.gqlCount = self.get_gql_count(source.trx_name, source.iteration)\n return ret\n\n def set_fe_elk_events(self):\n \"\"\" Map self.fe_gql to FeTransactionTestResultEvent\"\"\"\n for fet in self.ended_fe_transaction:\n elk = self.map_to_elk(fet)\n elk_event = ELKEvent('fe-test-results', elk.__dict__, fet.end_time)\n # TODO duplicated\n elk_event.eventBuildInfo = self.build_info\n elk_event.eventSourceHost = socket.gethostname()\n self.fe_elk_events.append(elk_event.__dict__)\n\n\nclass Rule(NamedTuple):\n regexp: Pattern\n # groups available from regexp pattern\n groups: Tuple[int, ...] 
= (1,)\n set: Callable[[ParseResults], None] = lambda x: None\n sub_rules: List = []\n\n\ndef set_script_start_time(x: ParseResults):\n y = x.rule_result\n x.script_start_time = datetime.strptime(y[0], \"%Y-%m-%d %H:%M:%S\")\n\n\ndef set_iteration_start(x: ParseResults):\n y = x.rule_result\n x.current_iteration = int(y[0])\n\n\ndef set_transaction_start(x: ParseResults):\n y = x.rule_result\n trx = str(y[0])\n x.current_transaction = trx\n x.opened_transaction.append(trx)\n\n\ndef set_transaction_end(x: ParseResults):\n y = x.rule_result\n x.process_end_transaction(y)\n\n\ndef set_gql_request(x: ParseResults):\n stripped = x.line.replace('\\\\\\\\', '\\\\')\n # when created by cmd line version\n if \"\\t\" in stripped:\n tab = stripped.index(\"\\t\")\n stripped = stripped[0:tab]\n gql = json.loads(stripped)\n x.process_gql(gql)\n\n\ndef transaction_times(lp: ParseResults):\n ret = [float(x) for x in lp.rule_result[2]]\n if len(ret) == 2:\n # think time = 0\n return TransactionTimes(ret[0], 0, ret[1])\n if len(ret) == 3:\n return TransactionTimes(ret[0], ret[1], ret[2])\n\n\ndef set_transaction_times(x: ParseResults, y):\n # no set for transaction_times\n return None\n\n\ntransaction_end_rules = [\n # duration,think time,wasted time goes first\n Rule(END_TRX_PASSED_THINK_TIME, groups=(1, 2, 3)),\n # duration,wasted time\n Rule(END_TRX_PASSED, groups=(1, 2))\n]\nrequest_response_log_rules = [\n Rule(SCRIPT_STARTED_RE, set=set_script_start_time),\n Rule(START_ITER, set=set_iteration_start),\n Rule(START_TRX, set=set_transaction_start),\n # with status Pass\n Rule(END_TRX, groups=(1, 2), set=set_transaction_end, sub_rules=transaction_end_rules),\n Rule(OPERATION_NAME_START_RE, set=set_gql_request)\n]\n","repo_name":"TomasBahnik/pslib","sub_path":"python/pycpt/cpt/parse_rules.py","file_name":"parse_rules.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32005911978","text":"import decide_politics_base_test\n\nimport pytest\nfrom unittest.mock import patch\n\nfrom decide_politics.core.models import CFields\nfrom decide_politics.core.models import Customer\nfrom decide_politics.transactions import transaction_base as tb\nfrom decide_politics.transactions.transaction_manager import TransactionManager\n\n@pytest.fixture\ndef dummy_customer():\n return Customer.create_new(attributes={\n # Test number not connected to someones real phone\n CFields.PHONE_NUMBER: \"+15419670010\"\n })\n\n@pytest.fixture\ndef dummy_transaction():\n class DummyTransactionBase(tb.TransactionBase):\n ID = 'dummy_id'\n\n STATE_NODES = {\n 'begin': tb.StateNode('begin'),\n 'end': tb.StateNode('end'),\n }\n\n def __init__(self):\n TRIGGER_MESSAGE = \"TRIGGER TEXT MESSAGE\"\n self.STATE_NODES['begin'].register_trigger(\n lambda trigger_data: trigger_data.MESSAGE == TRIGGER_MESSAGE,\n self.STATE_NODES['end'],\n )\n\n super().__init__(self.STATE_NODES['begin'])\n\n return DummyTransactionBase()\n\n\nclass TestTransactionManager:\n @patch('decide_politics.transactions.transaction_manager.TransactionManager.COMMAND_TO_TRANSACTION_MAP',\n dict(DUMMY=dummy_transaction()))\n @patch('decide_politics.transactions.transaction_manager.TransactionManager.TRANSACTION_ID_TO_TRANSACTION_MAP',\n {dummy_transaction().ID: dummy_transaction()})\n def test_general(self, dummy_customer, dummy_transaction):\n TransactionManager.handle_message(dummy_customer, \"DUMMY\")\n\n assert dummy_customer[CFields.CUR_TRANSACTION_ID] == 
dummy_transaction.ID\n assert dummy_customer[CFields.TRANSACTION_STATE_ID] == \"begin\"\n\n TransactionManager.handle_message(dummy_customer, \"TRIGGER TEXT MESSAGE\")\n\n assert dummy_customer[CFields.CUR_TRANSACTION_ID] == dummy_transaction.ID\n assert dummy_customer[CFields.TRANSACTION_STATE_ID] == \"end\"\n","repo_name":"georgeaf99/DecidePolitics","sub_path":"tests/test_decide_politics/test_transaction_manager.py","file_name":"test_transaction_manager.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"22319560794","text":"def tic_tac_toe_winner(board):\n\n if board == \"XXXXXXXXX\" or board == \"OOOOOOOOO\":\n raise ValueError()\n\n if board_check(board, \"X\"):\n return \"X\"\n elif board_check(board, \"O\"):\n return \"O\"\n\n\ntest_cases = {\n \" \": None,\n \"2317 \": None,\n \"XXX \": \"X\",\n \" XXX \": \"X\",\n \" XXX\": \"X\",\n \"OOO \": \"O\",\n \" OOO \": \"O\",\n \" OOO\": \"O\",\n \"O O O \": \"O\",\n \" O O O \": \"O\",\n \" O O O\": \"O\",\n \"X X X \": \"X\",\n \" X X X \": \"X\",\n \" X X X\": \"X\",\n \"XO X O X\": \"X\",\n \"OX O X O\": \"O\",\n \"XXOOXXXOO\": None,\n \"XXXXXXXXX\": ValueError,\n \"OOOOOOOOO\": ValueError,\n}\n\n\ndef board_check(board, mark):\n return (\n (board[0] == board[1] == board[2] == mark)\n or (board[3] == board[4] == board[5] == mark)\n or (board[6] == board[7] == board[8] == mark)\n or (board[0] == board[3] == board[6] == mark)\n or (board[1] == board[4] == board[7] == mark)\n or (board[2] == board[5] == board[8] == mark)\n or (board[0] == board[4] == board[8] == mark)\n or (board[6] == board[4] == board[2] == mark)\n )\n\n\nfor board, expectation in test_cases.items():\n assert (\n tic_tac_toe_winner(board) == expectation\n ), f\"Expected {expectation!r} for {board!r} got {tic_tac_toe_winner(board)!r}\"\n","repo_name":"Cram-In/TestAutomation","sub_path":"Basic/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34537734600","text":"#coding:utf-8\nimport time\nimport json\nimport urllib.request, urllib.error, urllib.parse\nimport redis\nimport requests\nrequests.packages.urllib3.disable_warnings()\nimport pymysql\nimport pymysql.cursors\nfrom warnings import filterwarnings\nimport time\nfilterwarnings('ignore', category = pymysql.Warning)\ntest_db_ip = '192.168.1.101'\ntest_user = 'dddev'\ntest_passwd = '123456'\ntest_mainDb='ctcdb_new_test'\ntest_ckDb='ctcdb_ck_test'\nconn_test = pymysql.connect(host=test_db_ip, user=test_user, passwd=test_passwd, port=3306,charset=\"utf8\")\n\nredis_db_ip='192.168.1.101'\nredis_port=6379\nheaders={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'}\nstore_url = 'http://192.168.1.251:30011'\nglobal_url='http://192.168.1.251:39000'\ndef changeIntoStr(data,str_data=''):\n # normalize input to str; the original py2 helper had two identical isinstance(data, str) branches after the 2to3 port\n if isinstance(data, bytes):\n str_data = data.decode('utf-8')\n elif isinstance(data, str):\n str_data = data\n return str_data\nserver_url='http://192.168.1.251:31010'\n#Log in\nSession=requests.Session()\nlogin_url='http://192.168.1.251:31010/users/login'\nlogin_data = {'userName': 13111111111, 'password': 123456}\nlogin_response = Session.post(url=login_url, data=login_data, headers=headers)\n#Switch city\nnanjing_url = 
'{0}/users/updateAgency'.format(server_url)\nnanjing_data = {'cityId': 320100, 'agencyId': 101}\nnanjing_data_response = Session.post(url=nanjing_url, data=nanjing_data, headers=headers)\n#Publish a single product\naddpr_url='{0}/api/goods/shelve/publish'.format(server_url)\n\n\ntask_num = input('\\n1.Publish a single product,\\n\\n2.Publish a bundle\\n')\nif str(task_num) == str(1):\n input_price = input('Enter the listing price: ')\n input_dadou=input('Enter the Dadou instant discount: ')\n addprone_data = {\n \"amount\": \"1\",\n \"areaPriceStr\": \"156:1#157:1#166:1#173:1#230:1#232:1#234:1#236:1#238:1#242:1#244:1#248:1#250:1#252:1#256:1#258:1#260:1#262:1#264:1#524:1#526:1#588:1#590:1#592:1#666:1#668:1#670:1#672:1#706:1#708:1#710:1#712:1#714:1#716:1#718:1#720:1#722:1#724:1#726:1#728:1#730:1#732:1#734:1#736:1#738:1#740:1#742:1#744:1#746:1#748:1#780:1#782:1#784:1#786:1#794:1#\",\n \"catalogId\": \"4090000\",\n \"comboType\": \"0\",\n \"combos[0][id]\": \"137469\",\n \"combos[0][isFree]\": \"0\",\n \"combos[0][originalPrice]\": \"0\",\n \"combos[0][packageNum]\": \"1\",\n \"combos[0][preWarehouseId]\": \"13\",\n \"combos[0][price]\": \"0\",\n \"combos[0][specification]\": \"10支*1盒\",\n \"combos[0][unit]\": \"件\",\n \"dadou\": input_dadou,\n \"endTime\": \"2018-03-17 23:59:59\",\n \"id\": \"137469\",\n \"isAllFirst\": \"0\",\n \"isDirectSell\": \"0\",\n \"isDiscount\": \"1\",\n \"isHotFirst\": \"0\",\n \"isOrderLimit\": \"unlimited\",\n \"isTypeFirst\": \"0\",\n \"limit\": \"100\",\n \"notSoldPriceArea\": \"[]\",\n \"onSale\": \"0\",\n \"price\": input_price,\n \"showProduceDate\": \"1\",\n \"specification\": \"规格\",\n \"startTime\": \"2018-03-14 10:18:05\",\n \"title\": input_price + \"元的商品\",\n \"type\": \"0\",\n \"unit\": \"件\",\n \"vip\": \"0\",\n \"warehouseId\": \"13\",\n \"yjPrice\": \"0\"\n }\n addpr_data_response = Session.post(url=addpr_url, data=addprone_data, headers=headers)\n print(addpr_data_response.text)\n time.sleep(10)\nelif str(task_num) == str(2):\n store_name = input('Enter the store login account: ')\n price1=input('Price of the first product: ')\n price2=input('Price of the second product: ')\n input_dadou = input('Enter the Dadou instant discount: ')\n price=str(float(price1)+float(price2)) # sum the prices numerically; input() returns str, so '+' alone would concatenate the digits\n addprmany_data = {\n \"amount\": \"1\",\n \"areaPriceStr\": \"156:#157:#166:#173:#230:#232:#234:#236:#238:#242:#244:#248:#250:#252:#256:#258:#260:#262:#264:#524:#526:#588:#590:#592:#666:#668:#670:#672:#706:#708:#710:#712:#714:#716:#718:#720:#722:#724:#726:#728:#730:#732:#734:#736:#738:#740:#742:#744:#746:#748:#780:#782:#784:#786:#794:#\",\n \"catalogId\": \"4090000\",\n \"comboType\": \"2\",\n \"combos[0][id]\": \"137558\",\n \"combos[0][isFree]\": \"0\",\n \"combos[0][originalPrice]\": price1,\n \"combos[0][packageNum]\": \"1\",\n \"combos[0][preWarehouseId]\": \"13\",\n \"combos[0][price]\": price1,\n \"combos[0][specification]\": \"12*1箱\",\n \"combos[0][unit]\": \"件\",\n \"combos[1][id]\": \"137469\",\n \"combos[1][isFree]\": \"0\",\n \"combos[1][originalPrice]\": price2,\n \"combos[1][packageNum]\": \"1\",\n \"combos[1][preWarehouseId]\": \"13\",\n \"combos[1][price]\": price2,\n \"combos[1][specification]\": \"10支*1盒\",\n \"combos[1][unit]\": \"件\",\n \"dadou\": input_dadou,\n \"endTime\": \"2018-03-17 23:59:59\",\n \"id\": \"137558\",\n \"isAllFirst\": \"0\",\n \"isDirectSell\": \"0\",\n \"isDiscount\": \"1\",\n \"isHotFirst\": \"0\",\n \"isOrderLimit\": \"unlimited\",\n \"isTypeFirst\": \"0\",\n \"limit\": \"11\",\n \"notSoldPriceArea\": \"[]\",\n \"onSale\": \"0\",\n \"price\": price,\n \"showProduceDate\": \"1\",\n \"specification\": \"规格\",\n \"startTime\": \"2018-03-14 10:59:54\",\n \"title\": \"蒙牛11+美美7\",\n \"type\": \"0\",\n \"unit\": \"件\",\n \"vip\": 
\"0\",\n \"warehouseId\": \"13\",\n \"yjPrice\": \"0\"\n }\n addpr_data_response = Session.post(url=addpr_url, data=addprmany_data, headers=headers)\n print(addpr_data_response.text)\n time.sleep(10)\n\n\n\n\n\n\n\n\n","repo_name":"xixils520/RandyforTest","sub_path":"ReturnPromotion.py","file_name":"ReturnPromotion.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20022941070","text":"# !/usr/bin/python\n\n# Make sure to install exifread and argparse using pip\n\nimport os, exifread, argparse\nfrom shutil import copyfile\n\nextList=['jpg','jpeg','JPG','JPEG']\nparser = argparse.ArgumentParser(description='Organize media files based on EXIF data')\nparser.add_argument('source_dir',type=str,help='Source Directory')\nparser.add_argument('target_dir',type=str,help='Target Directory')\nargs=parser.parse_args()\noutput=args.target_dir\nif os.path.isdir(output)==False:\n os.mkdir(output)\nfor root, dirs, files in os.walk(args.source_dir,topdown=False):\n for name in files:\n filePath=os.path.join(root,name) # full filepath\n fileExt=filePath.split('.')[-1] # file extension\n for ext in extList:\n if fileExt==ext:\n f=open(filePath,'rb')\n date=exifread.process_file(f,stop_tag='EXIF DateTimeOriginal')['EXIF DateTimeOriginal']\n print (filePath)\n date=str(date).split(\" \")[0].split(\":\")\n year=output+'/'+date[0]\n month=year+'/'+date[1]\n day=month+'/'+date[2]\n dest=day+'/'+name\n print (dest)\n if os.path.isdir(year)==False:\n os.mkdir(year)\n if os.path.isdir(month)==False:\n os.mkdir(month)\n if os.path.isdir(day)==False:\n os.mkdir(day)\n if os.path.exists(dest)==False:\n copyfile(filePath, dest)\n else:\n print ('ERROR: file exists')\n","repo_name":"adiprad/media-sorter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5085747613","text":"# -*- coding: utf-8 -*-\n\n# Plugin created by Merola\n\n# This plugin does not use an official API and is in no way endorsed by the\n# scraped web service. 
You are fully responsible for any breaches of\n# terms of use and/or applicable laws that usage of this plugin could result in.\n\nimport re\nimport string\nimport urllib\nimport utility\nfrom commands import Command\n\ndef decode_characters(encoded_string):\n\tdecoded_string = encoded_string.decode('unicode_escape')\n\treturn re.sub(r'\\\\(.)', r'\\1', decoded_string)\n\t\ndef prisjakt_search(query_string):\n\t# Build URLs\n\turl_base = 'http://www.prisjakt.nu/ajax/jsonajaxserver.php?'\n\turl_product = url_base + 'm=super_search&p={\"mode\"%3A\"' + 'prod_pj' + \\\n\t\t\t'\"%2C\"search\"%3A\"' + query_string + '\"%2C\"limit\"%3A1%2C\"v4\"%3A1}'\n\turl_book = url_base + 'm=super_search&p={\"mode\"%3A\"' + 'bok' + \\\n\t\t\t'\"%2C\"search\"%3A\"' + query_string + '\"%2C\"limit\"%3A1%2C\"v4\"%3A1}'\n\n\t# Fetch the product result page\n\tresponse = utility.read_url(url_product)\n\tdata = response[\"data\"]\n\n\t# Look for info\n\tid_pattern = \"\\{'mode': 'produkt', 'produkt_id': '(\\d+)'\\}\"\n\tid_match = re.search(id_pattern, data)\n\n\tif not id_match:\n\t\t# Fetch the book result page\n\t\tresponse = utility.read_url(url_book)\n\t\tdata = response[\"data\"]\n\n\t\t# Look for info\n\t\tid_pattern = \"\\{'mode': 'bok', 'produkt_id': '(\\d+)'\\}\"\n\t\tid_match = re.search(id_pattern, data)\n\n\t\turl_type = \"bok\"\n\telse:\n\t\turl_type = \"produkt\"\n\n\tif id_match:\n\t\t# We seem to have found something\n\t\tproduct_id = id_match.group(1)\n\n\t\t# Get title\n\t\tif url_type == \"bok\":\n\t\t\ttitle_pattern = \"class=\\\\\\\\\\\"ikon(14)?\\\\\\\\\\\"( alt=\\\\\\\\\\\"\\\\\\\\\\\")?\\> (.+?) \\\\\\\\n\"\n\t\t\ttitle_match = re.search(title_pattern, data)\n\t\t\tif title_match:\n\t\t\t\tencoded_title = title_match.group(3)\n\t\t\telse:\n\t\t\t\tencoded_title = \"Unknown title\"\n\t\telse:\n\t\t\ttitle_pattern = \"onmouseout=\\\\\\\\\\\"ajaxpopup_hide\\(\\);\\\\\\\\\\\"\\>\\\\\\\\n (.+?)\\\\\\\\n\"\n\t\t\ttitle_match = re.search(title_pattern, data)\n\t\t\tif title_match:\n\t\t\t\tencoded_title = title_match.group(1)\n\t\t\telse:\n\t\t\t\tencoded_title = \"Unknown title\"\n\t\t\n\t\t# Remove HTML tags\n\t\tencoded_title = string.replace(\n\t\t\t\tencoded_title, \"\", \"\")\n\t\tencoded_title = string.replace(encoded_title, \"<\\\\/span>\", \"\")\n\t\t# Decode special characters\n\t\tproduct_title = decode_characters(encoded_title)\n\n\t\t# Get price\n\t\tdata = data.replace(\" \", \"\")\n\t\tprice_pattern = \"\\(\\d+:-)\\<\\\\\\\\\\/span\\>\"\n\t\tprice_match = re.search(price_pattern, data)\n\t\tif price_match:\n\t\t\tproduct_price = price_match.group(1)\n\t\telse:\n\t\t\tproduct_price = \"???:-\"\n\n\t\t# Done, return info string (latin-1 to make IRCClient.send() happy)\n\t\treturn product_title.encode('latin-1', 'replace') + \", \" + product_price + \\\n\t\t\t\t\", http://www.prisjakt.nu/\" + url_type + \".php?p=\" + product_id + \\\n\t\t\t\t\" | All results: http://www.prisjakt.nu/search.php?query=\" + query_string\n\telse:\n\t\treturn \"No product found.\"\n\ndef prisjakt_product(url):\n\t# Fetch the web page\n\tresponse = utility.read_url(url)\n\tdata = response[\"data\"]\n\tdata = data.replace(\" \", \"\")\n\n\t# Look for title\n\ttitle_pattern = \"\\(\\)?(.+?)(\\<\\/a\\>)?\\<\\/h1\\>\"\n\ttitle_match = re.search(title_pattern, data)\n\n\tif not title_match:\n\t\t# Failure\n\t\treturn \"Could not extract product info :(\"\n\t\n\t# Success\n\ttitle = utility.unescape(title_match.group(2))\n\t\n\t# Look for price\n\tprice_pattern = \"ägsta: 
\\(.|\\n)(\\d+:-)\\<\\/span\\>\"\n\tprice_match = re.search(price_pattern, data)\n\tprice = price_match.group(2)\n\n\t# Done, return info string\n\treturn title + \", \" + price + \", \" + url\n\nclass PrisjaktCommand(Command):\n\tdef __init__(self):\n\t\tpass\n\t\t\n\tdef trig_prisjakt(self, bot, source, target, trigger, argument):\n\t\t\"\"\"Command used to search the Swedish price comparison web site www.prisjakt.nu\"\"\"\n\t\t\n\t\t# Sanitize argument\n\t\targument = argument.strip()\n\t\tif not argument:\n\t\t\treturn \"Usage: .prisjakt | .prisjakt \"\n\t\t\n\t\tif re.match(\"http:\\/\\/www\\.prisjakt\\.nu\\/(bok|produkt).php\\?\\w+=\\d+\", argument):\n\t\t\t# Parse product page\n\t\t\treturn prisjakt_product(argument)\n\t\t\t\n\t\telse:\n\t\t\t# Search for products\n\t\t\t\n\t\t\t# We want to use latin1 encoding, i.e. %F6 instead of %C3B6\n\t\t\targument = urllib.quote_plus(argument, 'åäöÅÄÖ')\n\t\t\ttranslation = { 'å': '%E5', 'ä': '%E4', 'ö': '%F6', 'Å': '%C5', 'Ä': '%C4', 'Ö': '%D6' }\n\t\t\tfor key in translation.keys():\n\t\t\t\targument = argument.replace(key, translation[key])\n\t\t\t\n\t\t\treturn prisjakt_search(argument)\n\n","repo_name":"dhtech/pyirkbot","sub_path":"plugins/prisjakt.py","file_name":"prisjakt.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73638794723","text":"class Solution:\n def findTargetSumWays(self, nums: List[int], target: int) -> int:\n memo = defaultdict(int)\n memo[nums[0]] = 1\n memo[-nums[0]] += 1\n\n for i in range(1, len(nums)):\n temp_memo = defaultdict(int)\n for sum, count in memo.items():\n temp_memo[sum + nums[i]] += count\n temp_memo[sum - nums[i]] += count\n memo = temp_memo\n \n return memo[target]","repo_name":"Haymanot-Demis/A2SV-Problems","sub_path":"target-sum.py","file_name":"target-sum.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4133969117","text":"\"\"\"\nAuthor: Ron Lockwood-Childs\n\nLicensed under LGPL v2.1 (see file COPYING for details)\n\nConvert infix token stack into postfix (aka RPN) form.\n\nTaken from the wikipedia page describing RPN:\nhttps://en.wikipedia.org/wiki/Reverse_Polish_notation#Postfix_algorithm\n\nWhile there are tokens to be read:\n\n Read a token.\n If the token is a number, then add it to the output queue.\n If the token is a function token, then push it onto the stack.\n If the token is a function argument separator (e.g., a comma):\n\n Until the token at the top of the stack is a left parenthesis, pop\n operators off the stack onto the output queue. 
If no left\n parentheses are encountered, either the separator was misplaced\n or parentheses were mismatched.\n\nIf the token is an operator, o1, then:\n\n while there is an operator token, o2, at the top of the operator stack, and\n either:\n o1 is left-associative and its precedence is less than or equal to that of\n o2, or\n o1 is right associative, and has precedence less than that of o2,\n\n then pop o2 off the operator stack, onto the output queue;\n\n push o1 onto the operator stack.\n\nIf the token is a left parenthesis, then push it onto the stack.\nIf the token is a right parenthesis:\n\n Until the token at the top of the stack is a left parenthesis, pop operators\n off the stack onto the output queue.\n Pop the left parenthesis from the stack, but not onto the output queue.\n If the token at the top of the stack is a function token, pop it onto the\n output queue.\n If the stack runs out without finding a left parenthesis, then\n there are mismatched parentheses.\n\nWhen there are no more tokens to read:\n\n While there are still operator tokens in the stack:\n\n If the operator token on the top of the stack is a parenthesis, then there\n are mismatched parentheses.\n Pop the operator onto the output queue.\n\nExit.\n\"\"\"\n\nimport re\n\n#: Known operators\nOPERATORS = ['^', '*', '/', '%', '-', '+', 'not', '<', '<=', '>', '>=', '==', '!=', '=', \\\n 'and', 'or']\n#: Operator precedence table\nPRECEDENCE_TABLE = [\n ('not', 11),\n ('^', 10),\n ('*', 9), ('/', 9), ('%', 9),\n ('-', 8), ('+', 8),\n ('<', 7), ('<=', 7), ('>', 7), ('>=', 7), ('==', 7), ('!=', 7),\n ('and', 6), ('or', 6),\n ('=', 5)\n]\n#: Left associativity\nLEFT = 0\n#: Right associativity\nRIGHT = 1\n#: Table to keep track of operator associativity\nASSOCIATIVITY = {\n 'not': RIGHT,\n '^': RIGHT,\n '*': LEFT,\n '/': LEFT,\n '%': LEFT,\n '-': LEFT,\n '+': LEFT,\n '<': LEFT,\n '<=': LEFT,\n '>': LEFT,\n '>=': LEFT,\n '==': LEFT,\n '!=': LEFT,\n 'and': LEFT,\n 'or': LEFT,\n '=': RIGHT\n}\n\nFLOAT_RE = re.compile(r\"^\\d+\\.\\d+$\")\nINT_RE = re.compile(r\"^\\d+$\")\n\n\nclass ExpressionException(Exception):\n \"\"\"\n Raised when infix symbols remain after conversion to postfix (too many\n values, not enough operators).\n \"\"\"\n pass\n\n\ndef convert_infix_to_postfix(tok_list, replacement_ops=None):\n \"\"\"\n Convert a list of tokens in infix notation into postfix notation.\n\n :param tok_list: The list of infix tokens (or a single token string)\n :type tok_list: str | list\n :param replacement_ops: Optional dict containing mappings from an infix\n operator to a postfix operator (E.G. 
{'+': operator.add, ...})\n :type replacement_ops: dict\n :return: A list of the tokens in postfix order\n :rtype: list\n \"\"\"\n stack = []\n op_stack = []\n item_list = tok_list\n if isinstance(tok_list, str):\n # The iterator below would take single characters from the string, so\n # wrap strings in a single-element list.\n item_list = [tok_list]\n for tok in item_list:\n # val will take on the proper type based on string contents.\n val = None\n if isinstance(tok, str):\n fl_minfo = FLOAT_RE.search(tok)\n if fl_minfo:\n val = float(tok)\n in_minfo = INT_RE.search(tok)\n if in_minfo:\n #pylint: disable=redefined-variable-type\n val = int(tok)\n #pylint: enable=redefined-variable-type\n if not (fl_minfo or in_minfo):\n if tok == \"true\":\n val = 1\n elif tok == \"false\":\n val = 0\n else:\n val = str(tok)\n if val in OPERATORS:\n # Shift operators to the appropriate position based on operator\n # precedence.\n while op_stack:\n prec_diff = precedence_check(val, op_stack[0])\n if (((ASSOCIATIVITY[op_stack[0]] == RIGHT) and (prec_diff < 0)) or\n ((ASSOCIATIVITY[op_stack[0]] == LEFT) and (prec_diff <= 0))):\n stack.append(op_stack[0])\n if len(op_stack) > 1:\n op_stack = op_stack[1:]\n else:\n op_stack = []\n else:\n break\n op_stack.insert(0, val)\n elif isinstance(val, str):\n # Make sure no identifier aliases to python operators.\n val = \"_\" + val\n stack.append(val)\n else:\n stack.append(val)\n elif tok:\n # Throws a TypeError exception if len() isn't supported.\n stack = stack + convert_infix_to_postfix(tok, replacement_ops)\n while op_stack:\n stack.append(op_stack[0])\n if len(op_stack) > 1:\n op_stack = op_stack[1:]\n else:\n op_stack = []\n if op_stack:\n raise ExpressionException(\"Stack underflow in token list '{}'\".format(tok_list))\n if replacement_ops:\n #pylint: disable=consider-using-enumerate\n for idx in range(len(stack)):\n if stack[idx] in replacement_ops:\n stack[idx] = replacement_ops[stack[idx]]\n #pylint: enable=consider-using-enumerate\n return stack\n\n\ndef precedence_check(opa, opb):\n \"\"\"\n Sort two operators by precedence.\n\n :param opa: First operator\n :type opa: str\n :param opb: Second operator\n :type opb: str\n :return: negative if a has lower precedence than b, 0 if both operators\n have the same precedence, positive if a has higher precedence\n :rtype: int\n \"\"\"\n prec_a = -1\n prec_b = -1\n for prec in PRECEDENCE_TABLE:\n if opa == prec[0]:\n prec_a = prec[1]\n if opb == prec[0]:\n prec_b = prec[1]\n if (prec_a >= 0) and (prec_b >= 0):\n break\n diff = prec_a - prec_b\n return diff\n","repo_name":"rlc2/pygame_maker","sub_path":"pygame_maker/logic/infix_to_postfix.py","file_name":"infix_to_postfix.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"12436247930","text":"#!/usr/bin/env python\nfrom __future__ import with_statement, print_function\nimport subprocess\nimport sys\nimport re\nimport os\nimport collections\nfrom collections import OrderedDict\nimport argparse\nimport sqlite3\n\n\ndef default_paper_name(name):\n name = re.sub(r'Rule$', '', name)\n name = re.sub(r'([a-z])([A-Z])', r'\\1 \\2', name)\n return name\n\nclass RuleDef(object):\n def __init__(self, name, category='L', paper_name=None):\n self.name = name\n self.paper_name = paper_name if paper_name else default_paper_name(name)\n self.category = category\n\n def __str__(self):\n return self.name if isinstance(self.name, basestring) else str(self.name)\n\n def 
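As a usage sketch of the converter above — the expected output follows from tracing its precedence table, where `*` at 9 outranks `+` at 8 — plus a small evaluator for the resulting postfix list. `eval_postfix` is mine, not part of the module:

```python
import operator

postfix = convert_infix_to_postfix(["3", "+", "4", "*", "2"])
# -> [3, 4, 2, '*', '+']: numeric tokens become ints, '*' pops before '+'

OPS = {"+": operator.add, "-": operator.sub,
       "*": operator.mul, "/": operator.truediv}

def eval_postfix(tokens):
    stack = []
    for t in tokens:
        if isinstance(t, str) and t in OPS:
            b, a = stack.pop(), stack.pop()
            stack.append(OPS[t](a, b))
        else:
            stack.append(t)
    return stack[0]

assert eval_postfix(postfix) == 11  # 3 + (4 * 2)
```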
__repr__(self):\n args = map(repr, (self.name, self.paper_name, self.category))\n return 'RuleDef({}, paper_name={}, category={})'.format(*args)\n\nALL_RULES = [\n RuleDef('TransformationRule', 'L', paper_name='Select List Format'),\n RuleDef('ForeignKeyReplacementRule', 'A', paper_name='Implicit Relationship'),\n RuleDef('NotInRelationshipRule', 'A'),\n RuleDef('InRelationshipLabelRule', 'G'),\n RuleDef('DescribingAttributeLabelRule', 'A', paper_name='Describing Attribute'),\n RuleDef('JoinLabelRule', 'A', paper_name='Join Relationship'),\n RuleDef('MergeCompositeAttributeRule', 'A'),\n RuleDef('ValueTypeInferenceRule', 'A'),\n RuleDef('TableEntityRefNeedsIdRule', 'G', paper_name='Entity Ref Needs Id'),\n RuleDef('AllAttributesLiteralLabelRule', 'L'),\n RuleDef('AttributeLiteralLabelRule', 'L'),\n RuleDef('BetweenLiteralsRule', 'L'),\n RuleDef('BinaryComparisonRule', 'L'),\n RuleDef('NumberLiteralRule', 'L'),\n RuleDef('SelectLabelRule', 'L'),\n RuleDef('WhereLiteralRule', 'L'),\n RuleDef('ColumnReferenceLiteralRule', 'L'),\n RuleDef('DefaultIsNullRule', 'L'),\n RuleDef('InRelationshipLoweringRule', 'L'),\n RuleDef('TableEntityRefLiteralRule', 'L', paper_name='Entity Ref Lowering'),\n RuleDef('SimplifyConjunctionsRule', 'G'),\n RuleDef('FixVerbTenseRule', 'C'),\n RuleDef('InvalidDeterminerRule', 'C'),\n RuleDef('ConjunctScopeComputationRule', 'D'),\n RuleDef('DefaultColumnLabelRule', 'D'),\n RuleDef('DefaultTableLabelRule', 'D'),\n RuleDef('AnaphoricPronounRule', 'G'),\n RuleDef('DeterminerRedundancyRule', 'G'),\n RuleDef('RangeToBetweenRule', 'A'),\n RuleDef('SimplifyRepeatedAttributesRule', 'G'),\n RuleDef('SingleReferenceAnaphoraRule', 'G')\n]\n\ndef get_rule_def(name):\n for rule in ALL_RULES:\n if rule.name == name:\n return rule\n return None\n\n# establish map from implementation name to name used in the paper\nRULE_TO_PAPER_NAME = OrderedDict()\nfor rule in ALL_RULES:\n RULE_TO_PAPER_NAME[rule.name] = rule.paper_name\n\ndef get_paper_name(rule):\n return get_rule_def(rule).paper_name\n\n\nIGNORE_RULES = [\n 'DefaultColumnLabelRule',\n 'DefaultTableLabelRule',\n 'ConjunctScopeComputationRule',\n 'InvalidDeterminerRule',\n 'FixVerbTenseRule',\n 'SimplifyConjunctionsRule'\n]\n\nSOURCES = OrderedDict()\nSOURCES['textbook'] = 'CompanyExperiment1Test'\nSOURCES['generation-company'] = 'Experiment3CompanyTest'\nSOURCES['generation-businesstrip'] = 'Experiment3BusinessTripTest'\nSOURCES['guidance'] = 'Experiment4CompanyTest'\n\n\nclass QueryStats(object):\n def __init__(self, query):\n self.query = query\n self.rules = OrderedDict()\n\n def applied(self, rule):\n self.rules[rule] = None\n\n\nclass RuleStats(object):\n def __init__(self):\n self.rules = OrderedDict()\n for rule in ALL_RULES:\n self.rules[rule.name] = 0\n\n def applied(self, rule, times=1):\n try:\n self.rules[rule] += times\n except KeyError:\n self.rules[rule] = times\n\n def print_stats(self):\n print('{}\\t{}'.format('Rule', 'Count'))\n for rule, count in self.rules.iteritems():\n print('{}\\t{}'.format(rule, count))\n\n\ndef run_init(args):\n db = args.db\n create_schema(db)\n\n\ndef create_schema(outfile):\n with sqlite3.connect(outfile) as conn:\n conn.execute('''CREATE TABLE rule_application (\n source STRING NOT NULL,\n query STRING NOT NULL,\n query_number INTEGER NOT NULL,\n rule STRING NOT NULL,\n PRIMARY KEY(source, query, rule))\n ''')\n conn.commit()\n\n\ndef run_update(args):\n db = args.db\n source = args.sourcename\n with sqlite3.connect(db) as conn:\n stats = read_stats(args.logfile)\n conn.execute('DELETE 
FROM rule_application WHERE source=?', (source,))\n qnum = 0\n for query, qstats in stats.iteritems():\n qnum += 1\n for rule in qstats.rules.iterkeys():\n conn.execute('INSERT INTO rule_application (source, query, query_number, rule) VALUES (?, ?, ?, ?)',\n (source, query, qnum, rule))\n\n\ndef read_stats(logfile):\n \"\"\"Reads the rule application stats from a log file.\n :returns a dict mapping query -> QueryStats\"\"\"\n stats = OrderedDict()\n qstats = None\n with open(logfile) as fp:\n for line in fp:\n query = get_query(line)\n if query:\n qstats = stats[query] = QueryStats(query)\n continue\n rule = get_applied(line)\n if rule:\n qstats.applied(rule)\n return stats\n\n\ndef get_query_rules(conn, source, query):\n q = 'SELECT rule FROM rule_application WHERE source=? AND query=?'\n return conn.execute(q, (source, query))\n\n\ndef get_source_counts(conn, source=None, query=None):\n source_query = (source, query)\n q = 'SELECT COUNT(*) FROM rule_application WHERE source=? AND query=?'\n total = conn.execute(q, source_query).fetchone()[0]\n q += ' AND rule NOT IN (\\'' + '\\',\\''.join(IGNORE_RULES) + '\\')'\n paper = conn.execute(q, source_query).fetchone()[0]\n return total, paper\n\n\ndef run_query_report(args):\n db = args.db\n with sqlite3.connect(db) as conn:\n if not args.sources:\n query_report(conn, unique_queries=args.unique_queries, show_rules=args.show_rules)\n else:\n for source in args.sources:\n query_report(conn, source=source, unique_queries=args.unique_queries, show_rules=args.show_rules)\n\n\ndef query_report(conn, source=None, unique_queries=False, show_rules=False):\n \"\"\"Generate a report aggregated by query.\n\n :param conn: the database connection\n :param source: optional source to restrict the report to\n :param unique_queries: if only unique queries should be counted, has no effect if a single source is given\n :param show_rules: if the individual rules used should be included in the output\n \"\"\"\n seen = set()\n qargs = []\n headers = ['Source', 'Query #', 'Core', 'Total', 'Query']\n if show_rules:\n headers.append('Rules')\n\n q = 'SELECT DISTINCT source, query, query_number FROM rule_application'\n if source:\n q += ' WHERE source=?'\n qargs.append(source)\n q += ' ORDER BY source DESC, query_number ASC'\n\n cur = conn.execute(q, qargs)\n print('\\t'.join(headers))\n for source, query, query_number in cur:\n # skip if unique\n if unique_queries:\n if query in seen:\n continue\n seen.add(query)\n\n total, paper_total = get_source_counts(conn, source, query)\n output = [source, query_number, paper_total, total, query]\n if show_rules:\n rules = '|'.join(r[0] for r in get_query_rules(conn, source, query))\n output.append(rules)\n print('\\t'.join(map(str, output)))\n\n\ndef run_rules_report(args):\n db = args.db\n with sqlite3.connect(db) as conn:\n if not args.sources:\n rules_report(conn, unique_queries=args.unique_queries)\n else:\n for source in args.sources:\n print('Source: {}'.format(source), file=sys.stderr)\n rules_report(conn, unique_queries=args.unique_queries, source=source)\n\n\ndef rules_report(conn, unique_queries=False, source=None):\n \"\"\"Collects and reports rule application counts, either globally or for a particular source.\n\n :param conn: sqlite3.Connection the database connections\n :param unique_queries: bool if only unique queries should be considered for global counts\n :param source: basestring|None if counts should come from a single source\n \"\"\"\n stats = RuleStats()\n if unique_queries:\n queries = set()\n for query, 
rule in conn.execute('SELECT DISTINCT query, rule FROM rule_application'):\n queries.add(query)\n stats.applied(rule)\n print('Unique queries: {}'.format(len(queries)), file=sys.stderr)\n else:\n args = []\n q = 'SELECT rule, COUNT(*) FROM rule_application'\n if source:\n args.append(source)\n q += ' WHERE source=?'\n q += ' GROUP BY rule'\n for row in conn.execute(q, args):\n stats.applied(row[0], times=int(row[1]))\n stats.print_stats()\n\n\ndef run_all_tests(args):\n \"\"\":param args: argparser.Namespace\"\"\"\n db = args.db\n if not os.path.isfile('build.xml'):\n raise RuntimeError('build.xml does not exist, generate it from Eclipse first')\n if not os.path.isfile(db):\n print('Creating db file: {}'.format(db))\n main(['--db', db, 'init'])\n for source, testname in SOURCES.iteritems():\n print('Running test case: {}'.format(testname))\n ant_args = ('ant', testname)\n retcode = subprocess.call(ant_args)\n if retcode != 0:\n raise RuntimeError('Ant run failed with code: {}'.format(retcode))\n print('Updating results for {}'.format(source))\n main(['update', source])\n\n\n\ndef get_query(line):\n m = re.match(r'^.*TestBase - Query: (?P.+)$', line)\n return m.group('query') if m else None\n\n\ndef get_applied(line):\n m = re.match(r'^.*Applied rule: (?P[^\\s]+)', line)\n return m.group('rule') if m else None\n\n\ndef _parser():\n common = argparse.ArgumentParser(add_help=False)\n common.add_argument('--db', default='sqltutorrules.db',\n help='Choose the SQLite database file')\n\n parent = argparse.ArgumentParser(parents=[common])\n # parent.add_argument('--db', default='sqltutorrules.db')\n\n sub = parent.add_subparsers()\n\n init = sub.add_parser('init', help='Initialize the database file',\n description='Creates the database file and schema.')\n \"\"\":type : argparse.ArgumentParser\"\"\"\n init.set_defaults(func=run_init)\n\n update = sub.add_parser('update', help='Update the database from a log file',\n description='Reads a log file and updates the database entries of a source.')\n \"\"\":type : argparse.ArgumentParser\"\"\"\n update.add_argument('--logfile', default='sqltutor.debug.log',\n help='The logfile location to read results from')\n update.add_argument('sourcename', help='Source (test case) name for these results.')\n update.set_defaults(func=run_update)\n\n report_parent = argparse.ArgumentParser(add_help=False)\n report_parent.add_argument('--unique-queries', action='store_true', default=False,\n help='Only consider unique queries for global counts')\n report_parent.add_argument('sources', nargs='*', default=None,\n help='Report for each source (report is global otherwise)')\n\n report = sub.add_parser('report', help='Generate reports, see subcommand help.',\n description='Generate rule application reports, either per query or per rule.')\n report_subs = report.add_subparsers()\n report_queries = report_subs.add_parser('queries', parents=[report_parent],\n help='Generate per-query reports')\n report_queries.add_argument('--show-rules', action='store_true', default=False,\n help='Show the rules used for per-source reports')\n report_queries.set_defaults(func=run_query_report)\n\n report_rules = report_subs.add_parser('rules', parents=[report_parent],\n help='Generate per-rule reports')\n report_rules.set_defaults(func=run_rules_report)\n\n run = sub.add_parser('run', help='Run tests and gather results',\n description='Run all tests and update the results.')\n run.set_defaults(func=run_all_tests)\n\n return parent\n\n\ndef main(args=None):\n if args is None: args = 
sys.argv[1:]\n parser = _parser()\n args = parser.parse_args(args)\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sqltutor-project/sqltutor","sub_path":"sqltutor-web/scripts/rulecount.py","file_name":"rulecount.py","file_ext":"py","file_size_in_byte":12664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25575517957","text":"from django.shortcuts import render, get_object_or_404\nfrom lesson_4.models import Author\n\n\ndef index(request):\n return render(request, 'lesson_5/main_3.html')\n\n\ndef search_author(request, name='', surname=''):\n authors = Author.objects.all()\n if 'name' in request.GET:\n name = request.GET['name']\n if name != '':\n authors = authors.filter(name=name)\n if 'surname' in request.GET:\n surname = request.GET['surname']\n if surname != '':\n authors = authors.filter(surname=surname)\n if name == '' and surname == '':\n context = {'searched_authors': authors}\n else:\n context = {'searched_authors': authors}\n return render(request, 'lesson_5/search_author.html', context)\n","repo_name":"tarhonskyi/django_homework","sub_path":"lesson_5/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31589819962","text":"from ..base.base_modelWrapper import BaseModelWrapper\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.utils import plot_model\n\nfrom tensorflow.keras import backend as kb\nimport tensorflow as tf\n\nimport sys\n\n\nclass BackwaterCurveModel(BaseModelWrapper):\n \"\"\"A NN model for backwater curve solver's surrogate\n\n Model input variables: bed profile zb\n Model output variables: WSE\n\n \"\"\"\n\n def __init__(self, config, dataLoader):\n super(BackwaterCurveModel, self).__init__(config, dataLoader)\n\n self.build_model()\n\n #loss components (record each iteration, i.e., each batch). Total numbers = number of epoch X number of batches\n self.loss_value = [] #loss due to value error\n\n # loss components (record each epoch); will be set by a callback function in trainer\n self.loss_value_epoch = [] #loss due to value error\n\n def build_model(self):\n self.model = Sequential()\n self.model.add(Dense(100, input_dim=self.dataLoader.get_input_data_length(), activation='relu', \\\n kernel_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.L1(0.0), \\\n activity_regularizer=tf.keras.regularizers.L2(0.0)))\n #self.model.add(Dense(200, activation='relu', kernel_initializer='he_uniform'))\n self.model.add(Dense(200, activation='relu', kernel_initializer='he_uniform'))\n self.model.add(Dense(self.dataLoader.get_output_data_length(), activation='linear'))\n\n # summarize the model\n #plot_model(self.model, 'model.png', show_shapes=True)\n\n self.model.compile(\n loss=self.backwaterCurveLossFunction(),\n optimizer=self.config.model.optimizer,\n run_eagerly=True,\n )\n\n def backwaterCurveLossFunction(self):\n \"\"\"Customized loss function for backwater curve\n\n\n :return:\n \"\"\"\n\n #tf.print(\"\\n x_for_gradient: \", type(x_for_loss_function), output_stream=sys.stdout)\n\n def loss(y_true, y_pred):\n \"\"\"This is the loss function\n\n It seems that only this portion of the loss function is called during\n training. 
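The rulecount.py record above leans on two sqlite3 patterns worth isolating: parameterized `executemany` inserts and a `GROUP BY` aggregate, as in its `rules_report`. A self-contained sketch against an in-memory database; the sample rows are invented:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("""CREATE TABLE rule_application (
    source TEXT NOT NULL, query TEXT NOT NULL,
    query_number INTEGER NOT NULL, rule TEXT NOT NULL,
    PRIMARY KEY (source, query, rule))""")
rows = [("textbook", "q1", 1, "JoinLabelRule"),
        ("textbook", "q1", 1, "SelectLabelRule"),
        ("guidance", "q7", 7, "JoinLabelRule")]
conn.executemany("INSERT INTO rule_application VALUES (?, ?, ?, ?)", rows)
for rule, count in conn.execute(
        "SELECT rule, COUNT(*) FROM rule_application GROUP BY rule"):
    print(f"{rule}\t{count}")
conn.close()
```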
Anything outside is only called once during the \"compile\" stage.\n\n :param y_true:\n :param y_pred:\n :return:\n \"\"\"\n\n # difference between true value and predicted value\n error = y_true - y_pred # the error\n sqr_error = kb.square(error) # square of the error\n mean_sqr_error = kb.mean(sqr_error) # mean of the square of the error\n\n self.loss_value.append(mean_sqr_error)\n\n return mean_sqr_error\n\n return loss\n\n def test_model(self, test_data, verbose=0):\n \"\"\"Test the model with given data\n\n :param test_data:\n :param verbose:\n :return:\n \"\"\"\n\n if self.model is None:\n raise Exception(\"You have to build the model first.\")\n\n score = self.model.evaluate(test_data[0], test_data[1], verbose=0)\n\n print(\"Test loss:\", score[0])\n print(\"Test accuracy:\", score[1])\n\n def predict(self, input_data, verbose=0):\n \"\"\"Use the trained model to make prediction\n\n :param input_data:\n :param verbose:\n :return:\n \"\"\"\n\n if self.model is None:\n raise Exception(\"You have to build and train the model first.\")\n\n return self.model.predict(input_data)\n","repo_name":"psu-efd/dl4HM","sub_path":"dl4HM/models/backwater_curve_model.py","file_name":"backwater_curve_model.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"37780739420","text":"import numpy as np\nfrom mathutils import Vector\nfrom mathutils.geometry import area_tri as area\nfrom mathutils.geometry import tessellate_polygon as tessellate\nfrom sverchok.data_structure import has_element\nfrom sverchok.utils.math import np_normalize_vectors\nfrom sverchok.utils.sv_bmesh_utils import bmesh_from_pydata\nfrom sverchok.utils.modules.matrix_utils import vectors_center_axis_to_matrix\nfrom sverchok.utils.modules.vertex_utils import vertex_shell_factor, adjacent_edg_pol, adjacent_edg_pol_idx\nfrom sverchok.nodes.analyzer.mesh_filter import Faces\nfrom .edge_utils import adjacent_faces_idx\n\n\ndef areas_from_polygons(verts, polygons, sum_faces=False):\n '''\n returns pols area as [float, float,...]\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n sum_faces if True it will return the sum of the areas as [float]\n '''\n areas = []\n concat_area = areas.append\n\n for polygon in polygons:\n num = len(polygon)\n if num == 3:\n concat_area(area(verts[polygon[0]], verts[polygon[1]], verts[polygon[2]]))\n elif num == 4:\n area_1 = area(verts[polygon[0]], verts[polygon[1]], verts[polygon[2]])\n area_2 = area(verts[polygon[0]], verts[polygon[2]], verts[polygon[3]])\n concat_area(area_1 + area_2)\n elif num > 4:\n ngon_area = 0.0\n subcoords = [Vector(verts[idx]) for idx in polygon]\n for tri in tessellate([subcoords]):\n ngon_area += area(*[verts[polygon[i]] for i in tri])\n concat_area(ngon_area)\n else:\n concat_area(0)\n\n if sum_faces:\n areas = [sum(areas)]\n\n return areas\n\n\ndef pols_perimeters(verts, polygons, sum_perimeters=False, output_numpy=False):\n '''\n returns pols perimeter as [float, float,...]\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n sum_perimeters if True it will return the sum of the perimenters as [float]\n '''\n vals = np_process_polygons(verts, polygons, func=np_faces_perimeters, dims=1, output_numpy=True)\n\n if sum_perimeters:\n if output_numpy:\n return 
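The backwater model record wraps its loss in a factory closure: the outer function runs once at `compile` time, the inner `loss(y_true, y_pred)` on every batch — which is also why it compiles with `run_eagerly=True`, so the Python-side `self.loss_value.append` actually fires per batch. A stripped-down sketch of the same pattern; names are illustrative, and the side-effect list only records values when running eagerly:

```python
from tensorflow.keras import backend as kb

def make_tracking_mse(history):
    # Factory: evaluated once at compile time; returns the per-batch loss.
    def loss(y_true, y_pred):
        mse = kb.mean(kb.square(y_true - y_pred))
        history.append(mse)  # side effect; requires run_eagerly=True
        return mse
    return loss

# model.compile(loss=make_tracking_mse([]), optimizer="adam", run_eagerly=True)
```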
[np.sum(vals)]\n\n return [np.sum(vals).tolist()]\n if output_numpy:\n return vals\n return vals.tolist()\n\ndef pols_vertices(vertices, faces):\n '''\n Explodes geometry.\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n returns [[vertex, vertex, vertex,..], [vertex, vertex, vertex,..], ...] and [[polygon], [polygon],...]\n '''\n verts = [[vertices[c] for c in p] for p in faces]\n pols = [[list(range(len(p)))] for p in faces]\n\n vals = [verts, pols]\n return vals\n\ndef pols_to_edges(obj, unique_edges=False):\n '''\n Generate edges from pols\n obj: list as [[polygon, polygon,..], [polygon,...],...], being each polygon [int, int, ...].\n unique_edges: if False will return all polygon edges if True will remove the doubled edges\n '''\n out = []\n for faces in obj:\n out_edges = []\n seen = set()\n for face in faces:\n for edge in zip(face, list(face[1:]) + list([face[0]])):\n if unique_edges and tuple(sorted(edge)) in seen:\n continue\n if unique_edges:\n seen.add(tuple(sorted(edge)))\n out_edges.append(edge)\n out.append(out_edges)\n return out\n\ndef pols_sides(faces, sum_sides=False):\n '''\n returns the number of sides of each polygon as [int, int, ... ]\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n sum_sides: if True will return the sum of sides of the whole object returning [int]\n '''\n vals = [len(p) for p in faces]\n if sum_sides:\n vals = [sum(vals)]\n return vals\n\n\ndef pols_adjacent(pols):\n \"\"\"\n returns the polygons that share a vertex with each polygon [[pol, pol,..], [pol,..]]\n pols: list as [polygon, polygon,..], being each polygon [int, int, ...].\n \"\"\"\n return [[pols[i] for i in p_indexes] for p_indexes in pols_adjacent_idx(pols)]\n\n\ndef pols_adjacent_idx(pols):\n \"\"\"\n returns the polygons that share a vertex with each polygon [[pol, pol,..], [pol,..]]\n pols: list as [polygon, polygon,..], being each polygon [int, int, ...].\n \"\"\"\n edges = {tuple(sorted(e)): None for pol_edges in pols_edges(pols) for e in pol_edges}\n edges = {e: adj_indexes for e, adj_indexes in zip(edges.keys(), adjacent_faces_idx(edges.keys(), pols))}\n adj_pols = []\n for ei, pol_edges in enumerate(pols_edges(pols)):\n adj_pols.append(list({fi for e in pol_edges for fi in edges[tuple(sorted(e))]} - {ei}))\n return adj_pols\n\n\ndef pols_adjacent_num(pols):\n \"\"\"\n returns the number polygons that share a vertex with each polygon [int, int,..]]\n pols: list as [polygon, polygon,..], being each polygon [int, int, ...].\n \"\"\"\n return [len(p) for p in pols_adjacent_idx(pols)]\n\n\ndef pols_neighbor(verts, pols):\n '''\n returns the polygons that share one edges with each polygon [[pol, pol,..], [pol,..]]\n pols: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n v_adj = adjacent_edg_pol(verts, pols)\n vals = []\n for pol in pols:\n pol_adj = []\n for v_id in pol:\n for related_pol in v_adj[v_id]:\n if not related_pol in pol_adj:\n pol_adj.append(related_pol)\n\n pol_adj.remove(pol)\n vals.append(pol_adj)\n\n return vals\ndef pols_neighbor_idx(verts, pols):\n '''\n returns the polygons that share one edges with each polygon [[pol, pol,..], [pol,..]]\n pols: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n v_adj = adjacent_edg_pol_idx(verts, pols)\n vals = []\n for idx, pol in enumerate(pols):\n pol_adj = []\n for v_id in pol:\n for related_pol in v_adj[v_id]:\n if not related_pol in 
pol_adj:\n pol_adj.append(related_pol)\n\n pol_adj.remove(idx)\n vals.append(pol_adj)\n\n return vals\n\ndef pols_neighbor_num(verts, pols):\n '''\n returns the number of polygons that share one edges with each polygon [int, int,...]\n pols: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n return [len(p) for p in pols_neighbor(verts, pols)]\n\ndef pols_normals(vertices, faces, output_numpy):\n '''\n Returns Faces normals as [vector, vector,...]\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n vals = np_process_polygons(vertices, faces, func=np_faces_normals, output_numpy=output_numpy)\n return vals\n\ndef np_faces_normals(v_pols):\n pol_sides = v_pols.shape[1]\n if pol_sides > 3:\n f_normals = np.zeros((len(v_pols), 3), dtype=np.float64)\n for i in range(pol_sides - 2):\n f_normals += np.cross(v_pols[::, (1+i)%pol_sides] - v_pols[::, 0], v_pols[::, (2+i)%pol_sides] - v_pols[::, 0])\n else:\n f_normals = np.cross(v_pols[::, 1] - v_pols[::, 0], v_pols[::, 2] - v_pols[::, 0])\n np_normalize_vectors(f_normals)\n\n return f_normals\n\ndef np_faces_absolute_normals(v_pols):\n return np_center_median(v_pols) + np_faces_normals(v_pols)\n\ndef pols_absolute_normals(vertices, faces, output_numpy):\n '''\n Returns Faces center + faces normals as [vector, vector,...]\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n return np_process_polygons(vertices, faces, func=np_faces_absolute_normals, output_numpy=output_numpy)\n\n\ndef pols_shell_factor(vertices, faces):\n '''\n Average of vertex shell_factor\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n edges: list as [edge, edge,..], being each edge [int, int].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n returns vals as [float, float,...]\n '''\n v_shell = vertex_shell_factor(vertices, [], faces)\n vals = []\n for face in faces:\n p_shell = 0\n for v_id in face:\n p_shell += v_shell[v_id]\n vals.append(p_shell/len(face))\n\n return vals\n\ndef np_faces_perimeters(v_pols):\n\n return np.sum(edges_lengths(v_pols), axis=1)\n\ndef vertex_weight_factor(v_pols):\n ed_lengths = edges_lengths(v_pols)\n perimeters = np.sum(ed_lengths, axis=1)\n edges_prop = ed_lengths/perimeters[:, np.newaxis]\n return (edges_prop + np.roll(edges_prop, 1, axis=1))/2\n\ndef np_center_median(v_pols):\n return np.sum(v_pols, axis=1) / v_pols.shape[1]\n\ndef np_center_bbox(v_pols):\n\n return (np.amin(v_pols, axis=1) + np.amax(v_pols, axis=1))/2\n\ndef np_center_weighted(v_pols):\n\n v_factor = vertex_weight_factor(v_pols)\n\n return np.sum(v_pols * v_factor[:, :, np.newaxis], axis=1)\n\ndef edges_lengths(v_pols):\n return np.linalg.norm(v_pols-np.roll(v_pols, -1, axis=1), axis=2)\n\ndef np_tangent_longest_edge(v_pols):\n edges_dir = v_pols-np.roll(v_pols, 1, axis=1)\n ed_length = np.linalg.norm(edges_dir, axis=2)\n ed_idx = np.argmax(ed_length, axis=1)\n\n return np_normalize_vectors(edges_dir[np.arange(len(v_pols)), ed_idx, :])\n\ndef np_tangent_center_orig(v_pols):\n return np_normalize_vectors(np_center_median(v_pols) - v_pols[:, 0, :])\n\n\n\ndef np_process_polygons(verts, faces, func=None, dims=3, output_numpy=False):\n if not func:\n return\n if not (has_element(verts) and has_element(faces)):\n return\n if isinstance(verts, np.ndarray):\n np_verts = 
verts\n else:\n np_verts = np.array(verts)\n\n if isinstance(faces, np.ndarray):\n np_faces = faces\n else:\n np_faces = np.array(faces,dtype=object)\n\n if np_faces.dtype == object:\n lens = np.array([len(i) for i in np_faces])\n pol_types = np.unique(lens)\n if dims == 1:\n vals = np.zeros(np_faces.shape[0], dtype=float)\n else:\n vals = np.zeros((np_faces.shape[0], dims), dtype=float)\n for p in pol_types:\n mask = lens == p\n np_faces_g = np.array(np_faces[mask].tolist())\n v_pols = np_verts[np_faces_g]\n if dims == 1:\n vals[mask] = func(v_pols)\n else:\n vals[mask, :] = func(v_pols)\n else:\n v_pols = np_verts[np_faces] #num pols, num sides\n vals = func(v_pols)\n\n if output_numpy:\n return vals\n return vals.tolist()\n\ndef pols_center(vertices, faces, origin, output_numpy):\n '''\n Cemter of faces\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n edges: list as [edge, edge,..], being each edge [int, int].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n origin: String that can be any key of pols_origin_modes_dict\n returns vals as [float, float,...]\n '''\n\n if origin == 'Median Center':\n centers_func = np_center_median\n elif origin == 'Bounds Center':\n centers_func = np_center_bbox\n else:\n centers_func = np_center_weighted\n\n vals = np_process_polygons(vertices, faces, func=centers_func, output_numpy=output_numpy)\n\n return vals\n\ndef pols_center_bounds(bm_faces):\n return [tuple(bm_face.calc_center_bounds()) for bm_face in bm_faces]\n\ndef pols_center_median(bm_faces):\n return [tuple(bm_face.calc_center_median()) for bm_face in bm_faces]\n\ndef pols_center_median_weighted(bm_faces):\n return [tuple(bm_face.calc_center_median_weighted()) for bm_face in bm_faces]\n\ndef pols_first_vert(bm_faces):\n return [tuple(bm_face.verts[0].co) for bm_face in bm_faces]\n\ndef pols_last_vert(bm_faces):\n return [tuple(bm_face.verts[-1].co) for bm_face in bm_faces]\n\ndef pols_perimeter(vertices, faces):\n '''\n returns pols perimeter.\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n bm = bmesh_from_pydata(vertices, [], faces, normal_update=True)\n vals = [bm_face.calc_perimeter() for bm_face in bm.faces]\n bm.free()\n return vals\n\n\ndef pols_tangent(vertices, faces, direction):\n '''\n returns pols tangents.\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n direction: String that can be any key of tangent_modes_dict\n '''\n if direction == 'Edge':\n return np_process_polygons(vertices, faces,\n func=np_tangent_longest_edge,\n output_numpy=False)\n if direction == 'Center - Origin':\n return np_process_polygons(vertices, faces,\n func=np_tangent_center_orig,\n output_numpy=False)\n else:\n bm = bmesh_from_pydata(vertices, [], faces, normal_update=True)\n vals = tangent_modes_dict[direction][1](bm.faces)\n bm.free()\n return vals\n\ndef pols_tangent_edge(bm_faces):\n return [tuple(bm_face.calc_tangent_edge()) for bm_face in bm_faces]\n\ndef pols_tangent_edge_diagonal(bm_faces):\n return [tuple(bm_face.calc_tangent_edge_diagonal()) for bm_face in bm_faces]\n\ndef pols_tangent_edge_pair(bm_faces):\n return [tuple(bm_face.calc_tangent_edge_pair()) for bm_face in bm_faces]\n\ndef pols_tangent_center_origin(bm_faces):\n return 
[tuple((Vector(bm_face.verts[0].co)-Vector(bm_face.calc_center_median())).normalized()) for bm_face in bm_faces]\n\n\ndef pols_tangent_vert_diagonal(bm_faces):\n return [tuple(bm_face.calc_tangent_vert_diagonal()) for bm_face in bm_faces]\n\n\ndef pols_is_boundary(vertices, faces):\n '''\n bridge to mesh filter node Faces class\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n bm = bmesh_from_pydata(vertices, [], faces, normal_update=True)\n interior, boundary, mask = Faces.process(bm, [], [])\n bm.free()\n return mask, interior, boundary\n\n\ndef pols_edges(faces):\n '''\n extracts the edges from polygons order [0, 1, 2] -> [[0, 1], [1, 2], [2, 0]]\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n vals = [[(c, cn) for c, cn in zip(face, face[1:] + [face[0]])] for face in faces]\n return vals\n\ndef pols_inverted(faces):\n '''\n inverts the polygon order [0, 1, 2] -> [2, 1, 0]\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n '''\n vals = [list(reversed(f)) for f in faces]\n return vals\n\ndef pols_matrix(vertices, faces, origin, direc):\n '''\n Matrix aligned with faces\n vertices: list as [vertex, vertex, ...], being each vertex [float, float, float].\n faces: list as [polygon, polygon,..], being each polygon [int, int, ...].\n orientation: contains origin and direction\n origin: String that can be any key of pols_origin_modes_dict\n direction: String that can be any key of tangent_modes_dict\n outputs each polygon matrix [matrix, matrix, matrix]\n '''\n\n bm = bmesh_from_pydata(vertices, [], faces, normal_update=True)\n normals = [Vector(face.normal) for face in bm.faces]\n centers = pols_origin_modes_dict[origin][1](bm.faces)\n tangents = tangent_modes_dict[direc][1](bm.faces)\n vals = vectors_center_axis_to_matrix(centers, normals, tangents)\n bm.free()\n return vals\n\n\ntangent_modes_dict = {\n 'Edge': (1, pols_tangent_edge, 'Face tangent based on longest edge'),\n 'Edge Diagonal': (2, pols_tangent_edge_diagonal, 'Face tangent based on the edge farthest from any vertex'),\n 'Edge Pair': (3, pols_tangent_edge_pair, 'Face tangent based on the two longest disconnected edges'),\n 'Vert Diagonal': (4, pols_tangent_vert_diagonal, 'Face tangent based on the two most distant vertices'),\n 'Center - Origin': (5, pols_tangent_center_origin, 'Face tangent based on the mean center and first corner'),\n }\n\npols_origin_modes_dict = {\n 'Bounds Center': (30, pols_center_bounds, 'Center of bounding box of faces'),\n 'Median Center': (31, pols_center_median, 'Mean of vertices of each face'),\n 'Median Weighted Center': (32, pols_center_median_weighted, 'Mean of vertices of each face weighted by edges length'),\n 'First Vertex': (33, pols_first_vert, 'First Vertex of Face'),\n 'Last Vertex': (34, pols_last_vert, 'Last Vertex of Face'),\n }\n\n# Name: (index, input_sockets, func_options, output_options, function, output_sockets, output_sockets_names, description)\nfaces_modes_dict = {\n 'Geometry': (0, 'vp', '', 'u', pols_vertices, 'vs', 'Vertices, Faces', \"Geometry of each face. 
(explode)\"),\n 'Center': (10, 'vp', 'ca', '', pols_center, 'v', 'Center', 'Center faces'),\n 'Normal': (20, 'vp', 'a', '', pols_normals, 'v', 'Normal', 'Normal of faces'),\n 'Normal Absolute': (21, 'vp', 'a', '', pols_absolute_normals, 'v', 'Normal_Abs', 'Median Center + Normal'),\n 'Tangent': (30, 'vp', 't', '', pols_tangent, 'v', 'Tangent', 'Face tangent.'),\n 'Matrix': (40, 'vp', 'qt', 'u', pols_matrix, 'm', 'Matrix', 'Matrix of face. Z axis on normal. X to first corner'),\n 'Area': (50, 'vp', 's', '', areas_from_polygons, 's', 'Area', \"Area of faces\"),\n 'Perimeter': (51, 'vp', 'sa', '', pols_perimeters, 's', 'Perimeter', 'Perimeter of faces'),\n 'Sides Number': (52, 'p', 's', '', pols_sides, 's', 'Sides', \"Number of sides of faces\"),\n 'Adjacent Faces Num': (53, 'p', '', '', pols_adjacent_num, 's', 'Number', \"Number of Faces that share a edge with face\"),\n 'Neighbor Faces Num': (54, 'vp', '', '', pols_neighbor_num, 's', 'Number', \"Number of Faces that share a vertex with face\"),\n 'Sharpness': (55, 'vp', '', '', pols_shell_factor, 's', 'Sharpness', 'Average of curvature of mesh in faces vertices'),\n 'Inverse': (60, 'p', '', '', pols_inverted, 's', 'Faces', 'Reversed Polygons (Flipped)'),\n 'Edges': (61, 'p', '', 'u', pols_edges, 's', 'Edges', 'Face Edges'),\n 'Adjacent Faces': (62, 'p', '', 'u', pols_adjacent, 's', 'Faces', 'Faces that share a edge with face'),\n 'Neighbor Faces': (63, 'vp', '', 'u', pols_neighbor, 's', 'Faces', 'Faces that share a vertex with face'),\n 'Adjacent Faces Idx': (64, 'p', '', 'u', pols_adjacent_idx, 's', 'Faces Idx', 'Index of faces that share a edge with face'),\n 'Neighbor Faces Idx': (65, 'vp', '', 'u', pols_neighbor_idx, 's', 'Faces Idx', 'Index of faces that share a vertex with face'),\n 'Is Boundary': (70, 'vp', '', '', pols_is_boundary, 'sss', 'Mask, Boundary, Interior', 'Is the face boundary'),\n }\n","repo_name":"nortikin/sverchok","sub_path":"utils/modules/polygon_utils.py","file_name":"polygon_utils.py","file_ext":"py","file_size_in_byte":19429,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"43402026976","text":"import requests\nimport datetime\nimport holidays\nimport random\nimport smtplib\nimport time\nfrom datetime import date\nimport os\nimport logging\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nlogging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n\ndef log_and_print(msg, level=\"info\"):\n print(msg)\n if level == \"info\":\n logging.info(msg)\n elif level == \"warning\":\n logging.warning(msg)\n elif level == \"error\":\n logging.error(msg)\n\ndef ensure_file_exists(file_name):\n \"\"\"\n Verifica se o arquivo existe. \n Se o arquivo não existir, ele é criado.\n \"\"\"\n if not os.path.exists(file_name):\n with open(file_name, \"w\") as file:\n pass\n \ndef clear_file_content(file_name):\n \"\"\"\n Limpa o conteúdo do arquivo. 
\n \"\"\"\n with open(file_name, \"w\") as file:\n pass\n\ndef ensure_current_day_log_file(today):\n \"\"\"\n Essa função garante que o arquivo \"pontos.txt\" contenha apenas as entradas do dia atual.\n As entradas de dias anteriores são removidas.\n \"\"\"\n date_str = today.strftime(\"%Y-%m-%d\")\n with open(\"pontos.txt\", \"r\") as file:\n lines = file.readlines()\n with open(\"pontos.txt\", \"w\") as file:\n for line in lines:\n date, _, _ = line.strip().split(',')\n # mantém apenas as linhas cuja data corresponde à data atual\n if date == date_str:\n file.write(line)\n\n#Enviando Email\ndef send_email(subject, body, timestamp=None):\n sender_email = 'SEU_E-MAIL_AQUI'\n receiver_email = 'SEU_E-MAIL_AQUI'\n password = 'SENHA_DO_SEU_E-MAIL_AQUI'\n\n msg = MIMEText(body, 'plain', 'utf-8')\n msg['Subject'] = subject\n msg['From'] = sender_email\n msg['To'] = receiver_email\n\n if timestamp:\n body += f\"\\nData e hora da marcação: {timestamp}\"\n \n message = f\"Subject: {subject}\\n\\n{body}\"\n try:\n server = smtplib.SMTP('smtp.office365.com', 587) # SERVIDOR SMTP MICROSOFT!\n server.starttls()\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, msg.as_string())\n server.quit()\n except Exception as e:\n print(f\"Erro ao enviar e-mail: {e}\")\n \n#Validando se é feriado ou final de semana \ndef is_valid_day(today, holidays_br, holidays_global):\n if today.weekday() >= 5:\n return False, 'Hoje é final de semana, não é possível marcar ponto!'\n elif today.date() in holidays_br or today.date() in holidays_global:\n return False, 'Hoje é feriado, não é possível marcar ponto!'\n return True, ''\n\n# Função do mexidão (Randomizador) :)\ndef generate_random_time(today, min_time, max_time):\n min_time_delta = datetime.timedelta(hours=min_time.hour, minutes=min_time.minute)\n max_time_delta = datetime.timedelta(hours=max_time.hour, minutes=max_time.minute)\n random_minutes = random.randint(0, int((max_time_delta - min_time_delta).total_seconds() // 60))\n return datetime.datetime.combine(today.date(), min_time) + datetime.timedelta(minutes=random_minutes)\n\ndef is_valid_execution_time(current_time, start_time, end_time):\n current_time_without_ms = datetime.time(current_time.hour, current_time.minute, current_time.second)\n if start_time <= current_time_without_ms <= end_time:\n return True\n return False\n\n#Verifica se uma marcação de ponto já foi registrada no arquivo de registro.\ndef check_previous_point(today, point_type):\n date_str = today.strftime(\"%Y-%m-%d\")\n with open(\"pontos.txt\", \"r\") as file:\n for line in file:\n date, time, point = line.strip().split(',')\n if date == date_str and point == point_type:\n return True\n return False\n\n#Registra uma marcação de ponto no arquivo de registro :)\ndef record_point(today, time, point_type):\n date_str = today.strftime(\"%Y-%m-%d\")\n time_str = time.strftime(\"%H:%M:%S\")\n with open(\"pontos.txt\", \"a\") as file:\n file.write(f\"{date_str},{time_str},{point_type}\\n\")\n\n#Request\ndef send_request(url, payload, headers):\n response = requests.post(url, data=payload, headers=headers)\n response_data = response.json()\n return response_data\n\n# Variável global para armazenar o dia atual\ncurrent_day = None\n\n#Função Principal onde chamado tudooo\ndef main():\n \n today = datetime.datetime.today()\n \n # Verificando se os arquivos \"pontos.txt\" e \"horarios.txt\" Existe :)\n ensure_file_exists(\"pontos.txt\")\n ensure_file_exists(\"horarios.txt\")\n \n # Mantém apenas as entradas de ponto do 
dia atual no arquivo \"pontos.txt\" :)\n ensure_current_day_log_file(today)\n \n #Definindo em qual horario o script pode rodar.:)\n execution_start_time = datetime.time(8, 55)\n execution_end_time = datetime.time(18, 10)\n \n #Definindo horario min e max para entrada. :)\n horario_entrada_min = datetime.time(8, 55)\n horario_entrada_max = datetime.time(9, 10)\n \n #Definindo horario min e max para saida do Almoço. :) \n horario_saida_almoco_min = datetime.time(12, 0)\n horario_saida_almoco_max = datetime.time(12, 20)\n \n #Definindo horario min e max para retorno do Almoço. :)\n horario_retorno_almoco_min = datetime.time(13, 0)\n horario_retorno_almoco_max = datetime.time(13, 15)\n \n #Definindo horario min e max para saida (termino do trabalho). :)\n horario_saida_min = datetime.time(18, 0)\n horario_saida_max = datetime.time(18, 10)\n \n #Gerando um horario aleatorio para marcar o ponto respeitando os horario min e max de cada etapa. :)\n horario_entrada = generate_random_time(today, horario_entrada_min, horario_entrada_max)\n horario_saida_almoco = generate_random_time(today, horario_saida_almoco_min, horario_saida_almoco_max)\n horario_retorno_almoco = generate_random_time(today, horario_retorno_almoco_min, horario_retorno_almoco_max)\n horario_saida = generate_random_time(today, horario_saida_min, horario_saida_max)\n \n current_time = datetime.datetime.now().time()\n current_time_without_ms = datetime.time(current_time.hour, current_time.minute, current_time.second)\n \n current_year = today.year\n holidays_br = holidays.Brazil(years=current_year)\n holidays_global = holidays.CountryHoliday('BR', years=current_year)\n \n global current_day # Isso é necessário para modificar a variável global dentro desta função :)\n \n url = 'https://cliente.apdata.com.br/everisparceiro/.net/index.ashx/SaveTimmingEvent'\n \n payload = {\n 'deviceID': '8001',\n 'userName': 'SEU_LOGIN',\n 'password': 'SUA_SENHA',\n 'eventType': '1',\n 'cracha': '',\n 'costCenter': '',\n 'leave': '',\n 'func': '0',\n 'captcha': '',\n 'tenantName': '',\n 'tsc': '',\n 'sessionID': '0',\n 'selectedEmployee': '0',\n 'selectedCandidate': '0',\n 'selectedVacancy': '0',\n 'dtFmt': 'd/m/Y',\n 'tmFmt': 'H:i:s',\n 'shTmFmt': 'H:i',\n 'dtTmFmt': 'd/m/Y H:i:s',\n 'language': '0',\n 'idEmployeeLogged': '0'\n }\n\n headers = {\n 'Content-Length': '306',\n 'Sec-Ch-Ua': '\"Chromium\";v=\"109\", \"Not_A Brand\";v=\"99\"',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Sec-Ch-Ua-Mobile': '?0',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5414.120 Safari/537.36',\n 'Sec-Ch-Ua-Platform': '\"Windows\"',\n 'Accept': '*/*',\n 'Origin': 'https://cliente.apdata.com.br',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Dest': 'empty',\n 'Referer': 'https://cliente.apdata.com.br/everisparceiro/',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',\n 'Cookie': 'X-Oracle-BMC-LBS-Route=bb089166a4d059141e589f5733aa94f02d71e92b; clockDeviceToken8001=nH6C/qScdsJSxp4tyTbzcGMegpWY8nGrKJ7+ZjgmX3xHmIA=; acceptedRequiredCookies=COOKIEACCEPTED; acceptedOptionalCookies=COOKIEACCEPTED; Aplanguage=0; FIN_COOKIE=true; apdataCookieIsEnabled=none; __zjc7220=5264592017; __z_a=1442784464723815612723815; authenticated=false; SessionID=; dynSID=; ts=; loginOK=false; dashPublicImg=dpi; X-Oracle-BMC-LBS-Realm=1'\n }\n \n# Verifica se estamos em um novo dia ou ainda 
não foram gerados horários\n    if today.day != current_day or not os.path.exists(\"horarios.txt\"):\n        current_day = today.day\n        # Gerar novos horários aleatórios\n        with open(\"horarios.txt\", \"w\") as file:\n            for point_type, min_time, max_time in [\n                (\"entrada\", datetime.time(8, 55), datetime.time(9, 10)),\n                (\"saida_almoco\", datetime.time(12, 0), datetime.time(12, 20)),\n                (\"retorno_almoco\", datetime.time(13, 0), datetime.time(13, 15)),\n                (\"saida\", datetime.time(18, 0), datetime.time(18, 10)),\n            ]:\n                random_time = generate_random_time(today, min_time, max_time)\n                file.write(f\"{point_type},{random_time.hour}:{random_time.minute}:{random_time.second}\\n\")\n    \n    # é Aqui que eu valido se é feriado ou fim de semana. :)\n    valid, reason = is_valid_day(today, holidays_br, holidays_global)\n    if not valid:\n        print(reason)\n        send_email('[BOT] - MARCAÇÃO DE PONTO', reason)\n        exit()\n    \n    if not is_valid_execution_time(current_time_without_ms, execution_start_time, execution_end_time):\n        print(f\"O horário atual ({current_time_without_ms}) está fora do intervalo de execução permitido. Encerrando o script.\")\n        exit() \n    \n    # Inicialize uma lista com informações sobre os pontos a serem marcados\n    point_steps = [\n        (\"entrada\", horario_entrada),\n        (\"saida_almoco\", horario_saida_almoco),\n        (\"retorno_almoco\", horario_retorno_almoco),\n        (\"saida\", horario_saida),\n    ]\n\n    response_data = None  # ensure the variable exists even if no request is sent\n    # Verifica se a hora atual é maior ou igual ao horário de ponto\n    with open(\"horarios.txt\", \"r\") as file:\n        for line in file:\n            point_type, point_time = line.strip().split(',')\n            hour, minute, second = map(int, point_time.split(':'))\n            if datetime.datetime.now().time() >= datetime.time(hour, minute, second):\n                # Se a marcação do ponto ainda não foi registrada, registra\n                if not check_previous_point(today, point_type):\n                    # Resto do código para registrar o ponto e enviar a solicitação...\n                    send_email('[BOT] - MARCAÇÃO DE PONTO', f'Marcação de ponto {point_type}: {datetime.datetime.now().time()}')\n                    record_point(today, datetime.datetime.now().time(), point_type)\n                    print(f\"Marcação de ponto '{point_type}' realizada com sucesso.\")\n                    time.sleep(2) \n                    response_data = send_request(url, payload, headers)\n                else:\n                    print(f\"Marcação de ponto '{point_type}' já realizada. Ignorando.\")\n    \n    if response_data is not None:\n        print('RETORNO DO SERVIDOR:')\n        print(response_data)\n\n        # Verificando resposta da requisição\n        if response_data['success'] and 'MARCACAO EFETUADA' in response_data['msg']['msg']:\n            print('Marcação realizada com sucesso!')\n            send_email('[BOT] - MARCAÇÃO DE PONTO', 'Marcação realizada com sucesso!', timestamp=datetime.datetime.now())\n        else:\n            print('Usuário / Senha inválidos!')\n            send_email('[BOT] - MARCAÇÃO DE PONTO', 'Usuário / Senha inválidos!')\n    \n    log_and_print(\"Starting script...\")\n\n\nif __name__ == \"__main__\":\n    while True:\n        try:\n            main()\n        except Exception as e:\n            print(f\"Ocorreu um erro: {e}\")\n        time.sleep(60)  # Espera 60 segundos antes de executar novamente\n","repo_name":"lyonzin/MarcaPonto.py","sub_path":"MarcaPonto.py","file_name":"MarcaPonto.py","file_ext":"py","file_size_in_byte":12039,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"31431785212","text":"import logging, signal, pty, os, fcntl, termios, subprocess, traceback\nfrom text import msg\nfrom error import KError as error\nlogger = logging.getLogger(__name__)\n\n# Return the SIGINT interrupt handler back to the OS default\ndef fix_sigint():\n    signal.signal(signal.SIGINT, signal.SIG_DFL)\nfix_sigint()\n\n# Set a file-descriptor as non-blocking\ndef set_nonblock(fd):\n    fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)\n\n# Clear HUPCL flag\ndef clear_hupcl(fd):\n    attrs = termios.tcgetattr(fd)\n    attrs[2] = attrs[2] & ~termios.HUPCL\n    try:\n        termios.tcsetattr(fd, termios.TCSADRAIN, attrs)\n    except termios.error:\n        pass\n\n# Support for creating a pseudo-tty for emulating a serial port\ndef create_pty(ptyname):\n    mfd, sfd = pty.openpty()\n    try:\n        os.unlink(ptyname)\n    except os.error:\n        pass\n    filename = os.ttyname(sfd)\n    os.chmod(filename, 0o660)\n    os.symlink(filename, ptyname)\n    set_nonblock(mfd)\n    old = termios.tcgetattr(mfd)\n    old[3] = old[3] & ~termios.ECHO\n    termios.tcsetattr(mfd, termios.TCSADRAIN, old)\n    return mfd\n\ndef get_cpu_info():\n    try:\n        f = open('/proc/cpuinfo', 'r')\n        data = f.read()\n        f.close()\n    except (IOError, OSError):\n        logger.exception(\"Exception on read /proc/cpuinfo: %s\", traceback.format_exc())\n        return \"?\"\n    lines = [l.split(':', 1) for l in data.split('\\n')]\n    lines = [(l[0].strip(), l[1].strip()) for l in lines if len(l) == 2]\n    core_count = [k for k, v in lines].count(\"processor\")\n    model_name = dict(lines).get(\"model name\", \"?\")\n    return \"%d core %s\" % (core_count, model_name)\n\ndef get_version_from_file(klippy_src):\n    try:\n        with open(os.path.join(klippy_src, '.version')) as h:\n            return h.read().rstrip()\n    except IOError:\n        pass\n    return \"?\"\n\ndef get_git_version(from_file=True):\n    klippy_src = os.path.dirname(__file__)\n\n    # Obtain version info from \"git\" program\n    gitdir = os.path.join(klippy_src, '..')\n    prog = ('git', '-C', gitdir, 'describe', '--always',\n            '--tags', '--long', '--dirty')\n    try:\n        process = subprocess.Popen(prog, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        ver, err = process.communicate()\n        retcode = process.wait()\n        if retcode == 0:\n            return ver.strip()\n        else:\n            logger.warning(\"Error getting git version:\\n%s\", err.decode())\n    except OSError:\n        logger.exception(\"Exception on run: %s\", traceback.format_exc())\n    if from_file:\n        return get_version_from_file(klippy_src)\n    return \"?\"\n\n# show methods and vars\ndef show_methods(obj):\n    logger.info(\"OBJ '%s'\", obj)\n    for m in sorted([method_name for method_name in dir(obj) if 
callable(getattr(obj, method_name))]):\n logger.info(\"\\tMETHOD: %s\", m)\n for a in sorted(vars(obj)):\n logger.info(\"\\tVAR: %s VALUE %s\", a, getattr(obj, a))\n\n","repo_name":"mfp20/klippy-next-proposal","sub_path":"klippy/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"17344247146","text":"# Author: Robert Luke \n#\n# License: BSD (3-clause)\n\nimport pandas as pd\nimport numpy as np\nimport mne\nfrom mne.transforms import apply_trans, _get_trans\nfrom mne.utils import _validate_type\nfrom mne.io import BaseRaw\n\n\ndef _read_fold_xls(fname, atlas=\"Juelich\"):\n \"\"\"Read fOLD toolbox xls file.\n\n The values are then manipulated in to a tidy dataframe.\n\n Note the xls files are not included as no license is provided.\n\n Parameters\n ----------\n fname : str\n Path to xls file.\n atlas : str\n Requested atlas.\n \"\"\"\n page_reference = {\"AAL2\": 2,\n \"AICHA\": 5,\n \"Brodmann\": 8,\n \"Juelich\": 11,\n \"Loni\": 14}\n\n tbl = pd.read_excel(fname,\n sheet_name=page_reference[atlas])\n\n # Remove the spacing between rows\n empty_rows = np.where(np.isnan(tbl[\"Specificity\"]))[0]\n tbl = tbl.drop(empty_rows).reset_index(drop=True)\n\n # Empty values in the table mean its the same as above\n for row_idx in range(1, tbl.shape[0]):\n for col_idx, col in enumerate(tbl.columns):\n if not isinstance(tbl[col][row_idx], str):\n if np.isnan(tbl[col][row_idx]):\n tbl.iloc[row_idx, col_idx] = \\\n tbl.iloc[row_idx - 1, col_idx]\n\n tbl[\"Specificity\"] = tbl[\"Specificity\"] * 100\n tbl[\"brainSens\"] = tbl[\"brainSens\"] * 100\n return tbl\n\n\ndef _generate_montage_locations(montage='standard_1005'):\n \"\"\"Get standard montage locations in dataframe.\n\n Data is returned in the same format as the eeg_positions library.\n\n Parameters\n ----------\n montage : str\n Standard MNE montage to use.\n \"\"\"\n montage = mne.channels.make_standard_montage(montage)\n coords = pd.DataFrame.from_dict(\n montage.get_positions()['ch_pos']).T\n coords[\"label\"] = coords.index\n coords = coords.rename(columns={0: \"x\", 1: \"y\", 2: \"z\"})\n\n return coords.reset_index(drop=True)\n\n\ndef _find_closest_standard_location(position, reference, trans_pos='mri'):\n \"\"\"Return closest montage label to coordinates.\n\n Parameters\n ----------\n position : array\n Coordinates.\n reference : dataframe\n As generated by _generate_montage_locations.\n trans_pos : str\n Apply a transformation to positions to specified frame.\n Use None for no transformation.\n \"\"\"\n p0 = np.array(position)\n if trans_pos is not None:\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n p0 = apply_trans(head_mri_t, p0)\n\n dists = np.zeros(reference[\"x\"].shape)\n\n for idx in range(len(dists)):\n p1 = np.array([reference[\"x\"][idx],\n reference[\"y\"][idx],\n reference[\"z\"][idx]])\n dists[idx] = np.linalg.norm(p0 - p1)\n\n min_idx = np.argmin(dists)\n\n return reference[\"label\"][min_idx]\n\n\ndef fold_landmark_specificity(raw, landmark, fold_files=[None],\n atlas=\"Juelich\"):\n \"\"\"Return the specificity of each channel to a specified brain landmark.\n\n Specificity values as stored in the fOLD toolbox\n :footcite:`morais2018fnirs`\n excel files.\n\n The data is not provided with MNE-NIRS. 
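Each channel is matched against the tables by snapping its source and detector to the closest standard 10-05 montage positions.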
You must download the excel\n spreadsheets from the authors website and provide the paths using\n the ``fold_files`` argument.\n\n Parameters\n ----------\n raw : BaseRaw\n The fNIRS data.\n landmark : str\n Landmark of interest. Must be present in fOLD toolbox data file.\n fold_files : list\n Paths to fold toolbox files.\n atlas : str\n Brain atlas to use.\n\n Returns\n -------\n spec : array\n Specificity values for each channel to brain landmark.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n if None in fold_files:\n raise ValueError(\"You must specify the path to fOLD xls files\")\n\n if not isinstance(landmark, str):\n raise ValueError(f\"Landmark must be a string. Got {type(landmark)}\")\n\n _validate_type(raw, BaseRaw, 'raw')\n\n reference_locations = _generate_montage_locations()\n\n fold_tbl = pd.DataFrame()\n for fname in fold_files:\n fold_tbl = fold_tbl.append(_read_fold_xls(fname, atlas=atlas))\n\n specificity = np.zeros(len(raw.ch_names))\n for cidx in range(len(raw.ch_names)):\n\n tbl = _source_detector_fold_table(raw, cidx,\n reference_locations, fold_tbl)\n\n if len(tbl) > 0:\n tbl[\"ContainsLmk\"] = [landmark in la for la in tbl[\"Landmark\"]]\n tbl = tbl.query(\"ContainsLmk == True\")[\"Specificity\"]\n\n if len(tbl) == 0:\n continue\n # print(f\"No data for {src_name}-{det_name}\")\n elif len(tbl) == 1:\n specificity[cidx] = tbl.values[0]\n else:\n raise RuntimeError(\"Multiple specificity values returned\")\n\n return np.array(specificity)\n\n\ndef fold_channel_specificity(raw, fold_files=[None], atlas=\"Juelich\"):\n \"\"\"Return the landmarks and specificity a channel is sensitive to.\n\n Specificity values as stored in the fOLD toolbox\n :footcite:`morais2018fnirs`\n excel files.\n\n The data is not provided with MNE-NIRS. You must download the excel\n spreadsheets from the authors website and provide the paths using\n the ``fold_files`` argument.\n\n Parameters\n ----------\n raw : BaseRaw\n The fNIRS data.\n fold_files : list\n Paths to fold toolbox files.\n atlas : str\n Brain atlas to use.\n\n Returns\n -------\n spec : list of dataframes\n List of dataframes, one for each channel.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n if None in fold_files:\n raise ValueError(\"You must specify the path to fOLD xls files\")\n\n _validate_type(raw, BaseRaw, 'raw')\n\n reference_locations = _generate_montage_locations()\n\n fold_tbl = pd.DataFrame()\n for fname in fold_files:\n fold_tbl = fold_tbl.append(_read_fold_xls(fname, atlas=atlas))\n\n chan_spec = list()\n for cidx in range(len(raw.ch_names)):\n\n tbl = _source_detector_fold_table(raw, cidx,\n reference_locations, fold_tbl)\n chan_spec.append(tbl.reset_index(drop=True))\n\n return chan_spec\n\n\ndef _source_detector_fold_table(raw, cidx, reference_locations, fold_tbl):\n src = raw.info['chs'][cidx]['loc'][3:6]\n det = raw.info['chs'][cidx]['loc'][6:9]\n\n src_name = _find_closest_standard_location(src, # noqa\n reference_locations)\n det_name = _find_closest_standard_location(det, # noqa\n reference_locations)\n\n tbl = fold_tbl.query(\"Source == @src_name\"). \\\n query(\"Detector == @det_name\")\n # Try reversing source and detector\n if len(tbl) == 0:\n tbl = fold_tbl.query(\"Source == @det_name\"). 
\\\n query(\"Detector == @src_name\")\n\n return tbl\n","repo_name":"Nathan-Draudt/Prototype-Toolboxes","sub_path":"mne-thinned- v1/mne_nirs/mne_nirs/io/fold/_fold.py","file_name":"_fold.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33851948621","text":"\"\"\"\nCOMP.CS.100 Ensimmäinen Python-ohjelma.\nTekijä: Anna Rumiantseva\nOpiskelijanumero: 050309159\n\"\"\"\n\n\n# TODO:\n# a) Implement the class Player here.\nclass Player:\n \n def __init__(self, nimi):\n \n self.__nimi = nimi\n self.__ochki = 0\n self.__prozent_popadani = 0\n self.__avg = 0 \n self.__counter=0 \n self.__schet = 0\n \n def add_points(self, pts):\n self.__ochki += pts\n if self.__ochki >= 40 and self.__ochki <= 49:\n print(f\"{self.__nimi} needs only {50-self.__ochki} points. It's better to avoid knocking down the pins with higher points.\")\n self.ret_avg(pts)\n if self.__ochki > 50:\n print(f\"{self.__nimi} gets penalty points!\")\n self.__ochki = 25\n \n def get_name(self):\n \n return self.__nimi\n \n def get_points(self):\n \n return self.__ochki\n \n def has_won(self):\n \n if self.__ochki == 50:\n return True\n \n def prozent(self,pst):\n \n if pst > 0:\n self.__schet += 1\n self.__prozent_popadani = self.__schet /self.__counter * 100\n \n else:\n self.__prozent_popadani = self.__schet /self.__counter * 100\n \n \n \n def get_proc(self):\n \n return self.__prozent_popadani\n \n def ret_avg(self, pts):\n if self.__ochki <= 50:\n self.__counter += 1\n if self.__counter >= 2:\n self.__avg = self.__ochki / self.__counter\n if self.__avg < pts:\n print(f\"Cheers {self.__nimi}!\")\n else:\n self.__counter += 1\n \n\ndef main():\n # Here we define two variables which are the objects initiated from the\n # class Player. This is how the constructor of the class Player\n # (the method that is named __init__) is called!\n\n player1 = Player(\"Matti\")\n player2 = Player(\"Teppo\")\n\n throw = 1\n while True:\n\n # if throw is an even number\n if throw % 2 == 0:\n in_turn = player1\n\n # else throw is an odd number\n else:\n in_turn = player2\n\n pts = int(input(\"Enter the score of player \" + in_turn.get_name() +\n \" of throw \" + str(throw) + \": \"))\n # if in_turn.ret_avg(pts):\n # print(f\"Cheers {in_turn.get_name()}!\")\n \n in_turn.add_points(pts)\n\n # TODO:\n # c) Add a supporting feedback printout \"Cheers NAME!\" here.\n in_turn.prozent(pts)\n\n if in_turn.has_won():\n print(\"Game over! 
The winner is \" + in_turn.get_name() + \"!\")\n return\n\n print(\"\")\n print(\"Scoreboard after throw \" + str(throw) + \":\")\n print(f\"{player1.get_name()}: {player1.get_points()} p, hit percentage {player1.get_proc():.1f}\") # TODO: d)\n print(f\"{player2.get_name()}: {player2.get_points()} p, hit percentage {player2.get_proc():.1f}\") # TODO: d)\n print(\"\")\n\n throw += 1\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kettu-metsanen/python_course","sub_path":"kierros10/molkky_template.py","file_name":"molkky_template.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2315184932","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def hasPathSum(self, root, targetSum):\n \"\"\"\n :type root: TreeNode\n :type targetSum: int\n :rtype: bool\n \"\"\"\n self.status = False\n \n def recurse(node, curr_sum = 0):\n if node:\n curr_sum += node.val\n \n if node.left is None and node.right is None and curr_sum == targetSum: \n self.status = True\n return\n \n recurse(node.left, curr_sum)\n recurse(node.right, curr_sum)\n \n recurse(root)\n \n return self.status","repo_name":"iamabhishek98/leetcode_solutions","sub_path":"path-sum/path-sum.py","file_name":"path-sum.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72220889120","text":"from typing import Optional\n\n\ndef mult(a: int, n: int) -> int:\n if n == 0:\n return 0\n elif n < 0:\n raise ValueError('This func works only with positive integers')\n elif n == 1:\n return a\n return a + mult(a, n - 1)\n\n\nprint(mult(2, 4) == 8)\nprint(mult(2, 0) == 0)\ntry:\n print(mult(2, -4))\nexcept ValueError as msg:\n print(msg)\n\n","repo_name":"DanyloSamoylov/PY20092021","sub_path":"lesson25/3_25_3.py","file_name":"3_25_3.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1567518130","text":"import mido\n\n# load midi file\nmidi_file = mido.MidiFile(\"../data/liebesleid.mid\")\n\n# print midi file (used for debugging)\n# for i, track in enumerate(midi_file.tracks):\n# print('Track {}: {}'.format(i, track.name))\n# for j, event in enumerate(track):\n# print('{}: {}'.format(j, event))\n\nfor i, track in enumerate(midi_file.tracks): # for each track...\n for event in track: # for each event in the track\n if event.type == \"note_on\" or event.type == \"note_off\": # if that event turns a note on or turns a note off\n event.note += 2 # transpose its note up a major 2nd\n\n# save the altered midi file\nmidi_file.save(\"../data/exercise2_output.mid\")\n","repo_name":"joloujo/Mido-Exercises","sub_path":"Mido-Exercises/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16435567831","text":"import os\nimport cv2\n\nroot_dir = r'D:\\naolu\\car_component_inspection\\data\\data_formated'\n\nif __name__ == '__main__':\n count = 0\n for root, dirs, files in os.walk(root_dir):\n for f in files:\n if f.find('.jpg') != -1 or f.find('.png') != -1:\n img = cv2.imread(root + '/' + f)\n height, width, channels = img.shape\n if width < height:\n image = 
cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n os.remove(root + '/' + f)\n cv2.imwrite(root + '/' + f, image)","repo_name":"BWang15/car_component_inspection","sub_path":"pre_processing/image_size_mismatch_removal.py","file_name":"image_size_mismatch_removal.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4304009532","text":"import unittest\nfrom unittest.mock import patch\nfrom unittest.mock import MagicMock\nfrom unittest.mock import Mock\nfrom robot import Robot\nfrom finder import Finder\n\nclass TestRobot(unittest.TestCase):\n \"\"\"\n Class to test Robot class\n \"\"\"\n def test_action_1(self):\n \"\"\"\n Basic Moves\n \"\"\"\n self.instance = Robot()\n instructions = ['M','L','R','?','Q']\n for x in instructions:\n self.instance.action(x)\n assert self.instance.x == 0\n assert self.instance.y == 1\n assert self.instance.face == 0\n assert self.instance.exit == True\n\n def test_action_2(self):\n \"\"\"\n Multiple Direction and Move\n \"\"\"\n self.instance = Robot()\n instructions = ['R','M','M','R','L']\n for x in instructions:\n self.instance.action(x)\n assert self.instance.x == 2\n assert self.instance.y == 0\n assert self.instance.face == 3\n\n def test_action_invalid(self):\n \"\"\"\n Invalid Move\n \"\"\"\n self.instance = Robot()\n instructions = ['r']\n for x in instructions:\n self.instance.action(x)\n assert self.instance.x == 0\n assert self.instance.y == 0\n assert self.instance.face == 0\n assert self.instance.exit == False\n\n @patch('robot.Robot.get_input', return_value='Q')\n def test_run_robot_exit(self, mock):\n self.instance = Robot()\n self.instance.runRobot()\n assert self.instance.exit == True\n\n\nclass TestFinder(unittest.TestCase):\n \"\"\"\n Class to test Finder class\n \"\"\"\n def test_finder_multiple(self):\n finder = Finder([None,\"dsa\",None,\"sad\"])\n assert finder.find(\"sad\") == [\"dsa\",\"sad\"]\n\n def test_finder_invalid(self):\n try:\n finder = Finder({})\n except ValueError as ve:\n assert True\n except Exception as ex:\n assert False\n\n def test_finder_single(self):\n finder = Finder([None,\"dsad\",None,\"rweew\",\"rtdfj\"])\n assert finder.find(\"ftdjr\") == \"rtdfj\"\n\n def test_finder_empty(self):\n finder = Finder([])\n assert finder.find(\"ftdjr\") == None\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"khingwe/Hiverassignment","sub_path":"test_hiver.py","file_name":"test_hiver.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43972675442","text":"#-*- coding:utf-8 -*-\r\nfrom django.shortcuts import render_to_response,render\r\nfrom django.http import HttpResponse,HttpResponseRedirect\r\nimport simplejson,sys,os,urllib,re,datetime,time,md5,hashlib,random,calendar\r\nfrom conn import crmdb\r\nfrom zz91page import *\r\ndb=crmdb()\r\nreload(sys)\r\nsys.setdefaultencoding('UTF-8')\r\nnowpath=os.path.dirname(__file__)\r\nexecfile(nowpath+\"/func/wl_function.py\")\r\nexecfile(nowpath+\"/func/crmtools.py\")\r\nexecfile(nowpath+\"/func/company_function.py\")\r\nzzc=customer()\r\nzzs=zzwl()\r\n\r\n#取出所有物流客户信息\r\ndef wl_list(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n page=request.GET.get('page')\r\n #是否为主管\r\n has_auth=zzc.is_hasauth(user_id=user_id)\r\n 
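#default to the first page when no page parameter is supplied\r\n    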
if not page:\r\n page=1\r\n searchlist={}\r\n order_number=request.GET.get(\"order_number\")\r\n if order_number:\r\n searchlist['order_number']=order_number\r\n else:\r\n order_number=''\r\n company_name=request.GET.get(\"company_name\")\r\n if company_name:\r\n searchlist['company_name']=company_name\r\n else:\r\n company_name=''\r\n wechat=request.GET.get(\"wechat\")\r\n if wechat:\r\n searchlist['wechat']=wechat\r\n else:\r\n wechat=''\r\n username=request.GET.get(\"username\")\r\n if username:\r\n searchlist['username']=username\r\n else:\r\n username=''\r\n mobile=request.GET.get(\"mobile\")\r\n if mobile:\r\n searchlist['mobile']=mobile\r\n else:\r\n mobile=''\r\n car_for=request.GET.get(\"car_for\")\r\n if car_for:\r\n searchlist['car_for']=car_for\r\n else:\r\n car_for=''\r\n weight=request.GET.get(\"weight\")\r\n if weight:\r\n searchlist['weight']=weight\r\n else:\r\n weight=''\r\n personid=request.GET.get(\"personid\")\r\n if personid:\r\n searchlist['personid']=personid\r\n else:\r\n personid=''\r\n time1=request.GET.get('time1')\r\n time2=request.GET.get('time2')\r\n if time1 and time2:\r\n searchlist['time1']=time1\r\n searchlist['time2']=time2\r\n else:\r\n time1=''\r\n time2=''\r\n star=request.GET.get('star')\r\n if star:\r\n searchlist['star']=star\r\n else:\r\n star=''\r\n orderstr=request.GET.get('orderstr')\r\n if orderstr:\r\n searchlist['orderstr']=orderstr\r\n else:\r\n orderstr=''\r\n dotype=request.GET.get('dotype')\r\n if dotype:\r\n searchlist['dotype']=dotype\r\n else:\r\n dotype=''\r\n searchurl=urllib.urlencode(searchlist)\r\n searchlist['user_id']=user_id\r\n #获得销售人员列表(selection)\r\n allsalesman=zzc.get_allsalesman(user_id=user_id,wl=1)\r\n funpage=zz91page()\r\n limitNum=funpage.limitNum(15)\r\n nowpage=funpage.nowpage(int(page))\r\n frompageCount=funpage.frompageCount()\r\n after_range_num = funpage.after_range_num(3)\r\n before_range_num = funpage.before_range_num(6)\r\n userallr=zzs.getwllist(frompageCount=frompageCount,limitNum=limitNum,searchlist=searchlist)\r\n listcount=userallr['count']\r\n listall=userallr['list']\r\n listcount = funpage.listcount(listcount)\r\n page_listcount=funpage.page_listcount()\r\n firstpage = funpage.firstpage()\r\n lastpage = funpage.lastpage()\r\n page_range = funpage.page_range()\r\n nextpage = funpage.nextpage()\r\n prvpage = funpage.prvpage()\r\n return render_to_response('wl/wl_list.html',locals())\r\n#添加物流客户\r\ndef wl_add(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n return render_to_response('wl/wl_add.html',locals())\r\ndef wl_save(request):\r\n company_name=request.POST.get('company_name')\r\n order_number=request.POST.get('order_number')\r\n wechat=request.POST.get('wechat')\r\n mobile=request.POST.get('mobile')\r\n username=request.POST.get('username')\r\n car_for=request.POST.get('car_for')\r\n weight=request.POST.get('weight')\r\n time=request.POST.get('time')\r\n main_business=request.POST.get('main_business')\r\n personid=request.session.get('user_id')\r\n register_time=datetime.datetime.now()\r\n sql='insert into wl_customer(company_name,order_number,wechat,mobile,username,car_for,weight,time,main_business,personid,register_time) values(%s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s)'\r\n result=db.updatetodb(sql,[company_name,order_number,wechat,mobile,username,car_for,weight,time,main_business,personid,register_time])\r\n sql='select id from 
wl_customer order by register_time desc '\r\n result1=db.fetchonedb(sql)\r\n last_insert_id=result1['id']\r\n sql='insert into wl_assign(uid,personid) values(%s, %s)'\r\n result2=db.updatetodb(sql,[last_insert_id,personid])\r\n return HttpResponseRedirect('list.html')\r\n#修改人员信息\r\ndef wl_mod(request):\r\n if request.method==\"POST\":\r\n company_name=request.POST.get('company_name')\r\n order_number=request.POST.get('order_number')\r\n wechat=request.POST.get('wechat')\r\n username=request.POST.get('username')\r\n mobile=request.POST.get(\"mobile\")\r\n car_for=request.POST.get('car_for')\r\n weight=request.POST.get('weight')\r\n time=request.POST.get('time')\r\n id=request.POST.get('id')\r\n if id:\r\n sql='update wl_customer set company_name=%s,order_number=%s,wechat=%s,username=%s,mobile=%s,car_for=%s,weight=%s,time=%s where id=%s'\r\n result=db.updatetodb(sql,[company_name,order_number,wechat,username,mobile,car_for,weight,time,id])\r\n return HttpResponseRedirect('list.html')\r\n else:\r\n id=request.GET.get('uid')\r\n if id:\r\n sql='select * from wl_customer where id=%s'\r\n result=db.fetchonedb(sql,[id])\r\n time=result['time']\r\n result['time']=formattime(time,flag=2)\r\n return render_to_response('wl/wl_mod.html',locals())\r\n\r\n#批量处理\r\ndef wl_all(request):\r\n check_box_list = request.REQUEST.getlist(\"check_box_list\")\r\n topersonid=request.POST.get('topersonid')\r\n value=request.POST.get('dostay',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n fdate=datetime.datetime.now()\r\n if not value:\r\n for id in check_box_list:\r\n sql='delete from wl_customer where id=%s'\r\n result=db.updatetodb(sql,[id])\r\n elif value=='assignto':\r\n for id in check_box_list:\r\n sql='insert into wl_assign(uid,personid,fdate) values(%s, %s, %s)'\r\n result=db.updatetodb(sql,[id,topersonid,fdate])\r\n elif value=='tomy':\r\n for id in check_box_list:\r\n sql='insert into wl_assign(uid,personid,fdate) values(%s, %s, %s)'\r\n result=db.updatetodb(sql,[id,user_id,fdate])\r\n elif value=='gonghai':\r\n for id in check_box_list:\r\n sql='delete from wl_assign where uid=%s and personid=%s'\r\n result=db.updatetodb(sql,[id,user_id])\r\n return HttpResponseRedirect('list.html')\r\n \r\n#单独界面显示物流客户信息\r\ndef wl_customershow(request):\r\n if request.method==\"POST\":\r\n uid=request.GET.get('uid')\r\n contactstate=request.POST.get('contactstate')\r\n star=request.POST.get('star')\r\n nextcontact_time=request.POST.get('nextcontact_time')\r\n contact_bz=request.POST.get('contact_bz')\r\n personid=request.session.get('user_id',default=None)\r\n lastcontact_time=datetime.datetime.now()\r\n fdate=datetime.datetime.now()\r\n if uid:\r\n sql='insert into wl_history(uid,contactstate,star,nextcontact_time,contact_bz,personid,fdate) values(%s,%s,%s,%s,%s,%s,%s)'\r\n result=db.updatetodb(sql,[uid,contactstate,star,nextcontact_time,contact_bz,personid,fdate])\r\n sql='update wl_customer set star=%s,nextcontact_time=%s,lastcontact_time=%s where id=%s'\r\n result=db.updatetodb(sql,[star,nextcontact_time,lastcontact_time,uid])\r\n return HttpResponseRedirect('list.html')\r\n else:\r\n id=request.GET.get('uid')\r\n if id:\r\n sql='select * from wl_customer where id=%s'\r\n result=db.fetchonedb(sql,[id])\r\n time=result['time']\r\n result['time']=formattime(time,flag=2)\r\n time=result['register_time']\r\n result['register_time']=formattime(time,flag=2)\r\n return render_to_response('wl/wl_customershow.html',locals())\r\n#操作记录\r\ndef wl_customer_history(request):\r\n 
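#paginated view of a single customer's contact history\r\n    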
page=request.GET.get('page')\r\n if not page:\r\n page=1\r\n searchlist={}\r\n uid=request.GET.get('uid')\r\n if uid:\r\n searchlist['uid']=uid\r\n funpage=zz91page()\r\n limitNum=funpage.limitNum(4)\r\n nowpage=funpage.nowpage(int(page))\r\n frompageCount=funpage.frompageCount()\r\n after_range_num = funpage.after_range_num(3)\r\n before_range_num = funpage.before_range_num(6)\r\n userallr=zzs.getcustomerhistory(searchlist=searchlist,frompageCount=frompageCount,limitNum=limitNum)\r\n listall=userallr['list']\r\n listcount=userallr['count']\r\n listcount = funpage.listcount(listcount)\r\n page_listcount=funpage.page_listcount()\r\n firstpage = funpage.firstpage()\r\n lastpage = funpage.lastpage()\r\n page_range = funpage.page_range()\r\n nextpage = funpage .nextpage()\r\n prvpage = funpage.prvpage()\r\n return render_to_response('wl/wl_customershow_history.html',locals())","repo_name":"cash2one/zzpython","sub_path":"客户关系管理系统/zz91crm/wl.py","file_name":"wl.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38645585176","text":"import re\nimport sys\n\ndef parseData(main_file):\n count = 1\n reviews = list()\n pterms = list()\n rterms = list()\n scores = list()\n quote = '&' + 'quot' + ';'\n replace_punctuation = re.compile('[%s]' % re.escape('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^`{|}~'))\n for x in main_file:\n if \"product/productId: \" in x:\n reviews.append(str(count)+\",\")\n str_entry = x.replace(\"product/productId: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append(str_entry+\",\")\n\n elif \"product/title: \" in x:\n str_entry = x.replace(\"product/title: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n terms = replace_punctuation.sub(\" \", str_entry)\n terms = terms.split(\" \")\n for term in terms:\n if len(term)>2:\n pterms.append(term.lower()+\",\")\n pterms.append(str(count)+\"\\n\")\n\n reviews.append('\"'+str_entry+'\",')\n\n elif \"product/price: \" in x:\n str_entry = x.replace(\"product/price: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append(str_entry+\",\")\n\n elif \"review/userId: \" in x:\n str_entry = x.replace(\"review/userId: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append(str_entry+\",\")\n\n elif \"review/profileName: \" in x:\n str_entry = x.replace(\"review/profileName: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append('\"'+str_entry+'\",')\n\n elif \"review/helpfulness: \" in x:\n str_entry = x.replace(\"review/helpfulness: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append(str_entry+\",\")\n\n elif \"review/score: \" in x:\n str_entry = x.replace(\"review/score: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append(str_entry+\",\")\n scores.append(str(str_entry)+\",\")\n scores.append(str(count)+\"\\n\")\n\n\n elif \"review/time: \" in x:\n str_entry = x.replace(\"review/time: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append(str_entry+\",\")\n\n elif \"review/summary: \" in x:\n str_entry = x.replace(\"review/summary: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n terms = replace_punctuation.sub(\" \", str_entry)\n terms = terms.split(\" \")\n for term in terms:\n if len(term)>2:\n rterms.append(term.lower()+\",\")\n 
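#follow each kept term with the review id so the output forms 'term,id' lines\n                    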
rterms.append(str(count)+\"\\n\")\n reviews.append('\"'+str_entry+'\",')\n\n elif \"review/text: \" in x:\n str_entry = x.replace(\"review/text: \",\"\").replace(\"\\\\\",\"\\\\\\\\\").replace('\"',quote).strip('\\n')\n reviews.append('\"'+str_entry+'\"\\n')\n terms = replace_punctuation.sub(\" \", str_entry)\n terms = terms.split(\" \")\n for term in terms:\n if len(term)>2:\n rterms.append(term.lower()+\",\")\n rterms.append(str(count)+\"\\n\")\n\n count+=1\n\n with open(\"pterms.txt\",\"a\") as pterms_file:\n for entry in pterms:\n pterms_file.write(entry)\n\n with open(\"scores.txt\",\"a\") as scores_file:\n for entry in scores:\n scores_file.write(str(entry))\n\n with open(\"rterms.txt\",\"a\") as rterms_file:\n for entry in rterms:\n rterms_file.write(str(entry))\n\n with open(\"reviews.txt\",\"a\") as reviews_file:\n for entry in reviews:\n reviews_file.write(entry)\n\nif __name__ == \"__main__\":\n parseData(sys.stdin)","repo_name":"k----n/InformationRetrievalSystem","sub_path":"data_parser.py","file_name":"data_parser.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32570801221","text":"import dulwich.repo\n\nfrom randovania.interface_common import preset_manager\nfrom randovania.layout.versioned_preset import VersionedPreset\n\n\nasync def test_add_then_delete_preset(tmp_path, default_preset):\n p = VersionedPreset.with_preset(default_preset.fork())\n\n dulwich.repo.Repo.init(tmp_path)\n manager = preset_manager.PresetManager(tmp_path.joinpath(\"presets\"))\n await manager.load_user_presets()\n\n assert manager.preset_for_uuid(p.uuid) is None\n manager.add_new_preset(p)\n assert manager.preset_for_uuid(p.uuid) == p\n manager.delete_preset(p)\n assert manager.preset_for_uuid(p.uuid) is None\n","repo_name":"vgm5/randovania","sub_path":"test/interface_common/test_preset_manager.py","file_name":"test_preset_manager.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"15504531871","text":"import random\nimport csv\npokemon = [\"watertotle\",\"squirtle\",\"pikachu\",\"bulbasor\",\"charmander\"]\nspecies = [\"water\",\"water\",\"magnet\",\"green\",\"fire\"]\nwriter = csv.writer(open('metadata.tsv', 'w'), delimiter='\\t', lineterminator='\\n')\nwriter.writerow([\"Pokemon\",\"Species\"])\nfor i in range(5):\n writer.writerow([pokemon[i],species[i]])\n\n\n\n\n# with open('values.tsv', 'w') as tsvfile:\n# writer = csv.writer(tsvfile, delimiter='\\t', lineterminator='\\n')\n# for i in range(5):\n# writer.writerow([random.random(),random.random()])","repo_name":"mihirp1998/EmbLang","sub_path":"vis_imagine_static_voxels/write_csv.py","file_name":"write_csv.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"38354664239","text":"from mpi4py import MPI\nimport random\nimport time\n\ndef create_matrix(n_rows, n_columns):\n matrix = []\n line = []\n element = 0\n\n while len(matrix) != n_rows:\n n = random.randint(1,99)\n line.append(n)\n element = element + 1\n\n\n if len(line) == n_columns:\n matrix.append(line)\n line = []\n\n return matrix\n\ndef create_null_matrix(n_rows, n_columns):\n matrix = []\n line = []\n element = 0\n\n while len(matrix) != n_rows:\n n = random.randint(1,99)\n line.append(0)\n element = element + 1\n\n\n if len(line) == n_columns:\n matrix.append(line)\n line = []\n\n 
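# note: the randint result above is never used; every element appended is 0\n    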
return matrix\n\n# M1 = matrix 1, M2 = matrix 2, MF = final matrix\n# n = size of matrices, beg = beginning of iteration, end = end of iteration\ndef mult_matrix(m1, m2, mf, n, beg, end):\n\n # row\n for i in range(beg, end):\n # line\n for k in range(n):\n elem = 0\n # column\n for j in range(n):\n\n elem = elem + (m1[i][j] * m2[j][k])\n mf[i][k] = elem\n\ndef mult_matrix2(m1, m2, mf, n, beg, end, offset):\n\n # row\n for i in range(beg, end):\n # line\n for k in range(n):\n elem = 0\n # column\n for j in range(n):\n\n elem = elem + (m1[i][j] * m2[j][k])\n mf[i-offset][k] = elem\n # print(elem)\n\n# getting basic info\ncomm = MPI.COMM_WORLD\nrank = MPI.COMM_WORLD.Get_rank()\nsize = MPI.COMM_WORLD.Get_size()\n\n\ncounter = 0;\n# print(rank)\nif rank == 0:\n # print(\"----START----\")\n n = 256\n x = create_matrix(n,n)\n y = create_matrix(n,n)\n z1 = create_null_matrix(128,n)\n start = time.time()\n comm.send(n, dest=1, tag=7)\n comm.send(x, dest=1, tag=7)\n comm.send(y, dest=1, tag=7)\n mult_matrix(x, y, z1, n, 0, 128)\n z2 = comm.recv(source=1, tag=8)\n z = z1+z2\n end = time.time()\n print(end - start)\n # print(\"----END----\")\nelse:\n n = comm.recv(source=0, tag=7)\n x = comm.recv(source=0, tag=7)\n y = comm.recv(source=0, tag=7)\n z = create_null_matrix(128,n)\n mult_matrix2(x, y, z, n, 128, n, 128)\n comm.send(z, dest=0, tag=8)\n","repo_name":"finkenauer/parallel-python","sub_path":"mpi_matrix.py","file_name":"mpi_matrix.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8707590508","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 21 11:34:53 2020\n\n@author: suryakantkumar\n\"\"\"\n\n'''\nProblem : You will be given a list of 32 bit unsigned integers. \nFlip all the bits (1 --> 0 and 0 --> 1) and print the result as an unsigned integer.\n'''\n\n\nimport os \n\ndef flippingBits(n):\n binary = bin(n) # Converting number into binary\n b_str = str(binary)\n b_str = b_str[2:] # removing '0b' from binary number\n \n l = len(b_str)\n li = ['0']*32 # Created 32 bit array\n \n j = 0\n for i in range(32-l, 32): # Upadated array with the binay value of number\n li[i] = b_str[j]\n j += 1\n \n for i in range(len(li)): # Replace 0 with 1 and 1 with 0\n if li[i] == '0':\n li[i] = '1'\n else:\n li[i] = '0'\n \n number = 0\n k = 0\n for i in range(len(li)-1, -1, -1): # iterating over the array in reverse and counting the number bit by bit\n if li[i] == '1':\n number += 2 ** k\n k += 1\n else:\n k += 1\n \n return number\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n for q_itr in range(q):\n n = int(input())\n result = flippingBits(n)\n fptr.write(str(result) + '\\n')\n fptr.close()","repo_name":"SuryakantKumar/HackerRank-Problem-Solving","sub_path":"Easy Level/Flipping-Bits.py","file_name":"Flipping-Bits.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29788375385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 12 11:56:20 2020\n\n@author: Ritwik Gupta\n\"\"\"\n#This corpus consists of 5 different departments from the Patrika newspaper in Hindi. 
The departments are Automobile,Sports,health,gadget and entertainment.\n\nimport os\n#IndianCorpusReader is a package which helps read and tokenize majority of the Indian Languages\nfrom nltk.corpus import IndianCorpusReader\ncorpusdir = 'Patrika/news'\n#Redirect to the corpus destination\ncorpus = os.listdir(corpusdir)\n\n#Create the corpus with 5 text files in Hindi\nnewcorpus = IndianCorpusReader(\"./Patrika/news\",corpus)\n\n#Read the files in the corpus\nfor i in sorted(newcorpus.fileids()):\n    print(i)\n    with newcorpus.open(i) as f:\n        print(f.read().strip())\n#print(newcorpus.words(\"automobile_final_text.txt\"))\n    \n#print(newcorpus.raw().strip())\n\n#Print the 5 files ids present in the corpus\nprint(newcorpus.fileids())\n\n#Check the content of the health file\nnewcorpus.words(fileids='health_final_text.txt')[0:10]\n\n#Sentence and word tokenize the health file from the corpus\nfrom nltk.tokenize import word_tokenize,sent_tokenize\nhealth = word_tokenize(\" \".join(newcorpus.words(fileids='health_final_text.txt')))\nhealth_sentences = sent_tokenize(newcorpus.raw(fileids='health_final_text.txt'))\n","repo_name":"Ritwik411/Wintersem_NLP","sub_path":"Hindi Corpus/Hindi_corpus.py","file_name":"Hindi_corpus.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"9870552894","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 2 14:55:00 2021\r\n\r\n@author: jdodia\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 2 12:27:04 2021\r\n\r\n@author: jdodia\r\n\"\"\"\r\n# =============================================================================\r\n# importing necessary packages\r\n# \r\n# =============================================================================\r\nfrom tkinter import *\r\nimport shutil \r\nimport tkinter as tk \r\nfrom tkinter import messagebox, filedialog \r\n\r\n\r\nroot = Tk()\r\nroot.geometry(\"1000x500\")\r\nroot.title(\"Simulation with OpenFOAM using icoFoam solver\")\r\n# =============================================================================\r\n# Defining different widgets\r\n# \r\n# =============================================================================\r\n\r\n\r\n#Project name\r\ndef show_data():\r\n    project = float(ent1.get())\r\n    \r\nl1 = Label(root, text = \"Project name : \", bg = \"#E8D579\")\r\n\r\nent1 = Entry(root)\r\n\r\nl1.grid(row=0, column=0, pady = 5, padx = 5)\r\n\r\nent1.grid(row=0, column=1,pady = 5, padx = 5, columnspan = 2)\r\n\r\n# Boundary Condition starts---------------\r\nl3 = Label(root, text = \"Input Variables :\")\r\nl3.grid(row=0, column=5)\r\n\r\nInlet = [\"Pressure\", \"Velocity\", \"Flow\"]\r\ntext_Input=StringVar()\r\nvariable3 = StringVar(root)\r\nvariable3.set(Inlet[0])\r\nl7 = Label(root, text=\"Inlet Condition : \", bg =\"#E8D579\")\r\nl7.grid(row=1, column=5)\r\ny = OptionMenu(root, variable3, *Inlet)\r\ny.grid(row=1, column=7)\r\n\r\nOutlet = [\"Pressure\", \"Velocity\", \"Flow\"]\r\ntext_Input=StringVar()\r\nvariable4 = StringVar(root)\r\nvariable4.set(Outlet[0])\r\nl8 = Label(root, text=\"Outlet Condition : \", bg =\"#E8D579\")\r\nl8.grid(row=2, column=5)\r\nz = OptionMenu(root, variable4, *Outlet)\r\nz.grid(row=2, column=7)\r\n\r\n\r\n#Boundary Condition ends-------------\r\n\r\n#Project name end\r\n#------------------------------------\r\n#Solid model widget\r\ndef CreateWidgets(): \r\n    link_Label = Label(root, text =\"Choose CAD file : \", \r\n                       bg 
= \"#E8D579\") \r\n link_Label.grid(row = 1, column = 0, \r\n pady = 5, padx = 5) \r\n \r\n root.sourceText = Entry(root, width = 50, \r\n textvariable = sourceLocation) \r\n root.sourceText.grid(row = 1, column = 1, \r\n pady = 5, padx = 5, \r\n columnspan = 2) \r\n \r\n source_browseButton = Button(root, text =\"Browse\", \r\n command = SourceBrowse, width = 15) \r\n source_browseButton.grid(row = 1, column = 3, \r\n pady = 5, padx = 5) \r\n \r\n destinationLabel = Label(root, text =\"Project location : \", \r\n bg =\"#E8D579\") \r\n destinationLabel.grid(row = 2, column = 0, \r\n pady = 5, padx = 5) \r\n \r\n root.destinationText = Entry(root, width = 50, \r\n textvariable = destinationLocation) \r\n root.destinationText.grid(row = 2, column = 1, \r\n pady = 5, padx = 5, \r\n columnspan = 2) \r\n \r\n dest_browseButton = Button(root, text =\"Browse\", \r\n command = DestinationBrowse, width = 15) \r\n dest_browseButton.grid(row = 2, column = 3, \r\n pady = 5, padx = 5) \r\n \r\n copyButton = Button(root, text =\"Mesh & Update\", \r\n command = MeshUpdate, width = 15) \r\n copyButton.grid(row = 4, column = 3, \r\n pady = 5, padx = 5) \r\n \r\ndef SourceBrowse(): \r\n \r\n root.files_list = list(filedialog.askopenfilenames(initialdir =\" \")) \r\n root.sourceText.insert('1', root.files_list) \r\n \r\ndef DestinationBrowse(): \r\n destinationdirectory = filedialog.askdirectory(initialdir =\" \") \r\n root.destinationText.insert('1', destinationdirectory) \r\n \r\ndef MeshUpdate(): \r\n files_list = root.files_list \r\n destination_location = destinationLocation.get() \r\n for f in files_list: \r\n shutil.copy(f, destination_location) \r\n \r\n messagebox.showinfo(\"SUCCESSFULL\") \r\n \r\nroot.config(background = \"black\") \r\n \r\nsourceLocation = StringVar() \r\ndestinationLocation = StringVar() \r\nCreateWidgets() \r\n\r\n#Solid model widget end\r\n#---------------------\r\n\r\n#Mesh widget starts\r\n\r\nMesh = [\"Coarse\", \"Medium\", \"Fine\"]\r\ntext_Input=StringVar()\r\nvariable = StringVar(root)\r\nvariable.set(Mesh[0])\r\nl5 = Label(root, text=\"Mesh Selection : \", bg =\"#E8D579\")\r\nl5.grid(row=4)\r\nw = OptionMenu(root, variable, *Mesh)\r\nw.grid(row=4, column=1)\r\n\r\n#Mesh widget ends\r\n#----------------------------\r\n# =============================================================================\r\n# \r\n# #solver widget start\r\n# \r\n# Solver = [\"icoFoam\", \"simpleFoam\", \"PISO\"]\r\n# text_Input=StringVar()\r\n# variable1 = StringVar(root)\r\n# variable1.set(Solver[0])\r\n# l6 = Label(root, text=\"Solver Selection : \", bg =\"#E8D579\")\r\n# l6.grid(row=5)\r\n# x = OptionMenu(root, variable1, *Solver)\r\n# x.grid(row=5, column=1)\r\n# #---------------\r\n# #Solver widget ends\r\n\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n# Widgets end\r\n# =============================================================================\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"jigarmech/automatecfd","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"39366932827","text":"def minimumWaitingTime(queries):\n \"\"\"\n TC: O(nlogn)\n SC: O(1)\n \"\"\"\n if not queries:\n return 0\n \n queries.sort()\n runningWaitingTime = 0\n totalWaitingTime = 0\n\n for q in queries:\n totalWaitingTime += 
runningWaitingTime\n runningWaitingTime += q\n \n return totalWaitingTime","repo_name":"nikhiilll/Data-Structures-and-Algorithms-Prep","sub_path":"Greedy Algorithms/Other/MinimumWaitingTime.py","file_name":"MinimumWaitingTime.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1217589611","text":"# coding: utf8\n\"\"\"\n...\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport enum\nimport json\nimport logging\nfrom typing import Optional\n\nfrom .base import (\n Parts,\n ParseError,\n SansIOBase,\n)\nfrom ..exceptions import DatabaseException\n\nlogger = logging.getLogger(__name__)\n\n\n@enum.unique\nclass ParserState(enum.Enum):\n START = \"STATE_START\"\n NEXT_BLOCK = \"STATE_NEXT_BLOCK\"\n META = \"STATE_META\"\n DATA = \"STATE_DATA\"\n FOOTER = \"STATE_FOOTER\"\n TOTALS = \"STATE_TOTALS\" # deprecated\n STATS = \"STATE_STATS\" # deprecated\n FINISHED = \"STATE_FINISHED\"\n ERROR = \"STATE_ERROR\"\n\n\nclass JSONCompactChunksParser(SansIOBase):\n \"\"\"\n A parser class for JSONCompact chunks.\n\n Assumes a very particular positioning of newlines and whitespaces (and the\n data structure). Use a clickhouse version whitelist for reliability.\n\n Structured similarly to `h11.Connection`.\n \"\"\"\n\n # minimal chunk size to ensure all the whitespaces for parsing actually fit\n # in and there's at least something in it.\n min_chunk_size = 1024\n # A bunch of things are expected to fit in this size:\n # metadata, each data line, footer.\n # On the other hand, garbage data will be read up to this size (+ chunk\n # size) into memory until raising an error.\n max_chunk_size = 8 * 1024 * 1024\n\n parts = Parts\n\n STATE_START = ParserState.START.value\n STATE_NEXT_BLOCK = ParserState.NEXT_BLOCK.value\n STATE_META = ParserState.META.value\n STATE_DATA = ParserState.DATA.value\n STATE_FOOTER = ParserState.FOOTER.value\n STATE_TOTALS = ParserState.TOTALS.value # deprecated\n STATE_STATS = ParserState.STATS.value # deprecated\n STATE_FINISHED = ParserState.FINISHED.value\n STATE_ERROR = ParserState.ERROR.value\n\n class RowTooLarge(ParseError):\n \"\"\" ... 
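Raised when buffered input for a single row or block would exceed max_chunk_size. 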
\"\"\"\n\n def __init__(self, autoparse=True):\n self._unread = []\n self._results = []\n self.state = self.STATE_START\n self._autoparse = autoparse\n\n def receive_data(self, data: Optional[bytes]):\n \"\"\" Add data to the internal receive buffer \"\"\"\n if not data:\n return # allow and ignore empty chunks, just in case.\n self._unread.append(data)\n\n @staticmethod\n def _splitpair(chunk, splitters, start=0):\n assert not isinstance(splitters, (bytes, str))\n for idx, (splitter, rightsplit) in enumerate(splitters):\n if rightsplit:\n func = chunk.rindex\n else:\n func = chunk.index\n try:\n pos = func(splitter, start)\n except ValueError: # `ValueError: subsection not found`\n continue\n\n return chunk[:pos], chunk[pos + len(splitter):], idx\n\n return chunk, None, None\n\n def _read_more(self, current_chunk):\n if not self._unread:\n return False, current_chunk\n extra_data = self._unread.pop(0)\n if current_chunk is not None:\n # max_chunk_size check\n l1 = len(current_chunk)\n l2 = len(extra_data)\n mcs = self.max_chunk_size\n if l1 <= mcs and l2 <= mcs and l1 + l2 > mcs:\n # Sample case: looking for metadata end, reading until it is\n # found, but it is not found and there's a lot of data.\n raise self.RowTooLarge(\n \"Tried to read more data than `max_chunk_size`\",\n dict(l1=l1, l2=l2, mcs=mcs))\n\n # Same as below, but only done when it is likely to help\n # (i.e. on large rows).\n if (l1 > self.min_chunk_size * 5 and not isinstance(current_chunk, bytearray)):\n current_chunk = bytearray(current_chunk)\n\n current_chunk += extra_data\n\n return True, current_chunk\n\n # # This should allow append (above) to happen inplace,\n # # but adds an extra copy.\n # extra_data = bytearray(extra_data)\n return True, extra_data\n\n def _read_until(self, current_chunk, end_markers):\n assert not isinstance(end_markers, (bytes, str))\n # Semantics note:\n # `right is None` means `not found`,\n # `right == b\"\"` means `endswith`\n left, right, idx = self._splitpair(current_chunk, end_markers)\n max_marker_len = max(len(marker[0]) for marker in end_markers)\n while right is None:\n chunk_prev_len = len(current_chunk)\n # raises ValueError:\n more, current_chunk = self._read_more(current_chunk)\n if not more:\n break\n # Try again.\n start_pos = chunk_prev_len - max_marker_len\n if start_pos < 0:\n start_pos = 0\n left, right, idx = self._splitpair(\n current_chunk,\n end_markers,\n start=start_pos,\n )\n if right is not None:\n return left, right, idx\n return None, current_chunk, None\n\n def _put_back(self, chunk):\n if not chunk:\n return\n self._unread.insert(0, chunk)\n\n def _add_result(self, part, item):\n if self._autoparse:\n item = json.loads(item)\n self._results.append((part, item))\n\n def next_event(self):\n \"\"\" ... 
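Return the next parsed (part, data) tuple, or (NEED_DATA, None) when more input must be supplied via receive_data(). 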
\"\"\"\n if self.state == self.STATE_FINISHED: # terminating state\n return self.parts.FINISHED, None\n if self.state == self.STATE_ERROR: # terminating state\n raise ParseError(\n \"next event requested in error-state\",\n \"STATE_ERROR\", b\"\")\n\n if self._results:\n return self._results.pop(0)\n\n more, chunk = self._read_more(None)\n if not chunk:\n return self.parts.NEED_DATA, None\n\n while len(chunk) < self.min_chunk_size:\n more, chunk = self._read_more(chunk)\n if not more:\n break\n\n if self.state == self.STATE_START:\n prefix = b'{\\n\\t\"meta\":\\n\\t[\\n'\n chunk = chunk.lstrip() # allow extra whitespaces at start.\n\n if len(chunk) < len(prefix):\n self._put_back(chunk)\n return self.parts.NEED_DATA, None\n\n if not chunk.startswith(prefix):\n self.state = self.STATE_ERROR\n raise ParseError(\"unexpected data start\", \"STATE_START\", chunk)\n\n chunk = chunk[len(prefix):]\n self.state = self.STATE_META\n\n if self.state == self.STATE_META:\n # (implicit \"[\") + data + (implicit \"]\")\n # maybe: `meta_end = b\"\\n\\t\\t}\\n\\t],\\n\\n\"`\n meta_end = b\"\\n\\t],\\n\"\n meta_chunk, chunk, _ = self._read_until(\n chunk,\n (\n (meta_end, False),\n ))\n if meta_chunk is not None:\n self.state = self.STATE_NEXT_BLOCK\n self._add_result(self.parts.META, b\"[\" + meta_chunk + b\"]\")\n\n if self.state == self.STATE_NEXT_BLOCK:\n next_blocks = (\n (b'\\n\\t\"data\":\\n\\t[\\n', self.STATE_DATA), # data_start\n # Footer examples:\n # '\\n\\n\\t\"totals\": ['\n # '\\n\\n\\t\"extremes\":\\n\\t{'\n # '\\n\\n\\t\"rows\": '\n (b'\\n\\t\"', self.STATE_FOOTER),\n )\n for prefix, next_state in next_blocks:\n left, right, _ = self._splitpair(chunk, ((prefix, False),))\n if right is not None:\n if left.strip():\n self.state = self.STATE_ERROR\n raise ParseError(\n (\"unexpected data between blocks \"\n \"(to state {})\").format(next_state),\n \"STATE_NEXT_BLOCK\",\n chunk)\n chunk = right\n self.state = next_state\n break\n # `else:`\n if self.state == self.STATE_NEXT_BLOCK:\n if len(chunk) > self.min_chunk_size:\n self.state = self.STATE_ERROR\n raise ParseError(\n \"could not find the next block\",\n \"STATE_NEXT_BLOCK\", chunk)\n # Otherwise perhaps the chunk was too small.\n\n if self.state == self.STATE_DATA:\n # Up to multiple b\"...\\n\\t\\t[...],\\n...\" chunks\n data_end = b\"\\n\\t],\\n\"\n data_line_end = b\"],\\n\"\n data_chunk, chunk, match_idx = self._read_until(\n chunk,\n # Tricky ordering: most of the time this will result in going\n # over the data twice,\n # but it has to be done because `data_end` can be found after\n # the data block.\n (\n (data_end, False),\n (data_line_end, True),\n ))\n if data_chunk is not None:\n # maybe: `and b\"[\" in data_chunk`\n # maybe: `and data_chunk != b\"\\n\\t\"`\n self._add_result(\n self.parts.DATACHUNK,\n (b\"[\" +\n data_chunk +\n (b\"]]\" if match_idx == 1 else b\"]\")),\n )\n if match_idx == 0: # `data_end`\n self.state = self.STATE_NEXT_BLOCK\n\n if self.state == self.STATE_FOOTER:\n suffix = b\"\\t}\\n}\"\n stats_chunk, chunk, _ = self._read_until(chunk, ((suffix, False),))\n if stats_chunk is not None:\n self.state = self.STATE_FINISHED\n self._add_result(\n self.parts.STATS,\n b'{\"' + stats_chunk + suffix)\n chunk = chunk.strip()\n if chunk:\n # Data in this state is not expected. 
Check if it's a timeout error and log the error.\n # Raise ParseError otherwise.\n db_exc = DatabaseException(chunk.decode())\n if db_exc.code == '159': # Timeout exceeded\n logger.error(\"Got timeout error message after finished response processing.\")\n else:\n self.state = self.STATE_ERROR\n raise ParseError(\n \"Extra data after the end\",\n \"STATE_STATS\", chunk)\n\n if chunk:\n self._put_back(chunk)\n\n if self._results:\n return self._results.pop(0)\n return self.parts.NEED_DATA, None\n\n @classmethod\n def as_generator(cls, data_chunks, **kwargs):\n parser = cls(**kwargs)\n event = None\n chunk = None\n for chunk in data_chunks:\n parser.receive_data(chunk)\n event, data = parser.next_event()\n while (event != parser.parts.NEED_DATA and\n event != parser.parts.FINISHED):\n yield event, data\n event, data = parser.next_event()\n if event is not None and event != parser.parts.FINISHED:\n raise ValueError(\n \"Unexpected last event\",\n dict(event=event, data=data, chunk=chunk))\n","repo_name":"datalens-tech/datalens-backend","sub_path":"lib/clickhouse-sqlalchemy/clickhouse_sqlalchemy/parsers/jsoncompact.py","file_name":"jsoncompact.py","file_ext":"py","file_size_in_byte":11228,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"73735768163","text":"def is_armstrong_number(number):\n sum = 0 #initialize sum\n length = len(str(number)) #initialize length of number\n temp = number #set temp to numer\n\n while temp > 0:\n digit = temp % 10 #modulo of number will be last digit of number\n sum += digit ** length #add last digit to the answer and divide each by length\n temp //= 10 #number divided by 10\n\n if number == sum:\n return True\n else:\n return False","repo_name":"Davidxcr/Exercism.io-Solutions","sub_path":"armstrong-numbers/armstrong_numbers.py","file_name":"armstrong_numbers.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"4376905835","text":"import speech_recognition as sr\nimport pyaudio\nimport wave\nimport tkinter as tk\nfrom threading import Thread\n\nclass AudioText3:\n def __init__(\n self,\n format,\n channels,\n sample_rate,\n buffer_size,\n file_path,\n ):\n self.format = format\n self.channels = channels\n self.sample_rate = sample_rate\n self.buffer_size = buffer_size\n self.file_path = file_path\n self.is_recording = False\n\n def record_audio(self):\n try:\n audio = pyaudio.PyAudio()\n stream = audio.open(\n format=self.format,\n channels=self.channels,\n rate=self.sample_rate,\n input=True,\n frames_per_buffer=self.buffer_size,\n )\n\n print(\"GRABANDO........\")\n\n frames = []\n\n self.is_recording = True\n\n while self.is_recording:\n data = stream.read(self.buffer_size)\n frames.append(data)\n\n print(\"DETENIENDO GRABACIÓN...\")\n\n stream.stop_stream()\n stream.close()\n audio.terminate()\n\n x = wave.open(self.file_path, \"wb\")\n x.setnchannels(self.channels)\n x.setsampwidth(audio.get_sample_size(self.format))\n x.setframerate(self.sample_rate)\n x.writeframes(b\"\".join(frames))\n x.close()\n\n return {\n \"status\": \"success\",\n \"message\": \"Proceso completado\",\n \"file_path\": self.file_path,\n }\n except Exception as exception:\n return {\n \"status\": \"failed\",\n \"message\": f\"Error: {exception}\",\n }\n\n def transcribe_audio(self, audio_path):\n try:\n r = sr.Recognizer()\n audio_file = sr.AudioFile(audio_path)\n\n with audio_file as source:\n audio = r.record(source)\n\n 
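# send the recording to Google's speech recognizer, transcribing Spanish (es-ES)\n            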
text = r.recognize_google(audio, language=\"es-ES\")\n\n if text:\n return {\n \"status\": \"success\",\n \"message\": \"Transcripción exitosa\",\n \"text\": text,\n }\n return {\n \"status\": \"failed\",\n \"message\": \"Transcripción fallida\",\n }\n except Exception as exception:\n return {\n \"status\": \"failed\",\n \"message\": f\"Error: {exception}\",\n }\n\n\ndef start_recording():\n global recording_thread\n recording_thread = Thread(target=audio_text.record_audio)\n recording_thread.start()\n\ndef stop_recording():\n if recording_thread.is_alive():\n audio_text.is_recording = False\n recording_thread.join()\n \n recording_result = audio_text.transcribe_audio(file_path)\n \n if recording_result[\"status\"] == \"success\":\n transcribed_text = recording_result[\"text\"]\n print(\"Texto Transcrito:\", transcribed_text)\n else:\n print(\"Error en la transcripción.\")\n\nif __name__ == \"__main__\":\n format = pyaudio.paInt16\n channels = 2\n sample_rate = 44100\n buffer_size = 1024\n file_path = \"audio_recording.wav\"\n\n audio_text = AudioText3(\n format, channels, sample_rate, buffer_size, file_path\n )\n\n root = tk.Tk()\n root.title(\"Grabación de audio\")\n\n start_button = tk.Button(root, text=\"Iniciar Grabación\", command=start_recording)\n stop_button = tk.Button(root, text=\"Detener Grabación\", command=stop_recording)\n\n start_button.pack()\n stop_button.pack()\n\n root.mainloop()\n","repo_name":"VincentRolongDevelop/audioText","sub_path":"audioText3.py","file_name":"audioText3.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39323239154","text":"from Cache_Manager import Video_Cache_Manager\nfrom Structs import *\nfrom enum import Enum\nimport time\nfrom omxplayer.player import OMXPlayer\n\n# TRIAL FOR OMX PLAYER\nfrom pathlib import Path\nfrom time import sleep\n\n\nSTATIC_VIDEO_PATH = Path(\"./stock/static_low_q.mp4\")\n\nclass Status(Enum):\n IDLE = 0\n PLAYING = 1\n DOWNLOADING = 2\n\nclass Operating_Context:\n def __init__(self, status=Status.IDLE, cache_manager=Video_Cache_Manager(), cur_video=None, load_manager=False):\n self.status=status\n if load_manager:\n self.cache_manager = Video_Cache_Manager.load()\n else:\n self.cache_manager = cache_manager\n self.cur_video = cur_video\n self.seen_videos = set()\n\n self.player = None\n\ndef reload_playlist(context, videos_per_channel=5):\n context.cache_manager.clear_all()\n channels = [Channel(x, video_limit=videos_per_channel) for x in config.CHANNEL_LIST]\n # print(channels)\n for channel in channels:\n videos = channel.videos\n for video in videos:\n # If the video is not in queue or in seen videos, queue it up\n if video not in context.seen_videos and not context.cache_manager.video_in_queue(video):\n context.cache_manager.add_video(video, video.published_date)\n print(\"added video \", context.cache_manager.queue_size())\n\ndef downloading(context, videos_predownloaded = 3):\n context.status = Status.DOWNLOADING\n top_n_videos = [context.cache_manager.pop_video(x) for x in range(videos_predownloaded)]\n for video in top_n_videos:\n if video != None:\n video.download()\n context.cache_manager.add_video(video, video.published_date)\n\n playing(context)\n\n\ndef playing(context):\n if context.cache_manager.queue_size() == 0:\n idling(context)\n return\n\n context.status = Status.PLAYING\n cur_video = context.cache_manager.pop_video()\n\n if cur_video.is_ready():\n play_context_video(context, 
video_path=cur_video.disk_path)\n else:\n print(\"VIDEO IS NOT READY U BUM\")\n\n\ndef quit_context_player(context):\n if context.player != None:\n context.player.quit()\n\ndef player_position_callback(p, a):\n print(p, a)\ndef play_context_video(context, video_path, looping=False):\n quit_context_player(context)\n if looping:\n context.player = OMXPlayer(video_path, args=[\"--loop\"])\n else:\n context.player = OMXPlayer(video_path)\n # context.player.set_aspect_mode('stretch')\n context.player.set_alpha(100)\n context.player.positionEvent = player_position_callback\n context.player.play()\n\n\ndef idling(context, guaranteed_videos=5):\n context.status = Status.IDLE\n print(\"IDLING, finding videos to watch\")\n\n play_context_video(context, STATIC_VIDEO_PATH, looping=True)\n\n if not context.cache_manager.queue_size() >= guaranteed_videos:\n num_videos = 5\n retry = True\n # Exponential wait\n retry_wait = 2\n while retry:\n reload_playlist(context=context, videos_per_channel=num_videos)\n if context.cache_manager.queue_size() >= guaranteed_videos:\n retry = False\n else:\n print(\"Waiting {} seconds to fetch new videos\".format(retry_wait))\n time.sleep(retry_wait)\n retry_wait **= 2\n num_videos += 5\n downloading(context)\n\nif __name__ == \"__main__\":\n # Random test\n # status = Status.IDLE\n # cache_manager = Video_Cache_Manager()\n # print(cache_manager.__dict__)\n # c = Channel(config.CHANNEL_LIST[0], video_limit=3)\n # c.download_videos()\n # for video in c.videos:\n # date = datetime.datetime.strptime(video.published_date, \"%Y-%m-%dT%H:%M:%S\")\n # cache_manager.add_video(video, date)\n #\n # while cache_manager.data_struct.size() > 0:\n # cur_video = cache_manager.pop_video(delete=True)\n # print(cur_video.name)\n context = Operating_Context(load_manager=True)\n print(context.seen_videos)\n while True:\n if context.status == Status.IDLE:\n idling(context)\n else:\n playing(context)\n","repo_name":"R-Varun/RaspiTV","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32027063382","text":"import sublime\nimport sublime_plugin\nimport re\n\nfrom .canopy_parse_listener import CanopyParseData\nfrom .canopy_interface_manager import CanopyInterfaceManager\n\nclass CanopyJumpToLocalDefinitionCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n index = CanopyInterfaceManager.get_cursor_position()\n reference = CanopyParseData.references_by_index[index]\n current_topic = CanopyParseData.topics_by_index[index]\n\n if not reference:\n sublime.status_message('Cursor must be on a reference')\n return # reference is None; the lookup below would crash without this\n\n target_subtopic = next(\n (subtopic\n for subtopic\n in CanopyParseData.subtopics\n if subtopic['topic']['name'] == current_topic['name']\n and subtopic['name'] == reference['target']\n ), None\n )\n\n if not target_subtopic:\n sublime.status_message('No subtopic in topic [' + current_topic['name'] + '] with name [' + reference['target'] + ']')\n else:\n CanopyInterfaceManager.set_cursor_position(target_subtopic['start'])\n","repo_name":"canopy-js/sublime","sub_path":"canopy_jump_to_local_definition.py","file_name":"canopy_jump_to_local_definition.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32956826848","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\nimport statistics\r\n\r\n#\r\n# 
Complete the 'quartiles' function below.\r\n#\r\n# The function is expected to return an INTEGER_ARRAY.\r\n# The function accepts INTEGER_ARRAY arr as parameter.\r\n#\r\n\r\ndef quartiles(arr):\r\n # Write your code here\r\n n=len(arr)\r\n arr=sorted(arr)\r\n left_arr = arr[:n // 2] # lower half never includes the median\r\n right_arr = arr[(n + 1) // 2:] # skip the median itself when n is odd\r\n \r\n median_arr = statistics.median(arr)\r\n left_median_arr = statistics.median(left_arr)\r\n right_median_arr = statistics.median(right_arr)\r\n \r\n return int(left_median_arr), int(median_arr), int(right_median_arr)\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n n = int(input().strip())\r\n\r\n data = list(map(int, input().rstrip().split()))\r\n\r\n res = quartiles(data)\r\n\r\n fptr.write('\\n'.join(map(str, res)))\r\n fptr.write('\\n')\r\n\r\n fptr.close()\r\n","repo_name":"dakshtrehan/HackerRank-Solutions","sub_path":"10 Days of Statistics with Python/Day1(1).py","file_name":"Day1(1).py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2470809499","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom src.utils import collatzArr\nfrom math import log\n\n\n# this program plots each input's hailstone (Collatz) sequence on normal and logarithmic scales\ndef main():\n inputs: list[int] = [7, 26, 72101241]\n X_LABEL: str = \"Stopping Points\"\n Y_LABEL: str = \"Hailstone numbers\"\n\n for n in inputs:\n plt.figure(figsize=(9.5, 4))\n\n data = collatzArr(n)\n\n logarithmic = list(map(lambda x: log(x), data))\n\n x = np.arange(len(data))\n\n plt.subplots_adjust(left=0.1, bottom=0.2, right=0.9, top=0.9, wspace=0.4, hspace=0.4)\n\n # first subplot\n plt.subplot(121)\n plt.plot(x, data)\n plt.title(\"Normal Sample\")\n plt.xlabel(X_LABEL)\n plt.ylabel(Y_LABEL)\n\n # second subplot\n plt.subplot(122)\n plt.plot(x, logarithmic)\n plt.title(\"Logarithmic Sample\")\n plt.xlabel(X_LABEL)\n plt.ylabel(Y_LABEL)\n\n plt.savefig(f\"../../out/plots/samples/n={n}.svg\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"timthedev07/collatz-conjecture","sub_path":"src/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39217987520","text":"import os\nimport re\n\n\npri_path = '/home/ishtar/Glycines'\nsec_path = '_Gly/vs_all_bact_'\n\nfor i in range(1, 21):\n # merg_glob_Gly_20_vs_all_bact_k5_bowtie.pseudosam\n # bowtie_sum_Gly_20_ext_all_mpq1_unique\n input_file_name = str('merg_glob_Gly_' + str(i) + '_vs_all_bact_k5_bowtie.pseudosam')\n output_file_name_1 = str('bowtie_summary_Gly_' + str(i) + '_ext_all_mpq20_unique')\n output_file_name_2 = str('bowtie_summary_Gly_' + str(i) + '_ext_all_mpq1_unique')\n\n full_path = os.path.join(pri_path, str(i) + sec_path + str(i))\n input_file_path = os.path.join(full_path, input_file_name)\n output_file_path_1 = os.path.join(full_path, output_file_name_1)\n output_file_path_2 = os.path.join(full_path, output_file_name_2)\n\n # file_path = os.path.join(pri_path, input_file_name)\n\n locNames = {}\n locusFile = open(\"/home/ishtar/allBact_locusNames_extend2\", \"r\")\n for line in locusFile:\n temp = line.strip().split(\"\\t\")\n temp2 = temp[1].split(\".\")[0]\n locNames[temp2] = temp[0]\n locusFile.close()\n\n bacNames = {}\n bacNamesReverse = {}\n 
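# note: the *_hL maps below end up holding the same first-token names as\n # bacNames (both branches of the length check assign the first token), so\n # the two map pairs currently coincide.\n 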
bacNames_hL = {}\n bacNamesReverse_hL = {}\n dictFile = open(\"/home/ishtar/All_bact\", \"r\")\n for line in dictFile:\n line = re.sub(\"\\[\", \"\", line)\n line = re.sub(\"\\]\", \"\", line)\n line = re.sub(\"uncultured\\s+\", \"\", line)\n temp = line.strip().split(\"\\t\")\n temp2 = re.split(\"\\s+\", temp[1])\n temp3 = temp2[0]\n if len(temp2) >= 2:\n temp2 = temp2[0]\n else:\n temp2 = temp2[0]\n bacNames[temp[0]] = temp2\n if temp2 not in bacNamesReverse:\n bacNamesReverse[temp2] = temp[0]\n else:\n bacNamesReverse[temp2] = str(bacNamesReverse[temp2]) + \"\\t\" + str(temp[0])\n\n bacNames_hL[temp[0]] = temp3\n if temp3 not in bacNamesReverse_hL:\n bacNamesReverse_hL[temp3] = temp[0]\n else:\n bacNamesReverse_hL[temp3] = str(bacNamesReverse_hL[temp3]) + \"\\t\" + str(temp[0])\n dictFile.close()\n # print bacNamesReverse[\"Buchnera aphidicola\"]\n # 0/0\n\n # quallity=[]\n aligned = {}\n alignedSet = {}\n alignedSetMPQ = {}\n # alignedSetMPQ_hL={}\n readToSp20 = {}\n readToSp10 = {}\n not_unique20 = set()\n not_unique10 = set()\n # readToSp_hL = {}\n\n for fileName in [input_file_path]:\n print(fileName)\n inFile = open(fileName, \"r\")\n print(fileName)\n # count=0\n for line in inFile:\n # count+=1\n # if line[0]==\"@\":\n # continue\n temp = line.split(\"\\t\")\n # if temp[2]!=\"*\":\n locus = temp[2].split(\".\")[0]\n # quallity.append(float(temp[4]))\n if float(temp[4]) >= 20:\n if temp[0] in not_unique20:\n continue\n try:\n if bacNames[locNames[locus]] not in readToSp20[temp[0]]:\n not_unique20.add(temp[0])\n readToSp20.pop(temp[0])\n not_unique10.add(temp[0])\n if temp[0] in readToSp10:\n readToSp10.pop(temp[0])\n except:\n readToSp20[temp[0]] = set()\n readToSp20[temp[0]].add(bacNames[locNames[locus]])\n # try:\n # readToSp_hL[temp[0]].add(bacNames_hL[locNames[locus]])\n # except:\n # readToSp_hL[temp[0]]=set()\n # readToSp_hL[temp[0]].add(bacNames_hL[locNames[locus]])\n\n try:\n alignedSet[bacNames[locNames[locus]]].add(temp[0])\n # aligned[bacNames[locNames[locus]]]+=1\n except:\n # print locNames[locus[1:]]\n alignedSet[bacNames[locNames[locus]]] = set()\n alignedSet[bacNames[locNames[locus]]].add(temp[0])\n if float(temp[4]) >= 1:\n if temp[0] in not_unique10:\n continue\n try:\n if bacNames[locNames[locus]] not in readToSp10[temp[0]]:\n not_unique10.add(temp[0])\n readToSp10.pop(temp[0])\n except:\n readToSp10[temp[0]] = set()\n readToSp10[temp[0]].add(bacNames[locNames[locus]])\n try:\n alignedSetMPQ[bacNames[locNames[locus]]].add(temp[0])\n # aligned[bacNames[locNames[locus]]]+=1\n except:\n # print locNames[locus[1:]]\n alignedSetMPQ[bacNames[locNames[locus]]] = set()\n alignedSetMPQ[bacNames[locNames[locus]]].add(temp[0])\n # try:\n # alignedSetMPQ_hL[bacNames_hL[locNames[locus]]].add(temp[0])\n # except:\n # print locNames[locus[1:]]\n # alignedSetMPQ_hL[bacNames_hL[locNames[locus]]]=set()\n # alignedSetMPQ_hL[bacNames_hL[locNames[locus]]].add(temp[0])\n # aligned[bacNames[locNames[locus]]]=1\n # if count==200000:\n # print alignedSet\n # 0/0\n # break\n inFile.close()\n\n print(\"----------\")\n print(len(not_unique10))\n print(len(not_unique20))\n\n for bac in alignedSet:\n aligned[bac] = len(alignedSet[bac] - not_unique20)\n # print(aligned)\n sortAl = sorted(aligned.keys(), key=lambda xT: aligned[xT], reverse=True)\n # print np.mean(quallity)\n # print np.median(quallity)\n # print np.std(quallity)\n\n outFile = open(output_file_path_1, \"w\")\n for bac in sortAl:\n outFile.write(str(bac) + \"\\t\" + str(aligned[bac]) + \"\\t\" + str(bacNamesReverse[bac]) + 
\"\\n\")\n outFile.close()\n\n aligned = {}\n for bac in alignedSetMPQ:\n aligned[bac] = len(alignedSetMPQ[bac] - not_unique10)\n # print aligned\n sortAl = sorted(aligned.keys(), key=lambda xT: aligned[xT], reverse=True)\n\n outFile = open(output_file_path_2, \"w\")\n for bac in sortAl:\n outFile.write(str(bac) + \"\\t\" + str(aligned[bac]) + \"\\t\" + str(bacNamesReverse[bac]) + \"\\n\")\n outFile.close()\n","repo_name":"NanCuttySark/Apha_WGS","sub_path":"genera_look_in_bowtie_output_for_all.py","file_name":"genera_look_in_bowtie_output_for_all.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27111446217","text":"from geomdl import compatibility\n\nP = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nW = [0.5, 2, 1]\nPW = [[0.5, 1, 1.5, 0.5], [8, 10, 12, 2], [7, 8, 9, 1]]\nPW_ONES = [[1, 2, 3, 1], [4, 5, 6, 1], [7, 8, 9, 1]]\nPW_SEP = [[1, 2, 3, 0.5], [4, 5, 6, 2], [7, 8, 9, 1]]\n\n\n# Combine with a predefined set of weights\ndef test_combine_ctrlpts_weights1():\n check = compatibility.combine_ctrlpts_weights(P, W)\n\n assert PW == check\n\n\n# Combine with default weights\ndef test_combine_ctrlpts_weights2():\n check = compatibility.combine_ctrlpts_weights(P)\n\n assert PW_ONES == check\n\n\ndef test_generate_ctrlpts_weights():\n check = compatibility.generate_ctrlpts_weights(PW)\n\n assert PW_SEP == check\n\n\ndef test_generate_ctrlptsw():\n check = compatibility.generate_ctrlptsw(PW_SEP)\n\n assert PW == check\n\n\ndef test_separate_ctrlpts_weights():\n c_ctrlpts, c_weights = compatibility.separate_ctrlpts_weights(PW)\n\n assert P == c_ctrlpts\n assert W == c_weights\n\n\ndef test_change_ctrlpts_row_order():\n size_u = 3\n size_v = 4\n # the following is in u-order\n ctrlpts = [[0, 0, 0], [1, 0, 6], [2, 0, 0],\n [0, 1, 0], [1, 1, 0], [2, 1, 0],\n [0, 2, -3], [1, 2, 0], [2, 2, 3],\n [0, 3, 7], [1, 3, 8], [1, 3, 7]]\n # the following is in v-order\n result = [[0, 0, 0], [0, 1, 0], [0, 2, -3], [0, 3, 7],\n [1, 0, 6], [1, 1, 0], [1, 2, 0], [1, 3, 8],\n [2, 0, 0], [2, 1, 0], [2, 2, 3], [1, 3, 7]]\n\n check = compatibility.flip_ctrlpts_u(ctrlpts, size_u, size_v)\n\n assert check == result\n\n\ndef test_flip_ctrlpts():\n size_u = 3\n size_v = 4\n\n # the following is in v-order\n ctrlpts = [[0, 0, 0], [0, 1, 0], [0, 2, -3], [0, 3, 7],\n [1, 0, 6], [1, 1, 0], [1, 2, 0], [1, 3, 8],\n [2, 0, 0], [2, 1, 0], [2, 2, 3], [1, 3, 7]]\n\n # the following is in u-order\n result = [[0, 0, 0], [1, 0, 6], [2, 0, 0],\n [0, 1, 0], [1, 1, 0], [2, 1, 0],\n [0, 2, -3], [1, 2, 0], [2, 2, 3],\n [0, 3, 7], [1, 3, 8], [1, 3, 7]]\n\n check = compatibility.flip_ctrlpts(ctrlpts, size_u, size_v)\n\n assert check == result\n\n\ndef test_flip_ctrlpts2d():\n # the following is in v-order\n ctrlpts = [[[0, 0, 0], [0, 1, 0], [0, 2, -3], [0, 3, 7]],\n [[1, 0, 6], [1, 1, 0], [1, 2, 0], [1, 3, 8]],\n [[2, 0, 0], [2, 1, 0], [2, 2, 3], [1, 3, 7]]]\n\n # the following is in u-order\n result = [[[0, 0, 0], [1, 0, 6], [2, 0, 0]],\n [[0, 1, 0], [1, 1, 0], [2, 1, 0]],\n [[0, 2, -3], [1, 2, 0], [2, 2, 3]],\n [[0, 3, 7], [1, 3, 8], [1, 3, 7]]]\n\n check = compatibility.flip_ctrlpts2d(ctrlpts)\n\n assert check == result\n\n\ndef test_generate_ctrlptsw2d_ops():\n ctrlpts_weights_2d = [[[0, 0, 0, 1], [0, 1, 0, 0.5], [0, 2, -3, 0.25], [0, 3, 7, 0.75]],\n [[1, 0, 6, 1], [1, 1, 0, 0.5], [1, 2, 0, 0.25], [1, 3, 8, 0.75]],\n [[2, 0, 0, 1], [2, 1, 0, 0.5], [2, 2, 3, 0.25], [1, 3, 7, 0.75]]]\n\n ctrlptsw_2d = 
compatibility.generate_ctrlptsw2d(ctrlpts_weights_2d)\n check = compatibility.generate_ctrlpts2d_weights(ctrlptsw_2d)\n\n assert check == ctrlpts_weights_2d\n","repo_name":"orbingol/NURBS-Python","sub_path":"tests/test_compatibility.py","file_name":"test_compatibility.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":530,"dataset":"github-code","pt":"54"} +{"seq_id":"31013121682","text":"import os\nimport time\nimport argparse\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data.sampler import RandomSampler\nfrom utils.util import *\nfrom models import MMC_net\nfrom Setup3D import Config_3D, MMC_ClassificationDataset\n\nPrecautions_msg = '(Precautions for use) ---- \\n'\n\n'''\n- train.py\n\nCode containing the entire process\n\n#### Manual ####\nIf you are using Terminal, set the path and run the code below directly.\n\nIn the case of pycharm: \nVerify that [Run -> Edit Configuration -> train.py] is selected\n-> Go to parameters and enter below -> Run/debug after clicking Apply\nex) Printer task \n--kernel-type test --data-folder sampled_face/ --enet-type tf_efficientnet_b3_ns --n-epochs 50 --batch-size 32 --task-type 1 --img-type close\n\n*** def parse_args(): Contains all the information about the execution parameters. \n*** def run(): A function that contains the whole process of learning. You can improve performance by applying various tricks here.\n** def main(): Distributes the data divided by fold to [def run].\n* def train_epoch(), def val_epoch() : Modify only after understanding them completely\n\n MMCLab, 허종욱, 2020 \n\n\n\n### 3D Project Terminal ###\n\n\n- Printer task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type P --img-type close\n- Filament task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type F --img-type close\n- Layer thickness task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type Q-T --img-type close\n- Number of shells task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type Q-S --img-type close\n- Device task \npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type D --img-type close\n- Reprint task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type R --img-type close\n\n\n- Multi-Task(Device & Printer)\npython train.py --enet-type CFTNet --task-type D --img-type close --side-task-type P --batch-size 32 --n-epochs 20\n- Multi-Task(Device & Layer thickness)\npython train.py --enet-type CFTNet --task-type D --img-type close --side-task-type Q-T --batch-size 32 --n-epochs 20\n\n<(Multi or Single) Modal-Task Setting>\n- Single-Modal-Task(Device)\npython train.py --enet-type CFTNet --task-type D --img-type both --batch-size 32 --n-epochs 20\n- Multi-Modal-Task(Device & Printer)\npython train.py --enet-type CFTNet --task-type D --img-type both --side-task-type P --batch-size 32 --epoch 20\n- Multi-Modal-Task(Device & Printer & Inspection data)\npython train.py --enet-type CFTNet --task-type D --img-type both --side-task-type P --batch-size 32 --epoch 20 --use-meta\n\n\n- Multi-Modal-Task(Printer & Number of shells)\npython train.py --enet-type CFTNet --task-type P --img-type both --side-task-type Q-S --batch-size 32 --epoch 20 --semi\n- Multi-Modal-Task(Device & Layer thickness)\npython train.py --enet-type CFTNet --task-type D --img-type both 
--side-task-type Q-T --batch-size 32 --epoch 20 --semi\n\n\n- Sanding-Processing-Task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type P --img-type close --sanding-processing\n- Coating-Processing-Task\npython train.py --enet-type CFTNet --n-epochs 20 --batch-size 32 --task-type P --img-type close --coating-processing\n\n\n- Printer task\npython train.py --enet-type tf_efficientnet_b3_ns --n-epochs 20 --batch-size 32 --task-type P --img-type full --baseline\n- Filament task\npython train.py --enet-type tf_efficientnet_b3_ns --n-epochs 20 --batch-size 32 --task-type F --img-type full --baseline\n- Layer thickness task \npython train.py --enet-type tf_efficientnet_b3_ns --n-epochs 20 --batch-size 32 --task-type Q-T --img-type full --baseline\n- Number of shells task \npython train.py --enet-type tf_efficientnet_b3_ns --n-epochs 20 --batch-size 32 --task-type Q-S --img-type full --baseline\n- Device task\npython train.py --enet-type tf_efficientnet_b3_ns --n-epochs 20 --batch-size 32 --task-type D --img-type full --baseline\n- Reprint task\npython train.py --enet-type tf_efficientnet_b3_ns --n-epochs 20 --batch-size 32 --task-type R --img-type full --baseline\n\n'''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--enet-type', type=str, required=True)\n # Network name to apply to learning\n # {resnest101, seresnext101,\n # tf_efficientnet_b7_ns,\n # tf_efficientnet_b6_ns,\n # tf_efficientnet_b5_ns...}\n\n parser.add_argument('--task-type', type=str, default='P', required=True)\n # Setting the task type to experiment with\n\n parser.add_argument('--side-task-type', type=str, default='')\n # Setting the multi-task type to experiment with\n\n parser.add_argument('--DEBUG', action='store_true')\n # Parameters for Debugging (Hold the experimental epoch at 5)\n\n parser.add_argument('--use-meta', action='store_true')\n # Whether to use inspection data in addition to the original\n\n parser.add_argument('--baseline', action='store_true')\n # Setting the network structure (our previous network)\n\n parser.add_argument('--semi', action='store_true')\n # Setting Semi-controlled task to experiment with\n\n parser.add_argument('--sanding-processing', action='store_true')\n # Setting sanding-processing task to experiment with\n\n parser.add_argument('--coating-processing', action='store_true')\n # Setting coating-processing task to experiment with\n\n parser.add_argument('--img-type', type=str, required=True)\n # Whether to use {close, full, both} image data\n\n parser.add_argument('--batch-size', type=int, default=16, required=True)\n # batch size\n\n parser.add_argument('--n-epochs', type=int, default=30, required=True)\n # number of epochs\n\n parser.add_argument('--weight-num', type=int, default=0)\n\n args, _ = parser.parse_known_args()\n return args\n\n\ndef train_epoch(model, loader, optimizer, Setup_3d):\n model.train()\n train_loss = []\n bar = tqdm(loader)\n\n for i, (data, target) in enumerate(bar):\n optimizer.zero_grad()\n\n if Setup_3d.use_meta:\n data, meta = data\n meta = meta.to(device)\n data = list(map(lambda x: x.to(device), data))\n target = list(map(lambda x: x.to(device), target)) if Setup_3d.multi else target.to(device)\n logits1, logits2, logits3 = model(data, meta)\n else:\n data = list(map(lambda x: x.to(device), data))\n target = list(map(lambda x: x.to(device), target)) if Setup_3d.multi else target.to(device)\n logits1, logits2, logits3 = model(data)\n\n loss = Setup_3d.select_loss(logits1, logits2, logits3, 
target)\n loss.backward()\n\n # gradient accumulation (When memory is low)\n if Setup_3d.accumulation_step:\n if (i + 1) % Setup_3d.accumulation_step == 0:\n optimizer.step()\n else:\n optimizer.step()\n\n loss_np = loss.detach().cpu().numpy()\n train_loss.append(loss_np)\n smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)\n bar.set_description('loss: %.5f, smooth_loss: %.5f' % (loss_np, smooth_loss))\n\n train_loss = np.mean(train_loss)\n return train_loss\n\n\ndef val_epoch(model, loader, Setup_3d, n_test=1):\n '''\n\n Output:\n val_loss, acc, TARGETS, PROBS\n '''\n\n def make_log(data):\n if Setup_3d.modal:\n logits = torch.zeros((data[0].shape[0], Setup_3d.out_dim)).to(device)\n probs = torch.zeros((data[0].shape[0], Setup_3d.out_dim)).to(device)\n else:\n logits = torch.zeros((data[0].shape[0], Setup_3d.out_dim)).to(device)\n probs = torch.zeros((data[0].shape[0], Setup_3d.out_dim)).to(device)\n return logits, probs\n\n model.eval()\n\n val_loss = []\n PROBS = []\n TARGETS = []\n OBJ_IDX = []\n\n with torch.no_grad():\n for i, (data, target, obj_id, model_id, printer_id) in enumerate(tqdm(loader)):\n if Setup_3d.use_meta:\n data, meta = data\n meta = meta.to(device)\n data = list(map(lambda x: x.to(device), data)) if Setup_3d.modal else data.to(device)\n target = target[0].to(device) if Setup_3d.multi else target.to(device)\n logits, probs = make_log(data)\n for I in range(n_test):\n l, l_2, l_3 = model(get_trans(data, I), meta)\n logits += l\n probs += l.softmax(1)\n else:\n data = list(map(lambda x: x.to(device), data))\n target = target[0].to(device) if Setup_3d.multi else target.to(device)\n logits, probs = make_log(data)\n for I in range(n_test):\n l, l_2, l_3 = model(get_trans(data, I))\n logits += l\n probs += l.softmax(1)\n\n loss = Setup_3d.criterion(logits, target)\n\n PROBS.append(probs.detach().cpu())\n TARGETS.append(target.detach().cpu())\n OBJ_IDX.append(obj_id)\n val_loss.append(loss.detach().cpu().numpy())\n\n val_loss = np.mean(val_loss)\n PROBS = torch.cat(PROBS).numpy()\n TARGETS = torch.cat(TARGETS).numpy()\n OBJ_IDX = torch.cat(OBJ_IDX).numpy()\n unique_idx = np.unique(OBJ_IDX).astype(np.int64)\n\n for u_id in unique_idx.tolist():\n res_list = np.where(OBJ_IDX == u_id)\n mean_prob = PROBS[res_list].mean(axis=0)\n PROBS[res_list] = mean_prob\n\n # accuracy\n acc = (PROBS.argmax(1) == TARGETS).mean() * 100.\n\n return val_loss, acc, TARGETS, PROBS.argmax(1)\n\n\ndef run(fold, Setup_3d):\n '''\n Learning progress main function\n\n :param fold: The partition number to be used for value in cross-validation\n :param df: Full Data List for DataFrame Learning\n :param meta_features, n_meta_features: Whether to use additional information other than images\n :param transforms_train, transforms_val: Dataset transform function\n '''\n\n df = Setup_3d.df_train_close if Setup_3d.img_type == 'close' or Setup_3d.img_type == 'both' else Setup_3d.df_train_full\n\n if args.DEBUG:\n Setup_3d.n_epochs = 5\n df_train = df[df['fold'] != fold].sample(Setup_3d.batch_size * 5)\n df_valid = df[df['fold'] == fold].sample(Setup_3d.batch_size * 5)\n\n else:\n df_train = df[df['fold'] != fold]\n df_valid = df[df['fold'] == fold]\n\n # https://discuss.pytorch.org/t/error-expected-more-than-1-value-per-channel-when-training/26274\n #\n if len(df_train) % Setup_3d.batch_size == 1:\n df_train = df_train.sample(len(df_train) - 1)\n if len(df_valid) % Setup_3d.batch_size == 1:\n df_valid = df_valid.sample(len(df_valid) - 1)\n\n # Read Dataset\n dataset_train = 
MMC_ClassificationDataset(df_train, 'train', Setup_3d\n ,Setup_3d.transforms_train, Setup_3d.transforms_fft)\n\n dataset_valid = MMC_ClassificationDataset(df_valid, 'valid', Setup_3d\n , Setup_3d.transforms_val)\n\n\n train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=Setup_3d.batch_size,\n sampler=RandomSampler(dataset_train), num_workers=Setup_3d.num_workers, pin_memory=True)\n valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=Setup_3d.batch_size,\n num_workers=3, pin_memory=True)\n\n\n side_name = \"_\" + Setup_3d.side_task_type if Setup_3d.side_task_type != \"\" else \"\"\n\n acc_max = 0.\n\n model = ModelClass(Setup_3d)\n\n model_file = os.path.join(Setup_3d.model_dir,\n f'{Setup_3d.weight_num}_{Setup_3d.kernel_type}_{Setup_3d.task_type}{side_name}_{Setup_3d.img_type}_{Setup_3d.n_epochs}_{Setup_3d.batch_size}{Setup_3d.type}_{fold}.pth')\n\n model = model.to(device)\n optimizer = optim.Adam(model.parameters(), lr=Setup_3d.init_lr)\n\n if DP:\n model = nn.DataParallel(model)\n\n scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, Setup_3d.n_epochs - 1)\n scheduler_warmup = GradualWarmupSchedulerV2(optimizer, multiplier=10, total_epoch=1,\n after_scheduler=scheduler_cosine)\n\n for epoch in range(1, Setup_3d.n_epochs + 1):\n print(time.ctime(), f'Fold {fold}, Epoch {epoch}')\n\n train_loss = train_epoch(model, train_loader, optimizer, Setup_3d)\n\n val_loss, acc, targets, probs = val_epoch(model, valid_loader, Setup_3d)\n\n content = time.ctime() + ' ' + f'Fold {fold}, Epoch {epoch}, lr: {optimizer.param_groups[0][\"lr\"]:.7f}, train loss: {train_loss:.5f}, valid loss: {(val_loss):.5f}, Acc: {(acc):.4f}'\n\n print(content)\n with open(os.path.join(Setup_3d.log_dir, f'log_{Setup_3d.kernel_type}.txt'), 'a') as appender:\n appender.write(content + '\\n')\n\n scheduler_warmup.step()\n if epoch == 2:\n scheduler_warmup.step() # bug workaround\n\n if acc > acc_max:\n print('acc_max ({:.6f} --> {:.6f}). 
Saving model ...'.format(acc_max, acc))\n torch.save(model.state_dict(), model_file)\n acc_max = acc\n\n\ndef main():\n '''\n ####################################################\n # 3d printer dataset : dataset.get_df_3d print\n ####################################################\n '''\n\n Setup_3d.get_df_3dprint()\n\n # Recall model transforms\n Setup_3d.get_transforms()\n\n folds = range(Setup_3d.k_fold)\n\n for fold in folds:\n run(fold, Setup_3d)\n\n\nif __name__ == '__main__':\n print('----------------------------')\n print(Precautions_msg)\n print('----------------------------')\n\n # make argument\n args = parse_args()\n Setup_3d = Config_3D(args)\n\n os.makedirs(Setup_3d.model_dir, exist_ok=True)\n os.makedirs(Setup_3d.log_dir, exist_ok=True)\n os.environ['CUDA_VISIBLE_DEVICES'] = Setup_3d.CUDA_VISIBLE_DEVICES\n\n Setup_3d.weight_num = int(len(os.listdir(Setup_3d.model_dir)) / 4)\n\n ModelClass = MMC_net\n\n # Whether to use a multi-GPU\n DP = len(os.environ['CUDA_VISIBLE_DEVICES']) > 1\n\n # Random seed settings for experimental reproduction\n set_seed(2359)\n device = torch.device('cuda')\n Setup_3d.criterion = nn.CrossEntropyLoss()\n\n # perform the main function\n main()","repo_name":"juhou/SI3DPpp","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17012371021","text":"import os\nimport typing\n\nfrom aiogram import types\n\nfrom i17obot import bot, config, dp\nfrom i17obot.templates import render_template\n\n\n@dp.message_handler(commands=[\"tutorial\"])\n@dp.callback_query_handler(text=\"tutorial_1\")\nasync def part_1(message: types.Message):\n if isinstance(message, types.CallbackQuery):\n response = await render_template(message.from_user.id, \"tutorial_part_1\")\n await tutorial_callback_query(\n message,\n response,\n media=os.path.join(config.BASE_DIR, \"data/i17obot-1.mp4\"),\n keyboards=[(\"Próximo >>\", \"tutorial_2\")],\n )\n return\n\n await types.ChatActions.upload_video()\n\n keyboard_markup = types.InlineKeyboardMarkup()\n keyboard_markup.row(\n types.InlineKeyboardButton(\"Próximo >>\", callback_data=\"tutorial_2\"),\n )\n\n response = await render_template(message.from_user.id, \"tutorial_part_1\")\n\n await bot.send_animation(\n chat_id=message.chat.id,\n animation=types.InputFile(os.path.join(config.BASE_DIR, \"data/i17obot-2.mp4\")),\n caption=response,\n parse_mode=\"markdown\",\n reply_markup=keyboard_markup,\n )\n\n\n@dp.callback_query_handler(text=\"tutorial_2\")\nasync def part_2(query: types.CallbackQuery):\n keyboards = (\n (\"<< Anterior\", \"tutorial_1\"),\n (\"Próximo >>\", \"tutorial_3\"),\n )\n response = await render_template(query.from_user.id, \"tutorial_part_2\")\n\n await tutorial_callback_query(\n query,\n message=response,\n media=os.path.join(config.BASE_DIR, \"data/i17obot-2.mp4\"),\n keyboards=keyboards,\n )\n\n\n@dp.callback_query_handler(text=\"tutorial_3\")\nasync def part_3(query: types.CallbackQuery):\n keyboards = ((\"<< Anterior\", \"tutorial_2\"),)\n response = await render_template(query.from_user.id, \"tutorial_part_3\")\n await tutorial_callback_query(\n query,\n message=response,\n media=os.path.join(config.BASE_DIR, \"data/dog_seriously_working.mp4\"),\n keyboards=[(\"<< Anterior\", \"tutorial_2\")],\n )\n\n\nasync def tutorial_callback_query(\n query: types.CallbackQuery,\n message: str,\n media: str,\n keyboards: typing.Sequence[typing.Tuple[str, str]],\n):\n 
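# shared helper: swaps the media and caption of the existing tutorial message\n # in place via bot.edit_message_media() instead of sending a new message.\n 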
keyboard_markup = types.InlineKeyboardMarkup()\n keyboard_markup.row(\n *[\n types.InlineKeyboardButton(label, callback_data=callback_data)\n for label, callback_data in keyboards\n ]\n )\n animation = types.InputMediaAnimation(\n media=types.InputFile(media), caption=message, parse_mode=\"markdown\",\n )\n\n await types.ChatActions.upload_video()\n await bot.edit_message_media(\n media=animation,\n chat_id=query.message.chat.id,\n message_id=query.message.message_id,\n reply_markup=keyboard_markup,\n )\n","repo_name":"rougeth/i17obot","sub_path":"i17obot/handlers/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"74262496483","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFeature building\n\"\"\"\n\nimport os\nimport sys\nbase_directory = os.path.dirname(os.path.abspath(__file__))\nbase_directory = os.path.dirname(os.path.dirname(base_directory))\nsys.path.append(base_directory)\n\n# import project config.py\nimport ai4eosc_thunder_nowcast_ml.config as cfg\nimport yaml\nimport pandas as pd\nimport csv\nimport numpy as np\nfrom datetime import datetime\n\ncurrentFuncName = lambda n=0: sys._getframe(n + 1).f_code.co_name\n\n\ndef print_log(log_line, verbose=True, time_stamp=True, log_file=cfg.LOG_FILE_PATH):\n tm = \"\"\n if time_stamp:\n tm = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S: \")\n if verbose:\n if log_file is None:\n print(tm + log_line)\n else:\n with open(log_file, 'a') as file:\n file.write(tm + log_line + \"\\n\")\n\n\ndef load_config_yaml(pathYaml, part=\"\"):\n with open(pathYaml) as yamlFile:\n config = yaml.safe_load(yamlFile)\n if part == \"\":\n return config\n else:\n return config[part]\n\n\ndef csv_data_to_one_file(source_path, dest_path, use_columns, forecast_time=None):\n data_files = os.listdir(source_path)\n data_files = [f for f in data_files if os.path.isfile(source_path + '/' + f)]\n df_csv_append = pd.DataFrame()\n for data_file in data_files:\n if os.path.isfile(source_path + '/' + data_file):\n print_log(f\"{currentFuncName()}: Reading file: {source_path}/{data_file} ...\")\n df = pd.read_csv(source_path + '/' + data_file, na_filter=False)\n use_columns_2 = [use_columns[i] for i in range(len(use_columns)) if use_columns[i] in df.columns]\n if forecast_time is not None:\n forecast_time_indices = [i for i in range(len(df['forecast'])) if df['forecast'][i] == forecast_time]\n df = df.iloc[forecast_time_indices]\n df = df[use_columns_2]\n if len(df_csv_append) == 0 or list(df.columns) == list(df_csv_append.columns):\n df_csv_append = pd.concat([df_csv_append, df], ignore_index=True)\n print_log(f\"{currentFuncName()}: OK\")\n else:\n print_log(f\"{currentFuncName()}: Error: headers don't match. 
Skipping this file.\")\n else:\n print_log(f\"{currentFuncName()}: Warning: Skipping file {source_path}/{data_file}, it does not exist.\")\n if len(df_csv_append) > 0:\n df_csv_append.to_csv(dest_path, index=False)\n else:\n print_log(f\"{currentFuncName()}: Warning: No such file\")\n\n\ndef make_raw_csv_data(source_path, dest_path, data_sources, file_types, use_columns, forecast_time=None):\n if not isinstance(data_sources, list) and not isinstance(data_sources, tuple):\n data_sources = (data_sources,)\n if not isinstance(file_types, list) and not isinstance(file_types, tuple):\n file_types = (file_types,)\n for ds in data_sources:\n for ft in file_types:\n csv_data_to_one_file(source_path + \"/\" + ds + \"/\" + ft,\n dest_path + \"/\" + ds + \"__\" + ft + \".csv\",\n use_columns, forecast_time)\n\n\ndef load_csv_files(source_path_list, config_yaml, header_list):\n if not isinstance(source_path_list, list) and not isinstance(source_path_list, tuple):\n source_path_list = (source_path_list,)\n output_df = list()\n output_header = list()\n j = 0\n # print(len(source_path_list))\n for i in range(len(source_path_list)):\n # print(source_path_list[i])\n # print(os.path.isfile(source_path_list[i]))\n if os.path.isfile(source_path_list[i]):\n output_df.append(pd.DataFrame())\n output_df[j] = pd.read_csv(source_path_list[i], na_filter=False)\n output_header.append(header_list[i])\n j = j + 1\n return output_df, output_header\n\n\ndef values_in_df_to_classes(dfs, column_list, threshold, val1, val2):\n if not isinstance(dfs, list) and not isinstance(dfs, tuple):\n dfs = (dfs,)\n for i in range(len(dfs)):\n for column in column_list:\n dfs[i][column] = np.float64(dfs[i][column])\n dfs[i][column][np.isnan(dfs[i][column])] = 0\n # dealing with only two classes for now\n dfs[i][column] = np.where(dfs[i][column] >= threshold, val1, val2)\n return dfs\n\n\ndef interval(t, dt):\n return (t - dt, t + dt)\n\n\ndef merge_csv_files(d_files, m_files, tolerance):\n if not isinstance(d_files, list) and not isinstance(d_files, tuple):\n d_files = (d_files,)\n if not isinstance(m_files, list) and not isinstance(m_files, tuple):\n m_files = (m_files,)\n\n timestamps = d_files[0]['timestamp']\n forecast = d_files[0]['forecast'] * 60 * 1000\n output = -np.ones((len(timestamps), len(d_files) + len(m_files)), dtype=np.int64)\n output[:, 0] = np.array(list(range(len(timestamps))))\n # d_files\n print_log(f\"{currentFuncName()}: Merging {len(d_files)} data files\")\n for i in range(1, len(d_files)):\n for j in range(len(timestamps)):\n time_interval = interval(timestamps[j] + forecast[j], tolerance)\n time_between_b = d_files[i]['timestamp'].between(time_interval[0], time_interval[1])\n time_between_i = np.where(time_between_b)[0]\n if len(time_between_i) >= 1:\n output[j, i] = time_between_i[0]\n # m_files\n print_log(f\"{currentFuncName()}: Merging {len(m_files)} measurement files\")\n for i in range(len(m_files)):\n for j in range(len(timestamps)):\n time_interval = interval(timestamps[j] + forecast[j], tolerance)\n time_between_b = m_files[i]['timestamp'].between(time_interval[0], time_interval[1])\n time_between_i = np.where(time_between_b)[0]\n if len(time_between_i) >= 1:\n output[j, i + len(d_files)] = time_between_i[0]\n\n use_this_rows = np.sum(output >= 0, axis=1) == np.shape(output)[1] # -1 marks \"no match\"; >= 0 also keeps matches at row index 0\n d_files_out = list()\n m_files_out = list()\n for i in range(len(d_files)):\n d_files_out.append(d_files[i].iloc[output[use_this_rows, i]])\n for i in range(len(m_files)):\n 
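# measurement columns sit after the forecast columns (offset by len(d_files))\n 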
m_files_out.append(m_files[i].iloc[output[use_this_rows, i + len(d_files)]])\n\n return(d_files_out, m_files_out)\n\n\ndef get_proper_dates_indices(timestamps, config_yaml_date_settings):\n dates = [datetime.fromtimestamp(timestamps.iloc[i]) for i in range(len(timestamps))]\n datesY = [dates[i].year for i in range(len(dates))]\n datesM = [dates[i].month for i in range(len(dates))]\n datesD = [dates[i].day for i in range(len(dates))]\n\n indices = np.zeros((len(dates), 1))\n\n if config_yaml_date_settings is None:\n return [i for i in range(len(indices)) if indices[i] == 0]\n elif len(config_yaml_date_settings) != 1:\n print_log(f\"{currentFuncName()}: Error: Bad data splitting yaml config\")\n return [i for i in range(len(indices)) if indices[i] == 0]\n\n seasons = config_yaml_date_settings[0]\n seasonsY = eval(seasons['years'])\n seasonsM = eval(seasons['months'])\n seasonsD = eval(seasons['days'])\n\n if len(seasonsY) == 0 and len(seasonsM) == 0 and len(seasonsD) == 0: # none\n indices = indices\n elif len(seasonsY) == 0 and len(seasonsM) == 0 and len(seasonsD) > 0: # D\n for i in range(len(indices)):\n if datesD[i] in seasonsD:\n indices[i] = 1\n\n elif len(seasonsY) == 0 and len(seasonsM) > 0 and len(seasonsD) == 0: # M\n for i in range(len(indices)):\n if datesM[i] in seasonsM:\n indices[i] = 1\n\n elif len(seasonsY) == 0 and len(seasonsM) > 0 and len(seasonsD) == len(seasonsM): # M, D\n for j in range(len(seasonsM)):\n for i in range(len(indices)):\n if datesM[i] == seasonsM[j] and datesD[i] in seasonsD[j]:\n indices[i] = 1\n\n elif len(seasonsY) > 0 and len(seasonsM) == 0 and len(seasonsD) == 0: # Y\n for i in range(len(indices)):\n if datesY[i] in seasonsY:\n indices[i] = 1\n\n elif len(seasonsY) > 0 and len(seasonsM) == 0 and len(seasonsD) == len(seasonsY): # Y, D\n for j in range(len(seasonsY)):\n for i in range(len(indices)):\n if datesY[i] == seasonsY[j] and datesD[i] in seasonsD[j]:\n indices[i] = 1\n\n elif len(seasonsY) > 0 and len(seasonsM) == len(seasonsY) and len(seasonsD) == 0: # Y, M\n for j in range(len(seasonsY)):\n for i in range(len(indices)):\n if datesY[i] == seasonsY[j] and datesM[i] in seasonsM[j]:\n indices[i] = 1\n\n elif len(seasonsY) == len(seasonsM) == len(seasonsD) > 0: # Y, M, D\n same_length = True\n for i in range(len(seasonsM)):\n if len(seasonsM[i]) != len(seasonsD[i]):\n same_length = False\n\n if same_length == True:\n for k in range(len(seasonsY)):\n for j in range(len(seasonsM)):\n for i in range(len(indices)):\n if datesY[i] == seasonsY[k] and datesM[i] in seasonsM[k] and datesD[i] in seasonsD[k][j]:\n indices[i] = 1\n else:\n print_log(f\"{currentFuncName()}: Error: Bad input date format\")\n\n else:\n print_log(f\"{currentFuncName()}: Error: Bad input date format\")\n\n return [i for i in range(len(indices)) if indices[i] == 1]\n\n\ndef prepare_data_train(source_path, dest_path, dest_path_train_file, dest_path_test_file, dest_path_validate_file, config_yaml):\n print_log(f\"{currentFuncName()}:\")\n # csv_data_to_one_file\n make_raw_csv_data(source_path, dest_path,\n eval(config_yaml['all_data_models']),\n eval(config_yaml['all_data_sources']),\n eval(config_yaml['use_columns']) + eval(config_yaml['ORP_list']),\n config_yaml['forecast_time'])\n make_raw_csv_data(source_path, dest_path,\n config_yaml['measurements'],\n eval(config_yaml['all_data_sources']),\n eval(config_yaml['use_columns']) + eval(config_yaml['ORP_list']))\n\n # load_csv_files\n d_source_path_list = list()\n d_headers = list()\n for dm in eval(config_yaml['all_data_models']):\n 
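# one merged csv (and matching header label) is expected per model__source pair\n 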
for ds in eval(config_yaml['all_data_sources']):\n d_headers.append(dm + \"__\" + ds)\n d_source_path_list.append(dest_path + \"/\" + dm + \"__\" + ds + \".csv\")\n\n m_source_path_list = list()\n m_headers = list()\n for ds in eval(config_yaml['all_data_sources']):\n m_headers.append(config_yaml['measurements'] + \"__\" + ds)\n m_source_path_list.append(dest_path + \"/\" + config_yaml['measurements'] + \"__\" + ds + \".csv\")\n\n d_files, d_headers = load_csv_files(d_source_path_list, config_yaml, d_headers)\n m_files, m_headers = load_csv_files(m_source_path_list, config_yaml, m_headers)\n\n # data to classes\n threshold, val1, val2 = eval(config_yaml['threshold_value'])\n d_files = values_in_df_to_classes(d_files, eval(config_yaml['ORP_list']), threshold, val1, val2)\n m_files = values_in_df_to_classes(m_files, eval(config_yaml['ORP_list']), threshold, val1, val2)\n\n # merge_csv_dates\n d_files_out, m_files_out = merge_csv_files(d_files, m_files, config_yaml['time_tolerance'])\n\n # indices for train, test and validation\n train_i = get_proper_dates_indices(d_files_out[0]['timestamp'] / 1000, config_yaml['train']['seasons'])\n test_i = get_proper_dates_indices(d_files_out[0]['timestamp'] / 1000, config_yaml['test']['seasons'])\n val_i = get_proper_dates_indices(d_files_out[0]['timestamp'] / 1000, config_yaml['validate']['seasons'])\n\n train_d = [d_files_out[i].iloc[train_i].reset_index(drop=True) for i in range(len(d_files_out))]\n train_m = [m_files_out[i].iloc[train_i].reset_index(drop=True) for i in range(len(m_files_out))]\n test_d = [d_files_out[i].iloc[test_i].reset_index(drop=True) for i in range(len(d_files_out))]\n test_m = [m_files_out[i].iloc[test_i].reset_index(drop=True) for i in range(len(m_files_out))]\n val_d = [d_files_out[i].iloc[val_i].reset_index(drop=True) for i in range(len(d_files_out))]\n val_m = [m_files_out[i].iloc[val_i].reset_index(drop=True) for i in range(len(m_files_out))]\n headers_d = [[d_headers[i], ] * len(d_files_out[i].columns) for i in range(len(d_headers))]\n headers_m = [[m_headers[i], ] * len(m_files_out[i].columns) for i in range(len(m_headers))]\n headers = list()\n for i in range(len(headers_d)):\n headers = headers + headers_d[i]\n for i in range(len(headers_m)):\n headers = headers + headers_m[i]\n\n # save split data\n train = pd.concat(train_d + train_m, axis=1)\n train.columns = [headers, train.columns]\n train.to_csv(dest_path_train_file, index=False)\n\n test = pd.concat(test_d + test_m, axis=1)\n test.columns = [headers, test.columns]\n test.to_csv(dest_path_test_file, index=False)\n\n val = pd.concat(val_d + val_m, axis=1)\n val.columns = [headers, val.columns]\n val.to_csv(dest_path_validate_file, index=False)\n\n\ndef prepare_data_test(source_path, dest_path, dest_path_test_file, config_yaml):\n print_log(f\"{currentFuncName()}:\")\n # csv_data_to_one_file\n make_raw_csv_data(source_path, dest_path,\n eval(config_yaml['all_data_models']),\n eval(config_yaml['all_data_sources']),\n eval(config_yaml['use_columns']) + eval(config_yaml['ORP_list']),\n config_yaml['forecast_time'])\n make_raw_csv_data(source_path, dest_path,\n config_yaml['measurements'],\n eval(config_yaml['all_data_sources']),\n eval(config_yaml['use_columns']) + eval(config_yaml['ORP_list']))\n\n # load_csv_files\n d_source_path_list = list()\n d_headers = list()\n for dm in eval(config_yaml['all_data_models']):\n for ds in eval(config_yaml['all_data_sources']):\n d_headers.append(dm + \"__\" + ds)\n d_source_path_list.append(dest_path + \"/\" + dm + \"__\" + ds + 
\".csv\")\n\n m_source_path_list = list()\n m_headers = list()\n for ds in eval(config_yaml['all_data_sources']):\n m_headers.append(config_yaml['measurements'] + \"__\" + ds)\n m_source_path_list.append(dest_path + \"/\" + config_yaml['measurements'] + \"__\" + ds + \".csv\")\n\n d_files, d_headers = load_csv_files(d_source_path_list, config_yaml, d_headers)\n m_files, m_headers = load_csv_files(m_source_path_list, config_yaml, m_headers)\n\n # data to classes\n threshold, val1, val2 = eval(config_yaml['threshold_value'])\n d_files = values_in_df_to_classes(d_files, eval(config_yaml['ORP_list']), threshold, val1, val2)\n m_files = values_in_df_to_classes(m_files, eval(config_yaml['ORP_list']), threshold, val1, val2)\n\n # merge_csv_dates\n d_files_out, m_files_out = merge_csv_files(d_files, m_files, config_yaml['time_tolerance'])\n\n # indices for train, test and validation\n test_i = get_proper_dates_indices(d_files_out[0]['timestamp'] / 1000, config_yaml['test']['seasons'])\n\n test_d = [d_files_out[i].iloc[test_i].reset_index(drop=True) for i in range(len(d_files_out))]\n test_m = [m_files_out[i].iloc[test_i].reset_index(drop=True) for i in range(len(m_files_out))]\n headers_d = [[d_headers[i], ] * len(d_files_out[i].columns) for i in range(len(d_headers))]\n headers_m = [[m_headers[i], ] * len(m_files_out[i].columns) for i in range(len(m_headers))]\n headers = list()\n for i in range(len(headers_d)):\n headers = headers + headers_d[i]\n for i in range(len(headers_m)):\n headers = headers + headers_m[i]\n\n # save split data\n test = pd.concat(test_d + test_m, axis=1)\n test.columns = [headers, test.columns]\n test.to_csv(dest_path_test_file, index=False)\n\n\ndef prepare_data_predict(source_path, dest_path, dest_path_predict_file, config_yaml):\n print_log(f\"{currentFuncName()}:\")\n # csv_data_to_one_file\n make_raw_csv_data(source_path, dest_path,\n eval(config_yaml['all_data_models']),\n eval(config_yaml['all_data_sources']),\n eval(config_yaml['use_columns']) + eval(config_yaml['ORP_list']),\n config_yaml['forecast_time'])\n\n # load_csv_files\n d_source_path_list = list()\n d_headers = list()\n for dm in eval(config_yaml['all_data_models']):\n for ds in eval(config_yaml['all_data_sources']):\n d_headers.append(dm + \"__\" + ds)\n d_source_path_list.append(dest_path + \"/\" + dm + \"__\" + ds + \".csv\")\n \n print_log(f\"load_csv_files({d_source_path_list}, {config_yaml}, {d_headers})\")\n d_files, d_headers = load_csv_files(d_source_path_list, config_yaml, d_headers)\n\n # data to classes\n threshold, val1, val2 = eval(config_yaml['threshold_value'])\n d_files = values_in_df_to_classes(d_files, eval(config_yaml['ORP_list']), threshold, val1, val2)\n\n # merge_csv_dates\n d_files_out, m_files_out = merge_csv_files(d_files, [], config_yaml['time_tolerance'])\n\n # indices for train, test and validation\n predict_i = get_proper_dates_indices(d_files_out[0]['timestamp'] / 1000, config_yaml['predict']['seasons'])\n\n predict_d = [d_files_out[i].iloc[predict_i].reset_index(drop=True) for i in range(len(d_files_out))]\n headers_d = [[d_headers[i], ] * len(d_files_out[i].columns) for i in range(len(d_headers))]\n headers = list()\n\n for i in range(len(headers_d)):\n headers = headers + headers_d[i]\n\n # save split data\n predict = pd.concat(predict_d, axis=1)\n predict.columns = [headers, predict.columns]\n predict.to_csv(dest_path_predict_file, 
index=False)\n\n","repo_name":"MicroStep-MIS/UC-MicroStep-MIS-ai4eosc_thunder_nowcast_ml","sub_path":"ai4eosc_thunder_nowcast_ml/features/build_features.py","file_name":"build_features.py","file_ext":"py","file_size_in_byte":17713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13857939958","text":"#! /usr/bin/env python\n# encoding: utf-8\n# a1batross, mittorn, 2018\n\nfrom waflib import Logs\nimport os\nimport sys\n\ntop = '.'\n\ndef options(opt):\n return\n\ndef configure(conf):\n\n conf.env.CC = \"/opt/devkitpro/devkitPPC/bin/powerpc-eabi-gcc\"\n conf.env.CXX = \"/opt/devkitpro/devkitPPC/bin/powerpc-eabi-g++\"\n\n if conf.env.DEST_OS == 'win32':\n conf.load('winres')\n\ndef build(bld):\n source = ['game.cpp']\n includes = '. ../common ../public'\n libs = []\n \n if bld.env.DEST_OS == 'wii':\n source += ['../engine/platform/wii/dll_wii.c', '../public/crtlib.c']\n includes += ' ../engine/common ../engine/client ../engine'\n\n elif bld.env.DEST_OS != 'win32':\n libs += [ 'DL' ]\n else:\n libs += ['USER32', 'SHELL32']\n source += ['game.rc']\n\n bld(\n source = source,\n target = 'xash3d', # hl.exe\n features = 'c cxx cxxprogram',\n includes = includes,\n use = libs,\n install_path = bld.env.BINDIR,\n subsystem = bld.env.MSVC_SUBSYSTEM\n )\n","repo_name":"Division-Zero-GX/xash3d-wii","sub_path":"game_launch/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"54"} +{"seq_id":"33944226387","text":"\"\"\"Modify standard PyTorch distributions so they are compatible with this code.\n\nCode adapted from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom utils import AddBias\nfrom utils import init\n\n# Normal\nFixedNormal = torch.distributions.Normal\n\nlog_prob_normal = FixedNormal.log_prob\nFixedNormal.log_probs = lambda self, actions: log_prob_normal(\n self, actions).sum(-1, keepdim=True)\n\nentropy = FixedNormal.entropy\nFixedNormal.entropy = lambda self: entropy(self).sum(-1)\n\nFixedNormal.mode = lambda self: self.mean\n\n\nclass DiagGaussian(nn.Module):\n \"\"\"Implements a diagonal multivariate gaussian distribution.\"\"\"\n\n def __init__(self, num_inputs, num_outputs):\n super(DiagGaussian, self).__init__()\n\n init_ = lambda m: init(\n m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))\n\n self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))\n self.logstd = AddBias(torch.zeros(num_outputs))\n\n def forward(self, x):\n action_mean = self.fc_mean(x)\n zeros = torch.zeros(action_mean.size())\n if x.is_cuda:\n zeros = zeros.cuda()\n\n action_logstd = self.logstd(zeros)\n return FixedNormal(action_mean, action_logstd.exp())\n","repo_name":"alexis-jacq/Learning_from_a_Learner","sub_path":"lfl/mujoco/distributions.py","file_name":"distributions.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16271265326","text":"import pickle\nimport argparse\nimport nltk\nimport pandas as pd\n\nfrom collections import Counter\nfrom pycocotools.coco import COCO\n\nclass word_holder(object):\n \n def __init__(self):\n self.word_to_idx = {}\n self.idx_to_word = {}\n self.idx = 0\n self.size = len(self.word_to_idx)\n \n def add_word(self, word):\n \n # if new word during vocab building, create new key\n if word not in self.word_to_idx:\n 
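# register the new word under the next free index and keep the reverse map\n 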
self.word_to_idx[word] = self.idx\n self.idx_to_word[self.idx] = word\n self.idx += 1\n \n def __call__(self, word):\n \n # check if word is not part of learned vocab, don't throw error\n if word not in self.word_to_idx:\n return self.word_to_idx['<unk>']\n else:\n return self.word_to_idx[word]\n \ndef build_word_holder(json, rarity):\n \n # read in JSON\n coco = COCO(json)\n # slice captions\n captions = pd.DataFrame.from_dict(coco.anns)\n captions = list(captions.loc['caption'])\n \n # tokenize captions and word count\n vocab_freq = Counter()\n for caption in captions:\n tokens = nltk.tokenize.word_tokenize(caption.lower())\n vocab_freq.update(tokens)\n \n # select words that are not rare\n words = [word for word, freq in vocab_freq.items() if freq >= rarity]\n\n # build word_holder\n wh = word_holder()\n # pass in all words that met rarity threshold\n for word in words:\n wh.add_word(word)\n \n # add special tokens\n wh.add_word('<pad>')\n wh.add_word('<start>')\n wh.add_word('<end>')\n wh.add_word('<unk>')\n \n return wh\n\nif __name__ == '__main__':\n json_dir = './data/annotations/captions_val2014.json'\n rarity = 10\n wh = build_word_holder(json = json_dir, rarity = 10)\n \n # write binary\n with open('/home/b2jia/CODE4/PA4/vocabulary.pkl', \"wb\") as f:\n pickle.dump(wh, f) \n ","repo_name":"adamklie/CSE253_PA4","sub_path":"word_holder.py","file_name":"word_holder.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34968677976","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('dashboard/', views.dashboard, name=\"dashboard\"),\n path('product/', views.product, name=\"product\"),\n path('supplier/', views.supplier, name=\"supplier\"),\n path('customer/', views.customer, name=\"customer\"),\n path('sales/', views.sales, name=\"sales\"),\n path('purchase/', views.purchase, name=\"purchase\"),\n]\n","repo_name":"silverblazetech/POS","sub_path":"inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72311874400","text":"from .sub_resource import SubResource\n\n\nclass ApplicationGatewayBackendHttpSettings(SubResource):\n \"\"\"\n Backend address pool settings of application gateway\n\n :param id: Resource Id\n :type id: str\n :param port: Gets or sets the port\n :type port: int\n :param protocol: Gets or sets the protocol. Possible values include:\n 'Http', 'Https'\n :type protocol: str\n :param cookie_based_affinity: Gets or sets the cookie affinity. Possible\n values include: 'Enabled', 'Disabled'\n :type cookie_based_affinity: str\n :param request_timeout: Gets or sets request timeout\n :type request_timeout: int\n :param probe: Gets or sets probe resource of application gateway\n :type probe: :class:`SubResource `\n :param provisioning_state: Gets or sets Provisioning state of the backend\n http settings resource Updating/Deleting/Failed\n :type provisioning_state: str\n :param name: Gets name of the resource that is unique within a resource\n group. 
This name can be used to access the resource\n :type name: str\n :param etag: A unique read-only string that changes whenever the resource\n is updated\n :type etag: str\n \"\"\" \n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'port': {'key': 'properties.port', 'type': 'int'},\n 'protocol': {'key': 'properties.protocol', 'type': 'ApplicationGatewayProtocol'},\n 'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'ApplicationGatewayCookieBasedAffinity'},\n 'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},\n 'probe': {'key': 'properties.probe', 'type': 'SubResource'},\n 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'etag': {'key': 'etag', 'type': 'str'},\n }\n\n def __init__(self, id=None, port=None, protocol=None, cookie_based_affinity=None, request_timeout=None, probe=None, provisioning_state=None, name=None, etag=None):\n super(ApplicationGatewayBackendHttpSettings, self).__init__(id=id)\n self.port = port\n self.protocol = protocol\n self.cookie_based_affinity = cookie_based_affinity\n self.request_timeout = request_timeout\n self.probe = probe\n self.provisioning_state = provisioning_state\n self.name = name\n self.etag = etag\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/Azure/azure-sdk-for-python/azure-mgmt-network/azure/mgmt/network/models/application_gateway_backend_http_settings.py","file_name":"application_gateway_backend_http_settings.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40972362547","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for, session\n)\n\nimport requests, werkzeug\nfrom flaskr.db import get_db\nfrom flaskr.classes import *\n\n# Parameters that are used several times in the code below\napi_path = 'https://api.themoviedb.org/3/'\nparams = {'api_key': 'df7d8e0359122d2f3e6348064a104074'}\n\n\ndef get_shows_from_search(query, kind='search_query', genres=None, show_id=None, page=1):\n \"\"\"\n This function handles the different API calls and returns the results.\n The different API calls have to be specified in the 'kind' parameter, possibilities are : 'search_query' (default),\n 'trending_day','trending_week','popular','top_rated','recommendation'\n All the results are stored in Show objects (cf classes.py).\n \"\"\"\n params['page'] = page\n\n if kind == 'search_query':\n params['query'] = query\n req = requests.get(api_path + 'search/tv', params)\n elif kind == 'trending_day':\n # Get the list of today's trending shows with an API call\n req = requests.get(api_path + 'trending/tv/day', params)\n elif kind == 'trending_week':\n # Get the list of today's trending shows with an API call\n req = requests.get(api_path + 'trending/tv/week', params)\n elif kind == 'popular':\n req = requests.get(api_path + 'tv/popular', params)\n elif kind == 'top_rated':\n req = requests.get(api_path + 'tv/top_rated', params)\n elif kind == 'recommendation' and show_id is not None:\n req = requests.get(api_path + 'tv/' + str(show_id) + '/recommendations', params)\n elif kind == 'discover' and genres is not None:\n params['with_genres'] = genres\n req = requests.get(api_path + 'discover/tv', params)\n else:\n print('Please enter a correct request type.')\n\n # Check the response status code and raise a custom exception if not 200\n if not req.ok:\n raise APIError(req.status_code)\n\n 
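# TMDB v3 list endpoints return {\"page\", \"results\", \"total_pages\", \"total_results\"}\n 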
req_json = req.json()\n\n    results = []\n    if req_json[\"total_results\"] == 0:\n        flash(\"No results were found for your search.\")\n\n    for res in req_json[\"results\"]:\n        results += [Show(res)]\n\n    return results, req_json[\"total_pages\"]\n\n\ndef shows_to_session():\n\n    if 'user_id' not in session:\n        return None\n\n    shows = []\n    show_ids = get_db().execute(\n        'SELECT show_id'\n        ' FROM shows_users '\n        ' WHERE user_id = ?',\n        (session['user_id'],)\n    ).fetchall()\n\n    for show in show_ids:\n        shows += [show['show_id']]\n\n    session['show_ids'] = shows\n    return None\n\n\n\ndef make_multi_requests(show_ids):\n\n    # let's make the new shows appear first\n    show_ids = show_ids[::-1]\n\n    # let's launch all the API call threads\n    APIrequest.initiate()\n    APIrequest.show_ids = show_ids\n    threads = []\n    for i in show_ids:\n        threads.append(APIrequest())\n\n    for t in threads:\n        t.start()\n\n    for t in threads:\n        t.join()\n\n    results = [0] * len(show_ids)\n\n    # we reorder the results\n    for show_id in APIrequest.shows.keys():\n        results[show_ids.index(show_id)] = APIrequest.shows[show_id]\n\n    return results\n\n\ndef genre_str(genre):\n    if len(genre) == 0:\n        return None\n    return \", \".join(genre)\n","repo_name":"AnnaBornert/FindYourShows","sub_path":"flaskr/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73084433123","text":"from utils.downloader import FileDownloader\nimport os \n\nclass ResourceDownloader:\n\n    def __init__(self):\n        self.downloader = FileDownloader()\n\n    def download_resources(self, json_details, folder_path):\n        image_urls = json_details['images']\n        counter = 0\n\n        for video_url in json_details['videos_url']:\n            counter += 1\n            video_extension = video_url.split('.')[-1]\n            video_resource_name = 'video' + str(counter) + '.' + video_extension\n            self.downloader.download(os.path.join(folder_path, video_resource_name), video_url)\n\n        for image in image_urls.values():\n            urls = image['image_urls']\n\n            for url in urls:\n                splitted_url = url.split('/')[-1].split('.')\n                resource_name = splitted_url[0] + '.' + splitted_url[-1]\n                self.downloader.download(os.path.join(folder_path, resource_name), url)\n    \n    def download_review_resources(self, json_details, folder_path):\n        counter = 0\n\n        for review_detail in json_details:\n\n            if review_detail['rating'] == 5:\n\n                for image in review_detail['image_urls']:\n                    counter += 1 \n                    extension = image.split('.')\n                    image_name = 'image' + str(counter).zfill(3) + '.' + extension[-1]\n                    self.downloader.download(os.path.join(folder_path, image_name), image)\n\n","repo_name":"Zselter07/AmazonScraper","sub_path":"utils/resource_downloader.py","file_name":"resource_downloader.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"40457141406","text":"import altair as alt\r\nimport pandas as pd\r\nimport streamlit as st \r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\n# the footer showing the names Abdulaziz and Muhannad (the original HTML markup was stripped; a minimal div is restored below)\r\nst.markdown(\r\n\"\"\"\r\n\r\n
<div class=\"footer\" style=\"position: fixed; bottom: 0; width: 100%; text-align: center;\">\r\n Created by Abdulaziz and Muhannad\r\n</div>
\r\n\"\"\",\r\nunsafe_allow_html=True,\r\n)\r\nalt.data_transformers.disable_max_rows()\r\nst.title('Data Analized')\r\n\r\n\r\nplayStore = pd.read_csv('googleplaystore.csv')\r\nplayStore['Price'] = playStore['Price'].str.replace('$', '')\r\nplayStore['Price'] = playStore['Price'].str.replace('Everyone', '0')\r\nplayStore['Reviews'] = playStore['Reviews'].str.replace('M', '')\r\nplayStore = playStore.astype({'Price':float,'Reviews':float})\r\nplayStore.drop(playStore[playStore['Rating'] == 19. ].index, inplace=True)\r\nplayStore.dropna(inplace=True)\r\n# converting the Last Updated column from Object to date \r\nplayStore[\"Last Updated\"] = pd.to_datetime(playStore[\"Last Updated\"], format=\"%B %d, %Y\")\r\n# getting the year only column \r\nplayStore[\"Year\"] = playStore[\"Last Updated\"].dt.year\r\n\r\n\r\n# sub header \r\nst.subheader('Pie plot of a sample data of 10 apps and their Rating')\r\nst.markdown('#### Column = Rating & App')\r\n# getting sample of data 10 rows for the following pie chart\r\nsampledata = playStore.head(10).copy()\r\nfig, ax = plt.subplots()\r\nexplode = (0, 0, 0.4, 0,0,0,0,0,0,0.4) \r\nax.pie(sampledata['Rating'], labels=sampledata['App'],explode=explode,\r\n shadow=True, startangle=90,autopct='%1.1f%%')\r\n# showing the chart\r\nst.pyplot(fig)\r\n\r\n# sub header \r\nst.subheader('Bubble Chart of a sample data of 30 apps ')\r\nst.markdown('#### Column = Review & Installs')\r\n# Bubble chart \r\nfig = px.scatter(\r\n playStore.head(30),\r\n x=\"Reviews\",\r\n y=\"Installs\",\r\n size=\"Reviews\",\r\n color=\"App\",\r\n hover_name=\"App\",\r\n log_x=True,\r\n size_max=60,\r\n)\r\n# showing the bubble chart\r\nfig\r\n\r\n# showing balloons \r\nst.balloons()\r\n","repo_name":"7eg/Assignmet-AM","sub_path":"pages/Page4.py","file_name":"Page4.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4459131070","text":"import annoy \nimport time\n\nclass MultiProbLSH:\n \"\"\"\n Multi-Probe LSH algorithm class.\n \"\"\"\n\n def __init__(self, index=None, n_trees=10, include_distances=True):\n \"\"\"\n Constructor for MultiProbLSH class.\n\n Args:\n index (annoy.AnnoyIndex): Pre-initialized Annoy index.\n n_trees (int): Number of trees to build the index with.\n include_distances (bool): Whether to include distance information in nearest neighbor queries.\n \"\"\"\n self.n_trees = n_trees\n self.include_distances = include_distances\n self.index = index\n\n def initialize(self, known_embeddings):\n \"\"\"\n Initializes the Multi-Probe LSH index with a set of known embeddings.\n\n Args:\n known_embeddings (list of np.array): List of known embeddings to use to build the index.\n \"\"\"\n index = annoy.AnnoyIndex(len(known_embeddings[0]), 'angular')\n\n # Index the known embeddings and labels\n for i, _ in enumerate(known_embeddings):\n index.add_item(i, known_embeddings[i])\n\n index.build(n_trees=self.n_trees)\n self.index = index\n\n def get_closest_neighbour(self, query_embedding, known_labels):\n \"\"\"\n Finds the closest neighbor of a given query embedding in the index.\n\n Args:\n query_embedding (np.array): Query embedding to find the closest neighbor for.\n known_labels (list): List of labels corresponding to the known embeddings.\n\n Returns:\n tuple: A tuple containing the label of the closest neighbor and the distance to that neighbor.\n \"\"\"\n closest_index, closest_distance = self.index.get_nns_by_vector(query_embedding, 1, search_k=-1,\n 
include_distances=self.include_distances)\n        closest_label = known_labels[closest_index[0]]\n\n        return closest_label, closest_distance[0]\n\n    def test_performance(self, testX, testy, known_labels):\n        \"\"\"\n        Tests the performance of the Multi-Probe LSH index on a given test set.\n\n        Args:\n            testX (list of np.array): Test set embeddings.\n            testy (list): List of true labels for the test set.\n            known_labels (list): List of labels corresponding to the known embeddings.\n\n        Returns:\n            tuple: A tuple containing the accuracy, number of mislabeled samples, and the total query time.\n        \"\"\"\n        mis_labeled = 0\n        start_time = time.time()\n\n        for i, _ in enumerate(testX):\n            closest_label, closest_distance = self.get_closest_neighbour(testX[i], known_labels)\n            if testy[i] != closest_label:\n                mis_labeled += 1\n\n        end_time = time.time()\n        total_time = end_time - start_time\n        # percentage of test samples matched to the correct label\n        accuracy = 100 * (1 - mis_labeled / len(testX))\n\n        return accuracy, mis_labeled, total_time","repo_name":"BrianMburu/Multi-Probe-LSH","sub_path":"multi_probe_lsh.py","file_name":"multi_probe_lsh.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4749309441","text":"# coding: utf-8\n\nimport json\nimport re\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response, redirect\n\nfrom settings import GOOGLE_API_KEY, INSTITUICAO\nfrom app.models import *\n\nfrom haversine import haversine\n\ndef minify_html(view):\n    def _view(request, *args, **kwargs):\n        response = view(request, *args, **kwargs)\n        html = response.content\n        html = re.sub(r'\\n', '', html)\n        html = re.sub(r' +', ' ', html)\n        html = re.sub(r'> <', '><', html)\n        response.content = html\n        return response\n    return _view\n\n\n@minify_html\ndef home(request):\n\n    # Optimize (truncate) the polygon coordinates of each place\n    locais_otim = []\n    for local in Local.objects.all():\n        local.coordenadas = re.sub(r'(\.\d{6})(\d*)', r'\1', local.coordenadas).replace(' ', '')\n        locais_otim.append(local)\n\n    return render_to_response('app/index.html',{\n        'api_key': GOOGLE_API_KEY,\n        'instituicao': INSTITUICAO,\n        'coletores': Coletor.objects.all(),\n        'locais': locais_otim,\n        'tipos': TipoColetor.objects.all()\n    })\n\n\ndef projeto(request):\n    return redirect('http://www.barbacena.ifsudestemg.edu.br/conteudo/geocoleta')\n\n\ndef ajax_local(request, id_local):\n\n    desc_local = Local.objects.get(id=id_local).descricao.replace('<', '')\n    desc_local_processada = re.sub(r'\[(.*?)]', '<a href=\"\g<1>
\" target=\"_blank\">\\g<1>', desc_local).replace('\\n', '
')\n\n    return HttpResponse(desc_local_processada)\n\n\ndef ajax_descartes(request):\n    LIMITE = 100\n\n    resp = [\n        (r.latitude, r.longitude, r.data.strftime('%d/%m/%Y'), r.tipo_usado.id, r.tipo_usado.tipo, r.coletor.descricao)\n        for r in Registro.objects.order_by('-data')[:LIMITE]\n    ]\n    \n    return HttpResponse(json.dumps(resp).replace(', ', ','), mimetype='application/json') \n\n\ndef ajax_descarte(request, lat, lng, residuo, confirma):\n\n    # Receives the collector id back to register the disposal\n    if int(confirma):\n        coletor = Coletor.objects.get(id=int(confirma))\n        id_residuo = int(residuo.split('_')[1])\n        tipo = TipoColetor.objects.get(id=id_residuo)\n        \n        Registro(latitude=lat, longitude=lng, coletor=coletor, tipo_usado=tipo).save()\n        \n        return HttpResponse('', mimetype='application/json')\n\n\n    # Find the nearest collector\n    melhor_distancia = 0\n    melhor_coletor = None\n    \n    id_residuo = int(residuo.split('_')[1])\n    \n    coletores = Coletor.objects.filter(tipo__id=id_residuo)\n    \n    for coletor in coletores:\n        dist = haversine(float(lat), float(lng), float(coletor.latitude), float(coletor.longitude))\n        \n        if not melhor_distancia:\n            melhor_distancia = dist\n            melhor_coletor = coletor\n\n        elif dist < melhor_distancia:\n            melhor_distancia = dist\n            melhor_coletor = coletor\n\n    if not melhor_distancia or melhor_distancia > 200:\n        return HttpResponse(json.dumps({'id_coletor': False, 'distancia': 0}), mimetype='application/json')\n\n    return HttpResponse(json.dumps({'id_coletor': melhor_coletor.id, 'distancia': round(melhor_distancia, 2)}), mimetype='application/json')\n    \n\ndef ajax_grafico(request, id_coletor):\n\n    registros_coletor = [reg.tipo_usado.id for reg in Registro.objects.filter(coletor__id=id_coletor)]\n    total_registros = len(registros_coletor)\n\n    if not total_registros:\n        return HttpResponse(json.dumps([]), mimetype='application/json')\n\n    def porcentagem_residuo(id_res):\n        return registros_coletor.count(id_res) * 100 / total_registros\n\n    dados_grafico = [(t.tipo, porcentagem_residuo(t.id)) for t in TipoColetor.objects.all()]\n\n    return HttpResponse(json.dumps(dados_grafico).replace(', ', ','), mimetype='application/json')\n\n\ndef not_found_404(request):\n    return redirect('/')\n\n\ndef server_error_500(request):\n    return redirect('/')","repo_name":"rafjaa/GeoColeta","sub_path":"geocoleta/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"pt","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"37096511017","text":"from os.path import isfile, isdir, join\nfrom os import listdir\nimport re\nimport json\n\ndef get_all_x_names_from_dir(path_to_dir, x_equals_file=None, pattern=\".*\", sort=True, withpath=False, numerical_sort=False):\n    if x_equals_file != None:\n        if x_equals_file:\n            node_check = isfile\n        else:\n            node_check = isdir\n    else:\n        node_check = lambda x: True\n    nodes = []\n    node_name_list = listdir(path_to_dir)\n    for node_name in node_name_list:\n        path_node = join(path_to_dir, node_name)\n        if node_check(path_node) and re.match(pattern, node_name):\n            nodes.append(path_node if withpath else node_name)\n    if sort:\n        if numerical_sort:\n            non_digit = re.compile(r'[^\d]+')\n            nodes.sort(key=lambda s: int(non_digit.sub('', s)))\n        else:\n            nodes.sort()\n    return nodes\n\n\n# if ext is specified then pattern is ignored.\ndef get_all_file_names_from_dir(path_to_dir, ext=None, pattern=\".*\", sort=True, withpath=False, numerical_sort=False):\n    if ext:\n        pattern = 
\".*\\.%s$\" % ext\n return get_all_x_names_from_dir(path_to_dir, True, pattern, sort, withpath, numerical_sort)\n\ndef get_all_dir_names_from_dir(path_to_dir, pattern=\".*\", sort=True, withpath=False, numerical_sort=False):\n return get_all_x_names_from_dir(path_to_dir, False, pattern, sort, withpath, numerical_sort)\n\ndef save_dict_as_json(dict_, filename):\n with open(filename, 'w') as outfile:\n json.dump(dict_, outfile)\n\ndef load_dict_from_json(filename):\n with open(filename, 'r') as infile:\n dict_ = json.loads(infile.read())\n return dict_\n\ndef read_lines(path):\n with open(path, 'r') as f:\n return f.read().split('\\n')\n","repo_name":"cesarsalgado/cesarpy","sub_path":"io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15944072619","text":"#!/usr/bin/env python\n\nimport rospy\nimport time\nimport numpy as np\nfrom xycar_msgs.msg import xycar_motor\nfrom sensor_msgs.msg import LaserScan\n\nmotor_control = xycar_motor()\nlidar_ready = False\nlidar_msg = None\nspeed = 4\ndistance = 0.3\n \ndef lidar_callback(data):\n global lidar_msg, lidar_ready\n lidar_msg = data.ranges\n lidar_ready = True\n\nrospy.init_node('motor_driver')\npub = rospy.Publisher('xycar_motor', xycar_motor, queue_size=1)\nrospy.Subscriber(\"/scan\", LaserScan, lidar_callback)\n\n\ndef motor_pub(angle, speed): \n global pub\n global motor_control\n \n if check_obstacle() == True:\n speed = 0\n\n motor_control.angle = angle\n motor_control.speed = speed\n\n pub.publish(motor_control)\n\ndef check_obstacle():\n global lidar_msg\n if lidar_msg == None:\n return False\n \n obstacle = False\n \n mid_arr = np.array([])\n mid_arr = np.append(mid_arr, lidar_msg[491:504])\n mid_arr = np.append(mid_arr, lidar_msg[1:14])\n \n median_1 = np.nanpercentile(lidar_msg[99:126], 50)\n median_2 = np.nanpercentile(lidar_msg[71:98], 50)\n median_3 = np.nanpercentile(lidar_msg[43:70], 50)\n median_4 = np.nanpercentile(lidar_msg[15:42], 50)\n median_5 = np.nanpercentile(mid_arr, 50)\n median_6 = np.nanpercentile(lidar_msg[463:490], 50)\n median_7 = np.nanpercentile(lidar_msg[435:462], 50)\n median_8 = np.nanpercentile(lidar_msg[407:434], 50)\n median_9 = np.nanpercentile(lidar_msg[379:406], 50)\n \n min_value = [median_1, median_2, median_3, median_4, median_5, median_6, median_7, median_8, median_9]\n while 0.0 in min_value:\n min_value.remove(0.0)\n if 0 < min(min_value) < distance:\n return True\n else:\n return False\n\nwhile lidar_ready == False:\n continue\nprint(\"Lidar Ready ----------\")\n \nwhile not rospy.is_shutdown():\n angle = -50\n for i in range(30): \n motor_pub(angle, speed) \n time.sleep(0.1)\n\n angle = 0\n for i in range(50):\n motor_pub(angle, speed)\n time.sleep(0.1)\n\n angle = 50\n for i in range(30):\n motor_pub(angle, speed) \n time.sleep(0.1)\n\n angle = 0\n for i in range(50):\n motor_pub(angle, speed) \n time.sleep(0.1)\n \n","repo_name":"Paralies/KMU_University_Self_Driving","sub_path":"src/xycar_application/app_8_drive/src/app_8_drive.py","file_name":"app_8_drive.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13843478451","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport six\nimport datetime\nimport unittest\nimport simplejson as json\nfrom mock import patch\nfrom requests.models import Response\nfrom requests.sessions import Session\nfrom 
boxview import BoxView, BoxViewError\nfrom boxview.boxview import format_date, get_mimetype_from_headers\n\n\ntest_url = 'https://cloud.box.com/shared/static/4qhegqxubg8ox0uj5ys8.pdf'\n\ntest_document = {\n    \"type\": \"document\",\n    \"id\": \"2da6cf9261824fb0a4fe532f94d14625\",\n    \"status\": \"done\",\n    \"name\": \"Leaves of Grass\",\n    \"created_at\": \"2013-08-30T00:17:37Z\",\n    \"modified_at\": \"2013-08-30T00:17:37Z\"\n}\n\ntest_document_list = {\n    \"document_collection\": {\n        \"total_count\": 1,\n        \"entries\": [test_document],\n    }\n}\n\ntest_session = {\n    \"type\": \"session\",\n    \"id\": \"IyA4Ij8IzE_Wih20hML3ihxEOul1T4rxHLBtwa4IRg9m-FApz80OtEwst_RmnGq8SzJRsGaEU0UWSotJCW33KUeJ0Ah5uQ\",\n    \"expires_at\": \"2013-09-11T19:52:09Z\"\n}\n\n\nclass BoxViewTestCase(unittest.TestCase):\n\n    def setUp(self):\n        # dummy key (placeholder literal restored); any non-empty value works here\n        self.api = BoxView('<api_key>')\n\n    def test_initials(self):\n        # api key is required\n        self.assertRaises(ValueError, BoxView)\n\n        now = datetime.datetime.utcnow().replace(microsecond=0)\n        dtfiso = now.isoformat()\n        dfiso = now.date().isoformat()\n\n        self.assertEqual(dtfiso, format_date(dtfiso))\n        self.assertEqual(dtfiso, format_date(now))\n        self.assertEqual(dfiso, format_date(now.date()))\n\n        headers = {'Content-Type': 'text/plain'}\n        self.assertEqual('text/plain', get_mimetype_from_headers(headers))\n        headers = {'Content-Type': 'text/plain; charset=utf-8'}\n        self.assertEqual('text/plain', get_mimetype_from_headers(headers))\n\n    @patch.object(Session, 'request')\n    def test_create_document(self, mock_request):\n        response = Response()\n        response.status_code = 201\n        response._content = json.dumps(test_document)\n        mock_request.return_value = response\n\n        result = self.api.create_document(url=test_url, name='Test Document')\n        self.assertIsNotNone(result)\n        self.assertEqual(result, test_document)\n\n        # url or file param is required\n        self.assertRaises(ValueError, self.api.create_document)\n\n    @patch.object(Session, 'request')\n    def test_create_document_from_url(self, mock_request):\n        response = Response()\n        response.status_code = 201\n        response._content = json.dumps(test_document)\n        mock_request.return_value = response\n\n        result = self.api.create_document_from_url(test_url,\n                                                   name='Test Document')\n        self.assertIsNotNone(result)\n        self.assertEqual(result, test_document)\n\n    @patch.object(Session, 'request')\n    def test_create_document_from_file(self, mock_request):\n        response = Response()\n        response.status_code = 201\n        response._content = json.dumps(test_document)\n        mock_request.return_value = response\n\n        stream = six.BytesIO()\n        result = self.api.create_document_from_file(stream,\n                                                    name='Test Document')\n        self.assertEqual(result, test_document)\n\n        result = self.api.create_document_from_file(__file__,\n                                                    name='Test Document')\n        self.assertEqual(result, test_document)\n\n    @patch.object(Session, 'request')\n    def test_get_document(self, mock_request):\n        response = Response()\n        response.status_code = 200\n        response._content = json.dumps(test_document)\n        mock_request.return_value = response\n\n        result = self.api.get_document(test_document['id'])\n        self.assertIsNotNone(result)\n        self.assertEqual(result, test_document)\n\n    @patch.object(Session, 'request')\n    def test_get_documents(self, mock_request):\n        response = Response()\n        response.status_code = 200\n        response._content = json.dumps(test_document_list)\n        mock_request.return_value = response\n\n        now = datetime.datetime.utcnow()\n        result = self.api.get_documents(limit=10, created_before=now)\n        self.assertIsNotNone(result)\n        self.assertEqual(result, 
test_document_list)\n\n @patch.object(Session, 'request')\n def test_update_document(self, mock_request):\n response = Response()\n response.status_code = 200\n response._content = json.dumps(test_document)\n mock_request.return_value = response\n\n result = self.api.update_document(test_document['id'],\n name='TestDocument')\n self.assertIsNotNone(result)\n self.assertEqual(result, test_document)\n\n @patch.object(Session, 'request')\n def test_delete_document(self, mock_request):\n response = Response()\n response.status_code = 204\n mock_request.return_value = response\n\n self.api.delete_document(test_document['id'])\n\n @patch.object(Session, 'request')\n def test_get_document_content(self, mock_request):\n response = Response()\n response.status_code = 200\n response.headers['Content-Type'] = 'text/plain'\n response._content = 'test'\n response.raw = six.StringIO('test')\n mock_request.return_value = response\n\n stream = six.StringIO()\n mimetype = self.api.get_document_content(stream, test_document['id'])\n self.assertEqual(stream.getvalue(), response._content)\n self.assertEqual(mimetype, response.headers['Content-Type'])\n\n stream = six.StringIO()\n self.api.get_document_content(stream,\n test_document['id'],\n extension='.pdf')\n self.assertEqual(stream.getvalue(), response._content)\n\n stream = six.StringIO()\n self.api.get_document_content(stream,\n test_document['id'],\n extension='.zip')\n self.assertEqual(stream.getvalue(), response._content)\n\n stream = six.StringIO()\n # allowed only .zip and .pdf extensions\n self.assertRaises(ValueError,\n self.api.get_document_content,\n stream,\n test_document['id'],\n extension='.docx')\n\n @patch.object(Session, 'request')\n def test_get_document_content_to_string(self, mock_request):\n response = Response()\n response.status_code = 200\n response.headers['Content-Type'] = 'text/plain'\n response._content = 'test'\n response.raw = six.StringIO('test')\n mock_request.return_value = response\n\n doc_id = test_document['id']\n result, mimetype = self.api.get_document_content_to_string(doc_id)\n self.assertIsNotNone(result)\n self.assertEqual(result, response._content)\n self.assertEqual(mimetype, response.headers['Content-Type'])\n\n @patch.object(Session, 'request')\n def test_get_document_content_to_file(self, mock_request):\n response = Response()\n response.status_code = 200\n response.headers['Content-Type'] = 'text/plain'\n response._content = 'test'\n response.raw = six.StringIO('test')\n mock_request.return_value = response\n\n filename = 'boxview.txt'\n mimetype = self.api.get_document_content_to_file(filename,\n test_document['id'])\n self.assertEqual(mimetype, response.headers['Content-Type'])\n self.assertTrue(os.path.exists(filename))\n try:\n os.remove(filename)\n except OSError:\n pass\n\n @patch.object(Session, 'request')\n def test_get_document_content_mimetype(self, mock_request):\n response = Response()\n response.status_code = 200\n response.headers['Content-Type'] = 'text/plain'\n mock_request.return_value = response\n\n mimetype = self.api.get_document_content_mimetype(test_document['id'])\n self.assertEqual(mimetype, response.headers['Content-Type'])\n\n @patch.object(Session, 'request')\n def test_create_session(self, mock_request):\n response = Response()\n response.status_code = 201\n response._content = json.dumps(test_session)\n mock_request.return_value = response\n\n expires_at = datetime.datetime.utcnow()\n result = self.api.create_session(test_document['id'],\n duration=600,\n expires_at=expires_at)\n 
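# the mocked 201 response should hand the session payload back unchanged\n        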
self.assertIsNotNone(result)\n        self.assertEqual(result['id'], test_session['id'])\n\n    @patch.object(Session, 'request')\n    def test_ready_to_view(self, mock_request):\n        response = Response()\n        response.status_code = 200\n        response._content = json.dumps(test_document)\n        mock_request.return_value = response\n\n        result = self.api.ready_to_view(test_document['id'])\n        self.assertIsNotNone(result)\n        self.assertTrue(bool(result))\n\n        response._content = json.dumps(dict(test_document, status='error'))\n\n        result = self.api.ready_to_view(test_document['id'])\n        self.assertFalse(bool(result))\n\n    @patch.object(Session, 'request')\n    def test_request_error(self, mock_request):\n        response = Response()\n        response.status_code = 401\n        response._content = 'Unauthorized'\n        response.reason = 'Unauthorized'\n        mock_request.return_value = response\n\n        self.assertRaises(BoxViewError,\n                          self.api.get_document,\n                          test_document['id'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"CoachLogix/python-boxview","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"39568524115","text":"\"\"\"\nimport random\nconsonants = \"bcdfghjklmnprstvwz\"\nvowels = \"aeiouaeiouaeiouaei\"\n\nconsonant_letter = random.choice(consonants)\nconsonant_letter1 = random.choice(consonants)\nconsonant_letter2 = random.choice(consonants)\n\n#vowel_letter = random.choice(vowels)\nvowel_letter = vowels[random.randrange(0, len(vowels))]\nvowel_letter1 = random.choice(vowels)\nvowel_letter2 = random.choice(vowels)\n\n#output = consonant_letter + vowel_letter + consonant_letter1 + vowel_letter1 + consonant_letter2 + vowel_letter2\noutput = vowel_letter + consonant_letter + vowel_letter1 + consonant_letter1 + vowel_letter2 + consonant_letter2\n\nfinal_output = output.center(15)\n\nnum_spaces = 4\nnumber_of_stars = 4 * num_spaces\nstars = \"*\" * number_of_stars\ninitial_spaces = \" \" * num_spaces\n\nprint(stars)\nprint(stars)\nprint(final_output, sep = \"\")\nprint(stars)\nprint(stars)\n\"\"\"\n\nimport math\n\n#area = pi * r1 * r2\n#perimeter of ellipse = 2 * pi * sqrt((r1^2 + r2^2) / 2)\n\nfencing_per_metre = 75\ngrass_per_square_metre = 20\nmajor_radius = 10 \nminor_radius = 5\n\nnum_spaces = 25\nnumber_of_stars = 2 * num_spaces\nstars = \"*\" * number_of_stars\ninitial_spaces = \" \" * num_spaces\n\narea = math.pi * minor_radius * major_radius # area of an ellipse: pi * r1 * r2\nperimeter = 2 * math.pi * math.sqrt(((math.pow(major_radius,2) + (math.pow(minor_radius,2))) / 2))\n\ncost_grass = area * grass_per_square_metre\n\ncost_fencing = perimeter * fencing_per_metre\n\ntotal_cost = cost_grass + cost_fencing\n\nprint(stars)\nprint(\"Cost of laying grass (\", round(area), \" square metres): $\", cost_grass, sep= \"\")\nprint(\"Cost of fencing (\", round(perimeter), \" metres): $\", cost_fencing, sep= \"\")\nprint(\"Total cost: $\", total_cost, sep= \"\")\nprint(stars)\n","repo_name":"trishalapiz/Python-practice","sub_path":"pat's a1.py","file_name":"pat's a1.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69859178722","text":"# -*- coding: UTF-8 -*-\nimport sys\nsys.path.append(\"./utils/\")\nsys.path.append(\"./models/\")\nimport torch\nimport argparse\nimport numpy as np\nimport random\nimport math\nimport os\nimport nnInit\nimport nnLoss\nimport nnPlot\n\nfrom torch.utils.data import DataLoader\nfrom scipy import stats\nfrom data 
import DataUtil\nfrom LinearModel import BasicLinear, BasicLinear_dropout, BiLinear, TwoLayerLinear\nfrom MaskedModel import MaskedModel1, MaskedModel2, MaskedModel3\nfrom FullHiddenModel import *\nfrom RankModel import *\nfrom Params import Params\nfrom hyperopt import fmin, tpe, hp\nfrom valuation import valTauLike\nfrom transform import daToRr\nfrom torch.optim import lr_scheduler\n\nparser = argparse.ArgumentParser()\n# data for training\nparser.add_argument('-tgt_s1', required = True, help = 'target sentence file if necessary')\nparser.add_argument('-tgt_s2', required = True, help = 'target sentence of system two')\nparser.add_argument('-tgt_ref', required = True, help = 'reference target sentence')\nparser.add_argument('-scores', required= True, help = 'the target')\n# output file\nparser.add_argument('-output', default = '/tmp/decState_params', help = 'path to save the output')\n# others\nparser.add_argument('-seq_len', type = int, default = 40, help = 'set the max length of the sequence')\nparser.add_argument('-batch_size', type = int, default = 100, help = 'batch size')\nparser.add_argument('-combine_data', default = False, help = 'combine the data before input to the model')\nparser.add_argument('-resume', default = False, help = 'set true to load an existing model and continue training')\nparser.add_argument('-checkpoint', help = 'only works when resume is set true; points to the model file')\n# only for model: ELMo_modified\nparser.add_argument('-cand', nargs = '+', type = int, help = 'list of int, store the code of the features that will be used in the model')\n\n\ndef main():\n    opt = parser.parse_args()\n    # set models and loss; only build a fresh model when not resuming from a checkpoint\n    if opt.resume:\n        model = torch.load(opt.checkpoint)\n    else:\n        #model = ELMo()\n        model = ELMo_modified(opt.cand)\n        #model = ELMo_simplified()\n        #model = Conv2dMlpModel_rank()\n        #model = Regress() # for L1Loss\n    loss = torch.nn.NLLLoss()\n    #loss = torch.nn.L1Loss() # for L1Loss\n    # set optimizer\n    optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)\n    # set lr scheduler\n    lamb1 = lambda x: .1**(x//30)\n    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda = lamb1)\n    # read data\n    train = Data(opt.tgt_s1, opt.tgt_s2, opt.tgt_ref, opt.scores)\n    dl_train = DataLoader(train, batch_size = opt.batch_size, shuffle = True)\n    # train the model \n    num_epochs = 30\n    for epoch in range(num_epochs):\n        scheduler.step()\n        model.train()\n        train_loss = 0\n        train_taul = 0\n        counter = 0\n        for batch_idx, dat in enumerate(dl_train):\n            counter += 1\n            tgt_s1 = torch.autograd.Variable(dat[0], requires_grad = False)\n            tgt_s2 = torch.autograd.Variable(dat[1], requires_grad = False)\n            tgt_ref = torch.autograd.Variable(dat[2], requires_grad = False)\n            scores = torch.autograd.Variable(dat[3], requires_grad = False)\n            #scores = scores.float() # for L1Loss\n            if opt.combine_data:\n                inp = torch.cat([tgt_s1, tgt_s2, tgt_ref], 1)\n            optimizer.zero_grad()\n            if opt.combine_data:\n                out = model(inp)\n            else:\n                out = model(tgt_s1, tgt_s2, tgt_ref)\n            lo = loss(out, scores)\n            lo.backward()\n            train_loss += lo.data[0]\n            optimizer.step()\n            taul = evaluate_tau_like(out, scores)\n            train_taul += taul\n            if batch_idx % 100 == 0:\n                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTaul: {:.6f}'.format(\n                    epoch,\n                    batch_idx * opt.batch_size,\n                    len(train), 100.*opt.batch_size*batch_idx/len(train),\n                    lo.data[0],\n                    taul\n                ))\n        print('====> Epoch: {} Average loss: {:.4f}\\tAverage Taul: {:.4f}'.format(\n            epoch,\n            train_loss/counter,\n            train_taul/counter,\n            ))\n        torch.save(model, opt.output)\n\n\n\ndef 
result_transform_sf_to_score(x):\n    a, b, c = x[0], x[1], x[2]\n    if a > b and a > c:\n        return -1\n    elif b > a and b > c:\n        return 0\n    elif c > a and c > b:\n        return 1\n    else:\n        # ties: default to 0\n        return 0\n\ndef evaluate_tau_like(arr1, arr2):\n    \"\"\"\n    arr1 comes from the model\n    arr2 comes from the target file\n    \"\"\"\n    a1 = arr1.cpu()\n    a2 = arr2.cpu()\n    a1 = a1.data.numpy()\n    a2 = a2.data.numpy()\n    a1 = list(map(result_transform_sf_to_score, a1))\n    #a1 = a1 - 1 # for L1Loss\n    a2 = a2 - 1\n    taul = valTauLike(a2, a1) # a2 should go first\n    return taul\n\nclass Data:\n    def __init__(self, tgt_s1, tgt_s2, tgt_ref, scores):\n        \"\"\" \n        I have overloaded this class to the point that its original purpose no longer shows;\n        it may be better to use an array list??? \n        \"\"\"\n        #super(Data, self).__init__()\n        self.data = {}\n        self.data['tgt_s1'] = self.add_file(tgt_s1)\n        self.data['tgt_s2'] = self.add_file(tgt_s2)\n        self.data['tgt_ref'] = self.add_file(tgt_ref)\n        self.data['scores'] = self.add_scores(scores)\n        assert(len(self.data['scores']) == len(self.data['tgt_s1']) and \n                len(self.data['scores']) == len(self.data['tgt_s2']) and\n                len(self.data['scores']) == len(self.data['tgt_ref']))\n        self.len = len(self.data['scores'])\n    \n    def add_file(self, path):\n        \"\"\"\n        return a torch.FloatTensor built from a numpy array on disk\n        \"\"\"\n        return torch.from_numpy(np.load(path))\n    \n    def add_scores(self, path):\n        # for softmax output, so we add one for each score\n        return torch.LongTensor([int(li.rstrip('\\n')) + 1 for li in open(path)])\n    \n    def get_data(self):\n        return self.data\n    \n    def __len__(self):\n        return self.len\n\n    def __getitem__(self, index):\n        return (self.data['tgt_s1'][index], \n                self.data['tgt_s2'][index], \n                self.data['tgt_ref'][index],\n                self.data['scores'][index])\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"HuangYiran/MasterArbeit","sub_path":"experiments/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19940078254","text":"import datetime\nimport json\nimport os\n\nfrom flask.cli import FlaskGroup\n\nfrom akamatsu import db, crypto_manager, init_app\nfrom akamatsu.models import FileUpload, Page, Post, Role, User\n\nimport click\n\n\ndef init_wrapper(info):\n    \"\"\"Wrapper for the application initialization function.\"\"\"\n    return init_app()\n\n\n@click.group(cls=FlaskGroup, create_app=init_wrapper)\ndef cli():\n    \"\"\"Management script.\"\"\"\n    pass\n\n\n# Begin user commands\n@cli.group()\ndef user():\n    \"\"\"User related commands.\"\"\"\n    pass\n\n\n@user.command()\n@click.argument('username')\n@click.argument('role')\ndef addrole(username, role):\n    \"\"\"Add a role to a given user.\n\n    \\b\n    Args:\n        username: the username to add the role to\n        role: role name\n    \"\"\"\n    user = User.get_by_username(username)\n\n    if not user:\n        click.echo('User does not exist')\n        return\n\n    if role in user.role_names:\n        click.echo('User already has that role')\n        return\n\n\n    role_exists = Role.query.filter_by(name=role).scalar()\n\n    if not role_exists:\n        click.echo('Role does not exist')\n        return\n\n    # Update roles\n    roles = [r for r in user.role_names]\n    roles.append(role)\n\n    user.role_names = set(roles)\n\n    try:\n        correct = True\n        db.session.commit()\n\n        click.echo('Roles updated')\n\n    except Exception as e:\n        # Catch anything unknown\n        correct = False\n\n        click.echo('Error updating roles')\n        click.echo(e)\n\n\n    finally:\n        if not correct:\n            # Cleanup\n            db.session.rollback()\n\n\n@user.command()\n@click.option('--username', 
help='username (must be unique)', prompt=True)\n@click.option('--email', help='email (must be unique)', prompt=True)\n@click.option('--password', help='password', prompt=True, hide_input=True)\ndef create(username, email, password):\n    \"\"\"Add a new user to the database.\"\"\"\n    hashed_password = crypto_manager.hash(password)\n\n    new_user = User(\n        username=username,\n        email=email,\n        password=hashed_password,\n        is_active=True,\n        notify_login=False\n    )\n\n    try:\n        correct = True\n        db.session.add(new_user)\n        db.session.commit()\n\n        click.echo('New user created')\n\n    except Exception as e:\n        # Catch anything unknown\n        correct = False\n\n        click.echo(\n            'Error creating user, make sure username and email are unique'\n        )\n        click.echo(e)\n\n    finally:\n        if not correct:\n            # Cleanup\n            db.session.rollback()\n\n\n@user.command()\n@click.argument('username')\ndef deactivate(username):\n    \"\"\"Deactivate a user account.\n\n    \\b\n    Args:\n        username: the username to disable\n    \"\"\"\n    user = User.get_by_username(username)\n\n    if not user:\n        click.echo('User does not exist')\n        return\n\n    if not user.is_active:\n        click.echo('User is already deactivated')\n        return\n\n    user.is_active = False\n\n    try:\n        correct = True\n        db.session.commit()\n\n        click.echo('User deactivated')\n\n    except Exception as e:\n        # Catch anything unknown\n        correct = False\n\n        click.echo('Error deactivating user')\n        click.echo(e)\n\n    finally:\n        if not correct:\n            # Cleanup\n            db.session.rollback()\n\n\n@user.command()\n@click.argument('username')\ndef activate(username):\n    \"\"\"Activate a user account.\n\n    \\b\n    Args:\n        username: the username to enable\n    \"\"\"\n    user = User.get_by_username(username)\n\n    if not user:\n        click.echo('User does not exist')\n        return\n\n    if user.is_active:\n        click.echo('User is already active')\n        return\n\n    user.is_active = True\n\n    try:\n        correct = True\n        db.session.commit()\n\n        click.echo('User activated')\n\n    except Exception as e:\n        # Catch anything unknown\n        correct = False\n\n        click.echo('Error activating user')\n        click.echo(e)\n\n    finally:\n        if not correct:\n            # Cleanup\n            db.session.rollback()\n\n\n@user.command()\n@click.argument('username')\n@click.option('--password', help='password', prompt=True, hide_input=True)\ndef password(username, password):\n    \"\"\"Change the password of a user.\n\n    \\b\n    Args:\n        username: user to change password for\n    \"\"\"\n    user = User.get_by_username(username)\n\n    if not user:\n        click.echo('User does not exist')\n        return\n\n    user.password = crypto_manager.hash(password)\n\n    try:\n        correct = True\n        db.session.commit()\n\n        click.echo('Password changed')\n\n    except Exception as e:\n        # Catch anything unknown\n        correct = False\n\n        click.echo('Failed to change password')\n        click.echo(e)\n\n    finally:\n        if not correct:\n            # Cleanup\n            db.session.rollback()\n\n\n@user.command()\n@click.argument('username')\ndef roles(username):\n    \"\"\"Show roles of a given user.\n\n    \\b\n    Args:\n        username: the username to list roles for\n    \"\"\"\n    user = User.get_by_username(username)\n\n    if not user:\n        click.echo('User does not exist')\n        return\n\n    roles = ', '.join(user.role_names) or 'No roles'\n\n    click.echo('Roles of user \"{}\": {}'.format(username, roles))\n\n\n# Begin translation commands\n@cli.group()\ndef translate():\n    \"\"\"Translation and localization commands.\"\"\"\n    pass\n\n\n@translate.command()\ndef compile():\n    \"\"\"Compile all languages.\"\"\"\n    compile_cmd = (\n        'pybabel compile '\n        '-d akamatsu/translations'\n    )\n\n    if os.system(compile_cmd):\n        raise RuntimeError('compile 
command failed')\n\n\n@translate.command()\n@click.argument('lang')\ndef init(lang):\n \"\"\"Initialize a new language.\"\"\"\n extract_cmd = (\n 'pybabel extract '\n '-F babel.cfg '\n '-k \"lazy_gettext _l\" '\n '-o akamatsu/translations/messages.pot .'\n )\n\n init_cmd = (\n 'pybabel init '\n '-i akamatsu/translations/messages.pot '\n '-d akamatsu/translations '\n '-l {}'\n )\n\n if os.system(extract_cmd):\n raise RuntimeError('extract command failed')\n\n if os.system(init_cmd.format(lang)):\n raise RuntimeError('init command failed')\n\n\n@translate.command()\ndef update():\n \"\"\"Update message catalog.\"\"\"\n extract_cmd = (\n 'pybabel extract '\n '-F babel.cfg '\n '-k \"lazy_gettext _l\" '\n '-o akamatsu/translations/messages.pot .'\n )\n\n update_cmd = (\n 'pybabel update '\n '-i akamatsu/translations/messages.pot '\n '-d akamatsu/translations'\n )\n\n\n if os.system(extract_cmd):\n raise RuntimeError('extract command failed')\n\n if os.system(update_cmd):\n raise RuntimeError('update command failed')\n\n\nif __name__ == '__main__':\n cli()\n\n\n# Begin data commands\n@cli.group()\ndef data():\n \"\"\"Data related commands.\"\"\"\n pass\n\n\n\n@data.command(name='import')\n@click.argument('source', type=click.Path(exists=True))\ndef import_data(source):\n \"\"\"Import a backup file into the database.\n\n This does not check the input nor overwrite any existing data,\n but may cause conflicts.\n\n \\b\n Args:\n source: backup file (JSON)\n \"\"\"\n if not click.confirm('Do you want to import data from {}?'.format(source)):\n click.echo('Operation cancelled')\n return\n\n relations = {\n 'pages': {},\n 'posts': {}\n }\n\n with open(source, 'r', encoding='utf-8') as f:\n for line in [l.strip() for l in f if l.strip()]:\n struct = json.loads(line)\n entity = struct.get('entity')\n data = struct.get('data')\n\n if not data or not isinstance(data, dict):\n click.echo('[ERROR] Malformed data: {}'.format(line))\n continue\n\n if entity == 'user':\n new_user = User(\n username=data['username'],\n password=data['password'],\n reset_password_token=data['reset_password_token'],\n email=data['email'],\n is_active=data['is_active'],\n first_name=data['first_name'],\n last_name=data['last_name'],\n personal_bio=data['personal_bio'],\n notify_login=data['notify_login'],\n )\n\n new_user.role_names = set(data['roles'])\n\n db.session.add(new_user)\n\n elif entity == 'page':\n new_page = Page(\n title=data['title'],\n mini=data['mini'],\n route=data['route'],\n custom_head=data['custom_head'],\n content=data['content'],\n is_published=data['is_published'],\n comments_enabled=data['comments_enabled'],\n last_updated=datetime.datetime.strptime(\n data['last_updated'],\n '%Y-%m-%d %H:%M:%S'\n )\n )\n\n if data['ghosted']:\n relations['pages'][data['route']] = data['ghosted']\n\n db.session.add(new_page)\n\n elif entity == 'post':\n new_post = Post(\n title=data['title'],\n slug=data['slug'],\n content=data['content'],\n is_published=data['is_published'],\n comments_enabled=data['comments_enabled'],\n last_updated=datetime.datetime.strptime(\n data['last_updated'],\n '%Y-%m-%d %H:%M:%S'\n ),\n tag_names=data['tags']\n )\n\n if data['ghosted']:\n if data['slug'] not in relations['posts']:\n relations['posts'][data['slug']] = {\n 'ghost': None,\n 'authors': None\n }\n\n relations['posts'][data['slug']]['ghost'] = data['ghosted']\n\n if data['authors']:\n if data['slug'] not in relations['posts']:\n relations['posts'][data['slug']] = {\n 'ghost': None,\n 'authors': None\n }\n\n 
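# stash the author usernames for now; they are resolved to User rows after the first commit\n                    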
relations['posts'][data['slug']]['authors'] = data['authors']\n\n db.session.add(new_post)\n\n elif entity == 'upload':\n new_upload = FileUpload(\n path=data['path'],\n description=data['description'],\n mime=data['mime'],\n uploaded_at=datetime.datetime.strptime(\n data['uploaded_at'],\n '%Y-%m-%d %H:%M:%S'\n )\n )\n\n db.session.add(new_upload)\n\n else:\n click.echo('[ERROR] Invalid entity: {}'.format(line))\n continue\n\n try:\n correct = True\n db.session.commit()\n\n except Exception as e:\n correct = False\n\n click.echo('Error importing data')\n click.echo(e)\n\n return\n\n finally:\n if not correct:\n db.session.rollback()\n\n\n # Handle relations\n for route in relations['pages']:\n ghost_page = Page.query.filter_by(route=route).first()\n\n if not ghost_page:\n continue\n\n to_ghost = Page.query.filter_by(route=relations['pages'][route]).first()\n\n if not to_ghost:\n continue\n\n ghost_page.ghosted_id = to_ghost.id\n\n\n for slug in relations['posts']:\n post = Post.query.filter_by(slug=slug).first()\n\n if not post:\n continue\n\n if relations['posts'][slug]['ghost']:\n to_ghost = Post.query.filter_by(\n slug=relations['posts'][slug]['ghost']\n ).first()\n\n if to_ghost:\n post.ghosted_id = to_ghost.id\n\n if relations['posts'][slug]['authors']:\n authors = User.query.filter(\n User.username.in_(relations['posts'][slug]['authors'])\n )\n\n post.authors = [a for a in authors]\n\n try:\n correct = True\n db.session.commit()\n\n except Exception as e:\n correct = False\n\n click.echo('Error processing relations')\n click.echo(e)\n\n return\n\n finally:\n if not correct:\n db.session.rollback()\n\n click.echo('Finished data import!')\n\n\n@data.command(name='export')\n@click.argument('output', type=click.Path())\ndef export_data(output):\n \"\"\"Export database data to a backup file.\n\n This file is a newline-delimited JSON file, where each line is a data\n entity.\n\n \\b\n Args:\n output: backup file (JSON)\n \"\"\"\n if not click.confirm('Do you want to export data to {}?'.format(output)):\n click.echo('Operation cancelled')\n return\n\n out = open(output, 'w', encoding='utf-8')\n\n # Backup users\n for u in User.query:\n user = {\n 'username': u.username,\n 'password': u.password,\n 'reset_password_token': u.reset_password_token,\n 'email': u.email,\n 'is_active': u.is_active,\n 'first_name': u.first_name,\n 'last_name': u.last_name,\n 'personal_bio': u.personal_bio,\n 'notify_login': u.notify_login,\n 'roles': [r for r in u.role_names]\n }\n\n out.write(json.dumps({'entity': 'user', 'data': user})+'\\n')\n\n # Backup pages\n for p in Page.query:\n page = {\n 'title': p.title,\n 'mini': p.mini,\n 'route': p.route,\n 'custom_head': p.custom_head,\n 'content': p.content,\n 'is_published': p.is_published,\n 'comments_enabled': p.comments_enabled,\n 'ghosted': None, # Should be route\n 'last_updated': p.last_updated.strftime('%Y-%m-%d %H:%M:%S'),\n }\n\n if p.ghosted_id:\n page['ghosted'] = p.ghosted.route\n\n out.write(json.dumps({'entity': 'page', 'data': page})+'\\n')\n\n # Backup posts\n for p in Post.query:\n post = {\n 'title': p.title,\n 'slug': p.slug,\n 'content': p.content,\n 'is_published': p.is_published,\n 'comments_enabled': p.comments_enabled,\n 'last_updated': p.last_updated.strftime('%Y-%m-%d %H:%M:%S'),\n 'authors': [a.username for a in p.authors],\n 'ghosted': None, # Should be slug\n 'tags': [t for t in p.tag_names]\n }\n\n if p.ghosted_id:\n post['ghosted'] = p.ghosted.slug\n\n\n out.write(json.dumps({'entity': 'post', 'data': post})+'\\n')\n\n # Backup 
uploads\n    for f in FileUpload.query:\n        upload = {\n            'path': f.path,\n            'description': f.description,\n            'mime': f.mime,\n            'uploaded_at': f.uploaded_at.strftime('%Y-%m-%d %H:%M:%S')\n        }\n\n        out.write(json.dumps({'entity': 'upload', 'data': upload})+'\\n')\n\n    out.close()\n","repo_name":"rmed/akamatsu","sub_path":"akamatsu/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":15124,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"69944011362","text":"from vrp.nodes.node import Node\r\nfrom vrp.edges.edge import Edge \r\nfrom vrp.routes.route import Route\r\nfrom vrp.solutions.solution import Solution\r\n\r\nimport math, random\r\nimport operator\r\n\r\n\r\nclass CWS:\r\n\r\n    def __init__(self, filename, vehicle_capacity = 100) -> None:\r\n        self.vehicle_capacity = vehicle_capacity\r\n        self.depot, self.destinations = self.read_nodes(filename)\r\n        self.create_destination_edges()\r\n        self.solution = Solution(self.destinations)\r\n        self.savings = []\r\n        self.init_savings()\r\n    \r\n    def read_nodes(self, filename: str):\r\n        with open(filename) as instance:\r\n            i = 0\r\n            nodes = []\r\n            for line in instance:\r\n                # node data: x, y, demand\r\n                data = [float(x) for x in line.split()]\r\n                aNode = Node(i, data[0], data[1], data[2])\r\n                nodes.append(aNode)\r\n                i += 1\r\n        depot = nodes[0] # node 0 is depot\r\n        destinations = nodes[1:]\r\n        return depot, destinations\r\n    \r\n    def init_savings(self):\r\n        for i in range(len(self.destinations)):\r\n            destination_a = self.destinations[i]\r\n            for j in range(i+1, len(self.destinations)):\r\n                destination_b = self.destinations[j]\r\n                ijEdge = Edge(destination_a, destination_b) # creates the (i, j) edge\r\n                jiEdge = Edge(destination_b, destination_a)\r\n                ijEdge.inverse_edge = jiEdge # sets the inverse edge\r\n                jiEdge.inverse_edge = ijEdge\r\n                # compute the Euclidean distance as cost\r\n                ijEdge.cost = math.sqrt((destination_b.x - destination_a.x)**2 + (destination_b.y - destination_a.y)**2)\r\n                jiEdge.cost = ijEdge.cost # assume symmetric costs\r\n                # compute savings as proposed by Clarke & Wright\r\n                ijEdge.savings = destination_a.to_depot_edge.cost + destination_b.from_depot_edge.cost - ijEdge.cost\r\n                jiEdge.savings = ijEdge.savings\r\n                # save one edge in the savings list\r\n                self.savings.append(ijEdge)\r\n        self.savings.sort(key = operator.attrgetter(\"savings\"), reverse = True)\r\n\r\n    def savings_from_geometric(self, beta = 0.3):\r\n        savings_copy = self.savings.copy()\r\n        end_list = []\r\n        while(len(savings_copy) > 0):\r\n            index = int(math.log(random.random()) / math.log(1 - beta))\r\n            index = index % len(savings_copy)\r\n            end_list.append(savings_copy[index])\r\n            savings_copy.pop(index)\r\n        self.savings = end_list\r\n\r\n    def create_destination_edges(self):\r\n        for node in self.destinations:\r\n            dnEdge = Edge(self.depot, node) # creates (depot, node) edge\r\n            ndEdge = Edge(node, self.depot)\r\n            dnEdge.inverse_edge = ndEdge # sets the inverse edge\r\n            ndEdge.inverse_edge = dnEdge\r\n            # compute the Euclidean distance as cost\r\n            dnEdge.cost = math.sqrt((node.x - self.depot.x)**2 + (node.y - self.depot.y)**2)\r\n            ndEdge.cost = dnEdge.cost # assume symmetric costs\r\n            # save in node a reference to the (depot, node) edge\r\n            node.from_depot_edge = dnEdge\r\n            node.to_depot_edge = ndEdge\r\n\r\n    def are_routes_mergeable(self, node_a : Node, node_b : Node, route_a : Route, route_b : Route):\r\n        is_different_route = route_a != route_b\r\n        are_nodes_exterior = 
node_a.is_connected_to_depot and node_b.is_connected_to_depot\r\n        is_total_demand_covered = self.vehicle_capacity >= route_a.demand + route_b.demand\r\n        return is_different_route and are_nodes_exterior and is_total_demand_covered\r\n\r\n    def get_depot_edge(self, route : Route, node : Node):\r\n        origin = route.edges[0].origin\r\n        end = route.edges[0].end\r\n        return route.edges[0] if ( (origin == node and end == self.depot) or\r\n                                 ( origin == self.depot and end == node)) else route.edges[-1]\r\n\r\n    def are_multiple_edges(self, route: Route) -> bool:\r\n        return len(route.edges) > 1\r\n    \r\n    def remove_depot_edge_from_route(self, route : Route, node : Node):\r\n        depot_edge = self.get_depot_edge(route, node)\r\n        route.remove_edge(depot_edge)\r\n        # If there are multiple edges in a route, \r\n        # then origin will be interior, i.e., \r\n        # not directly connected to the depot\r\n        if self.are_multiple_edges(route):\r\n            node.is_connected_to_depot = False\r\n    \r\n    def merge(self, route_a : Route, node_a: Node, route_b : Route, node_b: Node, possible_common_edge : Edge):\r\n        self.remove_depot_edge_from_route(route_a, node_a)\r\n        self.remove_depot_edge_from_route(route_b, node_b)\r\n        if route_a.edges[0].origin != self.depot :\r\n            route_a.reverse()\r\n        if route_b.edges[0].origin == self.depot :\r\n            route_b.reverse()\r\n        route_a.add_edge(possible_common_edge)\r\n        route_a.demand += node_b.demand\r\n        node_b.route = route_a\r\n        for edge in route_b.edges:\r\n            route_a.edges.append(edge)\r\n            route_a.cost += edge.cost\r\n            route_a.demand += edge.end.demand\r\n            edge.end.route = route_a\r\n        self.solution.cost -= possible_common_edge.savings\r\n        self.solution.remove_route(route_b)\r\n\r\n    def run(self) -> Solution:\r\n        while (len(self.savings) > 0):\r\n            possible_common_edge:Edge = self.savings.pop(0)\r\n            origin:Node = possible_common_edge.origin\r\n            end:Node = possible_common_edge.end\r\n            route_a:Route = origin.route\r\n            route_b:Route = end.route\r\n            if ( self.are_routes_mergeable(origin, end, route_a, route_b) ):\r\n                self.merge(route_a, origin, route_b, end, possible_common_edge)\r\n        return self.solution\r\n\r\n    ","repo_name":"emiliogq/vrp-cws","sub_path":"src/cws/cws_heuristic.py","file_name":"cws_heuristic.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33151381100","text":"#!/usr/bin/env python3\n\nimport os,sys\nimport traceback\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nfrom data_backup import Data_backup\nfrom solr import SOLR\nfrom solr import SOLR_CORE_NAME\n\n'''\n_id => str(_id)\nAdd scene:db_name and topic:collection to each document.\nIf a super_intention field exists and is empty, replace it with \"null\".\nIf a questions or equal_questions field exists, split it into separate documents\n {question, question_ik, question_cn} and remove questions/equal_questions.\n'''\n\nclass Update():\n    def __init__(self, ip, db_name):\n        self.db_name = db_name\n        # use the ip argument rather than a hardcoded host\n        self.db = MongoClient(ip, 27017)[db_name]\n        self.core_name = SOLR_CORE_NAME\n        self.solr_url = 'http://127.0.0.1:8999/solr'\n        self.solr = SOLR(self.solr_url)\n\n    def check_solr_core(self):\n        if not self.solr.solr_core_exists(self.core_name):\n            self.solr.create_solr_core(self.core_name)\n\n    def update_data(self, collection):\n        def insert(data):\n            if not data:\n                return\n            data_one = data.copy()\n            data_one['_id'] = str(data_one['_id'])\n            data_one['scene'] = self.db_name\n            data_one['topic'] = collection\n            if 'super_intention' in data_one:\n                if data_one['super_intention'] == '':\n                    data_one['super_intention'] = 'null'\n            if 'equal_questions' in 
data_one:\n                data_one.pop('equal_questions')\n                for q in data['equal_questions']:\n                    data_one['question'] = q\n                    data_one['question_ik'] = q\n                    data_one['question_cn'] = q\n                    self.solr.update_solr(data_one, self.core_name)\n            elif 'questions' in data_one:\n                data_one.pop('questions')\n                for q in data['questions']:\n                    data_one['question'] = q\n                    data_one['question_ik'] = q\n                    data_one['question_cn'] = q\n                    self.solr.update_solr(data_one, self.core_name)\n            else:\n                self.solr.update_solr(data_one, self.core_name)\n\n        self.solr.delete_solr_by_query(self.core_name,\n                'scene_str:'+self.db_name+' AND topic_str:'+collection)\n        data = [x for x in self.db[collection].find()]\n        for d in data:\n            insert(d)\n\n    def update(self):\n        try:\n            collections = self.db.collection_names()\n            if 'log' in collections:\n                collections.remove('log')\n            for collection in collections:\n                print('start '+collection)\n                self.update_data(collection)\n            return 1\n        except Exception:\n            traceback.print_exc()\n            return 0\n\nif __name__ == '__main__':\n    up = Update('127.0.0.1', 'bank_psbc')\n    up.update()\n","repo_name":"zhengxin2016/nlp_code","sub_path":"corpus/webservice/update_solr.py","file_name":"update_solr.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9915766418","text":"import csv\nimport os\nimport time\nfrom itertools import islice\nfrom threading import Lock, Thread\n\nimport cv2\nimport numpy as np\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import QPixmap, QImage, QIcon\nfrom PyQt5.QtWidgets import QWidget\n\nfrom pipeline_module.core.base_module import DictData\nfrom pipeline_module.core.task_solution import TaskSolution\nfrom pipeline_module.face_detection_module import FaceDetectionModule\nfrom pipeline_module.face_encoding_module import FaceEncodingModule\nfrom pipeline_module.face_match_module import FaceMatchModule\nfrom pipeline_module.video_modules import VideoModule\nfrom pipeline_module.vis_modules import DynamicAttendanceVisModule\nfrom smart_classroom.list_items import VideoSourceItem, FaceListItem, AttendanceItemWrapper\nfrom ui.dynamic_attendance import Ui_DynamicAttendance\nfrom utils.common import OffsetList, second2str, read_img_from_cn_path, read_encoding_json2npy\n\nface_bank_base_dir = 'resource/face_bank'\n\n\nclass DynamicAttendanceApp(QWidget, Ui_DynamicAttendance):\n    init_attendance_task_signal = QtCore.pyqtSignal()\n    push_frame_signal = QtCore.pyqtSignal(DictData)\n    update_attendance_task_signal = QtCore.pyqtSignal(DictData)\n\n    def __init__(self, parent=None):\n        super(DynamicAttendanceApp, self).__init__(parent)\n        self.setupUi(self)\n        self.video_source = 0\n        self.frame_data_list = OffsetList()\n        self.opened_source = None\n        self.playing = None\n        self.playing_real_time = False\n\n        # Video events\n        # Wire up the video-source events\n        self.open_source_lock = Lock()\n        self.open_source_btn.clicked.connect(\n            lambda: self.open_source(self.video_source_txt.text() if len(self.video_source_txt.text()) != 0 else 0))\n        self.video_resource_list.itemClicked.connect(lambda item: self.open_source(item.src))\n        self.video_resource_file_list.itemClicked.connect(lambda item: self.open_source(item.src))\n\n        self.close_source_btn.clicked.connect(self.close_source)\n        self.play_video_btn.clicked.connect(self.play_video)\n        self.stop_playing_btn.clicked.connect(self.stop_playing)\n        self.video_process_bar.valueChanged.connect(self.change_frame)\n        self.push_frame_signal.connect(self.push_frame)\n        self.early_stop_video_btn.clicked.connect(self.close_ahead)\n        # 
Wire up the remaining signal/slot connections\n        self.show_raw_lbl_ckb.stateChanged.connect(self.change_frame)\n        self.show_anno_ckb.stateChanged.connect(self.change_frame)\n        self.face_match_threshold_dspin.valueChanged.connect(\n            lambda: (self.change_frame(), self.update_attendance_list_widget())\n        )\n        self.init_attendance_task_signal.connect(self.init_attendance_task)\n        self.update_attendance_task_signal.connect(self.update_attendance_task_list)\n        # Initialize video sources\n        self.init_video_source()\n        # Initialize the face database\n        self.init_face_bank()\n\n        # Other events\n        # Jump to the frame recorded for a clicked attendance item\n\n        def local_to_cheater(x):\n            self.stop_playing()\n            if x.frame_num > 0:\n                self.video_process_bar.setValue(x.frame_num)\n\n        self.attended_list.itemClicked.connect(local_to_cheater)\n\n        # Filtering\n        def student_list_filter(txt: str, self=self):\n            txt = txt.strip()\n            if txt == '':\n                for i in range(self.student_list.count()):\n                    self.student_list.item(i).setHidden(False)\n            else:\n                for i in range(self.student_list.count()):\n                    item = self.student_list.item(i)\n                    item.setHidden(item.name.find(txt) < 0)\n\n        self.student_list_filter_txt.textChanged.connect(student_list_filter)\n\n        self.class_list_filter_txt.textChanged.connect(lambda: self.refresh_face_bank())\n\n    def refresh_face_bank(self):\n        \"\"\"\n        Refresh the face bank list\n        \"\"\"\n        try:\n            self.face_bank_list_cbx.clear()\n            txt = self.class_list_filter_txt.text()\n            for bank_name in self.face_banks:\n                if txt == '' or bank_name.find(txt) > -1:\n                    self.face_bank_list_cbx.addItem(bank_name)\n        except Exception as e:\n            print('refresh_face_bank:', e)\n\n    def init_face_bank(self):\n        \"\"\"\n        Initialize the face bank\n        \"\"\"\n        try:\n            if not os.path.exists(face_bank_base_dir):\n                os.makedirs(face_bank_base_dir)\n            else:\n                print(f\"本地视频目录已创建: {face_bank_base_dir}\")\n            self.face_banks = os.listdir(face_bank_base_dir)\n            self.refresh_face_bank()\n            self.face_bank_list_cbx.currentTextChanged.connect(self.open_face_bank)\n            # Initialize\n            self.refresh_face_bank_btn.clicked.connect(lambda: self.open_face_bank())\n            self.refresh_face_bank_btn.setIcon(QIcon(':/func/refresh.ico'))\n        except Exception as e:\n            print('init_face_bank:', e)\n\n    def init_video_source(self):\n        \"\"\"\n        Initialize the video sources\n        \"\"\"\n        # Add the camera channel\n        VideoSourceItem(self.video_resource_list, \"摄像头\", 0).add_item()\n        # Add local video files\n        local_source = 'resource/videos/dynamic_attendance'\n        if not os.path.exists(local_source):\n            os.makedirs(local_source)\n        else:\n            print(f\"本地视频目录已创建: {local_source}\")\n        videos = [*filter(lambda x: x.endswith('.mp4'), os.listdir(local_source))]\n        for video_name in videos:\n            VideoSourceItem(self.video_resource_file_list,\n                            video_name,\n                            os.path.join(local_source, video_name),\n                            ico_src=':/videos/multimedia.ico').add_item()\n\n        with open('resource/video_sources.csv', 'r', encoding='utf-8') as f:\n            reader = csv.reader(f)\n            for row in islice(reader, 1, None):\n                VideoSourceItem(self.video_resource_list, row[0], row[1],\n                                ico_src=':/videos/webcam.ico').add_item()\n\n    def open_source(self, source):\n        self.open_source_lock.acquire(blocking=True)\n        if self.opened_source is not None:\n            self.close_source()\n        # Loading\n        frame = np.zeros((480, 640, 3), np.uint8)\n        (f_w, f_h), _ = cv2.getTextSize(\"Loading\", cv2.FONT_HERSHEY_TRIPLEX, 1, 2)\n\n        cv2.putText(frame, \"Loading\", (int((640 - f_w) / 2), int((480 - f_h) / 2)),\n                    cv2.FONT_HERSHEY_TRIPLEX,\n                    1, (255, 255, 255), 2)\n\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n        frame = cv2.resize(frame, (self.video_screen.width() - 9, self.video_screen.height() - 9))  # resize the image\n        image_height, image_width, image_depth = frame.shape\n        frame = QImage(frame.data, image_width, image_height,  # 
创建QImage格式的图像,并读入图像信息\n image_width * image_depth,\n QImage.Format_RGB888)\n self.video_screen.setPixmap(QPixmap.fromImage(frame))\n\n # 启动视频源\n def open_source_func(self):\n # 初始化签到任务\n self.init_attendance_task_signal.emit()\n fps = 12\n self.opened_source = TaskSolution() \\\n .set_source_module(VideoModule(source, fps=fps)) \\\n .set_next_module(FaceDetectionModule()) \\\n .set_next_module(FaceEncodingModule()) \\\n .set_next_module(FaceMatchModule(np.array([encoding for (_, encoding, _) in self.known_faces_data]))) \\\n .set_next_module(DynamicAttendanceVisModule(lambda d: self.push_frame_signal.emit(d),\n self.known_face_names))\n self.opened_source.start()\n self.playing_real_time = True\n self.open_source_lock.release()\n\n Thread(target=open_source_func, args=[self]).start()\n\n def push_frame(self, data):\n try:\n max_index = self.frame_data_list.max_index()\n time_process = self.frame_data_list[max_index].time_process if len(self.frame_data_list) > 0 else 0\n data.time_process = time_process + data.interval\n # 添加帧到视频帧列表\n self.frame_data_list.append(data)\n while len(self.frame_data_list) > 500:\n self.frame_data_list.pop()\n self.video_process_bar.setMinimum(self.frame_data_list.min_index())\n self.video_process_bar.setMaximum(self.frame_data_list.max_index())\n\n # 动态点名\n data.frame_num = max_index + 1\n self.update_attendance_task_signal.emit(data)\n\n # 判断是否进入实时播放状态\n if self.playing_real_time:\n self.video_process_bar.setValue(self.video_process_bar.maximum())\n except Exception as e:\n print(\"push_frame\", e)\n\n def init_attendance_task(self):\n \"\"\"\n 初始化签到任务\n \"\"\"\n self.absented_list.clear()\n self.attended_list.clear()\n self.attendance_task_list = [AttendanceItemWrapper(self.attended_list,\n self.absented_list,\n face_img,\n name) for (name, _, face_img) in self.known_faces_data]\n self.update_attendance_list_widget()\n\n def update_attendance_task_list(self, data):\n \"\"\"\n 更新签到任务数据\n :param data: 处理后的数据\n \"\"\"\n if hasattr(data, 'skipped'):\n return\n try:\n face_labels = data.face_labels\n face_probs = data.face_probs\n face_locations = data.face_locations\n frame = data.frame\n change_flag = False\n for face_label, \\\n face_prob, \\\n face_location in zip(face_labels,\n face_probs,\n face_locations):\n item = self.attendance_task_list[face_label]\n if item.set_matched_data(frame, 100 * (1 - face_prob), data.frame_num, face_location):\n item.show_on_attend_list(self.face_match_threshold_dspin.value())\n change_flag = True\n if change_flag:\n self.update_attended_students_num_lbl() # 更新签到学生数量显示\n if self.absented_list.count() <= 0 and self.auto_close_ahead_ckb.isChecked():\n self.close_ahead()\n except Exception as e:\n print(\"update_attendance_task_list\", e)\n\n def update_attended_students_num_lbl(self):\n \"\"\"\n 更新签到学生数量显示\n \"\"\"\n attended_student_num = self.attended_list.count()\n all_student_num = self.absented_list.count() + attended_student_num\n self.student_num_lbl.setText(f'人数:{attended_student_num}/{all_student_num}')\n\n def update_attendance_list_widget(self):\n \"\"\"\n 更新签到任务相关的列表组件\n \"\"\"\n try:\n for item in self.attendance_task_list:\n item: AttendanceItemWrapper\n item.show_on_attend_list(self.face_match_threshold_dspin.value())\n pass\n self.update_attended_students_num_lbl()\n except Exception as e:\n print(\"update_attendance_list_widget\", e)\n\n def close_source(self):\n try:\n if self.opened_source is not None:\n self.stop_playing()\n self.opened_source.close()\n self.opened_source = None\n 
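The `open_source_func` above assembles the recognition chain fluently: `TaskSolution().set_source_module(VideoModule(...)).set_next_module(FaceDetectionModule())...`, ending in a visualisation module that emits frames back to the UI through a Qt signal. The real modules live in the project's `pipeline_module` package; purely to illustrate the fluent-chain style, a minimal stand-in could look like this (all names below are hypothetical, not the project's API):

```python
class Pipeline:
    """Toy stand-in for a TaskSolution-style fluent processing chain."""

    def __init__(self):
        self.modules = []

    def set_source_module(self, module):
        self.modules = [module]
        return self  # returning self is what makes the calls chain

    def set_next_module(self, module):
        self.modules.append(module)
        return self

    def start(self):
        data = None
        for module in self.modules:  # run each stage on the previous output
            data = module(data)
        return data


# Each "module" here is just a callable taking the previous stage's output.
pipe = (Pipeline()
        .set_source_module(lambda _: list(range(5)))      # source stage
        .set_next_module(lambda xs: [x * x for x in xs])  # transform stage
        .set_next_module(print))                          # sink stage
pipe.start()  # prints [0, 1, 4, 9, 16]
```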
self.frame_data_list.clear()\n self.video_process_bar.setMaximum(-1)\n self.playing_real_time = False\n self.attended_list.clear()\n self.absented_list.clear()\n except Exception as e:\n print('close_source', e)\n\n def close_ahead(self):\n if self.opened_source is not None:\n self.opened_source.close()\n\n def playing_video(self):\n try:\n while self.playing is not None and not self.playing_real_time:\n current_frame = self.video_process_bar.value()\n max_frame = self.video_process_bar.maximum()\n if current_frame < 0:\n continue\n elif current_frame < max_frame:\n data = self.frame_data_list[current_frame]\n if current_frame < max_frame:\n self.video_process_bar.setValue(current_frame + 1)\n time.sleep(data.interval)\n else:\n self.stop_playing()\n self.playing_real_time = True\n except Exception as e:\n print('playing_video', e)\n\n def stop_playing(self):\n if self.playing is not None:\n self.playing = None\n\n def play_video(self):\n if self.playing is not None:\n return\n self.playing = Thread(target=self.playing_video, args=())\n self.playing.start()\n\n def change_frame(self):\n try:\n if len(self.frame_data_list) == 0:\n return\n current_frame = self.video_process_bar.value()\n max_frame = self.video_process_bar.maximum()\n self.playing_real_time = current_frame == max_frame # 是否开启实时播放\n # 更新界面\n data = self.frame_data_list[current_frame]\n maxData = self.frame_data_list[max_frame]\n frame = data.get_draw_frame(show_raw=self.show_raw_lbl_ckb.isChecked(),\n show_locations=self.show_anno_ckb.isChecked(),\n threshold=1 - self.face_match_threshold_dspin.value() / 100)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (self.video_screen.width() - 9, self.video_screen.height() - 9)) # 调整图像大小\n image_height, image_width, image_depth = frame.shape\n frame = QImage(frame.data, image_width, image_height, # 创建QImage格式的图像,并读入图像信息\n image_width * image_depth,\n QImage.Format_RGB888)\n self.video_screen.setPixmap(QPixmap.fromImage(frame))\n # 显示时间\n current_time_process = second2str(data.time_process)\n max_time_process = second2str(maxData.time_process)\n\n self.time_process_label.setText(f\"{current_time_process}/{max_time_process}\")\n except Exception as e:\n print('change_frame', e)\n\n def close(self):\n self.close_source()\n\n def open(self):\n self.open_face_bank()\n\n def open_face_bank(self, bank_name=None):\n try:\n self.student_list.clear()\n bank_name = bank_name if bank_name is not None else self.face_bank_list_cbx.currentText()\n face_bank_dir = os.path.join(face_bank_base_dir, bank_name)\n self.known_face_names, self.known_faces_data = self.get_known_faces_data(face_bank_dir)\n for idx, (name, encoding, face_img) in enumerate(self.known_faces_data):\n FaceListItem(self.student_list,\n face_img,\n encoding,\n name,\n idx,\n face_bank_dir,\n None\n ).add_item()\n except Exception as e:\n print('open_face_bank: ', e)\n\n @staticmethod\n def get_known_faces_data(facebank):\n try:\n known_face_names = os.listdir(facebank) # 读取已经录入的人名\n known_faces_data = [(name, read_encoding_json2npy(\n os.path.join(facebank, name, 'encoding.json')\n ), read_img_from_cn_path(\n os.path.join(facebank, name, 'face.jpg')\n )) for name in known_face_names] # 人名,编码,图片\n\n return known_face_names, known_faces_data\n except Exception as e:\n print('get_known_faces_data', 
e)\n","repo_name":"hongyaohongyao/smart_classroom_demo","sub_path":"smart_classroom/dynamic_attendance_app.py","file_name":"dynamic_attendance_app.py","file_ext":"py","file_size_in_byte":16349,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"54"} +{"seq_id":"74087805920","text":"import cv2\nfrom ultralytics import YOLO\nimport easyocr\nimport sqlite3\nfrom flask import Flask, jsonify, request, send_file, render_template\nfrom PIL import Image\nimport base64\nimport io\nimport numpy as np\n\nmodel = YOLO('yolov8x.pt')\nALPR = YOLO('license_plate_detector.pt')\n\nreader = easyocr.Reader(['en'], gpu=False)\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef root():\n return render_template('index.html')\n \n@app.route(\"/detect\", methods=[\"POST\"])\ndef detect():\n file = request.files[\"image_file\"]\n\n #print(file.stream)\n\n image = np.asarray(bytearray(file.read()), dtype=np.uint8)\n image = cv2.imdecode(image, -1)\n #image = cv2.imread(file.stream)\n\n result = model.predict(image)[0].boxes\n\n classes = result.cls.tolist()\n classes = [round(x) for x in classes]\n\n index = 0\n\n for cls in classes:\n if (cls == 2):\n conf = round(result.conf[index].item(), 2)\n if (conf >= 0.5):\n cords_xyxy = result.xyxy[index].tolist()\n cords_xyxy = [round(x) for x in cords_xyxy]\n\n cv2.rectangle(image, (cords_xyxy[0],cords_xyxy[1]), (cords_xyxy[2],cords_xyxy[3]), (255, 0, 0), 2)\n\n cropped_img = image[cords_xyxy[1]:cords_xyxy[3], cords_xyxy[0]:cords_xyxy[2]]\n\n resultALPR = ALPR.predict(cropped_img)[0].boxes\n\n #classesALPR = resultALPR.cls.tolist()\n #classesALPR = [round(x) for x in classesALPR]\n\n #confALPR = round(resultALPR.conf[0].item(), 2) if resultALPR.conf.nelement() else 0\n\n confsALPR = resultALPR.conf.tolist()\n \n if len(confsALPR) > 0:\n indexALPR = 0\n confsALPR = [round(x, 2) for x in confsALPR]\n if len(confsALPR) > 1:\n indexALPR = confsALPR.index(max(confsALPR))\n confALPR = confsALPR[indexALPR]\n \n if (confALPR >= 0.5):\n cords_xyxyALPR = resultALPR.xyxy[indexALPR].tolist()\n cords_xyxyALPR = [round(x) for x in cords_xyxyALPR]\n\n cropped_img_plate = cropped_img[cords_xyxyALPR[1]:cords_xyxyALPR[3], cords_xyxyALPR[0]:cords_xyxyALPR[2]]\n\n resultOCR = reader.readtext(cropped_img_plate, allowlist = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n\n if len(resultOCR) > 0:\n characterizedResultOCR = \"\".join([x[1] for x in resultOCR])\n cv2.putText(cropped_img, str(characterizedResultOCR), (cords_xyxyALPR[0], cords_xyxyALPR[1]-15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (36,255,12), 2)\n\n cv2.rectangle(cropped_img, (cords_xyxyALPR[0],cords_xyxyALPR[1]), (cords_xyxyALPR[2],cords_xyxyALPR[3]), (255, 0, 0), 2)\n cv2.putText(cropped_img, str(confALPR), (cords_xyxyALPR[0], cords_xyxyALPR[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (36,255,12), 2)\n #cv2.putText(cropped_img, str(resultOCR), (cords_xyxyALPR[0], cords_xyxyALPR[1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (36,255,12), 1)\n\n # indexALPR = 0\n\n # print(\"already cropped\", classesALPR)\n\n # for clsALPR in classesALPR:\n # if clsALPR == 0:\n # confALPR = round(result.conf[indexALPR].item(), 2)\n # if (confALPR >= 0.5):\n # cords_xyxyALPR = resultALPR.xyxy[0].tolist()\n # cords_xyxyALPR = [round(x) for x in cords_xyxyALPR]\n\n # cv2.rectangle(cropped_img, (cords_xyxyALPR[0],cords_xyxyALPR[1]), (cords_xyxyALPR[2],cords_xyxyALPR[3]), (255, 0, 0), 2)\n\n # cropped_img_plate = cropped_img[cords_xyxyALPR[1]:cords_xyxyALPR[3], cords_xyxyALPR[0]:cords_xyxyALPR[2]]\n\n # resultOCR = 
reader.readtext(cropped_img_plate, allowlist = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n # indexALPR += 1\n\n index += 1\n # classesALPR = resultALPR.cls.tolist()\n # classesALPR = [round(x) for x in classesALPR]\n\n #indexALPR = 0\n\n # for clsALPR in classesALPR:\n # if (clsALPR == 0):\n # #confALPR = round(resultALPR.conf[indexALPR].item(), 2)\n # confALPR = round(resultALPR.conf[0].item(), 2) if resultALPR.conf.nelement() else 0\n # if (confALPR >= 0.5):\n # cords_xyxyALPR = resultALPR.xyxy[indexALPR].tolist()\n # cords_xyxyALPR = [round(x) for x in cords_xyxyALPR]\n\n # cv2.rectangle(cropped_img, (cords_xyxyALPR[0],cords_xyxyALPR[1]), (cords_xyxyALPR[2],cords_xyxyALPR[3]), (255, 0, 0), 2)\n\n # cv2.imshow(\"Cropped image\", image) \n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n\n #return \"hi\"\n\n is_success, buffer = cv2.imencode(\".jpg\", image)\n io_buffer = io.BytesIO(buffer)\n\n data = io_buffer.read()\n data = base64.b64encode(data).decode()\n\n return jsonify({\n 'msg': 'success', \n 'size': [image.shape[1], image.shape[0]], \n 'format': \"jpg\",\n 'img': data\n })\n\"\"\"\ncon = sqlite3.connect(\"entrance.db\")\ncur = con.cursor()\nprint(cur.execute(\"SELECT * FROM allowed\").fetchall())\n\nmodel = YOLO('yolov8x.pt')\nALPR = YOLO('license_plate_detector.pt')\n\nresult = model.predict(\"test_many.jpg\")[0]\n\nimg = cv2.imread('test_many.jpg') \n\nreader = easyocr.Reader(['en'], gpu=True)\n\nfor box in result.boxes:\n class_id = result.names[box.cls[0].item()]\n #print(class_id)\n if (class_id == 'car'):\n conf = round(box.conf[0].item(), 2)\n if (conf >= 0.8):\n cords_xyxy = box.xyxy[0].tolist()\n cords_xyxy = [round(x) for x in cords_xyxy]\n\n cv2.rectangle(img, (cords_xyxy[0],cords_xyxy[1]), (cords_xyxy[2],cords_xyxy[3]), (255, 0, 0), 2)\n\n cropped_img = img[cords_xyxy[1]:cords_xyxy[3], cords_xyxy[0]:cords_xyxy[2]]\n\n #cv2.imshow(\"Cropped image\", cropped_img) \n #cv2.waitKey(0)\n\n resultALPR = ALPR.predict(cropped_img)[0]\n\n boxALPR = resultALPR.boxes\n\n confALPR = round(boxALPR.conf[0].item(), 2) if boxALPR.conf.nelement() else 0\n\n if (confALPR > 0.5):\n cords_xyxy_ALPR = boxALPR.xyxy[0].tolist()\n cords_xyxy_ALPR = [round(x) for x in cords_xyxy_ALPR]\n\n cropped_img_plate = cropped_img[cords_xyxy_ALPR[1]:cords_xyxy_ALPR[3], cords_xyxy_ALPR[0]:cords_xyxy_ALPR[2]]\n\n resultOCR = reader.readtext(cropped_img_plate, allowlist = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n if (len(resultOCR)):\n data_from_db = cur.execute(\"SELECT * FROM allowed\")\n characterizedResultOCR = \"\".join([x[1] for x in resultOCR])\n print(characterizedResultOCR)\n for item in data_from_db:\n if (characterizedResultOCR in item):\n print(f\"Access is allowed for {characterizedResultOCR}\")\n break\n\n cv2.rectangle(cropped_img, (cords_xyxy_ALPR[0],cords_xyxy_ALPR[1]), (cords_xyxy_ALPR[2],cords_xyxy_ALPR[3]), (255, 0, 0), 2)\n\n cv2.imshow(\"Cropped image\", cropped_img_plate) \n cv2.waitKey(0)\n\n \n '''\n class_id = result.names[box.cls[0].item()]\n cords = box.xyxy[0].tolist()\n cords = [round(x) for x in cords]\n conf = round(box.conf[0].item(), 2)\n print(\"Object type:\", class_id)\n print(\"Coordinates:\", cords)\n print(\"Probability:\", conf)\n print(\"---\")\n\n cv2.rectangle(img, (cords[0],cords[1]), (cords[2],cords[3]), (255, 0, 0), 2)\n\n '''\n\n#print(model.model.names)\n\n#1262, 243, 1499, 450\n\ncv2.imshow('window_name', img) \ncv2.waitKey(0)\ncv2.destroyAllWindows() 
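Both the live `/detect` route and the commented-out script above follow the same two-stage pattern: a general YOLO model finds vehicles (COCO class 2 = car), a plate detector runs on each vehicle crop, and EasyOCR reads the plate crop. Condensed to its essentials — the model file names and the 0.5 confidence threshold come from the record itself, everything else is a sketch:

```python
import cv2
import easyocr
from ultralytics import YOLO

vehicle_model = YOLO('yolov8x.pt')               # general COCO detector
plate_model = YOLO('license_plate_detector.pt')  # plate-only detector
reader = easyocr.Reader(['en'], gpu=False)


def read_plates(image_path, conf_thresh=0.5):
    """Return OCR'd text for every confidently detected plate in the image."""
    img = cv2.imread(image_path)
    plates = []
    for box in vehicle_model.predict(img)[0].boxes:
        if int(box.cls[0]) != 2 or float(box.conf[0]) < conf_thresh:
            continue  # not a car, or detection too weak
        x1, y1, x2, y2 = (int(v) for v in box.xyxy[0])
        car = img[y1:y2, x1:x2]
        for pbox in plate_model.predict(car)[0].boxes:
            if float(pbox.conf[0]) < conf_thresh:
                continue
            px1, py1, px2, py2 = (int(v) for v in pbox.xyxy[0])
            text = reader.readtext(car[py1:py2, px1:px2],
                                   allowlist='ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
            if text:
                plates.append(''.join(t[1] for t in text))
    return plates
```

Cropping the car first and running the plate model on that crop keeps the OCR input small and mostly plate-shaped, which matters far more to EasyOCR accuracy than the detector's raw confidence score.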
\n\"\"\"","repo_name":"breadx333/ALPR-system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74884004320","text":"import math\r\nimport mysql.connector as conct\r\n\r\nusername = \"\"\r\npassword = \"\"\r\n\r\nmysql = conct(host=\"localhost\", passwrd=password, database=\"root\", user=username)\r\n\r\n# admin functions\r\ndef removebc():\r\n bc = int(input('Enter Book Code: '))\r\n # Change table name and attribute name accordingly#\r\n d = 'delete from library where INBI=%S;'\r\n data = (bc)\r\n cursor = mysql.cursor() # Change the object name accordingly#\r\n cursor.execute(d, data)\r\n cursor.commit\r\n print('----------------------- BOOK REMOVED SUCCESSFULLY ------------------------------')\r\n\r\n\r\ndef add():\r\n bn = input(\"Enter Book Name: \")\r\n bc = int(input(\"Enter Book INBI number: \"))\r\n an = input(\"Enter Author's Name: \")\r\n rp = int(input(\"Enter Rental Price: \"))\r\n # (\"Name\", \"INBI number\", \"Author\", \"Rent Price\", \"Available Stock\")\r\n t = int(input(\"Enter Total Number Of Books: \"))\r\n \r\n data = (bn, bc, an, rp, t)\r\n\r\n insert = 'insert into library values (%S,%S,%S,%S,%S);'\r\n cursor = mysql.cursor() # Change the object name accordingly#\r\n cursor.execute(insert, data)\r\n mysql.commit\r\n print('------------------------- BOOK ADDED SUCCESSFULLY ------------------------------')\r\n\r\n\r\ndef updt():\r\n bn = input(\"Enter Book name: \")\r\n np = int(input(\"Enter Revised Price: \"))\r\n # Change table name and attribute name accordingly#\r\n q = 'update library set RPrice=%S where BookName=%S;'\r\n data = (np, bn)\r\n cursor = mysql.cursor() # Change the object name accordingly#\r\n cursor.execute(q, data)\r\n cursor.commit\r\n print(\"----------------------- PRICE UPDATED SUCCESSFULLY -----------------------------\")\r\n\r\n\r\ndef mainadmn():\r\n while True:\r\n print(\"\\n================================================================================\")\r\n print(\"======================= L I B R A R Y M A N A G E R ============================\")\r\n print(\"=============================== A D M I N ======================================\")\r\n\r\n print(\"1. Show Books\")\r\n print(\"2. Add Book\")\r\n print(\"3. Remove Book\")\r\n print(\"4. Availability\")\r\n print(\"5. Update Rental Price\")\r\n print(\"6. Return\")\r\n\r\n T = int(input('Enter Task Number: '))\r\n if T == 1:\r\n DisplayList()\r\n elif T == 2:\r\n add()\r\n elif T == 3:\r\n removebc()\r\n elif T == 4:\r\n avai()\r\n elif T == 5:\r\n updt()\r\n elif T == 6:\r\n main() # Assuming main is the menu where you can choose between admin login and customer login#\r\n else:\r\n print(\"\\n-ERROR-\")\r\n\r\n\r\n# user functions\r\ndef userAdmn():\r\n while True:\r\n\r\n print(\"\\n================================================================================\")\r\n print(\"============================= W E L C O M E ====================================\")\r\n print(\"=============================== U S E R ========================================\")\r\n print(\"1. Show Books\")\r\n print(\"2. Check Availability of a Book\")\r\n print(\"3. Rent book\")\r\n print(\"4. 
Return\")\r\n T = int(input('Enter Task Number: '))\r\n if T == 1:\r\n DisplayList()\r\n elif T == 2:\r\n avai()\r\n elif T == 3:\r\n rent()\r\n elif T == 4:\r\n main() # Assuming main is the menu where you can choose between admin login and customer login#\r\n else:\r\n print(\"\\n-ERROR-\")\r\n\r\n\r\ndef rent():\r\n bn = input(\"Enter Book name: \")\r\n cursor = mysql.cursor() # Change the object name accordingly#\r\n\r\n q = 'from library select Total where BookName=%S'\r\n cursor.execute(q, bn)\r\n t = int(cursor.fetchone())\r\n\r\n if t > 1:\r\n q = 'from library select RPrice where BookName=%S'\r\n cursor.execute(q, bn)\r\n r = int(cursor.fetchone())\r\n\r\n print(\"The rental price for three weeks of \", bn, \"is Rs\", r)\r\n print(\"1. Rent book\\n2. Cancel\")\r\n T = input(\"Enter Task number: \")\r\n\r\n if T == 1:\r\n q = 'update library set Total=%S where BookName=%S;'\r\n data = (t-1, bn)\r\n cursor.execute(q, data)\r\n print(\r\n \"---------------- You're book will be delivered to you're address ---------------\")\r\n print(\r\n \"----------- Please return in 3 weeks or the penalty is Rs.30 per day -----------\")\r\n else:\r\n print(\r\n \"------------------------------- cancelled --------------------------------------\")\r\n else:\r\n print(\"-------------------------------- Book not available ----------------------------\")\r\n\r\n\r\n# common functions\r\ndef avai():\r\n bn = input(\"Enter book name\")\r\n s = 'select Total from library where BookName=%S;'\r\n cursor = mysql.cursor() # Change the object name accordingly#\r\n cursor.execute(s, bn)\r\n cursor.commit\r\n data = cursor.fetchall()\r\n print(\"Number of available book is: \",data)\r\n input('\\n-PRESS ENTER TO CONTINUE-')\r\n\r\n\r\ndef DisplayList():\r\n\r\n head = (\"Name\", \"INBI number\", \"Author\", \"Rent Price\", \"Available Stock\")\r\n s = 'select * from library;'\r\n cursor = mysql.cursor()\r\n cursor.execute(s)\r\n r = cursor.fetchall()\r\n queryValue = (r) # the table query should be stored in this\r\n w = []\r\n heading = list(head)\r\n queryRes = []\r\n\r\n for i in range(0, len(queryValue)):\r\n queryRes.append(list(queryValue[i]))\r\n\r\n if(len(queryRes) == 0):\r\n print(\"-Error-\")\r\n return\r\n\r\n # To get total row width\r\n for i in range(0, len(heading)):\r\n lst = [heading[i], str(queryRes[0][i]), str(\r\n queryRes[1][i]), str(queryRes[2][i])]\r\n w.append(len(max(lst, key=len)))\r\n\r\n # to Print top line\r\n print(\"+\", end=\"\")\r\n for i in range(sum(w)+(3*len(heading)-1)-1):\r\n print(\"-\", end=\"\")\r\n print(\"-+\")\r\n\r\n # test heading print\r\n print(\"\", end=\"| \")\r\n for i in range(len(heading)):\r\n startLoc = math.ceil(w[i]/2) - math.floor(len(str(heading[i]))/2) - 1\r\n if startLoc < 0:\r\n startLoc = 0\r\n for j in range(w[i]-len(heading[i])+1):\r\n if(j == startLoc):\r\n print(heading[i], end=\" \")\r\n else:\r\n print(end=\" \")\r\n print(\"| \", end=\"\")\r\n print()\r\n\r\n # to print heading and other row seperator line\r\n print(\"\", end=\"+-\")\r\n for i in range(0, len(heading)):\r\n for j in range(w[i]):\r\n print(\"-\", end=\"\")\r\n if (i != len(heading)-1):\r\n print(end=\"-+-\")\r\n else:\r\n print(end=\"-+\")\r\n print()\r\n\r\n # to print other row\r\n for l in range(len(queryRes)):\r\n row = queryRes[l]\r\n print(\"\", end=\"| \")\r\n for i in range(len(row)):\r\n\r\n startLoc = math.ceil(w[i]/2) - math.floor(len(str(row[i]))/2) - 1\r\n if startLoc < 0:\r\n startLoc = 0\r\n for j in range(w[i]-len(str(row[i])) + 1):\r\n if(j == startLoc):\r\n 
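A caution on this record's database code: it calls the `mysql.connector` module object directly (`conct(host=...)` instead of `conct.connect(...)`), passes an invalid `passwrd` keyword, uses uppercase `%S` placeholders, hands `execute()` bare values rather than sequences, and references `cursor.commit` without parentheses — and commits belong to the connection anyway. A corrected fragment, keeping the record's table and column names but with assumed credentials:

```python
import mysql.connector

# credentials here are placeholders
conn = mysql.connector.connect(host='localhost', user='root',
                               password='secret', database='librarydb')


def remove_book(inbi):
    cur = conn.cursor()
    # lowercase %s placeholder, and parameters as a one-element tuple
    cur.execute('DELETE FROM library WHERE INBI = %s;', (inbi,))
    conn.commit()  # commit on the connection, and actually call it


def update_price(book_name, new_price):
    cur = conn.cursor()
    cur.execute('UPDATE library SET RPrice = %s WHERE BookName = %s;',
                (new_price, book_name))
    conn.commit()
```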
print(str(row[i]), end=\" \")\r\n else:\r\n print(end=\" \")\r\n print(\"| \", end=\"\")\r\n print()\r\n\r\n # to Print bottom line\r\n print(\"+\", end=\"\")\r\n for i in range(sum(w)+(3*len(heading)-1)-1):\r\n print(\"-\", end=\"\")\r\n print(\"-+\")\r\n\r\n\r\ndef signup():\r\n u = input(\"\\nEnter username: \")\r\n p = input(\"Enter password: \")\r\n\r\n if (u == username and p == password):\r\n mainadmn()\r\n else:\r\n print(\"\\n-Incorrect username or password-\")\r\n\r\n\r\n#main function\r\ndef main():\r\n while True:\r\n\r\n print(\"\\n================================================================================\")\r\n print(\" W E L C O M E T O L I B R A R Y \")\r\n print(\"1. Login as admin\")\r\n print(\"2. Login as user\")\r\n T = int(input('Enter Task Number: '))\r\n if T == 1:\r\n signup()\r\n elif T == 2:\r\n userAdmn()\r\n else:\r\n print(\"\\n-ERROR-\")\r\n\r\nmain() #calling main function\r\n","repo_name":"dibyansh850/Class_12_Project","sub_path":"LIB.py","file_name":"LIB.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24233345214","text":"import api.scripts\nfrom api.api_app.models import Task\nimport random, string\n\n\ndef randomword(length):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for _ in range(length))\n\n\nif __name__ == '__main__':\n for i in range(random.randint(1000, 10000)):\n model = Task(title=randomword(100), completed=bool(random.randint(0, 1)))\n model.save()\n print(\"everything is done successfully!\")\n","repo_name":"zhassbala/todo-api","sub_path":"tests/add_elements_to_db.py","file_name":"add_elements_to_db.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37284394072","text":"# -*- coding: utf8 -*-\nfrom Plugins.Extensions.IPTVPlayer.tools.iptvtools import printDBG\n#from Plugins.Extensions.IPTVPlayer.tools.iptvtypes import strwithmeta\nfrom Plugins.Extensions.IPTVPlayer.libs.e2ijson import loads as json_loads\nfrom Plugins.Extensions.IPTVPlayer.tsiplayer.libs.tstools import TSCBaseHostClass,resolve_liveFlash,tshost\nfrom Plugins.Extensions.IPTVPlayer.libs import ph\nimport re\n\ndef getinfo():\n\tinfo_={}\n\tname = 'Arembed'\n\thst = tshost(name)\t\n\tif hst=='': hst = 'http://c247.to'\n\tinfo_['host']= hst\n\tinfo_['name']=name\n\tinfo_['version']='1.0 17/06/2019'\n\tinfo_['dev']='RGYSoft'\n\tinfo_['cat_id']='100'#2\n\tinfo_['desc']='Live Bein Sports'\n\tinfo_['icon']='http://c247.to/embed_logo/Bein%20Sports%201%20to%204.png'\n\tinfo_['recherche_all']='0'\n\treturn info_\n\nclass TSIPHost(TSCBaseHostClass):\n\tdef __init__(self):\n\t\tTSCBaseHostClass.__init__(self,{'cookie':'coolkora.cookie'})\n\t\tself.MAIN_URL = getinfo()['host']\n\t\tself.USER_AGENT = 'Mozilla/5.0 (Android 4.4; Mobile; rv:41.0) Gecko/41.0 Firefox/41.0'\n\t\tself.HEADER = {'User-Agent': self.USER_AGENT, 'Connection': 'keep-alive', 'Accept-Encoding':'gzip', 'Content-Type':'application/x-www-form-urlencoded','Referer':self.getMainUrl(), 'Origin':self.getMainUrl()}\n\t\tself.defaultParams = {'header':self.HEADER, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE}\n\t\tself.getPage = self.cm.getPage\n\n\t\n\t\t\n\tdef showmenu(self,cItem):\n\t\tsts,data=self.getPage(self.MAIN_URL)\n\t\timage=self.MAIN_URL+'/embed_logo/Bein%20Sports%201%20to%204.png'\n\t\tif 
sts:\n\t\t\tprintDBG('dddddddd'+data)\t\t\n\t\t\tfilms_list = re.findall('
key > self.maxkeyvalue:\n logger.debug('Please enter a key in range 0-255')\n return False\n\n if self.get(key) != None and self.get(key) != False:\n if self.KeyValueDict[key][timestamp] > newtimestamp:\n return True\n\n if WriteLog:\n try:\n logfilehandle = open(self.logfilename, \"a\")\n logfilehandle.write(str(key) + \"::\" + newvalue+ \"::\" + str(newtimestamp) + \"\\n\")\n logfilehandle.close()\n except IOError:\n logger.error(\"log file named\" + self.logfilename + \" not found!\")\n return False\n\n self.KeyValueDict[key] = []\n self.KeyValueDict[key].insert(value,newvalue)\n self.KeyValueDict[key].insert(timestamp, newtimestamp)\n return True\n def sendMessageService(self, data, replica, time = int(time.time())):\n if 1 in replica:\n self.sendMessage(data,firstreplica, time)\n if 2 in replica:\n self.sendMessage(data,secondreplica, time)\n if 3 in replica:\n self.sendMessage(data,thirdreplica, time)\n if 4 in replica:\n self.sendMessage(data,fourthreplica, time)\n\n def sendMessage(self,data,replicaname,replicatimestamp):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host, port = replicaList[replicaname][1], int(replicaList[replicaname][2])\n sock.connect((host, port))\n putrequestmessage = keyValue_pb2.ReplicaPutRequest()\n putrequestmessage.key, putrequestmessage.id = data.clientputrequest.key, int(nodeName[0])\n putrequestmessage.value, putrequestmessage.timestamp = data.clientputrequest.value, replicatimestamp\n message = keyValue_pb2.KeyValueMessage()\n message.replicaputrequest.CopyFrom(putrequestmessage)\n sock.sendall(pickle.dumps(message))\n sock.close()\n return False\n except:\n return None\n def get(self, key):\n if key not in self.KeyValueDict:\n return False\n\n return self.KeyValueDict[key]\n\n def sendupdaterequestreplica(self,originalreplicaname,originalkey,originalvalue,sendupdatetimestamp):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = replicaList[originalreplicaname][1]\n port = int(replicaList[originalreplicaname][2])\n sock.connect((host, port ))\n putrequestmessage = keyValue_pb2.ReplicaPutRequest()\n logger.debug(\"Read Repair done on ->\" + replicaList[originalreplicaname][0])\n #logger.debug(originalreplicaname)\n putrequestmessage.id, putrequestmessage.key, putrequestmessage.value, putrequestmessage.timestamp = int(nodeName[0]), originalkey, originalvalue, sendupdatetimestamp\n message = keyValue_pb2.KeyValueMessage()\n message.replicaputrequest.CopyFrom(putrequestmessage)\n sock.sendall(pickle.dumps(message))\n sock.close()\n except:\n return None\n\n def sendgetrequestreplica(self,getrequestreplicaname,key):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host , port = replicaList[getrequestreplicaname][1], int(replicaList[getrequestreplicaname][2])\n sock.connect((host , port))\n getrequestmessage = keyValue_pb2.ReplicaGetRequest()\n getrequestmessage.id, getrequestmessage.key, getrequestmessage.timestamp, message = 1, key, 1, keyValue_pb2.KeyValueMessage()\n message.replicagetrequest.CopyFrom(getrequestmessage)\n sock.sendall(pickle.dumps(message))\n data = pickle.loads(sock.recv(10000))\n sock.close()\n return data\n except:\n logger.debug(\"\")\n\n def checkts(self,data1,data2,data3):\n if(data1.replicaresponse.timestamp >= data2.replicaresponse.timestamp and data1.replicaresponse.timestamp >= data3.replicaresponse.timestamp):\n return 1\n if(data2.replicaresponse.timestamp >= data1.replicaresponse.timestamp and data2.replicaresponse.timestamp >= 
data3.replicaresponse.timestamp):\n return 2\n if(data3.replicaresponse.timestamp >= data2.replicaresponse.timestamp and data3.replicaresponse.timestamp >= data1.replicaresponse.timestamp):\n return 3\n \n def readrepairhandle(self,data,clientkey):\n logger.debug( \"inside read repair replica handle\")\n #first replica functioning\n if(int(firstreplica) == int(nodeName[0])):\n return self.firstreadrepairreplicahandler(data,clientkey)\n #second replica functioning\n elif(int(secondreplica) == int(nodeName[0])):\n return self.secondreadrepairreplicahandler(data,clientkey)\n #third replica functioning\n elif(int(thirdreplica) == int(nodeName[0])):\n return self.thirdreadrepairreplicahandler(data,clientkey)\n #fourth replica functioning\n elif(int(fourthreplica) == int(nodeName[0])):\n return self.fourthreadrepairreplicahandler(data,clientkey)\n\n def firstreadrepairreplicahandler(self,data,clientkey): \n logger.debug( \"inside first read repair replica handler\") \n if(0 <= clientkey <= 63) :\n return self.handleputreplica(0,secondreplica,thirdreplica,clientkey)\n elif(64 <= clientkey <= 127) :\n return self.handleputreplica(secondreplica,thirdreplica,fourthreplica,clientkey)\n elif(128 <= clientkey <= 191) :\n return self.handleputreplica(0,thirdreplica,fourthreplica,clientkey)\n elif(192 <= clientkey <= 255) :\n return self.handleputreplica(0,secondreplica,fourthreplica,clientkey)\n \n def secondreadrepairreplicahandler(self,data,clientkey):\n logger.debug( \"inside second read repair replica handler\")\n if(0 <= clientkey <= 63) :\n return self.handleputreplica(0,firstreplica,thirdreplica,clientkey)\n elif(64 <= clientkey <= 127) :\n return self.handleputreplica(0,thirdreplica,fourthreplica,clientkey)\n elif(128 <= clientkey <= 191) :\n return self.handleputreplica(firstreplica,thirdreplica,fourthreplica,clientkey)\n elif(192 <= clientkey <= 255) :\n return self.handleputreplica(0,firstreplica,fourthreplica,clientkey)\n \n def thirdreadrepairreplicahandler(self,data,clientkey):\n logger.debug( \"inside third read repair replica handler\")\n if(0 <= clientkey <= 63) :\n return self.handleputreplica(0,firstreplica,secondreplica,clientkey)\n elif(64 <= clientkey <= 127) :\n return self.handleputreplica(0,firstreplica,thirdreplica,clientkey)\n elif(128 <= clientkey <= 191) :\n return self.handleputreplica(0,firstreplica,fourthreplica,clientkey)\n elif(192 <= clientkey <= 255) :\n return self.handleputreplica(firstreplica,secondreplica,fourthreplica,clientkey)\n\n def fourthreadrepairreplicahandler(self,data,clientkey):\n logger.debug( \"inside fourth read repair replica handler\")\n if(0 <= clientkey <= 63) :\n return self.handleputreplica(firstreplica,secondreplica,thirdreplica,clientkey)\n elif(64 <= clientkey <= 127) :\n return self.handleputreplica(0,secondreplica,thirdreplica,clientkey)\n elif(128 <= clientkey <= 191) :\n return self.handleputreplica(0,firstreplica,thirdreplica,clientkey)\n elif(192 <= clientkey <= 255) :\n return self.handleputreplica(0,firstreplica,secondreplica,clientkey)\n\n def handleputreplica(self,replica1,replica2,replica3,clientkey):\n if(replica1 == 0):\n if clientkey not in self.KeyValueDict:\n logger.debug('Exception: Key do not exists on any replica')\n return\n responce1ts = self.KeyValueDict[clientkey][timestamp]\n logger.debug( self.KeyValueDict[clientkey][value])\n readrepairlist = {}\n data2 = self.sendgetrequestreplica(replica2,clientkey)\n logger.debug( data2)\n data3 = self.sendgetrequestreplica(replica3,clientkey)\n #logger.debug( 
data3.replicaresponse.value)\n check = \"\"\n if(data2 is None and data3 is None):\n logger.debug('Exception: Cannot connect to any Replica')\n return\n \n #logger.debug( data3.replicaresponse.timestamp)\n if data2 is not None:\n if data2.replicaresponse.timestamp > self.KeyValueDict[clientkey][timestamp]:\n self.setDictionary(clientkey,data2.replicaresponse.value,data2.replicaresponse.timestamp)\n if self.KeyValueDict[clientkey][timestamp] > data2.replicaresponse.timestamp:\n self.sendupdaterequestreplica(replica2,clientkey,self.KeyValueDict[clientkey][value] ,self.KeyValueDict[clientkey][timestamp])\n if data3 is not None:\n if data3.replicaresponse.timestamp > self.KeyValueDict[clientkey][timestamp]:\n self.setDictionary(clientkey,data3.replicaresponse.value,data3.replicaresponse.timestamp)\n if self.KeyValueDict[clientkey][timestamp] > data3.replicaresponse.timestamp:\n self.sendupdaterequestreplica(replica3,clientkey,self.KeyValueDict[clientkey][value] ,self.KeyValueDict[clientkey][timestamp])\n return self.KeyValueDict[clientkey][value]\n \n elif replica1 != 0 :\n data1 = self.sendgetrequestreplica(replica1,clientkey)\n logger.debug( data1)\n \n data2 = self.sendgetrequestreplica(replica2,clientkey)\n logger.debug( data2)\n \n data3 = self.sendgetrequestreplica(replica3,clientkey)\n logger.debug( data3)\n checkvalue = self.checkts(data1,data2,data3)\n logger.debug( checkvalue)\n if(checkvalue == 1):\n logger.debug( \"inside check value\")\n self.sendupdaterequestreplica(replica2,clientkey,data1.replicaresponse.value,data1.replicaresponse.timestamp)\n self.sendupdaterequestreplica(replica3,clientkey,data1.replicaresponse.value,data1.replicaresponse.timestamp)\n if(checkvalue == 2):\n self.sendupdaterequestreplica(replica1,clientkey,data2.replicaresponse.value,data2.replicaresponse.timestamp)\n self.sendupdaterequestreplica(replica3,clientkey,data2.replicaresponse.value,data2.replicaresponse.timestamp)\n if(checkvalue == 3):\n self.sendupdaterequestreplica(replica1,clientkey,data3.replicaresponse.value,data3.replicaresponse.timestamp)\n self.sendupdaterequestreplica(replica2,clientkey,data3.replicaresponse.value,data3.replicaresponse.timestamp) \n return data1.replicaresponse.value\n\n def getvaluefromowner(self,data,clientkey):\n logger.debug( \"inside getvaluefromowner\")\n return self.readrepairhandle(data,clientkey)\n\n \n def clientputrequesthandler(self,data,clientsocket):\n logger.debug( \"inside client put request handle\")\n #first replica functioning\n if(int(firstreplica) == int(nodeName[0])):\n return self.firstclientputrequesthandler(data,clientsocket)\n #second replica functioning\n elif(int(secondreplica) == int(nodeName[0])):\n return self.secondclientputrequesthandler(data,clientsocket)\n #third replica functioning\n elif(int(thirdreplica) == int(nodeName[0])):\n return self.thirdclientputrequesthandler(data,clientsocket)\n #fourth replica functioning\n elif(int(fourthreplica) == int(nodeName[0])):\n return self.fourthclientputrequesthandler(data,clientsocket)\n\n def firstclientputrequesthandler(self,data,clientsocket):\n logger.debug(\"inside first client put request handler\")\n if(0 <= data.clientputrequest.key <= 63) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[2,3],self.KeyValueDict[data.clientputrequest.key][1])\n logger.debug( \"a\")\n elif(64 <= data.clientputrequest.key <= 127) :\n self.sendMessageService(data,[2,3,4],int(time.time()))\n\n elif(128 <= data.clientputrequest.key 
<= 191) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[3,4],self.KeyValueDict[data.clientputrequest.key][1])\n\n elif(192 <= data.clientputrequest.key <= 255) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[2,4],self.KeyValueDict[data.clientputrequest.key][1])\n\n\n def secondclientputrequesthandler(self,data,clientsocket):\n logger.debug(\"inside second client put request handler\")\n if(0 <= data.clientputrequest.key <= 63) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[1,3],self.KeyValueDict[data.clientputrequest.key][1])\n\n logger.debug( \"a\")\n elif(64 <= data.clientputrequest.key <= 127) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[3,4],self.KeyValueDict[data.clientputrequest.key][1])\n\n elif(128 <= data.clientputrequest.key <= 191) :\n self.sendMessageService(data,[1,3,4],int(time.time()))\n\n elif(192 <= data.clientputrequest.key <= 255) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[1,4],self.KeyValueDict[data.clientputrequest.key][1])\n\n\n def thirdclientputrequesthandler(self,data,clientsocket):\n logger.debug(\"inside third client put request handler\")\n if(0 <= data.clientputrequest.key <= 63) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[1,2],self.KeyValueDict[data.clientputrequest.key][1])\n\n logger.debug( \"a\")\n elif(64 <= data.clientputrequest.key <= 127) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[2,4],self.KeyValueDict[data.clientputrequest.key][1])\n\n elif(128 <= data.clientputrequest.key <= 191) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[1,4],self.KeyValueDict[data.clientputrequest.key][1])\n\n elif(192 <= data.clientputrequest.key <= 255) :\n self.sendMessageService(data,[1,2,4],int(time.time()))\n\n\n def fourthclientputrequesthandler(self,data,clientsocket):\n logger.debug(\"inside fourth client put request handler\")\n if(0 <= data.clientputrequest.key <= 63) :\n self.sendMessageService(data,[1,2,3],int(time.time()))\n\n elif(64 <= data.clientputrequest.key <= 127) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[2,3],self.KeyValueDict[data.clientputrequest.key][1])\n\n elif(128 <= data.clientputrequest.key <= 191) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[1,3],self.KeyValueDict[data.clientputrequest.key][1])\n\n elif(192 <= data.clientputrequest.key <= 255) :\n self.setDictionary(data.clientputrequest.key,data.clientputrequest.value,int(time.time()))\n self.sendMessageService(data,[1,2],self.KeyValueDict[data.clientputrequest.key][1])\n\n \n def replicahandle(self,data,clientsocket):\n self.logfilename = \"writelog\"+\"-\"+ str(nodeName[0])\n logger.debug( self.logfilename)\n if not os.path.exists(self.logfilename):\n logfilehandle = open(self.logfilename, \"w+\")\n else:\n logfilehandle = open(self.logfilename, \"r\")\n for line in logfilehandle:\n split = 
line.split('::')\n a,b, c = int(split[0]), split[1], int(split[2])\n self.setDictionary(a,b,c, False)\n \n if data.HasField(\"clientputrequest\") :\n logger.debug( \"inside client put\")\n logger.debug( self.KeyValueDict)\n thread = Thread(target = self.clientputrequesthandler(data,clientsocket))\n thread.daemon = True\n thread.start()\n\n if data.HasField(\"replicaputrequest\") :\n logger.debug( data)\n self.setDictionary(data.replicaputrequest.key,data.replicaputrequest.value,data.replicaputrequest.timestamp)\n logger.debug( self.KeyValueDict)\n\n if data.HasField(\"clientgetrequest\") :\n clientkey, clientid = data.clientgetrequest.key, data.clientgetrequest.id\n \n #return value for particular key\n returnval = self.getvaluefromowner(data,clientkey)\n\n # Start packing response to client\n ownerResponcemsg = keyValue_pb2.ClientResponse()\n ownerResponcemsg.key, ownerResponcemsg.id = clientkey, clientid\n\n if returnval is None or returnval == \"None\" :\n ownerResponcemsg.status, ownerResponcemsg.value = False, \"None\"\n else:\n ownerResponcemsg.status, ownerResponcemsg.value = True, returnval\n \n clientResponcemsg = keyValue_pb2.KeyValueMessage()\n clientResponcemsg.clientresponse.CopyFrom(ownerResponcemsg)\n try:\n clientsocket.sendall(pickle.dumps(clientResponcemsg))\n except:\n logger.error( \"ERROR ! socket exception while sending get val response to client\")\n\n if data.HasField(\"replicagetrequest\") :\n \n resp_mesg = keyValue_pb2.ReplicaResponse()\n replicaResponcemsg = keyValue_pb2.KeyValueMessage()\n resp_mesg.nodeid = nodeName[0]\n resp_mesg.key, resp_mesg.id = data.replicagetrequest.key, data.replicagetrequest.id\n if data.replicagetrequest.key in self.KeyValueDict:\n resp_mesg.timestamp = self.KeyValueDict[data.replicagetrequest.key][timestamp]\n resp_mesg.value = self.KeyValueDict[data.replicagetrequest.key][value]\n resp_mesg.status = True\n else:\n resp_mesg.value = \"None\"\n resp_mesg.status = False\n\n replicaResponcemsg.replicaresponse.CopyFrom(resp_mesg)\n\n try:\n clientsocket.sendall(pickle.dumps(replicaResponcemsg))\n except:\n logger.error( \"Exception: socker error while sending get val response to client\")\n\ndef startTheServer(serversocket):\n while True:\n (clientsocket, address) = serversocket.accept()\n data = pickle.loads(clientsocket.recv(1024))\n replica().replicahandle(data,clientsocket)\ndef readFromTheBlog():\n try:\n logger.debug(sys.argv[3])\n with open(str(sys.argv[3])) as file:\n for line in file:\n replicadata = line.strip().split(\" \")\n nodeNumber = re.findall('\\d+',replicadata[0].strip())\n replicalisttemp[int(nodeNumber[0])] = [replicadata[0].strip(), replicadata[1].strip() , replicadata[2].strip() ]\n except:\n logger.error(\"ERROR ! 
Not able to read input file, please check the format\")\n sys.exit(0)\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n logger.debug(\"Arguments Error: Try entering \")\n sys.exit(0)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n hostname = socket.gethostbyname(socket.gethostname())\n port = int(sys.argv[2])\n sock.bind((hostname, port))\n sock.listen(5)\n replicalisttemp = {}\n logger.debug(\"\\nListening on \"+str(hostname)+\":\"+ str(port))\n if not os.path.exists(sys.argv[3]):\n logger.error(\"ERROR: Input file isn't available\")\n sys.exit(0)\n else:\n readFromTheBlog()\n\n for key in sorted(replicalisttemp.keys()):\n replicaList[key] = replicalisttemp[key]\n firstreplica, secondreplica, thirdreplica, fourthreplica = list(replicaList.keys())[0], list(replicaList.keys())[1], list(replicaList.keys())[2], list(replicaList.keys())[3]\n print(\"replica list ->\", replicaList)\n nodeName = re.findall('\\d+',sys.argv[1].strip())\n startTheServer(sock)","repo_name":"kamal333k/distributed-key-value-system","sub_path":"coordinator.py","file_name":"coordinator.py","file_ext":"py","file_size_in_byte":21540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31006168100","text":"from PyQt5.QtWidgets import QWidget\nfrom PyQt5 import QtCore, uic\n\n#from components.table_locacoes import TableLocacao\nfrom clas.locacao import Locacao as Loc\n\nimport models.model_reserva as Reservas\nimport models.model_plano as Planos\n\nclass CadLocacao(QWidget):\n def __init__(self, reservaAtual, winReserva):\n super().__init__()\n uic.loadUi(\"ui/ui_locacao.ui\", self)\n\n self.reservaAtual = reservaAtual\n self.winReserva = winReserva\n \n \"\"\"self.table = TableLocacao(self)\n self.verticalLayout_2.addWidget(self.table)\"\"\"\n\n self.locacaoAtual = None\n self.lista_reservas = []\n\n self.setEventos()\n\n self.setEventosCheckBox()\n\n self.campId_reserva.setText(str(self.reservaAtual.id))\n\n def setEventos(self):\n self.b_locar.clicked.connect(self.addLocacao)\n self.b_limpar.clicked.connect(self.limparCampos)\n self.b_ok.clicked.connect(self.carregaDadosPlanos)\n\n def carregaDadosReservas(self):\n self.lista_reservas = Reservas.getReservas()\n lista_combo = []\n for r in self.lista_reservas:\n lista_combo.append(str(r.id))\n self.campId.addItems(lista_combo)\n\n def carregaDadosPlanos(self, tipoLoc):\n tipoLoc = self.winReserva.comboTipos.currentText()\n self.lista_planos = Planos.getPlanosCateg(tipoLoc)\n for p in self.lista_planos:\n valorkm = p.valorKmEst\n self.calculaKm(valorkm)\n\n def setEventosCheckBox(self):\n self.ch_mec.stateChanged.connect(self.checkBoxSeg)\n self.ch_perdaT.stateChanged.connect(self.checkBoxSeg)\n self.ch_furto.stateChanged.connect(self.checkBoxSeg)\n self.cb_guincho.stateChanged.connect(self.checkBoxSer)\n self.cb_gps.stateChanged.connect(self.checkBoxSer)\n self.cb_cadeirinha.stateChanged.connect(self.checkBoxSer)\n\n def addLocacao(self):\n novoLocacao = self.getLocacao()\n if novoLocacao != None:\n if self.locacaoAtual == None:\n self.table.add(novoLocacao)\n else:\n novoLocacao.id = self.locacaoAtual.id\n self.table.update(novoLocacao)\n self.limparCampos()\n\n def getLocacao(self):\n id_res = self.campId_reserva.text()\n kmAtual = self.campKmInic.text()\n kmEstim = self.campKmEstim.text()\n #self.campKmEstim.returnPressed.connect(self.calculaKm)\n status = self.comboStatus.currentText()\n\n if ((id_res !=\"\") and (kmAtual != 
\"\") and (kmEstim != \"\") and (status != \"\")):\n return Loc (-1, self.campId_reserva.text(), self.campKmInic.text(), self.campKmEstim.text(), self.comboStatus.currentText())\n return None\n\n def limparCampos(self):\n self.locacaoAtual = None\n self.campId_reserva.setText(\"\")\n self.campKmInic.setText(\"\")\n self.campKmEstim.setText(\"\")\n self.comboStatus.setCurrentText(\"\")\n\n self.b_locar.setText(\"Confirmar\")\n self.b_limpar.setEnabled(False)\n self.campId_reserva.setEnabled(True)\n\n def insereLocacao(self, locacao):\n self.locacaoAtual = locacao\n self.campId_reserva.setText(str(locacao.id_res))\n self.campKmInic.setText(str(locacao.kmAtual))\n self.campKmEstim.setText(str(locacao.kmEstim))\n self.comboStatus.setCurrentText(locacao.status)\n\n self.b_locar.setText(\"Atualizar\")\n self.b_limpar.setEnabled(True)\n self.campId_reserva.setEnabled(False)\n\n def calculaKm(self, valor):\n pagarKm = 0.0\n kmEstim = self.campKmEstim.text()\n if valor == \"\":\n valor == 0.0\n else:\n valor == float(valor)\n\n if kmEstim == \"\":\n kmEstim == 0.0\n else: \n kmEstim == float(kmEstim)\n\n pagarKm = float(valor) * float(kmEstim)\n\n self.campValor.setText(str(\"%.2f\" %pagarKm))\n self.calculaPagar(pagarKm)\n\n def calculaPagar(self, pagarKm):\n valorPag = 0.0\n valorIn = self.winReserva.campValorP.text()\n if valorIn == \"\":\n valorIn == 0.0\n else:\n valorIn == float(valorIn)\n if valorPag == \"\":\n valorPag == 0.0\n else:\n valorPag == (float(valorPag))\n\n valorPag = pagarKm + float(valorIn)\n self.campPagar.setText(str(\"%.2f\" %valorPag))\n\n def checkBoxSeg(self):\n if self.ch_mec.isChecked():\n print(\"Marcou Mecânico\")\n\n if self.ch_perdaT.isChecked():\n print(\"Marcou perda Total\")\n\n if self.ch_furto.isChecked():\n print(\"Marcou Furto\")\n\n def checkBoxSer(self, state):\n if self.cb_guincho.isChecked:\n print(\"Marcou guincho\")\n\n if self.cb_gps.isChecked:\n print(\"Marcou gps\")\n \n if self.cb_cadeirinha.isChecked:\n print(\"Marcou cadeirinha\")","repo_name":"adaiasreis/Projeto","sub_path":"layouts/ui_locacao.py","file_name":"ui_locacao.py","file_ext":"py","file_size_in_byte":4891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17063263130","text":"# Input: s = \"pwwkew\"\n# Output: 3\n# Explanation: The answer is \"wke\", with the length of 3.\n# Notice that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n\ndef lengthOfLongestSubstring(s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n max = 0\n strAux = \"\"\n for i in range(len(s)):\n for j in s: \n if j not in strAux:\n strAux = strAux + j \n else:\n if len(strAux) > max:\n max = len(strAux)\n strAux = \"\"\n s = s[1 : : ]\n break\n if max > len(s): break\n if len(strAux) > max:\n max = len(strAux)\n return(max)\n \n \n\nlengthOfLongestSubstring(\"ohomm\")\n \n \n ","repo_name":"tore0371/leetCodeProyects","sub_path":"3LongestSubstringWithoutRepeatingCharacters.py","file_name":"3LongestSubstringWithoutRepeatingCharacters.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14257897102","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS, cross_origin\nfrom controller import processExample\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route(\"/\")\n@cross_origin()\ndef hello_world():\n return \"
<p>Hello, World!</p>
\"\n\n@app.route(\"/fetchExample\", methods=[\"GET\", \"POST\"])\n@cross_origin()\ndef fetchExample():\n if request.method == \"GET\": # handling GET request\n points, cluster_names = processExample()\n resp = jsonify(data=points, clusters=cluster_names)\n return resp\n else: # handling POST request, which is only effective when ExampleWithInteractions.vue is loaded\n request_context = request.get_json() # JSON object\n method = request_context['method']\n points, cluster_names = processExample(method)\n resp = jsonify(data=points, clusters=cluster_names)\n return resp\n\n\nif __name__ == \"__main__\":\n app.run(port=3100, debug=True)","repo_name":"Sterfan-shi/ECS273-Winter2023","sub_path":"Assignment/Vue-Flask-Template/server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"15418726255","text":"# BOJ-02839 / 설탕 배달\n# devgeon, 2023.01.14, Python3\n# https://www.acmicpc.net/problem/2839\n\n\nclass sugar_bags():\n def __init__(self, weight, bags):\n self.weight = weight\n self.bags = bags\n self.counter = dict(zip(self.bags, [0]*len(self.bags)))\n\n def fill(self, bag, num=1):\n if num == 0:\n num = self.weight // bag\n self.weight -= bag * num\n self.counter[bag] += num\n\n def empty(self, bag, num=1):\n if num == 0:\n num = self.weight // bag\n self.weight += bag * num\n self.counter[bag] -= num\n\n def get_weight(self):\n return self.weight\n\n def get_counter(self):\n return self.counter\n\n def get_num_of_bags(self):\n return sum(self.counter.values())\n\n\ndef main():\n target_value = int(input())\n \n bags = sugar_bags(target_value, (3, 5))\n bags.fill(5, 0)\n bags.fill(3, 0)\n\n while bags.get_counter()[5] > 0:\n if bags.get_weight() == 0:\n break\n bags.empty(5)\n bags.fill(3, 0)\n\n if bags.get_weight() != 0:\n print(-1)\n else:\n print(bags.get_num_of_bags())\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"devgeon/Problem-Solving","sub_path":"Greedy/BOJ-2839-설탕배달.py","file_name":"BOJ-2839-설탕배달.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16776242882","text":"#!/usr/bin/env python3\nfrom olc_webportalv2.users.models import User\nfrom django.test import TestCase\nfrom django.urls import reverse\nimport json\nimport os\n\nfrom olc_webportalv2.vir_typer.views import parse_report, sequence_consensus, sequence_html_string\nfrom olc_webportalv2.vir_typer.models import VirTyperFiles, VirTyperProject, VirTyperRequest, VirTyperResults\n__author__ = 'adamkoziol'\n\n\nclass SampleTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n user = User.objects.create(username='TestViews')\n user.set_password('password')\n user.save()\n project = VirTyperProject.objects.create(project_name='test_views',\n user=user)\n project.save()\n # Sample 1\n request = VirTyperRequest.objects.create(project_name=project,\n lab_ID='St. 
Hyacinthe',\n isolate_source='huître',\n LSTS_ID='2019FPP00000475214',\n putative_classification='Norovirus genogroup 1',\n sample_name='VI482',\n subunit=1,\n date_received='2019-08-01',\n analyst_name='Rachel'\n )\n request.save()\n files = VirTyperFiles.objects.create(sample_name=request,\n sequence_file='P19954_2019_VI482_11_GI_B05_M13-R17_G10_068.ab1')\n files.save()\n files = VirTyperFiles.objects.create(sample_name=request,\n sequence_file='P19954_2019_VI482_11_GI_B04_M13-R17_F10_070.ab1')\n files.save()\n files = VirTyperFiles.objects.create(sample_name=request,\n sequence_file='P19954_2019_VI482_11_GI_B03_M13-R17_E10_072.ab1')\n files.save()\n request = VirTyperRequest.objects.create(project_name=project,\n lab_ID='Burnaby',\n isolate_source='Oyster',\n LSTS_ID='2019FPP00000475215',\n putative_classification='Norovirus genogroup 2',\n sample_name='VI483',\n date_received='2019-08-09',\n analyst_name='Rachel'\n )\n request.save()\n # Sample 2\n files = VirTyperFiles.objects.create(sample_name=request,\n sequence_file='P19954_2019_VI483_11_GI_B05_M13-R17_G10_074.ab1')\n files.save()\n\n def test_vir_typer_home_login_required(self):\n resp = self.client.get(reverse('vir_typer:vir_typer_home'))\n self.assertEqual(resp.status_code, 302) # Should get 302 redirected if user is not logged in.\n\n def test_vir_typer_home_ok(self):\n self.client.login(username='TestViews', password='password')\n resp = self.client.get(reverse('vir_typer:vir_typer_home'))\n self.assertEqual(resp.status_code, 200)\n vir_typer_requests = VirTyperProject.objects.filter()\n for request in vir_typer_requests:\n self.assertEquals(request.project_name, 'test_views')\n\n def test_vir_typer_upload_login_required(self):\n resp = self.client.get(reverse('vir_typer:vir_typer_upload', kwargs={'vir_typer_pk': 1}))\n self.assertEqual(resp.status_code, 302) # Should get 302 redirected if user is not logged in.\n\n def test_vir_typer_upload_404_no_run(self):\n self.client.login(username='TestViews', password='password')\n resp = self.client.get(reverse('vir_typer:vir_typer_upload', kwargs={'vir_typer_pk': 123}))\n self.assertEqual(resp.status_code, 404)\n\n def test_vir_typer_upload_ok(self):\n self.client.login(username='TestViews', password='password')\n pk = VirTyperProject.objects.get(project_name='test_views').pk\n resp = self.client.get(reverse('vir_typer:vir_typer_upload', kwargs={'vir_typer_pk': pk}))\n self.assertEqual(resp.status_code, 200)\n\n def test_vir_typer_result_login_required(self):\n resp = self.client.get(reverse('vir_typer:vir_typer_results', kwargs={'vir_typer_pk': 1}))\n self.assertEqual(resp.status_code, 302) # Should get 302 redirected if user is not logged in.\n\n def test_vir_typer_result_404_no_run(self):\n self.client.login(username='TestViews', password='password')\n resp = self.client.get(reverse('vir_typer:vir_typer_results', kwargs={'vir_typer_pk': 123}))\n self.assertEqual(resp.status_code, 404)\n\n def test_vir_typer_result_ok(self):\n project = VirTyperProject.objects.get(project_name='test_views')\n testpath = os.path.abspath(os.path.dirname(__file__))\n json_output = os.path.join(testpath, 'virus_typer_outputs.json')\n with open(json_output, 'r') as json_report:\n project.report = json.load(json_report)\n project.save()\n self.client.login(username='TestViews', password='password')\n resp = self.client.get(reverse('vir_typer:vir_typer_results', kwargs={'vir_typer_pk': project.pk}))\n self.assertEqual(resp.status_code, 200)\n results = VirTyperResults.objects.all()\n 
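Nearly every test in this record repeats one access-control pattern: request a named route anonymously and expect a 302 redirect to the login page, then authenticate with the test client and expect 200. Stripped to that skeleton (Django's stock `User` stands in for the project's custom user model, and `assertEqual` replaces the deprecated `assertEquals` alias):

```python
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse


class LoginRequiredPattern(TestCase):
    @classmethod
    def setUpTestData(cls):
        user = User.objects.create(username='tester')
        user.set_password('password')  # hash the password properly
        user.save()

    def test_redirect_then_allow(self):
        url = reverse('vir_typer:vir_typer_home')  # any login-protected route
        # anonymous request is redirected...
        self.assertEqual(self.client.get(url).status_code, 302)
        # ...but an authenticated one succeeds
        self.client.login(username='tester', password='password')
        self.assertEqual(self.client.get(url).status_code, 200)
```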
self.assertEquals([result.trimmed_quality_stdev for result in results], ['5.59', '6.10', '6.23', '5.59'])\n\n def test_report_parse(self):\n testpath = os.path.abspath(os.path.dirname(__file__))\n json_output = os.path.join(testpath, 'virus_typer_outputs.json')\n with open(json_output, 'r') as json_report:\n json_data = str(json.load(json_report))\n pk = VirTyperProject.objects.get(project_name='test_views').pk\n vir_typer_samples = VirTyperRequest.objects.filter(project_name_id=pk)\n parse_report(vir_typer_json=json_data,\n vir_typer_samples=vir_typer_samples)\n results = VirTyperResults.objects.all()\n self.assertEquals([result.allele for result in results], ['00069', '00069', '00057', '00069'])\n\n def test_sequence_html(self):\n vir_typer_pk = VirTyperProject.objects.get(project_name='test_views').pk\n samples = list()\n outputs = list()\n\n testpath = os.path.abspath(os.path.dirname(__file__))\n json_output = os.path.join(testpath, 'virus_typer_outputs.json')\n with open(json_output, 'r') as json_report:\n json_data = str(json.load(json_report))\n pk = VirTyperProject.objects.get(project_name='test_views').pk\n vir_typer_samples = VirTyperRequest.objects.filter(project_name_id=pk)\n parse_report(vir_typer_json=json_data,\n vir_typer_samples=vir_typer_samples)\n\n for sample in VirTyperRequest.objects.filter(project_name__pk=vir_typer_pk):\n samples.append(sample.sample_name)\n for sorted_sample in sorted(samples):\n for sample in VirTyperRequest.objects.filter(project_name__pk=vir_typer_pk):\n if sample.sample_name == sorted_sample:\n sequences = list()\n sample_dict = dict()\n sample_dict['sequence'] = list()\n for vir_file in VirTyperFiles.objects.filter(sample_name__pk=sample.pk):\n result = VirTyperResults.objects.filter(sequence_file__id=vir_file.pk)\n for vir_typer_result in result:\n seq_identifier_well = os.path.splitext(vir_file.sequence_file)[0].split('_')[-2]\n seq_identifier_num = os.path.splitext(vir_file.sequence_file)[0].split('_')[-1]\n seq_identifier_code = '_'.join((seq_identifier_well, seq_identifier_num))\n sequences.append({sample.sample_name + '_' + seq_identifier_code: vir_typer_result\n .trimmed_sequence})\n consensus_sequence = sequence_consensus(sequences, vir_typer_pk)\n for vir_file in VirTyperFiles.objects.filter(sample_name__pk=sample.pk):\n result = VirTyperResults.objects.filter(sequence_file__id=vir_file.pk)\n for vir_typer_result in result:\n html_string, variable_locations = sequence_html_string(vir_typer_result.trimmed_sequence,\n consensus_sequence)\n sample_dict['sequence'].append(html_string + '\\n')\n outputs.append(sample_dict)\n for output in outputs:\n self.assertTrue(output['sequence'])\n self.assertTrue(output['sequence'][0].startswith(\"_log.txt\n\nimport subprocess\nbenchmarks = ['c432','c499','c880','c1355','c1908','c2670','c3540','c5315','c6288','c7552'] \nkey_size = ['32','64','128']\n\nfor i in range(0,10): #Loop for all benchmarks \n for j in range(0,3): #Loop for all sizes\n bk = benchmarks[i]\n ks = key_size[j]\n command = \"/root/CEERI/Modified_SAT/bin/sld /root/CEERI/Verilog_Files/Locked_Files/\"\n command+= bk+\"/\"+ks+\"/\"+bk+\"_K\"+ks+\"_0.bench\"+\" /root/CEERI/Verilog_Files/Benchmarks/\"+bk+\".bench\" #Define the command depending upon path\n result = subprocess.run(command, shell=True, capture_output=True, text=True) #Call Subprocess\n #print(result)\n if result.returncode == 0:\n filename = \"SAT_\"+benchmarks[i]+\"_log.txt\" #Define file name for logs\n header = benchmarks[i]+\"_K\"+key_size[j]+\"_Attack\"+\"\\n\"\n with 
open(filename, \"a\") as file: #Write to file\n file.write(header+\"\\n\")\n file.write(result.stdout+\"\\n\\n\")\n print(\"Output saved to \"+filename)\n else:\n print(\"Command failed with error:\", result.stderr)","repo_name":"Sky025/iSES-23","sub_path":"SAT attack/perform_sat.py","file_name":"perform_sat.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33986063888","text":"import numpy as np\nfrom PIL import Image \n\n\ndef change_to_rgb(img):\n \"\"\"If picuteres mode is RGBA change it to RGB\"\"\"\n\n if img.mode=='RGBA':\n background = Image.new(\"RGB\", img.size, (255, 255, 255))\n background.paste(img, mask=img.split()[3])\n img = background\n return img\n\n\ndef resize_picture(img_array, new_size):\n \"\"\"Resize picture into given size\n \n returns transformed picture and scale used during transformation\n \"\"\"\n img = Image.fromarray(img_array)\n # resize image\n original_size = img.size\n img.thumbnail((new_size, new_size))\n # scale\n scale_x = img.size[0] / original_size[0]\n scale_y = img.size[1] / original_size[1]\n image_arr = np.asarray(img)\n # Add padding\n data = np.zeros((new_size, new_size, 3))\n data[:image_arr.shape[0], :image_arr.shape[1], :] = image_arr\n return data, scale_x, scale_y\n\n\ndef preprocess_picture(file_name, new_size):\n \"\"\"Transform given picture into new_size * new_size array\n \n returns transformed picture and scale used during transformation\n \"\"\"\n img = Image.open(file_name)\n # Covert into RGB\n img = change_to_rgb(img)\n return resize_picture(np.asarray(img), new_size)\n\n\ndef preprocess_df(labels_df, img_size):\n \"\"\"Preprocess labeled pictures from df and save them as numpy arrays\n\n Returns:\n X -- preprocessed pictures\n Y -- scaled labels\n \"\"\"\n m = len(labels_df)\n X = np.zeros((m, img_size, img_size, 3))\n Y = np.zeros((m, 2))\n\n for index, row in labels_df.iterrows():\n file_name = row['file_name']\n picture_arr, scale_x, scale_y = preprocess_picture(file_name, img_size)\n x_cord = row['x_coord'] * scale_x\n y_cord = row['y_coord'] * scale_y\n X[index, :] = picture_arr\n Y[index, 0] = x_cord\n Y[index, 1] = y_cord\n return X, Y\n\n\ndef scale_target(target:list, picture_shape):\n \"\"\"Scale target into range [0, 1.0]\"\"\"\n\n for i in range(len(target)):\n if target[i].max() > 2.0:\n target[i] /= picture_shape\n return target\n\n\ndef rescale_target(target:list, picture_shape):\n \"\"\"Rescale target into range [0, picture_shape]\"\"\"\n\n for i in range(len(target)):\n if target[i].max() < 2.0:\n target[i] *= picture_shape\n return target","repo_name":"cicheck/find-the-nose","sub_path":"utills/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73962998883","text":"import os\nfrom image_captioning_utils.Descriptions import Descriptions\n\nif __name__ == '__main__':\n \"\"\"\n Load Flickr8k token file and save it into a text file\n \"\"\"\n # Input block\n directory = 'Flickr8k_text/'\n filename = 'Flickr8k.token.txt'\n output_file = 'descriptions.txt'\n # End of input block\n\n desc = Descriptions(os.path.join(directory, filename))\n desc.save_descriptions(output_file)\n vocabulary_set = desc.get_vocabulary_set()\n\n print('{} images with descriptions and {} pieces of vocabulary are loaded'.format(len(desc), 
len(vocabulary_set)))\n","repo_name":"chlin907/ImageCaptioningDeepLearning","sub_path":"preprocess_text.py","file_name":"preprocess_text.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35450559134","text":"documents = [\n {\"type\": \"passport\", \"number\": \"2207 876234\", \"name\": \"Василий Гупкин\"},\n {\"type\": \"invoice\", \"number\": \"11-2\", \"name\": \"Геннадий Покемонов\"},\n {\"type\": \"insurance\", \"number\": \"10006\", \"name\": \"Аристарх Павлов\"},\n {\"type\": \"passport\", \"number\": \"5455 028765\", \"name\": \"Василий Гупкин\"},\n {\"type\": \"passport\", \"number\": \"5400 028765\"},\n {\"type\": \"passport\", \"number\": \"5455 002299\", \"name\": \"маленький зеленый человечек\"},\n]\n\ndirectories = {\n '1': ['2207 876234', '11-2', '5455 028765'],\n '2': ['10006', '5400 028765', '5455 002299'],\n '3': []\n}\n\n\ndef get_owner(doc_num):\n \"\"\"\n Функция принимает номер документа и\n возвращает имя человека, которому он принадлежит\n \"\"\"\n for doc in documents:\n if doc['number'] == doc_num:\n return doc.get('name', 'У этого документа не указан владелец')\n\n return 'Такой документ в каталоге отсутствует'\n\n\ndef get_type(doc_num):\n \"\"\"\n Функция принимает номер документа и\n возвращает тип документа\n \"\"\"\n for doc in documents:\n if doc['number'] == doc_num:\n return doc.get('type', 'У этого документа не указан тип')\n\n return 'Такой документ в каталоге отсутствует'\n\n\ndef show_documents():\n \"\"\"\n выведет список всех документов,\n каждый документ на своей строке в формате\n passport \"2207 876234\" \"Василий Гупкин\"\n \"\"\"\n for doc in documents:\n doc_type = doc.get('type', 'тип отсутствует')\n doc_number = doc.get('number', 'номер отсутствует')\n doc_name = doc.get('name', 'имя отсутствует')\n print(f'{doc_type} \"{doc_number}\" \"{doc_name}\"')\n\n\ndef get_shell(doc_num):\n \"\"\"\n Функция принимает номер документа и\n возвращает полку, на которой он находится\n \"\"\"\n for shell, document in directories.items():\n if doc_num in document:\n return shell\n\n return 'Такой документ в каталоге отсутствует'\n\n\ndef add_document(doc_num, doc_type, doc_owner, doc_shell):\n \"\"\"\n Функция принимает номер документа, тип, владельца, номер полки и\n добавляет его в documents и directories\n \"\"\"\n if doc_shell in directories:\n document = {\"type\": doc_type, \"number\": doc_num, \"name\": doc_owner}\n documents.append(document)\n directories[doc_shell].append(doc_num)\n print('Документ успешно добавлен')\n else:\n print('Полки с таким номером не существует')\n answer = input('Создать эту полку и добавить в нее документ Y/N? 
').lower()\n if answer == 'y':\n add_shell(doc_shell)\n add_document(doc_num, doc_type, doc_owner, doc_shell)\n\n\ndef delete_document(doc_num):\n \"\"\"\n Функция принимает номер документа и\n удаляет документ из documents и directories\n \"\"\"\n doc_for_delete = None\n for doc in documents:\n if doc['number'] == doc_num:\n doc_for_delete = doc\n\n if doc_for_delete:\n documents.remove(doc_for_delete)\n directories[get_shell(doc_num)].remove(doc_num)\n print('Документ успешно удален')\n else:\n print('Такой документ в каталоге отсутствует')\n\n\ndef move_document(doc_num, new_shell):\n \"\"\"\n Функция принимает номер документа,\n полку на которую нужно переместить документ,\n и перемещает документ\n \"\"\"\n if get_shell(doc_num) != 'Такой документ в каталоге отсутствует':\n doc_type = get_type(doc_num)\n doc_owner = get_owner(doc_num)\n delete_document(doc_num)\n add_document(doc_num, doc_type, doc_owner, new_shell)\n else:\n print('Такого документа в каталоге не обнаружено')\n\n\ndef add_shell(new_shell):\n \"\"\"\n Функция принимает номер новой полки и\n создает ее\n \"\"\"\n if new_shell in directories:\n print('Такая полка уже есть')\n else:\n directories[new_shell] = []\n print('Полка создана')\n\n\ndef show_help():\n print('Список команд, поддерживаемых программой:')\n print('1) P (people) - по номеру документа возвращает владельца')\n print('2) L (list) - выводит список всех документов')\n print('3) S (shelf) - по номеру документа возвращает номер полки')\n print('4) A (add) - добавляет новый документ в каталог')\n print('5) D (delete) - удаляет документ из каталога')\n print('6) M (move) - перемещает документ с одной полки на другую')\n print('7) AS (add shelf) - добавляет новую полку')\n print('8) H (help) - выводит список команд, поддерживаемых программой ')\n print('9) Q (quit) - прекращает работу')\n print('----И наш новый функционал!!!----')\n print('10) SA (Show All) - выводит имена всех владельцев документов')\n\n\ndef show_all_owners():\n print('Список всех владельцев:')\n for doc in documents:\n try:\n print(doc['name'])\n except KeyError:\n print(f'У документа {doc[\"number\"]} нет владельца')\n\n\ndef main():\n show_help()\n\n while True:\n user_input = input('\\nВведите команду: ').lower()\n\n if user_input == 'p' or user_input == '1':\n doc_num = input('Введите номер документа: ')\n print(get_owner(doc_num))\n\n elif user_input == 'l' or user_input == '2':\n show_documents()\n\n elif user_input == 's' or user_input == '3':\n doc_num = input('Введите номер документа: ')\n print(get_shell(doc_num))\n\n elif user_input == 'a' or user_input == '4':\n doc_num = input('Введите номер документа: ')\n doc_type = input('Введите тип документа: ')\n doc_owner = input('Введите имя владельца документа: ')\n doc_shell = input('Введите номер полки, на кототрой будет храниться документ: ')\n add_document(doc_num, doc_type, doc_owner, doc_shell)\n\n elif user_input == 'd' or user_input == '5':\n doc_num = input('Введите номер документа: ')\n delete_document(doc_num)\n\n elif user_input == 'm' or user_input == '6':\n doc_num = input('Введите номер документа: ')\n new_shell = input('Введите номер полки, на которую нужно переместить документ: ')\n move_document(doc_num, new_shell)\n\n elif user_input == 'as' or user_input == '7':\n new_shell = input('Введите номер новой полки: ')\n add_shell(new_shell)\n\n elif user_input == 'h' or user_input == '8':\n show_help()\n\n elif user_input == 'q' or user_input == '9':\n print('Пока!')\n break\n\n elif user_input == 'sa' or user_input == 
'10':\n show_all_owners()\n\n\nmain()\n","repo_name":"anvartdinovtimurlinux/py-30","sub_path":"py_homework_basic/2_3_exceptions_2.py","file_name":"2_3_exceptions_2.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33872295720","text":"from abc import ABC, abstractmethod\n\nfrom clickhouse_driver import Client, errors as ch_errors\n\nfrom src.models.message import Message\nfrom core.config import ClickHouseSettings\nfrom src.backoff import backoff\nfrom core.log_writer import logger\n\n\nclass AbstractWriter(ABC):\n \"\"\"Абстрактный класс для подключения к хранилищу.\"\"\"\n\n @abstractmethod\n def write_messages(self, messages: list[Message]):\n pass\n\n\nclass ClickHouseWriter(AbstractWriter):\n \"\"\"Класс для подключения к ClickHouse\"\"\"\n\n def __init__(self, conf: ClickHouseSettings):\n self.__conf = conf\n self.__client = None\n self.__connect()\n\n @backoff(error=(ch_errors.NetworkError, EOFError), start_sleep_time=2)\n def __connect(self):\n self.__client = Client(host=self.__conf.host)\n self.__client.execute('SHOW DATABASES')\n logger.info('ClickHouse connected!')\n\n def __write_message(self, messages: list[Message]):\n self.__client.execute(\n \"INSERT INTO default.movie_events (userId, movieId, event_ts) VALUES\",\n ((message.user_id, message.movie_id, message.event_ts,) for message in messages)\n )\n\n def write_messages(self, messages: list[Message]):\n \"\"\"\n Метод записи сообщений в ClickHouse\n\n :param messages: список сообщений\n \"\"\"\n try:\n self.__write_message(messages)\n except (ch_errors.NetworkError, EOFError):\n self.__connect()\n self.__write_message(messages)\n\n","repo_name":"san100791/ugc_sprint_1","sub_path":"etl_kafka_to_clikhouse/src/clickhouse_writer.py","file_name":"clickhouse_writer.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72004762403","text":"'''\nwhile True: # Начало цикла\n reply = input('Enter text: ') # Ввод текста с клавиатуры\n if reply == 'stop': # Если введем stop, то цикл закончится и програма выйдет\n break\n elif not reply.isdigit(): # проверка на ошибки ввода текста\n print('Bad' * 8)\n else: # Если мы прошли проверку ввода то программа будет выводить число возведенное в степень\n print(int(reply) ** 2)\nprint('Bye')\n\nwhile True:\n reply = input('Enter text: ')\n if reply == 'stop': break\n try:\n num = int(reply)\n except:\n print('Bad' * 8)\n else:\n print(num ** 2)\nprint('Bye')\n\nwhile True:\n reply = input('Enter text: ')\n if reply == 'stop':\n break\n try:\n num = int(reply)\n except:\n print('Bad' * 8)\n else:\n if num < 20:\n print('Low')\n else:\n print(num ** 2)\nprint('Bye')\n'''\n\nfrom tkinter import *\n\ndef clicked(): # Функция вывода\n res = txt.get()\n res = int(res)\n result = str(res ** 2)\n lbl.configure(text='Возведение числа в степень 2: {}'.format(result))\n\nwindow = Tk() # Объявляем окно программы\nwindow.title(\"Добро пожаловать в приложение PythonRU\") # Название кона программы\nwindow.geometry('400x250') # Объявляем размеры окна программы\nlbl = Label(window, text='Привет', font=('Arial Bold', 14)) # Виджет label - Создание текста в окне нашей программы\nlbl.grid(column=0, row=0) # Место расположение текста лейбла (столбец, строка)\ntxt = Entry(window, width=10) #, state='disabled') # Создание текстового поля, дял ввода пользователя (Размер поля(символов), включение/отключение 
пользовательского ввода)\ntxt.grid(column=1, row=0) # расположение текстового поля\ntxt.focus()\nbtn = Button(window, text='Клик!', bg='Black', fg='red', command=clicked) # Создание кнопки (Название кнопки, цвет кнопки, цвет надписи на кнопке, функция нажатия)\n                                            # ВАЖНО!!! - Создать функцию нажатия для выполнения той или иной операции\nbtn.grid(column=2, row=0) # Расположение кнопки(столбец, строка)\nwindow.mainloop() # Функция mainloop - Запускает цикл окна программы, без него пользователь ничего не увидит\n\n","repo_name":"DarKsp123/My_projects","sub_path":"Test_DB.py","file_name":"Test_DB.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"31652341067","text":"class Solution:\n    def reverse(self, x: int) -> int:\n        if x!=0:\n            sign=int(x/abs(x))\n            x=[*(str(abs(x)))]\n            m=[]\n            for i in range(len(x)):\n                m+=x[-1*(i+1)]\n            m=int(''.join(m))\n            return sign*m if sign*m <= 2**31-1 and sign*m>= -2**31 else 0\n        return x\n","repo_name":"biniyamNegasa/Competative-programming","sub_path":"Reverse_integer.py","file_name":"Reverse_integer.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"72290784482","text":"from sanic import Sanic, response\nfrom sanic.response import text, json\n# logger information\nfrom sanic.log import logger\nfrom sanic import Blueprint\n\nfrom aiofiles import os as async_os\nfrom sanic.response import file_stream\n\napp = Sanic(__name__, load_env='MYAPP_')\n# app = Sanic('myapp')\n\"\"\"\nConfiguration of Project with DB\n\"\"\"\napp.config.DB_NAME = 'sanicDB'\napp.config['DB_USER'] = 'appuser'\n\ndb_settings = {\n    'DB_HOST': 'localhost',\n    'DB_NAME': 'appdb',\n    'DB_USER': 'appuser'\n}\napp.config.update(db_settings)\n\n\n# Serves files from the static folder to the URL /static\napp.static('/static', './static')\n\n\n# Sample Example\n@app.route(\"/\")\nasync def home(request):\n    return response.text(\"Hello Sanic\")\n\n\n@app.route(\"/movie_list\")\nasync def index(request):\n    file_path = \"/Users/livehealth/Downloads/imdb.json t.json\"\n\n    # advertise the file size and stream the file back; file_stream returns a\n    # response object, so it must be returned directly, not parsed with json.loads\n    file_stat = await async_os.stat(file_path)\n    headers = {\"Content-Length\": str(file_stat.st_size)}\n\n    return await file_stream(\n        file_path,\n        headers=headers\n    )\n\n@app.route(\"/myhomepage\")\nasync def my_home_page(request):\n    logger.info('Here is your log')\n    return text(\"My home page!\")\n\nif __name__ == '__main__':\n#    app.run(host='0.0.0.0', port=8000, debug=True)\n    app.run(debug=True, access_log=True)\n\n\n","repo_name":"artikhot97/sanic_project","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"5613366898","text":"# 咕噜咕噜的丁丁\n# 不浪费一分一秒\n# 你可以的\n# 时间:2021/9/23 20:51\n\nfile=open('d.txt','a')\nfile.write('hello')\nfile.write('world')\nfile.flush()\n# close only after all writes - writing to a closed file raises ValueError\nfile.close()","repo_name":"cc852852/vippython","sub_path":"chap15/demo11.py","file_name":"demo11.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"35472149136","text":"from collections import defaultdict \n\ndef substring(str): \n    str_len = len(str) \n  \n    \n    dist_count_char = len(set([x for x in str])) \n  \n    ctr, start_pos, start_pos_index, min_len = 0, 0, -1, 9999999999\n    curr_count = 
defaultdict(lambda: 0) \n for i in range(str_len): \n curr_count[str[i]] += 1\n \n if curr_count[str[i]] == 1: \n ctr += 1\n \n if ctr == dist_count_char: \n while curr_count[str[start_pos]] > 1: \n if curr_count[str[start_pos]] > 1: \n curr_count[str[start_pos]] -= 1\n start_pos += 1\n \n len_window = i - start_pos + 1\n if min_len > len_window: \n min_len = len_window \n start_pos_index = start_pos \n return str[start_pos_index: start_pos_index + min_len] \n \nstr1 = input(\"\")\n\nprint(len(substring(str1))) \n","repo_name":"anikasahni29/maximlexam","sub_path":"maxmlsolution.py","file_name":"maxmlsolution.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16158449478","text":"import numpy as np\nimport cv2 as cv\nimport dlib\n\n\ndef shape_to_np(shape, dtype=\"int\"):\n coords = np.zeros((68, 2), dtype=dtype)\n for i in range(0, 68):\n coords[i] = (shape.part(i).x, shape.part(i).y)\n return coords\n\n\nfaceDetector = cv.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\nmona = cv.imread(\"mona.jpg\")\ngray = cv.cvtColor(mona, cv.COLOR_BGR2GRAY)\n\nrects = dlib.rectangles()\nfaces = faceDetector.detectMultiScale(gray, 1.8, 5)\n\nfor index, (x, y, w, h) in enumerate(faces):\n cv.rectangle(mona, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv.putText(mona, text=\"Face\", org=(x, y-5), fontFace=cv.FONT_HERSHEY_SIMPLEX,\n fontScale=0.5,\n color=(0, 0, 255), thickness=2)\n rects.append(dlib.rectangle(x, y, x + w, y + h))\n\nfor rect in rects:\n shape = predictor(gray, rect)\n shape = shape_to_np(shape)\n\n # lice\n a = np.append(shape[0:17], np.flip(shape[17:27], axis=0), axis=0)\n cv.fillPoly(mona, [a], (0, 255, 0))\n cv.polylines(mona, [a], True, (0, 255, 255), 2)\n\n # nos\n a = shape[27:31]\n cv.polylines(mona, [a], True, (0, 0, 255), 2)\n a = shape[30:36]\n cv.fillPoly(mona, [a], (255, 0, 255))\n cv.polylines(mona, [a], True, (0, 0, 255), 2)\n\n # usta\n # gornja\n a = np.append(shape[48:55], np.flip(shape[60:65], axis=0), axis=0)\n cv.fillPoly(mona, [a], (255, 255, 51))\n cv.polylines(mona, [a], True, (0, 255, 255), 2)\n # donja\n a = shape[54:65]\n cv.fillPoly(mona, [a], (0, 0, 255))\n cv.polylines(mona, [a], True, (255, 0, 255), 2)\n\n # levo oko\n a = np.append(shape[17:22], np.flip(shape[36:40], axis=0), axis=0)\n cv.fillPoly(mona, [a], (255, 0, 0))\n cv.polylines(mona, [a], True, (255, 0, 0), 2)\n\n a = shape[36:42]\n cv.fillPoly(mona, [a], (0, 0, 255))\n cv.polylines(mona, [a], True, (255, 0, 0), 2)\n\n # desno oko\n a = np.append(shape[22:27], np.flip(shape[42:46], axis=0), axis=0)\n cv.fillPoly(mona, [a], (255, 0, 255))\n cv.polylines(mona, [a], True, (0, 255, 0), 2)\n\n a = shape[42:48]\n cv.fillPoly(mona, [a], (255, 0, 255))\n cv.polylines(mona, [a], True, (0, 255, 255), 2)\n\n for index, (x, y) in enumerate(shape):\n if (0 <= index < 27) or (46 <= index < 48):\n\n circle = cv.circle(mona, (x, y), 6, (128, 128, 128), thickness=-1)\n elif (40 <= index < 42) or (42 <= index < 46) or (31 <= index < 36):\n pt1 = (x, y - 8)\n pt2 = (x - 7, y + 7)\n pt3 = (x + 7, y + 7)\n trianglePoints = np.array([pt1, pt2, pt3])\n cv.drawContours(mona, [trianglePoints], 0, (128, 128, 128), -1)\n else:\n cv.rectangle(mona, (x + 5, y + 5), (x - 5, y - 5), (128, 128, 128), -1)\n\ncv.imshow(\"output\", mona)\ncv.imwrite(\"output.jpg\", 
mona)\n\ncv.waitKey(0)\ncv.destroyAllWindows()\n","repo_name":"natalijapavlovic17321/Racunarski-vid","sub_path":"17321_Natalija_Pavlovic_3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15663960230","text":"import socket\nimport threading\n\nclients = []\n\n\nclass Client(threading.Thread):\n def __init__(self, c, i, name):\n threading.Thread.__init__(self)\n self.c = c\n self.i = i\n self.name = name\n\n def GetMsg(self):\n while True:\n data = self.c.recv(1024).decode(\"utf-8\")\n if not data:\n break\n else:\n data = \"%s: %s\" % (self.name, data)\n print(data)\n counter = 0\n for client in clients:\n if counter != self.i:\n client.c.send(data.encode(\"utf-8\"))\n counter += 1\n\n def run(self):\n self.GetMsg()\n\n\nclass GetClient(threading.Thread):\n def __init__(self, s):\n threading.Thread.__init__(self)\n self.s = s\n\n def run(self):\n while True:\n self.s.listen(10) # waits for connection from a client\n c, addr = self.s.accept() # gets clients ip address\n cname = c.recv(1024).decode(\"utf-8\")\n print(\"Connection from %s %s\" % (cname, addr[0]))\n i = len(clients)\n clients.append(Client(c, i, cname))\n clients[-1].start()\n\n\nclass SendMsg(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n\n def run(self):\n while True:\n data = input()\n if data == \"q\":\n break\n else:\n data = \"%s: %s\" % (self.name, data)\n for c in clients:\n c.c.send(data.encode(\"utf-8\"))\n\n\ndef main():\n host = \"0.0.0.0\"\n port = 8565\n name = \"Server\"\n\n s = socket.socket()\n s.bind((host, port))\n\n print(\"Waiting for connections on %s:%s\" % (host, str(port)))\n\n get_clients = GetClient(s)\n send_msg = SendMsg(name)\n\n get_clients.start()\n send_msg.start()\n\n get_clients.join()\n send_msg.join()\n for c in clients:\n c.c.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mokytis/ChatServer","sub_path":"serverMuliConnections.py","file_name":"serverMuliConnections.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33050839663","text":"\ndef average(a):\n \"\"\"Return arithmetic mean of list of floating point numbers\"\"\"\n return sum(a)/len(a)\n\ndef main():\n numbers = []\n while True:\n s = input(\"Give me a number (enter to stop)?\")\n if not s: break\n try:\n n = float(s)\n numbers.append(n)\n except ValueError:\n print(\"Invalid number. 
Please give me valid floating point numbers.\")\n\n print(f\"Number of numbers: {len(numbers)}\")\n print(f\"Average: {average(numbers)}\")\n print(\"Numbers sorted:\")\n for i in sorted(numbers):\n print(i)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"superj4mes/web_dev_school_examples","sub_path":"lesson_python_basics/numbers_statistics2.py","file_name":"numbers_statistics2.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14386285418","text":"import os\nimport sys\nimport cv2\nimport math\nimport numpy as np\n\nt_ratio = .76\n\ndef _pbox2tbox(p_bbox, wr=0.25, hr=0.25):\n p_x1, p_y1, p_x2, p_y2 = p_bbox\n w = p_x2 - p_x1 + 1\n h = p_y2 - p_y1 + 1\n\n w2 = int(w * wr)\n t_x1 = p_x1 + w2\n t_x2 = p_x2 - w2\n\n h2 = int(h * hr)\n t_y1 = p_y1 + h2\n t_y2 = p_y2 - h2\n\n return t_x1, t_y1, t_x2, t_y2\n\ndef _pt_bboxes2res(pt_bboxes, w, h):\n pt_res = []\n for pt_bbox in pt_bboxes:\n j, p_x1, p_x2, p_y1, p_y2, t_x1, t_y1, t_x2, t_y2 = pt_bbox\n p_bbox = [p_x1, p_y1, p_x2, p_y2]\n t_bbox = [t_x1, t_y1, t_x2, t_y2]\n p_score, t_score = 0, 0\n pt_res.append(tuple([h, w, p_bbox, p_score, t_bbox, t_score]))\n return pt_res\n\ndef _tbox2pbox(t_bbox, w, h, t_ratio=0.76):\n # Torso\n x1 = t_bbox[0]\n y1 = t_bbox[1]\n x2 = t_bbox[2]\n y2 = t_bbox[3]\n # Person\n diff_x = x1 - x2\n diff_y = y1 - y2\n t_dit = diff_x * diff_x + diff_y * diff_y\n t_dit = math.sqrt(t_dit)\n t_dit = int(t_dit * t_ratio)\n px1 = x1 - t_dit\n py1 = y1 - t_dit\n px2 = x2 + t_dit\n py2 = y2 + t_dit\n px1 = max(1, px1)\n py1 = max(1, py1)\n px2 = min(w - 2, px2)\n py2 = min(h - 2, py2)\n p_bbox = [px1, py1, px2, py2]\n return p_bbox\n\ndef _tbox2pbox2(t_bbox, w, h, xr=0.72, yr=0.6):\n # Torso\n x1 = t_bbox[0]\n y1 = t_bbox[1]\n x2 = t_bbox[2]\n y2 = t_bbox[3]\n # Person\n diff_x = x1 - x2\n diff_y = y1 - y2\n t_dit = diff_x * diff_x + diff_y * diff_y\n t_dit = math.sqrt(t_dit)\n t_x = int(t_dit * xr)\n t_y = int(t_dit * yr)\n px1 = x1 - t_x\n py1 = y1 - t_y\n px2 = x2 + t_x\n py2 = y2 + t_y\n px1 = max(1, px1)\n py1 = max(1, py1)\n px2 = min(w - 2, px2)\n py2 = min(h - 2, py2)\n p_bbox = [px1, py1, px2, py2]\n return p_bbox\n\ndef _tbox2pbox3(p_bbox, t_bbox, w, h, xr=0.72, yr=0.68):\n # Torso\n p_x1, p_y1, p_x2, p_y2 = p_bbox\n t_x1, t_y1, t_x2, t_y2 = t_bbox\n # Person\n diff_x = t_x2 - t_x1\n diff_y = t_y2 - t_y1\n t_dit = diff_x * diff_x + diff_y * diff_y\n t_dit = math.sqrt(t_dit)\n t_x = int(t_dit * xr)\n t_y = int(t_dit * yr)\n px1 = t_x1 - t_x\n py1 = t_y1 - t_y\n px2 = t_x2 + t_x\n py2 = t_y2 + t_y\n px1 = max(1, px1)\n py1 = max(1, py1)\n px2 = min(w - 2, px2)\n py2 = min(h - 2, py2)\n\n p_bbox2 = [p_x1, py1, p_x2, py2]\n return p_bbox2\n\ndef tbox2pbox4pose(in_file, out_file, n_obj=9):\n\tfh1 = open(in_file)\n\tfh2 = open(out_file, \"w\")\n\n\tfor line in fh1.readlines():\n\t\tline = line.strip()\n\t\tinfo = line.split()\n\t\tinfo = [i.strip() for i in info]\n\t\tassert len(info) >= 2\n\t\tim_path, info = info[0], info[1:]\n\t\tim_path = im_path.strip()\n\t\tn_info = len(info)\n\t\tassert n_info >= n_obj\n\t\tassert n_info % n_obj == 0\n\n\n\t\tim = cv2.imread(im_path)\n\t\th, w, _ = im.shape\n\n\t\tres = im_path\n\t\tfor j in xrange(n_info / n_obj):\n\t\t\tj2 = j * n_obj\n\t\t\tobjidx, pbox, tbox = info[j2], info[j2+1: j2+5], info[j2+5: j2+n_obj]\n\t\t\tpbox = [int(float(c)) for c in pbox]\t\t\t\n\t\t\ttbox = [int(float(c)) for c in tbox]\t\t\t\n\t\t\tpbox2 = _tbox2pbox(tbox, w, h)\n\n\t\t\ttbox = 
[str(c) for c in tbox]\n\t\t\tpbox2 = [str(c) for c in pbox2]\n\t\t\tres = res + \" \" + objidx.strip() + \" \" + \\\n\t\t\t\t\t\t\t\" \".join(pbox2).strip() + \" \" + \\\n\t\t\t\t\t\t\t\" \".join(tbox).strip()\n\t\tfh2.write(res.strip() + \"\\n\")\n\tfh1.close()\n\tfh2.close()\n\nif __name__ == '__main__':\n\tin_file = \"/home/ddk/download/pose.test.nature.scene/pt_props.txt\"\n\tout_file = \"/home/ddk/download/pose.test.nature.scene/pt_props_m.txt\"\n\ttbox2pbox4pose(in_file, out_file)\n\n","repo_name":"zimenglan-sysu-512/pt-faster-rcnn","sub_path":"lib/utils/tbox2pbox4pose.py","file_name":"tbox2pbox4pose.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"}
+{"seq_id":"37523580674","text":"def checkList(values):\n    return len(values) > 0 and isinstance(values[0], list) and \\\n        len(values[0]) > 0 and isinstance(values[0][0], float)\n\n\ndef checkShape(shape):\n    return (shape[0] == 1 and shape[1] > 0) or (shape[0] > 0 and shape[1] == 1)\n\n\nclass Vector:\n    def __init__(self, values):\n        if isinstance(values, list):\n            if checkList(values):\n                self.values = values\n            else:\n                raise ValueError\n        elif isinstance(values, int):\n            if values < 0:\n                raise ValueError\n            self.values = []\n            for i in range(values):\n                self.values.append([float(i),])\n        elif isinstance(values, tuple):\n            if len(values) != 2 or \\\n                    not isinstance(values[0], int) or \\\n                    not isinstance(values[1], int) or \\\n                    values[0] >= values[1]:\n                raise ValueError\n            self.values = []\n            for i in range(values[0], values[1]):\n                self.values.append([float(i),])\n        self.shape = (len(self.values), len(self.values[0]))\n        if not checkShape(self.shape):\n            raise ValueError\n\n    def dot(self, other):\n        if isinstance(other, Vector) and self.shape == other.shape:\n            result = 0\n            for i in range(len(self.values)):\n                for j in range(len(self.values[i])):\n                    result += self.values[i][j] * other.values[i][j]\n            return result\n        else:\n            raise TypeError(\"wrong types\")\n\n    def T(self):\n        newList = []\n        if self.shape[0] == 1:\n            for i in range(len(self.values[0])):\n                newList.append([self.values[0][i],])\n        else:\n            innerList = []\n            for line in self.values:\n                innerList.append(line[0])\n            newList.append(innerList)\n        return Vector(newList)\n\n    # add & radd : only vectors of same shape.\n    def __add__(self, other):\n        if isinstance(other, Vector) and self.shape == other.shape:\n            newList = []\n            for i in range(len(self.values)):\n                innerList = []\n                for j in range(len(self.values[i])):\n                    innerList.append(self.values[i][j] + other.values[i][j])\n                newList.append(innerList)\n            return Vector(newList)\n        else:\n            raise TypeError(\"wrong types\")\n\n    def __radd__(self, other):\n        return self.__add__(other)\n\n    # sub & rsub : only vectors of same shape.\n    def __sub__(self, other):\n        if isinstance(other, Vector) and self.shape == other.shape:\n            newList = []\n            for i in range(len(self.values)):\n                innerList = []\n                for j in range(len(self.values[i])):\n                    innerList.append(self.values[i][j] - other.values[i][j])\n                newList.append(innerList)\n            return Vector(newList)\n        else:\n            raise TypeError(\"wrong types\")\n\n    def __rsub__(self, other):\n        # other - self equals -(self - other); reuse __sub__ and flip the sign\n        return self.__sub__(other) * (-1)\n\n    # truediv : only with scalars (to perform division of Vector by a scalar).\n    def __truediv__(self, num):\n        if not isinstance(num, (int, float)):\n            raise TypeError(\"wrong types\")\n        if num == 0:\n            raise ZeroDivisionError\n        newList = []\n        for line in self.values:\n            innerList = []\n            for i in range(len(line)):\n                innerList.append(line[i] / num)\n            
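# each scaled row becomes one row of the resulting Vector\n            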
newList.append(innerList)\n        return Vector(newList)\n\n    def __rtruediv__(self, num):\n        raise NotImplementedError(\n            \"Division of a scalar by a Vector is not defined here.\")\n\n    # mul & rmul: only scalars (to perform multiplication of Vector by a scalar).\n\n    def __mul__(self, num):\n        if not isinstance(num, (int, float)):\n            raise TypeError(\"wrong types\")\n        newList = []\n        for line in self.values:\n            innerList = []\n            for i in range(len(line)):\n                innerList.append(line[i] * num)\n            newList.append(innerList)\n        return Vector(newList)\n\n    def __rmul__(self, num):\n        return self.__mul__(num)\n\n    def __repr__(self):\n        return self.values.__str__()\n\n    def __str__(self):\n        return self.values.__str__()\n","repo_name":"bbritva/Python_modules","sub_path":"day_01/ex02/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"16911305034","text":"\"\"\"Python Program to Sum All the Items in a Dictionary\"\"\"\ndic1 = {'A': 25, 'B': 41, 'C': 32}\nprint(sum(dic1.values()))\n\n\"\"\"Python Program to Multiply All the Items in a Dictionary\"\"\"\ndic1 = {'A': 25, 'B': 41, 'C': 32}\ntotal = 1\n\nfor i in dic1:\n    total = total * dic1[i]\nprint(total)\n\n\"\"\"Python Program to Multiply the Values of Matching Keys in Two Dictionaries\"\"\"\ndic1 = {'A': 25, 'B': 41, 'C': 32}\ndic2 = {'A': 21, 'B': 12, 'C': 62}\n\ndic3 = {key:value * dic2[key] for key,value in dic1.items() if key in dic2}\nprint(dic3)\n\n\"\"\"Sum the values of two dictionaries that share the same keys\"\"\"\nfrom collections import Counter\ndic1 = {'A': 25, 'B': 41, 'C': 32}\ndic2 = {'A': 21, 'B': 12, 'C': 62}\n\nA = Counter({'A': 25, 'B': 41, 'C': 32})\nB = Counter({'A': 21, 'B': 12, 'C': 62})\n\nSumKey = A + B\nprint(SumKey)\n\n\n","repo_name":"nekapoor7/Python-and-Django","sub_path":"Python/Dictionary/DictSum.py","file_name":"DictSum.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"16661729650","text":"#How to create a user-defined Exception\r\n\r\n\r\n\r\n#syntax\r\n'''\r\nclass ExceptionClassName(PredefinedExceptionName):\r\ndef __init__(self, arg):\r\nself.msg = arg\r\n\r\n\r\n\r\n'''\r\n#every Exception class in Python is derived from the top-level class Exception\r\nclass TooYoungException(Exception):\r\n    def __init__(self, arg):\r\n        self.msg = arg\r\n'''\r\n#How to raise a user-defined Exception explicitly, using keyword 'raise'\r\nraise TooYoungException(\"Too young to get married\")\r\n'''\r\n\r\n\r\n\r\nclass TooOldException(Exception):\r\n    def __init__(self, arg):\r\n        self.msg = arg\r\n\r\n\r\n\r\n#main module\r\nif __name__ == \"__main__\":\r\n    age = int(input(\"Enter your age: \"))\r\n    if age < 18:\r\n        raise TooYoungException(\"You are too young to get married, wait some time to get the best match\")\r\n    elif age > 60:\r\n        raise TooOldException(\"You already crossed marriage age\")\r\n    else:\r\n        print(\"Best match is being searched, you will get notified through email\")\r\n\r\n","repo_name":"nandhakumarpk/Python3.9","sub_path":"Abstract Classes/User-Defined Error/real ude.py","file_name":"real ude.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"29720794513","text":"from flask import Flask\nfrom flask_socketio import SocketIO\nimport base64\nimport os\nfrom io import BytesIO\nfrom PIL import Image\nimport subprocess\n\napp 
= Flask(__name__)\napp.config['SECRET_KEY'] = '12345'\nsocketio = SocketIO(app, cors_allowed_origins=\"*\", async_mode=\"eventlet\", always_connect=True)\nport = int(os.environ.get(\"PORT\", 5000))\n\npathDetect=os.path.join(os.path.dirname(__file__), \"yolov5/detect.py\")\npathBest=os.path.join(os.path.dirname(__file__), \"yolov5/best.pt\")\npathTest=os.path.join(os.path.dirname(__file__), \"yolov5/test\")\n\n@app.route(\"/\")\ndef index():\n    return \"Hola desde el server de FungiDeTECtor!\"\n\n@socketio.on('setImages')\ndef setImages(res):\n    cleanImages('yolov5/test/')\n    for data in res:\n        data = data.split(',')[1]\n        im = Image.open(BytesIO(base64.b64decode(data)))\n        ruta = 'yolov5/test/test' + str(getLastNum('yolov5/test/')) + '.png'\n        im.save(ruta, 'PNG')\n    getImages()\n\ndef recognition():\n    process = subprocess.Popen(\"python \"+pathDetect +\" --weights \"+pathBest+ \" --source \"+pathTest, shell=True)\n    process.wait()\n\ndef getImages():\n    recognition()\n    os.chmod(\"yolov5/runs/detect/exp\"+getLasFolder('yolov5/runs/detect')+'/', 0o755)  # octal permissions, not decimal 755\n    pathResults = os.path.join(os.path.dirname(__file__), \"yolov5/runs/detect/exp\"+getLasFolder('yolov5/runs/detect')+'/')\n    resultsList=[]\n    for filename in os.listdir(pathResults):\n        with open(pathResults+filename, \"rb\") as f:\n            im_b64 = base64.b64encode(f.read())\n            data=im_b64.decode('utf-8')\n            jsoData = {\n                \"img\": data,\n            }\n            resultsList.append(jsoData)\n    socketio.emit('getImages',resultsList, broadcast=True)\n\ndef getLastNum(path):\n    # suffix for the next test image: '' for the first file, then 2, 3, ...\n    num = len(os.listdir(path))\n    if num==0:\n        return ''\n    return str(num+1)\n\ndef getLasFolder(path):\n    num=0\n    with os.scandir(path) as ficheros:\n        for fichero in ficheros:\n            if fichero.is_dir():\n                num += 1\n    if num==1:\n        return ''\n    return str(num)\n\ndef cleanImages(path):\n    for filename in os.listdir(path):\n        os.remove(path + filename)\n\nif __name__ == \"__main__\":\n    #socketio.run()\n    socketio.run(app, host='0.0.0.0', debug=True, port=port)","repo_name":"warnercp15/fungidetectorService","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"15729348752","text":"import os\nimport openai\nimport pickle\nimport replicate\nimport cv2\nimport numpy as np\nfrom fastapi import FastAPI, File, UploadFile, Form\nopenai.api_key = os.environ[\"OPENAI_API_KEY\"] # set the OpenAI API key\n\n\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def root_endpoint():\n    return {\"message\": \"Server is running\"}\n\n\n@app.post(\"/answer_question/\")\nasync def answer_question_endpoint(\n    question: str = Form(...),\n    user_id: str = Form(...),\n    image: UploadFile = File(...),\n):\n    image_path = f\"storage/{user_id}.jpg\" # path to the image\n    # read the image data\n    image_data = np.frombuffer(await image.read(), np.uint8)\n    # decode the image data\n    image_np = cv2.imdecode(image_data, cv2.IMREAD_COLOR)\n    # save the image to the storage folder\n    cv2.imwrite(image_path, image_np)\n    # answer the question\n    answer_str = answer_question(question, image_path, user_id)\n    return {\"answer\": answer_str}\n\n\ndef get_chat_log_path(user_id):\n    return f\"chat_logs/{user_id}.pkl\"\n\n\ndef image_related(chat_log_inline):\n    \"\"\"\n    Determines if the question in chat log is related to the image\n    Args:\n        chat_log_inline (str): The chat-log in a single string.\n    Returns:\n        bool: True if the question is related to the image, False otherwise.\n    
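The check relies on the model replying with a bare YES or NO prefix.\n    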
\"\"\"\n result = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n max_tokens=20,\n messages=[\n {\"role\": \"system\", \"content\": \"You are an AI assistant\"},\n {\"role\": \"user\", \"content\": f\"An AI assistant is designed to assist blind people by answering questions related to photos taken by the user. It utilizes the BLIP question-answering model to answer some of the questions. The following dialogue was provided:\\n\\n{chat_log_inline}\\nDetermine whether the AI assistant requires visual context or information about the user's surroundings to reply to the user's last message. If the AI assistant requires any of these, reply with 'YES'. If none of these are required, reply with 'NO'. Please note that you can only write 'YES' or 'NO'. Begin.\"},\n ],\n temperature=0.3,\n )[\"choices\"][0][\"message\"][\"content\"]\n\n return True if result.startswith(\"YES\") else False\n\n\ndef correct_answer(question, blip_answer, chat_log_inline):\n \"\"\"\n Corrects and returns the answer provided by BLIP\n Args:\n question (str): The question to be answered.\n blip_answer (str): The answer provided by the BLIP model.\n chat_log_inline (str): The chat-log in a single string.\n Returns:\n answer (str): The answer to the question.\n \"\"\"\n QnA = f\"User: {question} BLIP: {blip_answer}\"\n answer = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n max_tokens=64,\n messages=[\n {\"role\": \"system\", \"content\": \"You are an AI assistant\"},\n {\"role\": \"user\", \"content\": f\"We want to create an AI assistant to assist people with visual impairments by answering questions related to photos taken by the user. To accomplish this, we use the BLIP question-answering model. However, the model's responses are sometimes very brief, strange, and unpredictable. Your task is to write a proper reply to a user's prompt (User) based on BLIP's reply (BLIP) and nothing more. If you cannot provide a proper reply for any reason, just say 'I'm sorry. I cannot answer.' and nothing else. Do not mention the BLIP in your response. 
See the whole dialogue for reference:\\n\\n{chat_log_inline}\\nBegin.\\n\\n\" + QnA},\n ],\n temperature=0.5,\n )[\"choices\"][0][\"message\"][\"content\"]\n\n return answer\n\n\ndef answer_question(question, image_path, user_id):\n \"\"\"\n Answers the question using the BLIP model if the\n question is related to the image, otherwise uses GPT-3\n Args:\n question (str): The question to be answered.\n image_path (str): The path to the image.\n user_id (str): The user's ID.\n Returns:\n answer (str): The answer to the question.\n \"\"\"\n chat_log_path = get_chat_log_path(user_id)\n try: # load previous chat-log from pickle file if it exists\n with open(chat_log_path, \"rb\") as f:\n chat_log = pickle.load(f)\n except: # otherwise, create a new chat-log\n chat_log = []\n\n # append the question to the chat-log\n chat_log_inline = \" \".join([f\"User: {U}\\nAssistant: {A}\\n\" for U, A in chat_log])\n chat_log_inline += f\"User: {question}\\n\"\n\n if (image_related(question)): # use the BLIP model to answer the question\n model = replicate.models.get(\"salesforce/blip-2\")\n version = model.versions.get(\"4b32258c42e9efd4288bb9910bc532a69727f9acd26aa08e175713a0a857a608\")\n inputs = {\n 'image': open(image_path, \"rb\"),\n 'caption': False,\n 'question': question,\n 'use_nucleus_sampling': False,\n 'temperature': 0.7,\n }\n blip_answer = version.predict(**inputs)\n answer = correct_answer(question, blip_answer, chat_log_inline)\n\n else:\n answer = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n max_tokens=64,\n messages=[\n {\"role\": \"system\", \"content\": \"You are Botagoz - an AI assistant who helps people with visual impairments.\"},\n {\"role\": \"user\", \"content\": f\"You are Botagoz, an AI assistant designed to assist people with visual impairments. At times, you may require visual context, access to the user's camera, or information about the surroundings to answer some of the questions. Given the following dialogue with the user,\\n\\n{chat_log_inline}\\nplease write reply for the last message. Write only the reply without quotation marks and nothing else. 
Begin.\"},\n ],\n )[\"choices\"][0][\"message\"][\"content\"]\n \n # remove quotation marks from the beginning and end of the answer\n if answer.startswith('\"') and answer.endswith('\"'):\n answer = answer[1:-1]\n\n # append the answer to the chat-log\n chat_log.append((question, answer))\n\n # if the chat-log is too long, remove the oldest question\n if len(chat_log) > 10:\n chat_log.pop(0)\n\n # save the chat-log to a pickle file\n with open(chat_log_path, \"wb\") as f:\n pickle.dump(chat_log, f)\n\n return answer","repo_name":"armanbolatov/botagoz_app_backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1172809111","text":"import pandas as pd\nimport sys\nfrom ete3 import Tree, TreeStyle, NodeStyle\n\nfile=sys.argv[1]\n\n\n\ndef readTreeFromFile(file):\n try:\n f=open(file, 'r')\n except IOError:\n print (\"Unknown file: \"+file)\n sys.exit()\n\n line = \"\"\n for l in f:\n line += l.strip()\n\n f.close()\n t = Tree( line )\n return t\n\nt = readTreeFromFile(file)\n\nblAfter = list()\n\nfor node in t.traverse(\"postorder\"):\n blAfter.append(node.dist)\n\nblAfterDF = pd.DataFrame (blAfter, columns=[\"bls\"])\n#print(blBeforeDF.describe())\nprint (\"\\t\\t File \" + file + \" : \")\nprint(blAfterDF.describe())\nprint (\"##########################\")\nprint (\"##########################\\n\")\n","repo_name":"Boussau/DatingWithConsAndCal","sub_path":"Scripts/getBranchLengthStats.py","file_name":"getBranchLengthStats.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39367462757","text":"def levenshteinDistance(str1, str2):\n \"\"\"\n TC: O(nm) | SC: O(nm)\n \"\"\"\n edits = [[x for x in range(len(str2) + 1)] for y in range(len(str1) + 1)]\n for y in range(len(str1) + 1):\n edits[y][0] = y\n \n for i in range(1, len(str1) + 1):\n for j in range(1, len(str2) + 1):\n if str1[i - 1] == str2[j - 1]:\n edits[i][j] = edits[i - 1][j - 1]\n else:\n edits[i][j] = 1 + min(edits[i][j - 1], edits[i - 1][j - 1], edits[i - 1][j])\n \n return edits[-1][-1]\n\n","repo_name":"nikhiilll/Algorithms-using-Python","sub_path":"Medium-AE/LevenshteinDistance.py","file_name":"LevenshteinDistance.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74337218721","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 16 23:14:40 2022\n\n@author: ilirsheraj\n\"\"\"\ndef banner_text(text, screen_width = 80):\n if len(text) > screen_width - 4:\n raise ValueError(\"String '{}' is larger than the specified width {}\"\n .format(text, screen_width))\n \n if text == \"*\":\n print(\"*\"*screen_width)\n \n else:\n output_string = \"**{}**\".format(text.center(screen_width - 4))\n print(output_string)\n \n\n\nbanner_text(\"*\")\nbanner_text(\"Always look on the light side of life...\")\nbanner_text(\"If life seems jolly rotten\")\nbanner_text(\"There's something you've forgotten\")\nbanner_text(\"And that's to laugh and smile and dance and sing\")\nbanner_text(\" \")\nbanner_text(\"When you're feeling in the dumps\")\nbanner_text(\"Don't be silly chumps\")\nbanner_text(\"Just purse your lips and whistle - that's the thing\")\nbanner_text(\"And... 
Always look on the bright side of life\")\nbanner_text(\"*\")","repo_name":"ilirsheraj/PythonMasterClass","sub_path":"functions/banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"20940814867","text":"# 1. Write a program `download_script.py`, which downloads a set of files from the internet.\n# The files to download are given as arguments to your program on the command-line as illustrated in the following:\n\n# ```bash\n# $ python download_script.py http://www.gutenberg.org/files/2701/2701-0.txt http://www.gutenberg.org/cache/epub/27525/pg27525.txt\n# Downloading file to ./2701-0.txt\n# Downloading file to ./pg27525.txt\n# ```\n\n# Reuse your `webget` module from exercises in notebook: 02-0c Modules.\n\n# 2. Modify the above program, so that it can download a list of files from stdin.\n# That is, so that you can reuse the output of one CLI command as input to your program.\n\n# ```bash\n# $ cat list_of_files.txt | python download_script.py\n# ```\n\nimport webget\nimport sys\nfrom urllib.parse import urlparse\n\n\nif __name__ == '__main__':\n    # read URLs from stdin when input is piped, otherwise take them from argv\n    if not sys.stdin.isatty():\n        for link in sys.stdin.read().split('\\n'):\n            if link.strip():\n                webget.download(link)\n    else:\n        for url in sys.argv[1:]:\n            webget.download(url)\n","repo_name":"MalteMagnussen/PythonProjects","sub_path":"week2/download_script.py","file_name":"download_script.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"31336879323","text":"# --------------\n# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2022/3/5 11:53\n# @project_name : ybjrun\n# @author :\tpujen_yuan\n# ------------\n\nfrom time import perf_counter\n\nfrom loguru import logger\nfrom starlette.middleware.base import BaseHTTPMiddleware\nfrom starlette.requests import Request\nfrom apps.config.config import auth as auth_conf\nfrom apps.utils.json_response import ForbiddenException, InvalidTokenException, ExpiredTokenException, JSONResponse\nfrom apps.extensions.jwt.simple_auth import SimpleAuth as Auth\nfrom fastapi import HTTPException\nfrom starlette.status import HTTP_400_BAD_REQUEST\n\n\nclass AuthMiddleware(BaseHTTPMiddleware):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        # 认证的形式 1:使用默认的,2 使用自定义的\n        self.auth_type = 2\n\n    def check_auth_token(self, request):\n        '''\n        第一步:检测URL地址和携带认证请求头字段信息\n        :param request:\n        :return:\n        '''\n        # 如果是使用系统自带的认证的虎,它的需要的认证请求头是必须是Authorization的这个的,当然也可以自定义,不过还不知道咋玩\n        while_auth_ulr = auth_conf.ADMIN_WHILE_ROUTE\n        token = None  # avoid UnboundLocalError for whitelisted paths\n\n        # 只有不在白名单的地址需要进行认证的授权的校验\n        if request.url.path not in while_auth_ulr and 'sys/randomImag' not in request.url.path and 'docs' not in request.url.path:\n            if self.auth_type == 1:\n                token = request.headers.get('Authorization', None)\n                if not token:\n                    return ForbiddenException()\n            else:\n                # 从头部提取关键的授权码信息\n                token = request.headers.get('X-Access-Token', None)\n                if not token:\n                    # 从get里面进行提取\n                    return ForbiddenException()\n                # 下面这种方式,会到全局异常捕获那进行处理\n                # raise HTTPException(HTTP_400_BAD_REQUEST, 'Invalid token')\n\n        return token\n\n    def authenticate_credentials(self, token):\n        '''\n        第2步:检测URL地址和携带认证请求头字段信息\n        :param token:\n        :return:\n        '''\n        isok, state, token_userinfo_result = Auth.verify_bearer_token_state(token=token)\n        if not isok and state == 1:\n            return InvalidTokenException()\n        if not isok and state == 2:\n            return ExpiredTokenException()\n\n        return 
token_userinfo_result\n\n async def authenticate_credentials_user_info(self, token_userinfo_result):\n '''\n 进行TOken内部的包含的用户信息的验证\n :param token:\n :return:\n '''\n isok, isstatus = False, 2\n\n if not isok:\n return ForbiddenException(msg='该用户已经不存在,请联系管理员!')\n # 用户状态(1-正常,2-冻结)\n # if isstatus.get('status') == 2:\n # return ForbiddenException(msg='该用户已经被冻结,请联系管理员!')\n\n async def dispatch(self, request: Request, call_next):\n # # ---协程对象的返回-使用方法封装后---返回值的处理需要使用这样方式进行---注意返回的时候处理\n # if isinstance(token_result, JSONResponse):\n #\n # return token_result\n # 1:检测是否协调认证信息,没有则返回错误提示,有则返回对应的Token的值\n\n\n # 如果是使用系统自带的认证的虎,它的需要的认证请求头是必须是Authorization的这个的,当然也可以自定义,不过还不知道咋玩\n while_auth_ulr = auth_conf.ADMIN_WHILE_ROUTE\n\n # print('while_auth_ulr',while_auth_ulr)\n\n # 只有不在白名单的地址需要进行认证的授权的校验\n # print(\"鉴权出来11111111111111111\")\n # print('aaaaaaaaawhile_auth_ulr', while_auth_ulr)\n\n if request.scope[\"method\"]!='OPTIONS' and request.url.path not in while_auth_ulr and 'sys/randomImag' not in request.url.path and 'docs' not in request.url.path:\n if self.auth_type == 1:\n token = request.headers.get('Authorization', None)\n if not token:\n return ForbiddenException()\n else:\n # 从头部提取关键的授权码信息\n # print(\"鉴权出来11111111111111111\")\n token = request.headers.get('X-Access-Token', None)\n # print(\"鉴权出来11111111111111111\",token)\n if not token:\n # 从get里面进行提取\n return ForbiddenException()\n isok, state, token_userinfo_result = Auth.verify_bearer_token_state(token=token)\n if not isok and state == 1:\n return InvalidTokenException()\n if not isok and state == 2:\n return ExpiredTokenException()\n\n # 写入当前请求上下的当前对象\n request.state.token_userinfo_result = token_userinfo_result\n\n\n response = await call_next(request)\n\n return response\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"abao0713/ybjrun","sub_path":"apps/middleware/global_auth.py","file_name":"global_auth.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22569354317","text":"import logging\nimport threading\nimport datetime\n\nfrom splinter import Browser\nfrom selenium.webdriver.chrome.service import Service\nimport time\n\nimport logging\n\n# variable\n# Acount info\nUsername = \"\"\nPassword = \"\" \n# Website driver address\ndriverAddress = 'C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe'\n# url\nLOGIN_PAGE = 'https://stuinfosys.ntust.edu.tw/NTUSTSSOServ/SSO/Login/CourseSelection'\nINDEX_PAGE = 'https://courseselection.ntust.edu.tw/'\nMAIN_PAGE = 'https://courseselection.ntust.edu.tw/First/A06/A06'\nTIMEOUT = 30\n# Lesson Code\nlessonCode = [\n '',\n '',\n]\nlistLength = len(lessonCode)\nlistIndex = 0\n# using chrome drive\n#executable_path = {'executable_path':driverAddress}\n#browser = Browser('chrome', **executable_path)\nmy_service = Service(executable_path=driverAddress)\nbrowser = Browser('chrome')\n\ndef init():\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M',\n handlers=[logging.FileHandler('./log/output.log', 'w', 'utf-8'), ]\n )\n global browser\n browser.visit(INDEX_PAGE)\ndef login():\n global browser\n browser.fill('UserName',Username)\n time.sleep(1)\n browser.fill('Password',Password)\n time.sleep(1)\n browser.find_by_name('btnLogIn').click()\n time.sleep(1) \ndef enterMainPage():\n global browser\n browser.visit(MAIN_PAGE)\ndef takeLesson():\n global browser, listIndex\n browser.fill('CourseText',lessonCode[listIndex])\n 
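# click the single-add button to submit the course code\n    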
browser.find_by_id('SingleAdd').click()\n now = time.time()\n alert = browser.get_alert()\n \n while alert == None:\n currentPage = browser.url\n if currentPage == LOGIN_PAGE:\n login()\n break\n if currentPage == INDEX_PAGE:\n enterMainPage()\n break\n if time.time()-now>TIMEOUT:\n browser.reload()\n break\n alert = browser.get_alert()\n \n if alert!=None:\n alert_text = alert.text\n alert.accept()\n else:\n alert_text = \"錯誤\"\n\n str = '\\\" %s \\\" Reply: %s' %(lessonCode[listIndex],alert_text.encode(\"utf-8\"))\n print(str.encode(\"utf-8\"))\n logging.info(str)\n\n listIndex += 1\n if listIndex == listLength:\n listIndex = 0\n \ninit()\nwhile True:\n try:\n currentPage = browser.url\n except:\n print(\"browser error\".encode(\"utf-8\"))\n continue\n\n if currentPage == LOGIN_PAGE:\n try:\n login()\n except:\n print(\"登入錯誤\".encode(\"utf-8\")) \n elif currentPage == INDEX_PAGE:\n try:\n enterMainPage()\n except:\n print(\"跳轉錯誤\".encode(\"utf-8\"))\n elif currentPage == MAIN_PAGE:\n try:\n takeLesson()\n except:\n print(\"加選錯誤\".encode(\"utf-8\"))\n else:\n browser.visit(INDEX_PAGE)\nprint(\"End\")\n","repo_name":"ken5170696/Lesson_Machine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6423842508","text":"import cv2\nimport math\nimport numpy as np\n\n# --------------------------------------------------- #\n# 画像合成 #\n# --------------------------------------------------- #\n\n\ndef ImageComposition(img2, result):\n img3 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # グレースケール化\n img3 = cv2.cvtColor(img3, cv2.COLOR_GRAY2BGR) # グレースケールのままカラー画像にする\n # コントラスト、明るさを変更する。\n img3 = adjust(img3, alpha=0.25)\n add = cv2.add(img3, result) # 画像を合成する\n return add\n\n# α はゲイン (gain) 、βはバイアス (bias)\n\n\ndef adjust(img, alpha=1.0, beta=0.0):\n # 積和演算を行う。\n dst = alpha * img + beta\n # [0, 255] でクリップし、uint8 型にする。\n return np.clip(dst, 0, 255).astype(np.uint8)\n\n\ndef FitImageSize_small(img1, img2):\n # height\n if img1.shape[0] > img2.shape[0]:\n height = img2.shape[0]\n width = img1.shape[1]\n img1 = cv2.resize(img1, (width, height))\n else:\n height = img1.shape[0]\n width = img2.shape[1]\n img2 = cv2.resize(img2, (width, height))\n\n # width\n if img1.shape[1] > img2.shape[1]:\n height = img1.shape[0]\n width = img2.shape[1]\n img1 = cv2.resize(img1, (width, height))\n else:\n height = img2.shape[0]\n width = img1.shape[1]\n img2 = cv2.resize(img2, (width, height))\n return img1, img2\n\n\ntry:\n img = cv2.imread('./img/2009.png')\n\n if img is None:\n print('ファイルを読み込めません')\n import sys\n sys.exit()\n\n cv2.imshow(\"img\", img)\n\n # 余白を取り除いたときに2つの画像が最も一致するような適切な余白(padding)の幅を見つける\n img_src = img\n img_diffs = []\n paddings = []\n for padding in range(1, 50):\n # 画像の余白を削除\n img = img_src[:, padding:-padding]\n\n # 画像を左右で分割する\n height, width, channels = img.shape[:3]\n img1 = img[:, :width//2]\n img2 = img[:, width//2:]\n\n # 画像サイズを合わせる(小さい方に)\n img1, img2 = FitImageSize_small(img1, img2)\n\n # 2つの画像の差分を算出\n img_diff = cv2.absdiff(img2, img1)\n img_diff_sum = np.sum(img_diff)\n\n img_diffs.append((img_diff, img_diff_sum))\n paddings.append(padding)\n\n # 差分が最も少ないものを選ぶ\n img_diff, _ = min(img_diffs, key=lambda x: x[1])\n index = img_diffs.index(min(img_diffs, key=lambda x: x[1]))\n cv2.imshow(\"img_diff\", img_diff)\n\n padding = paddings[index]\n # 画像の余白を削除\n img = img_src[:, padding:-padding]\n # 画像を左右で分割する\n height, width, channels = img.shape[:3]\n img1 = 
img[:, :width//2]\n img2 = img[:, width//2:]\n cv2.imshow(\"img2\", img2)\n\n # 画像サイズを合わせる(小さい方に)\n img2, img_diff = FitImageSize_small(img2, img_diff)\n # 画像合成\n add = ImageComposition(img2, img_diff)\n cv2.imshow(\"add\", add)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\nexcept:\n import sys\n print(\"Error:\", sys.exc_info()[0])\n print(sys.exc_info()[1])\n import traceback\n print(traceback.format_tb(sys.exc_info()[2]))\n","repo_name":"itouri/play-ground","sub_path":"opencv/saizeriya/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2416810375","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nimport xgboost as xgb\nfrom importlib.machinery import SourceFileLoader\nfolder = '../input/'\nevaluation = SourceFileLoader(\"module.name\", folder + \"evaluation.py\").load_module()\nfrom types import MethodType\n\ndef predict_proba(name,clf,X):\n if name=='xgb':\n return clf.predict(xgb.DMatrix(X))\n else:\n return clf.predict_proba(X)[:,1]\n \nxgb.Booster.predict_proba = predict_proba\n \ndef add_noise(array, level=0.40, random_seed=34):\n np.random.seed(random_seed)\n return level * np.random.random(size=array.size) + (1 - level) * array\n \ndef check_tests(clf,train,features,name):\n print('Checking tests for: ' + name + '...')\n # run the agreement test\n check_agreement = pd.read_csv(folder + 'check_agreement.csv', index_col='id')\n agreement_probs = predict_proba(name,clf,check_agreement[features])\n ks = evaluation.compute_ks(\n agreement_probs[check_agreement['signal'].values == 0],\n agreement_probs[check_agreement['signal'].values == 1],\n check_agreement[check_agreement['signal'] == 0]['weight'].values,\n check_agreement[check_agreement['signal'] == 1]['weight'].values)\n print('KS metric', ks, ks < 0.09)\n \n # perform evaluation on the training set itself\n train_eval = train[train['min_ANNmuon'] > 0.4]\n train_probs = predict_proba(name,clf,train_eval[features])\n AUC = evaluation.roc_auc_truncated(train_eval['signal'], train_probs)\n print('AUC before noise: ', AUC)\n \n # test correlation with mass\n check_correlation = pd.read_csv(folder + 'check_correlation.csv', index_col='id')\n correlation_probs = predict_proba(name,clf,check_correlation[features])\n cvm = evaluation.compute_cvm(correlation_probs, check_correlation['mass'])\n print('CvM metric', cvm, cvm < 0.002)\n \n # Add noise and repeat the test\n agreement_probs = add_noise(predict_proba(name,clf,check_agreement[features]))\n\n ks = evaluation.compute_ks(\n agreement_probs[check_agreement['signal'].values == 0],\n agreement_probs[check_agreement['signal'].values == 1],\n check_agreement[check_agreement['signal'] == 0]['weight'].values,\n check_agreement[check_agreement['signal'] == 1]['weight'].values)\n print('KS metric', ks, ks < 0.09)\n \n correlation_probs = add_noise(predict_proba(name,clf,check_correlation[features]))\n cvm = evaluation.compute_cvm(correlation_probs, check_correlation['mass'])\n print('CvM metric', cvm, cvm < 0.002)\n \n train_eval = train[train['min_ANNmuon'] > 0.4]\n train_probs = add_noise(predict_proba(name,clf,train_eval[features]))\n AUC = evaluation.roc_auc_truncated(train_eval['signal'], train_probs)\n print('AUC after noise: ', AUC)\n\nprint(\"Load the training/test data using pandas\")\ntrain = pd.read_csv(folder + \"training.csv\")\ntest = pd.read_csv(folder + \"test.csv\")\n\nprint(\"Eliminate 
SPDhits, which makes the agreement check fail\")\nfeatures = list(train.columns[1:-5])\nnum_estimators=500\nprint(\"Train a Random Forest model\")\nrf = RandomForestClassifier(verbose=0,n_estimators=num_estimators, n_jobs=-1, criterion=\"entropy\", random_state=1)\nrf.fit(train[features], train[\"signal\"])\n\n#check_tests(rf,train,features,'rf')\nnum_trees=1000\nprint(\"Train an XGBoost model\")\nparams = {\"objective\": \"binary:logistic\",\n          \"eta\": 0.1,\n          \"max_depth\": 7,\n          \"min_child_weight\": 10,\n          \"silent\": 1,\n          \"subsample\": 0.7,\n          \"colsample_bytree\": 0.7,\n          \"seed\": 1}\n\ngbm = xgb.train(params, xgb.DMatrix(train[features], train[\"signal\"]), num_trees)\n#check_tests(gbm,train,features,'xgb')\n\nprint(\"Make predictions on the test set\")\ntest_probs = (0.6*rf.predict_proba(test[features])[:,1] +\n              0.4*gbm.predict(xgb.DMatrix(test[features])))\nsubmission = pd.DataFrame({\"id\": test[\"id\"], \"prediction\": test_probs})\nsubmission.to_csv(\"rf_xgboost_submission_750.csv\", index=False)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/flavours-of-physics/Deep/rf-xgboost.py","file_name":"rf-xgboost.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"1622725004","text":"from django.urls import path, include\n\nfrom . import views\n\n\napp_name = 'apiv1'\nurlpatterns = [\n    path('posts/', views.PostListCreateAPIView.as_view()),\n    path('posts/<int:pk>/', views.PostRetrieveUpdateDestroyAPIView.as_view()),\n    path('posts/mini/', views.PostMiniListAPIView.as_view()),\n    path('posts/like/<int:pk>/', views.PostLikeRetrieveUpdateDestroyAPIView.as_view()),\n    path('users/', views.UserListCreateAPIView.as_view()),\n    path('users/<int:pk>/', views.UserRetrieveUpdateDestroyAPIView.as_view()),\n    path('categories/', views.CategoryListAPIView.as_view()),\n    path('comments/', views.CommentListCreateAPIView.as_view()),\n    path('comments/<int:pk>/', views.CommentRetrieveUpdateDestroyAPIView.as_view()),\n    path('likes/', views.LikeListCreateAPIView.as_view()),\n    path('likes/<int:pk>/', views.LikeDestroyAPIView.as_view()),\n    path('connections/', views.ConnectionListCreateAPIView.as_view()),\n    path('connections/<int:pk>/', views.ConnectionDestroyAPIView.as_view()),\n    path('auth/', include('djoser.urls')),\n    path('auth/', include('djoser.urls.jwt')),\n]\n","repo_name":"taki-21/doboku-post","sub_path":"backend/apiv1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16787263755","text":"import logging\nimport sys\n\nfrom utils.env import env\n\ndefault_level = env('DEFAULT_LOGGING_LEVEL', default=logging.INFO)\ndefault_logging_format = env('DEFAULT_LOGGING_FORMAT',\n                             default='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\nclass LoggingService:\n    def __init__(self, name: str, *, logging_format: str = default_logging_format, level: int = default_level):\n        self.name = name\n        self.logging_format = logging_format\n        self.level = level\n\n        self._logger = None\n\n    @property\n    def logger(self):\n        if self._logger:\n            return self._logger\n\n        self._logger = self._build_logger()\n        return self._logger\n\n    def info(self, message):\n        self.logger.info(message)\n\n    def warning(self, message):\n        self.logger.warning(message)\n\n    def exception(self, message):\n        self.logger.exception(message)\n\n    def _build_logger(self):\n        logger = logging.getLogger(self.name)\n        logger.setLevel(self.level)\n\n        
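# build a handler that streams to stdout and attach it, so records actually reach the console\n        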
sys_stdout_stream_handler = self._build_stream_handler(sys.stdout)\n logger.addHandler(sys_stdout_stream_handler)\n\n return logger\n\n def _build_stream_handler(self, stream):\n formatter = logging.Formatter(self.logging_format)\n\n stream_handler = logging.StreamHandler(stream)\n stream_handler.setLevel(self.level)\n stream_handler.setFormatter(formatter)\n\n return stream_handler\n","repo_name":"DEV3L/python-azure-eventhub","sub_path":"utils/logging_service.py","file_name":"logging_service.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16324920131","text":"import os\nfrom .datasets import DatasetSR\nfrom torch.utils.data import DataLoader\n\ndef get_loaders(args):\n # datasets\n dataset_train = DatasetSR(root=os.path.join(args.root, 'train'), scale=args.scale, training=True, crop_size=args.crop_size)\n dataset_val = DatasetSR(root=os.path.join(args.root, 'val'), scale=args.scale, training=False, max_size=args.max_size)\n\n # loaders\n loader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n loader_eval = DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=1)\n loaders = {'train': loader_train, 'eval': loader_eval}\n\n return loaders\n","repo_name":"kligvasser/xUnit","sub_path":"super-resolution/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"54"} +{"seq_id":"879007312","text":"# coding: utf-8\n\nfrom pathlib import Path\nimport streamlit as st\nimport matplotlib.pyplot as plt\n\nimport plot\nimport base64\n\n\n# Title\nst.title(\"Matplotlib Style Configurator\")\nst.markdown(\"\"\"[GitHub repository](https://github.com/dhaitz/matplotlib-style-configurator) -\n [Plotting code](https://matplotlib.org/gallery/style_sheets/style_sheets_reference.html) -\n [Matplotlib style gallery ](https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html) -\n [mplcyberpunk](https://github.com/dhaitz/mplcyberpunk) -\n [MPL stylesheets](https://github.com/dhaitz/matplotlib-stylesheets)\"\"\")\n\n\n# Sidebar: basic config selectors\nbase_styles = ['default'] + [style for style in plt.style.available if not style.startswith('_')] + [\n \"cyberpunk\",\n \"https://raw.githubusercontent.com/dhaitz/matplotlib-stylesheets/master/pitayasmoothie-dark.mplstyle\",\n \"https://raw.githubusercontent.com/dhaitz/matplotlib-stylesheets/master/pacoty.mplstyle\",\n \"https://raw.githubusercontent.com/dhaitz/matplotlib-stylesheets/master/pitayasmoothie-light.mplstyle\",\n]\nstyle = st.sidebar.selectbox(\"Choose base style:\", base_styles)\nplt.style.use(style)\n\nn_columns = st.sidebar.selectbox(\"Number of columns\", [1, 2, 3, 6], index=2)\n\n\n# Sidebar: parameter customization widgets\nst.sidebar.header(\"Customize style:\")\nst.sidebar.text(\"(Parameter list is non-exhaustive)\")\nparams = Path('parameters.txt').read_text().splitlines()\nfor param in params:\n widget_type = st.sidebar.checkbox if (type(plt.rcParams[param]) == bool) else st.sidebar.text_input\n\n if type(plt.rcParams[param]) == list: # can't put lists in text boxes -> use only first item\n plt.rcParams[param] = [widget_type(param, value=plt.rcParams[param][0])]\n else:\n plt.rcParams[param] = widget_type(param, value=plt.rcParams[param])\n\n\n# Draw plot\nfig = plot.plot_figure(style_label=style, 
n_columns=n_columns)\nst.pyplot(fig=fig)\nplt.close(fig)\n\n# Link to download stylesheet\ndef get_stylesheet_download_link(params, filename=\"my_style.mplstyle\"):\n    \"\"\"Generates a download link for a stylesheet file. https://discuss.streamlit.io/t/heres-a-download-function-that-works-for-dataframes-and-txt/4052\"\"\"\n\n    stylesheet_lines = []\n    for param in params:\n        if plt.rcParamsDefault[param] != plt.rcParams[param]: # only store parameters which were changed from the defaults.\n            if type(plt.rcParams[param]) == list:\n                value = ', '.join(plt.rcParams[param])\n            else:\n                value = plt.rcParams[param]\n            stylesheet_lines.append(f\"{param}: {value}\".replace('#', ''))\n\n    stylesheet_text = '\\n'.join(stylesheet_lines)\n    b64 = base64.b64encode(stylesheet_text.encode()).decode()\n    # anchor markup restored (the HTML tags were stripped in extraction); the instructions URL is an assumption\n    return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{filename}\">Download stylesheet</a> (<a href=\"https://matplotlib.org/tutorials/introductory/customizing.html\">Installation instructions</a>)'\n\nst.markdown(get_stylesheet_download_link(params), unsafe_allow_html=True)\n\n\n# workaround to open in wide mode (https://github.com/streamlit/streamlit/issues/314#issuecomment-579274365)\nmax_width_str = f\"max-width: 1000px;\"\nst.markdown(f\"\"\"<style>.reportview-container .main .block-container{{{max_width_str}}}</style>\"\"\", unsafe_allow_html=True)\n","repo_name":"dhaitz/matplotlib-style-configurator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"54"} +{"seq_id":"3200531118","text":"import pandas as pd\nimport numpy as np\nimport geoplotlib as gpl\nfrom geoplotlib.utils import read_csv, DataAccessObject, BoundingBox\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n\"\"\"\ndef show_geoplot\nDescription: generates and displays a map of the density of airbnb locations\nInputs: results: the Pandas Series object containing valid latitudes and longitudes\n\tmbc: the maximum bounds of the latitude and longitude of the data set\nOutputs: a display of the input data in a heat map\n\"\"\"\ndef show_geoplot(results, mbc):\n\tdata=DataAccessObject.from_dataframe(results[[\"lat\",\"lon\"]])\n\tgpl.hist(data, colorscale='sqrt', binsize=4)\n\tgpl.kde(data, bw=5, cut_below=1e-3)\n\tgpl.set_bbox(BoundingBox(mbc[0],mbc[1],mbc[2],mbc[3]))\n\tgpl.show()\n\n\"\"\"\ndef estimate_price\nDescription: gets the suggested price by latitude and longitude by taking an initial radius and weighting it the most, then\n\ttaking incrementally larger radii and giving them less and less weight as the radius increases.\n\tRadius is based on the total size of san francisco bookings, as indicated by the outer bounds of the points\nInputs: lat: the latitude query. lon: the longitude query. 
\n\tresults: the Pandas Series object containing valid latitudes, longitudes, and prices\n\tmbc: the maximum bounding coordinates of the latitude and longitude of the data set\nOutputs: the estimate for an airbnb booking at the given location\n\"\"\"\ndef estimate_price(lat, lon, results, mbc):\n\tradius=((mbc[0]-mbc[2])+(mbc[1]-mbc[3]))/128\n\tavg_price=0\n\tcount=0\n\tfor i in range(len(results[\"lon\"])):\n\t\ttry:\n\t\t\tresults[\"price\"][i]\n\t\texcept:\n\t\t\tcontinue\n\t\tprice = results[\"price\"][i][1:]\n\t\ttry:\n\t\t\ttemp=price.index(\",\")\n\t\t\tprice=float(price[:temp]+price[temp+1:])\n\t\texcept ValueError:\n\t\t\tprice=float(price)\n\t\tif ((results[\"lat\"][i]-lat)**2+(results[\"lon\"][i]-lon)**2<radius**2):\n\t\t\t# reconstructed (original block lost to markup stripping): average the prices inside the\n\t\t\t# radius; per the docstring, the original also applied decreasing weights over larger radii\n\t\t\tavg_price+=price\n\t\t\tcount+=1\n\tif count==0:\n\t\treturn 0\n\treturn round(avg_price/count, 2)\n\n\"\"\"\ndef create_price_by_neighbourhood_roomtype\nDescription: plots the average listing price grouped by neighbourhood and room type\n\t(reconstructed: the original implementation was lost to markup stripping)\nInputs: listings: the DataFrame read from listings.csv\nOutputs: a bar chart of the average prices\n\"\"\"\ndef create_price_by_neighbourhood_roomtype(listings):\n\t# assumes the standard InsideAirbnb column names: price, neighbourhood_cleansed, room_type\n\tprices=listings[\"price\"].replace(\"[$,]\", \"\", regex=True).astype(float)\n\tprices.groupby([listings[\"neighbourhood_cleansed\"], listings[\"room_type\"]]).mean().plot(kind=\"bar\")\n\tplt.show()\n\ndef main():\n\t# reconstructed: load the listings and normalize the coordinate column names used below\n\tresults=pd.read_csv(\"airbnb-sep-2017/listings.csv\").rename(columns={\"latitude\": \"lat\", \"longitude\": \"lon\"})\n\tresults_filter=(results[\"lat\"]>30)&(results[\"lon\"]>-130)&(results[\"lat\"]<50)&(results[\"lon\"]<-110)\n\tresults_filtered=results[results_filter]\n\n\tmax_bound_coords=(results_filtered[\"lat\"].max(), results_filtered[\"lon\"].max(),results_filtered[\"lat\"].min(),results_filtered[\"lon\"].min())\n\tshow_geoplot(results_filtered, max_bound_coords)\n\tcreate_price_by_neighbourhood_roomtype(pd.read_csv(\"airbnb-sep-2017/listings.csv\"))\n\tprint(\"Input a valid set of coordinates for San Francisco.\\n\"\n\t\t+\"Valid latitude coordinates are between \"+str(max_bound_coords[2])+\" and \"+str(max_bound_coords[0]))\n\tlat=float(input(\"Latitude: \"))\n\tprint(\"Valid longitude coordinates are between \"+str(max_bound_coords[3])+\" and \"+str(max_bound_coords[1]))\n\tlon=float(input(\"Longitude: \"))\n\tprint(\"The approximate price of an airbnb at these coordinates is: $\"+str(estimate_price(lat, lon, results_filtered, max_bound_coords)))\n\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"qverbeke/capitalonesummit","sub_path":"capone.py","file_name":"capone.py","file_ext":"py","file_size_in_byte":9132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43185522319","text":"\"\"\"\nBluetooth sensor JSON preparation translates \"rawjson\" and \"ready\" layers.\n\n@author Kenneth Perrine, Nadia Florez\n\"\"\"\nimport hashlib\n\nimport pandas as pd\n\nimport _setpath\nfrom atd_data_lake.support import etl_app, last_update\nfrom atd_data_lake import config\n\n# This sets up application information:\nAPP_DESCRIPTION = etl_app.AppDescription(\n    appName=\"wt_ready.py\",\n    appDescr=\"Performs JSON enrichment for Wavetronix data between the 'rawjson' and 'ready' Data Lake buckets\")\n\nclass WTReadyApp(etl_app.ETLApp):\n    \"\"\"\n    Application functions and special behavior around Wavetronix JSON final data enrichment.\n    \"\"\"\n    def __init__(self, args):\n        \"\"\"\n        Initializes application-specific variables\n        \"\"\"\n        super().__init__(\"wt\", APP_DESCRIPTION,\n                         args=args,\n                         purposeSrc=\"standardized\",\n                         purposeTgt=\"ready\",\n                         perfmetStage=\"Ready\")\n        self.unitDataProv = None\n\n    def etlActivity(self):\n        \"\"\"\n        This performs the main ETL processing.\n        \n        @return count: A general number of records processed\n        \"\"\"\n        # First, get the unit data for Wavetronix:\n        self.unitDataProv = config.createUnitDataAccessor(self.storageSrc)\n        self.unitDataProv.prepare(self.startDate, self.endDate)\n        \n        # Configure the source and target repositories and start the compare loop:\n        count = self.doCompareLoop(last_update.LastUpdStorageCatProv(self.storageSrc),\n                                   last_update.LastUpdStorageCatProv(self.storageTgt),\n                                   baseExtKey=False)\n        print(\"Records processed: %d\" % count)\n        return count\n\n    def innerLoopActivity(self, item):\n        
\"\"\"\n This is where the actual ETL activity is called for the given compare item.\n \"\"\"\n # Check for valid data files:\n if item.identifier.ext == \"unit_data.json\":\n return 0\n \n # Retrieve unit data closest to the date that we're processing:\n unitData = self.unitDataProv.retrieve(item.identifier.date)\n \n # Read in the file and call the transformation code.\n print(\"%s: %s -> %s\" % (item.label, self.storageSrc.repository, self.storageTgt.repository))\n data = self.storageSrc.retrieveJSON(item.label)\n outJSON = wtReady(unitData, data, self.processingDate)\n\n # Prepare for writing to the target:\n catalogElement = self.storageTgt.createCatalogElement(item.identifier.base, \"json\",\n item.identifier.date, self.processingDate)\n self.storageTgt.writeJSON(outJSON, catalogElement)\n\n # Performance metrics:\n self.perfmet.recordCollect(item.identifier.date, representsDay=True)\n \n return 1\n\ndef _createHash(row):\n \"\"\"\n Returns a hash that's based upon a row's contents from the data file\n \"\"\"\n toHash = str(row['device_type']) + str(row['device_name']) + str(row['device_ip']) + str(row['lat']) + str(row['lon'])\n hasher = hashlib.md5()\n hasher.update(bytes(toHash, \"utf-8\"))\n return hasher.hexdigest()\n\ndef wtReady(unitData, data, processingDate):\n \"\"\"\n Transforms Wavetronix data to \"ready\" JSON along with the unit data.\n \"\"\"\n # Step 1: Prepare header:\n header = data[\"header\"]\n header[\"processing_date\"] = str(processingDate) \n\n # Step 2: Convert the data and devices to Pandas dataframes:\n data = pd.DataFrame(data[\"data\"])\n devices = pd.DataFrame(unitData[\"devices\"])\n \n # Step 3: Tie device information to data rows:\n devices['device_id'] = devices.apply(_createHash, axis=1)\n data = data.merge(devices[['kits_id', 'device_id']],\n left_on='intID', right_on='kits_id', how='inner') \\\n .drop(columns=\"kits_id\")\n data.sort_values(by=[\"curDateTime\", \"detID\"], inplace=True)\n devices = devices[devices.device_id.isin(data.device_id.unique())]\n devices = devices.apply(lambda x: x.to_dict(), axis=1).tolist()\n \n # Step 4: Prepare the final data JSON buffer:\n data = data.apply(lambda x: x.to_dict(), axis=1).tolist()\n jsonized = {'header': header,\n 'data': data,\n 'devices': devices}\n return jsonized\n\ndef main(args=None):\n \"\"\"\n Main entry point. 
Allows for dictionary to bypass default command-line processing.\n \"\"\"\n curApp = WTReadyApp(args)\n return curApp.doMainLoop()\n\nif __name__ == \"__main__\":\n \"\"\"\n Entry-point when run from the command-line\n \"\"\"\n main()\n","repo_name":"cityofaustin/atd-data-lake","sub_path":"atd_data_lake/wt_ready.py","file_name":"wt_ready.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"18671998932","text":"\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\nimport os\nimport cv2\nimport numpy as np\nfrom time import strftime\nimport datetime as dt\nfrom numpy.lib.shape_base import split\nimport mysql.connector\nfrom Student_mangament_system import Students\nfrom Attendance import Attendance\n\n\nclass MainWindow:\n def __init__(self, master):\n self.master = master\n self.master.geometry(\"900x600\")\n self.master.title(\"Automated Attendance System\")\n self.master.iconbitmap(r\"F:\\Attendance_sys\\v2\\icon.ico\")\n # main frame\n frame = Frame(self.master, bd=2, bg=\"dark slate gray\")\n frame.place(x=0, y=0, width=900, height=600)\n\n imgframe = Frame(frame, bd=2, bg=\"dark slate gray\")\n imgframe.place(x=0, y=0, width=150, height=100)\n img_label = Label(imgframe)\n img = Image.open(r\"F:\\Attendance_sys\\v3\\img.png\")\n img = img.resize((60, 60))\n img_label.image = ImageTk.PhotoImage(img)\n img_label['image'] = img_label.image\n img_label.grid(padx=38, pady=20)\n\n\n heading = Label(frame, text=\"Automated Attendance System\", font=(\"Helvetica\", 35, \"bold\", \"underline\", \"italic\"), bg=\"light cyan\", fg=\"dark slate gray\" )\n heading.grid(row=0, column=1, padx=125, pady=23, columnspan=4)\n\n sms_btn = Button(frame, text = \"Students Management System\", relief=RIDGE, width=30, height=3,\n font=(\"Helvetica\", 10, \"bold\", \"underline\"), bg=\"light cyan\", fg=\"dark slate gray\", command=self.student)\n sms_btn.grid(row=2,column=1, padx=50, pady=50)\n\n att_btn = Button(frame, text=\"Attendance\", relief=RIDGE, width=30, height=3,command=self.attendance,\n font=(\"Helvetica\", 10, \"bold\", \"underline\"), bg=\"light cyan\", fg=\"dark slate gray\")\n att_btn.grid(row=2, column=3, padx=50, pady=50)\n\n face_recog_btn = Button(frame, text=\"Mark Attendance\", relief=RIDGE, width=30, height=3, command=self.face_recog_attendance,\n font=(\"Helvetica\", 10, \"bold\", \"underline\"), bg=\"light cyan\", fg=\"dark slate gray\")\n face_recog_btn.grid(row=3, column=1, padx=50, pady=50)\n\n photo_btn = Button(frame, text=\"Images\", relief=RIDGE, width=30, height=3,\n font=(\"Helvetica\", 10, \"bold\", \"underline\"), bg=\"light cyan\", fg=\"dark slate gray\", command=self.photo)\n photo_btn.grid(row=3, column=3, padx=50, pady=50)\n\n train_btn = Button(frame, text=\"Train Images\", relief=RIDGE, width=30, height=3, command=self.train,\n font=(\"Helvetica\", 10, \"bold\", \"underline\"), bg=\"light cyan\", fg=\"dark slate gray\")\n train_btn.grid(row=4, column=1, padx=50, pady=50)\n\n train_btn = Button(frame, text=\"Exit\", relief=RIDGE, width=30, height=3, command=quit,\n font=(\"Helvetica\", 10, \"bold\", \"underline\"), bg=\"light cyan\", fg=\"dark slate gray\")\n train_btn.grid(row=4, column=3, padx=50, pady=50)\n\n\n def student(self):\n self.win = Toplevel(self.master)\n self.st = Students(self.win)\n\n def photo(self):\n os.startfile(r\"F:\\Attendance_sys\\v3\\data\")\n\n def train(self):\n data_dir = 
(\"F:\\\\Attendance_sys\\\\v3\\\\data\")\n path = [os.path.join(data_dir, file) for file in os.listdir(data_dir)]\n faces = []\n ids = []\n for image in path:\n img = Image.open(image).convert(\"L\")\n img_arr = np.array(img, 'uint8')\n split_path = os.path.split(image)[1].split('.')\n i = int(split_path[1])\n faces.append(img_arr)\n ids.append(i)\n cv2.imshow(\"TRAINING\",img_arr)\n cv2.waitKey(1)==13\n \n ids = np.array(ids)\n clf = cv2.face.LBPHFaceRecognizer_create()\n clf.train(faces,ids)\n clf.write(\"trainig_classifiers.xml\")\n cv2.destroyAllWindows()\n messagebox.showinfo(\"Result\", \"Training Complete\")\n\n\n \n def mark_attendance(self, u_id_data, name_data, roll_no_data, course_data):\n with open(r'F:\\Attendance_sys\\v3\\attendance.csv', 'r+', newline=\"\\n\") as f:\n data_list = f.readlines()\n name_list = []\n for lines in data_list:\n entry = lines.split(',')\n name_list.append(entry[0])\n \n \n if ((name_data not in name_list) and (roll_no_data not in name_list) and (course_data not in name_list) and (u_id_data not in name_list)):\n now = dt.datetime.now()\n d1 = now.strftime('%d-%m-%Y')\n dstring = now.strftime('%H:%M:%S')\n f.writelines(f\"\\n{u_id_data}, {name_data}, {roll_no_data}, {course_data}, {dstring}, {d1}, Present\")\n\n\n\n\n\n def face_recog_attendance(self):\n def draw_boundaries(img, classifier, scalefactor, minNeighbors, color, text, clf):\n\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n features = classifier.detectMultiScale(gray_img, scalefactor, minNeighbors)\n coord = []\n\n for (x, y, w, h) in features:\n cv2.rectangle(gray_img, (x, y), (x + w, y + h), (50, 255, 2), 3)\n id, predict = clf.predict(gray_img[y:y + h, x:x + w])\n accuracy = int((100 * (1 - predict / 300)))\n\n conn = mysql.connector.connect(host=\"localhost\", username=\"root\", password=\"Root#12\",\n database=\"attendancesystem\")\n c = conn.cursor()\n\n c.execute(\"SELECT Student_Names FROM `student_details` WHERE University_Id = \" + str(id))\n name_data = c.fetchone()\n name_data = \"+\".join(name_data)\n\n c.execute(\"SELECT Roll_No FROM `student_details` WHERE University_Id = \" + str(id))\n roll_no_data = c.fetchone()\n roll_no_data = \"+\".join(roll_no_data)\n\n c.execute(\"SELECT Course FROM `student_details` WHERE University_Id = \" + str(id))\n course_data = c.fetchone()\n course_data = \"+\".join(course_data)\n\n c.execute(\"SELECT University_Id FROM `student_details` WHERE University_Id = \" + str(id))\n u_id_data = c.fetchone()\n u_id_data = \"+\".join(u_id_data)\n\n if accuracy > 90:\n cv2.rectangle(img, (x, y), (x + w, y + h), (25, 255, 10), 3)\n cv2.putText(img, f\"University ID : {u_id_data}\", (x, y - 80), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (255, 175, 35), 2)\n cv2.putText(img, f\"Name : {name_data}\", (x, y - 55), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (255, 175, 35), 2)\n cv2.putText(img, f\"Roll No. 
: {roll_no_data}\", (x, y - 30), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (255, 175, 35), 2)\n cv2.putText(img, f\"Course : {course_data}\", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (255, 175, 35), 2)\n self.mark_attendance(u_id_data, name_data, roll_no_data, course_data)\n\n else:\n cv2.rectangle(img, (x, y), (x + w, y + h), (10, 10, 255), 3)\n cv2.putText(img, \"UNKNOWN\", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (150, 50, 255), 2)\n\n coord = [x, y, w, h]\n return coord\n\n def recognize(img, clf, face_cascade):\n coordinates = draw_boundaries(img, face_cascade, 1.1, 10, (190, 130, 5), \"Face\", clf)\n return img\n\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n clf = cv2.face.LBPHFaceRecognizer_create()\n clf.read(r\"F:\\Attendance_sys\\v3\\trainig_classifiers.xml\")\n cap = cv2.VideoCapture(r\"F:\\Attendance_sys\\v3\\test\\ld.mp4\")\n while True:\n ret, frame = cap.read()\n print(\"ret status : \", ret)\n img = recognize(frame, clf, face_cascade)\n cv2.imshow(\"Face Recognition\", img)\n if cv2.waitKey(1) == 13:\n break\n cap.release()\n cv2.destroyAllWindows()\n\n def attendance(self):\n self.win = Toplevel(self.master)\n self.st = Attendance(self.win)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n \n window=Tk()\n main_win = MainWindow(window)\n window.mainloop()\n\n","repo_name":"Sahilofficial/Automated-Attendance-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1585387798","text":"# 섬의 개수\nimport sys\nfrom collections import deque\ninput=sys.stdin.readline\ndx=[0,0,1,-1,-1,1,-1,1]\ndy=[1,-1,0,0,-1,1,1,-1]\n\ndef bfs(x,y):\n q=deque()\n q.append((x,y))\n g[x][y]=0\n\n while len(q):\n x,y=q.popleft()\n for i in range(8):\n nx=x+dx[i]\n ny=y+dy[i]\n\n if nx<0 or ny<0 or nx>=h or ny>=w:\n continue\n if g[nx][ny]==1:\n g[nx][ny]=0\n q.append((nx,ny))\n\nwhile 1:\n w,h=map(int,input().split())\n if w==0 and h==0:\n break\n g=[[]*w for i in range(h)]\n cnt=0\n for i in range(h):\n g[i]=list(map(int,input().split()))\n for i in range(h):\n for j in range(w):\n if g[i][j]:\n bfs(i,j)\n cnt+=1\n print(cnt)","repo_name":"Mindlestick/CodingTest","sub_path":"5.DFS&BFS/BOJ4963.py","file_name":"BOJ4963.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74708105762","text":"#!/usr/bin/python3\n\nimport cv2\nimport sys\n\nfrom common import mosaic, KNearest, SVM, OCR\n\nfrom pregrandbux import Grandbux\n\nif __name__ == '__main__':\n# model = KNearest(k=4)\n model = SVM(C=2.67, gamma=5.383)\n\n ABC = 'abcdefghijklmnopqrstuvwxyz'\n ocr = OCR(ABC, model)\n do_load = len(sys.argv) > 1\n if do_load:\n# ocr.train('out-min', 'grandbux_svm.dat', 1.0)\n ocr.load('grandbux_svm.dat')\n samples = ocr.preprocess_hog(Grandbux(sys.argv[1]).segments(20))\n print('SOLVE: {}'.format(''.join(ocr.labels(ocr.predict(samples)))))\n else:\n print('training SVM...')\n ocr.train('out-min', 'grandbux_svm.dat')\n cv2.imshow('SVM', mosaic(25, ocr.digits))\n\n cv2.waitKey(0)\n","repo_name":"ya-mouse/opencv-captcha","sub_path":"grandbux.py","file_name":"grandbux.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18954421898","text":"\"\"\"\npython client for haipproxy\n\"\"\"\nimport 
time\nimport threading\n\nfrom ..utils import get_redis_conn\nfrom ..config.settings import (\n DATA_ALL, LOWEST_TOTAL_PROXIES)\nfrom .core import IPFetcherMixin\n\n__all__ = ['ProxyFetcher']\n\nlock = threading.RLock()\n\n\nclass Strategy:\n strategy = None\n\n def check(self, strategy):\n return self.strategy == strategy\n\n def get_proxies_by_stragery(self, pool):\n \"\"\"\n :param pool: pool is a list, which is mutable\n :return:\n \"\"\"\n raise NotImplementedError\n\n def process_feedback(self, pool, res, proxy, **kwargs):\n \"\"\"\n :param pool: ProxyFetcher's pool\n :param res: success or failure\n :param proxy: proxy ip\n :param kwargs: response time or expected response time\n :return: None\n \"\"\"\n raise NotImplementedError\n\n\nclass RobinStrategy(Strategy):\n def __init__(self):\n super().__init__()\n self.strategy = 'robin'\n\n def get_proxies_by_stragery(self, pool):\n if not pool:\n return None\n\n proxy = pool.pop(0)\n pool.append(proxy)\n return proxy\n\n def process_feedback(self, pool, res, proxy, **kwargs):\n if res == 'failure':\n if pool[-1] == proxy:\n with lock:\n if pool[-1] == proxy:\n pool.pop()\n return\n\n\nclass GreedyStrategy(Strategy):\n def __init__(self):\n self.strategy = 'greedy'\n\n def get_proxies_by_stragery(self, pool):\n if not pool:\n return None\n return pool[0]\n\n def process_feedback(self, pool, res, proxy, **kwargs):\n if res == 'failure':\n if pool[0] == proxy:\n with lock:\n if pool[0] == proxy:\n pool.pop(0)\n return\n expected_time = kwargs.get('expected')\n real_time = kwargs.get('real')\n if expected_time * 1000 < real_time:\n pool.pop(0)\n pool.append(proxy)\n\n\nclass ProxyFetcher(IPFetcherMixin):\n def __init__(self, usage, strategy='robin', fast_response=5, redis_args=None):\n \"\"\"\n :param usage: one of SCORE_MAPS's keys, such as https\n you must refresh pool\n :param strategy: the load balance of proxy ip, the value is\n one of ['robin', 'greedy']\n :param fast_response: if you use greedy strategy, it will be needed to\n decide whether a proxy ip should continue to be used\n :param redis_args: redis connetion args, it's a dict, whose keys\n include host, port, db and password\n \"\"\"\n # if there are multi parent classes, super is only used for the first parent according to MRO\n super().__init__(usage)\n self.strategy = strategy\n # pool is a queue, which is FIFO\n self.pool = list()\n self.fast_response = fast_response\n self.handlers = [RobinStrategy(), GreedyStrategy()]\n if isinstance(redis_args, dict):\n self.conn = get_redis_conn(**redis_args)\n else:\n self.conn = get_redis_conn()\n t = threading.Thread(target=self._refresh_periodically)\n t.setDaemon(True)\n t.start()\n\n def get_proxy(self):\n \"\"\"\n get one available proxy from redis, if there's none, None is returned\n :return:\n \"\"\"\n proxy = None\n self.refresh()\n for handler in self.handlers:\n if handler.strategy == self.strategy:\n proxy = handler.get_proxies_by_stragery(self.pool)\n return proxy\n\n def get_proxies(self):\n # the older proxies will not be dropped\n proxies = self.get_available_proxies(self.conn)\n # client_logger.info('{} proxies have been fetched'.format(len(proxies)))\n print('{} proxies have been fetched'.format(len(proxies)))\n self.pool.extend(proxies)\n return self.pool\n\n def proxy_feedback(self, res, proxy, response_time=None):\n \"\"\"\n client should give feedbacks after executing get_proxy()\n :param res: value of 'success' or 'failure'\n :param proxy: proxy ip\n :param response_time: the response time using current proxy 
ip\n        \"\"\"\n        for handler in self.handlers:\n            if handler.strategy == self.strategy:\n                handler.process_feedback(self.pool, res,\n                                         proxy, real=response_time,\n                                         expected=self.fast_response)\n\n    def refresh(self):\n        if len(self.pool) < LOWEST_TOTAL_PROXIES:\n            self.get_proxies()\n\n    def delete_proxy(self, proxy):\n        pipe = self.conn.pipeline()\n        pipe.srem(DATA_ALL, proxy)\n        pipe.zrem(self.score_queue, proxy)\n        pipe.zrem(self.speed_queue, proxy)\n        pipe.zrem(self.ttl_queue, proxy)\n        pipe.execute()\n\n    def _refresh_periodically(self):\n        \"\"\"refresh self.pool periodically, checking rate is 5 times/second (one check every 0.2 s)\"\"\"\n        while True:\n            if len(self.pool) < int(2 * LOWEST_TOTAL_PROXIES):\n                self.get_proxies()\n            time.sleep(0.2)\n","repo_name":"picone/CloudMusicSimilarMan","sub_path":"NeteaseCloudMusicSpider/haipproxy/client/py_cli.py","file_name":"py_cli.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"54"} +{"seq_id":"40331026197","text":"\"\"\"\nGiven an array of integers nums which is sorted in ascending order, and an integer target, write a function to search target in nums. If target exists, then return its index. Otherwise, return -1.\n\nYou must write an algorithm with O(log n) runtime complexity.\n\n\nExample 1:\n\nInput: nums = [-1,0,3,5,9,12], target = 9\nOutput: 4\nExplanation: 9 exists in nums and its index is 4\nExample 2:\n\nInput: nums = [-1,0,3,5,9,12], target = 2\nOutput: -1\nExplanation: 2 does not exist in nums so return -1\n\nConstraints:\n\n1 <= nums.length <= 10^4\n-10^4 < nums[i], target < 10^4\nAll the integers in nums are unique.\nnums is sorted in ascending order.\n\nAnswer: Use binary search to find the target. If the target is found, return the index of the target. If the target is not found, return -1.\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def search(self, nums: List[int], target: int) -> int:\n        \"\"\"\n        Algorithm:\n        1. Use binary search to find the target.\n        2. If the target is found, return the index of the target.\n        3. 
If the target is not found, return -1.\n Pattern: Binary Search\n Time Complexity: O(log n)\n Space Complexity: O(1)\n \"\"\"\n # Initialize the left and right indices\n left = 0\n right = len(nums) - 1\n\n # While the left index is less than or equal to the right index\n # Equal is because if there only one element in the array, the left and right indices will be the same\n while left <= right:\n # Calculate the middle index\n middle = (left + right) // 2\n\n # If the middle index is equal to the target\n if nums[middle] == target:\n # Return the middle index\n return middle\n\n # If the middle index is less than the target\n elif nums[middle] < target:\n # Set the left index to the middle index plus 1\n left = middle + 1\n\n # If the middle index is greater than the target\n else:\n # Set the right index to the middle index minus 1\n right = middle - 1\n\n # Return -1\n return -1\n","repo_name":"Joel-hanson/DSA-with-copilot","sub_path":"Binary Search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13747136105","text":"import os\nfrom app import app\nfrom flask import request, jsonify, send_file\nfrom Common.constants import ALLOWED_EXTENSIONS, SERVICE_FOR_DOWNLOAD\nfrom Common.components import file_controller, validator, file_utils\n\n\n@app.route('/api/file-upload', methods=['POST'])\ndef upload_file():\n app.logger.info(\"POST Method: file-upload was triggered.\\n Request: '{}';\\n Headers: '{}';\\n Files: '{}'\"\n .format(request, str(request.headers).strip().strip(), request.files))\n \n if 'file' not in request.files:\n response = jsonify({'message': 'No file part in the request'})\n response.status_code = 400\n return response\n\n file = request.files['file']\n \n if file.filename == '':\n response = jsonify({'message': 'No file selected for uploading'})\n response.status_code = 400\n return response\n elif file and validator.allowed_file(file_utils.extract_file_extension(file.filename)):\n requester_ip = request.remote_addr\n file_status = file_controller.upload(file, requester_ip)\n\n response = jsonify({'message': file_status})\n response.status_code = 200\n return response\n else:\n message = 'Allowed file types are: [{}]'.format(', '.join(ext for ext in ALLOWED_EXTENSIONS))\n\n response = jsonify({'message': message})\n response.status_code = 400\n return response\n\n\n@app.route('/api/url-submit', methods=['POST'])\ndef submit_url():\n app.logger.info(\"POST Method: url-submit was triggered.\\n Request: '{}';\\n Headers: '{}';\\n \"\n .format(request, str(request.headers).strip().strip(), ))\n\n submitted_url = request.form.get('url')\n if submitted_url is None:\n response = jsonify({'message': \"No URL found.\"})\n response.status_code = 400\n return response\n if not validator.validate_url(submitted_url):\n response = jsonify({'message': \"URL is not valid.\"})\n response.status_code = 400\n return response\n if not validator.allowed_file(file_utils.get_file_type(submitted_url)):\n message = 'Allowed file types are: [{}]'.format(', '.join(ext for ext in ALLOWED_EXTENSIONS))\n\n response = jsonify({'message': message})\n response.status_code = 400\n return response\n else:\n requester_ip = request.remote_addr\n file_status = file_controller.submit(submitted_url, requester_ip)\n\n response = jsonify({'message': file_status})\n response.status_code = 200\n return response\n\n\n@app.route('/api/feed', methods=['GET'])\ndef 
get_feed():\n    app.logger.info(\"GET Method: feed was triggered.\")\n    feed_events = file_controller.get_feed()\n\n    response = jsonify({'feed': feed_events})\n    response.status_code = 200\n    return response\n\n\n@app.route('/api/analyzed-files', methods=['GET'])\ndef get_files():\n    app.logger.info(\"GET Method: analyzed-files was triggered.\")\n    files = file_controller.get_all_analyzed_files()\n\n    response = jsonify({'files': files})\n    response.status_code = 200\n    return response\n\n\n@app.route('/api/search-files', methods=['GET'])\ndef get_file_by_name():\n    app.logger.info(\"GET Method: search-files was triggered.\")\n\n    search_query = request.args.get('search_query')\n    if search_query:\n        result = file_controller.search_by_query(search_query)\n\n        if result is None:\n            response = jsonify({'result': \"Invalid query\"})\n            response.status_code = 200\n        else:\n            response = jsonify({'result': result})\n            response.status_code = 200\n        return response\n    else:\n        response = jsonify({\"message\": \"Request does not contain filename parameter.\"})\n        response.status_code = 400\n        return response\n\n\n@app.route('/api/extwatcher-service', methods=['GET'])\ndef download_service():\n    path = os.path.join(app.config['DOWNLOAD_FOLDER'], SERVICE_FOR_DOWNLOAD)\n    return send_file(path, as_attachment=True)\n\n\nif __name__ == \"__main__\":\n    # app.run(debug=True)\n    app.run(host='0.0.0.0', debug=False)\n","repo_name":"viorelyo/ExtWatcher","sub_path":"AnalyzeServer/API/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41287307679","text":"\n\nfrom random import randrange\nfrom turtle import *\n\nfrom freegames import square, vector\n\nfood = vector(0, 0)\nsnake = [vector(10, 0)]\naim = vector(0, -10)\ncolor = 'green'\n\ndef change(x, y):\n    \"\"\"Change snake direction.\"\"\"\n    aim.x = x\n    aim.y = y\n\n\ndef inside(head):\n    \"\"\"Return True if head inside boundaries.\"\"\"\n    return -200 < head.x < 190 and -200 < head.y < 190\n\ndef color3():\n    \"\"\"Return a random body color.\"\"\"\n    randColor = randrange(1, 7)  # draws 1-6; the exclusive upper bound must be 7 for the else branch to be reachable\n    color = 'black'\n    if randColor == 1:\n        color = 'black'\n    elif randColor == 2:\n        color = 'pink'\n    elif randColor == 3:\n        color = 'gray'\n    elif randColor == 4:\n        color = 'cyan'\n    elif randColor == 5:\n        color = 'lightgreen'\n    else:\n        color = 'skyblue'\n    return color\n\ndef move():\n    \"\"\"Move snake forward one segment.\"\"\"\n    head = snake[-1].copy()\n    head.move(aim)\n\n    if not inside(head) or head in snake:\n        square(head.x, head.y, 9, 'red')\n        update()\n        return\n\n    snake.append(head)\n\n    global color\n    if head == food:\n        print('Snake:', len(snake))\n        food.x = randrange(-15, 15) * 10\n        food.y = randrange(-15, 15) * 10\n\n        randColor = randrange(1, 7)  # draws 1-6 so the 'blue' else branch can be hit\n        if randColor == 1:\n            color = 'green'\n        elif randColor == 2:\n            color = 'purple'\n        elif randColor == 3:\n            color = 'orange'\n        elif randColor == 4:\n            color = 'brown'\n        elif randColor == 5:\n            color = 'yellow'\n        else:\n            color = 'blue'\n        print(color)\n    else:\n        snake.pop(0)\n\n    clear()\n\n    randNum = randrange(12)\n    if randNum == 0:\n        food.x -= 10\n    elif randrange(100) == 1:\n        food.x += 10\n    elif randrange(10) == 2:\n        food.y -= 10\n    elif randrange(100) == 3:\n        food.y += 10\n\n    for body in snake:\n        square(body.x, body.y, 9, color3())\n\n    square(food.x, food.y, 9, color)\n    update()\n    ontimer(move, 100)\n\n\nsetup(420, 420, 370, 0)\nhideturtle()\ntracer(False)\nlisten()\nonkey(lambda: change(10, 0), 'Right')\nonkey(lambda: change(-10, 0), 
'Left')\nonkey(lambda: change(0, 10), 'Up')\nonkey(lambda: change(0, -10), 'Down')\nmove()\ndone()\n","repo_name":"bc144/snake","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5029109787","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom qtpy.QtWidgets import QApplication, QMainWindow, QLabel\nfrom qtpy import QtCore\n\n\ndef main():\n # create the application\n app = QApplication(sys.argv)\n # create the main window\n window = QMainWindow()\n # create a label to hold our text\n label = QLabel(text=\"hello world!\", )\n # set the label to align in the center\n label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)\n # add our label to the central widget of the window\n window.setCentralWidget(label)\n # display the main window\n window.show()\n # start the Qt main loop execution\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n # execute the main function\n main()\n","repo_name":"bremme/QtPyWidgets","sub_path":"examples/functional/helloworld/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38395216057","text":"class TreeNode:\n def __init__(self, data, children=[]):\n self.data = data\n self.children = children\n\n def __str__(self, level=0):\n ret = \" \" * level + str(self.data) + \"\\n\"\n for i in self.children:\n ret += i.__str__(level + 1)\n return ret\n\n def addChild(self, TreeNode):\n self.children.append(TreeNode)\n\n\nowner = TreeNode('boss', [])\n\nCEO1 = TreeNode('CEO1', [])\nCEO2 = TreeNode('CEO2', [])\nowner.addChild(CEO1)\nowner.addChild(CEO2)\n\nemployer11 = TreeNode('employer11', [])\nemployer12 = TreeNode('employer12', [])\nemployer13 = TreeNode('employer13', [])\nCEO1.addChild(employer11)\nCEO1.addChild(employer12)\nCEO1.addChild(employer13)\n\nemployer21 = TreeNode('employer21', [])\nemployer22 = TreeNode('employer22', [])\nCEO2.addChild(employer21)\nCEO2.addChild(employer22)\n\nemployee11 = TreeNode('employee11', [])\nemployer11.addChild(employee11)\nprint(owner)\n","repo_name":"reza-nikzad/My-LeetCode-repo","sub_path":"Practice_UdemyCourse/Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24270410662","text":"import webapp2\nfrom handlers.mainhandler import MainHandler\nfrom models.blog import BlogPost\nfrom handlers.postpage import PostPage\n\nfrom handlers.login import Login, Logout\nfrom handlers.bloghandler import NewPost, EditPost, DeletePost, \\\n LikePost, UnlikePost, CommentPost, EditComment, DeleteComment\nfrom handlers.userregister import UserRegister\n\n\n# Main page displays all the posts\nclass MainPage(MainHandler):\n def get(self):\n posts = BlogPost.query().order(-BlogPost.created).fetch()\n self.render('base.html', posts=posts, user=self.user)\n\n\napp = webapp2.WSGIApplication \\\n ([('/', MainPage),\n ('/register', UserRegister),\n ('/login', Login),\n ('/logout', Logout),\n ('/blog/?', MainPage),\n ('/blog/([0-9]+)', PostPage),\n ('/blog/newpost', NewPost),\n ('/blog/([0-9]+)/edit', EditPost),\n ('/blog/([0-9]+)/delete', DeletePost),\n ('/blog/([0-9]+)/like', LikePost),\n ('/blog/([0-9]+)/unlike', UnlikePost),\n ('/blog/([0-9]+)/comment', CommentPost),\n 
('/blog/([0-9]+)/([0-9]+)/edit', EditComment),\n      ('/blog/([0-9]+)/([0-9]+)/delete', DeleteComment),\n      ],\n     debug=True)\n","repo_name":"daikikuchi/Multi-User-Blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20039750818","text":"from django.urls import path, include\nfrom rest_framework.routers import SimpleRouter\n\nfrom mainapp import apis, views\n\napp_name = \"mainapp\"\n\nrouter = SimpleRouter()\nrouter.register(\"clients\", apis.ClientViewSet, \"apis-clients\")\nrouter.register(\"clients/phones\", apis.PhoneNumberViewSet, \"apis-clients-phones\")\nrouter.register(\"clients/addresses\", apis.AddressViewSet, \"apis-clients-addresses\")\nrouter.register(\"compositions\", apis.CompositionViewSet, \"apis-compositions\")\nrouter.register(\"orders\", apis.OrderViewSet, \"apis-orders\")\nrouter.register(\"orders/lines\", apis.OrderLineViewSet, \"apis-orders-lines\")\n\nurlpatterns = [\n    path(\"\", views.Dashboard.as_view(), name=\"dashboard\"),\n\n    path(\"login/\", views.LoginView.as_view(), name=\"login\"),\n    path(\"logout/\", views.logout_view, name=\"logout\"),\n\n    path(\"clients/list/\", views.ClientListView.as_view(), name=\"clients-list\"),\n    path(\"clients/<int:pk>/delete/\", views.ClientDeleteView.as_view(), name=\"client-delete\"),\n    path(\"clients/<int:pk>/details/\", views.ClientDetailView.as_view(), name=\"client-details\"),\n\n    path(\"menus/create/\", views.MenuCreateView.as_view(), name=\"menu-create\"),\n    path(\"menus/list/\", views.MenuListView.as_view(), name=\"menus-list\"),\n    path(\"menus/<int:pk>/delete/\", views.MenuDeleteView.as_view(), name=\"menu-delete\"),\n    path(\"menus/<int:pk>/details/\", views.MenuDetailView.as_view(), name=\"menu-details\"),\n    path(\"menus/<int:pk>/update/\", views.MenuUpdateView.as_view(), name=\"menu-update\"),\n\n    path(\"foods/create/\", views.FoodCreateView.as_view(), name=\"food-create\"),\n    path(\"foods/list/\", views.FoodListView.as_view(), name=\"foods-list\"),\n    path(\"foods/<int:pk>/delete/\", views.FoodDeleteView.as_view(), name=\"food-delete\"),\n    path(\"foods/<int:pk>/details/\", views.FoodDetailView.as_view(), name=\"food-details\"),\n    path(\"foods/<int:pk>/update/\", views.FoodUpdateView.as_view(), name=\"food-update\"),\n\n    path(\"extras/create/\", views.ExtraCreateView.as_view(), name=\"extra-create\"),\n    path(\"extras/list/\", views.ExtraListView.as_view(), name=\"extras-list\"),\n    path(\"extras/<int:pk>/delete/\", views.ExtraDeleteView.as_view(), name=\"extra-delete\"),\n    path(\"extras/<int:pk>/details/\", views.ExtraDetailView.as_view(), name=\"extra-details\"),\n    path(\"extras/<int:pk>/update/\", views.ExtraUpdateView.as_view(), name=\"extra-update\"),\n\n    path(\"orders/list/\", views.OrderListView.as_view(), name=\"orders-list\"),\n    path(\"orders/<int:pk>/delete/\", views.OrderDeleteView.as_view(), name=\"order-delete\"),\n    path(\"orders/<int:pk>/details/\", views.OrderDetailView.as_view(), name=\"order-details\"),\n    path(\"orders/<int:pk>/update/\", views.OrderUpdateView.as_view(), name=\"order-update\"),\n\n    path(\"apis/login/\", apis.LoginApi.as_view()),\n    path(\"apis/extras/\", apis.ExtrasListView.as_view()),\n    path(\"apis/menus/\", apis.MenuListView.as_view()),\n    path(\"apis/foods/\", apis.FoodListView.as_view()),\n    path(\"apis/cities/\", apis.CityListView.as_view()),\n    path(\"apis/\", 
include(router.urls))\n]\n","repo_name":"Simouche/zetraiteur-backend","sub_path":"mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35557960071","text":"import numpy\n\n\nclass Mesh(object):\n    \"\"\"\n    Build a quadrilateral structured unit square mesh.\n    \"\"\"\n\n    def __init__(self, nx: int, ny: int):\n        \"\"\"\n        Parameters\n        ----------\n        nx\n            The number of cells in the x direction.\n        ny\n            The number of cells in the y direction.\n        \"\"\"\n\n        # Only two dimensional quadrilateral meshes supported\n        self.reference_cell = ReferenceQuadrilateral()\n        self.num_vertices = (nx + 1) * (ny + 1)\n        self.num_cells = nx * ny\n\n        # Create meshgrid and use matrix indexing 'ij' to\n        # simplify topology computation and dofmap\n        x = numpy.linspace(0, 1, nx + 1)\n        y = numpy.linspace(0, 1, ny + 1)\n        grid = numpy.array(numpy.meshgrid(x, y, indexing=\"ij\")).transpose()\n\n        # Compute coordinates of all nodes in the mesh\n        self.vertices = grid.reshape((nx + 1) * (ny + 1), 2)\n\n        # Compute cells - cell-vertex connections\n        # (numpy.int was removed from NumPy; the built-in int is the equivalent dtype)\n        self.cells = numpy.zeros((self.num_cells, 4), dtype=int)\n        self._topology_computation(nx)\n\n        # Store nx and ny to simplify plotting\n        self.nx = nx\n        self.ny = ny\n\n    def _topology_computation(self, nx: int):\n        \"\"\"\n        Compute cell-vertex connections.\n        \"\"\"\n        for cell in range(self.num_cells):\n            line = cell // nx\n            rem = cell % nx\n            self.cells[cell] = [\n                line * (nx + 1) + rem,\n                line * (nx + 1) + rem + 1,\n                (line + 1) * (nx + 1) + rem,\n                (line + 1) * (nx + 1) + rem + 1,\n            ]\n\n    def jacobian(self, i):\n        \"\"\"\n        Return the jacobian matrix for cell i.\n        Note: since the mesh is structured, the jacobian is the\n        same constant diagonal matrix for every cell.\n        \"\"\"\n        local_vert = self.vertices[self.cells[i]]\n        dx = local_vert[1, 0] - local_vert[0, 0]\n        dy = local_vert[2, 1] - local_vert[0, 1]\n\n        jacobian = numpy.array([[dx, 0], [0, dy]])\n        return jacobian\n\n    def area(self, i: int):\n        \"\"\"\n        Return the area of cell i.\n        Note: since the mesh is structured all elements have the same area.\n        \"\"\"\n        return numpy.linalg.det(self.jacobian(i))\n\n\nclass ReferenceQuadrilateral:\n    \"\"\"\n    Reference quadrilateral with defined vertices and topology.\n    \"\"\"\n\n    def __init__(self):\n        self.dim = 2\n        self.num_vertices = 4\n        self.num_facets = 4\n        self.coordinates = numpy.array(\n            [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])\n        self.topology = numpy.array([0, 1, 2, 3])\n","repo_name":"IgorBaratta/simple_fem","sub_path":"simple_fem/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"15863100803","text":"from mxnet import nd\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\n\nfrom utils import process_one_seq\nfrom mxnet.gluon import loss as gloss\n\n\nclass Dataset(object):\n    def __init__(self, x, y, index, default, pad, eos, no_mean, max_seq, test_size=0.4):\n        \"\"\"\n        Simple object to process the input dataset for training\n        :param x: list of (list of tokens)\n        :param y: list of (list of tag)\n        :param index: token index\n        :param default: default value in index\n        :param pad: pad token in index\n        :param eos: eos token in index\n        :param no_mean: the value in Y that carries no meaning\n        :param max_seq: max size available in X & Y\n        \"\"\"\n        self.seq_length = max_seq\n        x = [[index.get(i, 
index[default]) for i in j] for j in x]\n        x = [process_one_seq(i, max_seq, index[pad], EOS=index[eos]) for i in x]\n        self.train_x, self.test_x, self.train_y, self.test_y = train_test_split(\n            x, y, test_size=test_size, shuffle=True)\n        self.test_x, self.valid_x, self.test_y, self.valid_y = train_test_split(\n            self.test_x, self.test_y, test_size=0.5, shuffle=True)\n        print('Train/Valid : %d/%d' % (len(self.train_y), len(self.test_y) * 2))\n        print('Sentence max words: {shape}'.format(shape=self.seq_length))\n\n    def __str__(self):\n        return \"Training: {}, Testing: {}, Sequences Length: {}\".format(\n            len(self.train_x), len(self.test_x), self.seq_length)\n\n\ndef evaluate(model, dataIterator, ctx, pad=None):\n    \"\"\"\n    The evaluation function\n    :param model: model object\n    :param dataIterator: data iterator in mxnet\n    :param ctx: context\n    :param pad: Int\n        padding id\n    :return: average loss and accuracy\n    \"\"\"\n    loss = gloss.SoftmaxCrossEntropyLoss(sparse_label=False)\n    if pad is not None:\n        mask = True\n    else:\n        mask = False\n    dataIterator.reset()\n    total_loss = 0.0\n    total_sample_num = 0\n    y_pred, y_true = [], []\n    for i, batch in enumerate(dataIterator):\n        x = batch.data[0].as_in_context(ctx)\n        y = batch.data[1].as_in_context(ctx)\n        if mask:\n            _mask = nd.not_equal(x, pad)\n            pred = model(_mask)\n        else:\n            pred = model(x)\n        bl = loss(pred, nd.one_hot(y, 238)).as_in_context(ctx)\n        # accumulate (not overwrite) so avg_L is averaged over every sample, not just the last batch\n        total_sample_num += x.shape[0]\n        total_loss += nd.sum(bl).asscalar()\n        pred = nd.argmax(pred, axis=1)\n        y_pred.extend(pred.asnumpy().tolist())\n        y_true.extend(y.asnumpy().tolist())\n    acc = metrics.accuracy_score(y_pred, y_true)\n#    f1 = metrics.f1_score(y_pred, y_true, average='macro')\n    avg_L = total_loss / float(total_sample_num)\n#    if report:\n#        return avg_L, acc, f1, metrics.classification_report(y_true, y_pred)\n#    else:\n#        return avg_L, acc, f1\n    return avg_L, acc","repo_name":"hufei-neo/clf_mxnet","sub_path":"train_clfs.py","file_name":"train_clfs.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40440312767","text":"import os\nimport unittest\nfrom typing import Iterator, Iterable, List\n\nfrom forte.data.readers import OpenIEReader\nfrom forte.data.data_pack import DataPack\nfrom forte.pipeline import Pipeline\nfrom ft.onto.base_ontology import Sentence, PredicateMention, Document, \\\n    PredicateArgument, PredicateLink, Token\n\n\nclass OpenIEReaderTest(unittest.TestCase):\n\n    def setUp(self):\n        # Define and configure the pipeline.\n        self.dataset_path: str = os.path.abspath(os.path.join(\n            os.path.dirname(os.path.realpath(__file__)),\n            *([os.path.pardir] * 4),\n            'data_samples/openie'))\n\n        self.pipeline: Pipeline = Pipeline[DataPack]()\n        self.reader: OpenIEReader = OpenIEReader()\n        self.pipeline.set_reader(self.reader)\n        self.pipeline.initialize()\n\n    def test_process_next(self):\n        data_packs: Iterable[DataPack] = self.pipeline.process_dataset(\n            self.dataset_path)\n        file_paths: Iterator[str] = self.reader._collect(self.dataset_path)\n\n        count_packs: int = 0\n\n        for pack, file_path in zip(data_packs, file_paths):\n            count_packs += 1\n            expected_doc: str = \"\"\n            with open(file_path, \"r\", encoding=\"utf8\", errors='ignore') as file:\n                expected_doc = file.read()\n\n            # Test document.\n            actual_docs: List[Document] = list(pack.get(Document))\n            self.assertEqual(len(actual_docs), 1)\n            actual_doc: Document = actual_docs[0]\n            
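# the reader flattens the document text: tabs and newlines become single spaces, with one trailing space\n            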
self.assertEqual(actual_doc.text,\n                             expected_doc.replace('\t', ' ').replace('\n', ' ')\n                             + ' ')\n\n            lines: List[str] = expected_doc.split('\n')\n            actual_sentences: Iterator[Sentence] = pack.get(Sentence)\n            actual_predicates: Iterator[PredicateMention] = \\\n                pack.get(PredicateMention)\n            actual_args: Iterator[PredicateArgument] = \\\n                pack.get(PredicateArgument)\n            # Force sorting as Link entries have no order when retrieving from\n            # data pack.\n            actual_link_ids: Iterator[int] = \\\n                iter(sorted(pack.get_ids_by_type(PredicateLink)))\n\n            for line, actual_sentence, actual_full_predicate in \\\n                    zip(lines, actual_sentences, actual_predicates):\n                line: str = line.strip()\n                line: List[str] = line.split('\t')\n\n                # Test sentence.\n                expected_sentence: str = line[0]\n                self.assertEqual(actual_sentence.text, expected_sentence)\n\n                # Test head predicate.\n                actual_head_predicate: Token = actual_full_predicate.headword\n                expected_head_predicate: str = line[1]\n                self.assertEqual(actual_head_predicate.text,\n                                 expected_head_predicate)\n\n                # Test full predicate.\n                expected_full_predicate: str = line[2]\n                self.assertEqual(actual_full_predicate.text,\n                                 expected_full_predicate)\n\n                # Test argument.\n                for expected_arg in line[3:]:\n                    actual_arg: PredicateArgument = next(actual_args)\n                    self.assertEqual(actual_arg.text, expected_arg)\n\n                    # Test predicate relation link.\n                    actual_link: PredicateLink = \\\n                        pack.get_entry(next(actual_link_ids))\n                    self.assertEqual(actual_link.get_parent().text,\n                                     expected_full_predicate)\n                    self.assertEqual(actual_link.get_child().text, expected_arg)\n\n        self.assertEqual(count_packs, 1)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"miis-model-interface/forte","sub_path":"tests/forte/data/readers/openie_reader_test.py","file_name":"openie_reader_test.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31156770378","text":"from collections import OrderedDict\n\nfrom skopt import Optimizer\nfrom skopt.space import Categorical, Integer, Dimension, Real\nimport numpy as np\nimport itertools as it\n\nclass HyperParamSearch:\n\n    def __init__(self, space: [Dimension]):\n        self.space = space\n        self._space_names = [dim.name for dim in space]\n\n        self.train_eval = []\n        self.validation_eval = []\n        self.test_eval = []\n        self.x = []\n        self.y = []\n        self.model_dir = []\n\n    def tell(self, args, train_eval, validation_eval, test_eval, model_dir):\n        self.train_eval.append(train_eval)\n        self.validation_eval.append(validation_eval)\n        self.test_eval.append(test_eval)\n        self.x.append(args)\n        self.y.append(-validation_eval[\"ll_mean\"])\n        self.model_dir.append(model_dir)\n\n    def ask(self):\n        raise NotImplementedError\n\n    @property\n    def best(self):\n        res = OrderedDict()\n        res[\"best\"] = min(self.y)\n        params = self.x[np.argmin(self.y)]\n        for i, dim in enumerate(self.space):\n            res[dim.name] = params[i]\n\n        return res\n\n    @property\n    def num_samples(self):\n        raise NotImplementedError\n\n    @property\n    def space_names(self) -> [str]:\n        return self._space_names\n\n    @property\n    def done(self):\n        return len(self.x)\n\n    def to_named_params(self, x):\n        args = {}\n        for i, item in enumerate(self.space):\n            if isinstance(x[i], np.int64):\n                x[i] = int(x[i])\n            args[item.name] = x[i]\n\n        return args\n\n    @property\n    def results(self):\n        results = []\n        for i, y in enumerate(self.y):\n            res = OrderedDict()\n            x = OrderedDict()\n            res['y'] = y\n            res['x'] = x\n            
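# attach the per-split metrics and the directory where this run's model was saved\n            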
res['train'] = self.train_eval[i]\n            res['validation'] = self.validation_eval[i]\n            res['test'] = self.test_eval[i]\n            res['model_dir'] = self.model_dir[i]\n            params = self.x[i]\n            for j, dim in enumerate(self.space):\n                x[dim.name] = params[j]\n\n            results.append(res)\n\n        return results\n\n    @property\n    def state(self):\n        state = OrderedDict()\n        state[\"class\"]=self.__class__.__name__\n        state[\"space\"] = []\n        for dim in self.space:\n            dim_dict = OrderedDict()\n            dim_dict['name']=dim.name\n            dim_dict['type']=dim.__class__.__name__\n            if isinstance(dim.bounds,tuple):\n                dim_dict['bounds']=list(dim.bounds)\n            state[\"space\"].append(dim_dict)\n        return state\n\n    @staticmethod\n    def from_state(meta):\n        clazz = meta[\"class\"]\n        spaces = []\n        for space in meta[\"space\"]:\n            name = space['name']\n            dim_type = space['type']\n            bounds = space['bounds']\n            if dim_type == 'Categorical':\n                spaces.append(Categorical(name=name, categories=bounds))\n            else:\n                raise NotImplementedError(dim_type)\n\n        if clazz == \"GridSearch\":\n            return GridSearch(spaces)\n        else:\n            raise NotImplementedError(clazz)\n\nclass GridSearch(HyperParamSearch):\n\n    def __init__(self, space):\n        super().__init__(space)\n        if not np.all([isinstance(dim, Categorical) or isinstance(dim, Integer) for dim in space]):\n            raise ValueError('All dimensions must be Categorical or Integer')\n\n        all_list_names = []\n        all_lists = []\n        self._num_samples = 1\n        for dim in space:\n            if isinstance(dim, Categorical):\n                vals = dim.categories\n            elif isinstance(dim, Integer):\n                vals = np.arange(dim.low, dim.high + 1)\n            else:\n                raise ValueError(\"Not supported space: %s\" % str(dim))\n\n            all_lists.append(vals)\n            all_list_names.append(dim.name)\n\n            self._num_samples *= len(vals)\n\n        self._ask = it.product(*all_lists)\n\n\n    def ask(self):\n        point = next(self._ask)\n        while point in self.x:\n            point = next(self._ask)\n        return point\n\n    @property\n    def num_samples(self):\n        return self._num_samples\n\n\nclass GPOptimizer(HyperParamSearch):\n    def __init__(self, space, samples, random_state=1):\n        super().__init__(space)\n        self._num_samples = samples\n        self.optimizer = Optimizer(dimensions=space, random_state=random_state, base_estimator=\"GP\", acq_optimizer=\"auto\",\n                                   n_initial_points=10)\n        self.asked = 0\n\n    def tell(self, args, train_eval, validation_eval, test_eval, model_dir):\n        super().tell(args, train_eval, validation_eval, test_eval, model_dir)\n        self.optimizer.tell(args, -validation_eval[\"ll_mean\"])\n\n    def ask(self):\n        self.asked += 1\n        if self.asked <= self._num_samples:\n            return self.optimizer.ask()\n        else:\n            raise StopIteration\n\n    @property\n    def num_samples(self):\n        return self._num_samples\n","repo_name":"pawelc/NeuralLikelihoods","sub_path":"code/experiment/hyper_param_opt.py","file_name":"hyper_param_opt.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"2961164090","text":"import comfy\r\nimport nodes\r\nimport numpy as np\r\nimport torch\r\n\r\nclass RegionalPromptSimple:\r\n    @classmethod\r\n    def INPUT_TYPES(s):\r\n        return {\r\n            \"required\": {\r\n                \"basic_pipe\": (\"BASIC_PIPE\",),\r\n                \"mask\": (\"MASK\",),\r\n                \"cfg\": (\"FLOAT\", {\"default\": 8.0, \"min\": 0.0, \"max\": 100.0}),\r\n                \"sampler_name\": (comfy.samplers.KSampler.SAMPLERS,),\r\n                \"scheduler\": (comfy.samplers.KSampler.SCHEDULERS,),\r\n                \"wildcard_prompt\": (\"STRING\", {\"multiline\": True, \"dynamicPrompts\": False, \"placeholder\": \"wildcard prompt\"}),\r\n            },\r\n        }\r\n\r\n    RETURN_TYPES = (\"REGIONAL_PROMPTS\", )\r\n    FUNCTION = 
\"doit\"\r\n\r\n CATEGORY = \"Inspire/RegionalSampler\"\r\n\r\n def doit(self, basic_pipe, mask, cfg, sampler_name, scheduler, wildcard_prompt):\r\n if 'RegionalPrompt' not in nodes.NODE_CLASS_MAPPINGS:\r\n raise Exception(f\"[ERROR] To use RegionalPromptSimple, you need to install 'ComfyUI-Impact-Pack'\")\r\n\r\n model, clip, vae, positive, negative = basic_pipe\r\n\r\n iwe = nodes.NODE_CLASS_MAPPINGS['ImpactWildcardEncode']()\r\n kap = nodes.NODE_CLASS_MAPPINGS['KSamplerAdvancedProvider']()\r\n rp = nodes.NODE_CLASS_MAPPINGS['RegionalPrompt']()\r\n\r\n if wildcard_prompt != \"\":\r\n model, clip, positive, _ = iwe.doit(model=model, clip=clip, populated_text=wildcard_prompt)\r\n\r\n basic_pipe = model, clip, vae, positive, negative\r\n\r\n sampler = kap.doit(cfg, sampler_name, scheduler, basic_pipe)[0]\r\n regional_prompts = rp.doit(mask, sampler)[0]\r\n\r\n return (regional_prompts, )\r\n\r\n\r\ndef color_to_mask(color_mask, mask_color):\r\n try:\r\n if mask_color.startswith(\"#\"):\r\n selected = int(mask_color[1:], 16)\r\n else:\r\n selected = int(mask_color, 10)\r\n except Exception:\r\n raise Exception(f\"[ERROR] Invalid mask_color value. mask_color should be color value for RGB\")\r\n\r\n red = (selected >> 16) & 0xFF\r\n green = (selected >> 8) & 0xFF\r\n blue = selected & 0xFF\r\n\r\n mask_color = np.array([red, green, blue])\r\n image = 255. * color_mask.cpu().numpy()\r\n image = np.clip(image, 0, 255).astype(np.uint8)\r\n image = np.array(image).squeeze(0)\r\n\r\n h, w, _ = image.shape\r\n\r\n mask = [\r\n [1.0 if np.array_equal(pixel, mask_color) else 0.0 for pixel in row] for row in image\r\n ]\r\n return torch.tensor(mask).unsqueeze(0)\r\n\r\n\r\nclass RegionalPromptColorMask:\r\n @classmethod\r\n def INPUT_TYPES(s):\r\n return {\r\n \"required\": {\r\n \"basic_pipe\": (\"BASIC_PIPE\",),\r\n \"color_mask\": (\"IMAGE\",),\r\n \"mask_color\": (\"STRING\", {\"multiline\": False, \"default\": \"#FFFFFF\"}),\r\n \"cfg\": (\"FLOAT\", {\"default\": 8.0, \"min\": 0.0, \"max\": 100.0}),\r\n \"sampler_name\": (comfy.samplers.KSampler.SAMPLERS,),\r\n \"scheduler\": (comfy.samplers.KSampler.SCHEDULERS,),\r\n \"wildcard_prompt\": (\"STRING\", {\"multiline\": True, \"dynamicPrompts\": False, \"placeholder\": \"wildcard prompt\"}),\r\n },\r\n }\r\n\r\n RETURN_TYPES = (\"REGIONAL_PROMPTS\", \"MASK\")\r\n FUNCTION = \"doit\"\r\n\r\n CATEGORY = \"Inspire/RegionalSampler\"\r\n\r\n def doit(self, basic_pipe, color_mask, mask_color, cfg, sampler_name, scheduler, wildcard_prompt):\r\n mask = color_to_mask(color_mask, mask_color)\r\n rp = RegionalPromptSimple().doit(basic_pipe, mask, cfg, sampler_name, scheduler, wildcard_prompt)[0]\r\n return (rp, mask)\r\n\r\n\r\nclass RegionalConditioningSimple:\r\n @classmethod\r\n def INPUT_TYPES(s):\r\n return {\r\n \"required\": {\r\n \"clip\": (\"CLIP\", ),\r\n \"mask\": (\"MASK\",),\r\n \"strength\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.01}),\r\n \"set_cond_area\": ([\"default\", \"mask bounds\"],),\r\n \"prompt\": (\"STRING\", {\"multiline\": True, \"placeholder\": \"prompt\"}),\r\n },\r\n }\r\n\r\n RETURN_TYPES = (\"CONDITIONING\", )\r\n FUNCTION = \"doit\"\r\n\r\n CATEGORY = \"Inspire/Regional\"\r\n\r\n def doit(self, clip, mask, strength, set_cond_area, prompt):\r\n conditioning = nodes.CLIPTextEncode().encode(clip, prompt)[0]\r\n conditioning = nodes.ConditioningSetMask().append(conditioning, mask, set_cond_area, strength)[0]\r\n return (conditioning, )\r\n\r\n\r\nclass RegionalConditioningColorMask:\r\n 
@classmethod\r\n def INPUT_TYPES(s):\r\n return {\r\n \"required\": {\r\n \"clip\": (\"CLIP\", ),\r\n \"color_mask\": (\"IMAGE\",),\r\n \"mask_color\": (\"STRING\", {\"multiline\": False, \"default\": \"#FFFFFF\"}),\r\n \"strength\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.01}),\r\n \"set_cond_area\": ([\"default\", \"mask bounds\"],),\r\n \"prompt\": (\"STRING\", {\"multiline\": True, \"placeholder\": \"prompt\"}),\r\n },\r\n }\r\n\r\n RETURN_TYPES = (\"CONDITIONING\", \"MASK\")\r\n FUNCTION = \"doit\"\r\n\r\n CATEGORY = \"Inspire/Regional\"\r\n\r\n def doit(self, clip, color_mask, mask_color, strength, set_cond_area, prompt):\r\n mask = color_to_mask(color_mask, mask_color)\r\n\r\n conditioning = nodes.CLIPTextEncode().encode(clip, prompt)[0]\r\n conditioning = nodes.ConditioningSetMask().append(conditioning, mask, set_cond_area, strength)[0]\r\n return (conditioning, mask)\r\n\r\n\r\nNODE_CLASS_MAPPINGS = {\r\n \"RegionalPromptSimple //Inspire\": RegionalPromptSimple,\r\n \"RegionalPromptColorMask //Inspire\": RegionalPromptColorMask,\r\n \"RegionalConditioningSimple //Inspire\": RegionalConditioningSimple,\r\n \"RegionalConditioningColorMask //Inspire\": RegionalConditioningColorMask,\r\n}\r\nNODE_DISPLAY_NAME_MAPPINGS = {\r\n \"RegionalPromptSimple //Inspire\": \"Regional Prompt Simple (Inspire)\",\r\n \"RegionalPromptColorMask //Inspire\": \"Regional Prompt By Color Mask (Inspire)\",\r\n \"RegionalConditioningSimple //Inspire\": \"Regional Conditioning Simple (Inspire)\",\r\n \"RegionalConditioningColorMask //Inspire\": \"Regional Conditioning By Color Mask (Inspire)\",\r\n}\r\n","repo_name":"Rayzlaststop/ComfyUI","sub_path":"custom_nodes/ComfyUI-Inspire-Pack/regional_nodes.py","file_name":"regional_nodes.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32127508045","text":"\"\"\"Core Curate App Settings\n\nSettings with the following syntax can be overwritten at the project level:\nSETTING_NAME = getattr(settings, \"SETTING_NAME\", \"Default Value\")\n\"\"\"\nimport os\n\nfrom django.conf import settings\n\nif not settings.configured:\n settings.configure()\n\nREGISTRY_XSD_FILENAME = getattr(settings, \"REGISTRY_XSD_FILENAME\", \"\")\n\"\"\" str: Registry xsd filename used for the initialisation.\n\"\"\"\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"core_curate_registry_app/locale\"),)\n\n\nXPATH_TITLE = \"/rsm:Resource[1]/rsm:identity[1]/rsm:title\"\n\"\"\" str : Xpath of the resource name/title\n\"\"\"\n","repo_name":"usnistgov/core_curate_registry_app","sub_path":"core_curate_registry_app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11816316035","text":"import time\nimport cv2\nimport numpy as np\nimport onnxruntime\nfrom human_keypoint_coco import *\n\n\nclass MoveNet_onnx(object):\n def __init__(self, model_path, input_h=416, input_w=416):\n self.input_h = input_h\n self.input_w = input_w\n\n self.model = onnxruntime.InferenceSession(model_path, providers=['CUDAExecutionProvider'])\n self.input_name = self.model.get_inputs()[0].name\n self.output_name = self.model.get_outputs()[0].name\n\n def pre_processing(self, img_bgr):\n img_rgb = img_bgr[:, :, [2, 1, 0]]\n img_h, img_w, _ = img_rgb.shape\n pad_image = 
np.zeros((self.input_h, self.input_w, 3), dtype=np.uint8)\n        r = min(self.input_h / img_h, self.input_w / img_w)\n\n        resize_img = cv2.resize(img_rgb, (int(img_w * r), int(img_h * r)))\n        pad_image[0:int(img_h * r), 0:int(img_w * r), :] = resize_img\n        pad_image = pad_image / 127.5 - 1\n        pad_image = np.transpose(pad_image, (2, 0, 1))\n\n        img_nhwc = pad_image[None, ].astype(np.float32)\n\n        return img_nhwc, r\n\n    def post_process(self, outputs, ratio):\n        '''\n\n        :param outputs:\n        :param ratio:\n        :return: kpt_lst: keypoint lists for the six detected people, person_bbox: bounding boxes for the six people, person_score: confidence scores for the six people\n        '''\n        t_h = self.input_h / ratio\n        t_w = self.input_w / ratio\n\n        kpt_lst = []\n        person_bbox = []\n        person_score = []\n\n        for kpts in outputs[0]:\n            person_score.append(kpts[-1])\n            person_bbox.append(np.asarray([kpts[-5] * t_h, kpts[-4] * t_w, kpts[-3] * t_h, kpts[-2] * t_w]))  # [ymin, xmin, ymax, xmax]\n\n            k_lst = []\n            for i in range(17):\n                k_lst.append(([kpts[i * 3] * t_h, kpts[i * 3 + 1] * t_w, kpts[i * 3 + 2]]))\n\n            kpt_lst.append(np.asarray(k_lst))\n\n        return kpt_lst, person_bbox, person_score\n\n    def draw_img(self, img, kpt_lst, person_bbox, person_score, thresh_person=0.3, thresh_kpt=0.2):\n        for idx_person, kpts in enumerate(kpt_lst):\n            if person_score[idx_person] > thresh_person:\n                for i in range(len(SKELETON)):\n                    kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]\n                    if kpts[kpt_a][2] > thresh_kpt:\n                        x_a, y_a = kpts[kpt_a][1], kpts[kpt_a][0]\n                        cv2.circle(img, (int(x_a), int(y_a)), 6, CocoColors[i], -1)\n                    if kpts[kpt_b][2] > thresh_kpt:\n                        x_b, y_b = kpts[kpt_b][1], kpts[kpt_b][0]\n                        cv2.circle(img, (int(x_b), int(y_b)), 6, CocoColors[i], -1)\n                    if kpts[kpt_a][2] > thresh_kpt and kpts[kpt_b][2] > thresh_kpt:\n                        cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 2)\n\n                cv2.rectangle(img, (int(person_bbox[idx_person][1]), int(person_bbox[idx_person][0])),\r\n                              (int(person_bbox[idx_person][3]), int(person_bbox[idx_person][2])), CocoColors[idx_person], 2)\n\n        return img\n\n    def detect(self, img_bgr):\n        img_nhwc, r = self.pre_processing(img_bgr)\n        outputs = self.model.run([self.output_name], {self.input_name: img_nhwc})[0]\n\n        kpt_lst, person_bbox, person_score = self.post_process(outputs, r)\n\n        return kpt_lst, person_bbox, person_score\n\n\nif __name__ == '__main__':\n    model = MoveNet_onnx(r'../weights/movenet_multipose_416_sim_nopre.onnx', input_h=416, input_w=416)\n\n    img_bgr = cv2.imread('../weights/test.jpg')\n    # kpt_lst, person_bbox, person_score = model.detect(img_bgr)\n    # print(kpt_lst, person_bbox, person_score)\n\n    for _ in range(1000):\n        s = time.time()\n        y = model.detect(img_bgr)  # fps: 17\n        print('fps: ', 1 / (time.time() - s))\n\n    #\n    # img_draw = model.draw_img(img_bgr, kpt_lst, person_bbox, person_score)\n    # cv2.imshow(' ', img_draw)\n    # cv2.waitKey(0)\n\n    # # video_path = r'rtsp://admin:tp123456@192.168.8.166:554/Streaming/Channels/101'\n    # video_path = r'/home/mafneg/桌面/算法/挥手-江南豪园-球机_20230531.mp4'\n    # cap = cv2.VideoCapture(video_path)\n    #\n    # ret, frame = cap.read()\n    # while ret:\n    #     kpt_lst, person_bbox, person_score = model.detect(frame)\n    #\n    #     img_draw = model.draw_img(frame, kpt_lst, person_bbox, person_score)\n    #     cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n    #     cv2.imshow('frame', img_draw)\n    #     cv2.waitKey(0)\n    #\n    #     ret, frame = 
cap.read()\n\n\n","repo_name":"Yi196/Python","sub_path":"CoreML/inference_movenet_multipose_onnx_nopre.py","file_name":"inference_movenet_multipose_onnx_nopre.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42566424266","text":"# a set of two elements represents an undirected edge\r\n# a tuple of two elements represents a directed edge\r\n# representations include: adjacency matrix, array of adjacency list, and array of edges\r\n\r\nclass Graph:\r\n    def __init__(self):\r\n        self.nodes = set()\r\n        self.edges = set()\r\n\r\n    def add_edge(self, node1, node2):\r\n        self.edges.add((node1, node2))\r\n        self.nodes.add(node1)\r\n        self.nodes.add(node2)\r\n\r\n    def count_edges(self):\r\n        return len(self.edges)\r\n\r\n    def count_nodes(self):\r\n        return len(self.nodes)\r\n\r\n\r\nif __name__ == '__main__':\r\n    net = Graph()\r\n    net.add_edge(3, 2)\r\n    net.add_edge(1, 4)\r\n    net.add_edge(6, 4)\r\n    net.add_edge(2, 1)\r\n    print(net.nodes)\r\n    print(net.edges)\r\n","repo_name":"sani3/Algorithms","sub_path":"graph_array_of_edges.py","file_name":"graph_array_of_edges.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72694824800","text":"#!/usr/bin/env python3\n\nimport curses\nimport pdb\nimport time\n\nimport config\nimport interface\nimport game\n\n\ndef main(stdscr = None):\n\n    if stdscr is None:\n        graphics = interface.DebugGraphics()\n    else:\n        graphics = interface.CursesGraphics(stdscr)\n\n    board = game.Board(graphics)\n\n    frames_until_drop = 0\n    while True:\n        frame_start_time = time.time()\n\n        board.update_block_state()\n\n        board.draw()\n\n        # drop/spawn current block\n        if frames_until_drop == 0:\n            frames_until_drop = config.FALL_SPEED\n            if board.can_drop_current_block():\n                board.drop_current_block()\n            else:\n                board.spawn_block()\n        else:\n            frames_until_drop -= 1\n\n        # handle user input\n        key = graphics.read_input()\n        if key != -1:\n            if key in (curses.KEY_LEFT, ord('a')):\n                board.move_current_block_left()\n            elif key in (curses.KEY_RIGHT, ord('d')):\n                board.move_current_block_right()\n            elif key in (curses.KEY_UP, ord('w')):\n                board.rotate_current_block_clockwise()\n            elif key in (curses.KEY_DOWN, ord('s')):\n                while board.can_drop_current_block():\n                    board.drop_current_block()\n\n        board.update_block_state()\n\n        if not config.DEBUG:\n            frame_length = time.time() - frame_start_time\n            frame_sleep = config.FRAME_LENGTH - frame_length\n            time.sleep(frame_sleep)\n\n\nif __name__ == '__main__':\n    if config.DEBUG:\n        main()\n    else:\n        curses.wrapper(main)","repo_name":"bobwhitelock/termtris","sub_path":"termtris/termtris.py","file_name":"termtris.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20879985902","text":"# docs @ http://flask.pocoo.org/docs/1.0/quickstart/\n\nfrom flask import Flask, jsonify, request, render_template\napp = Flask(__name__)\n\n@app.route('/hello', methods=['GET', 'POST'])\ndef hello():\n\n    # POST request\n    if request.method == 'POST':\n        print('Incoming..')\n        print(request.get_json())  # parse as JSON\n        return 'OK', 200\n\n    # GET request\n    else:\n        message = {'greeting':'Hello from Flask!'}\n        return jsonify(message)  # serialize and use JSON headers\n\n@app.route('/test')\ndef test_page():\n    # look inside `templates` and serve `index.html`\n    return 
render_template('index.html')\n","repo_name":"healeycodes/talking-between-python-and-js","sub_path":"between-flask-and-browser/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"54"} +{"seq_id":"15558162531","text":"#! /usr/bin/env python\n\nfrom std_msgs.msg import Bool\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist, Vector3, PoseStamped\nfrom ardrone_joy.msg import AutoPilotCmd\nimport math\nimport time\nimport tf\nfrom tf.broadcaster import TransformBroadcaster\nimport numpy as np\nimport csv\n\nclass pose:\n    def __init__(self):\n        rospy.init_node('pose', anonymous=True)\n\n        self.listener = tf.TransformListener()\n        self.pos_publisher = rospy.Publisher('/ardrone/current_position', PoseStamped, queue_size=10)\n        self.publish_pose()\n    \n    def publish_pose(self):\n        while not rospy.is_shutdown():\n            try:\n                self.listener.waitForTransform("nav", "base_stabilized", rospy.Time(), rospy.Duration(0.1))\n                (trans,quaternion) = self.listener.lookupTransform("nav", "base_stabilized", rospy.Time())\n            except:\n                continue\n\n            pose = PoseStamped()\n\n            pose.header.stamp = rospy.Time.now()\n            pose.header.frame_id = "world"\n            pose.pose.position.x = trans[0]\n            pose.pose.position.y = trans[1]\n            pose.pose.position.z = trans[2]\n\n            pose.pose.orientation.x = quaternion[0]\n            pose.pose.orientation.y = quaternion[1]\n            pose.pose.orientation.z = quaternion[2]\n            pose.pose.orientation.w = quaternion[3]\n            \n            self.pos_publisher.publish(pose)\n    \n    \nif __name__ == '__main__':\n    pose()\n    rospy.spin()\n","repo_name":"nichjang/senior_design","sub_path":"src/journey/pose.py","file_name":"pose.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"72534815841","text":"# Convert the input string into a list of characters\ninp = list(input(\"Enter a number as list : \"))\na = []\n# Keep only the numeric characters in a new list\nfor i in inp:\n    if(i.isnumeric()):\n        a.append(int(i))\n\na.sort()\n\n# Index of the middle element\nmid = int(len(a)/2)\n# Odd length: the median is the middle value itself\nif(len(a) % 2 == 1):\n    print(\"Median : %.1f\" % float(a[mid]))\n# Even length: the median is the average of the two middle values\nelse:\n    ret = (a[mid] + a[mid-1])/2.0\n    print(\"Median : %.1f\" % ret)","repo_name":"koty08/Python_AI","sub_path":"python_basic/HW02_A_고태영.py","file_name":"HW02_A_고태영.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39300869179","text":"sum = 0\r\nwhile(True):\r\n    userInput = input(\"enter the item : \\n\")\r\n    if (userInput!='q'):\r\n        sum = sum + int(userInput)\r\n        print(f\"Order total so far: {sum}\")\r\n        \r\n        \r\n    else:\r\n        print(f\"your total bill is {sum}\")\r\n        break\r\n        \r\n\r\n        \r\n    \r\n    \r\n\r\n    \r\n\r\n","repo_name":"sonu61/PythonExampless","sub_path":"Prc1.py","file_name":"Prc1.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12440059727","text":"from math import floor, pi\nimport time\nimport parakeet.freeze\n\n\ndef test_sphere_packer():\n    # Set the volume size\n    length_x = 30  # A\n    length_y = 30\n    length_z = 30\n    volume = length_x * length_y * length_z  # A^3\n\n    # Determine the number of waters to place\n    avogadros_number = 6.02214086e23\n    molar_mass_of_water = 18.01528  # grams / mole\n    density_of_water = 940.0  # kg / m^3\n    mass_of_water = (density_of_water * 1000) * (volume * 
1e-10**3) # g\n number_of_waters = int(\n floor((mass_of_water / molar_mass_of_water) * avogadros_number)\n )\n\n # Van der Waals radius of water\n van_der_waals_radius = 2.7 / 2.0 # A\n\n # Compute the total volume in the spheres\n volume_of_spheres = (4.0 / 3.0) * pi * van_der_waals_radius**3 * number_of_waters\n print(\"Fraction of volume filled: %.2f\" % (100 * volume_of_spheres / volume))\n\n # Create the grid\n grid = (\n int(floor(length_z / (2 * van_der_waals_radius))),\n int(floor(length_y / (2 * van_der_waals_radius))),\n int(floor(length_x / (2 * van_der_waals_radius))),\n )\n\n # Compute the node length and density\n node_length = max([length_z / grid[0], length_y / grid[1], length_x / grid[2]])\n density = number_of_waters / volume\n\n # Create the packer\n packer = parakeet.freeze.SpherePacker(\n grid, node_length, density, van_der_waals_radius, max_iter=10\n )\n print(len(packer))\n\n # Extract all the data and compute the time taken\n start_time = time.time()\n coords = []\n for s in packer:\n for g in s:\n coords.extend(g)\n print(len(s))\n print(\"Time to compute: %f\" % (time.time() - start_time))\n print(\n \"Num unplaced samples: %d / %d\"\n % (packer.num_unplaced_samples(), number_of_waters)\n )\n\n # Test overlaps\n min_distance_sq = (van_der_waals_radius * 2) ** 2\n for i in range(len(coords) - 1):\n ci = coords[i]\n for j in range(i + 1, len(coords)):\n cj = coords[j]\n d2 = (ci[0] - cj[0]) ** 2 + (ci[1] - cj[1]) ** 2 + (ci[2] - cj[2]) ** 2\n assert d2 >= min_distance_sq\n","repo_name":"rosalindfranklininstitute/parakeet","sub_path":"tests/test_sphere_packer.py","file_name":"test_sphere_packer.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"35588004683","text":"from functools import reduce\n\nimport wx\n\nfrom vistas.core.observers.camera import CameraObservable\nfrom vistas.core.preferences import Preferences\nfrom vistas.ui.controllers.project import ProjectChangedEvent\nfrom vistas.ui.controls.viewer_panel import ViewerPanel\nfrom vistas.ui.events import EVT_CAMERA_MODE_CHANGED, EVT_CAMERA_SYNC, CameraSyncEvent\n\n\nclass ViewerContainerPanel(wx.Panel):\n \"\"\"\n A container panel that provides access to all active viewer panels and handles adding, removing and resizing\n window rows and columns. 
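    Each viewer stores its width and height as fractions of the container size,
    so UpdateViewerSizes() can rescale every viewer proportionally on resize.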
Also provides access for synchronizing ViewerPanels when mouse events occur.\n \"\"\"\n\n class Row:\n def __init__(self):\n self.viewers = []\n self.num_viewers = 0\n self.prev_row = None\n\n def __init__(self, parent, id):\n super().__init__(parent, id)\n self.num_viewers = 0\n self.wireframe = False\n self.selection_view = False\n self.rows = []\n\n self.num_columns = Preferences.app().get('viewer_itemsperrow', 2)\n self.AddViewer()\n\n # Events\n self.Bind(wx.EVT_SIZE, self.OnSize)\n self.Bind(EVT_CAMERA_MODE_CHANGED, self.OnCameraModeChanged)\n self.Bind(EVT_CAMERA_SYNC, self.OnCameraSyncEvent)\n self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)\n\n def OnDestroy(self, event):\n self.Unbind(EVT_CAMERA_MODE_CHANGED)\n\n def AddViewer(self, new_viewer=None):\n # Add new row if necessary\n if self.num_viewers % self.num_columns == 0:\n self.AddRow()\n\n last_row = self.rows[-1]\n\n # Create new viewer\n if new_viewer is None:\n new_viewer = ViewerPanel(self, wx.ID_ANY)\n new_viewer.HideResizeAreas()\n new_viewer.ResetNeighbors()\n\n index = last_row.num_viewers\n last_row.viewers[index] = new_viewer\n last_row.num_viewers += 1\n self.num_viewers += 1\n\n # Size proportions for the new viewer\n new_viewer.width = 1 / last_row.num_viewers\n new_viewer.height = 1 / len(self.rows)\n\n for viewer in last_row.viewers[:index]:\n viewer.width *= index * (1 / last_row.num_viewers)\n\n # Set neighbors\n if last_row.num_viewers > 1:\n new_viewer.SetNeighbor(last_row.viewers[index - 1], ViewerPanel.WEST)\n last_row.viewers[index - 1].SetNeighbor(new_viewer, ViewerPanel.EAST)\n\n if last_row.prev_row is not None and last_row.prev_row.num_viewers >= last_row.num_viewers:\n for viewer in last_row.prev_row.viewers:\n new_viewer.SetNeighbor(viewer, ViewerPanel.NORTH)\n viewer.SetNeighbor(new_viewer, ViewerPanel.SOUTH)\n\n self.UpdateViewerSizes()\n\n observable = CameraObservable.get()\n if observable.is_sync:\n self.SyncAllCameras(False, False)\n self.SyncAllCameras(True, True)\n new_viewer.ResetCameraInteractor()\n\n def RemoveViewer(self, viewer=None):\n # Can't remove the last viewer\n if self.num_viewers < 2:\n return\n\n if viewer is None:\n row = self.rows[-1]\n viewer = row.viewers[row.num_viewers - 1]\n\n for row in self.rows:\n if viewer in row.viewers:\n index = row.viewers.index(viewer)\n viewer = row.viewers[index]\n row.viewers[index] = None\n viewer.legend_window.Destroy()\n viewer.Destroy()\n self.num_viewers -= 1\n self.Rebuild()\n return\n\n def RefreshAllViewers(self):\n for row in self.rows:\n for viewer in row.viewers[:row.num_viewers]:\n viewer.gl_canvas.Refresh()\n\n def UpdateViewerSizes(self):\n for row in self.rows:\n for viewer in row.viewers[:row.num_viewers]:\n x = 0\n y = 0\n\n neighbor = viewer.GetNeighbor(ViewerPanel.WEST)\n if neighbor:\n x = neighbor.GetPosition().x + neighbor.GetSize().GetWidth()\n\n neighbor = viewer.GetNeighbor(ViewerPanel.NORTH)\n if neighbor:\n y = neighbor.GetPosition().y + neighbor.GetSize().GetHeight()\n\n viewer.SetSize(\n x, y, self.GetSize().GetWidth() * viewer.width,\n self.GetSize().GetHeight() * viewer.height\n )\n viewer.gl_canvas.camera_controls.reposition()\n\n def OnSize(self, event):\n self.UpdateViewerSizes()\n\n def Rebuild(self):\n rows = self.rows\n self.rows = []\n self.num_viewers = 0\n\n for row in rows:\n for viewer in (x for x in row.viewers if x is not None):\n self.AddViewer(viewer)\n\n def AddRow(self):\n new_row = self.Row()\n new_row.viewers = list(None for _ in range(self.num_columns))\n\n if self.rows:\n new_row.prev_row 
= self.rows[-1]\n\n for row in self.rows:\n for viewer in row.viewers[:row.num_viewers]:\n viewer.height *= len(self.rows) * (1 / (len(self.rows) + 1))\n\n self.rows.append(new_row)\n\n def ProjectChanged(self, event):\n if event.change == ProjectChangedEvent.PROJECT_RESET:\n while self.num_viewers > 1:\n self.RemoveViewer()\n self.GetMainViewerPanel().RefreshScenes()\n self.GetMainViewerPanel().UpdateLegend()\n self.GetMainViewerPanel().UpdateOverlay()\n\n else:\n for row in self.rows:\n for i in range(row.num_viewers):\n row.viewers[i].ProjectChanged(event)\n\n def GetMainViewerPanel(self):\n return self.rows[0].viewers[0]\n\n def GetAllViewerPanels(self):\n return reduce(lambda x, y: x + y, (row.viewers[:row.num_viewers] for row in self.rows))\n\n def ToggleWireframe(self):\n self.wireframe = not self.wireframe\n\n for viewer in self.GetAllViewerPanels():\n viewer.camera.wireframe = self.wireframe\n viewer.camera.scene.render_bounding_boxes = self.wireframe\n viewer.Refresh()\n\n def ToggleSelectionView(self):\n self.selection_view = not self.selection_view\n\n for viewer in self.GetAllViewerPanels():\n viewer.camera.selection_view = self.selection_view\n viewer.Refresh()\n\n def OnCameraModeChanged(self, event):\n if CameraObservable.get().is_sync:\n self.SyncAllCameras(True, False)\n\n def OnCameraSyncEvent(self, event: CameraSyncEvent):\n if CameraObservable.get().is_sync:\n canvas = event.GetEventObject()\n for panel in self.GetAllViewerPanels():\n if canvas is not panel.gl_canvas:\n interactor = panel.gl_canvas.camera_interactor\n interactor.sync(event.interactor)\n\n def SyncAllCameras(self, do_sync, save_state):\n observable = CameraObservable.get()\n if do_sync:\n interactor = self.GetMainViewerPanel().gl_canvas.camera_interactor\n observable.sync_camera(interactor, save_state)\n for panel in self.GetAllViewerPanels():\n if panel is not self.GetMainViewerPanel():\n panel.gl_canvas.camera_controls.hide()\n else:\n main_panel_interactor = observable.global_interactor\n observable.unsync_camera()\n if main_panel_interactor is not None:\n self.GetMainViewerPanel().gl_canvas.camera_interactor = main_panel_interactor\n for panel in self.GetAllViewerPanels():\n if panel is not None and panel is not self.GetMainViewerPanel():\n panel.gl_canvas.camera_controls.show()\n","repo_name":"VISTAS-IVES/pyvistas","sub_path":"source/vistas/ui/controls/viewer_container_panel.py","file_name":"viewer_container_panel.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10063393610","text":"import os\nimport flask as f\nfrom flask_bcrypt import Bcrypt\n\ndef create_app(test_config=None):\n # create and configure the app\n app = f.Flask(__name__, instance_relative_config=True, static_url_path='',\n static_folder='../../web', template_folder=\"./templates\")\n\n # app.debug = False\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=False)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n with app.app_context():\n bcrypt = Bcrypt(app)\n f.g.bcrypt_log_rounds = app.config.get('BCRYPT_LOG_ROUNDS')\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # init db\n from . import db\n db.init_app(app)\n\n # add note bp\n from . 
import note\n app.register_blueprint(note.bp)\n # app.add_url_rule('/cmdnotes/api/notes', 'notes', note.notes)\n app.add_url_rule('/cmdnotes/api/notes_paging', 'notes_paging',\n note.get_notes_paging, methods=['POST'])\n app.add_url_rule('/cmdnotes/api/create_note', 'create_note',\n note.create_note, methods=['POST'])\n app.add_url_rule('/cmdnotes/api/update_note', 'update_note',\n note.update_note, methods=['POST'])\n app.add_url_rule('/cmdnotes/api/note', 'note',\n note.get_note, methods=['GET'])\n app.add_url_rule('/cmdnotes/api/delete_note', 'delete_note',\n note.delete_note, methods=['POST'])\n # app.add_url_rule('/cmdnotes/api/notes', endpoint='get_notes')\n\n from . import user\n app.register_blueprint(user.bp)\n app.add_url_rule('/cmdnotes/api/register', 'register',\n user.register, methods=['POST'])\n app.add_url_rule('/cmdnotes/api/login', 'login',\n user.login, methods=['POST'])\n\n return app\n","repo_name":"jtnotes/cmd-collector","sub_path":"server/cmd_notes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30607469625","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 02 15:59:36 2017\n\n@author: Sebastian Milde, Thomas Kuestner\n\"\"\"\n\nimport math\nimport numpy as np\nimport h5py\nimport inspect\nimport dis\nfrom sklearn.model_selection import KFold\nimport os\n\ndef expecting():\n \"\"\"Return how many values the caller is expecting\"\"\"\n f = inspect.currentframe()\n f = f.f_back.f_back\n c = f.f_code\n i = f.f_lasti\n bytecode = c.co_code\n instruction = bytecode[i+3]\n if instruction == dis.opmap['UNPACK_SEQUENCE']:\n howmany = bytecode[i+4]\n return howmany\n elif instruction == dis.opmap['POP_TOP']:\n return 0\n return 1\n\ndef fSplitDataset(allPatches, allY, allPats, sSplitting, patchSize, patchOverlap, split_ratio, sFolder, nfolds = 0):\n # TODO: adapt path\n iReturn = expecting()\n\n if len(patchSize) == 3:\n if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1] and allPatches.shape[2] == patchSize[2]:\n allPatches = np.transpose(allPatches, (3, 0, 1, 2))\n print(allPatches.shape)\n else:\n if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1]:\n allPatches = np.transpose(allPatches, (2, 0, 1))\n print(allPatches.shape)\n\n if sSplitting == \"normal\":\n print(\"Done\")\n nPatches = allPatches.shape[0]\n dVal = math.floor(split_ratio * nPatches)\n rand_num = np.random.permutation(np.arange(nPatches))\n rand_num = rand_num[0:int(dVal)].astype(int)\n print(rand_num)\n if len(patchSize) == 3:\n X_test = allPatches[rand_num, :, :, :]\n else:\n X_test = allPatches[rand_num, :, :]\n y_test = allY[rand_num]\n X_train = allPatches\n X_train = np.delete(X_train, rand_num, axis=0)\n y_train = allY\n y_train = np.delete(y_train, rand_num)\n print(X_train.shape)\n print(X_test.shape)\n print(y_train.shape)\n print(y_test.shape)\n\n if iReturn == 0:\n if len(patchSize) == 3:\n folder = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2])\n Path = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2]) + os.sep + 'normal_' + str(patchSize[0]) + str(patchSize[1]) + '.h5'\n else:\n folder = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1])\n Path = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + os.sep + 'normal_' + str(patchSize[0]) + str(patchSize[1]) + '.h5'\n\n if os.path.isdir(folder):\n pass\n else:\n 
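            # the target folder does not exist yet; create it before writing the HDF5 file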
os.makedirs(folder)\n\n print(Path)\n with h5py.File(Path, 'w') as hf:\n hf.create_dataset('X_train', data=X_train)\n hf.create_dataset('X_test', data=X_test)\n hf.create_dataset('y_train', data=y_train)\n hf.create_dataset('y_test', data=y_test)\n hf.create_dataset('patchSize', data=patchSize)\n hf.create_dataset('patchOverlap', data=patchOverlap)\n else:\n return [X_train], [y_train], [X_test], [y_test] # embed in a 1-fold list\n\n elif sSplitting == \"crossvalidation_data\":\n if nfolds == 0:\n kf = KFold(n_splits=len(np.unique(allPats)))\n else:\n kf = KFold(n_splits=nfolds)\n ind_split = 0\n X_trainFold = []\n X_testFold = []\n y_trainFold = []\n y_testFold = []\n\n for train_index, test_index in kf.split(allPatches):\n X_train, X_test = allPatches[train_index], allPatches[test_index]\n y_train, y_test = allY[train_index], allY[test_index]\n\n if iReturn == 0:\n if len(patchSize) == 3:\n folder = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2])\n Path = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2]) + os.sep + 'crossVal_data' + str(ind_split) + '_' + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2])+ '.h5'\n else:\n folder = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1])\n Path = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + os.sep + 'crossVal_data' + str(ind_split) + '_' + str(patchSize[0]) + str(patchSize[1]) + '.h5'\n if os.path.isdir(folder):\n pass\n else:\n os.makedirs(folder)\n\n with h5py.File(Path, 'w') as hf:\n hf.create_dataset('X_train', data=X_train)\n hf.create_dataset('X_test', data=X_test)\n hf.create_dataset('y_train', data=y_train)\n hf.create_dataset('y_test', data=y_test)\n hf.create_dataset('patchSize', data=patchSize)\n hf.create_dataset('patchOverlap', data=patchOverlap)\n else:\n X_trainFold.append(X_train)\n X_testFold.append(X_test)\n y_trainFold.append(y_train)\n y_testFold.append(y_test)\n\n ind_split += 1\n\n X_trainFold = np.asarray(X_trainFold)\n X_testFold = np.asarray(X_testFold)\n y_trainFold = np.asarray(y_trainFold)\n y_testFold = np.asarray(y_testFold)\n\n if iReturn > 0:\n return X_trainFold, y_trainFold, X_testFold, y_testFold\n\n elif sSplitting == \"crossvalidation_patient\":\n unique_pats = np.unique(allPats)\n\n X_trainFold = []\n X_testFold = []\n y_trainFold = []\n y_testFold = []\n\n for ind_split in unique_pats:\n train_index = np.where(allPats != ind_split)[0]\n test_index = np.where(allPats == ind_split)[0]\n X_train, X_test = allPatches[train_index], allPatches[test_index]\n y_train, y_test = allY[train_index], allY[test_index]\n\n if iReturn == 0:\n if len(patchSize) == 3:\n folder = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2])\n Path = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2]) + os.sep + 'crossVal' + str(ind_split) + '_' + str(patchSize[0]) + str(patchSize[1]) + str(patchSize[2])+ '.h5'\n else:\n folder = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1])\n Path = sFolder + os.sep + str(patchSize[0]) + str(patchSize[1]) + os.sep + 'crossVal' + str(\n ind_split) + '_' + str(patchSize[0]) + str(patchSize[1]) + '.h5'\n if os.path.isdir(folder):\n pass\n else:\n os.makedirs(folder)\n\n with h5py.File(Path, 'w') as hf:\n hf.create_dataset('X_train', data=X_train)\n hf.create_dataset('X_test', data=X_test)\n hf.create_dataset('y_train', data=y_train)\n hf.create_dataset('y_test', data=y_test)\n hf.create_dataset('patchSize', data=patchSize)\n hf.create_dataset('patchOverlap', 
data=patchOverlap)\n\n else:\n X_trainFold.append(X_train)\n X_testFold.append(X_test)\n y_trainFold.append(y_train)\n y_testFold.append(y_test)\n\n\n X_trainFold = np.asarray(X_trainFold, dtype='f')\n X_testFold = np.asarray(X_testFold, dtype='f')\n y_trainFold = np.asarray(y_trainFold, dtype='f')\n y_testFold = np.asarray(y_testFold, dtype='f')\n\n if iReturn > 0:\n return X_trainFold, y_trainFold, X_testFold, y_testFold\n\ndef fSplitDatasetCorrection(sSplitting, dRefPatches, dArtPatches, allPats, split_ratio, nfolds, test_index):\n \"\"\"\n Split dataset with three options:\n 1. normal: randomly split data according to the split_ratio without cross validation\n 2. crossvalidation_data: perform crossvalidation with mixed patient data\n 3. crossvalidation_patient: perform crossvalidation with separate patient data\n @param sSplitting: splitting mode 'normal', 'crossvalidation_data' or 'crossvalidation_patient'\n @param dRefPatches: reference patches\n @param dArtPatches: artifact patches\n @param allPats: patient index\n @param split_ratio: the ratio to split test data\n @param nfolds: folds for cross validation\n @return: testing and training data for both reference and artifact images\n \"\"\"\n train_ref_fold = []\n test_ref_fold = []\n train_art_fold = []\n test_art_fold = []\n\n # normal splitting\n if sSplitting == 'normal':\n nPatches = dRefPatches.shape[0]\n dVal = math.floor(split_ratio * nPatches)\n rand_num = np.random.permutation(np.arange(nPatches))\n rand_num = rand_num[0:int(dVal)].astype(int)\n\n test_ref_fold.append(dRefPatches[rand_num, :, :])\n train_ref_fold.append(np.delete(dRefPatches, rand_num, axis=0))\n test_art_fold.append(dArtPatches[rand_num, :, :])\n train_art_fold.append(np.delete(dArtPatches, rand_num, axis=0))\n\n # crossvalidation with mixed patient\n if sSplitting == \"crossvalidation_data\":\n if nfolds == 0:\n kf = KFold(n_splits=len(np.unique(allPats)))\n else:\n kf = KFold(n_splits=nfolds)\n\n for train_index, test_index in kf.split(dRefPatches):\n train_ref, test_ref = dRefPatches[train_index], dRefPatches[test_index]\n train_art, test_art = dArtPatches[train_index], dArtPatches[test_index]\n\n train_ref_fold.append(train_ref)\n train_art_fold.append(train_art)\n test_ref_fold.append(test_ref)\n test_art_fold.append(test_art)\n\n # crossvalidation with separate patient\n elif sSplitting == 'crossvalidation_patient':\n if test_index == -1:\n unique_pats = np.unique(allPats)\n else:\n unique_pats = [test_index]\n for ind_split in unique_pats:\n train_index = np.where(allPats != ind_split)[0]\n test_index = np.where(allPats == ind_split)[0]\n train_ref, test_ref = dRefPatches[train_index], dRefPatches[test_index]\n train_art, test_art = dArtPatches[train_index], dArtPatches[test_index]\n\n train_ref_fold.append(train_ref)\n train_art_fold.append(train_art)\n test_ref_fold.append(test_ref)\n test_art_fold.append(test_art)\n\n train_ref_fold = np.asarray(train_ref_fold, dtype='f')\n train_art_fold = np.asarray(train_art_fold, dtype='f')\n test_ref_fold = np.asarray(test_ref_fold, dtype='f')\n test_art_fold = np.asarray(test_art_fold, dtype='f')\n\n return train_ref_fold, test_ref_fold, train_art_fold, test_art_fold","repo_name":"thomaskuestner/CNNArt","sub_path":"utils/Training_Test_Split.py","file_name":"Training_Test_Split.py","file_ext":"py","file_size_in_byte":10817,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"14501960861","text":"\n\nimport numpy as np\nimport pandas as pd \nimport 
matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\n\niris=pd.read_csv(\"iris1.csv\")\niris.columns=[\"sepal_length\",\"sepal_width\",\"petal_length\",\"petal_width\",\"flower\"]\n\niris=iris.drop([\"flower\"],axis=1)\ndata=iris.values\n\ndef norm(x):\n    \"\"\" Normalization of the data, assumed data contains only numerical values\"\"\"\n    for i in range(x.shape[1]):\n        meu=np.mean(x,axis=0)\n        sigma=np.std(x,axis=0)\n        x[:,i]=(x[:,i]-meu[i])/sigma[i]\n    return x\ndata=norm(data)\nepsilon=0.76\nPmin=data.shape[1]+1\n\ndef dbscan(data,epsilon,Pmin):\n    labels=np.zeros(len(data))\n    cluster_name=1\n    for i in range(data.shape[0]):\n        if labels[i]==0 or labels[i]==-1:\n            neighbours,ix=dist(data,data[i],labels,cluster_name)\n            if len(neighbours)>=Pmin:  # core point: grow a cluster from its neighbourhood\n                labels=expand(neighbours,data,labels,cluster_name,Pmin)\n                cluster_name+=1\n            else:\n                labels[i]=-1  # assumption: non-core points are marked as noise\n    return labels\n\ndef clusters(data,labels):\n    label=list(set(labels))\n    clusters=[0]*len(label)\n    x=np.column_stack((data,labels))\n    for i in range(len(label)):\n        clusters[i]=x[x[:,-1]==label[i]]\n    return clusters\n\ndef graph(clusters):\n    cl=[\"r\",\"g\",\"b\",\"k\",\"y\"]\n    for i in range(len(clusters)):\n        plt.scatter(clusters[i][:,2],clusters[i][:,3], color=cl[i])\n    \n    \ndb=dbscan(data,epsilon,Pmin) \nc=clusters(data,db)\ng=graph(c)\n\nlabels_sl = DBSCAN(eps=0.44, min_samples=5).fit_predict(data)\nclusters_sl=clusters(data,labels_sl)\ng_sl=graph(clusters_sl)\n\n### The two results are similar; however, the sklearn algorithm uses a smaller epsilon for the same clustering.\n### fit_predict(X) calls fit(X); fit_predict(X) returns cluster labels, while fit(X) only performs the clustering.\n### fit_predict(X) gives the same result as calling fit(X) first and then reading .labels_ ","repo_name":"SonyaCopper/Machine-Learning-Algorithms-from-Scratch","sub_path":"DBSCAN_from_scratch_on_IRIS/DBSCAN_on_IRIS.py","file_name":"DBSCAN_on_IRIS.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38691336696","text":"import os\n\n# Measurements\n\n# VTK text\nTEXT_SIZE_SMALL = 11\nTEXT_SIZE = 12\nTEXT_SIZE_LARGE = 16\nTEXT_COLOUR = (1,1,1)\n\n(X,Y) = (0.03, 0.97)\n(XZ, YZ) = (0.05, 0.93)\nTEXT_POS_LEFT_UP = (X, Y)\n#------------------------------------------------------------------\nTEXT_POS_LEFT_DOWN = (X, 1-Y) # SetVerticalJustificationToBottom\n\nTEXT_POS_LEFT_DOWN_ZERO = (X, 1-YZ)\n#------------------------------------------------------------------\nTEXT_POS_RIGHT_UP = (1-X, Y) # SetJustificationToRight\n#------------------------------------------------------------------\nTEXT_POS_RIGHT_DOWN = (1-X, 1-Y) # SetVerticalJustificationToBottom &\n                                 # SetJustificationToRight\n#------------------------------------------------------------------\nTEXT_POS_HCENTRE_DOWN = (0.5, 1-Y) # SetJustificationToCentered\n                                   # SetVerticalJustificationToBottom\n\nTEXT_POS_HCENTRE_DOWN_ZERO = (0.5, 1-YZ)\n#------------------------------------------------------------------\nTEXT_POS_HCENTRE_UP = (0.5, Y) # SetJustificationToCentered\n#------------------------------------------------------------------\nTEXT_POS_VCENTRE_RIGHT = (1-X, 0.5) # SetVerticalJustificationToCentered\n                                    # SetJustificationToRight\nTEXT_POS_VCENTRE_RIGHT_ZERO = (1-XZ, 0.5)\n#------------------------------------------------------------------\nTEXT_POS_VCENTRE_LEFT = (X, 0.5) # SetVerticalJustificationToCentered\n#------------------------------------------------------------------\n\n\n# Slice orientation\nAXIAL = 1\nCORONAL = 2\nSAGITAL = 3\nVOLUME = 4\nSURFACE = 5\nDENSIDADE = 
6\n\n# Measure type\nLINEAR = 6\nANGULAR = 7\n\n# Colour representing each orientation\nORIENTATION_COLOUR = {'AXIAL': (1,0,0), # Red\n 'CORONAL': (0,1,0), # Green\n 'SAGITAL': (0,0,1)} # Blue\n\n\n# Camera according to slice's orientation\n#CAM_POSITION = {\"AXIAL\":(0, 0, 1), \"CORONAL\":(0, -1, 0), \"SAGITAL\":(1, 0, 0)}\n#CAM_VIEW_UP = {\"AXIAL\":(0, 1, 0), \"CORONAL\":(0, 0, 1), \"SAGITAL\":(0, 0, 1)}\nAXIAL_SLICE_CAM_POSITION = {\"AXIAL\":(0, 0, 1), \"CORONAL\":(0, -1, 0), \"SAGITAL\":(1, 0, 0), \"DENSIDADE\":(0, 0, 1)}\nAXIAL_SLICE_CAM_VIEW_UP = {\"AXIAL\":(0, 1, 0), \"CORONAL\":(0, 0, 1), \"SAGITAL\":(0, 0, 1), \"DENSIDADE\":(0, 1, 0)}\n\nSAGITAL_SLICE_CAM_POSITION = {\"AXIAL\":(0, 1, 0), \"CORONAL\":(1, 0, 0), \"SAGITAL\":(0, 0, -1), \"DENSIDADE\":(0, 1, 0)}\nSAGITAL_SLICE_CAM_VIEW_UP = {\"AXIAL\":(-1, 0, 0), \"CORONAL\":(0, 1, 0), \"SAGITAL\":(0, 1, 0), \"DENSIDADE\":(-1, 0, 0)}\n\nCORONAL_SLICE_CAM_POSITION = {\"AXIAL\":(0, 1, 0), \"CORONAL\":(0, 0, 1), \"SAGITAL\":(1, 0, 0),\"DENSIDADE\":(0, 1, 0)}\nCORONAL_SLICE_CAM_VIEW_UP = {\"AXIAL\":(0, 0, -1), \"CORONAL\":(0, 1, 0), \"SAGITAL\":(0, 1, 0), \"DENSIDADE\":(0, 0, -1)}\n\nSLICE_POSITION = {AXIAL:[AXIAL_SLICE_CAM_VIEW_UP, AXIAL_SLICE_CAM_POSITION],\n SAGITAL:[SAGITAL_SLICE_CAM_VIEW_UP, SAGITAL_SLICE_CAM_POSITION],\n CORONAL:[CORONAL_SLICE_CAM_VIEW_UP, CORONAL_SLICE_CAM_POSITION],\n DENSIDADE:[AXIAL_SLICE_CAM_VIEW_UP, AXIAL_SLICE_CAM_POSITION]}\n#Project Status\n#NEW_PROJECT = 0\n#OPEN_PROJECT = 1\n#CHANGE_PROJECT = 2\n#SAVE_PROJECT = 3\nPROJ_NEW = 0\nPROJ_OPEN = 1\nPROJ_CHANGE = 2\nPROJ_CLOSE = 3\n\nPROJ_MAX = 4\n\n\n####\nMODE_RP = 0\nMODE_NAVIGATOR = 1\nMODE_RADIOLOGY = 2\nMODE_ODONTOLOGY = 3\n\n\n\n\n\n\n# Mask threshold options\nTHRESHOLD_PRESETS_INDEX = 0 #Bone\nTHRESHOLD_HUE_RANGE = (0, 0.6667)\nTHRESHOLD_INVALUE = 5000\nTHRESHOLD_OUTVALUE = 0\n\n# Mask properties\nMASK_OPACITY = 0.40\n#MASK_OPACITY = 0.35\nMASK_COLOUR = [[0.33, 1, 0.33],\n [1, 1, 0.33],\n [0.33, 0.91, 1],\n [1, 0.33, 1],\n [1, 0.68, 0.33],\n [1, 0.33, 0.33],\n [0.33333333333333331, 0.33333333333333331, 1.0],\n #(1.0, 0.33333333333333331, 0.66666666666666663),\n [0.74901960784313726, 1.0, 0.0],\n [0.83529411764705885, 0.33333333333333331, 1.0]]#,\n #(0.792156862745098, 0.66666666666666663, 1.0),\n #(1.0, 0.66666666666666663, 0.792156862745098), # too \"light\"\n #(0.33333333333333331, 1.0, 0.83529411764705885),#],\n #(1.0, 0.792156862745098, 0.66666666666666663),\n #(0.792156862745098, 1.0, 0.66666666666666663), # too \"light\"\n #(0.66666666666666663, 0.792156862745098, 1.0)]\n\n\nMEASURE_COLOUR = [[1, 0, 0],\n [1, 0.4, 0],\n [0, 0, 1],\n [1, 0, 1],\n [0, 0.6, 0]]\n\nSURFACE_COLOUR = [(0.33, 1, 0.33),\n (1, 1, 0.33),\n (0.33, 0.91, 1),\n (1, 0.33, 1),\n (1, 0.68, 0.33),\n (1, 0.33, 0.33),\n (0.33333333333333331, 0.33333333333333331, 1.0),\n (1.0, 0.33333333333333331, 0.66666666666666663),\n (0.74901960784313726, 1.0, 0.0),\n (0.83529411764705885, 0.33333333333333331, 1.0),\n (0.792156862745098, 0.66666666666666663, 1.0),\n (1.0, 0.66666666666666663, 0.792156862745098),\n (0.33333333333333331, 1.0, 0.83529411764705885),\n (1.0, 0.792156862745098, 0.66666666666666663),\n (0.792156862745098, 1.0, 0.66666666666666663),\n (0.66666666666666663, 0.792156862745098, 1.0)]\n\n# Related to slice editor brush\nBRUSH_CIRCLE = 0 #\nBRUSH_SQUARE = 1\nDEFAULT_BRUSH_FORMAT = BRUSH_CIRCLE\n\nBRUSH_DRAW = 0\nBRUSH_ERASE = 1\nBRUSH_THRESH = 2\nDEFAULT_BRUSH_OP = BRUSH_THRESH\n\nBRUSH_COLOUR = (0,0,1.0)\nBRUSH_SIZE = 30\n\n# Surface creation values. 
Each element's list contains:\n# 0: imagedata reformat ratio\n# 1: smooth_iterations\n# 2: smooth_relaxation_factor\n# 3: decimate_reduction\n\n\nREDUCE_IMAGEDATA_QUALITY = 0\n\nICON_DIR = os.path.abspath(os.path.join('..', 'icons'))\nSAMPLE_DIR = os.path.abspath(os.path.join('..', 'samples'))\nDOC_DIR = os.path.abspath(os.path.join('..', 'docs'))\n\n\n\n# if 1, use vtkVolumeRaycastMapper, if 0, use vtkFixedPointVolumeRayCastMapper\nTYPE_RAYCASTING_MAPPER = 0\n\nfolder=RAYCASTING_PRESETS_DIRECTORY= os.path.abspath(os.path.join(\"..\",\n                                                          \"presets\",\n                                                          \"raycasting\"))\n\n\n\nLOG_FOLDER = os.path.join(os.path.expanduser('~'), '.invesalius', 'logs')\nif not os.path.isdir(LOG_FOLDER):\n    os.makedirs(LOG_FOLDER)\n\nfolder = os.path.join(os.path.expanduser('~'), '.invesalius', 'presets')\nif not os.path.isdir(folder):\n    os.makedirs(folder)\n\n\nUSER_RAYCASTING_PRESETS_DIRECTORY = folder\n\n# If 0 don't blur, if 1 blur\nRAYCASTING_WWWL_BLUR = 0\n\nRAYCASTING_PRESETS_FOLDERS = (RAYCASTING_PRESETS_DIRECTORY,\n                              USER_RAYCASTING_PRESETS_DIRECTORY)\n\n\n####\n#MODE_ZOOM = 0 #\"Set Zoom Mode\",\n#MODE_ZOOM_SELECTION = 1 #:\"Set Zoom Select Mode\",\n#MODE_ROTATE = 2#:\"Set Spin Mode\",\n#MODE_MOVE = 3#:\"Set Pan Mode\",\n#MODE_WW_WL = 4#:\"Bright and contrast adjustment\"}\n#MODE_LINEAR_MEASURE = 5\n\n\n# self.states = {0:\"Set Zoom Mode\", 1:\"Set Zoom Select Mode\",\n#                2:\"Set Spin Mode\", 3:\"Set Pan Mode\",\n#                4:\"Bright and contrast adjustment\"}\n\n\n#ps.Publisher().sendMessage('Set interaction mode %d'%\n#                                        (MODE_BY_ID[id]))\n\n#('Set Editor Mode')\n#{0:\"Set Change Slice Mode\"}\n\n####\nMODE_SLICE_SCROLL = -1\nMODE_SLICE_EDITOR = -2\nMODE_SLICE_CROSS = -3\n\n############\n\n\n","repo_name":"jcdinis/ultrasound_segmentation","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"35668957482","text":"def soma_ate(n):\n    \"\"\" Returns the sum of 1+2+3 ... 
n \"\"\"\n ss = 0\n v = 1\n while v <= n: \n ss = ss + v\n v = v + 1\n return ss\n\nprint(soma_ate(8))\n","repo_name":"Gustavobflh/IntroPython","sub_path":"aula8_ex1.py","file_name":"aula8_ex1.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"43981025261","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx=np.linspace(-6,6,100)\nT=np.loadtxt(\"../4.1/tri.dat\")\nN=10**6\n\ndef pdf(x):\n if x <= 0:\n return 0.0\n elif x > 0 and x <= 1:\n return x\n elif x > 1 and x < 2:\n return 2 - x\n else:\n return 0.0\n\nvec_gauss=np.vectorize(pdf)\n\nF=[]\npdf=[]\nfor i in range(0,100):\n F.append(np.size(np.nonzero(T < x[i]))/N)\nfor i in range(0,99):\n pdf.append((F[i+1]-F[i])/(x[i+1]-x[i]))\n\nplt.scatter(x[0:99],pdf,label=\"Theoritical\")\nplt.plot(x,vec_gauss(x),label=\"Numerical\",color=\"orange\")\nplt.grid()\nplt.xlabel(\"x\")\nplt.ylabel(\"$p_T(x)$\")\nplt.legend()\nplt.show()\n","repo_name":"TYCN129/AI1110-Assignments","sub_path":"Manual 1/4.5/4.5_1.py","file_name":"4.5_1.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10964613553","text":"from collections import deque\nfrom queue import Queue\nfrom threading import Thread\nimport numpy as np\nimport cv2\nimport time\n\n\nclass EventRecorder:\n def __init__(self, buffer_size=64, timeout=1.0):\n self.buffer_size = buffer_size\n self.timeout = timeout\n\n self.frame_buffer = deque(maxlen=buffer_size)\n self.frame_queue = None\n self.is_recording = None\n self.thread = None\n self.writer = None\n\n self.frames_without_motion = 0\n self.consecutive_frames = 0\n self.frames_since_screenshot = np.inf\n\n def start(self, output_path, video_codec, fps):\n self.is_recording = True\n self.frame_queue = Queue()\n (height, width, _) = self.frame_buffer[0].shape\n self.writer = cv2.VideoWriter(\n output_path,\n video_codec,\n fps,\n (height, width)\n )\n\n for i in range(len(self.frame_buffer), 0, -1):\n self.frame_queue.put(self.frame_buffer[i - 1])\n\n self.thread = Thread(target=self.record_video, args=())\n self.thread.daemon = True\n self.thread.start()\n\n def update(self, frame):\n '''\n '''\n self.frame_buffer.appendleft(frame)\n\n if self.is_recording:\n self.frame_queue.put(frame)\n self.consecutive_frames += 1\n\n def record_video(self):\n while True:\n if not self.is_recording:\n return\n\n if not self.frame_queue.empty():\n frame = self.frame_queue.get()\n self.writer.write(frame)\n\n else:\n time.sleep(self.timeout)\n\n def take_screenshot(self, image, screenshot_path, delay=30):\n if self.frames_since_screenshot >= delay:\n cv2.imwrite(screenshot_path, image)\n self.frames_since_screenshot = 0\n\n self.frames_since_screenshot += 1\n\n def stop(self):\n self.is_recording = False\n self.consecutive_frames = 0\n self.thread.join()\n while not self.frame_queue.empty():\n frame = self.frame_queue.get()\n self.writer.write(frame)\n self.writer.release()\n","repo_name":"djalmada/crittercam","sub_path":"crittercam/eventrecorder.py","file_name":"eventrecorder.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73234644323","text":"# kanged from telebot\n# @OreBaka\n# Don't remove This\n\n\"\"\"\nGoogle Translate\nAvailable Commands:\ntr LanguageCode as reply to a message\n\"\"\"\n\nimport emoji\nfrom googletrans import 
Translator\n\nfrom userbot import CMD_HELP\nfrom userbot.events import register\n\n\n\n@register(outgoing=True, pattern='^tr(?: |$)(.*)')\nasync def _(event):\n    if event.fwd_from:\n        return\n    if \"trim\" in event.raw_text:\n        return\n    x = await event.edit(\"Translating...\")\n    input_str = event.pattern_match.group(1)\n    if event.reply_to_msg_id:\n        previous_message = await event.get_reply_message()\n        text = previous_message.message\n        lan = input_str or \"ml\"\n    elif \"|\" in input_str:\n        lan, text = input_str.split(\"|\")\n    else:\n        await x.edit(\n            f\"`tr LanguageCode` as reply to a message.\\nLanguage codes can be found [here](https://t.me/TeleBotHelpChat/22678)\",\n        )\n        return\n    text = emoji.demojize(text.strip())\n    lan = lan.strip()\n    translator = Translator()\n    try:\n        translated = translator.translate(text, dest=lan)\n        after_tr_text = translated.text\n        output_str = \"\"\"\n**Tʀᴀɴsʟᴀᴛɪᴏɴ**\n**{} ➟ {}**\n`{}`\"\"\".format(\n            translated.src, lan, after_tr_text\n        )\n        await x.edit(output_str)\n    except Exception as exc:\n        await x.edit(f\"Error\\n `{str(exc)}`\")\n\n\nCMD_HELP.update(\n    {\n        \"translate\": \"tr \\\n        \\nUsage - Translate the replied text.\"\n    }\n)\n","repo_name":"zalUseless/Simple-Userbot","sub_path":"userbot/modules/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"10498111125","text":"import dash\r\nfrom dash.dependencies import Input, Output\r\nimport dash_core_components as dcc\r\nimport dash_bootstrap_components as dbc\r\nimport dash_html_components as html\r\n\r\nimport flask\r\nimport pandas as pd\r\nimport time\r\nimport os\r\n\r\nimport plotly.graph_objects as go\r\nimport plotly.express as px\r\nserver = flask.Flask('app')\r\nserver.secret_key = os.environ.get('secret_key', 'secret')\r\n\r\n\r\nwd = os.getcwd()\r\n\r\n\r\noverall_platform_data = pd.read_csv(os.path.join(wd, 'overall_platforms_data.csv'))\r\nnetflix_df = pd.read_csv(os.path.join(wd, 'netflix_df.csv'))\r\nprimevideo_df = pd.read_csv(os.path.join(wd, 'primevideo_df.csv'))\r\nhulu_df = pd.read_csv(os.path.join(wd, 'hulu_df.csv'))\r\ndisneyplus_df = pd.read_csv(os.path.join(wd, 'disneyplus_df.csv'))\r\ntweets_per_day_df = pd.read_csv(os.path.join(wd, 'tweets_per_day.csv'))\r\noverall_performance_data = pd.read_csv(os.path.join(wd, 'overall_performance_data.csv'))\r\nchurn_likeliness_data = pd.read_csv(os.path.join(wd, 'churn_likeliness_data.csv'))\r\n\r\n\r\n\r\ncolors = {\r\n    'background': 'rgba(0,0,0,0)',\r\n    'text': '#7FDBFF'\r\n}\r\nmarker_colors = ('#00A8E1', '#E50914', '#66AA33', '#113CCF')\r\n\r\noverall_platform_data['positive_tweet_count'] = ''\r\noverall_platform_data['positive_tweet_percentage'] = ''\r\noverall_platform_data['negative_tweet_count'] = ''\r\noverall_platform_data['negative_tweet_percentage'] = ''\r\n\r\n# Iterate over the platforms to calculate the positive and negative tweet counts and percentages\r\nfor platform in (primevideo_df, netflix_df, hulu_df, disneyplus_df):\r\n    platform_name = platform.loc[0, 'streaming_platform']\r\n    \r\n    negative_tweet_count = len(platform.loc[platform['sentiment'] == 'Negative'])\r\n    negative_tweet_percentage = negative_tweet_count/overall_platform_data.loc[overall_platform_data['streaming_platform'] == platform_name, 'tweet_count']\r\n\r\n    overall_platform_data.loc[overall_platform_data['streaming_platform'] == platform_name, 'negative_tweet_count'] = negative_tweet_count\r\n    
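    # store the negative share relative to this platform's total tweet count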
overall_platform_data.loc[overall_platform_data['streaming_platform'] == platform_name, 'negative_tweet_percentage'] = negative_tweet_percentage\r\n    \r\n    positive_tweet_count = len(platform.loc[platform['sentiment'] == 'Positive'])\r\n    positive_tweet_percentage = positive_tweet_count/overall_platform_data.loc[overall_platform_data['streaming_platform'] == platform_name, 'tweet_count']\r\n    \r\n    overall_platform_data.loc[overall_platform_data['streaming_platform'] == platform_name, 'positive_tweet_count'] = positive_tweet_count\r\n    overall_platform_data.loc[overall_platform_data['streaming_platform'] == platform_name, 'positive_tweet_percentage'] = positive_tweet_percentage\r\n\r\n\r\n\r\napp = dash.Dash('app', server=server, external_stylesheets=[dbc.themes.CYBORG, os.path.join(wd, 'style.css')])\r\n\r\napp.scripts.config.serve_locally = False\r\ndcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-basic-latest.min.js'\r\n\r\napp.layout = html.Div([\r\n    html.H1(\"ANALYSIS OF TWITTER DATA\", style={'textAlign': 'center', 'color': colors['text']}),\r\n    html.Div(\r\n        className='row',\r\n        children=[ \r\n            dcc.Markdown(\"This dashboard shows how sentiment towards web streaming services changes over time, along with the features that help us identify churners and predict the likelihood of churn. We choose to focus on the top 4 web streaming services (Amazon Prime Video, Hulu, Disney Plus, & Netflix). This is particularly relevant in current Covid-19 times, which have driven increased usage of and engagement with these services.\"),\r\n        ],style={}\r\n    ),\r\n    html.Br(),\r\n    html.Div(\r\n        [dcc.Graph(\r\n            id='graph1',\r\n            figure=go.Figure(\r\n                data = [\r\n                    go.Bar(\r\n                        name = 'Tweets',\r\n                        x = overall_platform_data['streaming_platform'],\r\n                        y = overall_platform_data['tweet_count'],\r\n                        marker_color = marker_colors),\r\n                    go.Bar(name = 'Retweets',\r\n                        x = overall_platform_data['streaming_platform'],\r\n                        y = overall_platform_data['retweet_count'],\r\n                        marker_color = marker_colors)],\r\n                layout= {\r\n                    'title': 'Tweets vs. ReTweets per Platform',\r\n                    'xaxis_title' : 'Web Streaming Platforms', \r\n                    'yaxis_title' : 'Tweet Count', \r\n                    'showlegend': False,\r\n                    'plot_bgcolor': colors['background'],\r\n                    'paper_bgcolor': colors['background'],\r\n                    'font': {\r\n                        'color': colors['text']\r\n                    }\r\n                }\r\n            )\r\n        ),\r\n        dcc.Graph(\r\n            id='graph4',\r\n            figure=go.Figure(\r\n                data = [go.Bar(\r\n                        name = 'Positive', \r\n                        x = overall_platform_data['streaming_platform'], \r\n                        y = overall_platform_data['positive_tweet_percentage'],\r\n                        marker_color = marker_colors),\r\n                    go.Bar(\r\n                        name = 'Negative', \r\n                        x = overall_platform_data['streaming_platform'], \r\n                        y = overall_platform_data['negative_tweet_percentage'],\r\n                        marker_color = marker_colors)],\r\n                layout= {\r\n                    'barmode' : 'group', \r\n                    'xaxis_title' : 'Web Streaming Platforms', \r\n                    'yaxis_title' : 'Percentage', \r\n                    'title': 'Positive vs. Negative Tweets',\r\n                    'showlegend': False,\r\n                    'plot_bgcolor': colors['background'],\r\n                    'paper_bgcolor': colors['background'],\r\n                    'font': {\r\n                        'color': colors['text']\r\n                    } \r\n                }\r\n            )\r\n        )],\r\n        style={'columnCount': 2}\r\n    ),\r\n    html.Div(\r\n        className='row',\r\n        children=[ \r\n            dcc.Markdown(\"We can see that Netflix is the leading platform in terms of tweets, with Hulu and Disney Plus a close second and third, and PrimeVideo lagging the group. 
In terms of sentiment, DisneyPlus seems to have the highest percentage of positive and the lowest percentage of negative tweets, with Prime, Hulu, and Netflix following in that order.\"),\r\n        ],style={}\r\n    ),\r\n    html.Br(),\r\n    dcc.Dropdown(\r\n        id='my-dropdown1',\r\n        options=[\r\n            {'label': 'All', 'value': 'all'},\r\n            {'label': 'Netflix', 'value': 'netflix'},\r\n            {'label': 'Prime Video', 'value': 'primevideo'},\r\n            {'label': 'Hulu', 'value': 'hulu'},\r\n            {'label': 'Disney Plus', 'value': 'disneyplus'}\r\n        ],\r\n        value='all'\r\n    ),\r\n    html.Div(\r\n        [dcc.Graph(\r\n            id='graph2'\r\n        ), \r\n        dcc.Graph(\r\n            id='graph3'\r\n        )],\r\n        style={'columnCount': 2}\r\n    ), \r\n    html.Div(\r\n        className='row',\r\n        children=[ \r\n            dcc.Markdown(\"Interestingly, we see a big spike for Disney in the first week, which lifted its overall performance considerably and may merit further study. This means that although Netflix currently holds the largest presence, it could be threatened if DisneyPlus repeats such a surge, as DisneyPlus had the highest single-day tweet count in the period we studied. Thus, time is a critical factor, and properly understanding the trend, its anomalies, and how we handle them for machine learning purposes will be crucial for our analysis. The streaming companies can watch the tweet count and sentiment polarity distribution over time to observe whether recently added content causes any change in customer sentiment, and thus identify what kind of content is well received.\"),\r\n        ],style={}\r\n    ),\r\n    html.Br(), \r\n\r\n    html.Div(\r\n        [\r\n            html.Div(\r\n                className='row',\r\n                children=[ \r\n                    dcc.Markdown(\"Select a value to see the churn likeliness rate\"),\r\n                ],style={'textAlign': 'center'}\r\n            ),\r\n            dcc.Dropdown(\r\n                id='my-dropdown2',\r\n                options=[\r\n                    {'label': 'Netflix', 'value': 'Netflix'},\r\n                    {'label': 'Prime Video', 'value': 'PrimeVideo'},\r\n                    {'label': 'Hulu', 'value': 'Hulu'},\r\n                    {'label': 'Disney Plus', 'value': 'DisneyPlus'}\r\n                ],\r\n                value='Netflix'\r\n            ),\r\n            dcc.Graph(\r\n                id='graph6'\r\n\r\n            ),\r\n            dcc.Graph(\r\n                id='graph5',\r\n                figure=go.Figure(\r\n                    data = [\r\n                        go.Bar(\r\n                            name = 'MorePreferred',\r\n                            x = overall_performance_data['StreamingPlatform'],\r\n                            y = overall_performance_data['MorePreferredPercentage'],\r\n                            marker_color = marker_colors),\r\n                        go.Bar(name = 'LessPreferred',\r\n                            x = overall_performance_data['StreamingPlatform'],\r\n                            y = overall_performance_data['LessPreferredPercentage'],\r\n                            marker_color = marker_colors)],\r\n                    layout = {\r\n                        'barmode' : 'group', \r\n                        'xaxis_title' : 'Web Streaming Platforms', \r\n                        'yaxis_title' : 'Percentage', \r\n                        'title' : 'More Preferred vs Less Preferred % of the Streaming Platforms',\r\n                        'showlegend': False,\r\n                        'plot_bgcolor': colors['background'],\r\n                        'paper_bgcolor': colors['background'],\r\n                        'font': {\r\n                            'color': colors['text']\r\n                        }    \r\n                    }\r\n                )\r\n            )],\r\n        style={'columnCount': 2}\r\n    ), \r\n    html.Div(\r\n        className='row',\r\n        children=[ \r\n            dcc.Markdown(\"The findings can help the streaming companies get an idea of where they stand in the current market compared to the other streaming services, according to the customer tweets. 
For example, it is visible from the more preferred vs less preferred graph that, despite Netflix having 69 million US subscribers, it is the less preferred service in more than 80% of the tweets that mention it.\"),\r\n        ],style={}\r\n    )], className=\"container\")\r\n\r\n\r\n@app.callback(Output('graph2', 'figure'),\r\n              [Input('my-dropdown1', 'value')])\r\ndef update_tweet_count_graph(selected_dropdown_value):\r\n    if selected_dropdown_value == \"all\":\r\n        dff = tweets_per_day_df\r\n    else:\r\n        dff = tweets_per_day_df[tweets_per_day_df['platform'] == selected_dropdown_value]\r\n    fig = px.line(\r\n        dff,\r\n        x = 'date',\r\n        y = 'tweet_count',\r\n        color = 'platform',\r\n        title = 'Tweet Count per day for the Web Streaming Platforms', \r\n    )\r\n    fig.layout = {\r\n        'title' : 'Tweet Count per day for the Web Streaming Platforms',\r\n        'showlegend': False,\r\n        'plot_bgcolor': colors['background'],\r\n        'paper_bgcolor': colors['background'],\r\n        'font': {\r\n            'color': colors['text']\r\n        }\r\n    }\r\n    return fig\r\n\r\n\r\n@app.callback(Output('graph3', 'figure'),\r\n              [Input('my-dropdown1', 'value')])\r\ndef update_sentiment_graph(selected_dropdown_value):\r\n    if selected_dropdown_value == \"all\":\r\n        dff = tweets_per_day_df\r\n    else:\r\n        dff = tweets_per_day_df[tweets_per_day_df['platform'] == selected_dropdown_value]\r\n    fig = px.line(\r\n        dff,\r\n        x = 'date',\r\n        y = 'sentiment_polarity',\r\n        color = 'platform',\r\n        title = 'Average Sentiment per day for the Web Streaming Platforms'\r\n    )\r\n    fig.layout = {\r\n        'title' : 'Average Sentiment per day for the Web Streaming Platforms',\r\n        'showlegend': False,\r\n        'plot_bgcolor': colors['background'],\r\n        'paper_bgcolor': colors['background'],\r\n        'font': {\r\n            'color': colors['text']\r\n        }\r\n    }\r\n    return fig\r\n\r\n@app.callback(Output('graph6', 'figure'),\r\n              [Input('my-dropdown2', 'value')])\r\ndef update_churn_pie(selected_dropdown_value):\r\n    \r\n    count = 'Count' + selected_dropdown_value\r\n    churn = 'ChurnLikeliness'+ selected_dropdown_value\r\n    fig_pie = px.pie(churn_likeliness_data, values=count, names=churn)\r\n    fig_pie.layout = {\r\n        'title' : 'Likely to Churn Rate',\r\n        #'showlegend': False,\r\n        'plot_bgcolor': colors['background'],\r\n        'paper_bgcolor': colors['background'],\r\n        'font': {\r\n            'color': colors['text']\r\n        }\r\n    }\r\n    return fig_pie\r\n\r\nif __name__ == '__main__':\r\n    app.run_server()","repo_name":"AnkitRajSri/Twitter-Sentiment-and-Customer-Churn-Detection","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"15706359945","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\nDATA_DIR = '../input/'\ntrain = pd.read_csv(DATA_DIR + 'train.csv')\ntest = pd.read_csv(DATA_DIR + 'test.csv')\n\n\n# In[ ]:\n\n\ntrain.head(2)\n\n\n# In[ ]:\n\n\ntrain.info()\n\n\n# Check for missing values\n\n# In[ ]:\n\n\nprint ('===== train =====')\nfor column in train.columns:\n    tab = '\\t\\t: ' if len(column)<8 else '\\t: '\n    print (column + tab + str(train[column].isnull().sum()))\n\nprint ('\\n===== test =====')\nfor column in test.columns:\n    tab = '\\t\\t: ' if len(column)<8 else '\\t: '\n    print (column + tab + str(test[column].isnull().sum()))\n\n\n# Fill missing ages with the mean age\n\n# In[ ]:\n\n\n#train[\"Age\"].fillna(train.Age.mean(), inplace=True)\n#test[\"Age\"].fillna(train.Age.mean(), inplace=True)\n\n\n# Median age per passenger class\n\n# In[ ]:\n\n\ntrain[['Pclass', 
'Age']].boxplot(by='Pclass')\nplt.show()\n\n\n# Fill missing ages with the per-class median age\n\n# In[ ]:\n\n\npclass_median = train.groupby('Pclass')['Age'].median()\nfill_train_age = pd.DataFrame(train[['Pclass', 'Age']])\nfill_test_age = pd.DataFrame(test[['Pclass', 'Age']])\nfor pclass in [1, 2, 3]:\n    fill_train_age.loc[fill_train_age['Pclass']==pclass, 'Age'] = pclass_median[pclass]\n    fill_test_age.loc[fill_test_age['Pclass']==pclass, 'Age'] = pclass_median[pclass]\ntrain[\"Age\"].fillna(fill_train_age['Age'], inplace=True)\ntest[\"Age\"].fillna(fill_test_age['Age'], inplace=True)\n\n\n# Fill missing embarkation ports with the most common port\n\n# In[ ]:\n\n\nembarked = train.Embarked.value_counts()\ntrain[\"Embarked\"].fillna(embarked.idxmax(), inplace=True)\n\n\n# Fill missing fares with the mean fare of each cabin class\n\n# In[ ]:\n\n\npclass_mean = train.groupby('Pclass')['Fare'].mean()\nfill_fare = pd.DataFrame(test[['Pclass', 'Fare']])\nfor pclass in [1, 2, 3]:\n    fill_fare.loc[fill_fare['Pclass']==pclass, 'Fare'] = pclass_mean[pclass]\ntest[\"Fare\"].fillna(fill_fare['Fare'], inplace=True)\n\n\n# Convert string-typed data to integers\n\n# In[ ]:\n\n\ntrain = pd.concat((train, pd.get_dummies(train['Sex'])),axis=1)\ntrain = pd.concat((train, pd.get_dummies(train['Embarked'])),axis=1)\n\n\n# In[ ]:\n\n\ntrain['Family'] = train['SibSp'] + train['Parch'] + 1\n\n\n# In[ ]:\n\n\ntrain.head(1)\n\n\n# In[ ]:\n\n\nX_train = train[[\n    'Pclass', 'Age', 'SibSp', 'Parch', 'Family', \n    'Fare', 'C', 'Q', 'S', 'female', 'male'\n]]\nY_train = train['Survived']\n\n\n# In[ ]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# In[ ]:\n\n\nclf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\nclf.fit(X_train, Y_train)\nclf.score(X_train, Y_train)\n\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import accuracy_score\n\n\n# In[ ]:\n\n\nX_train = np.array(X_train)\nY_train = np.array(Y_train)\nskf = StratifiedKFold(n_splits=3)\nfor fold, (train_index, test_index) in enumerate(skf.split(X_train, Y_train)):\n    x_train, x_test = X_train[train_index], X_train[test_index]\n    y_train, y_test = Y_train[train_index], Y_train[test_index]\n    clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n    clf.fit(x_train, y_train)\n    y_pred = clf.predict(x_test)\n    accuracy = accuracy_score(y_test, y_pred)\n    print ('K-Fold: %d, Accuracy: %f' % (fold, accuracy))\n\n\n# In[ ]:\n\n\ntest = pd.concat((test, pd.get_dummies(test['Sex'])),axis=1)\ntest = pd.concat((test, pd.get_dummies(test['Embarked'])),axis=1)\ntest['Family'] = test['SibSp'] + test['Parch'] + 1\n\nX_test = test[[\n    'Pclass', 'Age', 'SibSp', 'Parch', 'Family', \n    'Fare', 'C', 'Q', 'S', 'female', 'male'\n]]\n\n\n# In[ ]:\n\n\nclf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\nclf.fit(X_train, Y_train)\nY_prediction = clf.predict(X_test)\nsubmission = pd.DataFrame({\n    'PassengerId': test['PassengerId'],\n    'Survived': Y_prediction\n})\nsubmission.to_csv('./submission_2.csv', index=False)\n\n\n# Display the feature importances\n\n# In[ ]:\n\n\nfti = clf.feature_importances_ \nfor i, feat in enumerate(X_test.columns):\n    print('{0:20s} : {1:>.6f}'.format(feat, fti[i]))\n\n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/tanakaso/titanic-machine-learning-random-forest/titanic-machine-learning-random-forest.py","file_name":"titanic-machine-learning-random-forest.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"32407596226","text":"from ..items 
import NewsItem\nfrom scrapy import Request, Spider\n\n\nclass SpiderNews(Spider):\n    name = \"news\"\n    domain = \"https://www.leparisien.fr/\"\n\n    categories = [\"politique\", \"sports\", \"economie\", \"societe\", \"environnement\", \"faits-divers\", \"culture-loisirs\"]\n\n    def start_requests(self):\n        for category in self.categories:\n            for page in range(1,101):\n                yield Request(f\"{self.domain}{category}/{page}/\", callback=self.parse_category, meta={'category': category})\n\n    def parse_category(self, response):\n        article_urls = response.css(\"div.flex-feed > div.story-preview > a::attr(href)\").extract()\n        for article_url in article_urls:\n            if article_url.startswith(\"//www\"):\n                article_url = \"https:\" + article_url\n            yield Request(article_url, callback=self.parse_article, meta=response.meta)\n    \n\n    def parse_article(self, response):\n\n        sections = response.css(\"div.article-section section.content\")\n        headline = \"\\n\".join(response.css(\"header.article_header > *::text\").extract())\n        article = \"\\n\".join([\"\".join(section.css(\"p.paragraph *::text\").extract()) for section in sections])\n\n        item = NewsItem()\n\n        item['category'] = response.meta['category']\n        item['article'] = article\n        item['headline'] = headline\n\n        yield item","repo_name":"ledatascientist/scraping","sub_path":"datasets/datasets/spiders/articles_presse.py","file_name":"articles_presse.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"15448316314","text":"from label import Label\nimport pygame\nclass TextBox(Label):\n    def __init__(self, rect, text,max_len=None):\n        super().__init__(rect, text,pygame.Color(\"white\"),pygame.Color(\"gray\"))\n        self.active = True\n        self.blink = True\n        self.blink_timer = 0\n        self.delta = 0\n        self.max_len = max_len\n        self.executed = False\n\n    def execute(self):\n        self.executed = True\n\n    def get_event(self, event):\n        if event.type == pygame.KEYDOWN and self.active:\n            if event.key in (pygame.K_RETURN, pygame.K_KP_ENTER):\n                self.execute()\n            elif event.key == pygame.K_BACKSPACE:\n                if len(self.text) > 0:\n                    c = self.delta + len(self.text)\n                    a = self.text[:c-1]\n                    b = self.text[c:]\n                    self.text = a + b\n            elif event.key == pygame.K_TAB:\n                pass\n            elif event.key == pygame.K_LEFT:\n                # don't move the cursor left of the start of the text\n                if self.delta > -len(self.text):\n                    self.delta -= 1\n            elif event.key == pygame.K_RIGHT:\n                if self.delta < 0:\n                    self.delta += 1\n            else:\n                c = self.delta + len(self.text)\n                a = self.text[:c]+event.unicode\n                b = self.text[c:]\n                self.text = a + b\n        elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n            self.active = self.rect.collidepoint(event.pos)\n            was = None\n            id = 0\n            if self.active and self.text:\n                for i in range(len(self.text)+1):\n                    self.rendered_text = self.font.render(self.text[:i], 1, self.font_color)\n                    d = self.rendered_text.get_rect(x=self.rect.x + 2, centery=self.rect.centery)\n                    if not was or abs(d.width+d.x - event.pos[0]) < abs(was.width+was.x-event.pos[0]): was,id = d,i\n                self.delta = -(len(self.text)-id)\n\n    def update(self):\n        if pygame.time.get_ticks() - self.blink_timer > 200:\n            self.blink = not self.blink\n            self.blink_timer = pygame.time.get_ticks()\n\n    def render(self, surface):\n        surface.fill(self.bgcolor, self.rect)\n        self.rendered_text = self.font.render(self.text, 1, self.font_color)\n        self.rendered_rect = self.rendered_text.get_rect(x=self.rect.x + 2, centery=self.rect.centery)\n\n        while (self.rect.width <= self.rendered_rect.width) if not self.max_len else 
(len(self.text) > self.max_len):\n            self.text = self.text[:-1]\n            self.rendered_text = self.font.render(self.text, 1, self.font_color)\n            self.rendered_rect = self.rendered_text.get_rect(x=self.rect.x + 2, centery=self.rect.centery)\n        if self.blink and self.active:\n            self.rer_text = self.font.render(self.text[:self.delta] if self.delta != 0 else self.text, 1,\n                                             self.font_color)\n            self.p_rect = self.rer_text.get_rect(x=self.rect.x + 2, centery=self.rect.centery)\n            pygame.draw.line(surface, pygame.Color(\"black\"),\n                             (self.p_rect.right, self.p_rect.top + 2),\n                             (self.p_rect.right, self.p_rect.bottom - 2))\n        surface.blit(self.rendered_text, self.rendered_rect)\n\n","repo_name":"Kukushenok/YandexMapProgramm","sub_path":"textbox.py","file_name":"textbox.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"23083959326","text":"import os\nimport sys\n\n# To use the common.py module\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nimport common\nimport dbs_view as dbv\nimport dbs_exec as dbe\n\nimport socket\nimport threading\n\n# Constants\nSERVER_IP = '127.0.0.1'\nSERVER_PORT = 1069\nLISTEN_COUNT = 10\n\ndef createServerSocket(serverIP=SERVER_IP, serverPort=SERVER_PORT, listenCount=LISTEN_COUNT):\n\tserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tserverSocket.bind((serverIP, serverPort))\n\tserverSocket.listen(listenCount)\n\treturn serverSocket\n\ndef handleClient(client: socket.socket, address: any) -> None:\n\tstatus = common.recvEncryptedMessage(client, 0)\n\tif not status[0] or status[1] == '':\n\t\t# Could not read the session key; drop this client instead of killing the server\n\t\tprint('Failed to receive the session key from', address, '- closing connection...')\n\t\tclient.close()\n\t\treturn\n\tkey = int(status[1])\n\tdbv.loginMenu(client, key, address)\n\ndef main():\n\tstatus = dbe.createDatabase()\n\tif not status[0]:\n\t\tprint('Creation of DBS failed due to {}, program will be terminated...'.format(status[1]))\n\t\tsys.exit(1)\n\t\n\tprint('Databases initialized...')\n\tdbv.loadMenus()\n\tprint('Menu text loaded...')\n\tserverSocket = createServerSocket()\n\tprint('Server socket is now available...')\n\twhile True:\n\t\tclientSocket, address = serverSocket.accept()\n\t\tprint('Made connection with ', address)\n\t\tthreading.Thread(target=handleClient, args=(clientSocket, address)).start()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"geekyprawins/distributed-banking-system","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"10479340178","text":"\"\"\"\nMerge two XML files based on join properties and optionally output the merged data to a new XML file.\n\"\"\"\nimport argparse\nimport os\nimport re\nfrom typing import List, Tuple\n\nfrom lxml import etree\n\n\nclass MergeStrategy:\n    \"\"\"\n    MergeStrategy is an abstract class that defines the merge method.\n    \"\"\"\n    def merge(self, left: etree._Element, right: etree._Element, join_properties: List[str]) -> etree._Element:\n        \"\"\"\n        Merge two element trees based on join properties.\n        \"\"\"\n        raise NotImplementedError\n\n\nclass LeftOuterJoinStrategy(MergeStrategy):\n    \"\"\"\n    LeftOuterJoinStrategy is a concrete class that defines the merge method. 
It merges the two XML files using a left outer join strategy.\n    \"\"\"\n    def merge(self, left: etree._Element, right: etree._Element, join_properties: List[str]) -> etree._Element:\n        join_dict = {}\n        for elem in right:\n            join_key = tuple(elem.find(prop).text for prop in join_properties)\n            join_dict[join_key] = elem\n        for elem in left:\n            join_key = tuple(elem.find(prop).text for prop in join_properties)\n            join_elem = join_dict.get(join_key)\n            if join_elem is not None:\n                join_dict.pop(join_key)\n        left.extend(join_dict.values())\n        return left\n\n\nclass RightOuterJoinStrategy(MergeStrategy):\n    \"\"\"\n    RightOuterJoinStrategy is a concrete class that defines the merge method. It merges the two XML files using a right outer join strategy.\n    \"\"\"\n    def merge(self, left: etree._Element, right: etree._Element, join_properties: List[str]) -> etree._Element:\n        join_dict = {}\n        for elem in left:\n            join_key = tuple(elem.find(prop).text for prop in join_properties)\n            join_dict[join_key] = elem\n        for elem in right:\n            join_key = tuple(elem.find(prop).text for prop in join_properties)\n            join_elem = join_dict.get(join_key)\n            if join_elem is not None:\n                join_dict.pop(join_key)\n        right.extend(join_dict.values())\n        return right\n\n\ndef parse_command_line_args() -> argparse.Namespace:\n    \"\"\"Parse the command line arguments and return the file names, properties, and output file name.\n\n    Returns:\n        argparse.Namespace: The parsed command line arguments\n    \"\"\"\n\n    parser = argparse.ArgumentParser(description='Merge two XML files based on join properties')\n    # Required left file, right file, and join properties\n    parser.add_argument('left_file', help='Path to the left XML file')\n    parser.add_argument('right_file', help='Path to the right XML file')\n    parser.add_argument('join_properties', nargs='+', help='List of join properties as xpath strings')\n    # Optional output file name\n    parser.add_argument('-o', '--output', help='Path to the output XML file', default=None)\n    # Optional merge strategy\n    parser.add_argument('-s', '--strategy', help='Merge strategy', choices=['left', 'right'], default='left')\n\n    return parser.parse_args()\n\n\ndef validate_output_filename(out_path: str) -> None:\n    \"\"\"Validate the output file name and ensure that it is valid, writable and doesn't already exist.\n\n    Args:\n        out_path (str): The output file path\n\n    Raises:\n        ValueError: If the output file name is invalid or already exists\n    \"\"\"\n\n    # Check if the output file name contains any invalid characters\n    if not is_valid_filename(out_path):\n        raise ValueError(\"The output file name contains invalid characters.\")\n\n    # Check if the output file name has the .xml extension\n    if not has_xml_extension(out_path):\n        raise ValueError(\"The output file name must have the .xml extension.\")\n\n    # Check that the output file doesn't already exist\n    if file_exists(out_path):\n        raise ValueError(\"The output file already exists.\")\n\n    # Check that the output directory is writable\n    if not is_writable_directory(os.path.dirname(out_path)):\n        raise ValueError(\"The output directory is not writable.\")\n\n\ndef is_valid_filename(filename: str) -> bool:\n    \"\"\"Check if the filename contains any invalid characters.\n\n    Args:\n        filename (str): The filename to check\n\n    Returns:\n        bool: True if the filename is valid, False otherwise\n    \"\"\"\n    return re.compile(r\"[^*?<>|]+\").fullmatch(filename) is not None\n\n\ndef has_xml_extension(filename: str) -> bool:\n    \"\"\"Check if the filename has the .xml extension.\n\n    Args:\n        filename (str): The 
filename to check\n\n    Returns:\n        bool: True if the filename has the .xml extension, False otherwise\n    \"\"\"\n    return filename.endswith(\".xml\")\n\n\ndef file_exists(filepath: str) -> bool:\n    \"\"\"Check if the file already exists.\n\n    Args:\n        filepath (str): The file path to check\n\n    Returns:\n        bool: True if the file exists, False otherwise\n    \"\"\"\n    return os.path.exists(filepath)\n\n\ndef is_writable_directory(directory: str) -> bool:\n    \"\"\"Check if the directory is writable.\n\n    Args:\n        directory (str): The directory to check\n\n    Returns:\n        bool: True if the directory is writable, False otherwise\n    \"\"\"\n    return os.access(directory, os.W_OK)\n\n\ndef validate_props_xpath(props_xpath: List[str]) -> None:\n    \"\"\"\n    Checks that each xpath string is valid using lxml.\n    If not, throws an error listing each invalid xpath string.\n    \"\"\"\n    invalid_props = []\n    for prop in props_xpath:\n        try:\n            etree.XPath(prop)\n        except etree.XPathSyntaxError as ex:\n            invalid_props.append(f\"{prop}: {ex}\")\n    if invalid_props:\n        error_message = \"\\n\".join(invalid_props)\n        raise ValueError(f\"The following xpath strings are invalid:\\n\\n{error_message}\")\n\n\ndef parse_xml_files(xml_file: str) -> Tuple[etree._Element, etree.XMLSchema]:\n    \"\"\"\n    Parse an XML file and return a tuple of the root element and the schema\n    \"\"\"\n    parser = etree.XMLParser(resolve_entities=False, strip_cdata=False)\n    tree = etree.parse(xml_file, parser=parser)\n    schema_root = etree.XMLSchema(tree)\n    root = tree.getroot()\n    return root, schema_root\n\n\ndef validate_xml_data(l_data: etree._Element, l_schema: etree.XMLSchema, r_data: etree._Element, r_schema: etree.XMLSchema, join_props_xpath: List[str]) -> None:\n    \"\"\"\n    Validate XML data\n\n    Args:\n        l_data (etree._Element): The XML data from the left file\n        r_data (etree._Element): The XML data from the right file\n        join_props_xpath (List[str]): The properties to join on as xpath strings\n\n    Raises:\n        ValueError: If the XML schema does not match between the files\n        ValueError: If the join properties do not match to at least one element in both l_data and r_data\n    \"\"\"\n    errors = []\n    # Test the left schema against the right data and vice versa\n    if not l_schema.validate(r_data):\n        errors.append('Left schema does not match right data')\n    if not r_schema.validate(l_data):\n        errors.append('Right schema does not match left data')\n    # Test the join properties exist in both files\n    for prop in join_props_xpath:\n        left_prop = l_data.xpath(prop)\n        right_prop = r_data.xpath(prop)\n        if not left_prop or not right_prop:\n            errors.append(f'Join property {prop} does not match to at least one element in both files')\n    if errors:\n        error_message = \"\\n\\t\".join(errors)\n        raise ValueError(f\"Invalid XML data: \\n\\n\\t{error_message}\")\n\n\ndef merge_data(left: etree._Element, right: etree._Element, join_properties: List[str], merge_strategy: MergeStrategy = LeftOuterJoinStrategy()) -> etree._Element:\n    \"\"\"Merge the data from the two XML files, uniquely identifying each record using the specified properties.\n\n    Args:\n        left (etree._Element): The XML data from the left file\n        right (etree._Element): The XML data from the right file\n        join_properties (List[str]): The properties to join on as xpath strings\n        merge_strategy (MergeStrategy): The merge strategy to use. 
Defaults to LeftOuterJoinStrategy.\n\n    Returns:\n        etree._Element: The merged XML data\n    \"\"\"\n    return merge_strategy.merge(left, right, join_properties)\n\n\ndef write_merged_data_to_file(xml_data: etree._Element, output_file: str = None) -> None:\n    \"\"\"\n    Write the merged data to the output file.\n\n    Args:\n        xml_data (etree._Element): The merged XML data\n        output_file (str, optional): The output file path. Defaults to None.\n    \"\"\"\n    # Write the merged data to the output file\n    if output_file:\n        try:\n            with open(output_file, 'wb') as file:\n                file.write(etree.tostring(\n                    xml_data, encoding='utf-8', xml_declaration=True))\n        except IOError as io_error:\n            raise IOError(f'Unable to write to output file {output_file}') from io_error\n    else:\n        print(etree.tostring(xml_data, encoding='unicode'))\n\n\ndef main() -> None:\n    \"\"\"\n    Main function\n    \"\"\"\n    # Parse the command line arguments\n    args = parse_command_line_args()\n    # Normalize the output file name if it is provided\n    if args.output is not None:\n        args.output = os.path.abspath(os.path.normpath(args.output))\n        # Validate the output file name\n        validate_output_filename(args.output)\n    # Validate the xpath strings\n    validate_props_xpath(args.join_properties)\n    # Parse the XML files\n    left_data, left_schema = parse_xml_files(args.left_file)\n    right_data, right_schema = parse_xml_files(args.right_file)\n    # Validate the XML data\n    validate_xml_data(left_data, left_schema, right_data, right_schema, args.join_properties)\n    # Merge the data, using the args.strategy to specify the merge strategy\n    if args.strategy == 'right':\n        merge_strategy = RightOuterJoinStrategy()\n    else:\n        merge_strategy = LeftOuterJoinStrategy()\n    merged_data = merge_data(left_data, right_data, args.join_properties, merge_strategy)\n    # Write the merged data to the output file\n    write_merged_data_to_file(merged_data, args.output)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"matracey/merge-xml","sub_path":"merge_xml.py","file_name":"merge_xml.py","file_ext":"py","file_size_in_byte":10093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"36621575253","text":"from flask import render_template,request,redirect,session,flash\nfrom flask_app import app\nfrom flask_app.models.band_model import Bands\n\n@app.route('/add_band')\ndef add_band():\n    return render_template('new_band.html')\n\n@app.route('/adding_band',methods=['POST'])\ndef adding_band():\n    if not Bands.validate_create(request.form):\n        return redirect('/add_band')\n    data = {\n        **request.form,\n        'users_id':session['id']\n    }\n    Bands.create_band(data)\n    return redirect('/bands')\n\n@app.route('/delete_band/<int:id>')\ndef delete_band(id):\n    data = {\n        'id':id\n    }\n    Bands.delete_band(data)\n    return redirect('/bands')\n\n@app.route('/edit_band/<int:id>')\ndef edit_band(id):\n    one_band = Bands.get_one_band({'id':id})\n\n    return render_template('edit_band.html',one_band= one_band)\n\n@app.route('/editing/<int:id>',methods=['POST'])\ndef editing(id):\n    if not Bands.validate_create(request.form):\n        return redirect(f'/edit_band/{id}')\n    data={\n        **request.form,\n        'id':id\n    }\n    Bands.edit_band(data)\n    return redirect('/bands')\n\n@app.route('/my_bands')\ndef my_bands():\n    data={\n        'id':session['id']\n    }\n    one_band = Bands.my_bands(data)\n    return 
render_template('my_bands.html',bands=one_band)","repo_name":"zqkl/project2","sub_path":"flask_app/controllers/band_controller.py","file_name":"band_controller.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"39761940489","text":"\"\"\"Merges LCOV tracefiles\n\n- Arguments are paths of input tracefiles\n- Merged tracefile is written to stdout\n\"\"\"\nimport sys\n\ndef main():\n    inputs = sys.argv[1:]\n\n    data = {} # map[test][file][line] = covered\n\n    for path in inputs:\n        with open(path) as fh:\n            contents = fh.read()\n        tn = None\n        sf = None\n        for line in contents.split('\\n'):\n            line = line.strip()\n            if line[:3] == \"TN:\":\n                tn = line[3:]\n                if tn not in data:\n                    data[tn] = {}\n            elif line[:3] == \"SF:\":\n                sf = line[3:]\n                if sf not in data[tn]:\n                    data[tn][sf] = {}\n            elif line[:3] == \"DA:\":\n                [num, hit] = line[3:].split(\",\")\n                num = int(num)\n                hit = hit != \"0\" # convert to bool\n                if num not in data[tn][sf]:\n                    data[tn][sf][num] = hit\n                else:\n                    data[tn][sf][num] |= hit\n\n    for t in sorted(data):\n        for s in sorted(data[t]):\n            if len(data[t][s]) == 0: # skip files with no instrumented lines\n                continue\n            sys.stdout.write(\"TN:{}\\n\".format(t))\n            sys.stdout.write(\"SF:{}\\n\".format(s))\n            for l in sorted(data[t][s]):\n                sys.stdout.write(\"DA:{},{}\\n\".format(l, int(data[t][s][l])))\n            sys.stdout.write('end_of_record\\n')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Flank/mirror-goog-studio-main","sub_path":"bazel/coverage/merge_lcov.py","file_name":"merge_lcov.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
+{"seq_id":"22690092153","text":"import os\nimport sys\nimport hashlib\nimport tempfile\nimport shutil\n\nimport docutils.nodes\nimport docutils.parsers\nimport docutils.parsers.rst\nimport docutils.parsers.rst.directives\nimport docutils.statemachine\n\nimport sphinx\nimport sphinx.domains.std\nimport sphinx.roles\nimport sphinx.util\nimport sphinx.util.logging\nimport sphinx.util.nodes\nimport sphinx.util.docutils\n\nimport json\nimport slugify\n\nfrom . import doxygen_generator\nfrom . import doxygen_parser\nfrom . import doxygen_downloader\nfrom . import run\nfrom . import template_render\nfrom . import wurfapi_error\nfrom . import link_mapper\nfrom . import link_provider\nfrom . import location_mapper\nfrom . import check_api_schema\nfrom . import collapse_inline_namespaces\nimport wurfapi\n\n\nVERSION = \"9.1.0\"\n\n# Having a \"global\" logger per .py file seems to be the Sphinx way of\n# doing it. We are ok with that here since we don't have any way of\n# doing dependency injection anyway. However, our Sphinx independent\n# code should ask for the logger as a dependency.\nlogger = sphinx.util.logging.getLogger(__name__)\n\n\nclass WurfapiTarget(sphinx.util.docutils.SphinxDirective):\n\n    # The WurfapiTarget is used to generate named labels in the\n    # documentation e.g. to member functions etc.\n    # Solution adapted from here:\n    # https://github.com/sphinx-doc/sphinx/issues/2025\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n\n    # The options that may be passed to the directive\n    option_spec = {\n        # unchanged: Returns the text argument, unchanged. 
Returns an empty\n        # string (\"\") if no argument is found.\n        \"label\": docutils.parsers.rst.directives.unchanged\n    }\n\n    def run(self):\n        \"\"\"Called by Sphinx.\n\n        Process the directive.\n\n        Documentation on creating directives is available here:\n        http://docutils.sourceforge.net/docs/howto/rst-directives.html\n\n        :return: List of Docutils/Sphinx nodes that will be inserted into the\n                 document where the directive was encountered.\n        \"\"\"\n\n        # Replace one or more spaces with a single space\n        targetname = sphinx.util.ws_re.sub(\" \", self.arguments[0].strip()).lower()\n        node = docutils.nodes.target(\"\", \"\", names=[targetname])\n        if \"label\" in self.options:\n            node[\"label\"] = self.options[\"label\"]\n        self.state.document.note_explicit_target(node)\n        return [node]\n\n\ndef map_wurfapi_named_target(app, doctree):\n    def lookup_name(id):\n        for name, (docname, labelid) in app.env.domaindata[\"std\"][\"anonlabels\"].items():\n            if labelid == id:\n                return (name, docname)\n        else:\n            return (None, None)\n\n    labels = app.env.domaindata[\"std\"][\"labels\"]\n    for node in doctree.traverse(docutils.nodes.target):\n        if \"label\" in node:\n            name, docname = lookup_name(node[\"refid\"])\n\n            if name not in labels:\n                labels[name] = (docname, node[\"refid\"], node[\"label\"])\n\n\nclass WurfapiDirective(sphinx.util.docutils.SphinxDirective):\n\n    # The WurfapiDirective requires a single path argument, which is allowed to\n    # contain whitespace. This is to allow for long paths which may span\n    # multiple lines. The path argument should name a valid template.\n    #\n    # Same approach as for the image directive:\n    # http://docutils.sourceforge.net/docs/howto/rst-directives.html#id10\n    required_arguments = 1\n    final_argument_whitespace = True\n\n    # A selector may be specified. Some templates may require it. @todo\n    # document how to handle situations where:\n    # 1. A selector is not needed but passed\n    # 2. A selector is needed but not passed\n    option_spec = {\n        # unchanged: Returns the text argument, unchanged. 
Returns an empty\n        # string (\"\") if no argument is found.\n        \"selector\": docutils.parsers.rst.directives.unchanged,\n        \"user_data\": docutils.parsers.rst.directives.unchanged,\n    }\n\n    def run(self):\n        \"\"\"Called by Sphinx.\n\n        Process the directive.\n\n        Documentation on creating directives is available here:\n        http://docutils.sourceforge.net/docs/howto/rst-directives.html\n\n        :return: List of Docutils/Sphinx nodes that will be inserted into the\n                 document where the directive was encountered.\n        \"\"\"\n        env = self.state.document.settings.env\n        app = env.app\n        api = app.wurfapi_api\n        user_data = self._user_data()\n        selector = self._selector()\n        user_path = app.config.wurfapi.get(\"user_templates\", None)\n\n        if user_path:\n            # Make sure it is relative to the documentation directory\n            user_path = os.path.join(app.srcdir, user_path)\n\n        if selector and selector not in api:\n            raise wurfapi_error.WurfapiError(\n                'Selector \"{}\" not in API; possible values are {}'.format(\n                    selector, api.keys()\n                )\n            )\n\n        template = template_render.TemplateRender(user_path=user_path)\n\n        data = template.render(\n            selector=selector,\n            api=api,\n            filename=self._template_file(),\n            user_data=user_data,\n        )\n\n        # Dump the rst to a file - mostly for debugging purposes\n        rst_file = self.slug() + \".rst\"\n\n        logger.debug(\"writing rst output: %s\", rst_file)\n\n        rst_path = os.path.join(app.wurfapi_output_path, rst_file)\n        with open(rst_path, \"w\") as f:\n            f.write(data)\n\n        return self.insert_rst(data)\n\n    def _template_file(self):\n        \"\"\"Return the template file passed as an option to the directive\"\"\"\n\n        # The path function returns the path argument unwrapped (with newlines\n        # removed). Raises ValueError if no argument is found.\n        return docutils.parsers.rst.directives.path(self.arguments[0])\n\n    def _source_file(self):\n        \"\"\"Return the source .rst file where the directive is located\"\"\"\n        source_file, _ = self.get_source_info()\n        return source_file\n\n    def _selector(self):\n        \"\"\"Return the selector or None\"\"\"\n        return self.options[\"selector\"] if \"selector\" in self.options else None\n\n    def _user_data(self):\n        \"\"\"Return the user_data or None\"\"\"\n        return self.options[\"user_data\"] if \"user_data\" in self.options else None\n\n    def slug(self):\n        \"\"\"Return the slug for this directive\"\"\"\n\n        project_root = self.config.wurfapi[\"project_root\"]\n\n        source_file = self._source_file()\n        source_file = os.path.relpath(source_file, project_root)\n        source_file, _ = os.path.splitext(source_file)\n\n        template_file = self._template_file()\n        template_file = os.path.relpath(template_file, project_root)\n        template_file, _ = os.path.splitext(template_file)\n\n        selector = self._selector()\n        user_data = self._user_data()\n\n        to_slug = source_file + \"_\" + template_file\n        to_slug += \"\" if not selector else \"_\" + selector\n        to_slug += \"\" if not user_data else \"_u_\" + user_data\n\n        return slugify.slugify(text=to_slug, separator=\"_\")\n\n    def insert_rst(self, rst):\n        \"\"\"Replaces the content of the directive with the rst generated\n        content.\n\n        Documentation on how to do this is available here:\n        http://www.sphinx-doc.org/en/stable/extdev/markupapi.html\n        \"\"\"\n        rst = rst.split(\"\\n\")\n        view = docutils.statemachine.ViewList(initlist=rst, source=\"wurfapi\")\n\n        node = docutils.nodes.paragraph()\n        sphinx.util.nodes.nested_parse_with_titles(\n            state=self.state, content=view, node=node\n        )\n\n        return node.children\n\n\ndef main():\n    # This is intentional, don't delete.\n    print(\"hello 
wurfapi\")\n\n\ndef generate_doxygen(app):\n\n source_paths = []\n for source_path in app.config.wurfapi[\"source_paths\"]:\n\n source_path = os.path.join(app.srcdir, source_path)\n\n if not os.path.exists(source_path):\n raise RuntimeError(\"Missing source path {}\".format(source_path))\n\n source_paths.append(source_path)\n\n # Create the XML in a temp location\n project = app.config.project.lower() if app.config.project else \"\"\n # Remove whitespace https://stackoverflow.com/a/2077944/1717320\n project = \"_\".join(project.split())\n\n source_hash = hashlib.sha1(\",\".join(source_paths).encode(\"utf-8\")).hexdigest()[:6]\n\n output_path = os.path.join(\n tempfile.gettempdir(), \"wurfapi-\" + project + \"-\" + source_hash\n )\n\n if os.path.isdir(output_path):\n shutil.rmtree(output_path, ignore_errors=True)\n\n if not os.path.exists(output_path):\n os.makedirs(name=output_path)\n\n # Store the output path\n app.wurfapi_output_path = output_path\n\n logger.info(\n \"wurfapi source_path={} output_path={}\".format(source_paths, output_path)\n )\n\n parser_config = app.config.wurfapi[\"parser\"]\n assert parser_config[\"type\"] == \"doxygen\"\n\n if parser_config[\"download\"]:\n\n if \"download_path\" in parser_config:\n download_path = parser_config[\"download_path\"]\n else:\n download_path = None\n\n doxygen_executable = doxygen_downloader.ensure_doxygen(\n download_path=download_path\n )\n else:\n doxygen_executable = \"doxygen\"\n\n # Check if we should be recursive\n recursive = app.config.wurfapi[\"recursive\"]\n\n generator = doxygen_generator.DoxygenGenerator(\n doxygen_executable=doxygen_executable,\n runner=run,\n recursive=recursive,\n source_paths=source_paths,\n output_path=output_path,\n warnings_as_error=parser_config[\"warnings_as_error\"],\n )\n\n output = generator.generate()\n\n logger.info(\"wurfapi doxygen XML {}\".format(output))\n\n if \"patch_api\" in parser_config:\n patch_api = parser_config[\"patch_api\"]\n else:\n patch_api = []\n\n # Get project root\n if \"project_root\" in app.config.wurfapi:\n project_root = app.config.wurfapi[\"project_root\"]\n else:\n project_root = str(\n run.run(command=\"git rev-parse --show-toplevel\", cwd=app.srcdir).stdout\n ).strip()\n app.config.wurfapi[\"project_root\"] = project_root\n\n if \"include_paths\" in app.config.wurfapi:\n include_paths = app.config.wurfapi[\"include_paths\"]\n\n # These are specified relative to the conf.py\n include_paths = [os.path.join(app.srcdir, p) for p in include_paths]\n\n else:\n include_paths = []\n\n # Location mapper\n mapper = location_mapper.LocationMapper(\n project_root=project_root, include_paths=include_paths, log=logger\n )\n\n parser = doxygen_parser.DoxygenParser(\n doxygen_path=output, location_mapper=mapper, patch_api=patch_api, log=logger\n )\n\n api = parser.parse_index()\n\n if \"collapse_inline_namespaces\" in parser_config:\n selectors = parser_config[\"collapse_inline_namespaces\"]\n else:\n selectors = []\n\n api = collapse_inline_namespaces.collapse_inline_namespaces(\n api=api, selectors=selectors\n )\n\n # Instatiate the link provider\n provider = link_provider.LinkProvider(user_mappings=[])\n\n # Try to find additonal links across the API - making it possible for the\n # user to jump more conveniently around in the docs\n mapper = link_mapper.LinkMapper(api=api, link_provider=provider)\n api = mapper.map()\n\n # Dump the API\n with open(os.path.join(app.doctreedir, \"wurfapi_api.json\"), \"w\") as f:\n json.dump(api, f, indent=4, sort_keys=True)\n\n # Run schema 
checks on the API\n check_api_schema.check_api_schema(api=api)\n\n # Store the final API\n app.wurfapi_api = api\n\n\nclass WurfapiRole:\n def __init__(self):\n # We will piggyback on the XRefRole in sphinx. This XRefRole setup is called for 'ref'. See\n # https://github.com/sphinx-doc/sphinx/blob/80b0a16e1c5f7266522a50284a003a0e17cf5ff7/sphinx/domains/std.py#L589\n self.xref = sphinx.roles.XRefRole(\n lowercase=True, innernodeclass=docutils.nodes.inline, warn_dangling=True\n )\n\n def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):\n \"\"\"\n The call is passed into the Sphinx roles. From XRefRole to ReferenceRole to SphinxRole.\n This ensures that Sphinx handles the .rst reference as if it was written as a full unique name\n of an API element. This is ensured if enough information is given to be able to deduce and distinguish\n the unique name from the text given.\n All the function arguments are extracted from the .rst file where the role is called\n\n :param name: The name of the role called e.g 'ref'\n :param rawtext: The full text with :role:`text`\n :param text: The text within the ``\n :param lineno: The line number\n :param inliner: The docutils inliner\n\n Optional:\n :param options: additional options to give to the role\n :param content: additional content to pass\n \"\"\"\n app = inliner.document.settings.env.app\n api = app.wurfapi_api\n\n # We will look for all possible matches for the text given in the .rst\n # If an exact match is found (e.g `rely::encoder` refers to the class and\n # not any class-members) we choose this as the match.\n matches = []\n for key in api.keys():\n if key == text:\n matches = [key]\n break\n if key.startswith(text):\n matches.append(key)\n\n # No matches = error\n if len(matches) == 0:\n msg = inliner.reporter.error(\n f\"Could not find a possible match for {text} in API (line:{lineno}).\"\n )\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n\n # Multiple matches = error\n if len(matches) > 1:\n msg = inliner.reporter.error(\n f\"More than one possible match for {text} in API (line:{lineno}).\\n\"\n f\"Possible matches:\\n{', '.join(matches)}.\"\n )\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n\n # Change the text to the correct match\n text = matches[0]\n\n # The XRefRole has to be invoked in the standard domain.\n # We are not sure why, but for now it works.\n name = \"std:ref\"\n rawtext = f\":std:ref:`{text}`\"\n\n # Let XRefRole handle the reference with the correct match.\n nodes = self.xref.__call__(\n name, rawtext, text, lineno, inliner, options, content\n )\n return nodes\n\n\ndef setup(app):\n \"\"\"Entry point for the extension. 
Sphinx will call this function when the\n    module is added to the \"extensions\" list in Sphinx's conf.py file.\n\n    :param app: The application object, which is an instance of Sphinx.\n    \"\"\"\n\n    # Create a logger\n    logger.info(\"Initializing wurfapi extension\")\n\n    # Add the wurfapi configuration value\n    app.add_config_value(name=\"wurfapi\", default=None, rebuild=True)\n\n    # Add the new directive - added to the document by writing:\n    #\n    # ..wurfapi::\n    #\n    app.add_directive(name=\"wurfapi\", cls=WurfapiDirective)\n\n    # Add the ..wurfapitarget directive\n    app.add_directive(name=\"wurfapitarget\", cls=WurfapiTarget)\n\n    # Add the wurfapi role\n    app.add_role(\n        \"wurfapi\",\n        WurfapiRole(),\n    )\n\n    # Generate the XML\n    app.connect(event=\"builder-inited\", callback=generate_doxygen)\n\n    # Map labels\n    app.connect(\"doctree-read\", map_wurfapi_named_target)\n\n    # We use the doctreedir as build directory. The default for this\n    # is inside _build/.doctree folder\n    build_dir = os.path.join(app.doctreedir, \"wurfapi\")\n\n    # Run Doxygen on the source code\n\n    return {\"version\": VERSION}\n","repo_name":"steinwurf/wurfapi","sub_path":"src/wurfapi/wurfapi_directive.py","file_name":"wurfapi_directive.py","file_ext":"py","file_size_in_byte":15819,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
+{"seq_id":"73321965280","text":"def squared_sum(n):\r\n    \"\"\"Find the sum of the squares of the natural numbers up to n.\"\"\"\r\n    result = 0\r\n    for num in range(1, n+1):\r\n        result += num ** 2\r\n    return result\r\n\r\n\r\ndef sum_of_squares(n):\r\n    \"\"\"Find the square of the sum of the natural numbers up to n.\"\"\"\r\n    result = 0\r\n    for num in range(1, n+1):\r\n        result += num\r\n    return result * result\r\n\r\n\r\ndef main():\r\n    n = int(input('\\nEnter a number: '))\r\n    print(sum_of_squares(n) - squared_sum(n))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"akulchik/project-euler","sub_path":"Sum Square Difference.py","file_name":"Sum Square Difference.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"29577732244","text":"import re\nimport time\n\nwith open('data') as file_in:\n    data = file_in.read().split('\\n')\n\nstart = time.time()\n\nbegin2end = dict()\nend2begin = dict()\npattern = 'Step (.) must be finished before step (.) 
can begin.'\n\nfor element in data:\n    matchobj = re.match(pattern=pattern, string=element)\n    parent, child = matchobj.groups()\n    if parent not in begin2end:\n        begin2end[parent] = list()\n    begin2end[parent].append(child)\n\n    if child not in end2begin:\n        end2begin[child] = list()\n    end2begin[child].append(parent)\n\nfor element in begin2end:\n    begin2end[element] = sorted(set(begin2end[element]))\nfor element in end2begin:\n    end2begin[element] = sorted(set(end2begin[element]))\n\nchildren = list(end2begin.keys())\nparents = list(begin2end.keys())\n\nfor child in children:\n    if child in parents:\n        parents.remove(child)\n\nall_nodes_len = len(set(children + parents))\n\nthe_path = ''\nnext_steps = list(parents)\n\nwhile len(the_path) < all_nodes_len:\n    next_steps = sorted(set(next_steps))\n    for candidate in next_steps:\n        if candidate not in end2begin:\n            the_path += candidate\n            next_steps.remove(candidate)\n            next_steps.extend(begin2end[candidate])\n            break\n\n        the_result = True\n        for pp in end2begin[candidate]:\n            if the_path.count(pp) == 0:\n                the_result = False\n\n        if not the_result:\n            continue\n\n        the_path += candidate\n        next_steps.remove(candidate)\n        if candidate in begin2end:\n            next_steps.extend(begin2end[candidate])\n        break\n\nprint('Part1:', the_path)\n\nduration = time.time() - start\nprint('Duration: {0:.3} seconds'.format(duration))\nstart = time.time()\n\nworkers = [''] * 5\n\nfor idx, parent in enumerate(parents):\n    workers[idx] = parent * (ord(parent) - ord('A') + 61)\n    the_path = the_path.replace(parent, '')\n\nsecond = 0\ndone = ''\n\nwhile the_path != '' or \"\".join(workers) != '':\n    for idx, worker in enumerate(workers):\n        if len(worker) == 1:\n            done += worker[0]\n            workers[idx] = ''\n        else:\n            workers[idx] = worker[1:]\n\n    for idx, worker in enumerate(workers):\n        if worker == '':\n\n            for candidate in the_path:\n                result = True\n                for parent in end2begin[candidate]:\n                    if parent not in done:\n                        result = False\n\n                if result:\n                    workers[idx] = candidate * (ord(candidate) - ord('A') + 61)\n                    the_path = the_path.replace(candidate, '')\n                    break\n\n    second += 1\n\nprint('Part2:', second)\nduration = time.time() - start\nprint('Duration: {0:.3} seconds'.format(duration))\n","repo_name":"pavelo22/Advent-of-Code-2018","sub_path":"Day07/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"13267347643","text":"# coding: utf-8\n\n\"\"\"\nPartition a linked list\n\nFirst, check whether such a value exists?\n1. Decide whether nodes need to move, and how\nChain the nodes greater than x onto big_head\nNodes less than or equal to x keep their original positions\nFinally, reattach the greater nodes at the end\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\nclass Solution:\n    def partition(self, head: ListNode, x: int) -> ListNode:\n        \"\"\"\n        Seems I misunderstood the problem at first... 
The actual task: partition around the value x\n        :param head:\n        :param x:\n        :return:\n        \"\"\"\n        dummy = ListNode(next=head)\n        h = dummy\n\n        tem = ListNode()\n        t = tem\n\n        while dummy.next:\n            if dummy.next.val >= x:\n                tem.next = ListNode(dummy.next.val)\n                tem = tem.next\n                dummy.next = dummy.next.next\n            else:\n                dummy = dummy.next\n\n        else:\n            dummy.next = t.next\n        return h.next\n\n    def partition_1(self, head: ListNode, x: int) -> ListNode:\n        \"\"\"\n        For the fiddly pointer surgery, just use two separate lists instead of splicing back and forth\n        :param head:\n        :param x:\n        :return:\n        \"\"\"\n        fk_head = ListNode(-1000000)\n        fk_head.next = head\n        cur = fk_head\n        bigger_nums_head = ListNode(-1000000)\n        bh = bigger_nums_head\n\n        while cur and cur.next:\n            if cur.next.val >= x:  # cur may only advance once a value less than x is reached\n\n                while cur.next and cur.next.val >= x:\n                    bh.next = cur.next  # splice the larger node onto the new head; its tail is set to None below\n                    cur.next = cur.next.next  # point cur past the removed node\n\n                    bh.next.next = None\n                    bh = bh.next  # advance the new-list pointer\n                if cur.next:  # keep the last node in place if cur is already at the end\n                    cur = cur.next  # advance the cur pointer\n\n            else:\n                if cur.next:  # keep the last node in place if cur is already at the end\n                    cur = cur.next  # otherwise advance the pointer\n\n        if bigger_nums_head.next:\n            cur.next = bigger_nums_head.next\n\n        return fk_head.next\n\n\n    def test(self):\n\n        nn1 = ListNode(1)\n        nn2 = ListNode(4)\n        nn3 = ListNode(3)\n        nn4 = ListNode(2)\n        nn5 = ListNode(5)\n        nn6 = ListNode(2)\n\n        nn1.next = nn2\n        nn2.next = nn3\n        nn3.next = nn4\n        nn4.next = nn5\n        nn5.next = nn6\n\n        res = self.partition_1(nn1, 3)\n        while res:\n            print(res.val)\n            res = res.next\n\nSolution().test()","repo_name":"echoocking/DontForget","sub_path":"leetcode/86_sep_with_one_num.py","file_name":"86_sep_with_one_num.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"41991173080","text":"from distutils.core import setup, Extension\nimport os\n\nsrl_py_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nmodules = [\n    Extension('srlencoder',\n              sources=['srl_encoder_module.c', 'srl_encoder.c'],\n              depends=['srl_encoder.h',\n                       'srl_buffer.h',\n                       os.path.join(srl_py_root, 'shared', 'srl_inline.h'),\n                       os.path.join(srl_py_root, 'shared', 'util.h'),\n                       os.path.join(srl_py_root, 'shared', 'ptable.h'),\n                       ],\n              include_dirs=[os.path.join(srl_py_root, 'shared')],\n              ),\n    ]\n\nsetup(name='sereal',\n      version='0.01',\n      description='Encode data structures in Sereal format.',\n      ext_modules=modules\n      )\n","repo_name":"Sereal/Sereal","sub_path":"Python/Encoder/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":409,"dataset":"github-code","pt":"54"}
+{"seq_id":"12785845710","text":"import logging\nimport shutil\nimport unittest\nimport tempfile\nfrom pathlib import Path\n\nimport jsonpickle\n\nfrom framebot.utils import get_logger, LoggingObject, safe_json_dump\n\n\nclass TestUtils(unittest.TestCase):\n\n    def setUp(self) -> None:\n        self.test_dir = Path(tempfile.mkdtemp())\n\n    def tearDown(self) -> None:\n        shutil.rmtree(self.test_dir)\n\n    def test_safe_json_dump(self):\n        test_json_file = self.test_dir.joinpath(\"test_json.json\")\n        test_obj = {\"intParam\": 1, \"strParam\": \"test\"}\n        safe_json_dump(test_json_file, test_obj)\n        self.assertTrue(test_json_file.exists())\n        with open(test_json_file, \"r\") as f:\n            read_obj = f.read()\n        read_obj = jsonpickle.decode(read_obj)\n        self.assertEqual(read_obj, test_obj)\n        test_json_file.unlink()\n        safe_json_dump(str(test_json_file), test_obj)\n        self.assertTrue(test_json_file.exists())\n        with open(test_json_file, \"r\") as f:\n            read_obj = f.read()\n        read_obj = 
jsonpickle.decode(read_obj)\n        self.assertEqual(read_obj, test_obj)\n\n    def test_get_logger(self):\n        logger_name = \"test_logger\"\n        level = logging.DEBUG\n        logger = get_logger(logger_name, level)\n        self.assertEqual(logger.name, logger_name)\n        self.assertEqual(logger.level, level)\n\n        # default level\n        logger = get_logger(logger_name)\n        self.assertEqual(logger.level, logging.INFO)\n        # None parameters\n        with self.assertRaises(ValueError):\n            get_logger(None, level)\n        with self.assertRaises(ValueError):\n            get_logger(logger_name, None)\n\n    def test_logging_object(self):\n        logging_object = LoggingObject()\n        self.assertEqual(logging_object.logger.name, type(logging_object).__name__)\n\n        class LoggingObjectSubclass(LoggingObject):\n            pass\n\n        logging_object = LoggingObjectSubclass()\n        self.assertEqual(logging_object.logger.name, LoggingObjectSubclass.__name__)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"thecodingbob/framebot","sub_path":"test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"37495211930","text":"import torch.multiprocessing as mp\nfrom queue import Queue\nimport time \nimport random\nimport os\n\ndef mp_exec(resources,configs,func):\n    '''\n    @ resources : list of gpu devices\n    @ configs : list of params\n    @ func : f(dev,cfg)\n    '''\n    q=Queue()\n    for res in resources:\n        q.put(res)\n    pool=mp.Pool()\n    def put_back_dev(dev,cfg):\n        def callback(*args):\n            print(f\"Device {dev} Finish cfg {cfg} \")\n            q.put(dev)\n            print(*args)\n        return callback\n\n    for idx,cfg in enumerate(configs):\n        dev = q.get()\n        print(f\"Start config {cfg} on device {dev}\")\n        pool.apply_async(func,args=[dev,cfg],callback=put_back_dev(dev,cfg),error_callback=put_back_dev(dev,cfg))\n\n    pool.close()\n    pool.join()","repo_name":"wondergo2017/DIDA","sub_path":"DIDA/utils/mp.py","file_name":"mp.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"}
+{"seq_id":"18503828073","text":"import shutil\nimport tempfile\nfrom http import HTTPStatus\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom ..forms import PostForm\nfrom ..models import Comment, Group, Post, User\n\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass PostFormsTests(TestCase):\n    \"\"\"Tests for the forms\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.user = User.objects.create_user(username=\"Les\")\n        cls.group = Group.objects.create(title=\"Тестовый тайтл\",\n                                         slug=\"test-slug\",\n                                         description=\"Тестовое описание\")\n        cls.group_second = Group.objects.create(title=\"Тестовый тайтл2\",\n                                                slug=\"test-slug2\",\n                                                description=\"test desc2\")\n        cls.post = Post.objects.create(author=cls.user,\n                                       text=\"Тестовый текст\",\n                                       group=cls.group)\n        cls.form = PostForm()\n        cache.clear()\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n    def setUp(self):\n        \"\"\"Create an authorized client and a guest client\"\"\"\n        self.authorized_client = Client()\n        self.guest_client = Client()\n        self.authorized_client.force_login(self.user)\n\n    def test_create_post_form(self):\n        \"\"\"A valid 
form creates a record in the Post table\"\"\"\n        small_gif = (b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n                     b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n                     b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n                     b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n                     b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n                     b'\\x0A\\x00\\x3B')\n        uploaded = SimpleUploadedFile(name='small.gif',\n                                      content=small_gif,\n                                      content_type='image/gif')\n\n        posts_count = Post.objects.count()\n        form_data = {\n            'text': \"Новый пост\",\n            'group': self.group.pk,\n            'image': uploaded\n        }\n        response = self.authorized_client.post(reverse(\"posts:post_create\"),\n                                               data=form_data)\n        post = Post.objects.first()\n        self.assertRedirects(\n            response,\n            reverse(\"posts:profile\", kwargs={\"username\": self.user.username}))\n        self.assertEqual(Post.objects.count(), posts_count + 1)\n        self.assertEqual(post.group, self.group)\n        self.assertEqual(post.text, form_data[\"text\"])\n        self.assertEqual(post.author, self.user)\n        self.assertTrue(Post.objects.filter(image='posts/small.gif').exists())\n\n    def test_create_post_form_status_code(self):\n        \"\"\"Check the status codes for the post form\"\"\"\n        post_url = {\n            reverse(\"posts:post_create\"): HTTPStatus.OK,\n            reverse(\"posts:post_edit\", kwargs={\"post_id\": 1}): HTTPStatus.OK,\n        }\n\n        for urls, status in post_url.items():\n            with self.subTest(urls=urls):\n                response = self.authorized_client.get(urls)\n                self.assertEqual(response.status_code, status)\n\n    def test_edit_post_form(self):\n        \"\"\"A valid form edits a record in Post\"\"\"\n        form_data = {\n            \"text\": \"Новый пост 1\",\n            \"group\": self.group.pk,\n        }\n        response = self.authorized_client.post(\n            reverse(\"posts:post_edit\", kwargs={\"post_id\": self.post.pk}),\n            data=form_data,\n        )\n        post = Post.objects.get(pk=self.post.pk)\n        self.assertRedirects(\n            response,\n            reverse(\"posts:post_detail\", kwargs={\"post_id\": self.post.pk}))\n        self.assertEqual(post.text, form_data[\"text\"])\n        self.assertEqual(self.post.group.pk, form_data[\"group\"])\n\n    def test_guest_users_forms(self):\n        \"\"\"Test unauthorized users\"\"\"\n        test_urls = {\n            reverse(\"posts:post_create\"): HTTPStatus.FOUND,\n            reverse(\"posts:post_edit\", kwargs={\"post_id\": 1}): HTTPStatus.FOUND,\n        }\n\n        for urls, status in test_urls.items():\n            with self.subTest(urls=urls):\n                response = self.guest_client.get(urls)\n                self.assertEqual(response.status_code, status)\n\n    def test_not_authorized_user_posted(self):\n        \"\"\"Test that an unauthorized user cannot create a post\"\"\"\n        form_data = {\n            \"text\": \"Новый пост\",\n            \"group\": self.group.pk,\n        }\n        self.guest_client.post(reverse(\"posts:post_create\"), data=form_data)\n        self.assertEqual(Post.objects.count(), 1)\n\n    def test_authorized_comments(self):\n        \"\"\"A valid form saves a comment to the database\"\"\"\n        comments_count = Comment.objects.count()\n        comment_data = {'text': 'test text'}\n        self.authorized_client.post(reverse('posts:add_comment',\n                                            kwargs={'post_id': self.post.id}),\n                                    data=comment_data)\n        comment = Comment.objects.first()\n        self.assertEqual(Comment.objects.count(), comments_count + 1)\n        self.assertEqual(comment.text, 'test text')\n\n    def test_guest_comment(self):\n        \"\"\"Test that an unauthorized user cannot leave a comment\"\"\"\n        comments_count = Comment.objects.count()\n        comment_data = {'text': 'Im not authorized'}\n        self.guest_client.post(reverse('posts:add_comment',\n                                       kwargs={'post_id': self.post.id}),\n                               data=comment_data)\n        self.assertEqual(Comment.objects.count(), comments_count
)\n","repo_name":"platonov1727/hw05_final","sub_path":"yatube/posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10533508774","text":"from __future__ import annotations\n\nfrom typing import List, Optional\n\n__all__ = ['Chart', 'Local', 'Remote']\n\n\nclass Local:\n    def __init__(self, data):\n        self.users: Optional[List[int]] = data['users']\n\n\nclass Remote:\n    def __init__(self, data):\n        self.users: Optional[List[int]] = data['users']\n\n\nclass Chart:\n    def __init__(self, data):\n        self.span: str = data['span']\n        self.limit: int = data['limit']\n        self.offset: int = data['offset']\n        self.local: Local = Local(data['local'])\n        self.remote: Remote = Remote(data['remote'])\n","repo_name":"yupix/Mi.py","sub_path":"mi/framework/models/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"39776512164","text":"'''18. WAP to accept 6 values and find max and second max value'''\n# start from -infinity so negative inputs are handled correctly,\n# and avoid shadowing the built-in max()\nmax_val=float('-inf')\nmax1=float('-inf')\nfor i in range(6):\n    num=int(input(\"Enter a number\"))\n    if num>max_val:\n        max1=max_val\n        max_val=num\n    elif num>max1:\n        max1=num\nprint(\"first maximum number is: \",max_val)\nprint(\"second maximum number is: \",max1)\n","repo_name":"madar1992/logical","sub_path":"maxSecondmax.py","file_name":"maxSecondmax.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40292424574","text":"import argparse\nimport tensorflow as tf\nimport numpy as np\nfrom imageio import imwrite\nimport glob\nimport time\nimport os\nimport tqdm\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\nimport base64\nimport json\nimport shutil\nimport h5py\n\n# from cv2.ximgproc import guidedFilter\n\n# os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n# os.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ndef fliplr(image, mask):\n    image = cv2.flip(image, 1)\n    mask = cv2.flip(mask, 1)\n    return image, mask\n\n\nimport gc\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--frozen_model_filename\", default=\"results/frozen_model.pb\", type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--dir_inputs\", default=None, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--dir_outputs\", default=None, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--dir_copies\", default=None, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--dir_save\", default=None, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--save_images\", default=False, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--only_sky_human\", default=False, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--use_seg\", default=False, type=str,\n                    help=\"Frozen model file to import\")\nparser.add_argument(\"--save_copies\", default=False, type=str,\n                    help=\"Frozen model file to import\")\nargs = parser.parse_args()\nargs.save_images = args.save_images == 'True'\nargs.only_sky_human = args.only_sky_human == 'True'\nargs.use_seg = args.use_seg == 'True'\nargs.save_copies = args.save_copies == 'True'\n\n\nclass datagen_bad:\n    def get_p(self):\n        if not args.only_sky_human:\n            palette = []\n            for i in range(151):\n                palette.append(i)\n            palette = 
np.array(palette)\n return palette\n else:\n palette = []\n palette = np.array(palette)\n return palette\n\n\ndef get_ave_xy(hmi, n_points=4, thresh=0.1):\n '''\n hmi : heatmap np array of size (height,width)\n n_points : x,y coordinates corresponding to the top densities to calculate average (x,y) coordinates\n\n\n convert heatmap to (x,y) coordinate\n x,y coordinates corresponding to the top densities\n are used to calculate weighted average of (x,y) coordinates\n the weights are used using heatmap\n\n if the heatmap does not contain the probability >\n then we assume there is no predicted landmark, and\n x = -1 and y = -1 are recorded as predicted landmark.\n '''\n if n_points < 1:\n ## Use all\n hsum, n_points = np.sum(hmi), len(hmi.flatten())\n ind_hmi = np.array([range(96)] * 96)\n i1 = np.sum(ind_hmi * hmi) / hsum\n ind_hmi = np.array([range(96)] * 96).T\n i0 = np.sum(ind_hmi * hmi) / hsum\n else:\n ind = hmi.argsort(axis=None)[-n_points:] ## pick the largest n_points\n topind = np.unravel_index(ind, hmi.shape)\n index = np.unravel_index(hmi.argmax(), hmi.shape)\n i0, i1, hsum = 0, 0, 0\n for ind in zip(topind[0], topind[1]):\n h = hmi[ind[0], ind[1]]\n hsum += h\n i0 += ind[0] * h\n i1 += ind[1] * h\n\n i0 /= hsum\n i1 /= hsum\n if hsum / n_points <= thresh:\n i0, i1 = -1, -1\n return ([i1, i0, hsum / n_points]) # if n_points else 0])\n\n\ndef transfer_xy_coord(hm, n_points=64, thresh=0.1):\n '''\n hm : np.array of shape (height,width, n-heatmap)\n\n transfer heatmap to (x,y) coordinates\n\n the output contains np.array (Nlandmark * 2,)\n * 2 for x and y coordinates, containing the landmark location.\n '''\n # print(hm.shape)\n assert len(hm.shape) == 3\n Nlandmark = hm.shape[-1]\n # est_xy = -1*np.ones(shape = (Nlandmark, 2))\n est_xy = []\n for i in range(Nlandmark):\n hmi = hm[:, :, i]\n est_xy.extend(get_ave_xy(hmi, n_points, thresh))\n return (est_xy) ## (Nlandmark * 2,)\n\n\ndef create_color_mask_18(mask):\n bsh = np.stack([mask[0, :, :] == 0, mask[0, :, :] == 1, mask[0, :, :] == 2], axis=-1).astype(np.float32)\n arch_ind = np.isin(mask[0, :, :], [3]) # yellow\n fol_ind = np.isin(mask[0, :, :], [4]) # white\n bsh[arch_ind, :] = [1, 1, 0]\n bsh[fol_ind, :] = [1, 1, 1]\n\n arch_ind = np.isin(mask[0, :, :], [5]) # yellow\n fol_ind = np.isin(mask[0, :, :], [6]) # white\n bsh[arch_ind, :] = [1, 1, 0.75]\n bsh[fol_ind, :] = [1, 0.75, 1]\n\n arch_ind = np.isin(mask[0, :, :], [7]) # yellow\n fol_ind = np.isin(mask[0, :, :], [8]) # white\n bsh[arch_ind, :] = [0.25, 1, 0.75]\n bsh[fol_ind, :] = [0.25, 0.75, 1]\n\n arch_ind = np.isin(mask[0, :, :], [9]) # yellow\n fol_ind = np.isin(mask[0, :, :], [10]) # white\n bsh[arch_ind, :] = [0.25, 0.25, 0.75]\n bsh[fol_ind, :] = [0.25, 0.75, 0]\n\n arch_ind = np.isin(mask[0, :, :], [11]) # yellow\n fol_ind = np.isin(mask[0, :, :], [12]) # white\n bsh[arch_ind, :] = [0.75, 0.75, 0.25]\n bsh[fol_ind, :] = [0.25, 0.25, 0.25]\n\n arch_ind = np.isin(mask[0, :, :], [13]) # yellow\n fol_ind = np.isin(mask[0, :, :], [14]) # white\n bsh[arch_ind, :] = [0.5, 0.75, 0.25]\n bsh[fol_ind, :] = [0.25, 0.5, 0.25]\n\n arch_ind = np.isin(mask[0, :, :], [15]) # yellow\n fol_ind = np.isin(mask[0, :, :], [16]) # white\n bsh[arch_ind, :] = [0.5, 0.75, 0.5]\n bsh[fol_ind, :] = [0.25, 0.5, 0.5]\n\n arch_ind = np.isin(mask[0, :, :], [17]) # yellow\n fol_ind = np.isin(mask[0, :, :], [18]) # white\n bsh[arch_ind, :] = [0.4, 0.75, 0.5]\n bsh[fol_ind, :] = [0.4, 0.5, 0.5]\n\n # fol_ind = np.isin(mask[0, :, :], [151]) # edges\n #\n # bsh[fol_ind,:] = [0,1,1]\n bsh = bsh * 
255\n # print(np.unique(bsh))\n\n return bsh\n\n\n# def get_colored_image(img):\n# class_indexes = np.argmax(img, axis=-1)\n# class_indexes = np.reshape(class_indexes, [-1])\n# color_image = palette_np[class_indexes]\n# color_image = np.reshape(color_image, [512, 512, 3])\n# return color_image\n\n# metrics = Metrics(config_m, 151,datagen_bad())\n\ndef load_graph(frozen_graph_filename):\n # We load the protobuf file from the disk and parse it to retrieve the\n # unserialized graph_def\n with tf.gfile.GFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n # Then, we import the graph_def into a new Graph and returns it\n with tf.Graph().as_default() as graph:\n # The name var will prefix every op/nodes in your graph\n # Since we load everything in a new graph, this is not needed\n tf.import_graph_def(graph_def, name=\"prefix\")\n return graph\n\n\ndef get_x(path):\n input = cv2.imread(path)\n input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)\n h, w, _ = input.shape\n scale = 320.0 / max(input.shape[:2])\n h = round(input.shape[0] * scale)\n w = round(input.shape[1] * scale)\n input = cv2.resize(input, (int(w), int(h)))\n\n h, w, _ = input.shape\n in_ = input.copy()\n sh = 0\n sw = 0\n eh = 1\n ew = 1\n if w < 320:\n in_ = np.zeros((320, 320, 3)) # + mean\n in_[:, :, 0] = 123\n in_[:, :, 1] = 116\n in_[:, :, 2] = 103\n diff = 320 - w\n sw = (diff / 2) / 320\n ew = (w + (diff / 2)) / 320\n in_[:, int(diff / 2):w + int(diff / 2)] = input\n if h < 320:\n in_ = np.zeros((320, 320, 3)) # + mean\n in_[:, :, 0] = 123\n in_[:, :, 1] = 116\n in_[:, :, 2] = 103\n\n diff = 320 - h\n\n sh = (diff / 2) / 320\n eh = (h + (diff / 2)) / 320\n in_[int(diff / 2):h + int(diff / 2), :] = input\n input = in_\n\n return input.astype(int), sh, sw, eh, ew\n\n\ndef get_x_513(path):\n input = cv2.imread(path)\n input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)\n input = cv2.resize(input, (513, 513))\n # input = cv2.resize(input, (2049, 1025))\n input = (np.true_divide(input, 255) * 2) - 1\n # input = np.true_divide(input, 255)\n # if input.shape[2] == 4:\n # input = input[:,:,:3]\n # input = np.array([input])\n\n return input\n\n\ndef get_x_shape(path):\n input = cv2.imread(path)\n input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)\n # h, w, _ = input.shape\n # scale = 1024 / min(input.shape[:2])\n # h = round(input.shape[0] * scale)\n # w = round(input.shape[1] * scale)\n # input = cv2.resize(input, (int(w), int(h)))\n\n # input = (np.true_divide(input, 255) * 2) - 1\n return input\n\n\n\ndef create_color_mask(mask):\n bsh = np.stack([mask[0, :, :] == 0, mask[0, :, :] == 3, mask[0, :, :] == 13], axis=-1).astype(np.int32)\n arch_ind = np.isin(mask[0, :, :], [2, 26, 49, 85, 62, 80, 133, 35]) # yellow\n fol_ind = np.isin(mask[0, :, :], [5, 10, 18, 67, 73]) # white\n bsh[arch_ind, :] = [1, 1, 0]\n bsh[fol_ind, :] = [1, 1, 1]\n\n fol_ind = np.isin(mask[0, :, :], [151]) # edges\n\n bsh[fol_ind, :] = [0, 1, 1]\n\n return bsh\n\n\ndef proc(x1, y1, x2, y2, sh, sw, eh, ew):\n if h < w:\n if x1 < 0.5:\n x1 = (x1 - sh) / (0.5 - sh)\n x1 /= 2\n else:\n x1 = ((1 - x1) - (1 - eh)) / (0.5 - (1 - eh))\n x1 /= 2\n x1 = 1 - x1\n if x2 < 0.5:\n x2 = (x2 - sh) / (0.5 - sh)\n x2 /= 2\n else:\n x2 = ((1 - x2) - (1 - eh)) / (0.5 - (1 - eh))\n x2 /= 2\n x2 = 1 - x2\n\n elif w < h:\n if y1 < 0.5:\n y1 = (y1 - sw) / (0.5 - sw)\n y1 /= 2\n else:\n y1 = ((1 - y1) - (1 - ew)) / (0.5 - (1 - ew))\n y1 /= 2\n y1 = 1 - y1\n\n if y2 < 0.5:\n y2 = (y2 - sw) / (0.5 - sw)\n y2 /= 2\n else:\n y2 = ((1 - y2) - 
(1 - ew)) / (0.5 - (1 - ew))\n y2 /= 2\n y2 = 1 - y2\n return x1, y1, x2, y2\n\n\nif __name__ == '__main__':\n # if args.use_seg\n graph = load_graph(args.frozen_model_filename)\n\n\n x = graph.get_tensor_by_name('prefix/tower_0/images:0')\n training = graph.get_tensor_by_name('prefix/training_flag:0')\n y = graph.get_tensor_by_name('prefix/tower_0/boxes:0'),\n y_sc = graph.get_tensor_by_name('prefix/tower_0/scores:0')\n\n list_inputs = sorted(glob.glob(args.dir_inputs + '/*'))\n\n with tf.Session(graph=graph) as sess:\n\n inputs = []\n for i in tqdm.tqdm(list_inputs):\n input, sh, sw, eh, ew = get_x(i)\n\n\n st = time.time()\n y_out, _ = sess.run([y, y_sc],\n feed_dict={\n x: np.array([input]),\n training: False\n })\n end = time.time()\n y_out = y_out[0][0]\n _ = _[0]\n\n\n bboxs = []\n scores = []\n for j in range(len(_)):\n if _[j] > 0.4:\n bboxs.append(y_out[j])\n scores.append(_[j])\n\n out = get_x_shape(i)\n h, w, _ = out.shape\n ssh, ssw, _ = input.shape\n if args.use_seg:\n # out2 = np.zeros((h,w,3))\n out2 = out.copy()\n for j in range(len(bboxs)):\n x1, y1, x2, y2 = tuple(bboxs[j])\n\n x1, y1, x2, y2 = proc(x1, y1, x2, y2, sh, sw, eh, ew)\n # x1, y1, x2, y2 = tuple(bboxs[j])\n x1 = int(x1 * h)\n x2 = int(x2 * h)\n y1 = int(y1 * w)\n y2 = int(y2 * w)\n\n cv2.rectangle(out, (y1,x1), (y2, x2), (0,255,0), 10, -1)\n plt.imshow(out)\n plt.show()\n continue","repo_name":"andriyrizhiy/kur_mask_face","sub_path":"freeze_run.py","file_name":"freeze_run.py","file_ext":"py","file_size_in_byte":11946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17762329631","text":"\"\"\"\nНапишите программу, которая меняет местами столбцы в матрице.\n\nФормат входных данных\nНа вход программе на разных строках подаются два натуральных числа nn и mm — количество строк и столбцов в матрице,\nзатем элементы матрицы построчно через пробел, затем числа ii и jj — номера столбцов, подлежащих обмену.\n3\n4\n11 12 13 14\n21 22 23 24\n31 32 33 34\n0 1\nn, m = int(input()), int(input())\nmatrix = [input().split() for _ in range(n)]\ncol1, col2 = [int(i) for i in input().split()]\n\nfor i in range(n):\n matrix[i][col1], matrix[i][col2] = matrix[i][col2], matrix[i][col1]\n\nfor row in matrix:\n print(*row)\n\"\"\"\nn, m = int(input()), int(input())\nmatrix = []\nfor i in range(n):\n temp = [int(num) for num in input().split()]\n matrix.append(temp)\ncols = [int(c) for c in input().split()]\n\nmatrix_result = [[0] * m for _ in range(n)]\n\n\ndef print_matrix(my_matrix, n, m, width=1):\n for r in range(n):\n for c in range(m):\n print(str(my_matrix[r][c]).ljust(width), end=' ')\n print()\n\n\nfor i in range(n):\n for j in range(m):\n if j == cols[0]:\n matrix_result[i][j] += matrix[i][cols[1]]\n elif j == cols[1]:\n matrix_result[i][j] += matrix[i][cols[0]]\n else:\n matrix_result[i][j] += matrix[i][j]\n\nprint_matrix(matrix_result, n, m)\n","repo_name":"DAlferova/stepik_advanced","sub_path":"P3_list/matrix2_t3_change_col.py","file_name":"matrix2_t3_change_col.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42144914650","text":"#蛮力法\ndef mlfun(s1,s2):\n \"\"\"判断两个字符串是否为异序词\n\n s1:字符串1\n\n s2:字符串2\"\"\"\n\n def zh(l):\n \"\"\"输出字符串的所有排列方式\"\"\"\n if len(l) <= 1:\n return l\n cl = []\n for i in range(len(l)):\n for j in zh(l[0:i] + l[i + 1:]):\n cl.append(l[i] + j)\n\n cl = list(set(cl))\n return cl\n\n #输出字符串1所有的排列顺序\n alist = zh(s1)\n\n 
#判断字符串2是否在字符串所有的排列顺序里\n #如果是,返回True , 否则返回 False\n if s2 in alist:\n return True\n else:\n return False\n\na = mlfun(\"12355\",\"32145\")\nprint(a)\n\n\n\n\n","repo_name":"linzhongxiazhi/student_python","sub_path":"数据结构/异序词检测/蛮力法比较异序词.py","file_name":"蛮力法比较异序词.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22004591377","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 25 19:01:56 2019\n\n@author: joscelynec\n\"\"\"\n\"\"\"\nIterative Binary Search of a Sorted Array\nAdapted from:\nhttps://codereview.stackexchange.com/questions/117180/python-2-binary-search\n\"\"\"\ndef binary_search(array, value):\n start, stop = 0, len(array)\n while start < stop:\n offset = start + stop >> 1\n sample = array[offset]\n if sample < value:\n start = offset + 1\n elif sample > value:\n stop = offset\n else:\n return offset\n return -1\n\n\n\"\"\"\nFinds pivot index in sorted rotated list\nfor example, for [6, 7, 8, 9, 10, 1, 2, 3, 4]\npivot = 5\n\"\"\"\ndef find_pivot(input_list):\n start = 0\n end = len(input_list) - 1\n while start <= end:\n mid = (start + end)//2\n if input_list[start] <= input_list[end]: #check if list is not rotated\n return start\n elif input_list[start] <= input_list[mid]: #first to mid is sorted, pivot is in other half of list\n start = mid +1\n else: #pivot is in first half of list\n end = mid \n return start\n \n \n\n\"\"\"\nModify/adapt binary search to search a rotated sorted input_list\nin O(nlog n) time\nUses find_pivot(input_list) and then binary search on two sub lists\nFind the index by searching in a rotated sorted input_list\nArgs:\n input_list(input_list), number(int): Input input_list to search and the target\nReturns:\n int: Index or -1\n\"\"\"\ndef rotated_input_list_search(input_list, number):\n #Null input\n if input_list == None or number == None:\n return -1\n #Empty List\n if input_list == [] or number == None:\n return -1\n \n pivot_index = find_pivot(input_list)\n #perform binary search on each list divided into at the pivot\n temp = binary_search(input_list[0: pivot_index], number)\n if temp != -1:\n return temp\n temp = binary_search(input_list[pivot_index: len(input_list)], number)\n if temp != -1:\n return temp + pivot_index\n #number not found\n return -1\n#print(rotated_input_list_search([6, 7, 8, 1, 2, 3, 4], 6))\n\n\n\ndef linear_search(input_list, number):\n #Null input\n if input_list == None or number == None:\n return -1\n #Empty List\n if input_list == [] or number == None:\n return -1\n for index, element in enumerate(input_list):\n if element == number:\n return index\n return -1\n#Udacity test function modified for None or Empty inputs\ndef test_function(test_case):\n if test_case == None:\n print(\"None\")\n return\n \n input_list = test_case[0]\n if input_list == [None]:\n print(\"None\")\n return\n \n number = test_case[1]\n if number == None:\n print(\"None\")\n return\n \n \n if linear_search(input_list, number) == rotated_input_list_search(input_list, number):\n print(\"Pass\")\n else:\n print(\"Fail\")\n \n\ntest_function(None)#Null Inputs\ntest_function([[None], None])#Null Input_lists\ntest_function([[None], 6])#Null Input_lists\ntest_function([[], None])#Empty List, None for number\ntest_function([[], 6])#Empty List\ntest_function([[8], 8])#Singleton List with value\ntest_function([[8], 7])#Singleton List without value\n#More tests two element lists\ntest_function([[7,8], 
8])\ntest_function([[8,5], 8])\ntest_function([[7,8], 9])\ntest_function([[8,5], 4])\n#Full cycle of a sorted list, value present and not present\ntest_function([[1, 2, 3, 4, 5], 3])\ntest_function([[1, 2, 3, 4, 5], 6])\ntest_function([[2, 3, 4, 5, 1], 3])\ntest_function([[2, 3, 4, 5, 1], 6])\ntest_function([[3, 4, 5, 1, 2], 3])\ntest_function([[3, 4, 5, 1, 2], 6])\ntest_function([[4, 5, 1, 2, 3], 3])\ntest_function([[4, 5, 1, 2, 3], 6])\ntest_function([[5, 1, 2, 3, 4], 3])\ntest_function([[5, 1, 2, 3, 4], 6])\n#Udacity supplied tests\ntest_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])\ntest_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])\ntest_function([[6, 7, 8, 1, 2, 3, 4], 8])\ntest_function([[6, 7, 8, 1, 2, 3, 4], 1])\ntest_function([[6, 7, 8, 1, 2, 3, 4], 10])\n\"\"\"\nNone\nNone\nNone\nNone\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\nPass\n\"\"\"\n\n\n\n\n\n\n","repo_name":"pjoscely/Udacity-Data-S.-and-Algos-Project-2","sub_path":"Rotated Sorted Array.py","file_name":"Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17430822168","text":"#!/usr/bin/python3\n#Noel & KJ\n# 09 October 2019\n\n'''Using a while loop, create a loop that prints numbers from 1-15. '''\n\nnumber = 1\n\nwhile number < 16:\n print(number)\n number = number + 1\n \nprint(\"Done\")","repo_name":"NoelGlamann/PythonPractices","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20296893364","text":"from Machine import Process, process_generator\r\nimport time\r\n\r\n\r\nclass FCFS:\r\n tmp_queue = []\r\n current_time = 0\r\n\r\n ready_queue = []\r\n gantt = []\r\n turnaround_time = []\r\n pid_turn_time = []\r\n response_time = []\r\n waiting_time = []\r\n pid_response_time = []\r\n\r\n def __init__(self, lst):\r\n self.process_list = lst\r\n self.waiting_time = [0 for i in range(len(self.process_list))]\r\n\r\n def search(self, pid):\r\n for p in self.process_list:\r\n if p.p_id == pid:\r\n return p\r\n\r\n def run(self):\r\n for i in range(len(self.process_list)):\r\n p = self.process_list[i]\r\n if p.io_time == 0:\r\n self.tmp_queue.append((p.arrival_time, p.p_id, p.cpu_burst2 + p.cpu_burst1, \"cpu_2\"))\r\n else:\r\n self.tmp_queue.append(\r\n (p.arrival_time, p.p_id, p.cpu_burst1, \"cpu_1\")) # ( arrival_time, p_id, cpu_burst1)\r\n\r\n while len(self.tmp_queue) != 0:\r\n # sort based on arrival_time if equal --> sort based on p_id\r\n self.tmp_queue = sorted(self.tmp_queue)\r\n value = self.tmp_queue.pop(0)\r\n\r\n if self.current_time < value[0]: # add idle time waiting for next process to come\r\n idle_time = value[0] - self.current_time\r\n self.current_time += idle_time\r\n time.sleep(idle_time / 1000)\r\n\r\n # calculate waiting time\r\n arrav_val = value[0]\r\n self.waiting_time[value[1] - 1] += (self.current_time - arrav_val)\r\n\r\n if int(value[2]) != 0:\r\n self.gantt.append(\r\n (value[1], self.current_time, self.current_time + int(value[2]))) # (p_id, start_time, end_time)\r\n self.current_time += value[2] # update current time after running first job\r\n\r\n time.sleep(value[2] / 1000) # time to run process\r\n\r\n if value[3] == \"cpu_1\": # add cpu_burst2\r\n p = self.search(value[1])\r\n self.tmp_queue.append((self.current_time + p.io_time, value[1], 
p.cpu_burst2, \"cpu_2\"))\r\n\r\n print(\"gantt : p_id\", \"start time\", \"end_time\", sep=\"\\t\")\r\n print(self.gantt)\r\n print(f'total time : {self.current_time}')\r\n\r\n def avg_turnaround_time(self):\r\n for pid in range(len(self.process_list)):\r\n process = self.search(pid + 1)\r\n\r\n for i in range(len(self.gantt) - 1, -1, -1):\r\n if (pid + 1) == self.gantt[i][0]:\r\n p_end_time_in_cpu = self.gantt[i][2]\r\n p_end_time = p_end_time_in_cpu\r\n if process.cpu_burst2 == 0:\r\n p_end_time += process.io_time\r\n break\r\n self.turnaround_time.append(p_end_time - process.arrival_time)\r\n self.pid_turn_time.append((process.p_id, p_end_time - process.arrival_time))\r\n\r\n print(f'turn around time : {self.pid_turn_time}')\r\n avg = sum(self.turnaround_time) / len(self.turnaround_time)\r\n print(f'avg turn around time : {avg}')\r\n return avg\r\n\r\n def throughput(self):\r\n thrput = len(self.process_list) * 1000 / self.current_time\r\n print(f'throughput : {thrput}')\r\n return thrput\r\n\r\n def cpu_utilization(self):\r\n idle = 0\r\n for i in range(len(self.gantt) - 1):\r\n idle += self.gantt[i + 1][1] - self.gantt[i][2]\r\n\r\n res = (self.current_time - idle) * 100 / self.current_time\r\n print(f'cpu utilization : {res}%')\r\n return res\r\n\r\n def avg_response_time(self):\r\n for pid in range(len(self.process_list)):\r\n process = self.search(pid + 1)\r\n\r\n for i in range(len(self.gantt)):\r\n if (pid + 1) == self.gantt[i][0]:\r\n self.response_time.append(self.gantt[i][1] - process.arrival_time)\r\n self.pid_response_time.append((process.p_id, self.gantt[i][1] - process.arrival_time))\r\n break\r\n\r\n print(f'response time : {self.pid_response_time}')\r\n avg = sum(self.response_time) / len(self.response_time)\r\n print(f'avg response time : {avg}')\r\n return avg\r\n\r\n def avg_waiting_time(self):\r\n print(f'waiting time : {self.waiting_time}')\r\n avg = sum(self.waiting_time) / len(self.waiting_time)\r\n print(f'avg waiting time : {avg}')\r\n return avg\r\n","repo_name":"salidotir/OS-scheduling-algorithms","sub_path":"FCFS.py","file_name":"FCFS.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38417726040","text":"import pandas as pd\r\ndt=({'English':[74,79,48,53,68],\r\n 'Physics':[76,78,80,76,73],\r\n 'Chemistry':[57,74,55,89,70],\r\n 'Biology':[76,85,63,68,59],\r\n 'IP':[82,93,69,98,79]})\r\ndf=pd.DataFrame(dt, index=['Akshit','Bharat','Chetan','Dhaval','Gaurang'])\r\ndf.drop(index='Chetan',inplace=True)\r\ndf.drop(['Dhaval','Gaurang'],inplace=True)\r\n\r\nprint(df)\r\n\r\n","repo_name":"mohdshayyan/PROGRAMS","sub_path":"newProj18.py","file_name":"newProj18.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1975322261","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# */AIPND-revision/intropyproject-classify-pet-images/classify_images.py\n# \n# PROGRAMMER: minpo jung\n# DATE CREATED: 2013.05.01 \n# REVISED DATE: 2013.05.01\n# PURPOSE: Create a function classify_images that uses the classifier function \n# to create the classifier labels and then compares the classifier \n# labels to the pet image labels. This function inputs:\n# -The Image Folder as image_dir within classify_images and function \n# and as in_arg.dir for function call within main. 
\n# -The results dictionary as results_dic within classify_images \n# function and results for the functin call within main.\n# -The CNN model architecture as model wihtin classify_images function\n# and in_arg.arch for the function call within main. \n# This function uses the extend function to add items to the list \n# that's the 'value' of the results dictionary. You will be adding the\n# classifier label as the item at index 1 of the list and the comparison \n# of the pet and classifier labels as the item at index 2 of the list.\n#\n##\nfrom classifier import classifier\n\ndef classify_images(images_dir, results_dic, model):\n print('classify_image function started....')\n for key in results_dic:\n try:\n # Get image file path\n image_file_path = images_dir + '/' + key\n \n # Get classifier labels\n classifier_labels = classifier(image_file_path, model)\n \n # Format classifier labels\n classifier_labels = classifier_labels.lower().strip()\n \n # Compare pet image label and classifier label\n pet_image_label = results_dic[key][0]\n match = int(pet_image_label in classifier_labels.split(', '))\n \n # Extend the results_dic with classifier label and the match\n results_dic[key].extend([classifier_labels, match])\n except Exception as e:\n print(f\"Error processing file {key}: {str(e)}\")\n print('classify_image function ended....')\n print('==================================')\n print()\n return results_dic\n \n# Print the content of result_dic\n#main code\n'''\nprint('start classify_images.py check')\n\nfor key, value in result_dic.items():\n print(\"Filename: \", key)\n print(\"Pet label: \", value[0])\n print(\"Classifier label: \", value[1])\n print(\"Match (1 = match, 0 = no match): \", value[2])\n print(\"\\n\")\nprint('end classify_images.py check ')\n'''","repo_name":"minpo1974/udacityreport","sub_path":"classify_images.py","file_name":"classify_images.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37758051960","text":"from argparse import ArgumentParser\nfrom distutils.command.sdist import sdist\nfrom re import A\nfrom experiment import get_browsers, get_sites\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sigfig\n\nRESULT_FOLDER = \"results\"\nPLOT_FOLDER = \"img\"\nUNIT_OF_METRICS = {\n \"power of core\": \"W\",\n \"power of cpu\": \"W\",\n \"energy consumption of core\": \"J\",\n \"energy consumption of cpu\": \"J\",\n \"duration\": \"s\"\n}\nGENERATE_FOR_EVERY_SITE = False\n\ndef main():\n sites = [\"wikipedia\", \"hackernews\", \"stackoverflow\", \"reddit\", \"sparknotes\", \"dw\", \"nu\", \"nytimes\"]\n\n parser = ArgumentParser(description='Run selenium tests with or without uBlock Origin')\n parser.add_argument('--date',type=int, default=1646053732, help=\"Which amount of seconds is used as date in results folders\")\n parser.add_argument('--n_iterations', type=int, default=6, help=\"How many times the loop was executed\")\n parser.add_argument('--metric', type=str, default=\"power of core\", choices=UNIT_OF_METRICS.keys(), help=\"What metric to visualize\")\n args = parser.parse_args()\n\n visualize(sites, args.date, args.n_iterations, args.metric)\n\ndef read_result(site, date, i, adblocker_used):\n results = []\n with open(f\"{RESULT_FOLDER}/{date}-{i}/{site}_{adblocker_used}\") as file:\n #Discard first line (header)\n file.readline()\n line = file.readline()\n while line != \"\":\n usage = read_usage(line)\n if usage is not None:\n if usage[2] > 
100000000:\n                    print(\"Removed a duration outlier\")\n                else:\n                    results.append(usage)\n            line = file.readline()\n    if results:\n        return np.average(results, axis=0)\n    return None\n\ndef read_usage(line):\n    data = line.split(\";\")\n    core, cpu_usage, duration, exit_code = data\n    if int(exit_code) == 0:\n        return [int(core), int(cpu_usage), int(duration)]\n    else:\n        print(\"non-zero exit code\")\n        return None\n\ndef visualize(sites, date, n, metric):\n    data = {}\n    for site in sites:\n        data[site] = {}\n        for ad_blocker_used in [True, False]:\n            # read each result file once, then drop failed runs\n            results = [read_result(site, date, i, ad_blocker_used) for i in range(n)]\n            data[site][ad_blocker_used] = [r for r in results if r is not None]\n    data[\"average\"] = {}\n    data[\"average\"][True] = [np.average([data[site][True][i] for site in sites if len(data[site][True]) > i and len(data[site][False]) > i], axis=0) for i in range(n)]\n    data[\"average\"][False] = [np.average([data[site][False][i] for site in sites if len(data[site][True]) > i and len(data[site][False]) > i], axis=0) for i in range(n)]\n\n\n    sites.append(\"average\")\n    if GENERATE_FOR_EVERY_SITE:\n        for site in sites:\n            generate_boxplot_per_site(data, site, n, metric)\n\n    generate_large_boxplot(data, sites, n, metric)\n    print_table(data, sites, n, metric)\n\ndef extract_metric(site_data, metric):\n    if metric == \"power of core\":\n        return [float(sd[0]) / float(sd[2]) for sd in site_data]\n    elif metric == \"duration\":\n        return [int(sd[2]) / 1000000.0 for sd in site_data]\n    elif metric == \"power of cpu\":\n        return [float(sd[1]) / float(sd[2]) for sd in site_data]\n    elif metric == \"energy consumption of core\":\n        return [float(sd[0]) / 1000000.0 for sd in site_data]\n    elif metric == \"energy consumption of cpu\":\n        return [float(sd[1]) / 1000000.0 for sd in site_data]\n\n\ndef extract_metric_of_site(data, site, metric):\n    return [extract_metric(data[site][True], metric), extract_metric(data[site][False], metric)]\n\ndef generate_boxplot_per_site(data, site, n, metric):\n    fig = plt.figure(figsize=(10, 7))\n    plt.boxplot(extract_metric_of_site(data, site, metric))\n    plt.title(f\"Energy comparison of {site}\")\n    plt.ylabel(f\"{metric.capitalize()} ({UNIT_OF_METRICS[metric]})\")\n    plt.xticks([1, 2], labels=[\"With adblocker\", \"Without adblocker\"])\n    plt.savefig(f\"{PLOT_FOLDER}/boxplot-{site}-{metric}\")\n    # plt.show()\n\n#######################################################################################################\n# Two-color boxplot, based on https://stackoverflow.com/questions/16592222/matplotlib-group-boxplots #\n#######################################################################################################\n# function for setting the colors of the box plots pairs\ndef generate_large_boxplot(data, sites, n, metric):\n    def setBoxColors(bp):\n        plt.setp(bp['boxes'][0], color='blue')\n        plt.setp(bp['caps'][0], color='blue')\n        plt.setp(bp['caps'][1], color='blue')\n        plt.setp(bp['whiskers'][0], color='blue')\n        plt.setp(bp['whiskers'][1], color='blue')\n        # plt.setp(bp['fliers'][0], color='blue')\n        # plt.setp(bp['fliers'][1], color='blue')\n        plt.setp(bp['medians'][0], color='blue')\n\n        plt.setp(bp['boxes'][1], color='red')\n        plt.setp(bp['caps'][2], color='red')\n        plt.setp(bp['caps'][3], color='red')\n        plt.setp(bp['whiskers'][2], color='red')\n        plt.setp(bp['whiskers'][3], color='red')\n        plt.setp(bp['medians'][1], color='red')\n\n    fig = plt.figure()\n    ax1 = plt.axes()\n    pos = [1, 2]\n    for site in sites:\n        # if metric == \"duration\":\n        #     bp = 
plt.scatter(extract_metric_of_site(data, site, metric), lineoffsets=pos, linewidth=0.75, orientation=\"vertical\")\n # else:\n bp = plt.boxplot(extract_metric_of_site(data, site, metric), positions = pos, widths = 0.6)\n setBoxColors(bp)\n pos[0] += 3\n pos[1] += 3\n\n # set axes limits and labels\n plt.xlim(0,3*len(sites))\n ax1.set_xticklabels(sites)\n ax1.tick_params(axis='x', rotation=45)\n ax1.set_xticks([1.5 + 3*n for n in range(len(sites))])\n\n # draw temporary red and blue lines and use them to create a legend\n hB, = plt.plot([1,1],'b-')\n hR, = plt.plot([1,1],'r-')\n plt.legend((hB, hR),('With adblocker', 'Without adblocker'))\n hB.set_visible(False)\n hR.set_visible(False)\n\n plt.ylabel(f\"{metric.capitalize()} ({UNIT_OF_METRICS[metric]})\")\n plt.title(f\"Energy comparison of using adblocker (n={n})\")\n plt.tight_layout()\n plt.savefig(f'{PLOT_FOLDER}/boxplot-complete-{metric.replace(\" \", \"-\")}.png')\n # plt.show()\n\ndef print_table(data, sites, n, metric):\n table_string = f\"| Website | Average {metric} with adblocker ({UNIT_OF_METRICS[metric]}) | Average {metric} without adblocker ({UNIT_OF_METRICS[metric]})| Change of {metric} from adblocker |\\n| --- | --- | --- | --- |\"\n\n for site in sites:\n extracted = extract_metric_of_site(data, site, metric)\n with_adblocker = np.average(extracted[0])\n without_adblocker = np.average(extracted[1])\n absolute_change = sigfig.round(with_adblocker - without_adblocker, sigfigs=4)\n percentual_change = round(float(with_adblocker - without_adblocker) / float(without_adblocker) * 100.0, 1)\n table_string += f\"\\n| {site.capitalize()} | {sigfig.round(with_adblocker, sigfigs=4)} | {sigfig.round(without_adblocker, sigfigs=4)} | {absolute_change} ({percentual_change}%) |\"\n\n print(table_string)\n\nif __name__ == \"__main__\":\n main()","repo_name":"ThijsRay/CS4415-Sustainable-Software-Engineering","sub_path":"project1/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6595771038","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport json\nfrom importlib import reload\n# import thirdparty\n\n#%%\nfrom preprocess.transform import get_balance_data_from_df\nfrom visualization.EML import plot_evaluation, draw_xg_feature_importance\nfrom feature.valid import evaluate\nfrom visualization.EDA import draw_boxplot, draw_pandemic_contrast\nfrom logitboost import LogitBoost\nfrom xgboost import XGBClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nimport statsmodels.api as sm\nfrom models.LogisticVariants import TransferLogistic\n\n# %%\n# to prove the heterogeneity\n# 1. test the above and need to adjust the lasso so that it will excess the iv method\n# 2. add more model like xgboost\n# 3. 
save all the needed results\n# previous best feature by iv method ↓\n# key_feature = [\n# \"contacts_number_statistic_pct_black_ratio\",\n# \"contacts_number_statistic_pct_cnt_to_black\",\n# \"contacts_number_statistic_pct_cnt_be_black\",\n# \"contacts_query_org_cnt_3\",\n# \"contacts_query_be_query_cnt_05\",\n# \"contacts_gray_score_most_familiar_be_all\",\n# \"contacts_rfm_call_cnt_to_applied\",\n# \"user_searched_history_by_day_d30_pct_cnt_org_cf\",\n# \"LOANTERM\",\n# \"contacts_gray_score_min\",\n# \"user_searched_history_by_day_d15_pct_cnt_org_cash\",\n# \"contacts_number_statistic_pct_router_ratio\",\n# \"contacts_query_to_query_cnt_9\",\n# \"user_searched_history_by_day_m4_cnt_org_cf\",\n# \"user_searched_history_by_day_d30_pct_cnt_org_cash\",\n# \"user_searched_history_by_day_d7_pct_cnt_org_cc\",\n# \"contacts_query_to_query_cnt_05\",\n# \"user_searched_history_by_day_m4_pct_cnt_org_all\",\n# \"user_searched_history_by_day_m4_pct_cnt_org_cash\",\n# \"user_searched_history_by_day_d90_pct_cnt_all\",\n# ]\nkey_feature = json.load(open(\"../data/feature_columns(LASSO).json\"))\ndf = pd.read_csv(\"../data/cleaned_all_data.csv\")\ninterval_dict = json.load(open(\"../data/interval.json\"))\n\n#%%\n\n# get df and interval_dict and get key_feature\n\n\n# TODO: new method to write the cost function curve\n\n#%%\n# balance dataset testing\nfor ratio in [0.2, 0.4, 0.5, 0.6, 0.8]:\n (\n feature_train_balance,\n label_train_balance,\n feature_test_balance,\n label_test_balance,\n ) = get_balance_data_from_df(df, interval_dict, key_feature, ratio)\n rf_balance = RandomForestClassifier(max_depth=4, random_state=0)\n rf_balance.fit(feature_train_balance, label_train_balance)\n rf_pred_balance = rf_balance.predict_proba(feature_test_balance)[:, 1]\n rf_evaluation = evaluate(\n label_test_balance,\n rf_pred_balance,\n save_path=\"../data/rf_balance(%s)_evaluation.json\" % ratio,\n )\n plot_evaluation(\n label_test_balance, rf_pred_balance, \"../figure\", method=\"RF_balance_%s\" % ratio\n )\n# %%\n\n\n#%%\n# Other classification models\n\n\n(\n feature_train_balance,\n label_train_balance,\n feature_test_balance,\n label_test_balance,\n) = get_balance_data_from_df(df, interval_dict, key_feature, 0.5)\n\nlr = LogisticRegression(\n C=0.05,\n class_weight=None,\n dual=False,\n fit_intercept=True,\n intercept_scaling=1,\n max_iter=100,\n multi_class=\"ovr\",\n n_jobs=1,\n penalty=\"l2\",\n random_state=None,\n solver=\"liblinear\",\n tol=0.0001,\n verbose=0,\n warm_start=False,\n)\n\nlr.fit(feature_train_balance, label_train_balance)\nlr.fit(feature_train_balance, label_train_balance)\nlr_pred = lr.predict_proba(feature_test_balance)[:, 1]\nlr_evaluation = evaluate(\n label_test_balance, lr_pred, save_path=\"../data/lr_evaluation.json\"\n)\nplot_evaluation(label_test_balance, lr_pred, \"../figure\", method=\"LR\")\n\n# show the detail of the logistic model\n\n#%%\nsvm = SVC(C=1.0, probability=True)\nsvm_index = list(np.random.choice(list(feature_train_balance.index), 10000))\nsvm.fit(feature_train_balance.loc[svm_index, :], label_train_balance[svm_index])\nsvm_pred = svm.predict_proba(feature_test_balance)[:, 1]\nsvm_evaluation = evaluate(\n label_test_balance, svm_pred, save_path=\"../data/svm_evaluation.json\"\n)\nplot_evaluation(label_test_balance, svm_pred, \"../figure\", method=\"SVM\")\n#%%\nrf = RandomForestClassifier(max_depth=4, random_state=0)\nrf.fit(feature_train_balance, label_train_balance)\nrf_pred = rf.predict_proba(feature_test_balance)[:, 1]\nrf_evaluation = evaluate(\n 
label_test_balance, rf_pred, save_path=\"../data/rf_evaluation.json\"\n)\nplot_evaluation(label_test_balance, rf_pred, \"../figure\", method=\"RF\")\n\n#%%\nfrom feature import valid\n\nreload(valid)\nxg = XGBClassifier(\n    learning_rate=0.01,\n    n_estimators=10, # number of trees - build xgboost with 10 trees\n    max_depth=4, # tree depth\n    min_child_weight=1, # minimum leaf weight\n    gamma=0.0, # penalty coefficient on the number of leaf nodes\n    subsample=1, # use all samples for each tree\n    colsample_bytree=1, # use all features for each tree\n    scale_pos_weight=1, # handle class imbalance\n    random_state=27, # random seed\n    silent=0,\n)\nxg.fit(feature_train_balance, label_train_balance)\nxg_pred = xg.predict_proba(feature_test_balance)[:, 1]\nxg_evaluation = valid.evaluate(\n    label_test_balance, xg_pred, save_path=\"../data/xg_evaluation.json\"\n)\nplot_evaluation(label_test_balance, xg_pred, \"../figure\", method=\"XG\")\n#%%\n\nlb = LogitBoost(n_estimators=200, random_state=0) # base_estimator=LogisticRegression()\nlb.fit(feature_train_balance, label_train_balance)\nlb_pred = lb.predict_proba(feature_test_balance)[:, 1]\nlb_evaluation = evaluate(\n    label_test_balance, lb_pred, save_path=\"../data/lb_evaluation.json\"\n)\nplot_evaluation(label_test_balance, lb_pred, \"../figure\", method=\"LB\")\n#%%\nfrom feature import valid\n\n#%%\n# Auto-tune model for pandemic\n# 1. XGboost\n# 2. XGboost - additive learning\n# 3. LogisticBoosting - additive learning\n# 4. dummy Logistic\n# 5. Transfer Logistic\nfrom preprocess import transform\n\nreload(transform)\nparams = {\"objective\": \"reg:linear\", \"verbose\": False}\nbest_model = XGBClassifier(\n    learning_rate=0.01,\n    n_estimators=10, # number of trees - build xgboost with 10 trees\n    max_depth=4, # tree depth\n    min_child_weight=1, # minimum leaf weight\n    gamma=0.0, # penalty coefficient on the number of leaf nodes\n    subsample=1, # use all samples for each tree\n    colsample_bytree=1, # use all features for each tree\n    scale_pos_weight=1, # handle class imbalance\n    random_state=27, # random seed\n    silent=0,\n)\n(\n    train_feature_2019,\n    train_label_2019,\n    test_feature_2020,\n    test_label_2020,\n) = transform.get_contrast_data_from_df(df, interval_dict, key_feature)\n\ndraw_boxplot(train_feature_2019, test_feature_2020, \"../figure/箱线对比图.png\")\nbest_model.fit(train_feature_2019, train_label_2019)\nxg_2020_pred = best_model.predict_proba(test_feature_2020)[:, 1]\nxg_2020_evaluation = valid.evaluate(\n    test_label_2020, xg_2020_pred, save_path=\"../data/xg(2020)_evaluation.json\"\n)\nplot_evaluation(test_label_2020, xg_2020_pred, \"../figure\", method=\"XG_2020\")\n\n#%%\n# additive learning for xgboost\nimport xgboost as xgb\n\nglimse_index = list(\n    np.random.choice(list(test_feature_2020.index), 1000, replace=False)\n)\ntest_index = list(set(test_feature_2020.index) - set(glimse_index))\nparams = best_model.get_xgb_params()\nxg_2020_train = xgb.DMatrix(\n    test_feature_2020.loc[glimse_index, :], label=test_label_2020[glimse_index]\n)\nxg_2020_test = xgb.DMatrix(\n    test_feature_2020.loc[test_index, :], label=test_label_2020[test_index]\n)\nbest_model.save_model(\"../data/xg_2019.model\")\nadditive_xg = xgb.train(params, xg_2020_train, 5, xgb_model=\"../data/xg_2019.model\")\nadditive_xg_pred = additive_xg.predict(xg_2020_test)\nadditive_xg_evaluation = valid.evaluate(\n    test_label_2020[test_index],\n    additive_xg_pred,\n    save_path=\"../data/additive_xg(2020)_evaluation.json\",\n)\nplot_evaluation(\n    test_label_2020[test_index],\n    additive_xg_pred,\n    \"../figure\",\n    method=\"additive_XG_2020\",\n)\n#%%\n\n\n# test p-values\nlr_base = LogisticRegression(C=1e30).fit(train_feature_2019, train_label_2019)\ntf = TransferLogistic(lr_base)\ntf.fit(test_feature_2020.loc[glimse_index, :], 
test_label_2020[glimse_index])\ntf_2020_pred = tf.predict_proba(test_feature_2020.loc[test_index, :])[:, 1]\ntf_2020_evaluation = evaluate(\n test_label_2020[test_index],\n tf_2020_pred,\n save_path=\"../data/tf(2020)_evaluation.json\",\n)\nplot_evaluation(\n test_label_2020[test_index], tf_2020_pred, \"../figure\", method=\"TF_2020\"\n)\n\n#%%\n\ndraw_pandemic_contrast(\n best_model,\n tf,\n train_feature_2019,\n test_feature_2020.loc[test_index, :],\n \"../figure/all_year_proba_curve.png\",\n)\n\n#%%\n\ndraw_xg_feature_importance(xg, save_path=\"../figure/feature_importance(xgboost).png\")\n# %%\n\nfrom sklearn.cluster import KMeans\nimport numpy as np\nfrom models.ClusterDividePredict import ClusterPredictor\nfrom models import ClusterDividePredict\n\nreload(ClusterDividePredict)\nClusterPredictor = ClusterDividePredict.ClusterPredictor\nkmeans = KMeans(n_clusters=5, random_state=0)\npredict_models = [\n LogisticRegression(\n C=0.05,\n class_weight=None,\n dual=False,\n fit_intercept=True,\n intercept_scaling=1,\n max_iter=100,\n multi_class=\"ovr\",\n n_jobs=1,\n penalty=\"l2\",\n random_state=None,\n solver=\"liblinear\",\n tol=0.0001,\n verbose=0,\n warm_start=False,\n )\n for _ in range(20)\n]\ncluster_predictor = ClusterPredictor(kmeans, predict_models)\ncluster_predictor.fit(\n pd.concat(\n [train_feature_2019, test_feature_2020.loc[glimse_index, :]], ignore_index=True\n ),\n pd.concat([train_label_2019, test_label_2020[glimse_index]], ignore_index=True),\n)\ncp_2020_pred = cluster_predictor.predict_proba(test_feature_2020.loc[test_index, :])[\n :, 1\n]\ncp_2020_evaluation = evaluate(\n test_label_2020[test_index],\n cp_2020_pred,\n save_path=\"../data/cp(2020)_evaluation.json\",\n)\nplot_evaluation(\n test_label_2020[test_index], cp_2020_pred, \"../figure\", method=\"CP_2020\"\n)\n\n# %%\nfrom models.CascadePredict import CascadePredictor\n\nCaP = CascadePredictor(\n [RandomForestClassifier() for i in range(2)], lambda x: x < 0.7, 0.0\n)\nCaP.fit_list(\n [train_feature_2019, test_feature_2020.loc[glimse_index, :]],\n [train_label_2019, test_label_2020[glimse_index]],\n)\ncap_2020_pred = CaP.predict_proba(test_feature_2020.loc[test_index, :])[:,1]\ncap_2020_evaluation = evaluate(\n test_label_2020[test_index],\n cap_2020_pred,\n save_path=\"../data/cap(2020)_evaluation.json\",\n)\nplot_evaluation(\n test_label_2020[test_index], cap_2020_pred, \"../figure\", method=\"CAP_2020\"\n)","repo_name":"SmartDataLab/DefaultPredict","sub_path":"src/model_selection.py","file_name":"model_selection.py","file_ext":"py","file_size_in_byte":10654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32628181609","text":"from django.core.urlresolvers import reverse, resolve\r\nfrom django import template\r\nfrom humblebola.models import *\r\nfrom humblebola import analytics\r\n\r\nregister = template.Library()\r\n\r\n\r\n@register.inclusion_tag('team_nav_bar.html', takes_context=True)\r\ndef team_nav_bar(context):\r\n league = context['league']\r\n current_tournament = context['tournament']\r\n team = context['team']\r\n request = context['request']\r\n games = Game.objects.filter(\r\n league_id=league.id,\r\n schedule__gt=current_tournament.start_date,\r\n schedule__lt=current_tournament.end_date)\r\n\r\n child_tournaments = Tournament.objects.filter(\r\n league_id=league.id,\r\n id__in=PlayerTournamentTeam.objects.filter(\r\n team_id=team.id).distinct('tournament_id').values('tournament_id'))\r\n\r\n parent_tournaments = 
Tournament.objects.filter(\r\n id__in=Tournament.objects.filter(\r\n league_id=league.id,\r\n id__in=PlayerTournamentTeam.objects.filter(\r\n team_id=team.id).distinct(\r\n 'tournament_id').values(\r\n 'tournament_id')).values_list('parent_id', flat=True))\r\n\r\n team_tournaments = child_tournaments | parent_tournaments\r\n\r\n tournaments_table = []\r\n for tournament in team_tournaments.order_by('start_date', 'id'):\r\n tournaments_table.append({'tournament': tournament})\r\n\r\n # Handle context based previous & next tournament link. Built-in handler\r\n # for PBA so it doesn't include parent tournament.\r\n\r\n if league.id == 1:\r\n try:\r\n previous_tournament = Tournament.objects.filter(\r\n league_id=league.id,\r\n parent_id__isnull=False,\r\n id__lt=current_tournament.id,\r\n ).order_by('-start_date')[0]\r\n\r\n previous_tournament_link = reverse(resolve(\r\n request.path_info).view_name, args=[\r\n league.code,\r\n team.code,\r\n previous_tournament.id])\r\n\r\n except IndexError:\r\n previous_tournament = None\r\n previous_tournament_link = None\r\n try:\r\n next_tournament = Tournament.objects.filter(\r\n league_id=league.id,\r\n parent_id__isnull=False,\r\n id__gt=current_tournament.id,\r\n ).order_by('start_date')[0]\r\n\r\n next_tournament_link = reverse(resolve(\r\n request.path_info).view_name, args=[\r\n league.code,\r\n team.code,\r\n next_tournament.id])\r\n except IndexError:\r\n next_tournament = None\r\n next_tournament_link = None\r\n else:\r\n try:\r\n previous_tournament = Tournament.objects.filter(\r\n league_id=league.id,\r\n id__lt=current_tournament.id,\r\n ).order_by('-start_date')[0]\r\n\r\n previous_tournament_link = reverse(resolve(\r\n request.path_info).view_name, args=[\r\n league.code,\r\n team.code,\r\n previous_tournament.id])\r\n\r\n except IndexError:\r\n previous_tournament = None\r\n previous_tournament_link = None\r\n try:\r\n next_tournament = Tournament.objects.filter(\r\n league_id=league.id,\r\n id__gt=current_tournament.id,\r\n ).order_by('start_date')[0]\r\n\r\n next_tournament_link = reverse(resolve(\r\n request.path_info).view_name, args=[\r\n league.code,\r\n team.code,\r\n next_tournament.id])\r\n\r\n except IndexError:\r\n next_tournament = None\r\n next_tournament_link = None\r\n\r\n record = analytics.get_wins_losses(games, team)\r\n pace = analytics.get_pace(games, team)\r\n ortg = analytics.get_eff_ratings(games.filter(game_type=0), 'off', team)\r\n drtg = analytics.get_eff_ratings(games.filter(game_type=0), 'def', team)\r\n\r\n return {'league': league,\r\n 'team': team,\r\n 'current_tournament': current_tournament,\r\n 'previous_tournament': previous_tournament,\r\n 'next_tournament': next_tournament,\r\n 'previous_tournament_link': previous_tournament_link,\r\n 'next_tournament_link': next_tournament_link,\r\n 'tournaments_table': tournaments_table,\r\n 'record': record,\r\n 'pace': pace,\r\n 'ortg': ortg,\r\n 'drtg': drtg,\r\n }\r\n","repo_name":"nicobaguio/Humblebola-Stats","sub_path":"humblebola/templatetags/team_nav_tags.py","file_name":"team_nav_tags.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30153680197","text":"# https://www.acmicpc.net/problem/10989\n\n# 계수정렬(Counting Sort) : 중복되는 값들의 개수를 계산하여 순차적으로 출력 \n# 메모리 측면에서 우수. 
좁은 범위의 숫자들에 대해서는 성능 우월함.\n\nimport sys\n\ninput = sys.stdin.readline\n\nlength = int(input())\nnums = [0] * 10001\n\nfor _ in range(length):\n num = int(input())\n nums[num] += 1\n\nfor num in range(1, 10001):\n if nums[num] != 0:\n for _ in range(nums[num]):\n sys.stdout.write(str(num) + \"\\n\")\n\n# =========================================","repo_name":"eagerithm/algorithms","sub_path":"bugoverdose/sort/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73795047201","text":"import torch\nfrom torch import nn\nimport functools\nfrom torchsummary import summary\n\n\nSN = torch.nn.utils.spectral_norm\n\ndef _get_norm_layer_2d(norm):\n if norm == 'none':\n return nn.Identity\n elif norm == 'batch_norm':\n return nn.BatchNorm2d\n elif norm == 'instance_norm':\n return functools.partial(nn.InstanceNorm2d, affine=True)\n elif norm == 'layer_norm':\n return lambda num_features: nn.GroupNorm(1, num_features)\n else:\n raise NotImplementedError\n\n\nclass Discriminator(nn.Module):\n\n def __init__(self,\n input_channels=3,\n last_kernel_size=4,\n dim=64,\n depth=3,\n norm='batch_norm'):\n\n super(Discriminator, self).__init__()\n\n Norm = _get_norm_layer_2d(norm)\n\n def conv_norm_lrelu(in_dim, out_dim, kernel_size=4, stride=2, padding=1):\n return nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size, stride=stride,\n padding=padding, bias=False),\n Norm(out_dim),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout(0.3)\n )\n\n layers = []\n\n # first layer\n out_dim = dim\n layers.append(nn.Conv2d(input_channels, out_dim,\n kernel_size=4, stride=2, padding=1, bias=False))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n layers.append(nn.Dropout(0.3))\n\n # middle layer\n for _ in range(depth):\n in_dim = out_dim\n out_dim = in_dim * 2\n layers.append(conv_norm_lrelu(in_dim, out_dim))\n\n # last layer\n layers.append(nn.Conv2d(out_dim, 1, kernel_size=last_kernel_size, stride=1, padding=0, bias=False))\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, x):\n y = self.net(x)\n return y\n\nif __name__ == '__main__':\n\n D = Discriminator()\n\n summary(D, input_size=(3, 1, 1))\n\n","repo_name":"yzy1996/generative-latent-optimization-pytorch","sub_path":"models/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15228736929","text":"from requests import get\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport json\ndata={}\ndata[\"IG\"]={}\ndef get_url(url,online=True):\n if online :\n r=get(url)\n if r.status_code==200:\n return r.text\n else:\n return None\n else:\n try:\n f=open(url,\"r\")\n except:\n return None\n else:\n f.read()\n f.close\n\ndef get_links(url):\n t=get_url(url)\n if t!=None:\n b=BeautifulSoup(t,\"lxml\")\n l=b.find_all(\"a\" , class_=\"cover\")\n L=[]\n for e in l: \n L.append(e[\"href\"])\n return L\ndef get_img(url):\n t=get_url(url)\n b=BeautifulSoup(t,\"lxml\")\n l=[]\n c=b.find(\"div\" , class_=\"product\").find(\"img\")\n if c:\n return(c[\"src\"])\ndef all_img(l):\n for i in l:\n get_img(i)\n \ndef get_name(url):\n t=get_url(url)\n b=BeautifulSoup(t,\"lxml\")\n c=b.find(\"div\" , class_=\"title\").find(\"h1\")\n return(c.text)\n \ndef get_genre(url):\n s=0\n t=get_url(url)\n b=BeautifulSoup(t,\"lxml\")\n l=b.find(\"div\" , class_=\"tags\").find(\"a\" , class_=\"tag tag1\")\n 
d=b.find(\"div\" , class_=\"tags\").find(\"a\" , class_=\"tag tag8\")\n c=b.find(\"div\" , class_=\"tags\").find(\"a\" , class_=\"tag tag32\")\n j=b.find(\"div\" , class_=\"tags\").find(\"a\" , class_=\"tag tag15\")\n if l:\n return(l[\"content\"])\n elif d:\n return(d[\"content\"])\n elif c:\n return(c[\"content\"])\n elif j:\n return(j[\"content\"])\n else:\n s=1\n \ndef get_price(url):\n t=get_url(url)\n b=BeautifulSoup(t,\"lxml\")\n c=b.find(\"div\" , class_=\"prices\").find(\"div\" , class_=\"price\")\n return(c.text)\ndef get_info(url):\n global data\n a=get_name(url)\n b=get_genre(url)\n c=get_price(url)\n j=get_img(url)\n d={}\n if (a!=None and b!=None and c!=None):\n h=\"work\"\n data[\"IG\"][a]={\"Genre\":b,\"Prix\":c,\"Image\":j}\n\ndef get_allinfo(l):\n for i in l:\n get_info(i)\n h=\"instant\" \n n=(json.dumps(data, sort_keys=True, indent=4))\n with open(\"C:\\\\Users\\\\Bouhmid\\\\Desktop\\\\Projet\\\\Try\\\\\"+h+\".txt\",\"a\") as f:\n f.write(n)\n print(n)\ndef down_img(url):\n t=get_url(url)\n b=BeautifulSoup(t,\"lxml\")\n l=[]\n c=b.find(\"div\" , class_=\"product\").find(\"img\")\n temp=c[\"src\"]\n if temp[:1]==\"/\":\n image=\"https://www.instant-gaming.com/en/\" + temp\n else:\n image=temp\n filename=c[\"alt\"]\n imagefile=open(filename + \".jpeg\" , \"wb\")\n ##### J'ai juste utilisé get(img).content pour avoir le contenu de l'image\n imagefile.write(get(image).content)\n imagefile.close()\nh=\"instant\" \nwith open(\"C:\\\\Users\\\\Bouhmid\\\\Desktop\\\\Projet\\\\Try\\\\\"+h+\".txt\",\"a\") as f:\n f.close()\nl=get_links(\"https://www.instant-gaming.com/en/\")\n#print(l)\n#s=all_img(l)\n#print(s)\n#get_allinfo(l)\nget_allinfo(l)\n#down_allimg(l)\n#down_info(\"https://www.instant-gaming.com/en/840-buy-key-gogcom-cyberpunk-2077/\")\n\n","repo_name":"SghaierAhmed1/WebScraping-Django","sub_path":"I_G.py","file_name":"I_G.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72743255521","text":"import math as mt\r\nimport random\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing import image\r\nfrom scipy.misc import imsave\r\n\r\nvalImageSize = (2**7, 2**7)\r\nvalNumImage = 1024\r\nvalXScale = [-5, 5]\r\nvalYScale = [[-25, 0], [-20, 10]]\r\n# a, b, c, d , [min, max]\r\nvalParScale = [[8., 12.], [0.1, 0.3], [0.5, 1.2], [3., 7.]]\r\nvalXRes = 50\r\nvalNumRandom = 500000\r\nvalKURPar = [10., 0.2, 0.8, 5.]# a, b, c, d\r\n\r\ndef funKUR(x, parSet=valKURPar, parYScale=valYScale):\r\n a, b, c, d = parSet\r\n [y1Min, y1Max], [y2Min, y2Max] = parYScale\r\n f1 = sum([-a * mt.exp(-b * (x[i]**2 + x[i + 1]**2)**0.5)\r\n for i in range(2)])\r\n f2 = sum([abs(x[i])**c + d * mt.sin(x[i]**3)\r\n for i in range(3)])\r\n f1 = np.clip(f1, y1Min, y1Max)\r\n f2 = np.clip(f2, y2Min, y2Max)\r\n \r\n return f1, f2\r\n\r\ndef funF(parSet=valKURPar, parScale=valImageSize,\r\n parXScale=valXScale, parYScale=valYScale, parRes=valXRes):\r\n xMin, xMax = parXScale\r\n [y1Min, y1Max], [y2Min, y2Max] = parYScale\r\n y1Scl, y2Scl = y1Max - y1Min, y2Max - y2Min\r\n xScale = xMax - xMin\r\n _, sclMax = parScale\r\n sclMin = 0\r\n\r\n f = [[], []]\r\n xSet = []\r\n for i in range(parRes):\r\n for j in range(parRes):\r\n for k in range(parRes):\r\n x1 = xMin + xScale * i / (parRes - 1)\r\n x2 = xMin + xScale * j / (parRes - 1)\r\n x3 = xMin + xScale * k / (parRes - 1)\r\n xSet.append([x1, x2, x3])\r\n for x in xSet:\r\n f1, f2 = funKUR(x, parSet=parSet)\r\n 
f[0].append(int((f1 - y1Min) * (sclMax - sclMin - 1) / y1Scl))\r\n f[1].append(int((f2 - y2Min) * (sclMax - sclMin - 1) / y2Scl))\r\n return f\r\n\r\ndef funFR(parSet=valKURPar, parScale=valImageSize,\r\n parXScale=valXScale, parYScale=valYScale, parNum=valNumRandom):\r\n a, b, c, d = parSet\r\n xMin, xMax = parXScale\r\n [y1Min, y1Max], [y2Min, y2Max] = parYScale\r\n y1Scl, y2Scl = y1Max - y1Min, y2Max - y2Min\r\n xScale = xMax - xMin\r\n _, sclMax = parScale\r\n sclMin = 0\r\n\r\n parXSet = [[xMin + xScale * np.random.random()\r\n for j in range(3)]\r\n for i in range(parNum)]\r\n f = [[], []]\r\n for i in range(parNum):\r\n x = parXSet[i]\r\n f1, f2 = funKUR(x, parSet=parSet)\r\n f[0].append(int((f1 - y1Min) * (sclMax - sclMin - 1) / y1Scl))\r\n f[1].append(int((f2 - y2Min) * (sclMax - sclMin - 1) / y2Scl))\r\n return f\r\n\r\ndef funPF(parImgX):\r\n parY = np.copy(parImgX)\r\n parPF = []\r\n for i in range(parY.shape[1]):\r\n for j in range(parY.shape[0]):\r\n if parY[parY.shape[0] - j - 1][i] == 0:\r\n parPF.append([i, j])\r\n \r\n parPop = []\r\n for i in range(len(parPF) - 1):\r\n for j in range(i + 1, len(parPF)):\r\n if (parPF[i][0] < parPF[j][0]) and (parPF[i][1] < parPF[j][1]):\r\n parPop.append(j)\r\n if (parPF[i][0] > parPF[j][0]) and (parPF[i][1] > parPF[j][1]):\r\n parPop.append(i)\r\n parPop = set(parPop)\r\n parPop = list(parPop)\r\n parPop.sort()\r\n for i in range(len(parPop)):\r\n parPop[i] -= i\r\n for popIndex in parPop:\r\n parPF.pop(popIndex)\r\n \r\n return parPF\r\n'''\r\nvarParSet = [\r\n [valParScale[j][0] +\r\n (valParScale[j][1] - valParScale[j][0]) * np.random.random()\r\n for j in range(4)\r\n ] for i in range(10)]\r\nfor i in range(3):\r\n print(varParSet[i])\r\n #f = funF(parSet=varParSet[i])\r\n f = funFR(parSet=varParSet[i])\r\n plt.plot(f[0], f[1], 'bo', ms=1)\r\n plt.show()\r\nplt.close()\r\n'''\r\n#\"\"\"\r\nvarParSet = []\r\nvarParSet = [\r\n [random.uniform(valParScale[j][0], valParScale[j][1])\r\n for j in range(4)]\r\n for i in range(valNumImage)]\r\nprint('Parameters sets done.')\r\nvarParSet.append(valKURPar)\r\n\r\nvalImageIndex = [i for i in range(valNumImage)]\r\nrandom.shuffle(valImageIndex)\r\nfor i in range(valNumImage + 1):\r\n if i % 10 == 0:\r\n timStart = time.time()\r\n\r\n f = [[], []]\r\n f12 = []\r\n imgX = 255 * np.ones(valImageSize, dtype='uint8')\r\n imgY = 255 * np.ones(valImageSize, dtype='uint8')\r\n #f = funFR(parSet=varParSet[i])\r\n f = funF(parSet=varParSet[i])\r\n for F in zip(f[0], f[1]):\r\n imgX[valImageSize[1] - 1 - F[1]][F[0]] = 0\r\n f12 = funPF(imgX)\r\n for xPF in f12:\r\n imgY[valImageSize[1] - 1 - xPF[1]][xPF[0]] = 0\r\n '''\r\n plt.subplot(121)\r\n plt.imshow(imgX, cmap='Greys_r')\r\n plt.subplot(122)\r\n plt.imshow(imgY, cmap='Greys_r')\r\n plt.show()\r\n plt.close()\r\n #'''\r\n #'''\r\n if i == valNumImage:\r\n fnameX = './datSet/KUR/datX/' + 'imgX_KUR.bmp'\r\n fnameY = './datSet/KUR/datY/' + 'imgY_KUR.bmp'\r\n else:\r\n fnameX = './datSet/KUR/datX/' + 'imgX_{}.bmp'.format(valImageIndex[i])\r\n fnameY = './datSet/KUR/datY/' + 'imgY_{}.bmp'.format(valImageIndex[i])\r\n imsave(fnameX, imgX)\r\n imsave(fnameY, imgY)\r\n #'''\r\n if i % 10 == 9:\r\n timEnd = time.time()\r\n print('Generated Samples: {}, Used {}s.'.format(\r\n i + 1, timEnd - timStart))\r\nprint('GENERATED 
DONE.')\r\n#\"\"\"\r\n","repo_name":"lrlgogo/DCNN-for-PF-of-MOOP","sub_path":"funKUR.py","file_name":"funKUR.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"3623737992","text":"import os\nimport random\nimport uuid\n\nfrom azure.storage.filedatalake import (\n DataLakeServiceClient,\n)\n\n\ndef access_control_sample(filesystem_client):\n # create a parent directory\n dir_name = \"testdir\"\n print(\"Creating a directory named '{}'.\".format(dir_name))\n directory_client = filesystem_client.create_directory(dir_name)\n\n # populate the directory with some child files\n create_child_files(directory_client, 35)\n\n # get and display the permissions of the parent directory\n acl_props = directory_client.get_access_control()\n print(\"Permissions of directory '{}' are {}.\".format(dir_name, acl_props['permissions']))\n\n # set the permissions of the parent directory\n new_dir_permissions = 'rwx------'\n directory_client.set_access_control(permissions=new_dir_permissions)\n\n # get and display the permissions of the parent directory again\n acl_props = directory_client.get_access_control()\n print(\"New permissions of directory '{}' are {}.\".format(dir_name, acl_props['permissions']))\n\n # iterate through every file and set their permissions to match the directory\n for file in filesystem_client.get_paths(dir_name):\n file_client = filesystem_client.get_file_client(file.name)\n\n # get the access control properties of the file\n acl_props = file_client.get_access_control()\n\n if acl_props['permissions'] != new_dir_permissions:\n file_client.set_access_control(permissions=new_dir_permissions)\n print(\"Set the permissions of file '{}' to {}.\".format(file.name, new_dir_permissions))\n else:\n print(\"Permission for file '{}' already matches the parent.\".format(file.name))\n\n\ndef create_child_files(directory_client, num_child_files):\n import concurrent.futures\n import itertools\n # Use a thread pool because it is too slow otherwise\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n def create_file():\n # generate a random name\n file_name = str(uuid.uuid4()).replace('-', '')\n directory_client.get_file_client(file_name).create_file()\n\n futures = {executor.submit(create_file) for _ in itertools.repeat(None, num_child_files)}\n concurrent.futures.wait(futures)\n print(\"Created {} files under the directory '{}'.\".format(num_child_files, directory_client.path_name))\n\n\ndef run():\n account_name = os.getenv('STORAGE_ACCOUNT_NAME', \"\")\n account_key = os.getenv('STORAGE_ACCOUNT_KEY', \"\")\n\n # set up the service client with the credentials from the environment variables\n service_client = DataLakeServiceClient(account_url=\"{}://{}.dfs.core.windows.net\".format(\n \"https\",\n account_name\n ), credential=account_key)\n\n # generate a random name for testing purpose\n fs_name = \"testfs{}\".format(random.randint(1, 1000))\n print(\"Generating a test filesystem named '{}'.\".format(fs_name))\n\n # create the filesystem\n filesystem_client = service_client.create_file_system(file_system=fs_name)\n\n # invoke the sample code\n try:\n access_control_sample(filesystem_client)\n finally:\n # clean up the demo filesystem\n filesystem_client.delete_file_system()\n\n\nif __name__ == '__main__':\n 
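# create a test filesystem, run the access-control demo, then clean it up\n 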
run()\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/storage/azure-storage-file-datalake/samples/datalake_samples_access_control.py","file_name":"datalake_samples_access_control.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"16200706622","text":"import json\nimport numpy as np\nimport os\n\nfrom PyQt5 import QtWidgets, QtCore\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom client_tools.widgets import ClickableLabel, SuperSpinBox\n\nclass DIM3000_RFClient(QtWidgets.QGroupBox):\n name = None\n DeviceProxy = None\n updateID = np.random.randint(0, 2**31 - 1)\n amplitudeDisplayUnits = [(0, 'dBm')]\n amplitudeDigits = None\n frequencyDisplayUnits = [(-6, 'uHz'), (-3, 'mHz'), (0, 'Hz'), (3, 'kHz'), \n (6, 'MHz'), (9, 'GHz')]\n frequencyDigits = None\n fmfreqDisplayUnits = [(-6, 'uHz/V'), (-3, 'mHz/V'), (0, 'Hz/V'), (3, 'kHz/V'), \n (6, 'MHz/V'), (9, 'GHz/V')]\n fmfreqDigits = None\n spinboxWidth = 80\n \n def __init__(self, reactor, cxn=None):\n QtWidgets.QDialog.__init__(self)\n self.reactor = reactor\n reactor.callInThread(self.initialize)\n self.connectLabrad()\n \n def initialize(self):\n import labrad\n cxn = labrad.connect(name=self.name, host=os.getenv('LABRADHOST') , password = '')\n self.device = self.DeviceProxy(cxn)\n self.reactor.callFromThread(self.populateGUI)\n self.fm_dev = self.device.fm_dev\n\n def populateGUI(self):\n self.nameLabel = ClickableLabel('' + self.name + '')\n self.stateButton = QtWidgets.QPushButton()\n self.stateButton.setCheckable(True)\n \n self.frequencyLabel = ClickableLabel('Frequency: ')\n self.frequencyBox = SuperSpinBox(self.device._frequency_range, \n self.frequencyDisplayUnits, \n self.frequencyDigits)\n self.frequencyBox.setFixedWidth(self.spinboxWidth)\n \n self.amplitudeLabel = ClickableLabel('Amplitude: ')\n self.amplitudeBox = SuperSpinBox(self.device._amplitude_range, \n self.amplitudeDisplayUnits, \n self.amplitudeDigits)\n self.amplitudeBox.setFixedWidth(self.spinboxWidth)\n \n self.fmstateButton = QtWidgets.QPushButton()\n self.fmfreqBox = SuperSpinBox(self.device._fmfreq_range,\n self.fmfreqDisplayUnits,\n self.fmfreqDigits)\n self.fmfreqBox.setFixedWidth(self.spinboxWidth)\n self.fmfreqBox.setReadOnly(True)\n \n self.fmstateButton.setCheckable(True)\n self.fmdevLabel = ClickableLabel('FM Dev: ')\n self.fmdevBox = QtWidgets.QDoubleSpinBox()\n self.fmdevBox.setKeyboardTracking(False)\n self.fmdevBox.setRange(*self.device._fmdev_range)\n# self.fmdevBox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)\n self.fmdevBox.setDecimals(0)\n \n self.layout = QtWidgets.QGridLayout() \n self.layout.addWidget(self.nameLabel, 0, 0, 1, 1, \n QtCore.Qt.AlignRight)\n self.layout.addWidget(self.stateButton, 0, 1)\n self.layout.addWidget(self.frequencyLabel, 1, 0, 1, 1, \n QtCore.Qt.AlignRight)\n self.layout.addWidget(self.frequencyBox, 1, 1)\n self.layout.addWidget(self.amplitudeLabel, 2, 0, 1, 1, \n QtCore.Qt.AlignRight)\n self.layout.addWidget(self.amplitudeBox, 2, 1)\n \n self.layout.addWidget(self.fmstateButton, 3, 0, 1, 1,\n QtCore.Qt.AlignRight)\n self.layout.addWidget(self.fmfreqBox, 3, 1)\n self.layout.addWidget(self.fmdevLabel, 4, 0, 1, 1,\n QtCore.Qt.AlignRight)\n self.layout.addWidget(self.fmdevBox, 4, 1)\n \n self.setLayout(self.layout)\n\n self.setWindowTitle(self.name)\n self.setFixedSize(120 + self.spinboxWidth, 180)\n \n self.connectSignals()\n self.reactor.callInThread(self.getAll)\n \n def getAll(self):\n 
self.getRFState()\n self.getFMState()\n self.getFrequency()\n self.getAmplitude()\n self.getFMdev()\n self.getFMfreq()\n \n def getFMdev(self):\n fm_dev = self.fm_dev\n self.reactor.callFromThread(self.displayFMdev, fm_dev)\n \n def displayFMdev(self, fm_dev):\n self.fmdevBox.setValue(fm_dev)\n \n def getFMfreq(self):\n fm_freq = 3200*2**int(self.fm_dev)\n self.reactor.callFromThread(self.displayFMfreq, fm_freq)\n \n def displayFMfreq(self, fm_freq):\n self.fmfreqBox.display(fm_freq)\n \n def getRFState(self):\n rf_state = self.device.state\n self.reactor.callFromThread(self.displayRFState, rf_state)\n\n def displayRFState(self, rf_state):\n if rf_state:\n self.stateButton.setChecked(1)\n self.stateButton.setText('RF ON')\n else:\n self.stateButton.setChecked(0)\n self.stateButton.setText('RF OFF')\n \n def getFMState(self):\n fm_state = self.device.fm\n self.reactor.callFromThread(self.displayFMState, fm_state)\n\n def displayFMState(self, fm_state):\n if fm_state:\n self.fmstateButton.setChecked(1)\n self.fmstateButton.setText('FM ON')\n else:\n self.fmstateButton.setChecked(0)\n self.fmstateButton.setText('FM OFF')\n \n def getFrequency(self):\n frequency = self.device.frequency\n self.reactor.callFromThread(self.displayFrequency, frequency)\n\n def displayFrequency(self, frequency):\n self.frequencyBox.display(frequency)\n \n def getAmplitude(self):\n amplitude = self.device.amplitude\n self.reactor.callFromThread(self.displayAmplitude, amplitude)\n\n def displayAmplitude(self, amplitude):\n self.amplitudeBox.display(amplitude)\n\n def connectSignals(self):\n self.nameLabel.clicked.connect(self.onNameLabelClick)\n self.frequencyLabel.clicked.connect(self.onFrequencyLabelClick)\n self.amplitudeLabel.clicked.connect(self.onAmplitudeLabelClick)\n \n self.stateButton.released.connect(self.onNewRFState)\n self.fmstateButton.released.connect(self.onNewFMState)\n \n self.frequencyBox.returnPressed.connect(self.onNewFrequency)\n self.amplitudeBox.returnPressed.connect(self.onNewAmplitude)\n \n self.fmdevBox.valueChanged.connect(self.onNewFMDev)\n \n def onNameLabelClick(self):\n self.reactor.callInThread(self.getAll)\n \n def onFrequencyLabelClick(self):\n self.reactor.callInThread(self.getFrequency)\n \n def onAmplitudeLabelClick(self):\n self.reactor.callInThread(self.getAmplitude)\n \n def onNewRFState(self):\n rf_state = self.stateButton.isChecked()\n self.reactor.callInThread(self.setRFState, rf_state)\n \n def setRFState(self, rf_state):\n self.device.state = rf_state\n self.reactor.callFromThread(self.displayRFState, rf_state)\n\n def onNewFMState(self):\n fm_state = self.fmstateButton.isChecked()\n self.reactor.callInThread(self.setFMState, fm_state)\n \n def setFMState(self, fm_state):\n self.device.fm = fm_state\n self.reactor.callFromThread(self.displayFMState, fm_state)\n \n def onNewFMDev(self):\n self.fm_dev = self.fmdevBox.value()\n fm_dev = self.fm_dev\n self.reactor.callInThread(self.setFMDev, fm_dev)\n \n def setFMDev(self, fm_dev):\n self.device.set_fm_dev(fm_dev)\n self.getFMdev()\n self.getFMfreq()\n \n def onNewFrequency(self):\n frequency = self.frequencyBox.value()\n self.reactor.callInThread(self.setFrequency, frequency)\n\n def setFrequency(self, frequency):\n self.device.frequency = frequency\n self.reactor.callFromThread(self.displayFrequency, frequency)\n \n def onNewAmplitude(self):\n amplitude = self.amplitudeBox.value()\n self.reactor.callInThread(self.setAmplitude, amplitude)\n\n def setAmplitude(self, amplitude):\n self.device.amplitude = amplitude\n 
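# hand the confirmed value back to the Qt thread for display\n 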
self.reactor.callFromThread(self.displayAmplitude, amplitude)\n \n @inlineCallbacks\n def connectLabrad(self):\n from labrad.wrappers import connectAsync\n self.cxn = yield connectAsync(name=self.name, host=os.getenv('LABRADHOST'), password='')\n yield self.cxn.update.signal__signal(self.updateID)\n yield self.cxn.update.addListener(listener=self.receiveUpdate, source=None, \n ID=self.updateID)\n yield self.cxn.update.register(self.name)\n \n def receiveUpdate(self, c, updateJson):\n update = json.loads(updateJson)\n state = update.get('state')\n if state is not None:\n self.displayRFState(state)\n frequency = update.get('frequency')\n if frequency is not None:\n self.displayFrequency(frequency)\n amplitude = update.get('amplitude')\n if amplitude is not None:\n self.displayAmplitude(amplitude)\n \n def closeEvent(self, x):\n self.reactor.stop()\n\nclass MultipleClientContainer(QtWidgets.QWidget):\n name = None\n def __init__(self, client_list, reactor):\n QtWidgets.QDialog.__init__(self)\n self.client_list = client_list\n self.reactor = reactor\n self.populateGUI()\n \n def populateGUI(self):\n self.layout = QtWidgets.QHBoxLayout()\n for client in self.client_list:\n self.layout.addWidget(client)\n self.setFixedSize(240 * len(self.client_list), 220)\n self.setWindowTitle(self.name)\n self.setLayout(self.layout)\n\n def closeEvent(self, x):\n super(MultipleClientContainer, self).closeEvent(x)\n self.reactor.stop()\n\nif __name__ == '__main__':\n from rf2.devices.dim3000reda import DIM3000REDAProxy\n from rf2.devices.dim3000blue2 import DIM3000BLUE2Proxy\n\n class DIM3000REDAClient(DIM3000_RFClient):\n name = 'RED_A'\n DeviceProxy = DIM3000REDAProxy\n \n frequencyDigits = 6\n amplitudeDigits = 2\n fmfreqDigits = 1\n \n class DIM3000BLUE2Client(DIM3000_RFClient):\n name = 'BLUE_2'\n DeviceProxy = DIM3000BLUE2Proxy\n \n frequencyDigits = 6\n amplitudeDigits = 2\n fmfreqDigits = 1\n \n from PyQt5 import QtWidgets\n app = QtWidgets.QApplication([])\n from client_tools import qt5reactor \n qt5reactor.install()\n from twisted.internet import reactor\n\n widgets = [\n DIM3000REDAClient(reactor),\n DIM3000BLUE2Client(reactor),\n ]\n \n widget = MultipleClientContainer(widgets, reactor)\n widget.show()\n reactor.suggestThreadPoolSize(30)\n reactor.run()\n","repo_name":"jameszheng1990/LabRad_Strontium_A","sub_path":"rf2/clients/dim3000_client.py","file_name":"dim3000_client.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36184842993","text":"#Script to test the decision tree evaluation process\nimport numpy as np\nimport random\nfrom dqn import DQN\nfrom dec_process import look_ahead\nfrom exceptions import GetStateError\nfrom fireplace import cards\nfrom fireplace.exceptions import GameOver\nfrom interface import *\nfrom fireplace.utils import play_turn\n\n\n\n\n\ndef main():\n\n #Set up the network for the first time\n features = 263\n h1 = 50\n h2 = 50\n\n dqn = DQN(features, h1, h2, \"models/test_1\")\n\n #Initialize the game\n initialize()\n game = setup_game()\n ai_player = game.current_player\n\n try:\n while True:\n if game.current_player == ai_player:\n action_choice = look_ahead(game, dqn)\n print(\"Action chosen was: \", action_choice)\n perform_action(action_choice, ai_player, game)\n else:\n actions = get_actions(game.current_player)\n index = random.randint(0, len(actions)-1)\n perform_action(actions[index], game.current_player, game)\n\n\n except GetStateError:\n 
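# raised when a usable state vector cannot be extracted from the game\n 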
print(\"Error with get_state function\")\n except GameOver:\n print(\"Game ended normally\")\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"dillondaudert/Hearthstone-AI","sub_path":"src/dec_test.py","file_name":"dec_test.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"13805955706","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport sys\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QMessageBox\nfrom PyQt5.QtCore import QCoreApplication\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.title = \"PyQt5 Window :)\"\n self.top = 100\n self.left = 100\n self.width = 680\n self.height = 500\n\n self.InitWindow()\n\n def InitWindow(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.top, self.left, self.width, self.height)\n self.setWindowIcon(QtGui.QIcon(\"icon.png\"))\n\n btn = QPushButton(\"Close\", self)\n btn.move(50, 50)\n btn.setToolTip(\"close the app\")\n btn.clicked.connect(self.CloseApp)\n\n self.show()\n\n def CloseApp(self):\n reply = QMessageBox.question(self, \"Close\", \"Do you want to quit?\",\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n self.close()\n\n\ndef main():\n App = QApplication(sys.argv)\n window = Window()\n sys.exit(App.exec())\n\n##############################################################################\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jabbalaci/GUI-Dev-with-Python","sub_path":"01_examples/close.py","file_name":"close.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70825906403","text":"from neunet.autograd import Tensor\r\nimport numpy as np\r\n\r\n\r\n\r\nclass _AvgPool2dTensor(Tensor):\r\n def __init__(self, data, args, op):\r\n super().__init__(data, args, op)\r\n\r\n def backward(self, grad):\r\n (\r\n X,\r\n kernel_size,\r\n stride,\r\n padding,\r\n input_size,\r\n output_size,\r\n windows,\r\n ) = self.args\r\n \r\n batch_size, in_channels, in_height, in_width = input_size\r\n\r\n grad_X = np.zeros((batch_size, in_channels, in_height + 2 * padding[0], in_width + 2 * padding[1]))\r\n for i in range(output_size[0]):\r\n for j in range(output_size[1]):\r\n grad_X[:, :, i*stride[0]:i*stride[0]+kernel_size[0], j*stride[1]:j*stride[1]+kernel_size[1]] += grad[:, :, i, j, None, None]/(kernel_size[0] * kernel_size[1])\r\n\r\n grad_X = remove_padding(grad_X, padding)\r\n\r\n X.backward(grad_X)\r\n\r\n\r\n\r\nclass AvgPool2d:\r\n \r\n def __init__(self, kernel_size, stride=None, padding=0):\r\n self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)\r\n self.stride = stride if isinstance(stride, tuple) else (stride, stride) if stride else self.kernel_size\r\n self.padding = padding if isinstance(padding, tuple) else (padding, padding)\r\n\r\n self.input_size = None\r\n\r\n def build(self):\r\n self.kernel_height, self.kernel_width = self.kernel_size\r\n self.input_height, self.input_width = self.input_size[2:]\r\n\r\n if self.padding == \"valid\":\r\n self.padding == (0, 0, 0, 0)\r\n elif self.padding == \"same\" or self.padding == \"real same\":\r\n if self.padding == \"same\":\r\n padding_up_down = self.dilation[0] * (self.kernel_height - 1) - self.stride[0] + 1 \r\n padding_left_right = self.dilation[1] * (self.kernel_width - 1) - 
self.stride[1] + 1\r\n elif self.padding == \"real same\":\r\n padding_up_down = (self.stride[0] - 1) * (self.input_height - 1) + self.dilation[0] * (self.kernel_height - 1)\r\n padding_left_right = (self.stride[1] - 1) * (self.input_width - 1) + self.dilation[1] * (self.kernel_width - 1)\r\n\r\n if padding_up_down % 2 == 0:\r\n padding_up, padding_down = padding_up_down // 2, padding_up_down // 2\r\n else:\r\n padding_up, padding_down = padding_up_down // 2, padding_up_down - padding_up_down // 2\r\n\r\n if padding_left_right % 2 == 0:\r\n padding_left, padding_right = padding_left_right // 2, padding_left_right // 2\r\n else:\r\n padding_left, padding_right = padding_left_right // 2, padding_left_right - padding_left_right // 2\r\n \r\n\r\n self.padding = (abs(padding_up), abs(padding_down), abs(padding_left), abs(padding_right))\r\n\r\n elif len(self.padding) == 2:\r\n self.padding = (self.padding[0], self.padding[0], self.padding[1], self.padding[1]) #(up, down, left, right) padding ≃ (2 * vertical, 2 *horizontal) padding\r\n\r\n\r\n self.output_height = (self.input_height + self.padding[0] + self.padding[1] - self.kernel_size[0]) // self.stride[0] + 1\r\n self.output_width = (self.input_width + self.padding[2] + self.padding[3] - self.kernel_size[1]) // self.stride[1] + 1\r\n self.output_size = (self.output_height, self.output_width)\r\n\r\n # self.stride_compared_input_height = (self.output_height - 1) * self.stride[0] - self.padding[0] - self.padding[1] + self.dilated_kernel_height\r\n # self.stride_compared_input_width = (self.output_width - 1) * self.stride[1] - self.padding[2] - self.padding[3] + self.dilated_kernel_width\r\n \r\n # self.prepared_input_height = (self.stride_compared_input_height + self.padding[0] + self.padding[1])\r\n # self.prepared_input_width = (self.stride_compared_input_width + self.padding[2] + self.padding[3])\r\n\r\n # self.kernel = set_dilation_stride(np.ones((self.kernel_height, self.kernel_width)), self.dilation, value = np.nan)\r\n\r\n\r\n \r\n \r\n def forward(self, X):\r\n if self.input_size is None:\r\n self.input_size = X.shape\r\n self.build()\r\n \r\n X_data = set_padding(X.data, self.padding)\r\n \r\n batch_size, in_channels, in_height, in_width = X_data.shape\r\n \r\n # view every pooling window without copying, via stride tricks\r\n batch_str, channel_str, kern_h_str, kern_w_str = X_data.strides\r\n windows = np.lib.stride_tricks.as_strided(\r\n X_data,\r\n (batch_size, in_channels, self.output_size[0], self.output_size[1], self.kernel_size[0], self.kernel_size[1]),\r\n (batch_str, channel_str, self.stride[0] * kern_h_str, self.stride[1] * kern_w_str, kern_h_str, kern_w_str)\r\n )\r\n\r\n O = np.mean(windows, axis=(4, 5))\r\n\r\n return _AvgPool2dTensor(O, [X, self.kernel_size, self.stride, self.padding, self.input_size, self.output_size, \r\n windows], \"avgpool2d\")\r\n\r\n\r\n def __call__(self, X):\r\n return self.forward(X)\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\ndef set_padding(layer, padding):\r\n padded_layer = np.zeros(\r\n ( \r\n layer.shape[0],\r\n layer.shape[1],\r\n layer.shape[2] + padding[0] + padding[1],\r\n layer.shape[3] + padding[2] + padding[3],\r\n ),\r\n )\r\n\r\n padded_layer[\r\n :,\r\n :,\r\n padding[0] :padded_layer.shape[2] - padding[1],\r\n padding[2] :padded_layer.shape[3] - padding[3],\r\n ] = layer\r\n\r\n return padded_layer\r\n\r\n\r\ndef remove_padding(layer, padding):\r\n # slicing off the (up, down, left, right) border; no intermediate buffer is needed\r\n unpadded_layer = 
layer[\r\n :,\r\n :,\r\n padding[0] :layer.shape[2] - padding[1],\r\n padding[2] :layer.shape[3] - padding[3],\r\n ]\r\n\r\n return unpadded_layer\r\n\r\n\r\n\r\n\r\n\r\n# class Maxpool():\r\n\r\n# def __init__(self, kernel_size, stride, padding, dilation=1): # dilation is not supported yet\r\n# self.kernel_size = kernel_size\r\n# self.stride = stride\r\n# self.padding = padding\r\n# self.dilation = dilation\r\n \r\n\r\n# def forward(self, x):\r\n# self.x = x\r\n# self.n, self.c, self.h, self.w = x.shape\r\n# self.h_out = (self.h + 2*self.padding - self.dilation*(self.kernel_size-1) - 1)//self.stride + 1\r\n# self.w_out = (self.w + 2*self.padding - self.dilation*(self.kernel_size-1) - 1)//self.stride + 1\r\n# self.x_col = self.im2col(x)\r\n# self.x_col = self.x_col.reshape(-1, self.kernel_size*self.kernel_size)\r\n# self.out = np.max(self.x_col, axis=1)\r\n# self.out = self.out.reshape(self.n, self.c, self.h_out, self.w_out)\r\n# return self.out\r\n\r\n# def backward(self, dout):\r\n# dout = dout.reshape(-1, 1)\r\n# self.dx_col = np.zeros_like(self.x_col)\r\n# self.dx_col[np.arange(self.dx_col.shape[0]), np.argmax(self.x_col, axis=1)] = dout.reshape(-1)\r\n# self.dx_col = self.dx_col.reshape(self.n, self.c, self.h_out, self.w_out, self.kernel_size, self.kernel_size)\r\n# self.dx = self.col2im(self.dx_col)\r\n# return self.dx\r\n\r\n# def im2col(self, x):\r\n# x = np.pad(x, ((0, 0), (0, 0), (self.padding, self.padding), (self.padding, self.padding)), 'constant')\r\n# x_col = np.zeros((self.n, self.c, self.h_out, self.w_out, self.kernel_size, self.kernel_size))\r\n# for i in range(self.h_out):\r\n# for j in range(self.w_out):\r\n# x_col[:, :, i, j, :, :] = x[:, :, i*self.stride:i*self.stride+self.kernel_size, j*self.stride:j*self.stride+self.kernel_size]\r\n# x_col = x_col.reshape(-1, self.kernel_size*self.kernel_size)\r\n# return x_col\r\n\r\n# def col2im(self, x_col):\r\n# x_col = x_col.reshape(self.n, self.c, self.h_out, self.w_out, self.kernel_size, self.kernel_size)\r\n# x = np.zeros((self.n, self.c, self.h + 2*self.padding, self.w + 2*self.padding))\r\n# for i in range(self.h_out):\r\n# for j in range(self.w_out):\r\n# x[:, :, i*self.stride:i*self.stride+self.kernel_size, j*self.stride:j*self.stride+self.kernel_size] += x_col[:, :, i, j, :, :]\r\n# return x[:, :, self.padding:self.padding+self.h, self.padding:self.padding+self.w]\r\n\r\n# x = np.random.randn(10, 4, 28, 28)\r\n# layer = Maxpool(2, 3, 0)\r\n# myy = layer.forward(x)\r\n# print(myy.shape)\r\n# print(myy)\r\n\r\n# mydx = layer.backward(np.ones_like(myy))\r\n# print(layer.dx.shape)\r\n# # print(layer.dx)\r\n\r\n# from torch.nn import MaxPool2d\r\n# import torch\r\n\r\n# x = torch.tensor(x, requires_grad=True, dtype=torch.float32)\r\n# layer = MaxPool2d(2, 3, 0)\r\n\r\n# y = layer(x)\r\n# y.backward(torch.ones_like(y))\r\n# print(x.grad)\r\n# print(x.grad.shape)\r\n\r\n# print(np.allclose(y.data, myy.data))\r\n# print(np.allclose(x.grad, mydx))","repo_name":"AkiRusProd/numpy-nn-model","sub_path":"neunet/nn/layers/avgpool2d.py","file_name":"avgpool2d.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"54"} +{"seq_id":"21046273503","text":"from typing import List\n\nfrom pydantic import BaseModel\n\n\nclass Quarter(BaseModel):\n subject_code: str\n course_number: str\n instr_type: str\n instr_method: str\n section: str\n crn: int\n course_title: str\n meet_time: List[str]\n instructor: str\n\n class Config:\n orm_mode = 
True\n","repo_name":"AoWangPhilly/Term-Master-Schedule-API","sub_path":"app/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72011450081","text":"import numpy as np\nimport OandaControl\n\nclass LimitOrderControl:\n def __init__(self) -> None:\n self.OandaIns = OandaControl.OandaControl()\n self.threshold = 0.21\n\n def LimitPriceClac(self):\n #OrderBookを取得\n OrderBook = self.OandaIns.OrderBook()\n \n #現在の為替の中値を取得\n NowPrice = self.OandaIns.NowRate()['ltp']\n #0.05の倍数で丸め込み\n NowPrice = round(NowPrice * 20) / 20\n\n #現在価格を中心に±1と現在の値を0埋めして文字型に\n under = str(NowPrice - 1).ljust(7, '0')\n over = str(NowPrice + 1).ljust(7, '0')\n NowPrice = str(NowPrice).ljust(7, '0')\n\n #underとoverが位置するインデックスを取得\n under = np.where(OrderBook[:, 0]==under)[0][0]\n over = np.where(OrderBook[:, 0]==over)[0][0]\n NowPrice = np.where(OrderBook[:, 0]==NowPrice)[0][0]\n\n #orderbookを切り出して数値型に変換\n LongPrice = OrderBook[under:NowPrice, 0]\n ShortPrice = OrderBook[NowPrice:over, 0]\n LongNet = OrderBook[under:NowPrice, 1].astype(np.float32) - OrderBook[under:NowPrice, 2].astype(np.float32)\n ShortNet = OrderBook[NowPrice:over, 1].astype(np.float32) - OrderBook[NowPrice:over, 2].astype(np.float32)\n\n #価格-純額で2次元リストを作成 \n short = [[ShortPrice[i], ShortNet[i]] for i in range(len(ShortNet))]\n long = [[LongPrice[i], LongNet[i]] for i in range(len(LongNet))][::-1]\n del ShortPrice, LongPrice, ShortNet, LongNet\n\n #short値反転を検出\n #反転の次の値がスタートになるので注意\n shortStart = longStart = 0\n for i in range(len(short)):\n #ショートの場合buyが多い場合検出\n if short[i][1] > 0:\n shortStart = i + 1\n \n #ロングの場合はsellが多い場合検出\n if long[i][1] < 0:\n longStart = i + 1\n \n #結果を格納する変数を作成\n result = {}\n\n #反転の値からスタートしてオーダーが閾より多い部分を切り出す\n for i in range(shortStart, len(short)):\n if self.threshold < abs(short[i][1]):\n result['short'] = short[i][0]\n break\n\n for i in range(longStart, len(long)):\n if self.threshold < long[i][1]:\n result['long'] = long[i][0]\n break\n \n return result","repo_name":"UtsumiYoji/oandaFx","sub_path":"LimitOrderControl.py","file_name":"LimitOrderControl.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"132715235","text":"import config\nimport requests\nimport csv\n\nheaders = {'Authorization' : 'Bearer ' + '%s' % config.API_KEY}\n\ndef main():\n course_id = 109395\n\n #payload = {'term_id[]': term_id}\n\n url = config.API_URL + 'api/v1/' + 'courses/'+ str(course_id)\n print(url)\n results = requests.get(url, headers = headers)\n print(\"The call returned \\\"\" + str(results.status_code) + \"\\\"\")\n\n response = results.text\n print(response)\n\nif __name__ == \"__main__\":\n main()","repo_name":"maespey/EIT_Canvas_Tools","sub_path":"get_course.py","file_name":"get_course.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}