content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
import os
import argparse
import re
import lhtmlLib
meta_default = {
'wrap-auto': False,
'add_title_id': False,
'title': 'Webpage',
'css': [],
'js': [],
'wrap-custom-pre': '',
'wrap-custom-post': '',
'directory_include': [os.getcwd()+'/'],
}
if __name__== '__main__':
parser = argparse.ArgumentParser(description='Lightweight HTML')
parser.add_argument('inputFile', help='Input filepath')
parser.add_argument('-w','--wrapAuto', help='Wrap content in basic HTML template', action='store_true')
parser.add_argument('-o','--outputFile', help='Output filepath')
args = parser.parse_args()
meta_main = {}
if args.wrapAuto==True:
meta_main['wrap-auto']=True
meta_main['directory_include'] = [os.getcwd()+'/']
f_in = args.inputFile
if os.path.isfile(f_in):
dir_to_include = os.path.dirname(f_in)
if len(dir_to_include)>0:
meta_main['directory_include'].append(os.getcwd()+'/'+dir_to_include+'/')
with open(f_in) as fid_in:
txt_in = fid_in.read()
if txt_in[-1]!='\n':
txt_in = txt_in+'\n'
html = run(txt_in, meta_main)
if html[-1]!='\n':
html = html+'\n'
if args.outputFile:
with open(args.outputFile,'w') as f_out:
f_out.write(html)
else:
print(html)
else:
print(f"Error: unrecognized input file [{f_in}]") | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
302,
198,
198,
11748,
300,
6494,
25835,
628,
220,
220,
198,
28961,
62,
12286,
796,
1391,
198,
220,
705,
37150,
12,
23736,
10... | 2.26461 | 616 |
import random
import re
from PIL import Image
from nonebot import on_command, on_message, on_notice, require, get_driver, on_regex
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Message, Event, Bot
from src.libraries.maimaidx.image import *
from random import randint
from src.libraries.dbutil import has_group, get_group_uim
help = on_command('help')
@help.handle()
poke = on_notice(rule=_group_poke, priority=10, block=True)
@poke.handle()
| [
11748,
4738,
198,
11748,
302,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
4844,
13645,
1330,
319,
62,
21812,
11,
319,
62,
20500,
11,
319,
62,
42138,
11,
2421,
11,
651,
62,
26230,
11,
319,
62,
260,
25636,
198,
6738,
4844,
13645... | 3.018987 | 158 |
import sys
from typing import Any, Dict
WIN = sys.platform.startswith("win")
# Versions of all packages possibly used in our tests
# Only apply _exe_if_win to entry_points, NOT scripts
PKG: Dict[str, Dict[str, Any]] = {
"ansible": {
"spec": "ansible==2.9.13",
"apps": [
"ansible",
"ansible-config",
"ansible-connection",
"ansible-console",
"ansible-doc",
"ansible-galaxy",
"ansible-inventory",
"ansible-playbook",
"ansible-pull",
"ansible-test",
"ansible-vault",
],
"apps_of_dependencies": [],
},
"awscli": {
"spec": "awscli==1.18.168",
"apps": [
"aws",
"aws.cmd",
"aws_bash_completer",
"aws_completer",
"aws_zsh_completer.sh",
],
"apps_of_dependencies": _exe_if_win(
[
"pyrsa-decrypt", # rsa EXE
"pyrsa-encrypt", # rsa EXE
"pyrsa-keygen", # rsa EXE
"pyrsa-priv2pub", # rsa EXE
"pyrsa-sign", # rsa EXE
"pyrsa-verify", # rsa EXE
]
)
+ [
"jp.py", # jmespath.py NO_EXE
"rst2html.py", # docutils NO_EXE
"rst2html4.py", # docutils NO_EXE
"rst2html5.py", # docutils NO_EXE
"rst2latex.py", # docutils NO_EXE
"rst2man.py", # docutils NO_EXE
"rst2odt.py", # docutils NO_EXE
"rst2odt_prepstyles.py", # docutils NO_EXE
"rst2pseudoxml.py", # docutils NO_EXE
"rst2s5.py", # docutils NO_EXE
"rst2xetex.py", # docutils NO_EXE
"rst2xml.py", # docutils NO_EXE
"rstpep2html.py", # docutils NO_EXE
],
},
"b2": {
"spec": "b2==2.0.2",
"apps": _exe_if_win(["b2"]),
"apps_of_dependencies": _exe_if_win(["chardetect", "tqdm"]),
},
"beancount": {
"spec": "beancount==2.3.3",
"apps": _exe_if_win(
[
"bean-bake",
"bean-check",
"bean-doctor",
"bean-example",
"bean-extract",
"bean-file",
"bean-format",
"bean-identify",
"bean-price",
"bean-query",
"bean-report",
"bean-sql",
"bean-web",
"treeify",
"upload-to-sheets",
]
),
"apps_of_dependencies": _exe_if_win(
[
"chardetect", # chardet EXE
"py.test", # pytest EXE
"pyrsa-decrypt", # rsa EXE
"pyrsa-encrypt", # rsa EXE
"pyrsa-keygen", # rsa EXE
"pyrsa-priv2pub", # rsa EXE
"pyrsa-sign", # rsa EXE
"pyrsa-verify", # rsa EXE
"pytest", # pytest EXE
]
)
+ ["bottle.py"], # bottle NO_EXE
},
"beets": {
"spec": "beets==1.4.9",
"apps": _exe_if_win(["beet"]),
"apps_of_dependencies": _exe_if_win(
[
"mid3cp",
"mid3iconv",
"mid3v2",
"moggsplit",
"mutagen-inspect",
"mutagen-pony",
"unidecode", # unidecode EXE
]
),
},
"black": {
"spec": "black==20.8b1",
"apps": _exe_if_win(["black", "black-primer", "blackd"]),
"apps_of_dependencies": [],
},
"cactus": {
"spec": "cactus==3.3.3",
"apps": _exe_if_win(["cactus"]),
"apps_of_dependencies": _exe_if_win(["keyring"])
+ [
"asadmin",
"bundle_image",
"cfadmin",
"cq",
"cwutil",
"django-admin.py",
"dynamodb_dump",
"dynamodb_load",
"elbadmin",
"fetch_file",
"glacier",
"instance_events",
"kill_instance",
"launch_instance",
"list_instances",
"lss3",
"markdown2",
"mturk",
"pyami_sendmail",
"route53",
"s3put",
"sdbadmin",
"taskadmin",
],
},
"chert": {
"spec": "chert==19.1.0",
"apps": _exe_if_win(["chert"]),
"apps_of_dependencies": _exe_if_win(["ashes", "markdown_py"]) + ["ashes.py"],
},
"cloudtoken": {
"spec": "cloudtoken==0.1.707",
"apps": ["cloudtoken", "cloudtoken.app", "cloudtoken_proxy.sh", "awstoken"],
"apps_of_dependencies": _exe_if_win(["chardetect", "flask", "keyring"])
+ ["jp.py"],
},
"coala": {
"spec": "coala==0.11.0",
"apps": _exe_if_win(
["coala", "coala-ci", "coala-delete-orig", "coala-format", "coala-json"]
),
"apps_of_dependencies": _exe_if_win(["chardetect", "pygmentize"]) + ["unidiff"],
},
"cookiecutter": {
"spec": "cookiecutter==1.7.2",
"apps": _exe_if_win(["cookiecutter"]),
"apps_of_dependencies": _exe_if_win(["chardetect", "slugify"]),
},
"cython": {
"spec": "cython==0.29.21",
"apps": _exe_if_win(["cygdb", "cython", "cythonize"]),
"apps_of_dependencies": [],
},
"datasette": {
"spec": "datasette==0.50.2",
"apps": _exe_if_win(["datasette"]),
"apps_of_dependencies": _exe_if_win(["hupper", "uvicorn"]) + ["pint-convert"],
},
"diffoscope": {
"spec": "diffoscope==154",
"apps": _exe_if_win(["diffoscope"]),
"apps_of_dependencies": [],
},
"doc2dash": {
"spec": "doc2dash==2.3.0",
"apps": _exe_if_win(["doc2dash"]),
"apps_of_dependencies": _exe_if_win(
[
"chardetect", # chardet EXE
"pybabel", # babel EXE
"pygmentize", # pygments EXE
"sphinx-apidoc", # sphinx EXE
"sphinx-autogen", # sphinx EXE
"sphinx-build", # sphinx EXE
"sphinx-quickstart", # sphinx EXE
]
)
+ [
"rst2html.py", # docutils NO_EXE
"rst2html4.py", # docutils NO_EXE
"rst2html5.py", # docutils NO_EXE
"rst2latex.py", # docutils NO_EXE
"rst2man.py", # docutils NO_EXE
"rst2odt.py", # docutils NO_EXE
"rst2odt_prepstyles.py", # docutils NO_EXE
"rst2pseudoxml.py", # docutils NO_EXE
"rst2s5.py", # docutils NO_EXE
"rst2xetex.py", # docutils NO_EXE
"rst2xml.py", # docutils NO_EXE
"rstpep2html.py", # docutils NO_EXE
],
},
"doitlive": {
"spec": "doitlive==4.3.0",
"apps": _exe_if_win(["doitlive"]),
"apps_of_dependencies": [],
},
"gdbgui": {
"spec": "gdbgui==0.14.0.1",
"apps": _exe_if_win(["gdbgui"]),
"apps_of_dependencies": _exe_if_win(["flask", "pygmentize"]),
},
"gns3-gui": {
"spec": "gns3-gui==2.2.15",
"apps": _exe_if_win(["gns3"]),
"apps_of_dependencies": _exe_if_win(["distro", "jsonschema"]),
},
"grow": {
"spec": "grow==1.0.0a10",
"apps": ["grow"],
"apps_of_dependencies": _exe_if_win(
[
"chardetect", # chardet EXE
"gen_protorpc", # EXE
"html2text", # html2text EXE
"markdown_py", # Markdwon EXE
"pybabel", # babel EXE
"pygmentize", # pygments EXE
"pyrsa-decrypt", # rsa EXE
"pyrsa-encrypt", # rsa EXE
"pyrsa-keygen", # rsa EXE
"pyrsa-priv2pub", # rsa EXE
"pyrsa-sign", # rsa EXE
"pyrsa-verify", # rsa EXE
"slugify", # python_slugify EXE
"watchmedo", # watchdog EXE
]
),
},
"guake": {
"spec": "guake==3.7.0",
"apps": _exe_if_win(["guake", "guake-toggle"]),
"apps_of_dependencies": _exe_if_win(["pbr"]),
},
"gunicorn": {
"spec": "gunicorn==20.0.4",
"apps": _exe_if_win(["gunicorn"]),
"apps_of_dependencies": [],
},
"howdoi": {
"spec": "howdoi==2.0.7",
"apps": _exe_if_win(["howdoi"]),
"apps_of_dependencies": _exe_if_win(["chardetect", "keep", "pygmentize"]),
},
"httpie": {
"spec": "httpie==2.3.0",
"apps": _exe_if_win(["http", "https"]),
"apps_of_dependencies": _exe_if_win(["chardetect", "pygmentize"]),
},
"hyde": {
"spec": "hyde==0.8.9",
"apps": _exe_if_win(["hyde"]),
"apps_of_dependencies": _exe_if_win(["markdown_py", "pygmentize"])
+ ["smartypants"],
},
"ipython": {
"spec": "ipython==7.16.1",
"apps": _exe_if_win(["iptest", "iptest3", "ipython", "ipython3"]),
"apps_of_dependencies": _exe_if_win(["pygmentize"]), # pygments EXE
},
"isort": {
"spec": "isort==5.6.4",
"apps": _exe_if_win(["isort"]),
"apps_of_dependencies": [],
},
"jaraco-financial": {
"spec": "jaraco.financial==2.0",
"apps": _exe_if_win(
[
"clean-msmoney-temp",
"fix-qif-date-format",
"launch-in-money",
"ofx",
"record-document-hashes",
]
),
"apps_of_dependencies": _exe_if_win(["keyring", "chardetect", "calc-prorate"]),
},
"jupyter": {
"spec": "jupyter==1.0.0",
"apps": [],
"apps_of_dependencies": _exe_if_win(
[
"iptest", # EXE
"iptest3", # EXE
"ipython", # EXE
"ipython3", # EXE
"jsonschema", # jsonschema EXE
"jupyter", # EXE
"jupyter-bundlerextension", # EXE
"jupyter-console", # EXE
"jupyter-kernel", # EXE
"jupyter-kernelspec", # EXE
"jupyter-migrate", # EXE
"jupyter-nbconvert", # EXE
"jupyter-nbextension", # EXE
"jupyter-notebook", # EXE
"jupyter-qtconsole", # EXE
"jupyter-run", # EXE
"jupyter-serverextension", # EXE
"jupyter-troubleshoot", # EXE
"jupyter-trust", # EXE
"pygmentize", # pygments EXE
]
),
},
"kaggle": {
"spec": "kaggle==1.5.12",
"apps": _exe_if_win(["kaggle"]),
"apps_of_dependencies": list(
set(_exe_if_win(["chardetect", "slugify", "tqdm"]))
),
},
"kibitzr": {
"spec": "kibitzr==6.0.0",
"apps": _exe_if_win(["kibitzr"]),
"apps_of_dependencies": _exe_if_win(["chardetect", "doesitcache"]),
},
"klaus": {
"spec": "klaus==1.5.2",
"apps": ["klaus"],
"apps_of_dependencies": _exe_if_win(["dulwich", "flask", "pygmentize"])
+ ["dul-receive-pack", "dul-upload-pack"],
},
"kolibri": {
"spec": "kolibri==0.14.3",
"apps": _exe_if_win(["kolibri"]),
"apps_of_dependencies": [],
},
"lektor": {
"spec": "Lektor==3.2.0",
"apps": _exe_if_win(["lektor"]),
"apps_of_dependencies": _exe_if_win(
[
"chardetect", # chardet EXE
"flask", # flask EXE
"pybabel", # babel EXE
"slugify", # python_slugify EXE
"watchmedo", # watchdog EXE
]
)
+ ["EXIF.py"],
},
"localstack": {
"spec": "localstack==0.12.1",
"apps": ["localstack", "localstack.bat"],
"apps_of_dependencies": _exe_if_win(["chardetect", "dulwich"])
+ ["jp.py", "dul-receive-pack", "dul-upload-pack"],
},
"mackup": {
"spec": "mackup==0.8.29",
"apps": _exe_if_win(["mackup"]),
"apps_of_dependencies": [],
}, # ONLY FOR mac, linux
"magic-wormhole": {
"spec": "magic-wormhole==0.12.0",
"apps": _exe_if_win(["wormhole"]),
"apps_of_dependencies": _exe_if_win(
[
"automat-visualize", # EXE
"cftp", # EXE
"ckeygen", # EXE
"conch", # EXE
"mailmail", # EXE
"pyhtmlizer", # EXE
"tkconch", # EXE
"tqdm", # tqdm EXE
"trial", # EXE
"twist", # EXE
"twistd", # EXE
"wamp", # EXE
"xbrnetwork", # EXE
]
)
+ (["pywin32_postinstall.py", "pywin32_testall.py"] if WIN else []),
},
"mayan-edms": {
"spec": "mayan-edms==3.5.2",
"apps": ["mayan-edms.py"],
"apps_of_dependencies": _exe_if_win(
[
"celery", # EXE
"chardetect", # chardet EXE
"django-admin", # EXE
"gunicorn", # EXE
"jsonschema", # jsonschema EXE
"sqlformat", # sqlparse EXE
"swagger-flex", # EXE
"update-tld-names", # # EXE
]
)
+ ["django-admin.py", "jsonpointer"],
},
"mkdocs": {
"spec": "mkdocs==1.1.2",
"apps": _exe_if_win(["mkdocs"]),
"apps_of_dependencies": _exe_if_win(
[
"livereload", # EXE
"futurize", # future EXE
"pasteurize", # future EXE
"nltk", # EXE
"tqdm", # tqdm EXE
"markdown_py", # Markdwon EXE
]
),
},
"mycli": {
"spec": "mycli==1.22.2",
"apps": _exe_if_win(["mycli"]),
"apps_of_dependencies": _exe_if_win(["pygmentize", "sqlformat", "tabulate"]),
},
"nikola": {
"spec": "nikola==8.1.1",
"apps": _exe_if_win(["nikola"]),
"apps_of_dependencies": _exe_if_win(
[
"chardetect", # chardet EXE
"doit", # EXE
"mako-render", # mako EXE
"markdown_py", # Markdwon EXE
"natsort", # EXE
"pybabel", # babel EXE
"pygmentize", # pygments EXE
"unidecode", # unidecode EXE
]
)
+ [
"rst2html.py", # docutils NO_EXE
"rst2html4.py", # docutils NO_EXE
"rst2html5.py", # docutils NO_EXE
"rst2latex.py", # docutils NO_EXE
"rst2man.py", # docutils NO_EXE
"rst2odt.py", # docutils NO_EXE
"rst2odt_prepstyles.py", # docutils NO_EXE
"rst2pseudoxml.py", # docutils NO_EXE
"rst2s5.py", # docutils NO_EXE
"rst2xetex.py", # docutils NO_EXE
"rst2xml.py", # docutils NO_EXE
"rstpep2html.py", # docutils NO_EXE
],
},
"nox": {
"spec": "nox==2020.8.22",
"apps": _exe_if_win(["nox", "tox-to-nox"]),
"apps_of_dependencies": _exe_if_win(["virtualenv"])
+ [
"activate-global-python-argcomplete",
"python-argcomplete-check-easy-install-script",
"python-argcomplete-tcsh",
"register-python-argcomplete",
], # from argcomplete
},
"pelican": {
"spec": "pelican==4.5.0",
"apps": _exe_if_win(
[
"pelican",
"pelican-import",
"pelican-plugins",
"pelican-quickstart",
"pelican-themes",
]
),
"apps_of_dependencies": _exe_if_win(["pygmentize", "unidecode"])
+ [
"rst2html.py", # docutils NO_EXE
"rst2html4.py", # docutils NO_EXE
"rst2html5.py", # docutils NO_EXE
"rst2latex.py", # docutils NO_EXE
"rst2man.py", # docutils NO_EXE
"rst2odt.py", # docutils NO_EXE
"rst2odt_prepstyles.py", # docutils NO_EXE
"rst2pseudoxml.py", # docutils NO_EXE
"rst2s5.py", # docutils NO_EXE
"rst2xetex.py", # docutils NO_EXE
"rst2xml.py", # docutils NO_EXE
"rstpep2html.py", # docutils NO_EXE
],
},
"platformio": {
"spec": "platformio==5.0.1",
"apps": _exe_if_win(["pio", "piodebuggdb", "platformio"]),
"apps_of_dependencies": _exe_if_win(
["chardetect", "pyserial-miniterm", "pyserial-ports", "tabulate"]
)
+ ["bottle.py", "readelf.py"],
},
"ppci": {
"spec": "ppci==0.5.8",
"apps": _exe_if_win(
[
"ppci-archive",
"ppci-asm",
"ppci-build",
"ppci-c3c",
"ppci-cc",
"ppci-dbg",
"ppci-disasm",
"ppci-hexdump",
"ppci-hexutil",
"ppci-java",
"ppci-ld",
"ppci-llc",
"ppci-mkuimage",
"ppci-objcopy",
"ppci-objdump",
"ppci-ocaml",
"ppci-opt",
"ppci-pascal",
"ppci-pedump",
"ppci-pycompile",
"ppci-readelf",
"ppci-wabt",
"ppci-wasm2wat",
"ppci-wasmcompile",
"ppci-wat2wasm",
"ppci-yacc",
]
),
"apps_of_dependencies": [],
},
"prosopopee": {
"spec": "prosopopee==1.1.3",
"apps": _exe_if_win(["prosopopee"]),
"apps_of_dependencies": _exe_if_win(["futurize", "pasteurize", "pybabel"]),
},
"ptpython": {
"spec": "ptpython==3.0.7",
"apps": _exe_if_win(
[
"ptipython",
"ptipython3",
"ptipython3.8",
"ptpython",
"ptpython3",
"ptpython3.8",
]
),
"apps_of_dependencies": _exe_if_win(["pygmentize"]), # pygments EXE
},
"pycowsay": {
"spec": "pycowsay==0.0.0.1",
"apps": _exe_if_win(["pycowsay"]),
"apps_of_dependencies": [],
},
"pylint": {
"spec": "pylint==2.3.1",
"apps": _exe_if_win(["epylint", "pylint", "pyreverse", "symilar"]),
"apps_of_dependencies": _exe_if_win(["isort"]),
},
"retext": {
"spec": "ReText==7.1.0",
"apps": _exe_if_win(["retext"]),
"apps_of_dependencies": _exe_if_win(
[
"chardetect", # chardet EXE
"markdown_py", # Markdwon EXE
"pygmentize", # pygments EXE
"pylupdate5", # EXE
"pyrcc5", # EXE
"pyuic5", # EXE
]
)
+ [
"rst2html.py", # docutils NO_EXE
"rst2html4.py", # docutils NO_EXE
"rst2html5.py", # docutils NO_EXE
"rst2latex.py", # docutils NO_EXE
"rst2man.py", # docutils NO_EXE
"rst2odt.py", # docutils NO_EXE
"rst2odt_prepstyles.py", # docutils NO_EXE
"rst2pseudoxml.py", # docutils NO_EXE
"rst2s5.py", # docutils NO_EXE
"rst2xetex.py", # docutils NO_EXE
"rst2xml.py", # docutils NO_EXE
"rstpep2html.py", # docutils NO_EXE
],
},
"robotframework": {
"spec": "robotframework==3.2.2",
"apps": _exe_if_win(["rebot", "robot"]),
"apps_of_dependencies": [],
},
"shell-functools": {
"spec": "shell-functools==0.3.0",
"apps": [
"filter",
"foldl",
"foldl1",
"ft-functions",
"map",
"sort_by",
"take_while",
],
"apps_of_dependencies": [],
},
"speedtest-cli": {
"spec": "speedtest-cli==2.1.2",
"apps": _exe_if_win(["speedtest", "speedtest-cli"]),
"apps_of_dependencies": [],
},
"sphinx": {
"spec": "Sphinx==3.2.1",
"apps": _exe_if_win(
["sphinx-apidoc", "sphinx-autogen", "sphinx-build", "sphinx-quickstart"]
),
"apps_of_dependencies": _exe_if_win(["chardetect", "pybabel", "pygmentize"])
+ [
"rst2html.py", # docutils NO_EXE
"rst2html4.py", # docutils NO_EXE
"rst2html5.py", # docutils NO_EXE
"rst2latex.py", # docutils NO_EXE
"rst2man.py", # docutils NO_EXE
"rst2odt.py", # docutils NO_EXE
"rst2odt_prepstyles.py", # docutils NO_EXE
"rst2pseudoxml.py", # docutils NO_EXE
"rst2s5.py", # docutils NO_EXE
"rst2xetex.py", # docutils NO_EXE
"rst2xml.py", # docutils NO_EXE
"rstpep2html.py", # docutils NO_EXE
],
},
"sqlmap": {
"spec": "sqlmap==1.4.10",
"apps": _exe_if_win(["sqlmap"]),
"apps_of_dependencies": [],
},
"streamlink": {
"spec": "streamlink==1.7.0",
"apps": _exe_if_win(["streamlink"] + (["streamlinkw"] if WIN else [])),
"apps_of_dependencies": _exe_if_win(["chardetect"]) + ["wsdump.py"],
},
"taguette": {
"spec": "taguette==0.9.2",
"apps": _exe_if_win(["taguette"]),
"apps_of_dependencies": _exe_if_win(["alembic", "mako-render"])
+ ["vba_extract.py"],
},
"term2048": {
"spec": "term2048==0.2.7",
"apps": _exe_if_win(["term2048"]),
"apps_of_dependencies": [],
},
"tox-ini-fmt": {
"spec": "tox-ini-fmt==0.5.0",
"apps": _exe_if_win(["tox-ini-fmt"]),
"apps_of_dependencies": _exe_if_win(["py.test", "pytest"]), # pytest EXE
},
"visidata": {
"spec": "visidata==2.0.1",
"apps": _exe_if_win(["visidata"]) + ["vd"],
"apps_of_dependencies": [],
},
"vulture": {
"spec": "vulture==2.1",
"apps": _exe_if_win(["vulture"]),
"apps_of_dependencies": [],
},
"weblate": {
"spec": "Weblate==4.3.1",
"apps": _exe_if_win(["weblate"]),
"apps_of_dependencies": _exe_if_win( # TODO: check if _exe_if_win (can't install)
[
"borg",
"borgfs",
"build_firefox.sh",
"build_tmdb",
"buildxpi.py",
"celery",
"chardetect", # chardet EXE
"csv2po",
"csv2tbx",
"cygdb",
"cython",
"cythonize",
"django-admin",
"django-admin.py", # NO_EXE
"flatxml2po",
"get_moz_enUS.py",
"html2po",
"html2text", # html2text EXE
"ical2po",
"idml2po",
"ini2po",
"json2po",
"jsonschema", # jsonschema EXE
"junitmsgfmt",
"misaka",
"moz2po",
"mozlang2po",
"odf2xliff",
"oo2po",
"oo2xliff",
"php2po",
"phppo2pypo",
"po2csv",
"po2flatxml",
"po2html",
"po2ical",
"po2idml",
"po2ini",
"po2json",
"po2moz",
"po2mozlang",
"po2oo",
"po2php",
"po2prop",
"po2rc",
"po2resx",
"po2sub",
"po2symb",
"po2tiki",
"po2tmx",
"po2ts",
"po2txt",
"po2web2py",
"po2wordfast",
"po2xliff",
"po2yaml",
"poclean",
"pocommentclean",
"pocompendium",
"pocompile",
"poconflicts",
"pocount",
"podebug",
"pofilter",
"pogrep",
"pomerge",
"pomigrate2",
"popuretext",
"poreencode",
"porestructure",
"posegment",
"posplit",
"poswap",
"pot2po",
"poterminology",
"pretranslate",
"prop2po",
"pydiff",
"pypo2phppo",
"rc2po",
"resx2po",
"sqlformat", # sqlparse EXE
"sub2po",
"symb2po",
"tbx2po",
"tiki2po",
"tmserver",
"ts2po",
"txt2po",
"web2py2po",
"weblate-discover",
"xliff2odf",
"xliff2oo",
"xliff2po",
"yaml2po",
]
),
},
"youtube-dl": {
"spec": "youtube-dl==2020.9.20",
"apps": _exe_if_win(["youtube-dl"]),
"apps_of_dependencies": [],
},
"zeo": {
"spec": "ZEO==5.2.2",
"apps": _exe_if_win(["runzeo", "zeo-nagios", "zeoctl", "zeopack"]),
"apps_of_dependencies": _exe_if_win(
[
"fsdump",
"fsoids",
"fsrefs",
"fstail",
"repozo",
"zconfig",
"zconfig_schema2html",
"zdaemon",
]
),
},
}
| [
11748,
25064,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
198,
198,
37620,
796,
25064,
13,
24254,
13,
9688,
2032,
342,
7203,
5404,
4943,
628,
198,
198,
2,
18535,
507,
286,
477,
10392,
5457,
973,
287,
674,
5254,
198,
2,
5514,
4174,
4... | 1.587806 | 16,434 |
from __future__ import division
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.laf import ElementAggregationLayer, FractionalElementAggregationLayer, ScatterAggregationLayer
from itertools import repeat
#import laf
from ..inits import uniform
import lhsmdu
class SAGEConv(MessagePassing):
r"""The GraphSAGE operator from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper
.. math::
\mathbf{\hat{x}}_i &= \mathbf{\Theta} \cdot
\mathrm{mean}_{j \in \mathcal{N(i) \cup \{ i \}}}(\mathbf{x}_j)
\mathbf{x}^{\prime}_i &= \frac{\mathbf{\hat{x}}_i}
{\| \mathbf{\hat{x}}_i \|_2}.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
normalize (bool, optional): If set to :obj:`True`, output features
will be :math:`\ell_2`-normalized. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def forward(self, x, edge_index, edge_weight=None, size=None):
""""""
if size is None and torch.is_tensor(x):
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, 1, x.size(0))
x = self.propagate(edge_index, size=size, x=x,
edge_weight=edge_weight)
if torch.is_tensor(x):
x = torch.matmul(x, self.weight)
else:
x = (None if x[0] is None else torch.matmul(x[0], self.weight),
None if x[1] is None else torch.matmul(x[1], self.weight))
return F.relu(x)
class SAGEConvCorrect(MessagePassing):
r"""The GraphSAGE operator from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper
.. math::
\mathbf{\hat{x}}_i &= \mathbf{\Theta} \cdot
\mathrm{mean}_{j \in \mathcal{N(i) \cup \{ i \}}}(\mathbf{x}_j)
\mathbf{x}^{\prime}_i &= \frac{\mathbf{\hat{x}}_i}
{\| \mathbf{\hat{x}}_i \|_2}.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
normalize (bool, optional): If set to :obj:`True`, output features
will be :math:`\ell_2`-normalized. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def forward(self, x, edge_index, edge_weight=None, size=None):
""""""
if size is None and torch.is_tensor(x):
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, 1, x.size(0))
x = self.propagate(edge_index, size=size, x=x,
edge_weight=edge_weight)
if torch.is_tensor(x):
x = torch.matmul(x, self.weight)
else:
x = (None if x[0] is None else torch.matmul(x[0], self.weight),
None if x[1] is None else torch.matmul(x[1], self.weight))
return F.relu(x)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
20471,
1330,
25139,
2357,
198,
6738,
28034,
62,
469,
16996,
13,
20471,
13,
42946,
1330,
16000,
14478,
278,
... | 2.194805 | 1,617 |
# 3rd party
import numpy # type: ignore
import pytest
# this package
from si_unit_pandas import CelsiusType, TemperatureArray
_non_empty_sets = [
{1},
{1, 2},
{1, 2, 3},
{1, 2, 3, 4},
{1, 2, 3, 4, 5},
]
_non_empty_lists = [
[1],
[1, 2],
[1, 2, 3],
[1, 2, 3, 4],
[1, 2, 3, 4, 5],
]
_non_empty_tuples = [
(1, ),
(1, 2),
(1, 2, 3),
(1, 2, 3, 4),
(1, 2, 3, 4, 5),
]
_non_empty_arrays = [
numpy.array([1]),
numpy.array([1, 2]),
numpy.array([1, 2, 3]),
numpy.array([1, 2, 3, 4]),
numpy.array([1, 2, 3, 4, 5]),
]
@pytest.mark.parametrize("seq", [*_non_empty_sets, *_non_empty_lists, *_non_empty_tuples, *_non_empty_arrays])
@pytest.mark.parametrize("seq", _non_empty_arrays)
@pytest.mark.parametrize("seq", [*_non_empty_sets, *_non_empty_lists, *_non_empty_tuples, *_non_empty_arrays])
@pytest.mark.parametrize("seq", [*_non_empty_lists, *_non_empty_tuples, *_non_empty_arrays])
@pytest.mark.parametrize(
"seq", [
[],
(),
{},
numpy.array([]),
*_non_empty_sets,
*_non_empty_lists,
*_non_empty_tuples,
*_non_empty_arrays,
]
)
| [
2,
513,
4372,
2151,
198,
11748,
299,
32152,
220,
1303,
2099,
25,
8856,
198,
11748,
12972,
9288,
198,
198,
2,
428,
5301,
198,
6738,
33721,
62,
20850,
62,
79,
392,
292,
1330,
34186,
6030,
11,
34467,
19182,
198,
198,
62,
13159,
62,
289... | 1.883721 | 602 |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), 'helpers'))
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../') | [
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
16794,
364,
6,
4008,
198,
1820,
15235,
796,
28686,
13,
6978,
13,
15908,
... | 2.444444 | 72 |
import requests
import sys
import random
from time import sleep
from collections import deque
from lxml import html
# The class that handles craigslist item monitoring and parsing.
# Flushes the stdout of the scraper (a file) so that output is live.
# Interface for creating the scraper.
| [
11748,
7007,
198,
11748,
25064,
198,
11748,
4738,
198,
6738,
640,
1330,
3993,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
300,
19875,
1330,
27711,
628,
198,
2,
383,
1398,
326,
17105,
15671,
40704,
2378,
9904,
290,
32096,
13,
628,
198,... | 4.083333 | 72 |
import os
from django.shortcuts import render, HttpResponse, redirect
from django.http import Http404
from django.contrib import messages
from django.utils import timezone
from django.contrib.auth import logout
from . import forms
from . import models
MUSIC_TYPES = ['mp3', 'ogg', 'm4a', 'wav', 'opus']
VIDEO_TYPES = ['mp4', 'webm',]
MAX_SPACE = 524_288_000
| [
11748,
28686,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
367,
29281,
31077,
11,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208... | 2.824427 | 131 |
import construct as cs
import construct_typed as cst
import dataclasses
import typing as t
from . import GalleryItem
constr = cs.Struct(
"permissions" / cs.FlagsEnum(cs.Int8ul, R=4, W=2, X=1),
)
gallery_item = GalleryItem(
construct=constr,
example_binarys={
"read": bytes([4]),
"read_write": bytes([6]),
},
) | [
11748,
5678,
355,
50115,
198,
11748,
5678,
62,
774,
9124,
355,
269,
301,
198,
11748,
4818,
330,
28958,
198,
11748,
19720,
355,
256,
198,
6738,
764,
1330,
12917,
7449,
628,
198,
1102,
2536,
796,
50115,
13,
44909,
7,
198,
220,
220,
220,... | 2.482014 | 139 |
nums = [12,100,14,27,33]
for num in nums:
if num%5 == 0:
print(num)
break
else:
print("Not found")
| [
77,
5700,
796,
685,
1065,
11,
3064,
11,
1415,
11,
1983,
11,
2091,
60,
198,
198,
1640,
997,
287,
997,
82,
25,
198,
220,
220,
220,
611,
997,
4,
20,
6624,
657,
25,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7,
22510,
8,
198,
... | 1.734177 | 79 |
import glob
import multiprocessing
import os
import json
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Load Secrets
secrets = load_secrets()
# Get a secret
proc_name = 'districtwebsites'
pidfile = '/var/run/gunicorn/www_slcschools_org.pid'
worker_tmp_dir = '/srv/gunicorn/www_slcschools_org'
bind = 'unix:/var/run/gunicorn/www_slcschools_org.sock'
workers = multiprocessing.cpu_count() * 3 + 1
worker_class = 'sync'
timeout = 3600
raw_env = [
'DJANGO_SETTINGS_MODULE={0}'.format(get_secret('DJANGO_SETTINGS_MODULE')),
]
reload = get_secret('GUNICORN_RELOAD')
if reload:
reload_extra_files = watch_extra_files()
| [
11748,
15095,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
198,
2,
10934,
13532,
2641,
262,
1628,
588,
428,
25,... | 2.568562 | 299 |
fat=1
n=5
while n>=1:
fat = fat * n
n-=1
print(f"O fatoria de 5! = {fat}") | [
17359,
28,
16,
198,
77,
28,
20,
198,
4514,
299,
29,
28,
16,
25,
198,
220,
220,
220,
3735,
796,
3735,
1635,
299,
198,
220,
220,
220,
299,
12,
28,
16,
198,
198,
4798,
7,
69,
1,
46,
277,
1352,
544,
390,
642,
0,
796,
1391,
17359... | 1.693878 | 49 |
from django.contrib import admin
from .models import Movie
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
15875,
198
] | 3.933333 | 15 |
# Copyright 2019 Ivan Bondarenko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
import pickle
import sys
from typing import Tuple, List
from sklearn.datasets import fetch_20newsgroups
try:
from impartial_text_cls.impartial_text_cls import ImpartialTextClassifier
from impartial_text_cls.utils import parse_hidden_layers_description
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from impartial_text_cls.impartial_text_cls import ImpartialTextClassifier
from impartial_text_cls.utils import parse_hidden_layers_description
if __name__ == '__main__':
main()
| [
2,
15069,
13130,
21798,
12812,
5757,
7204,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 3.380117 | 342 |
# Generated by Django 2.0 on 2018-08-10 16:28
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
2864,
12,
2919,
12,
940,
1467,
25,
2078,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
"""This module provide multiple test of ingestion services"""
import shutil
from time import sleep
from conftest import ValueStorage
import logging
import json
from server_automation.configuration import config
from discrete_kit.validator.json_compare_pycsw import *
from discrete_kit.functions.shape_functions import ShapeToJSON
from server_automation.functions.executors import *
from server_automation.postgress import postgress_adapter
_log = logging.getLogger("server_automation.tests.test_ingestion_discrete")
if config.DEBUG_MODE_LOCAL:
initial_mapproxy_config = postgress_adapter.get_mapproxy_configs()
def test_manual_discrete_ingest():
    """
    End-to-end test of the manual (API-triggered) discrete ingestion flow:
    prepare a source layer directory, trigger ingestion, follow the job to
    completion, then validate the resulting pycsw record and mapproxy layer.
    """
    # Ensure the watch agent is stopped so only the manual trigger starts a job.
    stop_watch()
    # ================================================================================================================ #
    # Stage 1: create/update the layerSource folder for this run.
    try:
        resp = init_ingestion_src(config.TEST_ENV)
        error_msg = None
    except Exception as e:
        resp = None
        error_msg = str(e)
    assert (
        resp
    ), f"Test: [{test_manual_discrete_ingest.__name__}] Failed: on creating and updating layerSource folder [{error_msg}]"
    _log.info(f"{resp}")
    # triggering and validate start of new manuel job
    product_id, product_version = resp["resource_name"].split("-")
    ValueStorage.discrete_list.append(
        {"product_id": product_id, "product_version": product_version}
    )
    source_directory = resp["ingestion_dir"]
    _log.info(f"{product_id} {product_version}")
    sleep(5)
    # Persist the run parameters to a scratch file for post-mortem debugging.
    write_text_to_file('//tmp//shlomo.txt',
                       {'source_dir': source_directory, 'product_id_version': ValueStorage.discrete_list,
                        'test_name': test_manual_discrete_ingest.__name__,
                        'folder_to_delete': ValueStorage.folder_to_delete})
    # ================================================================================================================ #
    # Stage 2: trigger the manual ingestion request.
    try:
        status_code, content, source_data = start_manual_ingestion(
            source_directory, config.TEST_ENV
        )
    except Exception as e:
        status_code = "unknown"
        content = str(e)
    assert status_code == config.ResponseCode.Ok.value, (
        f"Test: [{test_manual_discrete_ingest.__name__}] Failed: trigger new ingest with status code: [{status_code}]\n"
        f"details: [{content}]"
    )
    _log.info(f"manual ingestion - source_data: {source_data}")
    _log.info(f"manual ingestion - content: {content}")
    _log.info(f"manual ingestion - status code: {status_code}")
    # ================================================================================================================ #
    # validating following and completion of ingestion job
    try:
        if config.FOLLOW_JOB_BY_MANAGER:  # following based on job manager api
            _log.info("Start following job-tasks based on job manager api")
            ingestion_follow_state = follow_running_job_manager(
                product_id, product_version
            )
        else:  # following based on bff service
            ingestion_follow_state = follow_running_task(product_id, product_version)
        resp = ingestion_follow_state["status"] == config.JobStatus.Completed.name
        error_msg = ingestion_follow_state["message"]
    except Exception as e:
        resp = None
        error_msg = str(e)
    assert (
        resp
    ), f"Test: [{test_manual_discrete_ingest.__name__}] Failed: on following ingestion process [{error_msg}]"
    _log.info(f"manual ingestion following task response: {resp}")
    # this timeout is for mapproxy updating time of new layer on configuration
    sleep(config.SYSTEM_DELAY)
    pycsw_record = None
    # ================================================================================================================ #
    # validate new discrete on pycsw records
    try:
        resp, pycsw_record, links = validate_pycsw2(
            source_data, product_id, product_version
        )
        # todo this is legacy records validator based graphql -> for future needs maybe
        # resp, pycsw_record = executors.validate_pycsw(config.GQK_URL, product_id, source_data)
        state = resp["validation"]
        error_msg = resp["reason"]
    except Exception as e:
        state = False
        error_msg = str(e)
    if config.VALIDATION_SWITCH:
        assert state, (
            f"Test: [{test_manual_discrete_ingest.__name__}] Failed: validation of pycsw record\n"
            f"related errors:\n"
            f"{error_msg}"
        )
    _log.info(f"manual ingestion validation - response: {resp}")
    _log.info(f"manual ingestion validation - pycsw_record: {pycsw_record}")
    _log.info(f"manual ingestion validation - links: {links}")
    # ================================================================================================================ #
    # validating new discrete on mapproxy
    try:
        resp = validate_new_discrete(pycsw_record, product_id, product_version)
        state = resp["validation"]
        error_msg = resp["reason"]
    except Exception as e:
        state = False
        error_msg = str(e)
    if config.VALIDATION_SWITCH:
        assert state, (
            f"Test: [{test_manual_discrete_ingest.__name__}] Failed: validation of mapproxy layer\n"
            f"related errors:\n"
            f"{error_msg}"
        )
    _log.info(f"manual ingestion validate new discrete - response: {resp}")
    # Local-debug runs restore the original mapproxy configuration snapshot.
    if config.DEBUG_MODE_LOCAL:
        cleanup_env(product_id, product_version, initial_mapproxy_config)
def test_watch_discrete_ingest():
    """
    End-to-end test of the watch-based ingestion flow: restart from a clean
    (stopped) watch agent, stage a source layer into the watched shared
    folder, let the agent trigger ingestion, follow the job to completion,
    then validate the resulting pycsw record and mapproxy layer.
    """
    # config.TEST_ENV = 'PROD'
    # stop watching folder as prerequisites
    try:
        resp = stop_watch()
        state = resp["state"]
        error_msg = resp["reason"]
    except Exception as e:
        state = False
        error_msg = str(e)
    assert (
        state
    ), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on stop agent watch [{error_msg}]"
    _log.info(f"watch ingestion - stop watch response: {resp}")
    # Stage the source layer under the watched directory.
    try:
        resp = init_watch_ingestion_src(config.TEST_ENV)
        error_msg = None
    except Exception as e:
        resp = None
        error_msg = str(e)
    assert (
        resp
    ), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on creating and updating layerSource folder [{error_msg}]"
    _log.info(f"{resp}")
    # triggering and validate start of new manuel job
    product_id, product_version = resp["resource_name"].split("-")
    ValueStorage.discrete_list.append(
        {"product_id": product_id, "product_version": product_version}
    )
    source_directory = resp["ingestion_dir"]
    # Remember the relative path under /watch/ so teardown can delete it.
    ValueStorage.folder_to_delete = source_directory.split("/watch/")[-1]
    _log.info(f"{product_id} {product_version}")
    _log.info(f"watch ingestion init - source_directory: {source_directory}")
    # Persist the run parameters to a scratch file for post-mortem debugging.
    write_text_to_file('//tmp//shlomo.txt',
                       {'source_dir': source_directory, 'product_id_version': ValueStorage.discrete_list,
                        'test_name': test_watch_discrete_ingest.__name__,
                        'folder_to_delete': ValueStorage.folder_to_delete})
    # NOTE(review): if start_watch_ingestion() raises, `state` keeps its value
    # from the stop-watch stage (True) so the assert below cannot fail, and on
    # the non-exception failure path `status_code` is never assigned (the
    # f-string would raise NameError). Looks like a latent bug — verify.
    try:
        state, content, source_data = start_watch_ingestion(
            source_directory, config.TEST_ENV
        )
    except Exception as e:
        status_code = "unknown"
        content = str(e)
    assert state, (
        f"Test: [{test_watch_discrete_ingest.__name__}] Failed: Trigger ingest process from watch agent: [{status_code}]\n"
        f"details: [{content}]"
    )
    _log.info(f"watch ingestion ,start watch - state: {state}")
    _log.info(f"watch ingestion ,start watch - content: {content}")
    _log.info(f"watch ingestion ,start watch - source_data: {source_data}")
    sleep(config.SYSTEM_DELAY)  # validate generation of new job
    # validating following and completion of ingestion job
    try:
        if config.FOLLOW_JOB_BY_MANAGER:  # following based on job manager api
            _log.info("Start following job-tasks based on job manager api")
            ingestion_follow_state = follow_running_job_manager(
                product_id, product_version
            )
        else:  # following based on bff service
            ingestion_follow_state = follow_running_task(product_id, product_version)
            _log.info("Start following job-tasks based on bff api")
        resp = ingestion_follow_state["status"] == config.JobStatus.Completed.name
        error_msg = ingestion_follow_state["message"]
    except Exception as e:
        resp = None
        error_msg = str(e)
    assert (
        resp
    ), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on following ingestion process [{error_msg}]"
    _log.info(f"watch ingestion following task response:{resp}")
    # this timeout is for mapproxy updating time of new layer on configuration
    sleep(config.SYSTEM_DELAY)
    pycsw_record = None
    # validate new discrete on pycsw records
    try:
        # shape_folder_path = executors.get_folder_path_by_name(source_directory, 'Shape')
        # read_json_from_shape_file = ShapeToJSON(shape_folder_path)
        resp, pycsw_record, links = validate_pycsw2(
            source_data, product_id, product_version
        )
        # todo this is legacy records validator based graphql -> for future needs maybe
        # resp, pycsw_record = executors.validate_pycsw(config.GQK_URL, product_id, source_data)
        state = resp["validation"]
        error_msg = resp["reason"]
    except Exception as e:
        state = False
        error_msg = str(e)
    if config.VALIDATION_SWITCH:
        assert state, (
            f"Test: [{test_watch_discrete_ingest.__name__}] Failed: validation of pycsw record\n"
            f"related errors:\n"
            f"{error_msg}"
        )
    _log.info(f"watch ingestion ,watch validation - response: {resp}")
    _log.info(f"watch ingestion ,watch validation - pycsw_record: {pycsw_record}")
    _log.info(f"watch ingestion ,watch validation - links: {links}")
    # validating new discrete on mapproxy
    try:
        resp = validate_new_discrete(pycsw_record, product_id, product_version)
        state = resp["validation"]
        error_msg = resp["reason"]
    except Exception as e:
        state = False
        error_msg = str(e)
    if config.VALIDATION_SWITCH:
        assert state, (
            f"Test: [{test_watch_discrete_ingest.__name__}] Failed: validation of mapproxy layer\n"
            f"related errors:\n"
            f"{error_msg}"
        )
    # Leave the environment with the watch agent stopped again.
    resp = stop_watch()
    _log.info(
        f'watch ingestion, Finish running watch ingestion. Watch status: [{resp["reason"]}]'
    )
    # if config.DEBUG_MODE_LOCAL:
    #     cleanup_env(product_id, product_version, initial_mapproxy_config)
def teardown_module(module):  # pylint: disable=unused-argument
    """
    pytest module-level teardown, executed once after all tests in this
    module: stops the watch agent and, when CLEAN_UP is enabled, deletes the
    ingestion source folders (PVC on QA/DEV, NFS on PROD).
    """
    stop_watch()
    pvc_handler = azure_pvc_api.PVCHandler(
        endpoint_url=config.PVC_HANDLER_ROUTE, watch=False
    )
    if config.CLEAN_UP:
        if config.VALIDATION_SWITCH:
            if (
                config.TEST_ENV == config.EnvironmentTypes.QA.name
                or config.TEST_ENV == config.EnvironmentTypes.DEV.name
            ):
                # ToDo : Handle PVC - test it
                try:
                    error_msg = None
                    resp = pvc_handler.delete_ingestion_directory(
                        api=config.PVC_DELETE_DIR
                    )
                    # folder_param=ValueStorage.folder_to_delete,
                except Exception as e:
                    resp = None
                    error_msg = str(e)
                assert (
                    resp
                ), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on following ingestion process (Folder delete) : [{error_msg}]"
                _log.info(f"Teardown - Finish PVC folder deletion")
            elif config.TEST_ENV == config.EnvironmentTypes.PROD.name:
                # PROD cleans a shared NFS mount directly from this process.
                if os.path.exists(config.NFS_ROOT_DIR_DEST):
                    shutil.rmtree(config.NFS_ROOT_DIR_DEST)
                    _log.info(f"Teardown - Finish NFS folder deletion")
                else:
                    raise NotADirectoryError(
                        f"Failed to delete directory because it doesnt exists: [{config.NFS_ROOT_DIR_DEST}]"
                    )
            else:
                raise ValueError(f"Illegal environment value type: {config.TEST_ENV}")
# Local-debug, import-time hooks (active only when DEBUG_MODE_LOCAL is set):
# clean up discretes recorded by earlier runs and override zoom-related config
# knobs before the (commented-out) manual test invocations below.
if config.CLEAN_UP and config.DEBUG_MODE_LOCAL:
    for p in ValueStorage.discrete_list:
        cleanup_env(p["product_id"], p["product_version"], initial_mapproxy_config)
if config.DEBUG_MODE_LOCAL:
    config.PVC_UPDATE_ZOOM = True
    config.MAX_ZOOM_TO_CHANGE = 4  # semantics defined by config — verify
# test_manual_discrete_ingest()
# test_watch_discrete_ingest()
| [
37811,
1212,
8265,
2148,
3294,
1332,
286,
38382,
2594,
37811,
198,
11748,
4423,
346,
198,
6738,
640,
1330,
3993,
198,
6738,
369,
701,
395,
1330,
11052,
31425,
198,
11748,
18931,
198,
11748,
33918,
198,
6738,
4382,
62,
2306,
296,
341,
13... | 2.405714 | 5,425 |
import sympy, random, pygame
from sympy import *
from pygame.locals import *
from Files import Var, Sprite, Thing
# Symbolic variables used to build the quiz expressions.
x, y = symbols("x y")
pygame.font.init()
WHITE = (255, 255, 255)
# frames/waiting are game-loop state; presumably updated in the omitted part
# of the module — confirm against the full file.
frames = 0
waiting = 'done'
# Question banks: expressions to differentiate or integrate.
derivatives = [x + 3, x**2 + 5, x**2 + 6*x, tan(x), sec(x)]
integrals = [x + 5, x + 2, 1/(1 + x**2)]
questiontypes = [derivatives, integrals]
# Pick a random category at import time, then a random expression from it.
questiontype = random.choice(questiontypes)
question = random.choice(questiontype)
if questiontype == integrals:
    kind = "integral"
elif questiontype == derivatives:
    kind = "derivative"
response = ''
# NOTE natural log does not work without a special function.
# integral -1/sqrt(1-x**2) returns -asin(x) NOT acos(x) error with simplify
# Do not use e. Sympy's syntax for e is exp() so it will be too confusing for audience.
| [
11748,
10558,
88,
11,
4738,
11,
12972,
6057,
201,
198,
6738,
10558,
88,
1330,
1635,
201,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
201,
198,
6738,
13283,
1330,
12372,
11,
33132,
11,
21561,
201,
198,
201,
198,
87,
11,
331,
... | 2.585139 | 323 |
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Padmanabhan Krishnan, Cisco Systems, Inc.
#
# Service Constants
import dfa.server.services.constants as services_const
# --- Firewall service provisioning defaults ---------------------------------
AUTO_NWK_CREATE = True
DEVICE = ''
SCHED_POLICY = 'max_sched'
# VLAN range is shared with the generic services constants module.
VLAN_ID_MIN = services_const.VLAN_ID_MIN
VLAN_ID_MAX = services_const.VLAN_ID_MAX
MOB_DOMAIN_NAME = 'md0'
# DCNM profile / forwarding-mode names for the host ("in"), partition and
# external ("out") sides of the firewall service.
HOST_PROF = 'serviceNetworkUniversalDynamicRoutingESProfile'
HOST_FWD_MODE = 'proxy-gateway'
PART_PROF = 'vrf-common-universal-external-dynamic-ES'
EXT_PROF = 'externalNetworkUniversalDynamicRoutingESProfile'
EXT_FWD_MODE = 'anycast-gateway'
# CIDR pools for the in/out service networks plus a dummy placeholder subnet.
IN_IP_START = '100.100.2.0/24'
IN_IP_END = '100.100.20.0/24'
OUT_IP_START = '200.200.2.0/24'
OUT_IP_END = '200.200.20.0/24'
DUMMY_IP_SUBNET = '9.9.9.0/24'
# Names of the service networks/subnets and related routers.
IN_SERVICE_SUBNET = 'FwServiceInSub'
IN_SERVICE_NWK = 'FwServiceInNwk'
SERV_PART_NAME = 'CTX-ext'
OUT_SERVICE_SUBNET = 'FwServiceOutSub'
OUT_SERVICE_NWK = 'FwServiceOutNwk'
DUMMY_SERVICE_RTR = 'DUMMY_SRVC_RTR'
DUMMY_SERVICE_NWK = 'DUMMY_SRVC_NWK'
TENANT_EDGE_RTR = 'Cisco_TenantEdge'
FW_TENANT_EDGE = 'TE'
# Firewall operation and overall-result strings.
FW_CR_OP = 'CREATE'
FW_DEL_OP = 'DELETE'
RESULT_FW_CREATE_INIT = 'FAB_CREATE_PEND'
RESULT_FW_CREATE_DONE = 'FAB_CREATE_DONE'
RESULT_FW_DELETE_INIT = 'FAB_DELETE_PEND'
RESULT_FW_DELETE_DONE = 'FAB_DELETE_DONE'
FW_CONST = 'Firewall'
INIT_STATE_STR = 'INIT'
# --- Create-direction state strings (OpenStack then DCNM stages) ------------
OS_IN_NETWORK_CREATE_FAIL = 'OS_IN_NETWORK_CREATE_FAIL'
OS_INIT_STATE = OS_IN_NETWORK_CREATE_FAIL
OS_IN_NETWORK_CREATE_SUCCESS = 'OS_IN_NETWORK_CREATE_SUCCESS'
OS_OUT_NETWORK_CREATE_FAIL = 'OS_OUT_NETWORK_CREATE_FAIL'
OS_OUT_NETWORK_CREATE_SUCCESS = 'OS_OUT_NETWORK_CREATE_SUCCESS'
OS_DUMMY_RTR_CREATE_FAIL = 'OS_DUMMY_RTR_CREATE_FAIL'
OS_DUMMY_RTR_CREATE_SUCCESS = 'OS_DUMMY_RTR_CREATE_SUCCESS'
OS_CREATE_SUCCESS = OS_DUMMY_RTR_CREATE_SUCCESS
DCNM_IN_NETWORK_CREATE_FAIL = 'DCNM_IN_NETWORK_CREATE_FAIL'
DCNM_INIT_STATE = DCNM_IN_NETWORK_CREATE_FAIL
DCNM_IN_NETWORK_CREATE_SUCCESS = 'DCNM_IN_NETWORK_CREATE_SUCCESS'
DCNM_IN_PART_UPDATE_FAIL = 'DCNM_IN_PART_UPDATE_FAIL'
DCNM_IN_PART_UPDATE_SUCCESS = 'DCNM_IN_PART_UPDATE_SUCCESS'
DCNM_OUT_PART_CREATE_FAIL = 'DCNM_OUT_PART_CREATE_FAIL'
DCNM_OUT_PART_CREATE_SUCCESS = 'DCNM_OUT_PART_CREATE_SUCCESS'
DCNM_OUT_NETWORK_CREATE_FAIL = 'DCNM_OUT_NETWORK_CREATE_FAIL'
DCNM_OUT_NETWORK_CREATE_SUCCESS = 'DCNM_OUT_NETWORK_CREATE_SUCCESS'
DCNM_OUT_PART_UPDATE_FAIL = 'DCNM_OUT_PART_UPDATE_FAIL'
DCNM_OUT_PART_UPDATE_SUCCESS = 'DCNM_OUT_PART_UPDATE_SUCCESS'
DCNM_CREATE_SUCCESS = DCNM_OUT_PART_UPDATE_SUCCESS
# FABRIC_PREPARE_SUCCESS = DCNM_OUT_PART_UPDATE_SUCCESS
FABRIC_PREPARE_SUCCESS = 'FABRIC_PREPARE_SUCCESS'
# --- Delete-direction state strings -----------------------------------------
OS_IN_NETWORK_DEL_FAIL = 'OS_IN_NETWORK_DEL_FAIL'
OS_IN_NETWORK_DEL_SUCCESS = 'OS_IN_NETWORK_DEL_SUCCESS'
OS_OUT_NETWORK_DEL_FAIL = 'OS_OUT_NETWORK_DEL_FAIL'
OS_OUT_NETWORK_DEL_SUCCESS = 'OS_OUT_NETWORK_DEL_SUCCESS'
OS_DUMMY_RTR_DEL_FAIL = 'OS_DUMMY_RTR_DEL_FAIL'
OS_DUMMY_RTR_DEL_SUCCESS = 'OS_DUMMY_RTR_DEL_SUCCESS'
OS_DEL_SUCCESS = 'OS_DUMMY_RTR_DEL_SUCCESS'
DCNM_IN_NETWORK_DEL_FAIL = 'DCNM_IN_NETWORK_DEL_FAIL'
DCNM_IN_NETWORK_DEL_SUCCESS = 'DCNM_IN_NETWORK_DEL_SUCCESS'
DCNM_IN_PART_UPDDEL_FAIL = 'DCNM_IN_PART_UPDDEL_FAIL'
DCNM_IN_PART_UPDDEL_SUCCESS = 'DCNM_IN_PART_UPDDEL_SUCCESS'
DCNM_OUT_PART_DEL_FAIL = 'DCNM_OUT_PART_DEL_FAIL'
DCNM_OUT_PART_DEL_SUCCESS = 'DCNM_OUT_PART_DEL_SUCCESS'
DCNM_OUT_NETWORK_DEL_FAIL = 'DCNM_OUT_NETWORK_DEL_FAIL'
DCNM_OUT_NETWORK_DEL_SUCCESS = 'DCNM_OUT_NETWORK_DEL_SUCCESS'
DCNM_OUT_PART_UPDDEL_FAIL = 'DCNM_OUT_PART_UPDDEL_FAIL'
DCNM_OUT_PART_UPDDEL_SUCCESS = 'DCNM_OUT_PART_UPDDEL_SUCCESS'
DCNM_DELETE_SUCCESS = DCNM_IN_NETWORK_DEL_SUCCESS
# --- Numeric state machine indices ------------------------------------------
INIT = 0
MAX_STATE = FABRIC_PREPARE_SUCCESS  # 17
INIT_STATE = 100
OS_IN_NETWORK_STATE = INIT_STATE + 1
OS_OUT_NETWORK_STATE = OS_IN_NETWORK_STATE + 1
OS_DUMMY_RTR_STATE = OS_OUT_NETWORK_STATE + 1
OS_COMPL_STATE = OS_DUMMY_RTR_STATE
DCNM_IN_NETWORK_STATE = OS_DUMMY_RTR_STATE + 1
DCNM_IN_PART_UPDATE_STATE = DCNM_IN_NETWORK_STATE + 1
DCNM_OUT_PART_STATE = DCNM_IN_PART_UPDATE_STATE + 1
DCNM_OUT_NETWORK_STATE = DCNM_OUT_PART_STATE + 1
DCNM_OUT_PART_UPDATE_STATE = DCNM_OUT_NETWORK_STATE + 1
FABRIC_PREPARE_DONE_STATE = DCNM_OUT_PART_UPDATE_STATE + 1
# The below is for debug display
# Maps numeric state -> human-readable name for the create direction.
fw_state_fn_dict = {}
fw_state_fn_dict[INIT_STATE] = 'INIT_STATE'
fw_state_fn_dict[OS_IN_NETWORK_STATE] = 'OS_IN_NETWORK_CREATE_STATE'
fw_state_fn_dict[OS_OUT_NETWORK_STATE] = 'OS_OUT_NETWORK_CREATE_STATE'
fw_state_fn_dict[OS_DUMMY_RTR_STATE] = 'OS_DUMMY_RTR_CREATE_STATE'
fw_state_fn_dict[DCNM_IN_NETWORK_STATE] = 'DCNM_IN_NETWORK_CREATE_STATE'
fw_state_fn_dict[DCNM_IN_PART_UPDATE_STATE] = 'DCNM_IN_PART_UPDATE_STATE'
fw_state_fn_dict[DCNM_OUT_PART_STATE] = 'DCNM_OUT_PART_CREATE_STATE'
fw_state_fn_dict[DCNM_OUT_NETWORK_STATE] = 'DCNM_OUT_NETWORK_CREATE_STATE'
fw_state_fn_dict[DCNM_OUT_PART_UPDATE_STATE] = 'DCNM_OUT_PART_UPDATE_STATE'
fw_state_fn_dict[FABRIC_PREPARE_DONE_STATE] = 'FABRIC_PREPARE_DONE_STATE'
# Maps numeric state -> human-readable name for the delete direction.
fw_state_fn_del_dict = {}
fw_state_fn_del_dict[INIT_STATE] = 'INIT_STATE'
fw_state_fn_del_dict[OS_IN_NETWORK_STATE] = 'OS_IN_NETWORK_DELETE_STATE'
fw_state_fn_del_dict[OS_OUT_NETWORK_STATE] = 'OS_OUT_NETWORK_DELETE_STATE'
fw_state_fn_del_dict[OS_DUMMY_RTR_STATE] = 'OS_DUMMY_RTR_DELETE_STATE'
fw_state_fn_del_dict[DCNM_IN_NETWORK_STATE] = 'DCNM_IN_NETWORK_DELETE_STATE'
fw_state_fn_del_dict[DCNM_IN_PART_UPDATE_STATE] = 'DCNM_IN_PART_UPDDEL_STATE'
fw_state_fn_del_dict[DCNM_OUT_PART_STATE] = 'DCNM_OUT_PART_DELETE_STATE'
fw_state_fn_del_dict[DCNM_OUT_NETWORK_STATE] = 'DCNM_OUT_NETWORK_DELETE_STATE'
fw_state_fn_del_dict[DCNM_OUT_PART_UPDATE_STATE] = 'DCNM_OUT_PART_UPDDEL_STATE'
fw_state_fn_del_dict[FABRIC_PREPARE_DONE_STATE] = 'FABRIC_PREPARE_DONE_STATE'
| [
2,
15069,
1946,
28289,
11998,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
... | 2.127448 | 2,911 |
//中止条件
if i>=m or j>=n:
return;
if i==m-1 and j==n-1:
res.append(ans)
return
//递归调用
dfs(i+1,j,ans,res)
ans.append([i+1,j])
dfs(i,j+1,ans,res)
ans.pop()
ans.append([i,j+1])
ans=[]
res=[]
dfs(0,0,ans,res)
return len(res)
| [
1003,
40792,
29826,
95,
30266,
94,
20015,
114,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
611,
1312,
29,
28,
76,
393,
474,
29,
28,
77,
25,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.329897 | 291 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Openstack Client Test code.
'''
import unittest
import credentials
import osnetwork_client as neutronclient
class OSTest(unittest.TestCase):
    '''
    Test Openstack client handlers (credentials parsing and neutron
    external-network create/list/delete round trip).
    '''

    def get_openstack_credentials(self, rcfile):
        '''
        Load OpenStack cloud credentials from an openrc-style file.

        Returns a credentials dict, or None when no rcfile is given.
        '''
        if rcfile is None:
            return None
        creds = credentials.Credentials(rcfile, None, None)
        credict = creds.get_credentials()
        return credict

    def test_credentials(self):
        '''
        Test reading credentials from the local ./openrc file.
        '''
        credict = self.get_openstack_credentials("./openrc")
        # assertTrue replaces the failUnless alias, which was deprecated for
        # years and removed from unittest in Python 3.12.
        self.assertTrue(credict['username'] == 'admin')
        self.assertTrue(credict['tenant_name'] == 'admin')
        self.assertTrue(credict['password'] == 'password')
        self.assertTrue(
            credict['auth_url'] == 'http://172.22.191.163:5000/v2.0')

    def test_ext_network_create(self):
        '''
        Test external network creation, lookup and deletion via neutron.
        '''
        credict = self.get_openstack_credentials("./openrc")
        neutron_handle = neutronclient.NeutronManage(credict)
        self.assertTrue(neutron_handle is not None)
        # Create external network.
        network = {
            'network': {
                'name': 'ext-net',
                'admin_state_up': True,
                'shared': True,
                'router:external': True,
                'provider:network_type': 'flat',
                'provider:physical_network': 'physnet1'
            }
        }
        subnet = {
            'subnet': {
                'name': 'subnet-ext',
                'enable_dhcp': False,
                'cidr': '172.22.191.160/25',
                'allocation_pools': [{'start': '172.22.191.164',
                                      'end': '172.22.191.169'}],
                'dns_nameservers': ['171.70.168.183'],
                'gateway_ip': '172.22.191.131',
                'ip_version': 4
            }
        }
        # Create network and subnet.
        new_nw = neutron_handle.neutron_create_network(net_dict=network,
                                                       subnet_dict=subnet)
        self.assertTrue(new_nw is not None)
        # Get network based on network name
        new_nw = neutron_handle.neutron_get_networks(network_name="ext-net")
        self.assertTrue(new_nw[0]['router:external'] is True)
        # Get subnet based on network id.
        subnet_list = neutron_handle.neutron_get_subnets(
            network_id=new_nw[0]['id'])
        self.assertTrue(subnet_list[0]['enable_dhcp'] is False)
        # The requested CIDR 172.22.191.160/25 is expected back normalized to
        # its network address, 172.22.191.128/25.
        self.assertTrue(subnet_list[0]['cidr'] == '172.22.191.128/25')
        # Delete network
        newlist = neutron_handle.neutron_delete_networks(
            network_name="ext-net")
        self.assertTrue(len(newlist) == 0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
11505,
25558,
20985,
6208,
2438,
13,
198,
7061,
6,
198,
198,
11748,
555,
715,
395,
198,
11748,
180... | 1.971545 | 1,476 |
#!/usr/bin/env python
import sys
from django.conf import settings
# Minimal in-memory Django settings so the htpayway test suite can run
# standalone (sqlite backend, no middleware).
settings.configure(
    DATABASES={
        'default': {'ENGINE': 'django.db.backends.sqlite3'}
    },
    INSTALLED_APPS=[
        'htpayway',
    ],
    MIDDLEWARE_CLASSES=[],
)
if __name__ == '__main__':
    # NOTE(review): django.test.simple was removed in Django 1.8; this runner
    # only works on older Django versions — confirm the pinned dependency.
    from django.test.simple import run_tests
    failures = run_tests(['htpayway'], verbosity=1)
    sys.exit(failures)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628,
198,
33692,
13,
11250,
495,
7,
198,
220,
220,
220,
360,
1404,
6242,
1921,
1546,
34758,
198,
220,
220,
220,
220,
220,
... | 2.268571 | 175 |
from django.shortcuts import render | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543
] | 4.375 | 8 |
from __future__ import absolute_import
import annoy
from ann_benchmarks.algorithms.base import BaseANN
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
10072,
198,
6738,
1529,
62,
26968,
14306,
13,
282,
7727,
907,
13,
8692,
1330,
7308,
22846,
628
] | 3.851852 | 27 |
from typing import List
import webbrowser
import sys
import requests
import bs4
# Script entry point; main() is presumably defined in the omitted part of the
# module — confirm against the full file.
if __name__ == "__main__":
    main()
# https://www.google.com/search?q=dfs&tbm=nws
# rso > div:nth-child(2) > g-card > div > div > div.dbsr > a
# rso > div:nth-child(1) > g-card > div > div > div.dbsr > a
| [
6738,
19720,
1330,
7343,
198,
11748,
3992,
40259,
198,
11748,
25064,
198,
11748,
7007,
198,
11748,
275,
82,
19,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198,
198,
2,
3740,... | 2.377049 | 122 |
from .logic_parser import logicparse | [
6738,
764,
6404,
291,
62,
48610,
1330,
9156,
29572
] | 4 | 9 |
import random

# Console dice roller (Italian UI): option 1 rolls a six-sided die and prints
# the result; any other number exits the loop.
while True:
    print('1. Lancia il dado\n2. Esci')
    try:
        user = int(input())
    except ValueError:
        # Robustness fix: non-numeric input used to raise an unhandled
        # ValueError and crash the program; now it simply re-prompts.
        continue
    if user == 1:
        num = random.randint(1, 6)
        print('Numero generato : ', num)
    else:
        break
11748,
4738,
201,
198,
201,
198,
4514,
6407,
25,
201,
198,
220,
220,
220,
3601,
10786,
16,
13,
21534,
544,
4229,
288,
4533,
59,
77,
17,
13,
8678,
979,
11537,
201,
198,
220,
220,
220,
2836,
796,
493,
7,
15414,
28955,
201,
198,
220,... | 2.009346 | 107 |
from fcfs import FCFS
| [
6738,
277,
66,
9501,
1330,
10029,
10652,
198
] | 2.75 | 8 |
import model
import config
import torch
import time
import tools
import json
import _pickle
import os
import copy
import numpy as np
import random
import math
import pprint
from torch.utils.data import BatchSampler, SequentialSampler, RandomSampler
from transformers import AutoTokenizer, AutoModel, AdamW
torch.backends.cudnn.benchmark = True
def find_clusters(unit_loc, index_clusters):
    """Look up which cluster a unit position belongs to.

    Args:
        unit_loc: position key of the unit.
        index_clusters: mapping from unit position to its cluster index.

    Returns:
        The cluster index for ``unit_loc``, or -1 when the position is not
        present in ``index_clusters``.
    """
    if unit_loc not in index_clusters:
        return -1
    return index_clusters[unit_loc]
# Script entry point; train() is presumably defined in the omitted part of the
# module — confirm against the full file.
if __name__ == "__main__":
    train()
| [
11748,
2746,
198,
11748,
4566,
198,
11748,
28034,
198,
11748,
640,
198,
11748,
4899,
198,
11748,
33918,
198,
11748,
4808,
27729,
293,
198,
11748,
28686,
198,
11748,
4866,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
1... | 2.428571 | 252 |
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
import numpy as np
from omegaconf import OmegaConf, DictConfig
from torch_utils import persistence
from torch_utils.ops import bias_act
from torch_utils import misc
from torch_utils.ops.fast_bilinear_mult import fast_manual_bilinear_mult_row
#----------------------------------------------------------------------------
@persistence.persistent_class
#----------------------------------------------------------------------------
@persistence.persistent_class
#----------------------------------------------------------------------------
@persistence.persistent_class
@persistence.persistent_class
@persistence.persistent_class
@persistence.persistent_class
@persistence.persistent_class
class PeriodicConstInput(nn.Module):
"""
It is like constant input, but periodic
"""
@persistence.persistent_class
class GridInput(nn.Module):
"""
For COCO-GAN, our input is grid-like and consists on 2 things:
- learnable coordinates
- high-frequency (up to 1 image for the whole period)
"""
@persistence.persistent_class
class ModulatedCoordFuser(nn.Module):
"""
CoordFuser which concatenates coordinates across dim=1 (we assume channel_first format)
"""
    def forward(self, x: Tensor, w: Tensor=None, left_borders_idx: Tensor=None, dtype=None, memory_format=None) -> Tensor:
        """
        Concatenate coordinate embeddings (Fourier and/or CIPS) to the input
        feature map along the channel dimension.

        Dims:
            @arg x is [batch_size, in_channels, img_size, img_size]
            @arg w is [batch_size, w_dim]
            @return out is [batch_size, in_channels + fourier_dim + cips_dim, img_size, img_size]
        """
        assert memory_format is torch.contiguous_format
        # Fallback config: pass the input through unchanged.
        if self.cfg.fallback:
            return x
        batch_size, in_channels, img_size = x.shape[:3]
        # left_borders_idx selects horizontally shifted grids; otherwise a
        # plain [-1, 1] coordinate grid is generated per batch element.
        if left_borders_idx is not None:
            raw_coords = generate_shifted_coords(left_borders_idx, img_size, self.cfg.grid_size, self.cfg.w_coord_dist, device=x.device)
        else:
            raw_coords = generate_coords(batch_size, img_size, x.device) # [batch_size, coord_dim, img_size, img_size]
        if self.cfg.no_fourier_fallback:
            coord_embs = raw_coords
        elif self.cfg.logarithmic:
            # Logarithmic basis path: embeddings are cached on self and only
            # recomputed when the growth schedule is on, or when the cache is
            # missing / has a stale shape or device.
            if (not self.cfg.growth_schedule.enabled) \
                or (self.coord_embs_cache is None) \
                or (self.coord_embs_cache.shape != (batch_size, 2, self.basis.shape[0])) \
                or (self.coord_embs_cache.device != x.device):
                if self.cfg.growth_schedule.enabled:
                    growth_weights = self.growth_weights.repeat(4) # [0,1,2] => [0,1,2,0,1,2,...]
                    basis = self.basis * growth_weights.unsqueeze(1) # [dim, 2]
                else:
                    basis = self.basis # [dim, 2]
                bases = basis.unsqueeze(0).repeat(batch_size, 1, 1) # [batch_size, dim, 2]
                raw_coord_embs = torch.einsum('bdc,bcxy->bdxy', bases, raw_coords) # [batch_size, dim, img_size, img_size]
                if self.use_cosine:
                    self.coord_embs_cache = torch.cat([raw_coord_embs.sin(), raw_coord_embs.cos()], dim=1) # [batch_size, dim * 2, img_size, img_size]
                else:
                    self.coord_embs_cache = raw_coord_embs.sin()
                self.coord_embs_cache = self.coord_embs_cache.contiguous()
            coord_embs = self.coord_embs_cache
        else:
            # Modulated Fourier path: the affine of w produces both the
            # projection matrix W and the bias for the Fourier features.
            mod = self.affine(w) # [batch_size, W_size + b_size]
            W = self.cfg.fourier_scale * mod[:, :self.W_size] # [batch_size, W_size]
            W = W.view(batch_size, self.dim, self.cfg.coord_dim) # [batch_size, fourier_dim, coord_dim]
            bias = mod[:, self.W_size:].view(batch_size, self.dim, 1, 1) # [batch_size, fourier_dim, 1]
            raw_coord_embs = (torch.einsum('bdc,bcxy->bdxy', W, raw_coords) + bias) # [batch_size, coord_dim, img_size, img_size]
            if self.use_cosine:
                coord_embs = torch.cat([raw_coord_embs.sin(), raw_coord_embs.cos()], dim=1) # [batch_size, dim * 2, img_size, img_size]
            else:
                coord_embs = raw_coord_embs.sin()
        coord_embs = coord_embs.to(dtype=dtype, memory_format=memory_format)
        out = torch.cat([x, coord_embs], dim=1) # [batch_size, in_channels + fourier_dim, img_size, img_size]
        # Optionally append the learnable CIPS embeddings as extra channels.
        if self.cfg.use_cips_embs > 0:
            cips_embs = self.cips_embs.repeat([batch_size, 1, 1, 1])
            cips_embs = cips_embs.to(dtype=dtype, memory_format=memory_format)
            out = torch.cat([out, cips_embs], dim=1) # [batch_size, in_channels + fourier_dim + cips_emb, img_size, img_size]
        return out
def fmm_modulate(
        conv_weight: Tensor,
        fmm_weights: nn.Module,
        fmm_mod_type: str='mult',
        demodulate: bool=False,
        fmm_add_weight: float=1.0,
        activation: Optional[str]=None) -> Tensor:
    """
    Apply factorized (low-rank) FMM modulation to a batched conv weight.

    NOTE(review): despite the annotation, `fmm_weights` is used as a Tensor of
    shape [batch_size, rank * (in_channels + out_channels)] — confirm.

    Args:
        conv_weight: [batch_size, out_channels, in_channels, kh, kw] weights.
        fmm_weights: flat per-sample factors; the first rank*out_channels
            entries form the left factor, the rest the right factor.
        fmm_mod_type: 'mult' applies (1 + M) multiplicatively, 'add' adds
            fmm_add_weight * M.
        demodulate: if True, re-normalize each output filter to unit norm.
        fmm_add_weight: scale for the additive mode.
        activation: None/'linear' (identity), 'tanh', or 'sigmoid' (shifted
            to be zero-centered).

    Returns:
        Modulated weights with the same shape as `conv_weight`.
    """
    batch_size, out_channels, in_channels, kh, kw = conv_weight.shape
    assert fmm_weights.shape[1] % (in_channels + out_channels) == 0
    rank = fmm_weights.shape[1] // (in_channels + out_channels)

    # Split the flat vector into the two low-rank factors and multiply them
    # into a full [out_channels, in_channels] modulation matrix per sample.
    left = fmm_weights[:, :rank * out_channels].view(batch_size, out_channels, rank)
    right = fmm_weights[:, rank * out_channels:].view(batch_size, rank, in_channels)
    mod = (left @ right) / np.sqrt(rank)  # [batch_size, out_channels, in_channels]
    misc.assert_shape(mod, [batch_size, out_channels, in_channels])
    mod = mod.unsqueeze(3).unsqueeze(4)  # broadcast over the kernel dims

    if activation == "tanh":
        mod = mod.tanh()
    elif activation == 'sigmoid':
        mod = mod.sigmoid() - 0.5
    elif activation not in ['linear', None]:
        raise NotImplementedError

    if fmm_mod_type == 'mult':
        result = conv_weight * (mod + 1.0)
    elif fmm_mod_type == 'add':
        result = conv_weight + fmm_add_weight * mod
    else:
        raise NotImplementedError

    if demodulate:
        result = result / result.norm(dim=[2, 3, 4], keepdim=True)
    return result
def generate_coords(batch_size: int, img_size: int, device='cpu', align_corners: bool=False) -> Tensor:
    """
    Build a batch of 2D coordinate grids in the [-1, 1] range for a square
    image of size (img_size x img_size).

    Orientation follows image conventions: the upper-left pixel maps to
    (-1, -1) and coordinates increase towards the lower-right corner.

    Args:
        batch_size: number of identical grids to produce.
        img_size: side length of the square grid.
        device: torch device of the result.
        align_corners: if True, the endpoints -1 and 1 are hit exactly
            (torch.linspace); otherwise values follow (i / img_size) * 2 - 1.

    Returns:
        Tensor of shape [batch_size, 2, img_size, img_size], channel 0 holding
        x coordinates and channel 1 holding y coordinates.
    """
    if align_corners:
        base_row = torch.linspace(-1, 1, img_size, device=device).float()  # [img_size]
    else:
        base_row = (torch.arange(0, img_size, device=device).float() / img_size) * 2 - 1  # [img_size]
    xs = base_row.view(1, -1).repeat(img_size, 1)       # [img_size, img_size]
    ys = xs.t().flip(dims=(0,))                         # [img_size, img_size]
    stacked = torch.stack([xs, ys], dim=2).view(-1, 2)  # [img_size ** 2, 2]
    grid = stacked.t().view(1, 2, img_size, img_size)   # [1, 2, img_size, img_size]
    return grid.repeat(batch_size, 1, 1, 1)             # [batch_size, 2, img_size, img_size]
def generate_logarithmic_basis(
        resolution: int,
        max_num_feats: float=float('inf'),
        remove_lowest_freq: bool=False,
        use_diagonal: bool=True) -> Tensor:
    """
    Generates a directional logarithmic basis with the following directions:
        - horizontal
        - vertical
        - main diagonal
        - anti-diagonal

    Bug fix: the default was `np.float('inf')`; the `np.float` alias was
    removed in NumPy 1.24, which made importing this module crash (defaults
    are evaluated at definition time). Plain `float('inf')` is equivalent.

    Args:
        resolution: image side length; ceil(log2(resolution)) frequencies are
            generated per direction.
        max_num_feats: upper bound on the number of basis rows.
        remove_lowest_freq: drop the lowest frequency of each direction.
        use_diagonal: include the main- and anti-diagonal directions.

    Returns:
        Tensor of shape [num_feats, 2] with num_feats <= max_num_feats.
    """
    max_num_feats_per_direction = np.ceil(np.log2(resolution)).astype(int)
    bases = [
        generate_horizontal_basis(max_num_feats_per_direction),
        generate_vertical_basis(max_num_feats_per_direction),
    ]
    if use_diagonal:
        bases.extend([
            generate_diag_main_basis(max_num_feats_per_direction),
            generate_anti_diag_basis(max_num_feats_per_direction),
        ])
    if remove_lowest_freq:
        bases = [b[1:] for b in bases]
    # If we do not fit into `max_num_feats`, drop whole directions in the
    # order: 1) anti-diagonal 2) main-diagonal. Horizontal and vertical are
    # always kept, otherwise the model cannot locate positions.
    while (max_num_feats_per_direction * len(bases) > max_num_feats) and (len(bases) > 2):
        bases = bases[:-1]
    basis = torch.cat(bases, dim=0)
    assert basis.shape[0] <= max_num_feats, \
        f"num_coord_feats > max_num_fixed_coord_feats: {basis.shape, max_num_feats}."
    return basis
@torch.no_grad()
def generate_random_coords_shifts(batch_size: int, period_length: float=4.0, device: str='cpu') -> Tensor:
    """
    Sample a per-example horizontal coordinate shift of up to one half-period.

    A half-period occupies the [-1, 1] coordinate range, i.e. has length
    0.5 * period_length in coordinate units, so the horizontal shift is drawn
    uniformly from [-0.5 * period_length, 0.5 * period_length]. Vertical
    shifts are always zero.

    Returns:
        Tensor of shape [batch_size, 2, 1, 1]: channel 0 is the horizontal
        shift, channel 1 the (zero) vertical shift.
    """
    half_period = 0.5 * period_length
    horizontal = (torch.rand(batch_size, device=device) - 0.5) * 2.0 * half_period  # [batch_size]
    vertical = torch.zeros(batch_size, device=device)  # vertical coords are never shifted
    shifts = torch.stack([horizontal, vertical], dim=1)  # [batch_size, 2]
    return shifts.unsqueeze(2).unsqueeze(3).contiguous()  # [batch_size, 2, 1, 1]
@torch.no_grad()
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
1330,
309,
22854,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
299,
32152,
355,
... | 2.330012 | 4,215 |
from tools.tools import setup_data_loaders
from VariationalAutoEncoder.utils import evaluate_vae, train_vae
from VariationalAutoEncoder.VariationalAutoEncoder import VAE
import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam
from tqdm import tqdm
from tools.training_monitoring import LossesMonitor
from classifier.ParametersLogisticRegressionVAE import ParameterLogisticRegressionVAE
from classifier.LogisticRegression_VAE import LogisticRegression
from tools.Visualisations import compare_images
from classifier.utils import train_classifier, evaluate_classifier
from classifier.utils import return_model_accurary
from tools.Visualisations import t_SNE, describe_statistic_per_label, show_confusion_matrix
# Pipeline: (1) train a VAE on MNIST with pyro's SVI, (2) freeze its encoder,
# (3) train a logistic-regression classifier on the latent codes, (4) report
# accuracy and diagnostic plots.
SMOKE_TEST = False
TEST_FREQUENCY = 5

pyro.enable_validation(True)
pyro.distributions.enable_validation(False)
pyro.set_rng_seed(0)
pyro.clear_param_store()

LEARNING_RATE = 1.0e-3
USE_CUDA = False
NUM_EPOCHS = 1 if SMOKE_TEST else 31

train_loader, test_loader = setup_data_loaders(batch_size=256, use_cuda=USE_CUDA)

###################################################
# Variational Autoencoder definition and training #
###################################################
vae = VAE(use_cuda=USE_CUDA)
adam_args = {"lr": LEARNING_RATE}
optimizer = Adam(adam_args)
svi = SVI(vae.model, vae.guide, optimizer, loss=Trace_ELBO())

loss_monitor_vae = LossesMonitor()
# training loop (losses are negated so the monitor plots the ELBO)
for epoch in range(NUM_EPOCHS):
    total_epoch_loss_train = train_vae(svi, train_loader, use_cuda=USE_CUDA)
    loss_monitor_vae.append_values(epoch, -total_epoch_loss_train, set="train")
    print("[epoch %03d] average training loss: %.4f" % (epoch, total_epoch_loss_train))

    if epoch % TEST_FREQUENCY == 0:
        total_epoch_loss_test = evaluate_vae(svi, test_loader, use_cuda=USE_CUDA)
        loss_monitor_vae.append_values(epoch, -total_epoch_loss_test, set="test")
        print("[epoch %03d] average test loss: %.4f" % (epoch, total_epoch_loss_test))

if not SMOKE_TEST:
    loss_monitor_vae.show_losses(type_loss="Evidence lower bound (ELBO)")
compare_images(vae, test_loader)

# Freeze the encoder so classifier training cannot update it.
vae.encoder.freeze()

######################################
# Classifier definition and training #
######################################
classifier = LogisticRegression(number_hidden_units=vae.z_dim, num_classes=10, encoder=vae.encoder)
parameters_LR_VAE = ParameterLogisticRegressionVAE()
if SMOKE_TEST:
    parameters_LR_VAE.num_epochs = 2
optimizer = torch.optim.SGD(classifier.parameters(), lr=parameters_LR_VAE.learning_rate)

loss_monitor_classifier = LossesMonitor()
for epoch in tqdm(range(parameters_LR_VAE.num_epochs)):
    loss_train = train_classifier(classifier, optimizer, train_loader)
    loss_monitor_classifier.append_values(epoch, loss_train, set="train")
    print("[epoch %03d] average training loss: %.4f" % (epoch, loss_train))

    if epoch % TEST_FREQUENCY == 0:
        loss_test = evaluate_classifier(classifier, test_loader)
        # BUG FIX: previously logged loss_train under the "test" series,
        # while the print below already used loss_test.
        loss_monitor_classifier.append_values(epoch, loss_test, set="test")
        print("[epoch %03d] average testing loss: %.4f" % (epoch, loss_test))

if not SMOKE_TEST:
    loss_monitor_classifier.show_losses(type_loss="Cross entropy")

accuracy = return_model_accurary(classifier, test_loader)
print('Accuracy of the model on the 10000 test images: %d %%' % accuracy)

t_SNE(test_loader, vae)
describe_statistic_per_label(test_loader, vae)
show_confusion_matrix(train_loader,classifier)
| [
6738,
4899,
13,
31391,
1330,
9058,
62,
7890,
62,
2220,
364,
198,
198,
6738,
15965,
864,
27722,
27195,
12342,
13,
26791,
1330,
13446,
62,
33353,
11,
4512,
62,
33353,
198,
6738,
15965,
864,
27722,
27195,
12342,
13,
23907,
864,
27722,
2719... | 2.805732 | 1,256 |
import pytest
import torch
from torchstruct import TensorStruct
| [
11748,
12972,
9288,
201,
198,
11748,
28034,
201,
198,
201,
198,
6738,
28034,
7249,
1330,
309,
22854,
44909,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,... | 1.942308 | 52 |
from sklearn.naive_bayes import GaussianNB as GNB
from sklearn.svm import SVC
from preprocessor import preprocess
# NOTE(review): `learn` is neither defined nor imported in this view —
# this call will raise NameError at runtime unless `learn` is provided
# elsewhere; confirm whether an import is missing or `preprocess()` was meant.
learn()
6738,
1341,
35720,
13,
2616,
425,
62,
24406,
274,
1330,
12822,
31562,
32819,
355,
15484,
33,
198,
6738,
1341,
35720,
13,
82,
14761,
1330,
311,
15922,
198,
6738,
662,
41341,
1330,
662,
14681,
198,
198,
35720,
3419
] | 3.297297 | 37 |
# Copyright 2015 NetApp, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
REST webservice utilities
"""
from oslo_log import log as logging
import requests
from six.moves import urllib
from netapp_lib.exceptions import NetAppLibException
from netapp_lib.i18n import _, _LE
LOG = logging.getLogger(__name__)
class WebServicesException(NetAppLibException):
    """Web Service Failure"""
    # Default (i18n-translated) message used when no message is supplied;
    # raised for generic REST webservice errors in this module.
    message = _("A webservice exception occurred.")
class WebserviceClient(object):
    """Base client for NetApp Storage web services."""

    def _validate_params(self, scheme, host, port):
        """Sanity-check connection parameters.

        Raises ValueError when any parameter is missing or when the
        transport scheme is not plain/secure HTTP.
        """
        if any(value is None for value in (scheme, host, port)):
            raise ValueError('One of the required inputs from host, '
                             'port or scheme not found.')
        if scheme != 'http' and scheme != 'https':
            raise ValueError('Invalid transport type.')

    def _create_endpoint(self, scheme, host, port, service_path):
        """Build the service base URL and remember it on the instance."""
        location = '%s:%s' % (host, port)
        url_parts = (scheme, location, service_path, None, None, None)
        self._endpoint = urllib.parse.urlunparse(url_parts)

    def _init_connection(self):
        """Create the pooled HTTP session, attaching basic auth if configured."""
        self.conn = requests.Session()
        if self._username and self._password:
            self.conn.auth = (self._username, self._password)

    def _eval_response(self, response):
        """Hook for subclasses to inspect a response before returning it."""
        pass
| [
2,
15069,
1853,
3433,
4677,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
26... | 2.866667 | 750 |
# coding: utf-8
"""
Idfy.Signature
Sign contracts, declarations, forms and other documents using digital signatures. ## Last update Last build date for this endpoint: 18.03.2019
"""
import pprint
import re
from typing import List, Dict
from datetime import datetime as datetime
class Link(object):
    """A hypermedia link entry returned by the Idfy Signature API.

    NOTE: This class was originally generated by Eivind; an ``__init__``
    has been added because the generated version left the backing
    attributes unset, so reading any property on a fresh ``Link()``
    raised ``AttributeError`` and ``to_dict()`` could not run.
    """

    # Attribute name -> python type, consumed by to_dict().
    swagger_types = {
        'href': str,
        'rel': str,
        'content_type': str,
        'error': str,
        'resource_status': str
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'href': 'href',
        'rel': 'rel',
        'content_type': 'contentType',
        'error': 'error',
        'resource_status': 'resourceStatus'
    }

    def __init__(self, href=None, rel=None, content_type=None, error=None,
                 resource_status=None):
        """Initialise the link; every field is optional and defaults to None.

        :param href: Target URL of the link.
        :param rel: Relation of the link to the current resource.
        :param content_type: MIME type of the linked resource.
        :param error: Error description when the linked operation failed.
        :param resource_status: Status of the linked resource.
        """
        self._href = href
        self._rel = rel
        self._content_type = content_type
        self._error = error
        self._resource_status = resource_status

    @property
    def href(self):
        """Gets the href of this Link.

        :rtype: str
        """
        return self._href

    @href.setter
    def href(self, href):
        """Sets the href of this Link.

        :param href: The href of this Link.
        :type: str
        """
        self._href = href

    @property
    def rel(self):
        """Gets the rel of this Link.

        :rtype: str
        """
        return self._rel

    @rel.setter
    def rel(self, rel):
        """Sets the rel of this Link.

        :param rel: The rel of this Link.
        :type: str
        """
        self._rel = rel

    @property
    def content_type(self):
        """Gets the content_type of this Link.

        :rtype: str
        """
        return self._content_type

    @content_type.setter
    def content_type(self, content_type):
        """Sets the content_type of this Link.

        :param content_type: The content_type of this Link.
        :type: str
        """
        self._content_type = content_type

    @property
    def error(self):
        """Gets the error of this Link.

        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """Sets the error of this Link.

        :param error: The error of this Link.
        :type: str
        """
        self._error = error

    @property
    def resource_status(self):
        """Gets the resource_status of this Link.

        :rtype: str
        """
        return self._resource_status

    @resource_status.setter
    def resource_status(self, resource_status):
        """Sets the resource_status of this Link.

        :param resource_status: The resource_status of this Link.
        :type: str
        """
        self._resource_status = resource_status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Link):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
5121,
24928,
13,
11712,
1300,
628,
220,
220,
220,
5865,
8592,
11,
31713,
11,
5107,
290,
584,
4963,
1262,
4875,
17239,
13,
220,
220,
22492,
4586,
4296,
220,
220,
... | 2.176296 | 2,025 |
# Copyright 2015 Christian Aichinger <Greek0@gmx.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import re
import tokenize
import pytest
from docker_cleanup import error, parser
class AttributeComparator:
    """Generic comparator compatible with arbitrary objects

    AttributeComparator objects are constructed with a *cls* argument and
    arbitrary keyword arguments and compare equal to all objects that are
    subclasses of *cls* and where the keyword arguments match the object's
    attributes::

        class A:
            pass

        a = A()
        a.x = 7
        a.y = 8

        assert a == AttributeComparator(A, x=7, y=8)
        assert a != AttributeComparator(A, x=7, y=9)
        assert a != AttributeComparator(dict, x=7, y=8)

        # None can be passed as *cls* if the class doesn't matter.
        assert a == AttributeComparator(None, x=7, y=8)

    Constructor arguments:
        cls: A class that other objects have to be subclasses of to compare
             True. If no subclass check is desired, None can be passed.
        **kwargs: Keyword arguments to be compared against other
            object's dictionary.
    """
    # Unique marker distinguishing "attribute absent" from "attribute is None".
    _sentinel = object()

    def __init__(self, cls, **kwargs):
        # FIX: the comparison machinery promised by the docstring was missing
        # (no __init__/__eq__); implemented per the documented contract.
        self._cls = cls
        self._kwargs = kwargs

    def __eq__(self, other):
        """Return True when *other* matches the class and all attributes."""
        if self._cls is not None and not isinstance(other, self._cls):
            return False
        return all(
            getattr(other, name, self._sentinel) == expected
            for name, expected in self._kwargs.items()
        )

    def __repr__(self):
        details = ', '.join(
            ['cls={!r}'.format(self._cls)] +
            ['{}={!r}'.format(k, v) for k, v in sorted(self._kwargs.items())]
        )
        return 'AttributeComparator({})'.format(details)
#################################################
# Tests for ImportStatement
#################################################
no_imports = [
"outport sys;",
"DELETE IF True;",
"7;",
"'abc';",
]
@pytest.mark.parametrize("input", no_imports)
valid_imports = [
('IMPORT sys;', ['sys']),
('IMPORT os.path;', ['os.path']),
('IMPORT sys, os.path;', ['sys', 'os.path']),
]
@pytest.mark.parametrize("input,expected", valid_imports)
invalid_imports = [
"IMPORT",
"IMPORT sys",
"IMPORT \n;",
"IMPORT sys\n;",
"IMPORT ;",
"IMPORT .;",
"IMPORT ..;",
"IMPORT sys os;",
"IMPORT 7;",
"IMPORT 'abc';",
]
@pytest.mark.parametrize("input", invalid_imports)
#################################################
# Tests for ImportStatement
#################################################
no_deletes = [
"IMPORT sys;",
"FORCE nothing;",
"FORCE IMAGE IF True;",
"FORCE CONTAINER IF True;",
"IMAGE IF True;",
"CONTAINER IF True;",
"7;",
"'abc';",
]
@pytest.mark.parametrize("input", no_deletes)
# Should de-duplicate if more entries are added.
valid_deletes = [
('DELETE IMAGE IF True;', 'image', 'True'),
('DELETE IMAGE IF image.Attr;', 'image', 'image.Attr'),
('DELETE IMAGE IF image.Name == "A";', 'image', 'image.Name == "A"'),
('DELETE IMAGE IF image.Name == "A" and image.Attr;', 'image',
'image.Name == "A" and image.Attr'),
('DELETE IMAGE IF image.Name == "A" and\n image.Attr;', 'image',
'image.Name == "A" and\n image.Attr'),
]
valid_deletes += [(smt.replace('DELETE', 'KEEP'), etype, eexpr)
for smt, etype, eexpr in valid_deletes]
valid_deletes += [('FORCE ' + smt, etype, eexpr)
for smt, etype, eexpr in valid_deletes]
valid_deletes += [
(smt.replace('IMAGE', 'CONTAINER').replace('image', 'container'),
'container',
eexpr.replace('image', 'container'))
for smt, etype, eexpr in valid_deletes]
@pytest.mark.parametrize("input,expected_type,expected_expr", valid_deletes)
invalid_deletes = [
"DELETE;",
"DELETE IMAGE;",
"DELETE IMAGE IF;",
"DELETE IMAGE IF True",
"DELETE ME IF True;",
"DELETE IMAGE IF image.x == 7\nDELETE;",
"DELETE IMAGE IF image.x == 7\nFORCE DELETE;",
"DELETE IMAGE IF image.x == 7\nIMPORT sys;",
"DELETE IMAGE IF image.x == 'ab\ncd';",
'DELETE IMAGE IF image.x == "ab\ncd";',
"DELETE IMAGE IF image.x == §;",
]
invalid_deletes += [smt.replace('DELETE', 'KEEP') for smt in invalid_deletes]
invalid_deletes += ['FORCE ' + smt for smt in invalid_deletes]
invalid_deletes += [
smt.replace('IMAGE', 'CONTAINER').replace('image', 'container')
for smt in invalid_deletes]
@pytest.mark.parametrize("input", invalid_deletes)
#################################################
# Tests for Parser
#################################################
valid_parser_inputs = [
("""""", []),
(""" \n """, []),
("""# Comment""", []),
("""# Comment\n\n\n # Another comment""", []),
("""
IMPORT sys;
FORCE DELETE IMAGE IF image.Dangling;
FORCE DELETE CONTAINER IF not container.Running;
""",
[AttributeComparator(parser.ImportStatement),
AttributeComparator(parser.ExpressionStatement, force=True),
AttributeComparator(parser.ExpressionStatement, force=True),
]),
("""
DELETE IMAGE IF (None);
""",
[AttributeComparator(parser.ExpressionStatement, force=False)]),
]
@pytest.mark.parametrize("input,statements", valid_parser_inputs)
invalid_parser_inputs = [
"§",
"'ab\ncd'",
'"ab\ncd"',
]
@pytest.mark.parametrize("input", invalid_parser_inputs)
parser_input_exceptions = [
("DELETE IMAGE IF (;", '(', (1, 16)),
("DELETE IMAGE IF );", ')', (1, 16)),
("DELETE IMAGE IF [;", '[', (1, 16)),
("DELETE IMAGE IF ];", ']', (1, 16)),
("DELETE IMAGE IF {;", '{', (1, 16)),
("DELETE IMAGE IF };", '}', (1, 16)),
("DELETE IMAGE IF ''';", "'", (1, 16)),
('DELETE IMAGE IF """;', '"', (1, 16)),
("DELETE IMAGE IF (();", '(', (1, 16)),
("DELETE IMAGE IF )();", ')', (1, 16)),
("DELETE IMAGE IF ()(;", '(', (1, 18)),
("DELETE IMAGE IF ());", ')', (1, 18)),
("DELETE IMAGE;", ';', (1, 12)),
]
@pytest.mark.parametrize("input,char,pos", parser_input_exceptions)
| [
2,
15069,
1853,
4302,
317,
488,
3889,
1279,
44059,
15,
31,
70,
36802,
13,
3262,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.480162 | 2,470 |
# Python indentation convention: use exactly 4 spaces per level
# (translated from the original Portuguese note).
# Beginner exercise: greet the user, read two integers, and report which
# range their sum falls into (<=5, <=10, <=50, or >50). Prompts and output
# are intentionally in Portuguese.
print("Olá meu caro seja bem-vindo ao seu programa PYTHON\n")
num_1=int(input("Digite um número interiro :"))  # first integer
print("Vamos trabalhar com soma agora ")
num_2=int(input("\nDigite outro número interiro :"))  # second integer
total=num_1+num_2
# Classify the sum; branches are checked in increasing order of the bound.
if total <=5:
    print(f" A soma é {total} e esse total é menor ou igual a 5 ")
elif total <= 10 :
    print(f" A soma é {total} e esse total é menor ou igual a 10 ")
elif total <=50:
    print(f" \nA soma é {total} e esse total é menor ou igual a 50 ")
else:
    print(f" \nA soma é {total} e esse total é maior que 50 ")
print("\n FIM do Programa")  # end-of-program marker
| [
2,
11013,
1852,
64,
16175,
28749,
466,
21015,
7736,
528,
283,
2471,
268,
292,
604,
1658,
8957,
16175,
418,
401,
78,
7292,
16175,
28749,
198,
4798,
7203,
30098,
6557,
502,
84,
1097,
78,
384,
6592,
307,
76,
12,
50172,
78,
257,
78,
384... | 2.304659 | 279 |
# !/usr/bin/env python3
# Author: C.K
# Email: theck17@163.com
# DateTime:2021-01-21 21:30:34
# Description:
import os, sys
if __name__ == "__main__":
pass
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
6434,
25,
327,
13,
42,
198,
2,
9570,
25,
262,
694,
1558,
31,
24136,
13,
785,
198,
2,
7536,
7575,
25,
1238,
2481,
12,
486,
12,
2481,
2310,
25,
1270,
25,
2682,
198,
2,
... | 2.246575 | 73 |
# -*- coding:utf-8 -*-
# -*- author:ZuoJianHao -*-
# (duplicate coding/author header lines removed)
# Smoke-test script: run one 416x416 image through a tiny-YOLOv3
# face-detection TFLite model and dump the raw output tensors.
import tensorflow as tf
import cv2
import numpy as np
# Paths are relative to the current working directory.
weights = "./yolov3_tiny_widerface_boxes_int8.tflite"
image_path = "./416_416.jpg"
original_image = cv2.imread(image_path)
# OpenCV loads BGR; converted to RGB before feeding the model.
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# Add the batch dimension. NOTE(review): data is fed as float32 without
# scaling although the file name suggests an int8-quantized model —
# confirm against input_details printed below.
image_data = original_image[np.newaxis, ...].astype(np.float32)
interpreter = tf.lite.Interpreter(model_path=weights)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
# One numpy array per output tensor (raw, un-decoded YOLO boxes).
pred_bbox = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
print(pred_bbox)
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
532,
9,
12,
1772,
25,
57,
20895,
41,
666,
39,
5488,
532,
9,
12,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
532,
9,
12,
1772,
25,
57,
2... | 2.548387 | 341 |
# -*- coding: utf-8 -*-
import copy
# Build the dictionaries to transliterate Serbian cyrillic to latin and vice versa.
# This dictionary is to transliterate from cyrillic to latin.
SR_CYR_TO_LAT_DICT = {
u'А': u'A', u'а': u'a',
u'Б': u'B', u'б': u'b',
u'В': u'V', u'в': u'v',
u'Г': u'G', u'г': u'g',
u'Д': u'D', u'д': u'd',
u'Ђ': u'Đ', u'ђ': u'đ',
u'Е': u'E', u'е': u'e',
u'Ж': u'Ž', u'ж': u'ž',
u'З': u'Z', u'з': u'z',
u'И': u'I', u'и': u'i',
u'Ј': u'J', u'ј': u'j',
u'К': u'K', u'к': u'k',
u'Л': u'L', u'л': u'l',
u'Љ': u'Lj', u'љ': u'lj',
u'М': u'M', u'м': u'm',
u'Н': u'N', u'н': u'n',
u'Њ': u'Nj', u'њ': u'nj',
u'О': u'O', u'о': u'o',
u'П': u'P', u'п': u'p',
u'Р': u'R', u'р': u'r',
u'С': u'S', u'с': u's',
u'Т': u'T', u'т': u't',
u'Ћ': u'Ć', u'ћ': u'ć',
u'У': u'U', u'у': u'u',
u'Ф': u'F', u'ф': u'f',
u'Х': u'H', u'х': u'h',
u'Ц': u'C', u'ц': u'c',
u'Ч': u'Č', u'ч': u'č',
u'Џ': u'Dž', u'џ': u'dž',
u'Ш': u'Š', u'ш': u'š',
}
# This dictionary is to transliterate from Serbian latin to cyrillic.
# Let's build it by simply swapping keys and values of previous dictionary.
SR_LAT_TO_CYR_DICT = {y: x for x, y in iter(SR_CYR_TO_LAT_DICT.items())}
# Build the dictionaries to transliterate Montenegrin cyrillic to latin and vice versa.
# Montenegrin Latin is based on Serbo-Croatian Latin, with the addition of the two letters Ś and Ź,
# to replace the digraphs SJ and ZJ.
# These parallel the two letters of the Montenegrin Cyrillic alphabet not found in Serbian, С́ and З́.
# These, respectively, could also be represented in the original alphabets as šj and žj, and шj and жj.
# Source: https://en.wikipedia.org/wiki/Montenegrin_alphabet#Latin_alphabet
# Also see: http://news.bbc.co.uk/2/hi/8520466.stm
ME_CYR_TO_LAT_DICT = copy.deepcopy(SR_CYR_TO_LAT_DICT)
ME_CYR_TO_LAT_DICT.update({
u'С́': u'Ś', u'с́': u'ś', # Montenegrin
u'З́': u'Ź', u'з́': u'ź' # Montenegrin
})
# This dictionary is to transliterate from Montenegrin latin to cyrillic.
ME_LAT_TO_CYR_DICT = {y: x for x, y in iter(ME_CYR_TO_LAT_DICT.items())}
# Build the dictionaries to transliterate Macedonian cyrillic to latin and vice versa.
MK_CYR_TO_LAT_DICT = copy.deepcopy(SR_CYR_TO_LAT_DICT)
# Differences with Serbian:
# 1) Between Ze (З з) and I (И и) is the letter Dze (Ѕ ѕ), which looks like the Latin letter S and represents /d͡z/.
MK_CYR_TO_LAT_DICT[u'Ѕ'] = u'Dz'
MK_CYR_TO_LAT_DICT[u'ѕ'] = u'dz'
# 2) Dje (Ђ ђ) is replaced by Gje (Ѓ ѓ), which represents /ɟ/ (voiced palatal stop).
# In some dialects, it represents /d͡ʑ/ instead, like Dje
# It is written ⟨Ǵ ǵ⟩ in the corresponding Macedonian Latin alphabet.
del MK_CYR_TO_LAT_DICT[u'Ђ']
del MK_CYR_TO_LAT_DICT[u'ђ']
MK_CYR_TO_LAT_DICT[u'Ѓ'] = u'Ǵ'
MK_CYR_TO_LAT_DICT[u'ѓ'] = u'ǵ'
# 3) Tshe (Ћ ћ) is replaced by Kje (Ќ ќ), which represents /c/ (voiceless palatal stop).
# In some dialects, it represents /t͡ɕ/ instead, like Tshe.
# It is written ⟨Ḱ ḱ⟩ in the corresponding Macedonian Latin alphabet.
del MK_CYR_TO_LAT_DICT[u'Ћ']
del MK_CYR_TO_LAT_DICT[u'ћ']
MK_CYR_TO_LAT_DICT[u'Ќ'] = u'Ḱ'
MK_CYR_TO_LAT_DICT[u'ќ'] = u'ḱ'
# This dictionary is to transliterate from Macedonian latin to cyrillic.
MK_LAT_TO_CYR_DICT = {y: x for x, y in iter(MK_CYR_TO_LAT_DICT.items())}
# This dictionary is to transliterate from Russian cyrillic to latin.
RU_CYR_TO_LAT_DICT = {
u"А": u"A", u"а": u"a",
u"Б": u"B", u"б": u"b",
u"В": u"V", u"в": u"v",
u"Г": u"G", u"г": u"g",
u"Д": u"D", u"д": u"d",
u"Е": u"E", u"е": u"e",
u"Ё": u"YO", u"ё": u"yo",
u"Ж": u"ZH", u"ж": u"zh",
u"З": u"Z", u"з": u"z",
u"И": u"I", u"и": u"i",
u"Й": u"J", u"й": u"j",
u"К": u"K", u"к": u"k",
u"Л": u"L", u"л": u"l",
u"М": u"M", u"м": u"m",
u"Н": u"N", u"н": u"n",
u"О": u"O", u"о": u"o",
u"П": u"P", u"п": u"p",
u"Р": u"R", u"р": u"r",
u"С": u"S", u"с": u"s",
u"Т": u"T", u"т": u"t",
u"У": u"U", u"у": u"u",
u"Ф": u"F", u"ф": u"f",
u"Х": u"H", u"х": u"h",
u"Ц": u"C", u"ц": u"c",
u"Ч": u"CH", u"ч": u"ch",
u"Ш": u"SH", u"ш": u"sh",
u"Щ": u"SZ", u"щ": u"sz",
u"Ъ": u"#", u"ъ": u"#",
u"Ы": u"Y", u"ы": u"y",
u"Ь": u"'", u"ь": u"'",
u"Э": u"EH", u"э": u"eh",
u"Ю": u"JU", u"ю": u"ju",
u"Я": u"JA", u"я": u"ja",
}
# This dictionary is to transliterate from Russian latin to cyrillic.
# Built by inverting RU_CYR_TO_LAT_DICT (for duplicate latin values the
# last cyrillic source wins); the update() below then adds latin-only
# spellings and digraph capitalisation variants with no 1:1 cyrillic key.
RU_LAT_TO_CYR_DICT = {y: x for x, y in RU_CYR_TO_LAT_DICT.items()}
RU_LAT_TO_CYR_DICT.update({
    u"X": u"Х", u"x": u"х",
    u"W": u"Щ", u"w": u"щ",
    u"'": u"ь",
    u"#": u"ъ",
    u"JE": u"ЖЕ", u"Je": u"Же", u"je": u"же",
    u"YU": u"Ю", u"Yu": u"Ю", u"yu": u"ю",
    u"YA": u"Я", u"Ya": u"Я", u"ya": u"я",
    u"iy": u"ый",  # dobriy => добрый
})
# Transliterate from Tajik cyrillic to latin
TJ_CYR_TO_LAT_DICT = copy.deepcopy(RU_CYR_TO_LAT_DICT)
# Change Mapping according to ISO 9 (1995)
TJ_CYR_TO_LAT_DICT[u"Э"] = u"È"
TJ_CYR_TO_LAT_DICT[u"э"] = u"è"
TJ_CYR_TO_LAT_DICT[u"ъ"] = u"’"
TJ_CYR_TO_LAT_DICT[u"Ч"] = u"Č"
TJ_CYR_TO_LAT_DICT[u"ч"] = u"č"
TJ_CYR_TO_LAT_DICT[u"Ж"] = u"Ž"
TJ_CYR_TO_LAT_DICT[u"ж"] = u"ž"
TJ_CYR_TO_LAT_DICT[u"Ё"] = u"Ë"
TJ_CYR_TO_LAT_DICT[u"ё"] = u"ë"
TJ_CYR_TO_LAT_DICT[u"Ш"] = u"Š"
TJ_CYR_TO_LAT_DICT[u"ш"] = u"š"
TJ_CYR_TO_LAT_DICT[u"Ю"] = u"Û"
TJ_CYR_TO_LAT_DICT[u"ю"] = u"û"
TJ_CYR_TO_LAT_DICT[u"Я"] = u"Â"
TJ_CYR_TO_LAT_DICT[u"я"] = u"â"
# delete letters not used
del TJ_CYR_TO_LAT_DICT[u"Ц"]
del TJ_CYR_TO_LAT_DICT[u"ц"]
del TJ_CYR_TO_LAT_DICT[u"Щ"]
del TJ_CYR_TO_LAT_DICT[u"щ"]
del TJ_CYR_TO_LAT_DICT[u"Ы"]
del TJ_CYR_TO_LAT_DICT[u"ы"]
# update the dict for the additional letters in the tajik cyrillic alphabet ( Ғ, Ӣ, Қ, Ӯ, Ҳ, Ҷ )
# see https://en.wikipedia.org/wiki/Tajik_alphabet#Cyrillic
TJ_CYR_TO_LAT_DICT.update({
u"Ғ": u"Ǧ", u"ғ": u"ǧ",
u"Ӣ": u"Ī", u"ӣ": u"ī",
u"Қ": u"Q", u"қ": u"q",
u"Ӯ": u"Ū", u"ӯ": u"ū",
u"Ҳ": u"Ḩ", u"ҳ": u"ḩ",
u"Ҷ": u"Ç", u"ҷ": u"ç"
})
# transliterate from latin tajik to cyrillic
TJ_LAT_TO_CYR_DICT = {y: x for x, y in iter(TJ_CYR_TO_LAT_DICT.items())}
# Transliterate from Bulgarian cyrillic to latin
BG_CYR_TO_LAT_DICT = copy.deepcopy(RU_CYR_TO_LAT_DICT)
# There are a couple of letters that don't exist in Bulgarian:
del BG_CYR_TO_LAT_DICT[u"Ё"]
del BG_CYR_TO_LAT_DICT[u"ё"]
del BG_CYR_TO_LAT_DICT[u"Ы"]
del BG_CYR_TO_LAT_DICT[u"ы"]
del BG_CYR_TO_LAT_DICT[u"Э"]
del BG_CYR_TO_LAT_DICT[u"э"]
# Some letters that are pronounced diferently
BG_CYR_TO_LAT_DICT[u"Й"] = u"Y"
BG_CYR_TO_LAT_DICT[u"й"] = u"y"
BG_CYR_TO_LAT_DICT[u"Ц"] = u"TS"
BG_CYR_TO_LAT_DICT[u"ц"] = u"ts"
BG_CYR_TO_LAT_DICT[u"Щ"] = u"SHT"
BG_CYR_TO_LAT_DICT[u"щ"] = u"sht"
BG_CYR_TO_LAT_DICT[u"Ю"] = u"YU"
BG_CYR_TO_LAT_DICT[u"ю"] = u"yu"
BG_CYR_TO_LAT_DICT[u"Я"] = u"YA"
BG_CYR_TO_LAT_DICT[u"я"] = u"ya"
# The following letters use the pre-2012 "Andreichin" system for lettering,
# because in the newest "Ivanov" system "a" and "y" translate to two Bulgarian
# letters and choosing to which one depends on the word and text context
# https://en.wikipedia.org/wiki/Romanization_of_Bulgarian
BG_CYR_TO_LAT_DICT[u"Ъ"] = u"Ă"
BG_CYR_TO_LAT_DICT[u"ъ"] = u"ă"
BG_CYR_TO_LAT_DICT[u"Ь"] = u"J"
BG_CYR_TO_LAT_DICT[u"ь"] = u"j"
# Transliterate from latin Bulgarian to cyrillic.
BG_LAT_TO_CYR_DICT = {y: x for x, y in iter(BG_CYR_TO_LAT_DICT.items())}
BG_LAT_TO_CYR_DICT.update({
u"ZH": u"Ж", u"Zh": u"Ж", u"zh": u"ж",
u"TS": u"Ц", u"Ts": u"Ц", u"ts": u"ц",
u"CH": u"Ч", u"Ch": u"Ч", u"ch": u"ч",
u"SH": u"Ш", u"Sh": u"Ш", u"sh": u"ш",
u"SHT": u"Щ", u"Sht": u"Щ", u"sht": u"щ",
u"YU": u"Ю", u"Yu": u"Ю", u"yu": u"ю",
u"YA": u"Я", u"Ya": u"Я", u"ya": u"я",
})
# Transliterate from Ukrainian
UA_CYR_TO_LAT_DICT = copy.deepcopy(RU_CYR_TO_LAT_DICT)
# Change mapping to match with Scientific Ukrainian
UA_CYR_TO_LAT_DICT[u"Г"] = u"H"
UA_CYR_TO_LAT_DICT[u"г"] = u"h"
UA_CYR_TO_LAT_DICT[u"Ж"] = u"Ž"
UA_CYR_TO_LAT_DICT[u"ж"] = u"ž"
UA_CYR_TO_LAT_DICT[u"И"] = u"Y"
UA_CYR_TO_LAT_DICT[u"и"] = u"y"
UA_CYR_TO_LAT_DICT[u"Х"] = u"X"
UA_CYR_TO_LAT_DICT[u"х"] = u"x"
UA_CYR_TO_LAT_DICT[u"Ч"] = u"Č"
UA_CYR_TO_LAT_DICT[u"ч"] = u"č"
UA_CYR_TO_LAT_DICT[u"Ш"] = u"Š"
UA_CYR_TO_LAT_DICT[u"ш"] = u"š"
UA_CYR_TO_LAT_DICT[u"Щ"] = u"Šč"
UA_CYR_TO_LAT_DICT[u"щ"] = u"šč"
UA_CYR_TO_LAT_DICT[u"Ю"] = u"Ju"
UA_CYR_TO_LAT_DICT[u"Я"] = u"Ja"
# Delete unused letters
del UA_CYR_TO_LAT_DICT[u"Ё"]
del UA_CYR_TO_LAT_DICT[u"ё"]
del UA_CYR_TO_LAT_DICT[u"Ъ"]
del UA_CYR_TO_LAT_DICT[u"ъ"]
del UA_CYR_TO_LAT_DICT[u"Ы"]
del UA_CYR_TO_LAT_DICT[u"ы"]
del UA_CYR_TO_LAT_DICT[u"Э"]
del UA_CYR_TO_LAT_DICT[u"э"]
# Update for Ukrainian letters
UA_CYR_TO_LAT_DICT.update({
u"Ґ": u"G", u"ґ": u"g",
u"Є": u"Je", u"є": u"je",
u"І": u"I", u"і": u"i",
u"Ї": u"Ï", u"ї": u"ï"
})
# Latin to Cyrillic
UA_LAT_TO_CYR_DICT = {y: x for x, y in iter(UA_CYR_TO_LAT_DICT.items())}
# Bundle up all the dictionaries in a lookup dictionary
TRANSLIT_DICT = {
'sr': { # Serbia
'tolatin': SR_CYR_TO_LAT_DICT,
'tocyrillic': SR_LAT_TO_CYR_DICT
},
'me': { # Montenegro
'tolatin': ME_CYR_TO_LAT_DICT,
'tocyrillic': ME_LAT_TO_CYR_DICT
},
'mk': { # Macedonia
'tolatin': MK_CYR_TO_LAT_DICT,
'tocyrillic': MK_LAT_TO_CYR_DICT
},
'ru': { # Russian
'tolatin': RU_CYR_TO_LAT_DICT,
'tocyrillic': RU_LAT_TO_CYR_DICT
},
'tj': { # Tajik
'tolatin': TJ_CYR_TO_LAT_DICT,
'tocyrillic': TJ_LAT_TO_CYR_DICT
},
'bg': { # Bulgarian
'tolatin': BG_CYR_TO_LAT_DICT,
'tocyrillic': BG_LAT_TO_CYR_DICT
},
'ua': { # Ukrainian
'tolatin': UA_CYR_TO_LAT_DICT,
'tocyrillic': UA_LAT_TO_CYR_DICT
}
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4866,
198,
2,
10934,
262,
48589,
3166,
284,
4779,
2676,
378,
39619,
269,
2417,
359,
291,
284,
3042,
259,
290,
7927,
25470,
13,
198,
198,
2,
770,
22155,
318,
284... | 1.677592 | 5,797 |
#####################################################################################
#####################################################################################
#### --------------------------------------------------------------------------- ####
#### ASSIGNING 1992 NLCD CATEGORIES to 2010 TRACT GEOGRAPHIES ####
#### --------------------------------------------------------------------------- ####
#####################################################################################
#####################################################################################
### RUN in ArcGIS PRO 2.8.1
##################################
## PREPARE WORKSPACE ##
##################################
## Import packages
import arcpy # need ArcGIS license
from arcpy import env
import os, zipfile, urllib # for downloading, unzipping files
from urllib import request
## Set workspace
base = "D:/HHUUD10"
env.workspace = base
## Set preferences
env.outputCoordinateSystem = arcpy.SpatialReference("USA Contiguous Albers Equal Area Conic") # coordinate system in use
env.extent = "MAXOF" # for raster operations
env.qualifiedFieldNames = False # good for joins
# Create temp folder
arcpy.management.CreateFolder(base, "temp")
path = os.path.join(base, "temp") # create path
# Establish Map
aprx = arcpy.mp.ArcGISProject("CURRENT")
# Create GDB
arcpy.management.CreateFileGDB(os.path.join(base, "gis_files"), "nlcd92.gdb")
############################################################
## DOWNLOAD/UNZIP 1992 NLCD CATEGORIES ##
############################################################
## Create list of URLs--available via the USGS (https://water.usgs.gov/GIS/metadata/usgswrd/XML/nlcde92.xml#stdorder)
urls = ["https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_1.zip",
"https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_2.zip",
"https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_3.zip",
"https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_4.zip"]
## List of output names
outputs = ["nlcd92_1", "nlcd92_2", "nlcd92_3", "nlcd92_4"]
## Run Loop downloading and unzipping raster files
for i, j in zip(urls, outputs):
zip_path, _ = urllib.request.urlretrieve(i, j) # retrieve files from URLs
with zipfile.ZipFile(zip_path, "r") as f:
f.extractall(path) # unzip files to temp folder created above
## NOTE: The above block of code can sometimes spit back errors. Re-running it from the top a second time worked for us.
############################################################
## RECLASSIFY & CONDUCT ZONAL HISTOGRAM ##
############################################################
## Change workspace
env.workspace = path
## Grab rasters in list
rasters = ["nlcde92_1/nlcde1.tif", "nlcde92_2/nlcde2.tif", "nlcde92_3/nlcde3.tif", "nlcde92_4/nlcde4.tif"]
outfolder = os.path.join(base, "gis_files", "nlcd92.gdb")
## Reclassify into 3-class Rasters (simplifies following step)
## NOTE(review): the remap string actually emits four values
## (11-12 -> 1 water; 21-22, 85 -> 2 developed; 23 -> 3; 25-84, 86-99 -> 4),
## so "3-class" above may be stale — confirm which is authoritative.
for r in rasters:
  output = os.path.join(outfolder, "nlcd" + r[15:16] + "_recl") # r[15:16] is the tile digit ("nlcde92_1/nlcde1.tif" -> "1"); name (e.g.) "nlcd1_recl"
  arcpy.gp.Reclassify_sa(r, "value", '11 12 1;21 22 2;23 3; 25 84 4;85 2;86 99 4', output, "NODATA") # for codes, see below:
## 1992 NLCD Codes Specified in Reclassify Step (source: https://water.usgs.gov/GIS/metadata/usgswrd/XML/nlcde92.xml#stdorder):
## ---- Water (1) ---- ##
# 11 - Open Water
# 12 - Perennial Ice/Snow
## ---- "Developed" (2) ---- ##
# 21 - Low Intensity Residential
# 22 - High Intensity Residential
# 23 - Commercial/Industrial/Transportation
# 85 - Urban/Recreational Grasses
## ---- Other (3) ---- ##
# All other numbers thru 99
## Prepare Zonal Histogram
env.workspace = outfolder # change workspace to gdb just created
rasters = arcpy.ListRasters() # rasters created above
t10 = os.path.join(base, "gis_files/database1.gdb/t10") # grab t10 polygon from database1.gdb
## Do Zonal Histogram (output as tables in tables folder)
for r in rasters:
output = r[:5] + "_zh" # outputs: rast1_zh, rast2_zh, etc.
arcpy.sa.ZonalHistogram(t10, "GISJOIN", r, output, "") # zonal histogram
## DELETE TEMP FOLDER
arcpy.management.Delete(path)
## Clear shapefiles from map display
for m in aprx.listMaps():
for lyr in m.listLayers("nlcd*"):
m.removeLayer(lyr)
## Clear tables from map display
for m in aprx.listMaps():
for tab in m.listTables("nlcd*"):
m.removeTable(tab)
| [
201,
198,
29113,
29113,
14468,
4242,
2,
201,
198,
29113,
29113,
14468,
4242,
2,
201,
198,
4242,
16529,
32284,
1303,
21017,
201,
198,
4242,
220,
220,
220,
220,
220,
220,
220,
220,
220,
24994,
3528,
15871,
9768,
399,
5639,
35,
327,
6158... | 2.767148 | 1,662 |
import os
import shlex
import subprocess
from typing import List
import shutil
from jinja2 import Environment, FileSystemLoader
from pesto.cli import PROCESSING_FACTORY_PATH
from pesto.cli.core.build_config import BuildConfig
from pesto.cli.core.utils import PESTO_LOG
| [
11748,
28686,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
6738,
19720,
1330,
7343,
198,
11748,
4423,
346,
198,
198,
6738,
474,
259,
6592,
17,
1330,
9344,
11,
9220,
11964,
17401,
198,
198,
6738,
28064,
78,
13,
44506,
1330,
41755,... | 3.358025 | 81 |
import unittest
from unittest.mock import MagicMock
import os
import cv2
import numpy as np
from subt.octomap import Octomap, data2maplevel, frontiers
# vim: expandtab sw=4 ts=4
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
198,
11748,
28686,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
13284,
13,
38441,
296,
499,
1330,
2556,
296,
499,... | 2.815385 | 65 |
from ..waterframe import WaterFrame
def read_df(df, index_time, index_depth=None, metadata=None, vocabulary=None):
    """
    Get a WaterFrame from a pandas DataFrame.

    Parameters
    ----------
        df: Pandas.DataFrame
        index_time: str
            Column with the TIME index
        index_depth: str
            Column with the DEPTH index. If index_depth is None, DEPTH = 0.
            NOTE(review): when index_depth is given, this function never
            renames that column to 'DEPTH'; the final set_index call then
            requires the column to already be named 'DEPTH' — confirm.
        metadata: dict, optional
            Metadata placed on the WaterFrame. Defaults to a fresh empty dict.
        vocabulary: dict, optional
            Vocabulary placed on the WaterFrame. Defaults to a fresh empty dict.

    Returns
    -------
    wf: mooda.WaterFrame
    """
    _df = df.copy()
    _df.reset_index(inplace=True)

    wf = WaterFrame()
    # Fix: the previous defaults (metadata={}, vocabulary={}) were mutable
    # default arguments, shared across calls — one caller's mutations of
    # wf.metadata leaked into every later call. Use None sentinels instead.
    wf.metadata = metadata if metadata is not None else {}
    wf.vocabulary = vocabulary if vocabulary is not None else {}

    if index_depth is None:
        _df['DEPTH'] = 0

    _df.rename(columns={index_time: 'TIME'}, inplace=True)

    # Add a <column>_QC companion column (initialised to 0) for every data
    # column that does not already have one and is not itself a QC column.
    keys = _df.keys()
    for key in keys:
        if key.endswith('_QC'):
            continue
        if f'{key}_QC' in keys:
            continue
        else:
            _df[f'{key}_QC'] = 0

    # Reindex on (DEPTH, TIME)
    _df.set_index(['DEPTH', 'TIME'], drop=True, inplace=True)
    wf.data = _df

    return wf
| [
6738,
11485,
7050,
14535,
1330,
5638,
19778,
198,
198,
4299,
1100,
62,
7568,
7,
7568,
11,
6376,
62,
2435,
11,
6376,
62,
18053,
28,
14202,
11,
20150,
34758,
5512,
25818,
34758,
92,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.147117 | 503 |
#!/usr/bin/env python
# pylint: disable=too-complex,no-name-in-module,import-error,relative-import,missing-returns-doc,too-many-instance-attributes,too-many-branches,too-many-statements,too-many-arguments,too-many-locals,no-member,invalid-name
"""Appetite Main.
Parent appetite class
"""
import os
import sys
import traceback
import csv
import shutil
import tarfile
import json
import time
from distutils.dir_util import copy_tree
from multiprocessing import Pool
import argparse
import modules.logger as Logger
import modules.consts as Consts
import modules.conn_manager as ConnManager
import modules.app_versioning as AppVersioning
import modules.helpers as Helpers
import modules.appetite_args as AppetiteArgs
from modules.appetite_core import AppetiteHosts, AppetiteHost
from modules.repo_manager import RepoManager
from modules.deployment_methods import DeploymentMethodsManager
def parse_args():
    """Build the CLI parser from AppetiteArgs.ARGS_PARAMS and parse sys.argv.

    :return: args
    """
    cli_parser = argparse.ArgumentParser(
        description='Deploy apps based on the manifest')

    # Each entry supplies the positional and keyword arguments for one
    # add_argument() call.
    for param_def in AppetiteArgs.ARGS_PARAMS:
        positional = param_def['args']
        keywords = param_def['kvargs']
        cli_parser.add_argument(*positional, **keywords)

    return cli_parser.parse_args()
class Appetite(object):
    """Main appetite class"""
    # Stores and manages host and their applications
    appetite_hosts = AppetiteHosts()
    run_check = Helpers.RunSingleInstance()
    # NOTE(review): exposed as a read-only property but performs side effects
    # (mutates self.appetite_hosts and self.args) and returns None; looks
    # like it should be a regular method — confirm before changing.
    @property
    def populate_apps_to_hosts(self):
        """Parses the manifest and adds apps to hosts
        :return: None
        """
        Helpers.check_file(self.manifest_path)
        with open(self.manifest_path, 'rU') as csvfile:
            mreader = csv.reader(csvfile, delimiter=',', quotechar='"')
            first_row = True
            # Go though each app
            for row in mreader:
                # Remove header if it exists
                if first_row:
                    # Defines column headers in manifest
                    column_headers = {col_name: -1
                                      for col_name in
                                      Consts.DEFAULT_COLUMN_HEADER}
                    # Get indexes for headers from the first row
                    num_columns = len(row)
                    for k in column_headers:
                        value_index = next((index for index in range(0, num_columns)
                                            if row[index].lower() == k), -1)
                        if value_index < 0:
                            Logger.errorout("Manifest header is missing", header=k)
                        column_headers[k] = value_index
                    first_row = False
                    continue
                if len(row) > 1:
                    row_values = Helpers.create_obj({
                        "commit_id": row[column_headers['commitid']],
                        "app_clean": self.deployment_manager.name_filter.sub("", row[
                            column_headers['application']]),
                        "app": row[column_headers['application']],
                        "deployment": row[column_headers['deploymentmethod']],
                        "white_list": row[column_headers['whitelist']].split(','),
                        "black_list": row[column_headers['blacklist']].split(',')
                    })
                    app_folder = os.path.join(self.apps_folder, row_values.app)
                    if self.args.build_test_apps:
                        # for testing - create test folders for apps
                        if not os.path.exists(app_folder):
                            Helpers.create_path(os.path.join(app_folder, "folder"), True)
                            app_test_file = "%s/%s.txt" % (app_folder, row_values.app_clean)
                            with open(app_test_file, 'wb') as touch:
                                touch.write("")
                    # Go through each host and see
                    # if the app is needed for the host
                    for host in self.appetite_hosts:
                        self.add_to_host(host, row_values)
                        self.bootstrap_firstrun_hosts(host, row_values)
        # Bootstrapping hosts forces a single connection when brakes are on.
        if self.args.new_host_brakes and next((True for host in self.appetite_hosts if host.bootstrap), False):
            self.args.num_connections = 1
        if self.appetite_hosts.is_empty():
            Logger.errorout("Manifest misconfiguration, "
                            "no apps for any hosts")
    def bootstrap_firstrun_hosts(self, host, row_values):
        """Function used to bootstrap apps on the first run"""
        first_run = self.is_bootstrap(host)
        if first_run:
            # For the special case when instance is new,
            # start up apps have to be included
            if first_run['update_method'] == row_values.deployment:
                if not host.bootstrap:
                    host.bootstrap = True
                    Logger.info("Bootstrapping host", host=host.hostname)
                # get host name to check againt
                check_hostname = next((host.hostname for host in self.appetite_hosts
                                       if host.app_class == first_run['app_class']), "")
                return self.add_to_host(host, row_values, check_hostname,
                                        first_run['ref_method'],
                                        True)
        return False
    def is_bootstrap(self, host):
        """Check if host is bootstrapped

        Returns the matching startup_bootstrap entry (or None) when the
        host's manifest is missing and --firstrun is set; implicitly returns
        None otherwise.
        """
        # Only run if the manifest is not found on the host (new app instance)
        if self.args.firstrun and not host.manifest_found:
            first_run = next((su_bootstrap for su_bootstrap in
                              self.deployment_manager.startup_bootstrap if
                              su_bootstrap['ref_class'] == host.app_class and
                              Helpers.check_name_formatting(
                                  self.name_formatting, host.hostname)), None)
            # NOTE(review): restart is flagged even when no matching bootstrap
            # entry was found (first_run is None) — confirm this is intended.
            host.restart = True
            return first_run
    def add_to_host(self, host, row_values, check_hostname=None, deployment=None, is_firstrun=False):
        """Check and add app to host

        Returns True when the (white/black-listed) host matched and the app
        was added, False otherwise.
        """
        check_hn = check_hostname if check_hostname else host.hostname
        if Helpers.check_host(check_hn,
                              row_values.black_list,
                              row_values.white_list):
            self.add_app(host, row_values, deployment, is_firstrun, check_hn)
            return True
        return False
    def add_app(self, host, row_values, deployment, is_firstrun, ref_hostname=None):
        """Add app to host"""
        host.add_app(self.args.refname,
                     AppetiteHost.create_app(
                         self.repo_manager,
                         self.deployment_manager,
                         row_values.app,
                         row_values.app_clean,
                         deployment if deployment else row_values.deployment,
                         row_values.commit_id,
                         ref_hostname if ref_hostname else host.hostname,
                         is_firstrun))
    def create_host_directories_and_tar(self):
        """Main packaging function
        Works in 3 parts:
        1. Validate app data and configurations
        2. Create tmp directories for each host with loaded apps and manifest
        3. Package (tar) up host tmp directories for distribution

        Returns True when at least one host produced changes (a tarball),
        exits the process with status 1 when validation errors were found.
        """
        Helpers.delete_path(self.tmp_folder)
        Helpers.create_path(self.tars_folder, True)
        self.repo_manager.set_commit_id()
        master_commit_log = self.repo_manager.get_commit_log()
        errors_found = False
        changes_found = False
        for host in self.appetite_hosts:  # pylint: disable=too-many-nested-blocks
            # Per host build apps folder and tar up based on class
            hostname = host.hostname
            apps = host.get_apps(self.args.refname)
            tarname = host.tarname
            apps = sorted(apps, key=lambda app: app.commit_id)
            tmp_hostname_dir = os.path.join(self.hosts_folder, hostname)
            tmp_hostname_meta = os.path.join(tmp_hostname_dir, Consts.META_DIR)
            apps_meta = []
            if len(apps) < 1:
                Logger.warn("Host with no apps", hostname=hostname)
                continue
            # Parse the remote meta file from the host
            # This file might not exist
            remote_meta_file = host.local_meta_file
            remote_metas_loaded = False
            if os.path.exists(remote_meta_file):
                try:
                    with open(remote_meta_file) as remote_data_file:
                        remote_metas_master = json.load(remote_data_file)
                        remote_metas_content = remote_metas_master['content'] \
                            if 'content' in remote_metas_master else remote_metas_master
                        remote_metas = [
                            AppetiteHost.create_app_from_object(self.repo_manager,
                                                                self.deployment_manager,
                                                                meta_data)
                            for meta_data in remote_metas_content]
                        remote_metas_loaded = True
                except Exception as exception:
                    # NOTE: `exception.message` is a Python 2-only attribute.
                    Logger.error("Problems loading meta file",
                                 error=exception.message,
                                 path=remote_meta_file)
            elif not self.args.dryrun:
                Logger.warn("Local version of remote meta not found", file=remote_meta_file)
            ordered_unique_apps = sorted(list(set(apps)), key=lambda single_app:
                                         (single_app.name,
                                          single_app.commit_id,
                                          single_app.method_name))
            # Warn (but do not fail) on duplicate app entries for this host.
            for iapp in ordered_unique_apps:
                app_occurrences = apps.count(iapp)
                if app_occurrences > 1:
                    Logger.warn("Dup app found", host=host.hostname,
                                app_info=iapp.app_key,
                                occurences=app_occurrences)
            # Validate app data and configurations
            # Go through the apps and checks to see if there are any errors
            # This is where the remote meta is compared to the newly generated
            # lists of apps from the manifest
            for app in apps:
                raw_app_path = os.path.join(self.apps_folder, app.name)
                # Check the commit Id for problems
                if app.commit_id:
                    self.repo_manager.set_commit_id(app.commit_id)
                else:  # pylint: disable=else-if-used
                    if self.args.strict_commitids:
                        Logger.error("Application with missing commit Id", hostname=hostname,
                                     app=app.name)
                        errors_found = True
                        continue
                    else:
                        app._commit_id = master_commit_log['app_commit_id']  # pylint: disable=protected-access
                        self.repo_manager.set_commit_id(app.commit_id)
                # Checks if app listed in the manifest
                # exists with the correct commit id
                if Helpers.check_path(raw_app_path):
                    meta_to_append = None
                    app.refresh_version_info(self.args.refname, Consts.META_APP_UNCHANGED)
                    remote_meta = None
                    # Check to see what has changed
                    if remote_metas_loaded:
                        # Searches remote meta to see if application already exists
                        remote_meta = next((rmeta for rmeta in remote_metas
                                            if app.check_names(rmeta)), None)
                        if remote_meta:
                            # If app does exist on system, have the commit ids changed
                            if remote_meta.commit_id != app.commit_id:
                                meta_to_append = app.set_status_changed()
                            else:
                                # meta has not changed so use existing meta
                                meta_to_append = app.clone
                                meta_to_append.update_app_version(app)
                            # to track if an app is removed from the remote meta
                            remote_metas.remove(remote_meta)
                    if not meta_to_append:
                        # There is no remote meta so all files should be added
                        meta_to_append = app.set_status_added()
                    if remote_meta and meta_to_append:
                        meta_outcome = Helpers.debug_app_versions(meta_to_append,
                                                                  remote_meta,
                                                                  meta_to_append.status)
                        Logger.debug("Check meta logic",
                                     outcome=meta_outcome)
                        if meta_to_append.has_changed:
                            Logger.info("App change", logic=meta_outcome)
                    apps_meta.append(meta_to_append)
                else:
                    Logger.error("Missing application",
                                 hostname=hostname,
                                 app=app.name,
                                 path=raw_app_path)
                    continue
            if remote_metas_loaded and len(remote_metas) > 0:
                # Any apps left in the remote meta do not exist in the current
                # manifest and should be deleted
                delete_list = []
                for deleted_app in remote_metas:
                    if deleted_app.method_info:
                        deleted_app.set_status_deleted()
                        # Added logic check to catch method changes
                        added_app_found = next((app for app in apps_meta
                                                if app.status == Consts.META_APP_ADDED
                                                and app.name == deleted_app.name and
                                                app.method_info['path'] ==
                                                deleted_app.method_info['path']), None)
                        if added_app_found:
                            added_app_found.set_status_changed()
                        else:
                            delete_list.append(deleted_app)
                    else:
                        Logger.error(
                            "Problems with method info for deleted app.",
                            hostname=hostname, app=deleted_app.name)
                apps_meta += delete_list
            # Only do something if there has been a change
            if len([app for app in apps_meta if not app.is_unchanged]) < 1:
                continue
            # No point continuing if there is no connection to the host
            if not self.check_host_connection(host):
                continue
            # Clean command lines for auth params
            # This data is ingested so creds should be removed
            # apps_meta = [updated_app.clone for updated_app in apps_meta]
            if not self.args.disable_logging:
                for updated_app in apps_meta:
                    Logger.log_event(updated_app.to_dict)
            # Applications that actually needs to be updated
            tar_apps = sorted([updated_app for updated_app in apps_meta if updated_app.updated],
                              key=lambda tar_app: tar_app.app)
            use_templating = self.template_values and self.args.templating
            # Checking will allow templating otherwise will skip steps
            Helpers.create_path(os.path.join(tmp_hostname_meta, Consts.HOST_LOGS_FOLDER_NAME), True)
            if len(tar_apps) > 0:
                # All error checks have been done above, build out
                # the hosts directory and tar up
                for updated_app in tar_apps:
                    app_path = os.path.join(tmp_hostname_dir, updated_app.method_info['path'])
                    Helpers.create_path(app_path, True)
                    raw_app_path = os.path.join(self.apps_folder, updated_app.name)
                    self.repo_manager.set_commit_id(updated_app.commit_id)
                    if updated_app.update_method_is_copy:
                        app_dest = os.path.join(app_path, updated_app.app_clean)
                    else:
                        app_dest = app_path
                    copy_tree(raw_app_path, app_dest)
                    lookups_inclusion_location = os.path.join(app_dest,
                                                              self.deployment_manager.
                                                              inclusion_filename)
                    ignore_dir = os.path.join(app_dest, Consts.TMP_IGNORE_DIR)
                    # Ignore files/folders set in the global configurations
                    if self.args.install_ignore:
                        content_ignored_results = Helpers.move_regexed_files(self.args.install_ignore.split(';'),
                                                                             app_dest,
                                                                             ignore_dir)
                        files_included = content_ignored_results['files_moved']
                        if len(files_included) > 0:
                            Logger.error("Globally these files should not exist in the App. "
                                         "The files have been removed from the install.",
                                         files=files_included,
                                         hostname=hostname,
                                         app=updated_app.name)
                        # Users should not have the capability to include files from the
                        # global ignore.
                        Helpers.delete_path(ignore_dir)
                    # Defined folders/files are to move out of application.
                    # This is defined in the deploymentmethods.conf
                    # If an app is installed for the first time, all files should be included
                    if 'install_ignore' in updated_app.method_info and not updated_app.is_added:
                        Helpers.move_regexed_files(updated_app.method_info['install_ignore'],
                                                   app_dest,
                                                   ignore_dir)
                    # If there is a inclusion file, include files back into app.
                    # This is defined on a per app basis
                    if os.path.isfile(lookups_inclusion_location):
                        with open(lookups_inclusion_location, "r") as f:
                            lines = [l.strip() for l in f.readlines()]
                        lookup_inclusion_results = Helpers.move_regexed_files(lines,
                                                                              ignore_dir,
                                                                              app_dest)
                        if lookup_inclusion_results['errors_found']:
                            Logger.error("Lookup inclusion error found",
                                         paths=lookup_inclusion_results['path_errors'],
                                         hostname=hostname,
                                         app=updated_app.name)
                            # Problem with host inclusion,
                            # move to next host
                            continue
                        updated_app.method_info['inclusions'] = \
                            lookup_inclusion_results['filed_moved']
                        # Update objects with inclusions
                        updated_app.copy_value_to_method_info('inclusions', apps_meta)
                        os.remove(lookups_inclusion_location)
                    Helpers.delete_path(ignore_dir)
                    if use_templating and not updated_app.method_info['skip_templating']:
                        # Can template based on vars from templated
                        # values, hosts vars and app vars
                        Helpers.template_directory(app_dest,
                                                   [self.template_values,
                                                    host.to_dict,
                                                    updated_app.to_dict])
                    # Should only change access and create version file if a whole app is copied
                    if updated_app.update_method_is_copy:
                        for host_path, host_dir, host_files in os.walk(app_dest):  # pylint: disable=unused-variable
                            for host_file in host_files:
                                # Splunk apps can have active binaries in multiple languages
                                # This is a catch all to make sure apps have all the required
                                # permissions.
                                # NOTE: 0755 is a Python 2 octal literal (0o755 in Python 3).
                                chmod = 0755
                                os.chmod(os.path.join(host_path, host_file), chmod)
                        with open(os.path.join(app_dest, Helpers.get_app_version_filename()), "w") as f:
                            f.write(updated_app.to_json)
                        AppVersioning.create_app_version(app_dest,
                                                         updated_app.
                                                         commit_log['app_abbrev_commit_id'])
            apps_distro = Helpers.content_wrapper(apps_meta,
                                                  Consts.META_CURRENT,
                                                  hostname,
                                                  self.track)
            # Meta file used as source of truth on instance
            master_meta = self.create_meta_files(tmp_hostname_meta, '', apps_distro)
            # check be used to update and test manifest changes locally
            if self.args.dryrun:
                Helpers.create_path(host.local_meta_file)
                shutil.copy(master_meta, host.local_meta_file)
            # Always want clean logs ingested
            selected_apps = Helpers.select_and_update_apps(apps_meta,
                                                           Consts.META_CURRENT,
                                                           False)
            self.create_meta_log(tmp_hostname_meta, '', selected_apps, Helpers.get_utc())
            host.updates = Helpers.content_process(apps_meta,
                                                   Consts.META_UPDATED,
                                                   hostname,
                                                   self.track,
                                                   True)
            # Create the meta change file
            self.create_meta_files(tmp_hostname_meta,
                                   '_update',
                                   Helpers.content_convert(host.updates))
            # Clean updates file for logging
            selected_apps = Helpers.select_and_update_apps(apps_meta,
                                                           Consts.META_UPDATED,
                                                           True)
            self.create_meta_log(tmp_hostname_meta, '_update', selected_apps, Helpers.get_utc())
            Logger.info("Changes found", updates=Helpers.content_wrapper(apps_meta,
                                                                         Consts.META_UPDATED,
                                                                         hostname,
                                                                         self.track,
                                                                         True))
            # Package (tar) up host tmp directories for distribution
            tar = tarfile.open(os.path.join(self.tars_folder, "%s.tar.gz" % tarname), "w:gz")
            tar.add(tmp_hostname_dir, arcname=os.path.basename(self.base_name))
            tar.close()
            changes_found = True
        if errors_found:
            sys.exit(1)
        self.repo_manager.set_commit_id()
        return changes_found
    @property
    def track(self):
        """Reference to Track info
        The track info contains the uuid, datatime and commit id of the current job
        """
        return self.repo_manager.track
    def print_track_info(self, changed, track=None):
        """Pretty print track info"""
        ref_track = track if track else self.track
        ref_track['changed'] = changed
        print(json.dumps(ref_track, sort_keys=True, indent=4,
                         separators=(',', ': ')))
    def update_manifests(self, hosts=None, check_if_exists=False):
        """Loads local manifest
        Tries to get the remote manifest from the host
        """
        if not hosts:
            hosts = self.appetite_hosts.hosts
        if isinstance(hosts, AppetiteHost):
            hosts = [hosts]
        results = self._thread_hosts('update_manifest', hosts, check_if_exists)
        # Since threading does not share variables, the results are copied back into the
        # host objects
        if results:
            for i, host in enumerate(hosts):
                if not host.from_dict(results[i]):
                    Logger.warn("Threading host mismatch")
    def update_manifest(self, host, check_if_exists=False):
        """Loads local manifest for a host for local host"""
        host.manifest_found = os.path.isfile(host.local_meta_file)
        if not check_if_exists or not host.manifest_found:
            if self.check_host_connection(host):
                host.manifest_found = ConnManager.get_json_file(host, self.meta_remote_file,
                                                                host.local_meta_file, True)
        return host.get_threaded_values
    def create_meta_filename(self, host_meta_path, postfix, extension, timestamp=None):
        """create file name for the meta content"""
        if timestamp:
            filtered_timestamp = Helpers.filter_timestamp(timestamp)
            return "%s%s_%s.%s" % (os.path.join(host_meta_path,
                                                Consts.HOST_LOGS_FOLDER_NAME,
                                                self.meta_name),
                                   postfix, filtered_timestamp, extension)
        else:
            # NOTE(review): host_meta_path is passed to os.path.join twice,
            # duplicating the directory in the result — confirm intended.
            return "%s%s.%s" % (os.path.join(host_meta_path,
                                             host_meta_path,
                                             self.meta_name),
                                postfix, extension)
    def create_meta_files(self, host_meta_path, postfix, content, timestamp=None):
        """Creates a meta json file
        Create a single json file with a host meta object
        """
        created_meta = self.create_meta_filename(host_meta_path, postfix, 'json', timestamp)
        with open(created_meta, "w") as f:
            if timestamp:
                f.write(json.dumps(content))
            else:
                f.write(json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')))
        return created_meta
    def create_meta_log(self, host_meta_path, postfix, content, timestamp=None):
        """Creates a meta json log file
        Create file with multiple entries for content.
        This is used for logging
        """
        created_meta_log = self.create_meta_filename(host_meta_path, postfix, 'log', timestamp)
        with open(created_meta_log, "a") as f:
            for entry in content:
                f.write("%s\n" % entry.to_json)
        return created_meta_log
    def update_hosts(self):
        """Update each host
        Installs apps and run commands to each host.
        """
        # When running check, no connections to host will be used
        changed_hosts = [host for host in self.appetite_hosts if host.updates]
        # Lists Sites
        host_sites = list(set([host.site for host in self.appetite_hosts]))
        host_sites.sort()
        # Organize scripts to run in order
        for script_seq in Consts.DM_COMMANDS_SEQUENCE:
            for boot_group in self.boot_ordering:
                host_group = [host for host in changed_hosts if host.app_class in boot_group]
                # If site override is enabled then do all hosts
                if self.args.site_override:
                    if len(host_group) > 0:
                        Logger.info("Starting script run hosts", site='all', boot_group=boot_group,
                                    script_level=script_seq)
                        self._thread_hosts('update_host', host_group, script_seq)
                    continue
                # By default will use sites to break up installs
                for host_site in host_sites:
                    host_site_group = [host for host in host_group if host.site == host_site]
                    if len(host_site_group) > 0:
                        Logger.info("Starting script run hosts", site=str(host_site), boot_group=boot_group,
                                    script_level=script_seq)
                        self._thread_hosts('update_host', host_site_group, script_seq)
    def _thread_hosts(self, update_funct, hosts, *args):
        """Helper function to set up threading for hosts"""
        # If single thread/host is used, no threading is needed
        # NOTE(review): the serial path returns None while the pooled path
        # returns a results list; callers (update_manifests) guard for this.
        if self.args.num_connections == 1 or len(hosts) < 2:
            for host in hosts:
                Helpers.call_func((self, update_funct, host) + args)
            return
        host_pool = Pool(processes=self.args.num_connections)
        iter_hosts = [(self, update_funct, host) + args for host in hosts]
        results = host_pool.map(Helpers.call_func, iter_hosts)
        host_pool.close()
        host_pool.join()
        return results
    @staticmethod
    def check_host_connection(host):
        """Checks to see if appetite can connect to the host

        Caches the result on host.can_connect so the connection is only
        probed once per host.
        """
        if host.can_connect is None:
            host.can_connect = ConnManager.check_connection(host)
            if not host.can_connect:
                Logger.error("Can not connect to host",
                             host=host.hostname)
        return host.can_connect
    def update_host(self, host, update_method):
        """Update function for host
        Separate function used to update a single host
        """
        if not self.check_host_connection(host):
            return
        commands = []
        self.template_values = self.appetite_hosts.build_meta(self.template_values)
        # Run commands if specified
        if len(host.updates[update_method]) > 0:
            commands = self.ssh_app_commands.enhance_commands(host,
                                                              host.updates[update_method], [self.template_values,
                                                                                            host.to_dict])
        not_update_command = update_method != Consts.DM_COMMANDS_SEQUENCE[1]
        self.run_commands(commands, host, not_update_command, True)
        # If just running a script, should ignore all function related to app deployment
        if not_update_command:
            return
        apps = host.updates['content']
        # Delete apps
        deleted_apps = list(set([app.path(self.args.app_folder) for app in apps if
                                 app.method_info['delete_first'] or
                                 app.status == Consts.META_APP_DELETED]))
        for delete_app in deleted_apps:
            ConnManager.delete(host, delete_app, True)
        # Clear old version files
        changed_apps = list(set([app.path(self.args.app_folder) for app in apps if
                                 app.status == Consts.META_APP_CHANGED]))
        for changed_app in changed_apps:
            ConnManager.clear_files(host, changed_app,
                                    "%s*" % Consts.VERSIONS_FILENAME, True)
        # Install apps and new manifests
        ConnManager.untar(host, self.base_location, True)
        # In case the command already has a restart in it
        restart_notfound = next((False for command in commands if command['command'].name == "restart"), True)
        # Restart App if needed
        if (host.updates["restart"] or host.restart) and restart_notfound:
            commands.append(self.ssh_app_commands.enhance_commands(host,
                                                                   [ConnManager.COMMAND_RESTART_NAME], [self.template_values,
                                                                                                        host.to_dict])[0])
        self.run_commands(commands, host)
        # Get latest manifest since host has been updated
        self.update_manifest(host)
        # Clean up old manifest files
        ConnManager.rotate_logs(host, self.meta_remote_logs_folder,
                                Consts.DEFAULT_LOG_RETENTION, True)
    def run_commands(self, commands, host, run_commands=False, pre_install=False):
        """Run listed commands"""
        for command in commands:
            command_object = command['command']
            if run_commands or command_object.pre_install == pre_install:
                # Sleep after a successful command only outside dry runs.
                if self.ssh_app_commands.run_command(command, host) and not self.args.dryrun \
                        and command_object.delay > 0:
                    time.sleep(command_object.delay)
def call_func(args):
    """Invoke ``args['func_ref']`` with ``args['host']`` as its only argument."""
    func = args['func_ref']
    target_host = args['host']
    func(target_host)
# Script entry point.
# NOTE(review): no `main()` is defined in the visible portion of this file —
# confirm it exists elsewhere in the module, otherwise this raises NameError.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
279,
2645,
600,
25,
15560,
28,
18820,
12,
41887,
11,
3919,
12,
3672,
12,
259,
12,
21412,
11,
11748,
12,
18224,
11,
43762,
12,
11748,
11,
45688,
12,
7783,
82,
12,
15390,
11,
1882... | 1.865093 | 18,257 |
# Expected ARP-neighbor parse result, keyed by interface then neighbor IP.
# Built from a compact table of (interface, ip, age, mac, origin) rows;
# "protocol" and "type" are constant across all entries.
_NEIGHBOR_ROWS = (
    ("GigabitEthernet2.390", "10.12.90.1", "-", "fa16.3eff.9c9e", "static"),
    ("GigabitEthernet2.390", "10.12.90.2", "139", "fa16.3eff.5a76", "dynamic"),
    ("GigabitEthernet2.410", "10.12.110.1", "-", "fa16.3eff.9c9e", "static"),
)

expected_output = {"interfaces": {}}
for _intf, _ip, _age, _mac, _origin in _NEIGHBOR_ROWS:
    _neighbors = expected_output["interfaces"].setdefault(
        _intf, {"ipv4": {"neighbors": {}}})["ipv4"]["neighbors"]
    _neighbors[_ip] = {
        "age": _age,
        "ip": _ip,
        "link_layer_address": _mac,
        "origin": _origin,
        "protocol": "Internet",
        "type": "ARPA",
    }
| [
40319,
62,
22915,
796,
1391,
198,
220,
220,
220,
366,
3849,
32186,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
38,
328,
29968,
36,
490,
3262,
17,
13,
25964,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 1.352156 | 974 |
from webpie import WPApp, WPHandler
# Start a webpie HTTP application server listening on port 8080.
# NOTE(review): `Handler` is never defined in this module and is not among
# the imports above (only WPHandler is imported) — as written this raises
# NameError. Presumably a `class Handler(WPHandler): ...` definition is
# missing here; confirm against the original project.
WPApp(Handler).run_server(8080)
| [
6738,
3992,
21749,
1330,
370,
4537,
381,
11,
370,
11909,
392,
1754,
198,
198,
54,
4537,
381,
7,
25060,
737,
5143,
62,
15388,
7,
1795,
1795,
8,
198
] | 2.464286 | 28 |
# Read n and print every prime in [2, n] using the sieve of Eratosthenes.
n = int(input("n="))
# Fix: the original `for a in range(n+1): continue` left `a` bound to the
# final int of the range, so the `a[i]` subscripts below raised TypeError.
# The sieve needs `a` to be the list [0, 1, ..., n].
a = list(range(n + 1))
lst = []  # collected primes
i = 2
while i <= n:
    if a[i] != 0:
        # a[i] survived every earlier pass, so it is prime
        lst.append(a[i])
        # cross out all multiples of i (i itself is already recorded)
        for j in range(i, n + 1, i):
            a[j] = 0
    i += 1
print(lst)
77,
796,
493,
7,
15414,
7203,
77,
2625,
4008,
628,
198,
1640,
257,
287,
2837,
7,
77,
10,
16,
2599,
198,
220,
220,
220,
2555,
198,
75,
301,
796,
17635,
198,
72,
796,
362,
198,
4514,
1312,
19841,
299,
25,
198,
220,
220,
220,
611,
... | 1.702479 | 121 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
# Translatable adjective choices; the key doubles as the source string.
ADJECTIVES = {
    word: _(word)
    for word in ('colorful', 'hairy', 'wet', 'flying', 'burning', 'dirty')
}

# Translatable noun choices. The display label is the key with underscores
# replaced by spaces (only 'mobile_phone' -> 'mobile phone' differs).
NOUNS = {
    name: _(name.replace('_', ' '))
    for name in (
        'cat', 'snake', 'car', 'house', 'computer', 'ship', 'guitar',
        'tree', 'mobile_phone', 'triangle', 'bird', 'fish', 'mug', 'clock',
        'flower', 'lock', 'bicycle', 'chair', 'backpack', 'umbrella',
        'shoe', 'hat', 'spider',
    )
}

# Adjective/noun pairs that must never be combined: living things (and
# flowers) may not be 'burning'.
BANNED_COMBINATIONS = [
    ('burning', noun)
    for noun in ('cat', 'snake', 'bird', 'fish', 'flower', 'spider')
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
628,
198... | 2.241237 | 485 |
from rest_framework import serializers
from quotes.models import Quote
from quotes.models import QuoteCategory | [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
13386,
13,
27530,
1330,
19879,
198,
6738,
13386,
13,
27530,
1330,
19879,
27313,
220
] | 4.666667 | 24 |
# FIXME: my metric computation breaks when the ground truth contains no true 1 labels.
"""
Time-series train/test splitting utilities (sliding-window X/y generation).
"""
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
# Run on the first CUDA GPU when available, otherwise fall back to the CPU.
# NOTE(review): `device` is not referenced by ts_train_test_split below;
# presumably consumed by model code elsewhere in this module — confirm.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def ts_train_test_split(df, len_seq,
points_ahead=1, gap=0, shag=1, intersection=True,
test_size=None,train_size=None, random_state=None, shuffle=True, shuffle_target='only_train'):
"""
Функция которая разбивает временной ряд на трейн и тест выборки
Временной ряд здесь это вообще вся история
Функционал позволяет разбивать ....
Parameters
----------
df : pd.DataFrame
Array of shape (n_samples, n_features) with timestamp index.
points_ahead : int, default=0
Сколько точек вперед прогнозируем, отражается в y
gap : int, default=0
Сколько точек между трейном и тестом. Условно говоря,
если крайняя точка train а это t, то первая точка теста t + gap +1.
Параметр создан, чтобы можно было прогнозировать одну точку через большой
дополнительный интервал времени.
shag : int, default=1.
Шаг генерации выборки. Если первая точка была t у 1-ого сэмпла трейна,
то у 2-ого сэмла трейна она будет t + shag, если intersection=True, иначе
тоже самое но без пересечений значений ряда.
intersection : bool, default=True
Наличие значений ряда (одного момента времени) в различных сэмплах выборки.
test_size : float or int or timestamp for df, or list of timestamps, default=0.25.
Может быть 0, тогда вернет значения X,y
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split.
If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25. *
*https://github.com/scikit-learn/scikit-learn/blob/95119c13a/sklearn/model_selection/_split.py#L2076
If timestamp for df, for X_test we will use set from df[t:] **
If list of timestamps [t1,t2], for X_test we will use set from df[t1:t2] **
!!! Важно, если timestamp мы всегда захватываем и слева и српава.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size. *
*https://github.com/scikit-learn/scikit-learn/blob/95119c13a/sklearn/model_selection/_split.py#L2076
If timestamp for df, for X_train we will use set for train from df[:t] **
If list of timestamps [t1,t2], for X_train we will use set for train from df[t1:t2] **
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.*
*https://github.com/scikit-learn/scikit-learn/blob/95119c13a/sklearn/model_selection/_split.py#L2076
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None. *
shuffle_target: {'only_train', 'all'}, str. Default = only_train.
In the case of 'only_train' we random shuffle only X_train, and y_train.
Test samples are unused for the shuffle. Any sample from X_test is later
than any sample from X_train. This is also true for respectively
In case of 'all' in analogy with sklearn.model_selection.train_test_split
Returns
-------
(X_train, X_test, y_train, y_test) : tuple
Tuple containing train-test split of inputs
TODO
--------
t-test of timestamp
** todo реализовать
ошибка прогнозирует через одну.
туда же через точки пробывать модели.
Examples
--------
>>> X = np.ones((4, 3))
>>> y = np.ones(4)
>>> sklearn_template(X, y)
(z, xmin, xmax) # this should match the actual output
"""
# """
# df - требование, но если тебе не хочется то просто сделай np.array на выходе и все
# Разбить временные ряды на трейн и тест
# len_seq- количество времменых точек в трейн
# points_ahead - количество времменых точек в прогнозе
# gap - расстояние между концом трейна и началом теста
# intersection - если нет, то в выборке нет перескающих множеств (временнызх моментов)
# shag - через сколько прыгаем
# train_size - float от 0 до 1
#
# return list of dfs
#
# """
#TODO требования к входным данным прописать
#TODO переписать энергоэффективно чтобы было
#TODO пока временные характеристики int_ами пора бы в pd.TimdDelta
# нет индексов
assert len_seq + points_ahead + gap + 1 <= len(df)
how='seq to seq'
# -------------------------------------------------------
#
# -------------------------------------------------------
x_start=0
x_end= x_start + len_seq
y_start = x_end + gap +1
y_end = y_start + points_ahead
if intersection:
# ради вычислительной нагрузки такой кастыль
else:
X = []
y = []
while y_end <= len(df):
X.append(df[x_start:x_end])
y.append(df[y_start:y_end])
x_start= compute_new_x_start(x_start,y_end,shag)
x_end= x_start + len_seq
y_start = x_end + gap +1
y_end = y_start + points_ahead
if (test_size==0) | (len(X)==1):
indices = np.array(range(len(X)))
np.random.seed(random_state)
if shuffle:
print(indices)
np.random.shuffle(indices)
print(indices)
X = [X[i] for i in indices]
y = [y[i] for i in indices]
return X,[],y,[]
else:
return train_test_split(X,y,
test_size=test_size,
train_size=train_size,
random_state=random_state,
shuffle=shuffle,
)
def split_by_repeated(series, df=None):
    """
    Split ``series`` into maximal runs of equal consecutive values.

    Parameters
    ----------
    series : pd.Series
        Input series; NaNs are dropped before splitting.
    df : pd.DataFrame, optional
        If given, every run longer than one sample is mapped onto the
        rows of ``df`` via the run's index, and the per-value lists
        contain DataFrame slices instead of Series slices.

    Returns
    -------
    dict
        Keys are the unique values of ``series``.  With a single unique
        value the (NaN-dropped) series itself is returned under its key
        (legacy behaviour, not wrapped in a list); otherwise each key
        maps to a list of run slices.

    Raises
    ------
    NameError
        If the series is empty after dropping NaNs.  (Kept as NameError
        for backward compatibility with existing callers; ValueError
        would be more conventional.)

    Notes
    -----
    ``ts[ts.diff() != 0]`` would be a faster way to locate run
    boundaries.
    """
    series = series.copy().dropna()
    if len(series.unique()) == 1:
        result = {series.unique()[0]: series}
    elif len(series.unique()) > 1:
        result = {uni: [] for uni in series.unique()}
        recent_i = 0
        recent_val = series.values[0]
        for i in range(len(series)):
            val = series.values[i]
            if recent_val != val:
                # Value changed: close the run that just ended.
                result[recent_val].append(series[recent_i:i])
                recent_i = i
                recent_val = val
            if i == len(series) - 1:
                # Always flush the final run.  The original code used
                # `continue` when the value was unchanged, which skipped
                # this flush and silently dropped the last run whenever
                # the series ended with a repeated value.
                result[recent_val].append(series[recent_i:i + 1])
    else:
        raise NameError('0 series')
    if df is not None:
        # NOTE(review): with a single unique value, result[key] is a
        # Series (not a list of runs), so this branch would index
        # scalars -- presumably an untested path; confirm with callers.
        new_result = {uni: [] for uni in series.unique()}
        for key in result:
            for i in range(len(result[key])):
                if len(result[key][i]) <= 1:
                    # A single-sample run carries no interval information.
                    continue
                new_result[key].append(df.loc[result[key][i].index])
        return new_result
    return result
def df2dfs(df,
           resample_freq = None,
           thereshold_gap = None,
           koef_freq_of_gap = 1.2,
           plot = False,
           col = None):
    """
    Split a raw time-series DataFrame into a list of gap-free DataFrames.

    The frame is not resampled (that is considered a heavy operation);
    instead, any step between consecutive timestamps that exceeds
    ``koef_freq_of_gap`` times the sampling frequency is treated as a
    gap, and the frame is cut at every gap.  Rationale (from the
    original authors): if the signal arrives frequently it does not
    drift far, so only rare samples are treated as gaps.

    Parameters
    ----------
    df : pd.DataFrame
        The raw time series over the full history.
    resample_freq : pd.Timedelta, optional
        Sampling frequency of the series.  If None, the most common
        time step is used; if there is no dominant step, an exception
        is raised.
    thereshold_gap : pd.Timedelta, optional
        Any step longer than this threshold is treated as a gap.
        Defaults to ``koef_freq_of_gap * resample_freq``.
    koef_freq_of_gap : float or int, optional (default=1.2)
        Multiplier used to derive ``thereshold_gap`` when it is None.
    plot : bool, optional (default=False)
        If True, plot every resulting segment (can be very slow).
    col : int or str, optional
        Column name or positional column number to plot.
        If None, the first column is used.

    Returns
    -------
    dfs : list of pd.DataFrame
        Gap-free segments with a reasonably stable sampling frequency.

    TODO
    ----
    Move the int-typed time parameters to pd.Timedelta throughout.
    """
    # axis must be passed by keyword: positional axis was removed in pandas 2.0.
    df = df.dropna(how='all').dropna(axis=1, how='all')
    dts = df.dropna(how='all').index.to_series().diff()
    if resample_freq is None:
        dts_dist = dts.value_counts()
        # value_counts() sorts by frequency, so iloc[0] is the dominant
        # step.  .iloc is used because the index holds Timedeltas and
        # plain [] indexing would be label/position ambiguous.
        if dts_dist.iloc[0] > dts_dist.iloc[1:].sum():
            resample_freq = dts_dist.index[0]
        else:
            print(dts_dist)
            raise Exception("Необходимо самостоятельно обработать функцию так как нет преобладающей частоты дискретизации")
    thereshold_gap = resample_freq*koef_freq_of_gap if thereshold_gap is None else thereshold_gap
    # Each step longer than the threshold bumps the cumulative counter,
    # so every segment between gaps gets its own stage id.
    gaps = (dts > thereshold_gap).astype(int).cumsum()
    dfs = [df.loc[gaps[gaps==stage].index] for stage in gaps.unique()]
    if plot:
        f, ax = plt.subplots()
        if col is None:
            col = df.columns[0]
        elif isinstance(col, int):
            # The original `type(col) == type(int)` compared against the
            # metaclass and was never true for an int, so numeric column
            # selection was broken.
            col = df.columns[col]
        for segment in dfs:
            segment[col].plot(ax=ax)
    return dfs
| [
2,
12466,
118,
15166,
21727,
40623,
31583,
11,
12466,
120,
25443,
117,
12466,
123,
25443,
112,
21727,
141,
229,
16843,
20375,
12466,
120,
16843,
20375,
21169,
18849,
31583,
12466,
121,
16843,
220,
21169,
16142,
140,
109,
15166,
20375,
16142... | 1.609406 | 6,613 |
from django.core.cache import cache
from django.db.models.signals import post_save
def treenav_save_other_object_handler(sender, instance, created, **kwargs):
    """
    Keep MenuItem.href in sync with the model object it points at.

    Runs on every post_save.  When the saved object's model is referenced
    by any menu item, each such item's stored href is refreshed from the
    object's current get_absolute_url().
    """
    # Imported here so models don't get loaded during app loading.
    from django.contrib.contenttypes.models import ContentType
    from .models import MenuItem

    cache_key = 'django-treenav-menumodels'
    # Saving a MenuItem can add or remove a content-type reference, so
    # the cached list of referenced models must be rebuilt.
    if sender == MenuItem:
        cache.delete(cache_key)
    referenced_models = cache.get(cache_key)
    if not referenced_models:
        referenced_models = [
            entry.content_type.model_class()
            for entry in MenuItem.objects.exclude(content_type__isnull=True)
        ]
        cache.set(cache_key, referenced_models)
    # Only attempt to update MenuItem if sender is known to be referenced.
    if sender not in referenced_models:
        return
    content_type = ContentType.objects.get_for_model(sender)
    for item in MenuItem.objects.filter(content_type=content_type,
                                        object_id=instance.pk):
        if item.href != instance.get_absolute_url():
            item.href = instance.get_absolute_url()
            item.save()
def connect_post_save_handler(**kwargs):
    """
    Connect post_save (of all models) to treenav handler above.

    Called from apps.py during app loading.  Registering without a
    ``sender`` argument means the handler fires for every model's save.
    """
    post_save.connect(treenav_save_other_object_handler)
def disconnect_post_save_handler(sender, **kwargs):
    """
    Disconnect post_save signal during migrations of any application.

    This prevents MenuItem from being accessed before it is installed in the
    database. This also means that data migrations run during other app
    migrations will NOT call the treenav handler if they have associated TreeNav
    items; they will need to be manually updated.
    """
    post_save.disconnect(treenav_save_other_object_handler)
| [
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
628,
198,
4299,
256,
1361,
615,
62,
21928,
62,
847,
62,
15252,
62,
30281,
7,
82,
2194,
11,
45... | 2.83237 | 692 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
import re
| [
2,
220,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
220,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
220,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
220,
5115,
6634,
9238,... | 3.938967 | 213 |
from django.db import models
from django.conf import settings
from posts.models import Post
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
6851,
13,
27530,
1330,
2947,
628
] | 3.916667 | 24 |
from model.group import Group
import random
| [
6738,
2746,
13,
8094,
1330,
4912,
198,
11748,
4738,
628,
628
] | 4.272727 | 11 |
import pandas as pd
| [
11748,
19798,
292,
355,
279,
67,
628
] | 3 | 7 |
import datetime
import glob
import os
import sys
import tempfile
from decimal import ROUND_HALF_UP, Decimal
from pathlib import Path
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from laske_export.models import LaskePaymentsLog
from leasing.models import Invoice, Vat
from leasing.models.invoice import InvoicePayment
| [
11748,
4818,
8079,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
6738,
32465,
1330,
371,
15919,
62,
39,
1847,
37,
62,
8577,
11,
4280,
4402,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
426... | 3.623853 | 109 |
"""Cryptopals Set 1.
Basics
"""
from base64 import b64decode
from dataclasses import dataclass
from itertools import zip_longest
import cryptopalsolutions.algorithms.binary as binary_helper
import cryptopalsolutions.algorithms.language as language_helper
import cryptopalsolutions.models.solutions as cryptopal_solutions
from Cryptodome.Cipher import AES
@dataclass
class Problem1Solver:
    """Solver for Cryptopals Set 1, Problem 1: convert hex to base64."""

    hex_string: str

    def solve(self) -> str:
        """Return the base64 encoding of the hex input."""
        encoded = binary_helper.hex_to_base64(self.hex_string)
        return encoded.decode()

    @classmethod
    def from_file(cls, filename: str):
        """Construct a solver from the full contents of ``filename``."""
        with open(filename) as handle:
            contents = handle.read()
        return cls(hex_string=contents)

    @classmethod
    def from_str(cls, input_string: str):
        """Construct a solver directly from a string."""
        return cls(hex_string=input_string)
@dataclass
class Problem2Solver:
    """Solver for Cryptopals Set 1, Problem 2: fixed XOR."""

    hex_string: str
    xor_string: str

    def solve(self) -> str:
        """Return the XOR of the two hex-encoded inputs."""
        return binary_helper.xor_hex(self.hex_string, self.xor_string)

    @classmethod
    def from_file(cls, filename: str):
        """Construct a solver from a file with one hex string per line."""
        with open(filename) as handle:
            lines = handle.readlines()
        if len(lines) < 2:
            raise cryptopal_solutions.InvalidInputError("\n".join(lines))
        return cls(hex_string=lines[0], xor_string=lines[1])

    @classmethod
    def from_str(cls, input_string: str):
        """Construct a solver from a space-separated pair of hex strings."""
        pieces = input_string.split(" ")
        if len(pieces) < 2:
            raise cryptopal_solutions.InvalidInputError(" ".join(pieces))
        return cls(hex_string=pieces[0], xor_string=pieces[1])
@dataclass
class Problem3Solver:
    """Solver for Cryptopals Set 1, Problem 3: single-byte XOR cipher."""

    hex_string: str
    language_processor: language_helper.LanguageProcessor = (
        language_helper.CompleteStringLanguageProcessor
    )

    def solve(self) -> str:
        """Try all 256 single-byte keys; return the most language-like plaintext."""
        candidates = (
            binary_helper.single_byte_xor(self.hex_string, key)
            for key in range(256)
        )
        return self.language_processor.best_option(candidates)

    @classmethod
    def from_file(cls, filename: str):
        """Construct a solver from the full contents of ``filename``."""
        with open(filename) as handle:
            return cls(hex_string=handle.read())

    @classmethod
    def from_str(cls, input_string: str):
        """Construct a solver directly from a string."""
        return cls(hex_string=input_string)

    @classmethod
    def from_block(cls, block: bytes):
        """Construct a solver from a raw ciphertext block.

        Uses the partial-string scorer because a transposed stripe is
        not a complete plaintext.
        """
        return cls(
            hex_string=block.hex(),
            language_processor=language_helper.PartialStringLanguageProcessor,
        )
@dataclass
class Problem4Solver:
    """The solver for Cryptopals Set 1 Problem 4.

    Detect single-character XOR.

    Attributes:
        hex_strings: candidate hex-encoded ciphertexts, one per input line.
        language_processor: scorer used to pick the most English-like result.
    """

    hex_strings: list[str]
    language_processor: language_helper.LanguageProcessor = (
        language_helper.CompleteStringLanguageProcessor
    )

    def solve(self) -> str:
        """Decrypt every candidate line and return the most language-like plaintext."""
        return self.language_processor.best_option(
            (Problem3Solver(hex_string).solve() for hex_string in self.hex_strings)
        )

    @classmethod
    def from_file(cls, filename: str):
        """Build the solver from an input file.

        The stripped lines are materialized into a real list: the field
        is declared ``list[str]``, and the generator the original code
        stored here broke the dataclass repr/eq and was exhausted after
        a single ``solve()`` call.
        """
        with open(filename) as input_file:
            return cls(hex_strings=[line.strip() for line in input_file.readlines()])

    @classmethod
    def from_str(cls, input_string: str):
        """Unsupported: this problem's input is only provided as a file."""
        raise cryptopal_solutions.InputNotSupportedError(
            cryptopal_solutions.InputMethod.STRING
        )
@dataclass
class Problem5Solver:
    """Solver for Cryptopals Set 1, Problem 5: repeating-key XOR."""

    plaintext: str
    xor_key: str

    def solve(self) -> str:
        """Return the hex encoding of the repeating-key XOR of the plaintext."""
        ciphertext = binary_helper.encrypt_repeating_key_xor(self.plaintext, self.xor_key)
        return ciphertext.encode().hex()

    @classmethod
    def from_file(cls, filename: str):
        """Construct a solver from a file: key on line one, plaintext after."""
        with open(filename) as handle:
            lines = [line.strip() for line in handle.readlines()]
        if len(lines) < 2:
            raise cryptopal_solutions.InvalidInputError("\n".join(lines))
        return cls(plaintext="\n".join(lines[1:]), xor_key=lines[0])

    @classmethod
    def from_str(cls, input_string: str):
        """Unsupported: this problem's input is only provided as a file."""
        raise cryptopal_solutions.InputNotSupportedError(
            cryptopal_solutions.InputMethod.STRING
        )
@dataclass
class Problem6Solver:
    """Solver for Cryptopals Set 1, Problem 6: break repeating-key XOR."""

    ciphertext: bytes

    def solve(self) -> str:
        """Break the repeating-key XOR cipher.

        Guess the key size, transpose the ciphertext into one stripe per
        key byte, break each stripe as an independent single-byte XOR,
        then interleave the recovered stripes back into reading order.
        """
        key_size = binary_helper.guess_keysize(self.ciphertext)
        stripes = [self.ciphertext[offset::key_size] for offset in range(key_size)]
        plaintext_stripes = [
            Problem3Solver.from_block(stripe).solve() for stripe in stripes
        ]
        interleaved = zip_longest(*plaintext_stripes, fillvalue="")
        return "".join("".join(chars) for chars in interleaved)

    @classmethod
    def from_file(cls, filename: str):
        """Construct a solver from a base64-encoded input file."""
        with open(filename, "rb") as handle:
            stripped = [line.strip() for line in handle.readlines()]
        return cls(ciphertext=b64decode(b"".join(stripped)))

    @classmethod
    def from_str(cls, input_string: str):
        """Unsupported: this problem's input is only provided as a file."""
        raise cryptopal_solutions.InputNotSupportedError(
            cryptopal_solutions.InputMethod.STRING
        )
@dataclass
class Problem7Solver:
    """Solver for Cryptopals Set 1, Problem 7: AES in ECB mode."""

    ciphertext: bytes
    key: bytes

    def solve(self) -> str:
        """Decrypt the ciphertext with AES in ECB mode under ``key``."""
        cipher = AES.new(self.key, AES.MODE_ECB)
        return cipher.decrypt(self.ciphertext).decode()

    @classmethod
    def from_file(cls, filename: str):
        """Construct a solver from a file: key on line one, base64 body after."""
        with open(filename, "rb") as handle:
            lines = [line.strip() for line in handle.readlines()]
        if len(lines) < 2:
            raise cryptopal_solutions.InvalidInputError("\n".join(lines))
        return cls(ciphertext=b64decode(b"".join(lines[1:])), key=lines[0])

    @classmethod
    def from_str(cls, input_string: str):
        """Unsupported: this problem's input is only provided as a file."""
        raise cryptopal_solutions.InputNotSupportedError(
            cryptopal_solutions.InputMethod.STRING
        )
@dataclass
class Problem8Solver:
    """The solver for Cryptopals Set 1 Problem 8.

    Detect AES in ECB mode.
    """

    # The lines arrive as raw bytes: from_file opens the input in "rb"
    # mode, and solve() decodes the winning line -- hence list[bytes].
    hex_strings: list[bytes]

    def solve(self) -> str:
        """Determine AES ECB with ciphertexts from 16 byte key.

        Returns the first line whose ciphertext contains a repeated
        block -- the ECB fingerprint (identical plaintext blocks encrypt
        to identical ciphertext blocks).
        """
        return next(
            filter(
                binary_helper.has_repeated_cipertext,
                self.hex_strings,
            )
        ).decode()

    @classmethod
    def from_file(cls, filename: str):
        """Build the solver from an input file."""
        with open(filename, "rb") as input_file:
            return cls(hex_strings=[line.strip() for line in input_file.readlines()])

    @classmethod
    def from_str(cls, input_string: str):
        """Build the solver from an input string."""
        raise cryptopal_solutions.InputNotSupportedError(
            cryptopal_solutions.InputMethod.STRING
        )
| [
37811,
23919,
404,
874,
5345,
352,
13,
198,
198,
15522,
873,
198,
37811,
198,
6738,
2779,
2414,
1330,
275,
2414,
12501,
1098,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
340,
861,
10141,
1330,
19974,
62,
6511,
395,
... | 2.38416 | 3,548 |
import pandas as pd
import pyflux as pf
import sys
import date_util
import common
if __name__ == "__main__":
    # CLI: <stats-file> <start-date> <end-date> <predict-start> <predict-end>
    fn = sys.argv[1]
    start_date = sys.argv[2]
    end_date = sys.argv[3]
    predict_start = sys.argv[4]
    predict_end = sys.argv[5]

    action_count = common.load_action_stat(fn)
    # NOTE(review): get_history_pay and arima are neither defined nor
    # imported in the visible part of this file -- presumably provided
    # elsewhere in the module; confirm before running.
    count, date, real_value = get_history_pay(action_count, start_date, end_date, predict_start, predict_end)
    predict_value = arima(count, date, 14)  # 14 points ahead, presumably -- TODO confirm
    print(predict_value, real_value)
    # Nothing to score if either series is empty.
    if len(real_value) == 0 or len(predict_value) == 0:
        exit()

    print(common.shop_cost(predict_value, real_value))
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
69,
22564,
355,
279,
69,
198,
11748,
25064,
198,
11748,
3128,
62,
22602,
198,
11748,
2219,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2471... | 2.413386 | 254 |
# TODO: Test verbose name and plural needed for translation.
# TODO: Incorporate HTML process to send message as text/html content type.
| [
2,
16926,
46,
25,
6208,
15942,
577,
1438,
290,
22801,
2622,
329,
11059,
13,
198,
2,
16926,
46,
25,
3457,
31150,
378,
11532,
1429,
284,
3758,
3275,
355,
2420,
14,
6494,
2695,
2099,
13,
198
] | 3.914286 | 35 |
import nep
import time
# Simple request/response demo: send one request per second to a local
# nep server and print each reply.  Loops forever (Ctrl-C to stop).
client = nep.client('127.0.0.1', 8000) # Create a new client instance

while True:
    msg = {"message":"client request"} # Message to send as request
    client.send_info(msg) # Send request
    print (client.listen_info()) # Wait for server response
    time.sleep(1) # Wait one second
| [
11748,
25919,
220,
198,
11748,
640,
220,
198,
198,
16366,
796,
25919,
13,
16366,
10786,
16799,
13,
15,
13,
15,
13,
16,
3256,
38055,
8,
1303,
13610,
257,
649,
5456,
4554,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
31456,
796,
1977... | 3.038462 | 104 |
from glob import glob
import os
import h5py
import resnet_one
import torch
from tqdm import tqdm
import torch.nn as nn
from flags import FLAGS
import json
from get_dataloaders import get_data
from training_functions import train_function, validation_function, validation_summary, test_function
from scoring_functions import get_batch_top3score
# Script entry point; main() is presumably defined elsewhere in this
# module (not visible in this excerpt) -- confirm.
if __name__ == '__main__':
    main()
| [
6738,
15095,
1330,
15095,
198,
11748,
28686,
198,
11748,
289,
20,
9078,
198,
11748,
581,
3262,
62,
505,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
9701,
1... | 3.318966 | 116 |
from cfr.input_structures.utility_matrix import UtilMatrix
| [
6738,
269,
8310,
13,
15414,
62,
7249,
942,
13,
315,
879,
62,
6759,
8609,
1330,
7273,
346,
46912,
628
] | 3.157895 | 19 |
# -*- coding: utf-8 -*-
import re
import scrapy
import json
from locations.items import GeojsonPointItem
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
302,
198,
198,
11748,
15881,
88,
198,
11748,
33918,
198,
198,
6738,
7064,
13,
23814,
1330,
2269,
13210,
1559,
12727,
7449,
628
] | 2.918919 | 37 |
from pathlib import Path
import numpy as np
import pandas as pd
# Script entry point; main() is presumably defined elsewhere in this
# module (not visible in this excerpt) -- confirm.
if __name__ == '__main__':
    main()
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.717949 | 39 |
"""
A module to show off a native coroutine pipeline.
This pipe allows us to send messages back-and-forth between coroutines,
simulating what we can do with the yield statement/expression. However,
it is very arcane it shows with the native coroutines are not as useful
for animation or other modern applications. In fact, as the name says
-- asyncio -- it really only useful for reading date from large files
or from a web server.
Author: Walker M. White
Date: November 2, 2020
"""
import asyncio
class Pipe(object):
    """
    A class representing a communication pipe between coroutines

    This class works by having an event attribute, which works like
    a semaphore (a topic you would learn about in a 4000 level course)
    """
    # Attribute _buffer: The buffer storing the communicated data
    # Invariant: _buffer is a (possibly empty) list
    #
    # Attribute _stop: Whether to shut down this pipe permanently
    # Invariant: _stop is a boolean
    #
    # Attribute _event: The event to synchronize the two coroutines
    # Invariant: _event is a asyncio.Event()

    def __init__(self):
        """
        Initializes a new communication pipe
        """
        self._buffer = []
        self._stop = False
        self._event = asyncio.Event()

    def send(self,value):
        """
        Sends a piece of data into the pipeline buffer

        This data will then be sent to the first available coroutine
        awaiting on this pipe.  Sends after stop() are silently ignored.

        Paramater value: The data to sent
        Precondition: NONE (value can be anything)
        """
        if not self._stop:
            self._buffer.append(value)
            self._event.set()

    def stop(self):
        """
        Stops this pipeline permanently.

        Any coroutine waiting on this pipeline will immediately
        receive None.
        """
        self._stop = True
        self._event.set()

    async def receive(self):
        """
        Returns the next bit of data in the pipe.

        This method returns a value immediately if the buffer is not
        empty. Otherwise it blocks any coroutine that awaits on it
        until the buffer is not empty.  Returns None once the pipe is
        stopped.
        """
        # Drain the buffer BEFORE waiting.  The original implementation
        # awaited the event first and cleared it on every wake-up, so if
        # several send() calls happened before a receive(), only the
        # first buffered item could ever be retrieved -- the next
        # receive() waited forever on an already-cleared event.
        while not self._stop:
            if self._buffer:
                return self._buffer.pop(0)
            await self._event.wait()
            self._event.clear()
        return None
async def helper(pipe):
    """
    A coroutine that can receive and send a message via a pipe

    Awaits one value from the pipe, prints it, and then replies by
    sending 44 back through the same pipe.

    Parameter pipe: The pipe to communicate
    Precondition: pipe is a Pipe object
    """
    value = await pipe.receive()
    print('Value is',value)
    pipe.send(44)
async def main():
    """
    The main, parent coroutine

    Demonstrates two-way communication: spawns helper() as a task,
    sends it 99 through the pipe, then awaits its 44 reply.
    """
    p = Pipe()
    print('Creating the task')
    t = asyncio.create_task(helper(p))
    print('Sending the message')
    p.send(99)
    # Let t take over (awaiting the task yields control to it)
    print('Waiting on the task')
    await t
    print('Awaiting on an answer back.')
    value = await p.receive()
    print('Value back is',value)
    # Wait for t to finish
    print('The task is complete')
# Script entry point: drive the coroutine demo on a fresh event loop.
if __name__ == '__main__':
    asyncio.run(main())
| [
37811,
198,
32,
8265,
284,
905,
572,
257,
6868,
1162,
28399,
11523,
13,
198,
198,
1212,
12656,
3578,
514,
284,
3758,
6218,
736,
12,
392,
12,
25718,
1022,
1162,
448,
1127,
11,
198,
14323,
8306,
644,
356,
460,
466,
351,
262,
7800,
264... | 2.691002 | 1,178 |
from wallpaper_downloader import site_parser
from bs4 import BeautifulSoup
def test_good_page_url():
    """
    Test '_get_page_html' function of site_parser module.

    Function is tested with the good URL of the page.
    NOTE(review): performs a real HTTP request, so it needs network
    access and depends on the remote site being up.
    """
    page_html = site_parser._get_page_html(
        "https://www.smashingmagazine.com/category/wallpapers/",
    )
    # NOTE(review): isinstance() would be the idiomatic check here.
    assert type(page_html) == BeautifulSoup
def test_bad_page_url():
    """
    Test '_get_page_html' function of site_parser module.

    Function is tested with a bad URL of the page (note the singular
    'wallpaper' in the path); the parser is expected to bail out with
    SystemExit.  (The original docstring said "good URL" -- a
    copy-paste slip.)
    """
    try:
        site_parser._get_page_html(
            "https://www.smashingmagazine.com/category/wallpaper/",
        )
    except SystemExit:
        assert True
    else:
        # No SystemExit means the parser accepted a URL it should reject.
        assert False
| [
6738,
39328,
62,
15002,
263,
1330,
2524,
62,
48610,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
4299,
1332,
62,
11274,
62,
7700,
62,
6371,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
6208,
705,
62,
1136,
62,
... | 2.547945 | 292 |
#!/usr/bin/python3
# Mirrorscript v2 By Hazmirul Afiq
import subprocess, requests, re, sys
import operator
import argparse, apt, os
import threading
from shutil import copyfile
# Shared mutable state used by the worker threads.
result_url = []   # presumably filled by fetch_url() -- defined elsewhere, confirm
ping_result = []  # presumably filled by the ping threads -- defined elsewhere, confirm
mirrors = {}      # hostname -> latency; sorted later to pick the fastest mirror
# Browser-like User-Agent so mirror servers don't reject the requests.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}
if __name__ == "__main__":
    # Check if user is root first (the script rewrites /etc/apt/sources.list).
    if os.getuid() != 0:
        sys.exit("[!] Must run as root/sudo\n")

    # Argument parser
    parser = argparse.ArgumentParser(description='Kali Mirrorscripts by Vineet Bhawsar automatically select the best kali mirror server and apply the configuration')
    parser.add_argument('-v','--verbose', help='enable verbose output', action="store_true")
    parser.add_argument('-https', help='use HTTPS in apt transport (default HTTP)', action="store_true")
    parser.add_argument('-src', help='enable sources packages (default disable)', action="store_true")
    args = parser.parse_args()

    # Initialize arguments as plain booleans
    https = True if args.https else False
    verbose = True if args.verbose else False
    sources = True if args.src else False
# Banner
print("#")
print("# Mirrorscripts - By "Vineet Bhawsar")
print("# Automatically select the best Kali mirror and apply the configuration")
print("# https://github.com/IceM4nn/mirrorscript-v2")
print("# https://www.metahackers.pro/speed-kali-linux-update/")
print("#\n")
    if https:
        # apt-transport-https is required before https:// entries can be
        # used in sources.list.
        cache = apt.Cache()
        cache.open()
        package = "apt-transport-https"
        print("[-] Checking if '" + package + "' package is installed.")
        try:
            if cache[package].is_installed:
                if verbose:
                    print("\t- "+package+" is installed\n")
            else:
                print("\t! "+package+" is NOT installed. Attempting to install ...")
                cache[package].mark_install()
                print("\t- Installing "+package+"\n")
                try:
                    cache.commit()
                    print("\n\t- "+package+" installed succesfully")
                except Exception as e:
                    # Install failed (locked dpkg, no network, ...): abort.
                    print("\t! package "+package+" is failing to install")
                    print("\t "+str(e))
                    sys.exit(1)
        except KeyError as e:
            # Package name missing from the local apt cache entirely.
            print("[!] The package \"" + package + "\" could not found in local apt cache. You may need to install it manually later after you've done update kali.")
            print("    For the time being, re-run the script without https support.")
            sys.exit(1)
    print("[+] Getting mirror list ...")
    # Scrape mirror URLs from the official mirror list; the first two
    # matches are skipped (presumably the canonical http.kali.org
    # entries -- confirm against the page layout).
    response = requests.get('https://http.kali.org/README.mirrorlist', headers=headers).text
    urls = re.findall(r'(?:href="http(?:s|))(.*)(?:/README")',response)[2:]
    if verbose:
        print("[+] Found (" + str(len(urls)) + ") lists of mirrors:")
        for url in urls:
            print("\t- https" + url)
        print("")

    print("[+] Checking mirrors ...")
    schema = 'https' if https else 'http'
    # NOTE(review): fetch_url is not defined in the visible part of this
    # file -- presumably defined above; confirm.
    new_urls = fetch_url(urls,schema)

    print("[+] Finding the best latency")
    # Reduce each URL to its bare hostname for pinging.
    hosts = []
    for hostname in new_urls:
        hostname = hostname.split("//")[-1].split("/")[0].split('?')[0]
        hosts.append(hostname)

    # sending ping in threads
    # NOTE(review): ping_s is not defined in the visible part of this
    # file -- it presumably populates the global `mirrors` dict; confirm.
    ping_s(hosts)
    if verbose:
        print("")

    # sorted to fastest mirror (ascending latency, so index 0 is best)
    sorted_mirrors = sorted(mirrors.items(), key=operator.itemgetter(1))
    print("[+] Fastest mirror: " + str(sorted_mirrors[0]))
    print("[+] Preparing ...")

    # Making backup
    if verbose:
        print("\t- Making a backup file /etc/apt/sources.list.bk ...")
    copyfile('/etc/apt/sources.list', '/etc/apt/sources.list.bk')

    if verbose:
        print("\t- Checking sources.list for older entries ...")
    contents = []
    file = open("/etc/apt/sources.list", "r+")
    if verbose:
        print("\t- Commenting older entries ...")
    # i counts lines after a previous "Autogenerated" marker so that the
    # two entry lines written by an earlier run also get commented out.
    i = 0
    for line in file.readlines():
        if (re.search(r'^deb http(?:s|)://http\.kali\.org/kali', line, re.I)) or (re.search(r'^deb-src http(?:s|)://http\.kali\.org/kali', line, re.I)):
            # Default kali entry: comment it out.
            newline = "#" + line
            # NOTE(review): this write lands at EOF (readlines() already
            # consumed the file) and is discarded by the seek/truncate
            # below -- presumably unintentional but harmless.
            file.write(newline)
            contents.append(newline)
        elif re.search(r'^# Autogenerated script by MirrorScripts-V2', line, re.I):
            print("\t! Found previous applies! Commenting it out ...")
            contents.append(line)
            i = 1
        elif i == 1:
            # First line after a previous marker (the old deb entry).
            if not line.startswith("#"):
                newline = "#" + line
                file.write(newline)
                contents.append(newline)
            else:
                contents.append(line)
            i = i+1
        elif i == 2:
            # Second line after a previous marker (the old deb-src entry).
            if not line.startswith("#"):
                newline = "#" + line
                file.write(newline)
                contents.append(newline)
            else:
                contents.append(line)
            i = 0
        else:
            contents.append(line)

    # Rewrite the whole file from the collected (possibly commented) lines.
    file.seek(0)
    file.truncate()
    file.seek(0)
    for line in contents:
        file.write(line)
    file.close()
    if verbose:
        print("\t- Done\n")
    print("[+] Updating sources.list with new entry ...")
    # Recover the full mirror path for the fastest hostname.
    matching = [s for s in urls if sorted_mirrors[0][0] in s]
    new_mirror = schema + matching[0]
    if verbose:
        print("\t- Your new mirror: " + new_mirror + "\n")

    # NOTE(review): the entries are appended via `sh -c 'echo ... >> ...'`
    # with shell=True instead of writing the file directly -- works, but
    # shell-interpolating new_mirror would be unsafe if the mirror list
    # were ever attacker-controlled.
    temp = "sh -c \'echo \"\n# Autogenerated script by MirrorScripts-V2\" >> /etc/apt/sources.list\'"
    subprocess.Popen(temp, shell=True, stdout=subprocess.PIPE).stdout.read()

    line = "deb " + new_mirror + " kali-rolling main contrib non-free"
    temp = "sh -c \'echo %s >> /etc/apt/sources.list\'"
    subprocess.Popen(temp % line, shell=True, stdout=subprocess.PIPE).stdout.read()

    # deb-src entry is written commented out unless -src was given.
    line = "deb-src " + new_mirror + " kali-rolling main contrib non-free"
    if not sources:
        line = "#" + line
    temp = "sh -c \'echo \"%s\" >> /etc/apt/sources.list\'"
    subprocess.Popen(temp % line, shell=True, stdout=subprocess.PIPE).stdout.read()

    print("[+] Done!")
    if verbose:
        print("\t- Run 'sudo apt clean; sudo apt update' for the changes to load.\n")
    else:
        print("")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
7381,
5965,
6519,
410,
17,
2750,
17064,
10793,
377,
2483,
25011,
198,
11748,
850,
14681,
11,
7007,
11,
302,
11,
25064,
198,
11748,
10088,
198,
11748,
1822,
29572,
11,
15409,
11,
28686,... | 2.577485 | 2,123 |
import os
import re
| [
11748,
28686,
198,
11748,
302,
628
] | 3.5 | 6 |
"""
Created on July 02, 2018
@author: Alejandro Molina
"""
from spn.algorithms.MPE import get_mpe_top_down_leaf, add_node_mpe
from spn.structure.leaves.parametric.Inference import continuous_log_likelihood, gamma_log_likelihood, \
discrete_log_likelihood, categorical_log_likelihood, categorical_dictionary_log_likelihood
from spn.structure.leaves.parametric.Parametric import (
Gaussian,
Gamma,
LogNormal,
Poisson,
Bernoulli,
Categorical,
Geometric,
Exponential,
CategoricalDictionary,
NegativeBinomial,
Hypergeometric,
)
import numpy as np
import logging
logger = logging.getLogger(__name__)
| [
37811,
198,
41972,
319,
2901,
7816,
11,
2864,
198,
198,
31,
9800,
25,
9300,
47983,
17958,
1437,
198,
37811,
198,
6738,
599,
77,
13,
282,
7727,
907,
13,
7378,
36,
1330,
651,
62,
76,
431,
62,
4852,
62,
2902,
62,
33201,
11,
751,
62,
... | 2.7 | 240 |
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class PressureInletBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'name': 'str',
'pressure': 'TotalPBC',
'pressure_rgh': 'TotalPBC',
'gauge_pressure': 'OneOfPressureInletBCGaugePressure',
'gauge_pressure_rgh': 'TotalPBC',
'temperature': 'OneOfPressureInletBCTemperature',
'passive_scalars': 'list[FixedValuePSBC]',
'phase_fraction': 'FixedValuePFBC',
'net_radiative_heat_flux': 'OneOfPressureInletBCNetRadiativeHeatFlux',
'topological_reference': 'TopologicalReference'
}
attribute_map = {
'type': 'type',
'name': 'name',
'pressure': 'pressure',
'pressure_rgh': 'pressureRgh',
'gauge_pressure': 'gaugePressure',
'gauge_pressure_rgh': 'gaugePressureRgh',
'temperature': 'temperature',
'passive_scalars': 'passiveScalars',
'phase_fraction': 'phaseFraction',
'net_radiative_heat_flux': 'netRadiativeHeatFlux',
'topological_reference': 'topologicalReference'
}
def __init__(self, type='PRESSURE_INLET_V31', name=None, pressure=None, pressure_rgh=None, gauge_pressure=None, gauge_pressure_rgh=None, temperature=None, passive_scalars=None, phase_fraction=None, net_radiative_heat_flux=None, topological_reference=None, local_vars_configuration=None): # noqa: E501
"""PressureInletBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._name = None
self._pressure = None
self._pressure_rgh = None
self._gauge_pressure = None
self._gauge_pressure_rgh = None
self._temperature = None
self._passive_scalars = None
self._phase_fraction = None
self._net_radiative_heat_flux = None
self._topological_reference = None
self.discriminator = None
self.type = type
if name is not None:
self.name = name
if pressure is not None:
self.pressure = pressure
if pressure_rgh is not None:
self.pressure_rgh = pressure_rgh
if gauge_pressure is not None:
self.gauge_pressure = gauge_pressure
if gauge_pressure_rgh is not None:
self.gauge_pressure_rgh = gauge_pressure_rgh
if temperature is not None:
self.temperature = temperature
if passive_scalars is not None:
self.passive_scalars = passive_scalars
if phase_fraction is not None:
self.phase_fraction = phase_fraction
if net_radiative_heat_flux is not None:
self.net_radiative_heat_flux = net_radiative_heat_flux
if topological_reference is not None:
self.topological_reference = topological_reference
# ------------------------------------------------------------------
# Property accessors (OpenAPI-generator boilerplate).  Each pair exposes
# the matching private backing field initialised in __init__; only the
# `type` setter performs client-side validation.
# ------------------------------------------------------------------
@property
def type(self):
    """Gets the type of this PressureInletBC.  # noqa: E501

    This boundary condition is suitable for inlet and open boundaries where the value of <b>pressure</b> is known. Schema name: PressureInletBC  # noqa: E501

    :return: The type of this PressureInletBC.  # noqa: E501
    :rtype: str
    """
    return self._type

@type.setter
def type(self, type):
    """Sets the type of this PressureInletBC.

    This boundary condition is suitable for inlet and open boundaries where the value of <b>pressure</b> is known. Schema name: PressureInletBC  # noqa: E501

    :param type: The type of this PressureInletBC.  # noqa: E501
    :type: str
    """
    # `type` is a required discriminator-style field; reject None when
    # client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
        raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

    self._type = type

@property
def name(self):
    """Gets the name of this PressureInletBC.  # noqa: E501

    :return: The name of this PressureInletBC.  # noqa: E501
    :rtype: str
    """
    return self._name

@name.setter
def name(self, name):
    """Sets the name of this PressureInletBC.

    :param name: The name of this PressureInletBC.  # noqa: E501
    :type: str
    """
    self._name = name

@property
def pressure(self):
    """Gets the pressure of this PressureInletBC.  # noqa: E501

    :return: The pressure of this PressureInletBC.  # noqa: E501
    :rtype: TotalPBC
    """
    return self._pressure

@pressure.setter
def pressure(self, pressure):
    """Sets the pressure of this PressureInletBC.

    :param pressure: The pressure of this PressureInletBC.  # noqa: E501
    :type: TotalPBC
    """
    self._pressure = pressure

@property
def pressure_rgh(self):
    """Gets the pressure_rgh of this PressureInletBC.  # noqa: E501

    :return: The pressure_rgh of this PressureInletBC.  # noqa: E501
    :rtype: TotalPBC
    """
    return self._pressure_rgh

@pressure_rgh.setter
def pressure_rgh(self, pressure_rgh):
    """Sets the pressure_rgh of this PressureInletBC.

    :param pressure_rgh: The pressure_rgh of this PressureInletBC.  # noqa: E501
    :type: TotalPBC
    """
    self._pressure_rgh = pressure_rgh

@property
def gauge_pressure(self):
    """Gets the gauge_pressure of this PressureInletBC.  # noqa: E501

    :return: The gauge_pressure of this PressureInletBC.  # noqa: E501
    :rtype: OneOfPressureInletBCGaugePressure
    """
    return self._gauge_pressure

@gauge_pressure.setter
def gauge_pressure(self, gauge_pressure):
    """Sets the gauge_pressure of this PressureInletBC.

    :param gauge_pressure: The gauge_pressure of this PressureInletBC.  # noqa: E501
    :type: OneOfPressureInletBCGaugePressure
    """
    self._gauge_pressure = gauge_pressure

@property
def gauge_pressure_rgh(self):
    """Gets the gauge_pressure_rgh of this PressureInletBC.  # noqa: E501

    :return: The gauge_pressure_rgh of this PressureInletBC.  # noqa: E501
    :rtype: TotalPBC
    """
    return self._gauge_pressure_rgh

@gauge_pressure_rgh.setter
def gauge_pressure_rgh(self, gauge_pressure_rgh):
    """Sets the gauge_pressure_rgh of this PressureInletBC.

    :param gauge_pressure_rgh: The gauge_pressure_rgh of this PressureInletBC.  # noqa: E501
    :type: TotalPBC
    """
    self._gauge_pressure_rgh = gauge_pressure_rgh

@property
def temperature(self):
    """Gets the temperature of this PressureInletBC.  # noqa: E501

    :return: The temperature of this PressureInletBC.  # noqa: E501
    :rtype: OneOfPressureInletBCTemperature
    """
    return self._temperature

@temperature.setter
def temperature(self, temperature):
    """Sets the temperature of this PressureInletBC.

    :param temperature: The temperature of this PressureInletBC.  # noqa: E501
    :type: OneOfPressureInletBCTemperature
    """
    self._temperature = temperature

@property
def passive_scalars(self):
    """Gets the passive_scalars of this PressureInletBC.  # noqa: E501

    Please choose a boundary condition for passive scalar (T).  # noqa: E501

    :return: The passive_scalars of this PressureInletBC.  # noqa: E501
    :rtype: list[FixedValuePSBC]
    """
    return self._passive_scalars

@passive_scalars.setter
def passive_scalars(self, passive_scalars):
    """Sets the passive_scalars of this PressureInletBC.

    Please choose a boundary condition for passive scalar (T).  # noqa: E501

    :param passive_scalars: The passive_scalars of this PressureInletBC.  # noqa: E501
    :type: list[FixedValuePSBC]
    """
    self._passive_scalars = passive_scalars

@property
def phase_fraction(self):
    """Gets the phase_fraction of this PressureInletBC.  # noqa: E501

    :return: The phase_fraction of this PressureInletBC.  # noqa: E501
    :rtype: FixedValuePFBC
    """
    return self._phase_fraction

@phase_fraction.setter
def phase_fraction(self, phase_fraction):
    """Sets the phase_fraction of this PressureInletBC.

    :param phase_fraction: The phase_fraction of this PressureInletBC.  # noqa: E501
    :type: FixedValuePFBC
    """
    self._phase_fraction = phase_fraction

@property
def net_radiative_heat_flux(self):
    """Gets the net_radiative_heat_flux of this PressureInletBC.  # noqa: E501

    :return: The net_radiative_heat_flux of this PressureInletBC.  # noqa: E501
    :rtype: OneOfPressureInletBCNetRadiativeHeatFlux
    """
    return self._net_radiative_heat_flux

@net_radiative_heat_flux.setter
def net_radiative_heat_flux(self, net_radiative_heat_flux):
    """Sets the net_radiative_heat_flux of this PressureInletBC.

    :param net_radiative_heat_flux: The net_radiative_heat_flux of this PressureInletBC.  # noqa: E501
    :type: OneOfPressureInletBCNetRadiativeHeatFlux
    """
    self._net_radiative_heat_flux = net_radiative_heat_flux

@property
def topological_reference(self):
    """Gets the topological_reference of this PressureInletBC.  # noqa: E501

    :return: The topological_reference of this PressureInletBC.  # noqa: E501
    :rtype: TopologicalReference
    """
    return self._topological_reference

@topological_reference.setter
def topological_reference(self, topological_reference):
    """Sets the topological_reference of this PressureInletBC.

    :param topological_reference: The topological_reference of this PressureInletBC.  # noqa: E501
    :type: TopologicalReference
    """
    self._topological_reference = topological_reference
def to_dict(self):
    """Return the model properties as a dict.

    Performs exactly one level of conversion per attribute value,
    matching the OpenAPI-generator contract: model objects are converted
    via their own ``to_dict``; lists and dicts have their *elements*
    converted (but nested containers inside those elements are left to
    each element's own ``to_dict``).

    :return: attribute name -> converted value, for every key in
        ``self.openapi_types``.
    :rtype: dict
    """
    def _convert(value):
        # One level only -- deeper structures are handled by the
        # elements' own to_dict() implementations.
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()}
        return value

    # Native dict iteration replaces the py2-era six.iteritems() shim;
    # only the keys are needed here (the declared type strings are unused).
    return {attr: _convert(getattr(self, attr))
            for attr in self.openapi_types}
def to_str(self):
    """Return a pretty-printed string form of :meth:`to_dict`."""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
def __repr__(self):
    """Developer representation used by `print` and `pprint`."""
    rendered = self.to_str()
    return rendered
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Non-PressureInletBC operands are never equal; otherwise compare
    # the serialized dict forms.
    return (isinstance(other, PressureInletBC)
            and self.to_dict() == other.to_dict())
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    if isinstance(other, PressureInletBC):
        return self.to_dict() != other.to_dict()
    # Different types always compare unequal.
    return True
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
3184,
29990,
7824,
628,
220,
220,
220,
383,
2196,
286,
262,
4946,
17614,
3188,
25,
657,
13,
15,
13,
15,
198,
220,
220,
220,
2980,
515,
416,
25,
3740,
1378,
965... | 2.312037 | 5,259 |
import time
import uuid
import re
from pathlib import Path
import subprocess
import requests
import base64
from netaddr import *
from urllib3.exceptions import InsecureRequestWarning
from core import settings, helpers, audit, db, storage
from core.models import action
from plugins.inga.models import inga
from plugins.remote.includes import helpers as remoteHelpers
# Remote / Local Functions
11748,
640,
198,
11748,
334,
27112,
198,
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
850,
14681,
198,
11748,
7007,
198,
11748,
2779,
2414,
198,
6738,
2010,
29851,
1330,
1635,
198,
6738,
2956,
297,
571,
18,
13,
1069,
1175... | 3.989899 | 99 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import os
import typing
import deepdiff
import pytest
import pytest_mock
import fair.configuration as fdp_conf
import fair.identifiers as fdp_id
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
@pytest.mark.configuration
| [
11748,
28686,
198,
11748,
19720,
198,
198,
11748,
2769,
26069,
198,
11748,
12972,
9288,
198,
11748,
12972,
9288,
62,
76,
735,
198,
198,
11748,
3148,
13,
11250,
3924,
355,
277,
26059,
62,
10414,
198,
11748,
3148,
13,
738,
13350,
355,
277... | 2.980488 | 205 |
from collections import OrderedDict
import hashlib
from django.db import models
from django.conf import settings
from django.urls import reverse
from django.utils.safestring import mark_safe
import jsonfield
from model_utils.models import TimeStampedModel
from tworaven_apps.utils.basic_response import (ok_resp, err_resp)
from tworaven_apps.utils.json_helper import json_dumps
from tworaven_apps.ta2_interfaces.static_vals import \
(CALLBACK_URL, DETAILS_URL,
KEY_FITTED_SOLUTION_ID, KEY_PIPELINE_ID)
# Lifecycle states shared by StoredRequest and StoredResponse.
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_ERROR = 'ERROR'
STATUS_COMPLETE = 'COMPLETE'

STATUS_LIST = (STATUS_IN_PROGRESS,
               STATUS_ERROR, STATUS_COMPLETE)

# Django-style (value, label) choice pairs for the model fields below.
REQUEST_STATUS_CHOICES = [(status, status) for status in STATUS_LIST]
RESPONSE_STATUS_CHOICES = [(status, status)
                           for status in (STATUS_ERROR, STATUS_COMPLETE)]
class StoredRequest(TimeStampedModel):
    """For storing TA2 responses, especially streaming responses.

    One row per request sent to the TA2 backend; streaming replies are
    stored as related ``StoredResponse`` rows.  ``created``/``modified``
    timestamps come from ``TimeStampedModel``.
    """
    # Display label; auto-filled in save() as "(id) request_type".
    name = models.CharField(\
        blank=True,
        max_length=255,
        help_text='auto-generated')

    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE)

    workspace = models.CharField(\
        help_text='Used to identify this problem',
        max_length=255)

    request_type = models.CharField(\
        help_text='API request name',
        max_length=255)

    # Lifecycle state; save() derives is_finished from it.
    status = models.CharField(\
        max_length=255,
        default=STATUS_IN_PROGRESS,
        choices=REQUEST_STATUS_CHOICES)

    is_finished = models.BooleanField(default=False)

    search_id = models.CharField(\
        max_length=255,
        blank=True)

    pipeline_id = models.IntegerField(\
        default=-1,
        help_text=('Not always used'))

    request = jsonfield.JSONField(\
        help_text='JSON sent by user',
        load_kwargs=dict(object_pairs_hook=OrderedDict))

    # Opaque token used in callback URLs; auto-generated in save().
    hash_id = models.CharField(help_text='Used for urls (auto-generated)',
                               max_length=255,
                               blank=True)

    user_message = models.TextField(help_text='Mainly for error messages',
                                    max_length=255,
                                    blank=True)

    def __str__(self):
        """reference name"""
        return self.name

    class Meta:
        """ordering, etc"""
        ordering = ('-created',)

    def save(self, *args, **kwargs):
        """Set a name if one isn't specified"""
        # First save (new rows only) obtains the primary key needed for
        # the auto-generated name and hash_id below.
        if not self.id:
            super(StoredRequest, self).save(*args, **kwargs)
        if not self.name:
            self.name = '(%s) %s' % \
                        (self.id, self.request_type)
        if not self.hash_id:
            # Hash over id + creation time: stable, hard-to-guess URL token.
            hash_str = '%s %s' % (self.id, self.created)
            self.hash_id = hashlib.sha224(hash_str.encode('utf-8')).hexdigest()

        # Keep is_finished in sync with the terminal statuses.
        if self.status in (STATUS_COMPLETE, STATUS_ERROR):
            self.is_finished = True
        else:
            self.is_finished = False

        # truncate fields ()
        super(StoredRequest, self).save(*args, **kwargs)

    def get_absolute_url(self):
        """for the admin"""
        return self.get_callback_url(is_pretty=True)

    def get_callback_url(self, is_pretty=False):
        """Callback url for returning "as_dict" info"""
        # Unsaved rows have no hash_id yet, hence no URL.
        if not self.id:
            return None

        callback_url = reverse('view_stored_request',
                               kwargs=dict(hash_id=self.hash_id))
        if is_pretty:
            callback_url = '%s?pretty=True' % callback_url
        return callback_url

    def set_error_status(self, user_message=None, is_finished=True):
        """For the StoredRequest, set the status and message"""
        self.status = STATUS_ERROR
        self.is_finished = is_finished
        if user_message:
            self.user_message = user_message
        self.save()
        return ok_resp(self)

    def has_error_occurred(self):
        """convenience method to check if status == STATUS_ERROR"""
        return self.status == STATUS_ERROR

    def request_as_json(self, wrap_in_html=True):
        """Display OrderedDict as JSON"""
        if not self.request:
            return '(n/a)'

        json_info = json_dumps(self.request, indent=4)
        if json_info.success:
            if wrap_in_html:
                json_str = '<pre>%s</pre>' % json_info.result_obj
            else:
                json_str = json_info.result_obj
        else:
            json_str = 'Error: %s' % json_info.err_msg

        return mark_safe(json_str)

    # Dead code kept by the original author as a bare string literal
    # (evaluated and discarded at class-definition time).
    '''
    def as_json_string(self, **kwargs):
        """Convert as_dict result to a JSON string

        kwargs:
        - indent_level = default 4.
        - short_version = default False. If True, includes StoredRequest
        """
        indent_level = kwargs.get('indent_level', 4)

        dict_info = self.as_dict(**kwargs)

        json_info = json_dumps(dict_info, indent=indent_level)
        if json_info.success:
            json_str = '%s' % json_info.result_obj
        else:
            json_str = 'Error: %s' % json_info.err_msg

        return json_str
    '''

    def as_dict(self, **kwargs):
        """Return info as a dict

        kwargs:
        - short_version = default False. If True, includes StoredRequest
        """
        short_version = kwargs.get('short_version', False)

        attr_names = ('id', 'name', 'hash_id',
                      'is_finished', 'is_error',
                      'search_id', 'pipeline_id',
                      'status', 'user_message',
                      'workspace', 'request_type',
                      DETAILS_URL,
                      'request', 'request_as_json')

        od = OrderedDict()
        for key in attr_names:
            # Synthesized keys first; everything else is read straight
            # off the instance __dict__.
            if key == 'is_error':
                od[key] = self.has_error_occurred()
            elif key == 'is_finished':
                od[key] = self.is_finished
            elif key == 'request_as_json':
                od[key] = self.request_as_json(wrap_in_html=False)
            elif key == DETAILS_URL:
                od[DETAILS_URL] = self.get_callback_url()
            else:
                od[key] = self.__dict__.get(key)

        od['created'] = self.created.isoformat()
        od['modified'] = self.modified.isoformat()

        if short_version is True:
            # used by StoredResponse.as_dict()
            return od

        # Iterate through the related StoredResponse objects
        #
        response_list = []
        unread_cnt = 0
        for resp in self.storedresponse_set.all():
            if not resp.sent_to_user:
                unread_cnt += 1
            response_list.append(resp.as_dict(short_version=True))

        od['responses'] = dict(count=len(response_list),
                               unread_count=unread_cnt,
                               list=response_list)
        return od
class StoredResponse(TimeStampedModel):
    """For storing TA2 responses, especially streaming responses.

    Each row is one (possibly streamed) reply belonging to a
    ``StoredRequest``.
    """
    stored_request = models.ForeignKey(StoredRequest,
                                       on_delete=models.CASCADE)

    pipeline_id = models.IntegerField(\
        default=-1,
        help_text=('Not always used'))

    is_finished = models.BooleanField(default=False)

    # False until the UI has retrieved this response; see mark_as_read().
    sent_to_user = models.BooleanField(\
        help_text='Sent to the UI for user viewing',
        default=True)

    status = models.CharField(\
        max_length=255,
        default=STATUS_COMPLETE,
        choices=RESPONSE_STATUS_CHOICES)

    response = jsonfield.JSONField(\
        help_text='JSON received by the TA2',
        load_kwargs=dict(object_pairs_hook=OrderedDict))

    additionalInfo = jsonfield.JSONField(\
        blank=True,
        help_text=('Extra JSON added to response.'
                   ' For example, associated scoreIds.'
                   ' {scoreIds: []}'),
        load_kwargs=dict(object_pairs_hook=OrderedDict))

    # Opaque token used in callback URLs; auto-generated in save().
    hash_id = models.CharField(help_text='Used for urls (auto-generated)',
                               max_length=255,
                               blank=True)

    def save(self, *args, **kwargs):
        """Set a name if one isn't specified"""
        # First save (new rows only) obtains the primary key needed for
        # the auto-generated hash_id below.
        if not self.id:
            super(StoredResponse, self).save(*args, **kwargs)
        if not self.hash_id:
            hash_str = 'rsp-%s%s' % (self.id, self.created)
            self.hash_id = hashlib.sha224(hash_str.encode('utf-8')).hexdigest()

        # Update the status
        #
        if self.status in (STATUS_COMPLETE, STATUS_ERROR):
            self.is_finished = True
        else:
            self.is_finished = False

        super(StoredResponse, self).save(*args, **kwargs)

    class Meta:
        """ordering, etc"""
        ordering = ('-created',)

    def __str__(self):
        """reference name"""
        return '%s' % self.stored_request

    def search_id(self):
        """Return the search_id from the StoredRequest"""
        return self.stored_request.search_id

    def get_absolute_url(self):
        """for the admin"""
        return self.get_callback_url(is_pretty=True)

    def get_callback_url(self, is_pretty=False):
        """Callback url for returning "as_dict" info"""
        # Unsaved rows have no hash_id yet, hence no URL.
        if not self.id:
            return None

        callback_url = reverse('view_stored_response',
                               kwargs=dict(hash_id=self.hash_id))
        if is_pretty:
            callback_url = '%s?pretty=True' % callback_url
        return callback_url

    @staticmethod
    def get_callback_url_via_id(stored_response_id):
        """For returning the callback url with only the id"""
        assert stored_response_id, 'A stored_response_id is required'

        try:
            stored_response = StoredResponse.objects.get(pk=stored_response_id)
        except StoredResponse.DoesNotExist:
            return err_resp('Failed to find StoredResponse')

        return ok_resp(stored_response.get_callback_url())

    def link_to_request(self):
        """Admin link to request"""
        if not self.stored_request:
            return '(n/a)'

        url_str = '<a href="%s">view request</a>' % \
                  reverse('admin:ta2_interfaces_storedrequest_change',
                          args=(self.stored_request.id,))
        return mark_safe(url_str)

    def response_as_json(self, wrap_in_html=True):
        """Display OrderedDict as JSON"""
        if not self.response:
            return '(n/a)'

        json_info = json_dumps(self.response, indent=4)
        if json_info.success:
            if wrap_in_html:
                json_str = '<pre>%s</pre>' % json_info.result_obj
            else:
                json_str = json_info.result_obj
        else:
            json_str = 'Error: %s' % json_info.err_msg

        return mark_safe(json_str)

    def as_dict(self, **kwargs):
        """Return info as a dict

        kwargs:
        - short_version = default False. If True, includes StoredRequest
        """
        short_version = kwargs.get('short_version', False)

        # Retrieve kwargs (or default vals)
        attr_names = ('id', 'hash_id', 'pipeline_id',
                      'is_finished', 'is_error',
                      'status', 'sent_to_user',
                      DETAILS_URL)

        od = OrderedDict()
        for key in attr_names:
            if key == 'is_error':
                od[key] = self.has_error_occurred()
            elif key == DETAILS_URL:
                od[DETAILS_URL] = self.get_callback_url()
            else:
                od[key] = self.__dict__.get(key)
            # pipeline_id is exported twice: under its own name and under
            # the TA2 API key.
            if key == 'pipeline_id':
                od[KEY_PIPELINE_ID] = self.__dict__.get(key)

        od['created'] = self.created.isoformat()
        od['modified'] = self.modified.isoformat()

        od['response'] = self.response
        od['response_as_json'] = self.response_as_json(wrap_in_html=False)

        if short_version is True:
            # used if part of StoredRequest.as_dict() list
            return od

        od['stored_request'] = self.stored_request.as_dict(short_version=True)

        if self.additionalInfo:
            od['additionalInfo'] = self.additionalInfo

        return od

    def has_error_occurred(self):
        """convenience method to check if status == STATUS_ERROR"""
        return self.status == STATUS_ERROR

    def mark_as_sent_to_user(self):
        """Mark the response as read"""
        StoredResponse.mark_as_read(self)

    @staticmethod
    def mark_as_read(stored_response):
        """Mark the response as read"""
        assert isinstance(stored_response, StoredResponse), \
            'stored_response must be a StoredResponse instance'

        # Is 'sent_to_user' already set?
        if stored_response.sent_to_user is True:
            return False

        stored_response.sent_to_user = True
        stored_response.save()
        return True

    @staticmethod
    def add_err_response(stored_request, response, **kwargs):
        """Given a StoredRequest, create a StoredResponse with an error"""
        if not isinstance(stored_request, StoredRequest):
            return err_resp('"stored_request" must be a StoredRequest')

        stored_response = StoredResponse(\
            stored_request=stored_request,
            response=response,
            status=STATUS_ERROR,
            is_finished=True)

        # Save the pipeline id
        #
        pipeline_id = kwargs.get('pipeline_id')
        if pipeline_id:
            #
            # Has a pipeline_id been specified?
            #
            stored_response.pipeline_id = pipeline_id
            stored_request.pipeline_id = pipeline_id
            #
        elif stored_request.pipeline_id:
            #
            # Nope, is there a pipeline_id available in the StoredRequest
            #
            stored_response.pipeline_id = stored_request.pipeline_id

        # Save Response
        stored_response.save()

        # Save request
        if kwargs.get('request_complete', True) is True:
            stored_request.status = STATUS_COMPLETE
            stored_request.save()

        return ok_resp(stored_response)

    @staticmethod
    def add_stream_err_response(stored_request, response, **kwargs):
        """Create a StoredResponse with an error -- but leave the Request open,
        e.g., not complete"""
        if not isinstance(stored_request, StoredRequest):
            return err_resp('"stored_request" must be a StoredRequest')

        kwargs['request_complete'] = False

        return StoredResponse.add_err_response(stored_request,
                                               response,
                                               **kwargs)

    @staticmethod
    def add_stream_success_response(stored_request, response, **kwargs):
        """Given a StoredRequest, create a StoredResponse with a success response"""
        if not isinstance(stored_request, StoredRequest):
            return err_resp('"stored_request" must be a StoredRequest')

        kwargs['request_complete'] = False

        return StoredResponse.add_success_response(stored_request,
                                                   response,
                                                   **kwargs)

    @staticmethod
    def add_success_response(stored_request, response, **kwargs):
        """Given a StoredRequest, create a StoredResponse with a success response"""
        if not isinstance(stored_request, StoredRequest):
            return err_resp('"stored_request" must be a StoredRequest')

        stored_response = StoredResponse(\
            stored_request=stored_request,
            response=response,
            status=STATUS_COMPLETE,
            is_finished=True)

        new_pipeline_id = kwargs.get('pipeline_id')

        # Save the pipeline id
        #
        if new_pipeline_id:
            #
            # Has a pipeline_id been specified?
            #
            stored_response.pipeline_id = new_pipeline_id
            stored_request.pipeline_id = new_pipeline_id
        elif stored_request.pipeline_id:
            #
            # Nope, is there a pipeline_id available in the StoredRequest
            #
            stored_response.pipeline_id = stored_request.pipeline_id

        stored_response.save()

        # ---------------------------------
        # Update request object
        # ---------------------------------
        if (not stored_request.search_id) and kwargs.get('search_id'):
            stored_request.search_id = kwargs['search_id']

        # For streaming responses, we want to keep the STATUS as STATUS_IN_PROGRESS
        #
        if kwargs.get('request_complete', True) is True:
            stored_request.status = STATUS_COMPLETE
            stored_request.save()

        return ok_resp(stored_response)

    @staticmethod
    def add_response(stored_request_id, response, pipeline_id=None):
        """Retrieve the StoredRequest, set the status and message"""
        try:
            stored_request = StoredRequest.objects.get(pk=stored_request_id)
        except StoredRequest.DoesNotExist:
            return err_resp('Failed to find Stored Request')

        stored_response = StoredResponse(\
            stored_request=stored_request,
            response=response)
        if pipeline_id:
            stored_response.pipeline_id = pipeline_id

        stored_response.save()
        return ok_resp(stored_response)

    def use_id_as_pipeline_id(self):
        """Use the StoredResponse.id as the pipeline id"""
        if not self.id:
            return err_resp('The StoredResponse must be saved before using this method')

        return self.set_pipeline_id(self.id)

    def set_pipeline_id(self, pipeline_id):
        """Set the pipeline id in the current StoredRequest and save it."""
        if not pipeline_id:
            return err_resp('pipeline_id not set')

        if not str(pipeline_id).isdigit():
            return err_resp('The pipeline_id must be a number, an integer.')

        self.pipeline_id = pipeline_id
        self.save()
        return ok_resp(self)

    def get_value_by_key(self, key):
        """Used for pulling a value from the response"""
        if not self.response:
            return err_resp('No response available')

        if not key in self.response:
            return err_resp('Key not found in response')

        return ok_resp(self.response[key])
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
12234,
8019,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
1... | 2.062418 | 9,196 |
import os
import pandas as pd
import numpy as np
import gin
import shutil
from sklearn.model_selection import train_test_split
@gin.configurable
| [
11748,
28686,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
39733,
201,
198,
11748,
4423,
346,
201,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
... | 2.888889 | 54 |
from unittest import TestCase
from Blackjack.blackjack import Hand, Card, Deck
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
2619,
19650,
13,
13424,
19650,
1330,
7157,
11,
5172,
11,
20961,
628,
628
] | 3.608696 | 23 |
import MySQLdb
import urllib2, urllib, re, sys
import xml
from datetime import datetime
## Removes all records older than 30 days; run as a cron job.
# Main file; run as parseweather(woeid).
if __name__ == '__main__':
    # Script entry point: prune stale weather records.  `cleandb` is
    # defined elsewhere in this module; intended to be run from cron.
    cleandb()
| [
11748,
33476,
9945,
198,
11748,
2956,
297,
571,
17,
11,
2956,
297,
571,
11,
302,
11,
25064,
198,
11748,
35555,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2235,
2787,
5241,
477,
4406,
4697,
621,
1542,
1528,
492,
1057,
355,
1067... | 2.898734 | 79 |
from Model import *
from numpy import sign
| [
6738,
9104,
1330,
1635,
198,
6738,
299,
32152,
1330,
1051,
198
] | 3.909091 | 11 |
"""BlockDAG generator
This package contains code to build and compare BlockDAG in a lightweight-as-possible manner.
The main method (build_block_dag) performs a Kahn topological sort of the vertices (dict) and edges
(list of (u, v) tuples) building blocks for each vertex as needed.
"""
import json
import hashlib
import collections.abc
from merklelib import MerkleTree
def build_block_dag(
    vertices: dict,
    edges: list,
    hash_function=_default_hash,
    data_fields=None,
    append_hashes=False,
):
    """Builds and returns (optionally appending to the original data) BlockDAG signature data for a
    graph. Performs a Kahn topological sort of the vertices and edges, inclusively filtering by
    data_fields. The final signature is built by concatenating and sorting the hashes from each
    leaf in the graph, inserting them all into a Merkle Tree and taking the root.

    Parameters
    ----------
    vertices : dict
        A dictionary of vertex information.
    edges : list
        A list of (u, v) tuples containing the keys in the vertices.
    hash_function : function
        A function used to generate signatures throughout, as hex-digests.
    data_fields : list
        A list of keys used to inclusively filter data in all vertices.
        If none, will include all fields.
    append_hashes : bool, default=False
        If true, data hash data and will be appended to each vertex's value dictionary.
        The whole graph signature however, will not be added to the graph.

    Returns
    -------
    output_signatures : dict
        A dictionary containing the signature information for each vertex and for the whole graph.
    """
    # pylint: disable=R0914
    _check_args_build_block_dag(vertices, edges, data_fields, append_hashes)

    # Bookkeeping, keyed by vertex id:
    #   dropset[v]      -> [vertex data, in-degree, out-degree]
    #   workingset[v]   -> [block payload, hashes of processed parents]
    #   neighbourset[v] -> successors of v
    dropset = {}
    workingset = {}
    outputset = {}
    neighbourset = {}
    leaves = []
    visited = []
    queue = collections.deque()
    for v_id, vertex in vertices.items():
        dropset[v_id] = [vertex, 0, 0]  # Data, in-degree, out-degree
        workingset[v_id] = [None, []]  # Block_data, Parent_hashes
        neighbourset[v_id] = []

    # Tally in/out degrees and record adjacency.
    for src, dest in edges:
        dropset[dest][1] += 1
        dropset[src][2] += 1
        neighbourset[src].append(dest)

    # Seed the traversal with the roots (in-degree 0).  Vertices with no
    # successors are the leaves whose block hashes feed the graph signature.
    for v_id, degree_info in dropset.items():
        if degree_info[1] == 0:
            queue.append(v_id)
        if not neighbourset[v_id]:
            leaves.append(v_id)

    # Kahn traversal.  Note queue.pop() takes the RIGHT end (stack order);
    # this fixes the order in which parent hashes are appended below.
    while queue:
        v_id = queue.pop()
        workingset[v_id][0] = _build_hash_payload(
            dropset[v_id][0], data_fields, hash_function
        )
        hashes = {}
        hashes.update(workingset[v_id][0])
        hashes["parent_hashes"] = workingset[v_id][1]
        _build_block_hash(hashes, hash_function)
        outputset[v_id] = hashes
        visited.append(v_id)
        # Propagate this block's hash to each successor; a successor is
        # enqueued once all of its parents have been processed.
        for neighbour in neighbourset[v_id]:
            dropset[neighbour][1] -= 1
            workingset[neighbour][1].append(outputset[v_id]["hash"])
            if dropset[neighbour][1] == 0:
                queue.append(neighbour)

    # A complete Kahn traversal visits every vertex; leftovers imply a cycle.
    if len(visited) != len(dropset):
        raise AssertionError("Some vertices untraversed")

    leaf_vertices = []
    for leaf in leaves:
        leaf_vertices.append(outputset[leaf])
    outputset["signature"] = _generate_graph_signature(leaf_vertices, hash_function)

    if append_hashes:
        for vid, vertex in vertices.items():
            vertex.update(outputset[vid])

    return outputset
def compare_dags(vertices_1: dict, vertices_2: dict):
    """Compares two BlockDags and finds vertices which differ between them.

    Parameters
    ----------
    vertices_1 : dict
        The dictionary of the first BlockDAG containing all information built with build_hash_dag.
    vertices_2 : dict
        The dictionary of the first BlockDAG containing all information built with build_hash_dag.

    Returns
    -------
    identical : bool
        True if DAGs are identical, False otherwise.
    difference_list_1 : list
        List of vertex labels from the first set which differ.
    difference_list_2 : list
        List of vertex labels form the second set which differ.
    """
    # Assumes vertices_1 contains the hash signatures for this data
    _check_args_compare_dags(vertices_1, vertices_2)

    if vertices_1["signature"] == vertices_2["signature"]:
        # They match, no work to be done
        return True, [], []

    def _hash_to_label(vertices):
        """Map block hash -> vertex label, skipping non-vertex entries."""
        mapping = {}
        for key, val in vertices.items():
            # Skip non-mapping entries such as the top-level 'signature'
            # string.  NOTE: the original test was `hasattr(val, "__get__")`,
            # which is False for plain dicts, so EVERY vertex was skipped and
            # the difference lists were always empty.
            if not isinstance(val, collections.abc.Mapping):
                continue
            if val.get("hash") is None:
                raise ValueError(f"Vertex {key} does not contain hash")
            mapping[val["hash"]] = key
        return mapping

    sigmap_1 = _hash_to_label(vertices_1)
    sigmap_2 = _hash_to_label(vertices_2)

    # Labels whose block hashes appear in one DAG but not the other.
    out_1 = sorted(sigmap_1[sig] for sig in set(sigmap_1) - set(sigmap_2))
    out_2 = sorted(sigmap_2[sig] for sig in set(sigmap_2) - set(sigmap_1))

    return False, out_1, out_2
def pretty_print(vertices=None, edges=None, signatures=None, indent=4):
    """Prints the given vertices, edges and generated signatures as an indented json dump

    Parameters
    ----------
    vertices : dict
        The dictionary of vertices to print
    edges : list
        The list of (u, v) tuples to print
    signatures : dict
        The dictionary of generated signatures (from build_block_dag)
    indent : int, default=True
        The level of json indentation
    """
    sections = (
        ("------\tVERTICES\t------", vertices),
        ("------\tEDGES\t------", edges),
        ("------\tSIGNATURES\t------", signatures),
    )
    # Emit each supplied section as a banner line followed by its JSON dump.
    for banner, payload in sections:
        if payload:
            print(banner)
            print(json.dumps(payload, indent=indent))
def pretty_prints(vertices=None, edges=None, signatures=None, indent=4):
    """Returns the string that would be printed by ``pretty_print``: the
    given vertices, edges and generated signatures as an indented json dump.

    Parameters
    ----------
    vertices : dict
        The dictionary of vertices to print
    edges : list
        The list of (u, v) tuples to print
    signatures : dict
        The dictionary of generated signatures (from build_block_dag)
    indent : int, default=True
        The level of json indentation

    Returns
    -------
    str
        The concatenated sections; empty string when nothing is supplied.
    """
    pieces = []
    # FIX: the original appended json.dumps(...) without a trailing newline,
    # so consecutive sections ran together on one line -- unlike the output
    # of pretty_print, which this function claims to mirror.
    if vertices:
        pieces.append("------\tVERTICES\t------\n")
        pieces.append(json.dumps(vertices, indent=indent) + "\n")
    if edges:
        pieces.append("------\tEDGES\t------\n")
        pieces.append(json.dumps(edges, indent=indent) + "\n")
    if signatures:
        pieces.append("------\tSIGNATURES\t------\n")
        pieces.append(json.dumps(signatures, indent=indent) + "\n")
    return "".join(pieces)
37811,
12235,
35,
4760,
17301,
198,
198,
1212,
5301,
4909,
2438,
284,
1382,
290,
8996,
9726,
35,
4760,
287,
257,
18700,
12,
292,
12,
79,
4733,
5642,
13,
198,
464,
1388,
2446,
357,
11249,
62,
9967,
62,
67,
363,
8,
17706,
257,
42264,
... | 2.478184 | 2,842 |
from pathlib import Path
# Path of the geometry-calibration set, resolved relative to this module.
calib_set_json_path = (
    Path(__file__).parent / 'databases/geometry_calibration/calibration_set.json'
)
6738,
3108,
8019,
1330,
10644,
198,
198,
9948,
571,
62,
2617,
62,
17752,
62,
6978,
796,
357,
15235,
7,
834,
7753,
834,
737,
8000,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.220588 | 68 |
from datetime import datetime, date
import sys
if sys.version_info >= (2, 7):
from nose.tools import assert_dict_equal
import xlwings as xw
try:
import numpy as np
from numpy.testing import assert_array_equal
except ImportError:
np = None
try:
import pandas as pd
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
except ImportError:
pd = None
# Defaults
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
# Keyword args on default converters
@xw.func
@xw.arg('x', ndim=1)
@xw.func
@xw.arg('x', ndim=2)
@xw.func
@xw.arg('x', transpose=True)
@xw.func
@xw.ret(transpose=True)
@xw.func
@xw.arg('x', dates=date)
@xw.func
@xw.arg('x', dates=date)
@xw.func
@xw.arg('x', dates=datetime)
@xw.func
@xw.arg('x', empty='empty')
if sys.version_info >= (2, 7):
# assert_dict_equal isn't available on nose for PY 2.6
# Dicts
@xw.func
@xw.arg('x', dict)
@xw.func
@xw.arg('x', dict, transpose=True)
@xw.func
# Numpy Array
if np:
@xw.func
@xw.arg('x', np.array)
@xw.func
@xw.arg('x', np.array)
@xw.func
@xw.arg('x', np.array)
@xw.func
@xw.arg('x', np.array)
@xw.func
@xw.arg('x', np.array)
# Keyword args on Numpy arrays
@xw.func
@xw.arg('x', np.array, ndim=1)
@xw.func
@xw.arg('x', np.array, ndim=2)
@xw.func
@xw.arg('x', np.array, transpose=True)
@xw.func
@xw.ret(transpose=True)
@xw.func
@xw.arg('x', np.array, dates=date)
@xw.func
@xw.arg('x', np.array, empty='empty')
@xw.func
# Pandas Series
if pd:
@xw.func
@xw.arg('x', pd.Series, header=False, index=False)
@xw.func
@xw.arg('x', pd.Series, header=False, index=True)
@xw.func
@xw.arg('x', pd.Series, header=True, index=False)
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
@xw.func
@xw.arg('x', pd.Series, header=False, index=2)
@xw.func
@xw.ret(pd.Series, index=False)
@xw.func
@xw.ret(pd.Series, index=True)
@xw.func
@xw.ret(pd.Series, index=False)
@xw.func
@xw.func
@xw.ret(pd.Series, index=True, header=True)
@xw.func
@xw.ret(pd.Series, header=True, index=2)
@xw.func
@xw.ret(pd.Series, header=True, index=2)
@xw.func
@xw.ret(pd.Series, header=False, index=2)
@xw.func
@xw.arg('x', pd.Series)
@xw.func
@xw.ret(pd.Series)
@xw.func
@xw.ret(pd.Series, index=False)
# Pandas DataFrame
if pd:
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=False)
@xw.func
@xw.ret(pd.DataFrame, index=False, header=False)
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=True)
@xw.func
@xw.ret(pd.DataFrame, index=False, header=True)
@xw.func
@xw.arg('x', pd.DataFrame, index=True, header=False)
@xw.func
@xw.ret(pd.DataFrame, index=True, header=False)
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=False)
@xw.func
@xw.ret(pd.DataFrame, index=2, header=False)
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=1)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=1)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=2)
@xw.func
@xw.ret(pd.DataFrame, index=False, header=2)
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=2)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=2)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=2)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=2)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=1)
@xw.func
@xw.func
@xw.arg('x', pd.DataFrame)
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func
@xw.func(category=1)
@xw.func(category=2)
@xw.func(category=3)
@xw.func(category=4)
@xw.func(category=5)
@xw.func(category=6)
@xw.func(category=7)
@xw.func(category=8)
@xw.func(category=9)
@xw.func(category=10)
@xw.func(category=11)
@xw.func(category=12)
@xw.func(category=13)
@xw.func(category=14)
try:
@xw.func(category=15)
except Exception as e:
assert e.args[0] == 'There is only 14 build-in categories available in Excel. Please use a string value to specify a custom category.'
else:
assert False
try:
@xw.func(category=0)
except Exception as e:
assert e.args[0] == 'There is only 14 build-in categories available in Excel. Please use a string value to specify a custom category.'
else:
assert False
@xw.func(category='custom category')
try:
@xw.func(category=1.1)
except Exception as e:
assert e.args[0] == 'Category 1.1 should either be a predefined Excel category (int value) or a custom one (str value).'
else:
assert False
@xw.func
@xw.func
if __name__ == "__main__":
xw.serve()
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
198,
11748,
25064,
198,
361,
25064,
13,
9641,
62,
10951,
18189,
357,
17,
11,
767,
2599,
198,
220,
220,
220,
422,
9686,
13,
31391,
1330,
6818,
62,
11600,
62,
40496,
198,
11748,
2124,
75,
... | 2.019305 | 2,590 |
# -*- coding: utf-8 -*-
# @Time : 2021/08/14 16:30
# @Author : srcrs
# @Email : srcrs@foxmail.com
import requests,json,time,re,login,logging,traceback,os,random,notify,datetime
from lxml.html import fromstring
#每日签到,1积分 ,第七天得到 1G 日包
#位置: 我的 --> 我的金币 | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33448,
14,
2919,
14,
1415,
1467,
25,
1270,
198,
2,
2488,
13838,
220,
1058,
12351,
3808,
198,
2,
2488,
15333,
220,
220,
1058,
12351,... | 1.706667 | 150 |
import komand
from .schema import ConnectionSchema, Input
# Custom imports below
import jbxapi
from komand.exceptions import ConnectionTestException
| [
11748,
479,
296,
392,
198,
6738,
764,
15952,
2611,
1330,
26923,
27054,
2611,
11,
23412,
198,
198,
2,
8562,
17944,
2174,
198,
11748,
474,
65,
87,
15042,
198,
6738,
479,
296,
392,
13,
1069,
11755,
1330,
26923,
14402,
16922,
628
] | 3.775 | 40 |
import struct
import unittest
import zstandard as zstd
ss = struct.Struct("=QQ")
| [
11748,
2878,
198,
11748,
555,
715,
395,
198,
198,
11748,
1976,
20307,
355,
1976,
19282,
198,
198,
824,
796,
2878,
13,
44909,
7203,
28,
48,
48,
4943,
628,
198
] | 2.931034 | 29 |
import inspect
import typing
from typing import Callable, Dict, Any
from flask_resql import resql as rs
from flask_resql.resql import gen_args_from_params
| [
11748,
10104,
198,
11748,
19720,
198,
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
4377,
198,
198,
6738,
42903,
62,
411,
13976,
1330,
581,
13976,
355,
44608,
198,
6738,
42903,
62,
411,
13976,
13,
411,
13976,
1330,
2429,
62,
22046,
... | 3.333333 | 48 |
# coding: utf-8
"""
Canopy.Api
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from canopy.openapi.api_client import ApiClient
from canopy.openapi.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SimVersionApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def sim_version_get_document(self, sim_version, document_path, **kwargs): # noqa: E501
"""sim_version_get_document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_document(sim_version, document_path, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sim_version: (required)
:param str document_path: (required)
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetSimVersionDocumentQueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_document_with_http_info(sim_version, document_path, **kwargs) # noqa: E501
def sim_version_get_document_with_http_info(self, sim_version, document_path, **kwargs): # noqa: E501
"""sim_version_get_document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_document_with_http_info(sim_version, document_path, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sim_version: (required)
:param str document_path: (required)
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GetSimVersionDocumentQueryResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'sim_version',
'document_path',
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_document" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sim_version' is set
if self.api_client.client_side_validation and ('sim_version' not in local_var_params or # noqa: E501
local_var_params['sim_version'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sim_version` when calling `sim_version_get_document`") # noqa: E501
# verify the required parameter 'document_path' is set
if self.api_client.client_side_validation and ('document_path' not in local_var_params or # noqa: E501
local_var_params['document_path'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `document_path` when calling `sim_version_get_document`") # noqa: E501
collection_formats = {}
path_params = {}
if 'sim_version' in local_var_params:
path_params['simVersion'] = local_var_params['sim_version'] # noqa: E501
if 'document_path' in local_var_params:
path_params['documentPath'] = local_var_params['document_path'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/{simVersion}/documents/{documentPath}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetSimVersionDocumentQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_get_documents(self, sim_version, **kwargs): # noqa: E501
"""sim_version_get_documents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_documents(sim_version, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sim_version: (required)
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetSimVersionDocumentsQueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_documents_with_http_info(sim_version, **kwargs) # noqa: E501
def sim_version_get_documents_with_http_info(self, sim_version, **kwargs): # noqa: E501
"""sim_version_get_documents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_documents_with_http_info(sim_version, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sim_version: (required)
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GetSimVersionDocumentsQueryResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'sim_version',
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_documents" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sim_version' is set
if self.api_client.client_side_validation and ('sim_version' not in local_var_params or # noqa: E501
local_var_params['sim_version'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sim_version` when calling `sim_version_get_documents`") # noqa: E501
collection_formats = {}
path_params = {}
if 'sim_version' in local_var_params:
path_params['simVersion'] = local_var_params['sim_version'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/{simVersion}/documents', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetSimVersionDocumentsQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_get_downloads(self, sim_version, **kwargs): # noqa: E501
"""sim_version_get_downloads # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_downloads(sim_version, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sim_version: (required)
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetSimVersionDownloadsQueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_downloads_with_http_info(sim_version, **kwargs) # noqa: E501
def sim_version_get_downloads_with_http_info(self, sim_version, **kwargs): # noqa: E501
"""sim_version_get_downloads # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_downloads_with_http_info(sim_version, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sim_version: (required)
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GetSimVersionDownloadsQueryResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'sim_version',
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_downloads" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sim_version' is set
if self.api_client.client_side_validation and ('sim_version' not in local_var_params or # noqa: E501
local_var_params['sim_version'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sim_version` when calling `sim_version_get_downloads`") # noqa: E501
collection_formats = {}
path_params = {}
if 'sim_version' in local_var_params:
path_params['simVersion'] = local_var_params['sim_version'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/{simVersion}/downloads', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetSimVersionDownloadsQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_get_sim_version(self, **kwargs): # noqa: E501
"""sim_version_get_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_sim_version(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_sim_version_with_http_info(**kwargs) # noqa: E501
def sim_version_get_sim_version_with_http_info(self, **kwargs): # noqa: E501
"""sim_version_get_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_sim_version_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_sim_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/current', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_get_wiki_document(self, wiki_version, document_path, **kwargs): # noqa: E501
"""sim_version_get_wiki_document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_wiki_document(wiki_version, document_path, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str wiki_version: (required)
:param str document_path: (required)
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetWikiDocumentQueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_wiki_document_with_http_info(wiki_version, document_path, **kwargs) # noqa: E501
def sim_version_get_wiki_document_with_http_info(self, wiki_version, document_path, **kwargs): # noqa: E501
"""sim_version_get_wiki_document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_wiki_document_with_http_info(wiki_version, document_path, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str wiki_version: (required)
:param str document_path: (required)
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GetWikiDocumentQueryResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'wiki_version',
'document_path',
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_wiki_document" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'wiki_version' is set
if self.api_client.client_side_validation and ('wiki_version' not in local_var_params or # noqa: E501
local_var_params['wiki_version'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `wiki_version` when calling `sim_version_get_wiki_document`") # noqa: E501
# verify the required parameter 'document_path' is set
if self.api_client.client_side_validation and ('document_path' not in local_var_params or # noqa: E501
local_var_params['document_path'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `document_path` when calling `sim_version_get_wiki_document`") # noqa: E501
collection_formats = {}
path_params = {}
if 'wiki_version' in local_var_params:
path_params['wikiVersion'] = local_var_params['wiki_version'] # noqa: E501
if 'document_path' in local_var_params:
path_params['documentPath'] = local_var_params['document_path'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/{wikiVersion}/wiki/{documentPath}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetWikiDocumentQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_post_sim_version(self, sim_version_data, **kwargs): # noqa: E501
"""sim_version_post_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_post_sim_version(sim_version_data, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param NewSimVersionData sim_version_data: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_post_sim_version_with_http_info(sim_version_data, **kwargs) # noqa: E501
def sim_version_post_sim_version_with_http_info(self, sim_version_data, **kwargs): # noqa: E501
"""sim_version_post_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_post_sim_version_with_http_info(sim_version_data, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param NewSimVersionData sim_version_data: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'sim_version_data'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_post_sim_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sim_version_data' is set
if self.api_client.client_side_validation and ('sim_version_data' not in local_var_params or # noqa: E501
local_var_params['sim_version_data'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sim_version_data` when calling `sim_version_post_sim_version`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'sim_version_data' in local_var_params:
body_params = local_var_params['sim_version_data']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/current', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
1680,
11081,
13,
32,
14415,
628,
220,
220,
220,
1400,
6764,
2810,
357,
27568,
416,
4946,
15042,
35986,
3740,
1378,
12567,
13,
785,
14,
9654,
499,
270,
10141,
14,
... | 2.142215 | 15,582 |
# Generated by Django 2.1.1 on 2019-02-05 10:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
13130,
12,
2999,
12,
2713,
838,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import numpy as np
class FuzzyKmeans():
"""
Fuzzy Kmeans calculates cluster probabilities in regards to euclidian distance
Equivalent to vanilla Kmeans if we assign a given point to the cluster with the highest fuzzy probability
Parameters:
X: numpy array() data matrix
U: cluster probabilities
centers: center values
m: numeric, the degree of uncertainty, (fuzziness of cluster)
"""
def calculate_centers(self,U,X):
"""
Recalculates centers using a linear combination of fuzzy probs and X values
Normalized by the sum of all the fuzzy probs.
"""
m = self.m
ones = np.ones(X.shape[0]).reshape((X.shape[0],1))
denom = (1/(U**m).dot(ones)).reshape(U.shape[0],)
diagonal = np.diag(denom)
centers = diagonal.dot((U**m).dot(X))
return centers
def fit(self,k,exit=0.01,seed=1,maxiterations=100,argmax=False):
"""
Main clustering function
k is the number of clusters
exit is the exit criteria
set argmax = True for normal K-means
"""
X = self.X
np.random.seed(seed)
U = np.random.uniform(0,1,size=(k,X.shape[0])) #initialize cluster probabilities
centers = self.calculate_centers(U,X)
newcenters = 2*centers
count = 0
while np.linalg.norm((centers - newcenters),2) >= exit and count <= maxiterations:
newcenters = centers
U = self.calculate_fuzzy(X,centers)
centers = self.calculate_centers(U,X)
count += 1
self.U = U
self.centers = centers | [
11748,
299,
32152,
355,
45941,
198,
198,
4871,
376,
4715,
88,
42,
1326,
504,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
376,
4715,
88,
509,
1326,
504,
43707,
13946,
39522,
287,
13957,
284,
304,
36616,
19825,
5253,
198,
220,... | 2.248313 | 741 |
Python 2.7.9 (default, Sep 17 2016, 20:26:04)
[GCC 4.9.2] on linux2
Type "copyright", "credits" or "license()" for more information.
>>> ================================ RESTART ================================
>>>
Initializing...
1
Primary SAM3X8E Serial Connected
00
f1
31
32
2e
33
34
35
36
37
20
20
ff
55
34
35
36
37
20
20
ff
55
00
f1
31
32
2e
33
34
35
36
37
20
20
ff
55
00
f1
31
32
2e
33
34
35
36
37
20
20
ff
55
00
f1
31
32
2e
33
34
35
36
37
20
20
ff
55
00
f1
31
32
2e
33
34
35
36
37
20
20
ff
55
00
f1
31
32
2e
33
34
35
36
37
20
20
ff
55
00
f1
31
32
2e
33
34
35
Threading mode not enabled
Beginning Primary Loop
>>>
| [
37906,
362,
13,
22,
13,
24,
357,
12286,
11,
8621,
1596,
1584,
11,
1160,
25,
2075,
25,
3023,
8,
220,
198,
58,
38,
4093,
604,
13,
24,
13,
17,
60,
319,
32639,
17,
198,
6030,
366,
22163,
4766,
1600,
366,
66,
20696,
1,
393,
366,
43... | 2.016181 | 309 |
import numpy
import seaborn as sns
from matplotlib.ticker import PercentFormatter
from pandas.api.types import is_numeric_dtype, is_object_dtype, is_categorical_dtype
from grplot.features.plot.packedbubbles import plot as pb
from grplot.features.plot.treemaps import plot as tms
| [
11748,
299,
32152,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
22512,
8479,
1436,
198,
6738,
19798,
292,
13,
15042,
13,
19199,
1330,
318,
62,
77,
39223,
62,
67,
4906,
11,
318,
62,
15... | 3.1 | 90 |
"""Testing facility for conkit.core.distance.Distance"""
import unittest
import numpy as np
from conkit.core.distance import Distance
from conkit.core.distogram import Distogram
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
37811,
44154,
6841,
329,
369,
15813,
13,
7295,
13,
30246,
13,
45767,
37811,
198,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
369,
15813,
13,
7295,
13,
30246,
1330,
34600,
198,
6738,
369,
15813,
13,
7295,
... | 3.157895 | 76 |
# Copyright (C) 2018-2021 OpenMMLab
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from .dist_utils import (DistOptimizerHook, all_reduce_dict,
allreduce_grads, reduce_mean)
from .lr_updater import CosineAnnealingUntilEpochLrUpdaterHook
from .misc import mask2ndarray, multi_apply, unmap
__all__ = [
'allreduce_grads', 'CosineAnnealingUntilEpochLrUpdaterHook',
'DistOptimizerHook', 'mask2ndarray', 'multi_apply', 'unmap',
'reduce_mean', 'all_reduce_dict'
]
| [
2,
15069,
357,
34,
8,
2864,
12,
1238,
2481,
4946,
44,
5805,
397,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
15069,
357,
34,
8,
12131,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
5... | 2.393305 | 239 |
"""
Testing that the request/response content limit policy limits the content-length of the
response body
"""
import pytest
from packaging.version import Version # noqa # pylint: disable=unused-import
from testsuite import rawobj, TESTED_VERSION # noqa # pylint: disable=unused-import
pytestmark = [pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-5244"),
pytest.mark.skipif("TESTED_VERSION < Version('2.11')"),
pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6736")]
@pytest.fixture(scope="module")
def policy_settings():
"""
Enable the content limits policy and sets the request limit
"""
return rawobj.PolicyConfig("payload_limits", {"response": 100})
@pytest.fixture(scope="module")
def service_proxy_settings(private_base_url):
"""Use httpbin as backend url"""
return rawobj.Proxy(private_base_url("httpbin"))
@pytest.mark.parametrize("num_bytes,status_code", [(10, 200),
(100, 200),
(101, 413)])
def test_payload_limits_response(api_client, num_bytes, status_code):
"""
Tests that the backend response with a content_length greater than the limit
will produce 413 status code.
Also asserts that the 'Content-Length' header corresponds to the actual content length.
- send a request to the httpbin "/bytes/{num_bytes}" endpoint, that will produce a response
containing a body of length num_bytes
- if num_bytes < RESPONSE_LIMIT assert 200
- if num bytes > RESPONSE_LIMIT assert 413
"""
response = api_client().get(f"/bytes/{num_bytes}")
assert response.status_code == status_code
assert response.headers['Content-Length'] == str(len(response.content))
| [
37811,
198,
44154,
326,
262,
2581,
14,
26209,
2695,
4179,
2450,
7095,
262,
2695,
12,
13664,
286,
262,
198,
26209,
1767,
198,
37811,
198,
11748,
12972,
9288,
198,
6738,
16846,
13,
9641,
1330,
10628,
220,
1303,
645,
20402,
1303,
279,
2645... | 2.68006 | 672 |
""" Tests output file handling.
"""
from importlib import import_module
from typing import Any
from os.path import join
from unittest.mock import patch
import pytest
builder = import_module("build-export") # type: Any
CFG = """
[Source]
type = "directory"
srctype = 'xml'
cspdir = 'csp'
datadir = 'data'
srcdir = 'src'
[CSP]
export = 'none'
[Data]
export = 'none'
[Directory]
path = '{path}'
[Local]
"""
@pytest.mark.usefixtures("reload_modules")
def test_unknown_replacement(src_tree, tmp_path):
""" Tests log warning for unknown replacement
"""
# Create configuration
toml = CFG.format(path=src_tree, cspdir='csp')
toml += "outfile = 'out{replaceme}.xml'"
# Write to file
cfgfile = str(tmp_path / 'cfg.toml')
with open(cfgfile, 'wt') as f:
f.write(toml)
# Create export
args = ['builder', cfgfile, '--no-gui']
with patch('sys.argv', args):
builder.main()
# Make sure warning present in log file
log = tmp_path / 'cfg.log'
text = log.read_text()
assert "ignoring unrecognized replacement in outfile" in text, f"Unexpected log message {text}"
# Make sure output file has replacement string as-is
out = tmp_path / 'out{replaceme}.xml'
assert out.exists(), "Output filename doesn't have unaltered replacement string"
@pytest.mark.usefixtures("reload_modules")
def test_create_output_dir(src_tree, tmp_path):
""" Tests that output directory is created automatically
"""
# Create configuration
toml = CFG.format(path=src_tree, cspdir='csp')
subpath = join('subpath', 'out.xml')
toml = f"{toml}\noutfile='{subpath}'\n"
# Write to file
cfgfile = str(tmp_path / 'cfg.toml')
with open(cfgfile, 'wt') as f:
f.write(toml)
# Create export
args = ['builder', cfgfile, '--no-gui']
with patch('sys.argv', args):
builder.main()
# Make sure output file has replacement string as-is
out = tmp_path / 'subpath' / 'out.xml'
assert out.exists(), "Output file not in subpath"
| [
37811,
30307,
5072,
2393,
9041,
13,
198,
37811,
198,
198,
6738,
1330,
8019,
1330,
1330,
62,
21412,
198,
6738,
19720,
1330,
4377,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
1174... | 2.599493 | 789 |
from datetime import datetime
import pytest
from ..parse_mission_log_line import parse, UnexpectedATypeWarning
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
11485,
29572,
62,
3411,
62,
6404,
62,
1370,
1330,
21136,
11,
471,
42072,
1404,
2981,
20361,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
... | 2.980769 | 52 |
"""
Scrabble Hand
Given an array of scrabble tiles,
create a function that outputs the maximum possible score a player can achieve
by summing up the total number of points for all the tiles in their hand.
Each hand contains 7 scrabble tiles.
"""
from unittest import TestCase
tile, score = 'tile', 'score'
arg, expected = 'arg', 'expected'
| [
37811,
201,
198,
3351,
25619,
903,
7157,
201,
198,
15056,
281,
7177,
286,
6040,
397,
903,
19867,
11,
201,
198,
220,
220,
220,
2251,
257,
2163,
326,
23862,
262,
5415,
1744,
4776,
257,
2137,
460,
4620,
201,
198,
220,
220,
220,
416,
21... | 3.172414 | 116 |
features_data = np.array(sentences)
# แบ่งข้อมูลเป็น 10 ชุด โดยไม่เรียง
k_fold = KFold(n_splits=10, random_state=1992, shuffle=True)
word_features = None
accuracy_scores = []
for train_set, test_set in k_fold.split(features_data):
word_features = get_word_features(get_words_in_sentences(features_data[train_set].tolist()))
train_features = apply_features(extract_features, features_data[train_set].tolist())
test_features = apply_features(extract_features, features_data[test_set].tolist())
classifier = NaiveBayesClassifier.train(train_features)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(test_features):
refsets[label].add(i)
observed = classifier.classify(feats)
testsets[observed].add(i)
accuracy_scores.append(util.accuracy(classifier, test_features))
print('train: {} test: {}'.format(len(train_set), len(test_set)))
print('=================== Results ===================')
print('Accuracy {:f}'.format(accuracy_set[-1]))
print(' Positive Negative')
print('F1 [{:f} {:f}]'.format(
f_measure(refsets['pos'], testsets['pos']),
f_measure(refsets['neg'], testsets['neg'])
))
print('Precision [{:f} {:f}]'.format(
precision(refsets['pos'], testsets['pos']),
precision(refsets['neg'], testsets['neg'])
))
print('Recall [{:f} {:f}]'.format(
recall(refsets['pos'], testsets['pos']),
recall(refsets['neg'], testsets['neg'])
))
print('===============================================\n') | [
40890,
62,
7890,
796,
45941,
13,
18747,
7,
34086,
3007,
8,
198,
2,
220,
31479,
223,
19567,
248,
31479,
230,
19567,
229,
19567,
224,
31479,
231,
19567,
255,
19567,
94,
19567,
117,
19567,
98,
31479,
222,
19567,
249,
31479,
229,
19567,
2... | 2.315938 | 709 |
# coding: utf-8
from dateutil.parser import parse
from pycti import OpenCTIApiClient
# Variables
api_url = "https://demo.opencti.io"
api_token = "YOUR_TOKEN"
# OpenCTI initialization
opencti_api_client = OpenCTIApiClient(api_url, api_token)
# Define the date
date = parse("2019-12-01").strftime("%Y-%m-%dT%H:%M:%SZ")
# Create the author (if not exists)
organization = opencti_api_client.identity.create(
type="Organization",
name="My organization",
alias=["my-organization"],
description="A new organization.",
)
# Create the report
report = opencti_api_client.report.create(
name="My new report of my organization",
description="A report wrote by my organization",
published=date,
report_types=["internal-report"],
createdBy=organization["id"],
)
# Print
print(report)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
198,
198,
6738,
12972,
310,
72,
1330,
4946,
4177,
3539,
14415,
11792,
198,
198,
2,
15965,
2977,
198,
15042,
62,
6371,
796,
366,
5450,
1378,
9536,
7... | 2.768707 | 294 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 基础模块
import os
import sys
print ('filter_useless_data')
os.system('python filter_useless_data.py')
print ('gen_imprs')
os.system('python gen_imprs.py')
print ('gen_samples')
os.system('python gen_samples.py')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
10263,
253,
118,
163,
94,
222,
162,
101,
94,
161,
251,
245,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
... | 2.258621 | 116 |
from wolverine.module.service import MicroService, ServiceMessage, ServiceDef
from wolverine.test import TestMicroApp
#class TestService(object):
# def test_micro_service(event_loop):
# app = TestMicroApp(loop=event_loop)
# options = {'op_1': 'test', 'op_2': True}
# service = MicroService(app, name='test', version=2, **options)
# assert service.name == 'test'
# assert service.version == 2
# assert service.options['op_1'] == 'test'
# assert service.options['op_2']
# def test_service_message(self):
# message = ServiceMessage()
# message.data = [{'name': 'test', 'version': 1}]
# assert message.has_error() != True
# message.err({'exception': 'failed', 'severity': 'high'})
# assert message.has_error()
# assert message.response() == {
# 'data': [{'name': 'test', 'version': 1}],
# 'errors': [{'exception': 'failed', 'severity': 'high'}]
# }
# def test_service_def(self):
# service = ServiceDef(name='test', version='2')
# service.routes.append('test/method')
# assert service.fqn() == 'wolverine:service/test/2'
# assert str(service) == str({
# 'name': 'test',
# 'routes': ['test/method'],
# 'version': '2'
# }) | [
6738,
266,
14375,
500,
13,
21412,
13,
15271,
1330,
4527,
16177,
11,
4809,
12837,
11,
4809,
7469,
198,
6738,
266,
14375,
500,
13,
9288,
1330,
6208,
13031,
4677,
628,
198,
2,
4871,
6208,
16177,
7,
15252,
2599,
198,
198,
2,
220,
220,
2... | 2.255137 | 584 |