| code (string, lengths 22-1.05M) | apis (list, lengths 1-3.31k) | extract_api (string, lengths 75-3.25M) |
|---|---|---|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io
# The Windows console uses the cp437 encoding, which only supports 256 characters,
# which means that some unicode chars can't be rendered, so one quick fix is
# to escape those chars and print their actual code instead of rendering them.
# backslashreplace = replace with backslashed escape sequences (escape unsupported chars)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')
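# On Python 3.7+, an alternative is to reconfigure the existing stream in place:
# sys.stdout.reconfigure(encoding='cp437', errors='backslashreplace')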
if (len(sys.argv) != 2):
print("Usage: python youtube_first_page_titles.py <search query>")
sys.exit(-1)
query = sys.argv[1].replace(" ", "+")
html = urlopen("https://www.youtube.com/results?search_query=" + query)
bsObj = BeautifulSoup(html, "html.parser")
titles = bsObj.findAll("a", {"class":"yt-uix-tile-link"})
for title in titles:
print(title['title'])
|
[
"bs4.BeautifulSoup",
"io.TextIOWrapper",
"urllib.request.urlopen",
"sys.exit"
] |
[((418, 482), 'io.TextIOWrapper', 'io.TextIOWrapper', (['sys.stdout.buffer', '"""cp437"""', '"""backslashreplace"""'], {}), "(sys.stdout.buffer, 'cp437', 'backslashreplace')\n", (434, 482), False, 'import sys, io\n'), ((637, 701), 'urllib.request.urlopen', 'urlopen', (["('https://www.youtube.com/results?search_query=' + query)"], {}), "('https://www.youtube.com/results?search_query=' + query)\n", (644, 701), False, 'from urllib.request import urlopen\n'), ((710, 744), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (723, 744), False, 'from bs4 import BeautifulSoup\n'), ((576, 588), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (584, 588), False, 'import sys, io\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-25 10:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oauth', '0002_auto_20170612_1258'),
('main', '0016_auto_20170625_1315'),
]
operations = [
migrations.AddField(
model_name='club',
name='mentor',
field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.CASCADE, related_name='cmentor', to='oauth.UserProfile'),
preserve_default=False,
),
migrations.AlterField(
model_name='club',
name='gallery',
field=models.ForeignKey(blank=True, help_text='Select a gallery to link to this club.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='photologue.Gallery'),
),
migrations.AlterField(
model_name='society',
name='mentor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='smentor', to='oauth.UserProfile'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((473, 598), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(3)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""cmentor"""', 'to': '"""oauth.UserProfile"""'}), "(default=3, on_delete=django.db.models.deletion.CASCADE,\n related_name='cmentor', to='oauth.UserProfile')\n", (490, 598), False, 'from django.db import migrations, models\n'), ((751, 924), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Select a gallery to link to this club."""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""photologue.Gallery"""'}), "(blank=True, help_text=\n 'Select a gallery to link to this club.', null=True, on_delete=django.\n db.models.deletion.SET_NULL, to='photologue.Gallery')\n", (768, 924), False, 'from django.db import migrations, models\n'), ((1037, 1152), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""smentor"""', 'to': '"""oauth.UserProfile"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='smentor', to='oauth.UserProfile')\n", (1054, 1152), False, 'from django.db import migrations, models\n')]
|
# coding: utf-8
import json
import time
from datetime import datetime
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from sylogger import logger
def main():
run(stop_date = "20131101", provs = ['江苏'])
def run(stop_date = "20181101", provs = []):
stop_date = datetime.strptime(stop_date, r"%Y%m%d")
histaqi_data = {}
with open("url.json", "r", encoding='utf-8') as f:
base_urls = json.load(f)
if provs:
keys = list(base_urls.keys())
for key in keys:
if key not in provs: base_urls.pop(key)
for prov_name, city_urls in base_urls.items():
histaqi_data[prov_name] = {}
for city_name, city_url in city_urls.items():
try:
print(city_name, city_url)
response = connect(city_url)
html = response.text
soup = BeautifulSoup(html, "lxml")
boxs = soup.select("div.box.p")
                box = boxs[0] # there is only one such box on the page
lis = box.select("li > a")
city_data = {}
for li in lis:
city_mon_url = urljoin(city_url, li["href"])
mon_name = li["title"]
print(mon_name)
cur_date = datetime.strptime(mon_name[0:4] + mon_name[5:7], r"%Y%m")
if (cur_date - stop_date).days < 0:
break
else:
vals = read(city_mon_url)
city_data[mon_name] = vals
print(mon_name + " is done")
histaqi_data[prov_name][city_name] = city_data
# print(histaqi_data)
except Exception as identifier:
logger('logging.log', msg_type='error', msg=identifier)
with open(datetime.now().strftime(r"%Y%m%d") + ".json", "w") as f:
json.dump(histaqi_data, f, ensure_ascii = False, indent = 4)
continue
with open("aqi.json", "w") as f:
json.dump(histaqi_data, f, ensure_ascii = False, indent = 4)
# https://www.cnblogs.com/kongzhagen/p/6472746.html
# https://www.biaodianfu.com/python-requests-retry.html
# https://blog.csdn.net/xie_0723/article/details/52790786
def read(city_mon_url):
vals = []
response = connect(city_mon_url)
html = response.text
soup = BeautifulSoup(html, "lxml")
tds = soup.select("div.api_month_list td")
for td in tds:
if not td.attrs and not td.findChildren():
item = td.get_text().strip()
# print(item)
vals.append(item)
# exit(0)
return vals
def connect(url, timeout = 500, max_retries = 30, encoding = "gbk"):
headers = {'user-agent': 'my-app/0.0.1'}
request_retry = requests.adapters.HTTPAdapter(max_retries = max_retries)
s = requests.session()
s.mount('https://',request_retry)
s.mount('http://',request_retry)
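    # With the adapter mounted, transient connection errors are retried
    # transparently (up to max_retries) before the manual fallback below kicks in.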
try:
response = s.get(url, headers = headers, timeout = timeout)
except Exception as identifier:
print(identifier)
time.sleep(5)
response = s.get(url, headers = headers, timeout = timeout)
response.encoding = encoding
return response
if __name__ == "__main__":
main()
|
[
"requests.session",
"json.dump",
"json.load",
"urllib.parse.urljoin",
"requests.adapters.HTTPAdapter",
"time.sleep",
"datetime.datetime.strptime",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"sylogger.logger"
] |
[((297, 335), 'datetime.datetime.strptime', 'datetime.strptime', (['stop_date', '"""%Y%m%d"""'], {}), "(stop_date, '%Y%m%d')\n", (314, 335), False, 'from datetime import datetime\n'), ((1968, 1995), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (1981, 1995), False, 'from bs4 import BeautifulSoup\n'), ((2325, 2379), 'requests.adapters.HTTPAdapter', 'requests.adapters.HTTPAdapter', ([], {'max_retries': 'max_retries'}), '(max_retries=max_retries)\n', (2354, 2379), False, 'import requests\n'), ((2387, 2405), 'requests.session', 'requests.session', ([], {}), '()\n', (2403, 2405), False, 'import requests\n'), ((422, 434), 'json.load', 'json.load', (['f'], {}), '(f)\n', (431, 434), False, 'import json\n'), ((1640, 1696), 'json.dump', 'json.dump', (['histaqi_data', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(histaqi_data, f, ensure_ascii=False, indent=4)\n', (1649, 1696), False, 'import json\n'), ((2600, 2613), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2610, 2613), False, 'import time\n'), ((780, 807), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (793, 807), False, 'from bs4 import BeautifulSoup\n'), ((963, 992), 'urllib.parse.urljoin', 'urljoin', (['city_url', "li['href']"], {}), "(city_url, li['href'])\n", (970, 992), False, 'from urllib.parse import urljoin\n'), ((1058, 1114), 'datetime.datetime.strptime', 'datetime.strptime', (['(mon_name[0:4] + mon_name[5:7])', '"""%Y%m"""'], {}), "(mon_name[0:4] + mon_name[5:7], '%Y%m')\n", (1075, 1114), False, 'from datetime import datetime\n'), ((1396, 1451), 'sylogger.logger', 'logger', (['"""logging.log"""'], {'msg_type': '"""error"""', 'msg': 'identifier'}), "('logging.log', msg_type='error', msg=identifier)\n", (1402, 1451), False, 'from sylogger import logger\n'), ((1528, 1584), 'json.dump', 'json.dump', (['histaqi_data', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(histaqi_data, f, ensure_ascii=False, indent=4)\n', (1537, 1584), False, 'import json\n'), ((1466, 1480), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1478, 1480), False, 'from datetime import datetime\n')]
|
'''
Utilities for processing semantic types
'''
import pkg_resources
lines = list(map(lambda x: x.split('|'), open(pkg_resources.resource_filename(__name__, 'SemanticTypes_2018AB.txt')).readlines()))
abbreviation_to_id = {x[0]: x[1] for x in lines}
id_to_abbreviation = {x[1]: x[0] for x in lines}
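# For example, with the standard UMLS 2018AB table this should give
# abbreviation_to_id['dsyn'] == 'T047' (Disease or Syndrome).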
groups = {line[2]:line[0] for line in
list(map(lambda x: x.split('|'), open(pkg_resources.resource_filename(__name__, 'SemGroups_2018.txt')).readlines()))
}
group_names = ["NONE"] + list(sorted(list(set(groups.values()))))
names = ["NONE"] + list(sorted(list(set(abbreviation_to_id.keys()))))
def get_sem_type(concepts):
types = []
for c in concepts:
types += c.semtypes
return types
def get_semantic_group_from_concept(concepts):
types = []
for concept in concepts:
for semtype in concept.semtypes:
if semtype in abbreviation_to_id:
types.append(groups[abbreviation_to_id[semtype]])
return types
|
[
"pkg_resources.resource_filename"
] |
[((117, 186), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""SemanticTypes_2018AB.txt"""'], {}), "(__name__, 'SemanticTypes_2018AB.txt')\n", (148, 186), False, 'import pkg_resources\n'), ((382, 445), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""SemGroups_2018.txt"""'], {}), "(__name__, 'SemGroups_2018.txt')\n", (413, 445), False, 'import pkg_resources\n')]
|
"""
base repository template
"""
__all__ = (
"CategoryIterValLazyDict", "PackageMapping", "VersionMapping", "tree"
)
import os
from snakeoil.klass import jit_attr
from snakeoil.mappings import DictMixin, LazyValDict
from snakeoil.osutils import pjoin
from snakeoil.sequences import iflatten_instance
from ..ebuild.atom import atom
from ..operations import repo
from ..restrictions import boolean, packages, restriction, values
from ..restrictions.util import collect_package_restrictions
class IterValLazyDict(LazyValDict):
__slots__ = ()
def __str__(self):
return str(list(self))
def force_regen(self, key):
if key in self._vals:
del self._vals[key]
else:
self._keys = tuple(x for x in self._keys if x != key)
class CategoryIterValLazyDict(IterValLazyDict):
__slots__ = ()
def force_add(self, key):
if key not in self:
s = set(self._keys)
s.add(key)
self._keys = tuple(s)
def force_remove(self, key):
if key in self:
self._keys = tuple(x for x in self._keys if x != key)
__iter__ = IterValLazyDict.keys
def __contains__(self, key):
if self._keys_func is not None:
return key in list(self.keys())
return key in self._keys
class PackageMapping(DictMixin):
def __init__(self, parent_mapping, pull_vals):
self._cache = {}
self._parent = parent_mapping
self._pull_vals = pull_vals
def __getitem__(self, key):
o = self._cache.get(key)
if o is not None:
return o
if key not in self._parent:
raise KeyError(key)
self._cache[key] = vals = self._pull_vals(key)
return vals
def keys(self):
return self._parent.keys()
def __contains__(self, key):
return key in self._cache or key in self._parent
def force_regen(self, cat):
try:
del self._cache[cat]
except KeyError:
pass
class VersionMapping(DictMixin):
def __init__(self, parent_mapping, pull_vals):
self._cache = {}
self._parent = parent_mapping
self._pull_vals = pull_vals
def __getitem__(self, key):
o = self._cache.get(key)
if o is not None:
return o
        if key[1] not in self._parent.get(key[0], ()):
raise KeyError(key)
val = self._pull_vals(key)
self._cache[key] = val
return val
def keys(self):
for cat, pkgs in self._parent.items():
for pkg in pkgs:
yield (cat, pkg)
def force_regen(self, key, val):
if val:
self._cache[key] = val
else:
self._cache.pop(key, None)
class tree:
"""Template for all repository variants.
Args:
frozen (bool): controls whether the repository is mutable or immutable
Attributes:
raw_repo: if wrapping a repo, set raw_repo per instance to it
livefs (bool): set it to True if it's a repository representing a livefs
package_class: callable to generate a package instance, must override
configured (bool): if a repo is unusable for merging/unmerging
without being configured, set it to False
frozen_settable (bool): controls whether frozen is able to be set
on initialization
operations_kls: callable to generate a repo operations instance
categories (dict): available categories in the repo
packages (dict): mapping of packages to categories in the repo
versions (dict): mapping of versions to packages in the repo
frozen (bool): repository mutability status
lock: TODO
"""
raw_repo = None
is_supported = True
livefs = False
package_class = None
configured = True
frozen_settable = True
operations_kls = repo.operations
pkg_masks = frozenset()
def __init__(self, frozen=False):
self.categories = CategoryIterValLazyDict(
self._get_categories, self._get_categories)
self.packages = PackageMapping(self.categories, self._get_packages)
self.versions = VersionMapping(self.packages, self._get_versions)
if self.frozen_settable:
self.frozen = frozen
self.lock = None
def configure(self, *args):
"""Return a configured form of the repository."""
raise NotImplementedError(self, "configure")
def _get_categories(self, *args):
"""this must return a list, or sequence"""
raise NotImplementedError(self, "_get_categories")
def _get_packages(self, category):
"""this must return a list, or sequence"""
raise NotImplementedError(self, "_get_packages")
def _get_versions(self, package):
"""this must return a list, or sequence"""
raise NotImplementedError(self, "_get_versions")
def __getitem__(self, cpv):
cpv_inst = self.package_class(*cpv)
if cpv_inst.fullver not in self.versions[(cpv_inst.category, cpv_inst.package)]:
raise KeyError(cpv)
return cpv_inst
def __setitem__(self, *vals):
raise AttributeError
def __delitem__(self, cpv):
raise AttributeError
def __iter__(self):
"""Filtered iterator over all the repo's packages.
All packages with metadata issues are skipped.""
"""
return self.itermatch(packages.AlwaysTrue)
def __len__(self):
return sum(len(v) for v in self.versions.values())
def __contains__(self, obj):
"""Determine if a path or a package is in a repo."""
if isinstance(obj, str):
path = os.path.normpath(obj)
try:
repo_path = os.path.realpath(getattr(self, 'location'))
except AttributeError:
return False
# existing relative path
if not path.startswith(os.sep) and os.path.exists(pjoin(repo_path, path)):
return True
# existing full path
fullpath = os.path.realpath(os.path.abspath(path))
if fullpath.startswith(repo_path) and os.path.exists(fullpath):
return True
return False
else:
for pkg in self.itermatch(obj):
return True
return False
def has_match(self, atom, **kwds):
kwds.pop("sorter", None)
kwds.pop("yield_none", None)
for pkg in self.itermatch(atom, **kwds):
return True
return False
def match(self, atom, **kwds):
return list(self.itermatch(atom, **kwds))
def itermatch(self, restrict, sorter=None, pkg_filter=None, versioned=True,
raw_pkg_cls=None, pkg_cls=None, force=None, yield_none=False):
"""Generator that yields packages match a restriction.
:type restrict: :obj:`pkgcore.restrictions.packages.PackageRestriction`
instance.
:param restrict: restriction to search via
        :param sorter: callable to do sorting during searching;
if sorting the results, use this instead of sorting externally.
:param pkg_filter: callable to do package filtering
:param versioned: boolean controlling returning versioned or unversioned pkgs
:param raw_pkg_cls: custom package class to use for generating raw pkg instances
:param pkg_cls: custom package class to override raw pkg instances with
:param yield_none: if True then itermatch will yield None for every
non-matching package. This is meant for use in combination with
C{twisted.task.cooperate} or other async uses where itermatch
should not wait many (wallclock) seconds between yielding
packages. If you override this method you should yield
            None in long-running loops; strictly yielding it for every package
is not necessary.
"""
if not isinstance(restrict, restriction.base):
raise TypeError(
f"restrict must be a pkgcore.restriction.restrictions.base instance: "
f"got {restrict!r}")
if sorter is None:
sorter = iter
if pkg_filter is None:
pkg_filter = iter
if raw_pkg_cls is None:
if versioned:
raw_pkg_cls = self.package_class
else:
raw_pkg_cls = lambda *args: args
if isinstance(restrict, atom):
candidates = [(restrict.category, restrict.package)]
else:
candidates = self._identify_candidates(restrict, sorter)
if force is None:
match = restrict.match
elif force:
match = restrict.force_True
else:
match = restrict.force_False
return self._internal_match(
candidates, match, raw_pkg_cls=raw_pkg_cls, pkg_cls=pkg_cls,
yield_none=yield_none, sorter=sorter, pkg_filter=pkg_filter,
versioned=versioned)
def _internal_gen_candidates(self, candidates, sorter, raw_pkg_cls, pkg_filter, versioned):
for cp in sorter(candidates):
if versioned:
pkgs = (raw_pkg_cls(cp[0], cp[1], ver) for ver in self.versions.get(cp, ()))
else:
if self.versions.get(cp, ()):
pkgs = (raw_pkg_cls(cp[0], cp[1]),)
else:
pkgs = ()
pkgs = iter(pkgs)
yield from sorter(pkg_filter(pkgs))
def _internal_match(self, candidates, match_func, pkg_cls, yield_none=False, **kwargs):
for pkg in self._internal_gen_candidates(candidates, **kwargs):
if pkg_cls is not None:
pkg = pkg_cls(pkg)
if match_func(pkg):
yield pkg
elif yield_none:
yield None
def _identify_candidates(self, restrict, sorter):
# full expansion
if not isinstance(restrict, boolean.base) or isinstance(restrict, atom):
return self._fast_identify_candidates(restrict, sorter)
dsolutions = [
([c.restriction
for c in collect_package_restrictions(x, ("category",))],
[p.restriction
for p in collect_package_restrictions(x, ("package",))])
for x in restrict.iter_dnf_solutions(True)]
        # see if any solution state isn't dependent on cat/pkg in any way.
        # if so, search the whole search space.
for x in dsolutions:
if not x[0] and not x[1]:
if sorter is iter:
return self.versions
return (
(c, p)
for c in sorter(self.categories)
for p in sorter(self.packages.get(c, ())))
# simple cases first.
# if one specifies categories, and one doesn't
cat_specified = bool(dsolutions[0][0])
pkg_specified = bool(dsolutions[0][1])
pgetter = self.packages.get
if any(True for x in dsolutions[1:] if bool(x[0]) != cat_specified):
if any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
                # damn. so we've got a mix: some specify cats, some
# don't, some specify pkgs, some don't.
# this may be optimizable
return self.versions
# ok. so... one doesn't specify a category, but they all
# specify packages (or don't)
pr = values.OrRestriction(
*tuple(iflatten_instance(
(x[1] for x in dsolutions if x[1]), values.base)))
return (
(c, p)
for c in sorter(self.categories)
for p in sorter(pgetter(c, [])) if pr.match(p))
elif any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
# one (or more) don't specify pkgs, but they all specify cats.
cr = values.OrRestriction(
*tuple(iflatten_instance(
(x[0] for x in dsolutions), values.base)))
cats_iter = (c for c in sorter(self.categories) if cr.match(c))
return (
(c, p)
for c in cats_iter for p in sorter(pgetter(c, [])))
return self._fast_identify_candidates(restrict, sorter)
def _fast_identify_candidates(self, restrict, sorter):
pkg_restrict = set()
cat_restrict = set()
cat_exact = set()
pkg_exact = set()
for x in collect_package_restrictions(restrict,
("category", "package",)):
if x.attr == "category":
cat_restrict.add(x.restriction)
elif x.attr == "package":
pkg_restrict.add(x.restriction)
for e, s in ((pkg_exact, pkg_restrict), (cat_exact, cat_restrict)):
l = [x for x in s
if isinstance(x, values.StrExactMatch) and not x.negate]
s.difference_update(l)
e.update(x.exact for x in l)
del l
if restrict.negate:
cat_exact = pkg_exact = ()
if cat_exact:
if not cat_restrict and len(cat_exact) == 1:
# Cannot use pop here, cat_exact is reused below.
c = next(iter(cat_exact))
if not pkg_restrict and len(pkg_exact) == 1:
cp = (c, pkg_exact.pop())
if cp in self.versions:
return [cp]
return []
cats_iter = [c]
else:
cat_restrict.add(values.ContainmentMatch(frozenset(cat_exact)))
cats_iter = sorter(self._cat_filter(cat_restrict))
elif cat_restrict:
cats_iter = self._cat_filter(
cat_restrict, negate=restrict.negate)
else:
cats_iter = sorter(self.categories)
if pkg_exact:
if not pkg_restrict:
if sorter is iter:
pkg_exact = tuple(pkg_exact)
else:
pkg_exact = sorter(pkg_exact)
return (
(c, p)
for c in cats_iter for p in pkg_exact)
else:
pkg_restrict.add(values.ContainmentMatch(frozenset(pkg_exact)))
if pkg_restrict:
return self._package_filter(
cats_iter, pkg_restrict, negate=restrict.negate)
elif not cat_restrict:
if sorter is iter and not cat_exact:
return self.versions
else:
return (
(c, p) for c in
cats_iter for p in sorter(self.packages.get(c, ())))
return (
(c, p)
for c in cats_iter for p in sorter(self.packages.get(c, ())))
def _cat_filter(self, cat_restricts, negate=False):
sentinel = not negate
cats = [x.match for x in cat_restricts]
for x in self.categories:
for match in cats:
if match(x) == sentinel:
yield x
break
def _package_filter(self, cats_iter, pkg_restricts, negate=False):
sentinel = not negate
restricts = [x.match for x in pkg_restricts]
pkgs_dict = self.packages
for cat in cats_iter:
for pkg in pkgs_dict.get(cat, ()):
for match in restricts:
if match(pkg) == sentinel:
yield (cat, pkg)
break
def notify_remove_package(self, pkg):
"""internal function
notify the repository that a pkg it provides is being removed
"""
ver_key = (pkg.category, pkg.package)
l = [x for x in self.versions[ver_key] if x != pkg.fullver]
if not l:
# dead package
wipe = list(self.packages[pkg.category]) == [pkg.package]
self.packages.force_regen(pkg.category)
if wipe:
self.categories.force_regen(pkg.category)
self.versions.force_regen(ver_key, tuple(l))
def notify_add_package(self, pkg):
"""internal function
notify the repository that a pkg is being added to it
"""
ver_key = (pkg.category, pkg.package)
s = set(self.versions.get(ver_key, ()))
s.add(pkg.fullver)
if pkg.category not in self.categories:
self.categories.force_add(pkg.category)
self.packages.force_regen(pkg.category)
self.versions.force_regen(ver_key, tuple(s))
@property
def operations(self):
return self.get_operations()
def get_operations(self, observer=None):
return self.operations_kls(self)
def __bool__(self):
try:
next(iter(self.versions))
return True
except StopIteration:
return False
def __str__(self):
if self.aliases:
return str(self.aliases[0])
return repr(self)
@property
def aliases(self):
potentials = (getattr(self, key, None) for key in ('repo_id', 'location'))
return tuple(x for x in potentials if x is not None)
@jit_attr
def masked(self):
"""Base package mask restriction."""
return packages.OrRestriction(*self.pkg_masks)
|
[
"snakeoil.sequences.iflatten_instance",
"os.path.abspath",
"snakeoil.osutils.pjoin",
"os.path.exists",
"os.path.normpath"
] |
[((5705, 5726), 'os.path.normpath', 'os.path.normpath', (['obj'], {}), '(obj)\n', (5721, 5726), False, 'import os\n'), ((6107, 6128), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (6122, 6128), False, 'import os\n'), ((6180, 6204), 'os.path.exists', 'os.path.exists', (['fullpath'], {}), '(fullpath)\n', (6194, 6204), False, 'import os\n'), ((5980, 6002), 'snakeoil.osutils.pjoin', 'pjoin', (['repo_path', 'path'], {}), '(repo_path, path)\n', (5985, 6002), False, 'from snakeoil.osutils import pjoin\n'), ((11607, 11673), 'snakeoil.sequences.iflatten_instance', 'iflatten_instance', (['(x[1] for x in dsolutions if x[1])', 'values.base'], {}), '((x[1] for x in dsolutions if x[1]), values.base)\n', (11624, 11673), False, 'from snakeoil.sequences import iflatten_instance\n'), ((12071, 12129), 'snakeoil.sequences.iflatten_instance', 'iflatten_instance', (['(x[0] for x in dsolutions)', 'values.base'], {}), '((x[0] for x in dsolutions), values.base)\n', (12088, 12129), False, 'from snakeoil.sequences import iflatten_instance\n')]
|
#!/bin/env python
# -*- coding: utf-8 -*-
##
# test_live.py: Tests Azure Quantum functionality Live.
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
## IMPORTS ##
import pytest
import warnings
## TESTS ##
def connect():
import qsharp.azure
return qsharp.azure.connect(
credential="environment"
)
def has_completed(job) -> bool:
"""Check if the job has completed."""
return (
job.status == "Succeeded"
or job.status == "Failed"
or job.status == "Cancelled"
)
def wait_until_completed(job):
import time
import qsharp.azure
max_poll_wait_secs = 5
timeout_secs = 30
poll_wait = 0.2
total_time = 0.
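    # Poll with exponential backoff: the wait grows 1.5x per iteration,
    # capped at max_poll_wait_secs, until the job completes or times out.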
while not has_completed(job):
if total_time >= timeout_secs:
raise TimeoutError(f"The wait time has exceeded {timeout_secs} seconds.")
time.sleep(poll_wait)
total_time += poll_wait
job = qsharp.azure.status(job.id)
poll_wait = (
max_poll_wait_secs
if poll_wait >= max_poll_wait_secs
else poll_wait * 1.5
)
def test_ionq_targets():
"""
Tests that we can fetch targets from the service,
and that the workspace includes the targets we need for submission
"""
targets = connect()
assert len(targets) > 2
target_ids = [t.id for t in targets]
assert 'ionq.simulator' in target_ids
assert 'ionq.qpu' in target_ids
def test_ionq_submit():
"""
Test that the SampleQrng operation can be submitted successfully on the ionq.simulator
"""
import time
import qsharp
from Microsoft.Quantum.Tests import SampleQrng
# Make sure we can simulate locally:
count = 3
result = SampleQrng.simulate(count=count, name='andres')
assert len(result) == count
import qsharp.azure
connect()
t = qsharp.azure.target("ionq.simulator")
assert isinstance(t, qsharp.azure.AzureTarget)
assert t.id == "ionq.simulator"
job = qsharp.azure.submit(SampleQrng, count=count, name="andres")
assert isinstance(job, qsharp.azure.AzureJob)
assert not job.id == ''
print("Submitted job: ", job.id)
try:
wait_until_completed(job)
except TimeoutError:
warnings.warn("IonQ execution exceeded timeout. Skipping fetching results.")
else:
job = qsharp.azure.status()
assert isinstance(job, qsharp.azure.AzureJob)
assert job.status == "Succeeded"
histogram = {
'[0,0,0]': 0.125,
'[0,0,1]': 0.125,
'[0,1,0]': 0.125,
'[0,1,1]': 0.125,
'[1,0,0]': 0.125,
'[1,0,1]': 0.125,
'[1,1,0]': 0.125,
'[1,1,1]': 0.125
}
retrieved_histogram = qsharp.azure.output()
assert isinstance(retrieved_histogram, dict)
assert histogram == retrieved_histogram
def test_honeywell_targets():
"""
Tests that we can fetch targets from the service,
and that the workspace includes the targets we need for submission
"""
targets = connect()
assert len(targets) > 2
target_ids = [t.id for t in targets]
assert 'honeywell.hqs-lt-s1' in target_ids
assert 'honeywell.hqs-lt-s1-apival' in target_ids
def test_honeywell_submit():
"""
Test that the RunTeleport operation can be submitted successfully on the honeywell apival target
"""
import qsharp
from Microsoft.Quantum.Tests import RunTeleport
# Make sure we can simulate locally:
expected = True
result = RunTeleport.simulate(doPlus=expected)
    assert result == (0 if expected else 1)
import qsharp.azure
connect()
t = qsharp.azure.target("honeywell.hqs-lt-s1-apival")
assert isinstance(t, qsharp.azure.AzureTarget)
assert t.id == "honeywell.hqs-lt-s1-apival"
job = qsharp.azure.submit(RunTeleport, doPlus=expected)
assert isinstance(job, qsharp.azure.AzureJob)
assert not job.id == ''
print("Submitted job: ", job.id)
try:
wait_until_completed(job)
except TimeoutError:
warnings.warn("Honeywell execution exceeded timeout. Skipping fetching results.")
else:
job = qsharp.azure.status()
assert isinstance(job, qsharp.azure.AzureJob)
if job.status == "Succeeded":
retrieved_histogram = qsharp.azure.output()
assert isinstance(retrieved_histogram, dict)
assert '0' in retrieved_histogram
|
[
"qsharp.azure.status",
"Microsoft.Quantum.Tests.RunTeleport.simulate",
"qsharp.azure.submit",
"time.sleep",
"Microsoft.Quantum.Tests.SampleQrng.simulate",
"qsharp.azure.connect",
"warnings.warn",
"qsharp.azure.output",
"qsharp.azure.target"
] |
[((291, 337), 'qsharp.azure.connect', 'qsharp.azure.connect', ([], {'credential': '"""environment"""'}), "(credential='environment')\n", (311, 337), False, 'import qsharp\n'), ((1747, 1794), 'Microsoft.Quantum.Tests.SampleQrng.simulate', 'SampleQrng.simulate', ([], {'count': 'count', 'name': '"""andres"""'}), "(count=count, name='andres')\n", (1766, 1794), False, 'from Microsoft.Quantum.Tests import SampleQrng\n'), ((1875, 1912), 'qsharp.azure.target', 'qsharp.azure.target', (['"""ionq.simulator"""'], {}), "('ionq.simulator')\n", (1894, 1912), False, 'import qsharp\n'), ((2011, 2070), 'qsharp.azure.submit', 'qsharp.azure.submit', (['SampleQrng'], {'count': 'count', 'name': '"""andres"""'}), "(SampleQrng, count=count, name='andres')\n", (2030, 2070), False, 'import qsharp\n'), ((3565, 3602), 'Microsoft.Quantum.Tests.RunTeleport.simulate', 'RunTeleport.simulate', ([], {'doPlus': 'expected'}), '(doPlus=expected)\n', (3585, 3602), False, 'from Microsoft.Quantum.Tests import RunTeleport\n'), ((3693, 3742), 'qsharp.azure.target', 'qsharp.azure.target', (['"""honeywell.hqs-lt-s1-apival"""'], {}), "('honeywell.hqs-lt-s1-apival')\n", (3712, 3742), False, 'import qsharp\n'), ((3853, 3902), 'qsharp.azure.submit', 'qsharp.azure.submit', (['RunTeleport'], {'doPlus': 'expected'}), '(RunTeleport, doPlus=expected)\n', (3872, 3902), False, 'import qsharp\n'), ((883, 904), 'time.sleep', 'time.sleep', (['poll_wait'], {}), '(poll_wait)\n', (893, 904), False, 'import time\n'), ((951, 978), 'qsharp.azure.status', 'qsharp.azure.status', (['job.id'], {}), '(job.id)\n', (970, 978), False, 'import qsharp\n'), ((2364, 2385), 'qsharp.azure.status', 'qsharp.azure.status', ([], {}), '()\n', (2383, 2385), False, 'import qsharp\n'), ((2783, 2804), 'qsharp.azure.output', 'qsharp.azure.output', ([], {}), '()\n', (2802, 2804), False, 'import qsharp\n'), ((4202, 4223), 'qsharp.azure.status', 'qsharp.azure.status', ([], {}), '()\n', (4221, 4223), False, 'import qsharp\n'), ((2263, 2339), 'warnings.warn', 'warnings.warn', (['"""IonQ execution exceeded timeout. Skipping fetching results."""'], {}), "('IonQ execution exceeded timeout. Skipping fetching results.')\n", (2276, 2339), False, 'import warnings\n'), ((4096, 4182), 'warnings.warn', 'warnings.warn', (['"""Honeywell execution exceeded timeout. Skipping fetching results."""'], {}), "(\n 'Honeywell execution exceeded timeout. Skipping fetching results.')\n", (4109, 4182), False, 'import warnings\n'), ((4350, 4371), 'qsharp.azure.output', 'qsharp.azure.output', ([], {}), '()\n', (4369, 4371), False, 'import qsharp\n')]
|
#!/usr/bin/python3
from typing import List
from src.word_importer import import_word_list
from src.scoring import word_scores
from src.wordle_filter import filter_from_word_info
class WordleAssistant:
"""
This class asks for input when initialized and then packages the suggestion mechanism.
"""
def __init__(self, word_list: List[str] = import_word_list()) -> None:
self.guess = input("Input the 5-letter word that you guessed: \n").strip(" '\"")
self.color = input(
"Input the colors given back by wordle (for example bygyy): \n"
).strip(" '\"")
self.word_list = filter_from_word_info(self.guess, self.color, word_list)
self.scored_words = word_scores(self.word_list)
def suggestions(self, number: int = 10) -> List[str]:
return [w for w, _ in self.scored_words[:number]]
# This is the 'interactive' part of the program. Is this (i.e. the while loop) a good idea? Probably not, but I'm still learning.
# Initial suggestions:
def main():
wl = import_word_list()
scored = word_scores(wl)
print("\nHere are some suggested guesses: ")
print([w for w, _ in scored[:10]])
w = WordleAssistant()
print(w.suggestions())
# loop until we're done
while True:
cont = input("Would you like to keep going ([y]/n)? ")
if cont == "n":
break
w = WordleAssistant(filter_from_word_info(w.guess, w.color, w.word_list))
print("\nHere are some suggested guesses: ")
print(w.suggestions())
if __name__ == "__main__":
main()
|
[
"src.scoring.word_scores",
"src.word_importer.import_word_list",
"src.wordle_filter.filter_from_word_info"
] |
[((1033, 1051), 'src.word_importer.import_word_list', 'import_word_list', ([], {}), '()\n', (1049, 1051), False, 'from src.word_importer import import_word_list\n'), ((1065, 1080), 'src.scoring.word_scores', 'word_scores', (['wl'], {}), '(wl)\n', (1076, 1080), False, 'from src.scoring import word_scores\n'), ((358, 376), 'src.word_importer.import_word_list', 'import_word_list', ([], {}), '()\n', (374, 376), False, 'from src.word_importer import import_word_list\n'), ((629, 685), 'src.wordle_filter.filter_from_word_info', 'filter_from_word_info', (['self.guess', 'self.color', 'word_list'], {}), '(self.guess, self.color, word_list)\n', (650, 685), False, 'from src.wordle_filter import filter_from_word_info\n'), ((714, 741), 'src.scoring.word_scores', 'word_scores', (['self.word_list'], {}), '(self.word_list)\n', (725, 741), False, 'from src.scoring import word_scores\n'), ((1401, 1453), 'src.wordle_filter.filter_from_word_info', 'filter_from_word_info', (['w.guess', 'w.color', 'w.word_list'], {}), '(w.guess, w.color, w.word_list)\n', (1422, 1453), False, 'from src.wordle_filter import filter_from_word_info\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import protocol
import hpstr
#http://www.hpcalc.org/details.php?id=5910
objtypes = {
0x3329: ("DOREAL","real (%) 153."),
0x7729: ("DOCMP","complex (C%) (3.,4.)"),
0x2C2A: ("DOCSTR","string ($) 'Hello'"),
0xE829: ("DOARRY","array ( [] ) [3. 4.]"),
0x742A: ("DOLIST","list ( {} ) {3 4}"),
0x482E: ("DOIDNT","global name (id) 'MYPROG'"),
0x6D2E: ("DOLAM","local name (lam) 'j'"),
0x9D2D: ("DOCOL","program ( :: ; ) :: %1 %2 x+ ;"),
0xB82A: ("DOSYMB","algebraic (alg) '1+2*3^4'"),
0x4E2A: ("DOHSTR/XS" "user binary integer (hxs) #1234567890123456h"),
0x1E2B: ("DOGROB","grob"),
0xFC2A: ("DOTAG","tagged :Price:153.95"),
0xDA2A: ("DOEXT","unit 365.2422_d"),
0x922E: ("DOROMP","xlib (romptr) XLIB F0 BA"),
0x1129: ("DOBINT","bint ~ FFFFFh"),
0x962A: ("DORRP","dir (rrp) DIR ... END"),
0x5529: ("DOEREL","long real (%%) 1.23456789012345E12345"),
0x9D29: ("DOECMP","long complex (C%%) (3E0,4E0)"),
0x0A2A: ("DOLNKARRY","linked array ( l[] )"),
0xBF29: ("DOCHAR","character"),
0xCC2D: ("DOCODE","code object"),
0x402B: ("DOLIB","library"),
0x622B: ("DOBAK","backup object"),
0x882B: ("DOEXT0","library data (aka EXT0)"),
0xAA2B: ("DOEXT1","or DOACPTR access pointer (aka Extended Ptr, and EXT1)"),
0xCC2B: ("DOEXT2","font (erroneously called EXT2 by Vger)"),
0xFE26: ("DOMINIFONT","MiniFont"),
0xEE2B: ("DOEXT3","ext3 note: was dispatch type DF in HP48"),
0x102C: ("DOEXT4","ext4"),
0x1426: ("DOINT","integer (ZINT)"),
0x3A26: ("DOLNGREAL","infinite-precision real (not yet implemented)"),
0x6026: ("DOLNGCMP","infinite-precision complex (not yet implemented)"),
0x8626: ("DOMATRIX","symbolic matrix"),
0xAC26: ("DOFLASHP","Flash Pointer (FPTR n n; FPTR2 ^name)"),
0xD526: ("DOAPLET","Aplet (not yet implemented)")
}
def objtype( integer, verbose=0 ):
"""Translates object type number into a string representation.
The default verbose=0 gives a compact descriptor, =1 a verbose description.
"""
return objtypes[integer][verbose]
def version():
"""Returns the server version string.
"""
protocol.cmd( "V" )
return hpstr.tostr( protocol.readpacket() )
def meminfo():
"""Returns the number of free bytes in calculator memory.
"""
protocol.cmd( "M" )
return int( hpstr.tostr( protocol.readpacket()[:-1] ) )
def ls():
"""Returns a list with objects in the current directory.
"""
protocol.cmd( "L" )
raw = protocol.readpacket()
p = 0
ls = []
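    # Each directory entry appears to be packed as: a name-length byte, the
    # name itself, a 2-byte object type, a 3-byte size in nibbles (halved to
    # get bytes), and 2 bytes of flags.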
while p < len(raw):
l = raw[p] ; p += 1
name = hpstr.tostr( raw[p:p+l] ) ; p += l
objtype = raw[p]*256 + raw[p+1] ; p += 2
size = (raw[p] + raw[p+1]*256 + raw[p+2]*256*256) / 2.0 ; p += 3
flags = raw[p]*256 + raw[p+1] ; p += 2
ls.append( [ name, objtype, size, flags ] )
return ls
def get( remotefile ):
"""Reads remotefile from current directory and returns it as byte array.
Currently only binary mode is supported.
"""
return protocol.get( remotefile )
def put( remotefile, data ):
"""Writes data to remotefile in current directory and returns status.
Currently only binary mode is supported.
"""
return protocol.put( remotefile, data )
|
[
"protocol.put",
"hpstr.tostr",
"protocol.readpacket",
"protocol.get",
"protocol.cmd"
] |
[((2269, 2286), 'protocol.cmd', 'protocol.cmd', (['"""V"""'], {}), "('V')\n", (2281, 2286), False, 'import protocol\n'), ((2427, 2444), 'protocol.cmd', 'protocol.cmd', (['"""M"""'], {}), "('M')\n", (2439, 2444), False, 'import protocol\n'), ((2591, 2608), 'protocol.cmd', 'protocol.cmd', (['"""L"""'], {}), "('L')\n", (2603, 2608), False, 'import protocol\n'), ((2621, 2642), 'protocol.readpacket', 'protocol.readpacket', ([], {}), '()\n', (2640, 2642), False, 'import protocol\n'), ((3162, 3186), 'protocol.get', 'protocol.get', (['remotefile'], {}), '(remotefile)\n', (3174, 3186), False, 'import protocol\n'), ((3360, 3390), 'protocol.put', 'protocol.put', (['remotefile', 'data'], {}), '(remotefile, data)\n', (3372, 3390), False, 'import protocol\n'), ((2313, 2334), 'protocol.readpacket', 'protocol.readpacket', ([], {}), '()\n', (2332, 2334), False, 'import protocol\n'), ((2728, 2753), 'hpstr.tostr', 'hpstr.tostr', (['raw[p:p + l]'], {}), '(raw[p:p + l])\n', (2739, 2753), False, 'import hpstr\n'), ((2476, 2497), 'protocol.readpacket', 'protocol.readpacket', ([], {}), '()\n', (2495, 2497), False, 'import protocol\n')]
|
from typing import NamedTuple
from pytest import mark
from graphql import (
graphql,
GraphQLField,
GraphQLID,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
)
from graphql_relay import node_definitions
class User(NamedTuple):
id: str
name: str
user_data = [User(id="1", name="<NAME>"), User(id="2", name="<NAME>")]
user_type: GraphQLObjectType
node_interface, node_field = node_definitions(
lambda id_, _info: next(filter(lambda obj: obj.id == id_, user_data), None),
lambda _obj, _info, _type: user_type,
)[:2]
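# node_definitions also returns a plural nodes root field; the [:2] slice
# keeps only the Node interface and the singular node field used below.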
user_type = GraphQLObjectType(
"User",
lambda: {
"id": GraphQLField(GraphQLNonNull(GraphQLID)),
"name": GraphQLField(GraphQLString),
},
interfaces=[node_interface],
)
query_type = GraphQLObjectType("Query", lambda: {"node": node_field})
schema = GraphQLSchema(query=query_type, types=[user_type])
def describe_node_interface_and_fields_with_async_object_fetcher():
@mark.asyncio
async def gets_the_correct_id_for_users():
source = """
{
node(id: "1") {
id
}
}
"""
assert await graphql(schema, source) == ({"node": {"id": "1"}}, None)
@mark.asyncio
async def gets_the_correct_name_for_users():
source = """
{
node(id: "1") {
id
... on User {
name
}
}
}
"""
assert await graphql(schema, source) == (
{"node": {"id": "1", "name": "<NAME>"}},
None,
)
|
[
"graphql.GraphQLNonNull",
"graphql.graphql",
"graphql.GraphQLObjectType",
"graphql.GraphQLSchema",
"graphql.GraphQLField"
] |
[((798, 855), 'graphql.GraphQLObjectType', 'GraphQLObjectType', (['"""Query"""', "(lambda : {'node': node_field})"], {}), "('Query', lambda : {'node': node_field})\n", (815, 855), False, 'from graphql import graphql, GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType, GraphQLSchema, GraphQLString\n'), ((865, 915), 'graphql.GraphQLSchema', 'GraphQLSchema', ([], {'query': 'query_type', 'types': '[user_type]'}), '(query=query_type, types=[user_type])\n', (878, 915), False, 'from graphql import graphql, GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType, GraphQLSchema, GraphQLString\n'), ((713, 740), 'graphql.GraphQLField', 'GraphQLField', (['GraphQLString'], {}), '(GraphQLString)\n', (725, 740), False, 'from graphql import graphql, GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType, GraphQLSchema, GraphQLString\n'), ((669, 694), 'graphql.GraphQLNonNull', 'GraphQLNonNull', (['GraphQLID'], {}), '(GraphQLID)\n', (683, 694), False, 'from graphql import graphql, GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType, GraphQLSchema, GraphQLString\n'), ((1188, 1211), 'graphql.graphql', 'graphql', (['schema', 'source'], {}), '(schema, source)\n', (1195, 1211), False, 'from graphql import graphql, GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType, GraphQLSchema, GraphQLString\n'), ((1515, 1538), 'graphql.graphql', 'graphql', (['schema', 'source'], {}), '(schema, source)\n', (1522, 1538), False, 'from graphql import graphql, GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType, GraphQLSchema, GraphQLString\n')]
|
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
logger = logging.getLogger(__name__)
# Some constants taken from cuda.h
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
# nvml constants
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
class _CUuuid_t(Structure):
_fields_ = [
('bytes', c_char * 16)
]
class _nvmlUtilization_t(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
class _struct_nvmlDevice_t(Structure):
pass # opaque handle
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
_cuda_lib = _nvml_lib = None
_cu_device_info = namedtuple('_cu_device_info', 'uuid name multiprocessors cuda_cores threads')
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
_nvml_device_status = namedtuple(
'_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
_initialized = False
_gpu_count = None
_driver_info = None
_device_infos = dict()
class NVError(Exception):
pass
def _cu_check_error(result):
if result != CUDA_SUCCESS:
_error_str = c_char_p()
_cuda_lib.cuGetErrorString(result, byref(_error_str))
raise NVError('Device API Error %d: %s' % (result, _error_str.value.decode()))
if _nvml_lib is not None:
_nvmlErrorString = _nvml_lib.nvmlErrorString
_nvmlErrorString.restype = c_char_p
def _nvml_check_error(result):
if result != NVML_SUCCESS:
_error_str = _nvmlErrorString(result)
if _error_str:
raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))
else:
raise NVError('Unknown NVML API Error %d' % result)
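# CUDA cores per multiprocessor, keyed by (major, minor) compute capability;
# the 192 returned as a fallback below matches Kepler-class (3.x) devices.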
_cu_process_var_to_cores = {
(1, 0): 8,
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32,
(2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
return _cu_process_var_to_cores.get((major, minor), 192)
def _init_cp():
global _cuda_lib
if _initialized:
return
_cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll')
if _cuda_lib is None:
return
try:
_cu_check_error(_cuda_lib.cuInit(0))
except NVError:
logger.exception('Failed to initialize libcuda.')
return
def _init_nvml():
global _nvml_lib
if _initialized:
return
_nvml_lib = _load_nv_library('libnvidia-ml.so', 'libnvidia-ml.dylib', 'nvml.dll')
if _nvml_lib is None:
return
try:
_nvml_check_error(_nvml_lib.nvmlInit_v2())
except NVError:
logger.exception('Failed to initialize libnvidia-ml.')
return
def _init():
global _initialized
_init_cp()
_init_nvml()
_initialized = _nvml_lib is not None and _cuda_lib is not None
def get_device_count():
global _gpu_count
if _gpu_count is not None:
return _gpu_count
_init_nvml()
if _nvml_lib is None:
return None
if 'CUDA_VISIBLE_DEVICES' in os.environ:
_gpu_count = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
else:
n_gpus = c_uint()
        _nvml_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
_gpu_count = n_gpus.value
return _gpu_count
def get_driver_info():
global _driver_info
_init_nvml()
if _nvml_lib is None:
return None
if _driver_info is not None:
return _driver_info
version_buf = create_string_buffer(100)
cuda_version = c_uint()
_nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
_nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))
_driver_info = _nvml_driver_info(
driver_version=version_buf.value.decode(),
cuda_version='%d.%d' % (cuda_version.value // 1000, cuda_version.value % 1000)
)
return _driver_info
def get_device_info(dev_index):
try:
return _device_infos[dev_index]
except KeyError:
pass
_init()
if not _initialized:
return None
device = c_int()
name_buf = create_string_buffer(100)
uuid_t = _CUuuid_t()
cc_major = c_int()
cc_minor = c_int()
cores = c_int()
threads_per_core = c_int()
_cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
_cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
_cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
_cu_check_error(_cuda_lib.cuDeviceComputeCapability(
byref(cc_major), byref(cc_minor), device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))
info = _device_infos[dev_index] = _cu_device_info(
uuid=uuid.UUID(bytes=uuid_t.bytes),
name=name_buf.value.decode(),
multiprocessors=cores.value,
cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
threads=cores.value * threads_per_core.value,
)
return info
def get_device_status(dev_index):
_init()
if not _initialized:
return None
device = _nvmlDevice_t()
utils = _nvmlUtilization_t()
temperature = c_uint()
memory_info = _nvmlBAR1Memory_t()
dev_uuid = get_device_info(dev_index).uuid
uuid_str = 'GPU-' + str(dev_uuid)
if sys.version_info[0] >= 3:
uuid_str = uuid_str.encode()
_nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
device, NVML_TEMPERATURE_GPU, byref(temperature)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
return _nvml_device_status(
gpu_util=utils.gpu,
mem_util=utils.memory,
temperature=temperature.value,
fb_total_mem=memory_info.total,
fb_free_mem=memory_info.free,
fb_used_mem=memory_info.used,
)
|
[
"ctypes.c_char_p",
"ctypes.c_int",
"ctypes.byref",
"ctypes.create_string_buffer",
"uuid.UUID",
"collections.namedtuple",
"ctypes.c_uint",
"ctypes.CDLL",
"logging.getLogger",
"ctypes.POINTER"
] |
[((838, 865), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (855, 865), False, 'import logging\n'), ((1514, 1543), 'ctypes.POINTER', 'POINTER', (['_struct_nvmlDevice_t'], {}), '(_struct_nvmlDevice_t)\n', (1521, 1543), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((1895, 1972), 'collections.namedtuple', 'namedtuple', (['"""_cu_device_info"""', '"""uuid name multiprocessors cuda_cores threads"""'], {}), "('_cu_device_info', 'uuid name multiprocessors cuda_cores threads')\n", (1905, 1972), False, 'from collections import namedtuple\n'), ((1993, 2055), 'collections.namedtuple', 'namedtuple', (['"""_nvml_driver_info"""', '"""driver_version cuda_version"""'], {}), "('_nvml_driver_info', 'driver_version cuda_version')\n", (2003, 2055), False, 'from collections import namedtuple\n'), ((2078, 2185), 'collections.namedtuple', 'namedtuple', (['"""_nvml_device_status"""', '"""gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem"""'], {}), "('_nvml_device_status',\n 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')\n", (2088, 2185), False, 'from collections import namedtuple\n'), ((4680, 4705), 'ctypes.create_string_buffer', 'create_string_buffer', (['(100)'], {}), '(100)\n', (4700, 4705), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((4725, 4733), 'ctypes.c_uint', 'c_uint', ([], {}), '()\n', (4731, 4733), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5307, 5314), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (5312, 5314), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5330, 5355), 'ctypes.create_string_buffer', 'create_string_buffer', (['(100)'], {}), '(100)\n', (5350, 5355), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5396, 5403), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (5401, 5403), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5419, 5426), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (5424, 5426), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5439, 5446), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (5444, 5446), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5470, 5477), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (5475, 5477), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((6599, 6607), 'ctypes.c_uint', 'c_uint', ([], {}), '()\n', (6605, 6607), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((2391, 2401), 'ctypes.c_char_p', 'c_char_p', ([], {}), '()\n', (2399, 2401), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((4353, 4361), 'ctypes.c_uint', 'c_uint', ([], {}), '()\n', (4359, 4361), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((1790, 1799), 'ctypes.CDLL', 'CDLL', (['lib'], {}), '(lib)\n', (1794, 1799), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((2445, 2462), 'ctypes.byref', 'byref', (['_error_str'], {}), '(_error_str)\n', (2450, 2462), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((4889, 4908), 'ctypes.byref', 'byref', (['cuda_version'], {}), '(cuda_version)\n', (4894, 4908), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5521, 5534), 'ctypes.byref', 'byref', (['device'], {}), '(device)\n', (5526, 5534), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5536, 5552), 'ctypes.c_int', 'c_int', (['dev_index'], {}), '(dev_index)\n', (5541, 5552), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5681, 5694), 'ctypes.byref', 'byref', (['uuid_t'], {}), '(uuid_t)\n', (5686, 5694), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5770, 5785), 'ctypes.byref', 'byref', (['cc_major'], {}), '(cc_major)\n', (5775, 5785), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5787, 5802), 'ctypes.byref', 'byref', (['cc_minor'], {}), '(cc_minor)\n', (5792, 5802), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5873, 5885), 'ctypes.byref', 'byref', (['cores'], {}), '(cores)\n', (5878, 5885), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((5998, 6021), 'ctypes.byref', 'byref', (['threads_per_core'], {}), '(threads_per_core)\n', (6003, 6021), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((6153, 6182), 'uuid.UUID', 'uuid.UUID', ([], {'bytes': 'uuid_t.bytes'}), '(bytes=uuid_t.bytes)\n', (6162, 6182), False, 'import uuid\n'), ((6872, 6885), 'ctypes.byref', 'byref', (['device'], {}), '(device)\n', (6877, 6885), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((6958, 6970), 'ctypes.byref', 'byref', (['utils'], {}), '(utils)\n', (6963, 6970), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((7069, 7087), 'ctypes.byref', 'byref', (['temperature'], {}), '(temperature)\n', (7074, 7087), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((7158, 7176), 'ctypes.byref', 'byref', (['memory_info'], {}), '(memory_info)\n', (7163, 7176), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n'), ((4415, 4428), 'ctypes.byref', 'byref', (['n_gpus'], {}), '(n_gpus)\n', (4420, 4428), False, 'from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref, create_string_buffer, Structure, POINTER, CDLL\n')]
|
from __future__ import annotations
from asyncio import create_task, sleep
from collections import defaultdict
from dataclasses import dataclass
from json import dumps
from typing import Any, Awaitable, Callable, Mapping, Optional, Sequence, Type, Union
from aiohttp import BasicAuth, ClientResponse, ClientSession, FormData
from bauxite.constants import API_URL, VERSION
from .errors import (
BadGateway,
BadRequest,
Forbidden,
GatewayTimeout,
HTTPError,
MethodNotAllowed,
NotFound,
ServerError,
ServiceUnavailable,
TooManyRequests,
Unauthorized,
UnprocessableEntity,
)
from .file import File
from .ratelimiting import LocalRateLimiter, RateLimiter
from .route import Route
Callback = Callable[[ClientResponse, Route], Awaitable[None]]
Unset = object()
@dataclass
class _RequestContext:
route: Route
headers: dict[str, str]
params: dict[str, Any]
files: Sequence[File]
json: Any
@dataclass
class _ResponseContext:
route: Route
response: ClientResponse
successful: bool
class HTTPClient:
_status_codes: Mapping[int, Type[HTTPError]] = defaultdict(
lambda: HTTPError,
{
400: BadRequest,
401: Unauthorized,
403: Forbidden,
404: NotFound,
405: MethodNotAllowed,
422: UnprocessableEntity,
429: TooManyRequests,
500: ServerError,
502: BadGateway,
503: ServiceUnavailable,
504: GatewayTimeout,
},
)
def __init__(
self,
token: str,
api_url: Optional[str] = None,
user_agent: Optional[str] = None,
proxy_url: Optional[str] = None,
proxy_auth: Optional[BasicAuth] = None,
ratelimiter: Optional[RateLimiter] = None,
on_success: Optional[set[Callback]] = None,
on_error: Optional[set[Callback]] = None,
on_ratelimit: Optional[set[Callback]] = None,
) -> None:
self._token = token.strip()
self._api_url = api_url or API_URL
self._user_agent = (
user_agent
or f"DiscordBot (https://github.com/vcokltfre/bauxite, {VERSION})"
)
self._proxy_url = proxy_url
self._proxy_auth = proxy_auth
self._ratelimiter = ratelimiter or LocalRateLimiter()
self.__session: Optional[ClientSession] = None
self._on_success = on_success or set()
self._on_error = on_error or set()
self._on_ratelimit = on_ratelimit or set()
@property
def _session(self) -> ClientSession:
if self.__session and not self.__session.closed:
return self.__session
self.__session = ClientSession(
headers={
"Authorization": f"Bot {self._token}",
"User-Agent": self._user_agent,
}
)
return self.__session
def _dispatch(self, listeners: set[Callback], ctx: _ResponseContext) -> None:
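        # Callbacks run as fire-and-forget tasks so a slow listener cannot
        # block the request path.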
for listener in listeners:
create_task(listener(ctx.response, ctx.route))
async def _request(
self, ctx: _RequestContext, reset_files: int
) -> _ResponseContext:
if ctx.files:
data = FormData()
for i, file in enumerate(ctx.files):
file.reset(reset_files)
data.add_field(f"file_{i}", file.fp, filename=file.filename)
if ctx.json is not Unset:
data.add_field(
"payload_json", dumps(ctx.json), content_type="application/json"
)
ctx.params["data"] = data
elif ctx.json is not Unset:
ctx.params["json"] = ctx.json
lock = await self._ratelimiter.acquire(ctx.route.bucket)
async with lock:
response = await self._session.request(
ctx.route.method,
self._api_url + ctx.route.path,
headers=ctx.headers,
**ctx.params,
)
status = response.status
headers = response.headers
response_ctx = _ResponseContext(ctx.route, response, 200 <= status < 300)
rl_reset_after = float(headers.get("X-RateLimit-Reset-After", 0))
rl_bucket_remaining = int(headers.get("X-RateLimit-Remaining", 1))
if response_ctx.successful:
self._dispatch(self._on_success, response_ctx)
if rl_bucket_remaining == 0:
self._dispatch(self._on_ratelimit, response_ctx)
await lock.release(rl_reset_after)
else:
await lock.release(0)
return response_ctx
elif status == 429:
self._dispatch(self._on_error, response_ctx)
self._dispatch(self._on_ratelimit, response_ctx)
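                # A 429 that lacks a Via header did not come through Discord's
                # proxy, which usually signals a Cloudflare ban rather than an
                # ordinary rate limit (assumed reading of this check).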
if not headers.get("Via"):
raise TooManyRequests(response)
json = await response.json()
is_global = json.get("global", False)
retry_after = json["retry_after"]
if is_global:
await self._ratelimiter.lock_globally(retry_after)
else:
await lock.release(retry_after)
else:
self._dispatch(self._on_error, response_ctx)
raise self._status_codes[status](response)
return response_ctx
async def request(
self,
route: Route,
qparams: Optional[dict[str, Union[str, int]]] = None,
reason: Optional[str] = None,
files: Optional[Sequence[File]] = None,
json: Optional[Any] = Unset,
max_attempts: int = 3,
) -> ClientResponse:
headers = {}
params = {}
if qparams:
params["params"] = qparams
if reason:
headers["X-Audit-Log-Reason"] = reason
for attempt in range(max_attempts):
ctx = _RequestContext(route, headers, params, files or (), json)
resp = await self._request(ctx, attempt)
if resp.successful:
return resp.response
if attempt == max_attempts - 1:
raise self._status_codes[resp.response.status](resp.response)
await sleep(1 + attempt * 2)
raise Exception("Unreachable")
async def close(self) -> None:
if self.__session:
await self.__session.close()
|
[
"asyncio.sleep",
"aiohttp.FormData",
"json.dumps",
"collections.defaultdict",
"aiohttp.ClientSession"
] |
[((1129, 1416), 'collections.defaultdict', 'defaultdict', (['(lambda : HTTPError)', '{(400): BadRequest, (401): Unauthorized, (403): Forbidden, (404): NotFound,\n (405): MethodNotAllowed, (422): UnprocessableEntity, (429):\n TooManyRequests, (500): ServerError, (502): BadGateway, (503):\n ServiceUnavailable, (504): GatewayTimeout}'], {}), '(lambda : HTTPError, {(400): BadRequest, (401): Unauthorized, (\n 403): Forbidden, (404): NotFound, (405): MethodNotAllowed, (422):\n UnprocessableEntity, (429): TooManyRequests, (500): ServerError, (502):\n BadGateway, (503): ServiceUnavailable, (504): GatewayTimeout})\n', (1140, 1416), False, 'from collections import defaultdict\n'), ((2719, 2817), 'aiohttp.ClientSession', 'ClientSession', ([], {'headers': "{'Authorization': f'Bot {self._token}', 'User-Agent': self._user_agent}"}), "(headers={'Authorization': f'Bot {self._token}', 'User-Agent':\n self._user_agent})\n", (2732, 2817), False, 'from aiohttp import BasicAuth, ClientResponse, ClientSession, FormData\n'), ((3237, 3247), 'aiohttp.FormData', 'FormData', ([], {}), '()\n', (3245, 3247), False, 'from aiohttp import BasicAuth, ClientResponse, ClientSession, FormData\n'), ((6320, 6342), 'asyncio.sleep', 'sleep', (['(1 + attempt * 2)'], {}), '(1 + attempt * 2)\n', (6325, 6342), False, 'from asyncio import create_task, sleep\n'), ((3523, 3538), 'json.dumps', 'dumps', (['ctx.json'], {}), '(ctx.json)\n', (3528, 3538), False, 'from json import dumps\n')]
|
import threading
import time
from threading import Thread
import cli_ui
def long_computation():
# Simulates a long computation
time.sleep(0.6)
def count_down(lock, start):
x = start
while x >= 0:
with lock:
# Note: the sleeps are here so that we are more likely to
# see mangled output
#
            # In reality, if you only call `cli_ui.info()` once you don't
            # need locks at all thanks to the GIL
cli_ui.info("down", end=" ")
time.sleep(0.2)
cli_ui.info(x)
time.sleep(0.2)
long_computation()
x -= 1
def count_up(lock, stop):
x = 0
while x <= stop:
with lock:
cli_ui.info("up", end=" ")
time.sleep(0.2)
cli_ui.info(x)
time.sleep(0.2)
long_computation()
x += 1
def main():
lock = threading.Lock()
t1 = Thread(target=count_down, args=(lock, 4))
t2 = Thread(target=count_up, args=(lock, 4))
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
main()
|
[
"threading.Lock",
"threading.Thread",
"cli_ui.info",
"time.sleep"
] |
[((138, 153), 'time.sleep', 'time.sleep', (['(0.6)'], {}), '(0.6)\n', (148, 153), False, 'import time\n'), ((906, 922), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (920, 922), False, 'import threading\n'), ((932, 973), 'threading.Thread', 'Thread', ([], {'target': 'count_down', 'args': '(lock, 4)'}), '(target=count_down, args=(lock, 4))\n', (938, 973), False, 'from threading import Thread\n'), ((983, 1022), 'threading.Thread', 'Thread', ([], {'target': 'count_up', 'args': '(lock, 4)'}), '(target=count_up, args=(lock, 4))\n', (989, 1022), False, 'from threading import Thread\n'), ((485, 513), 'cli_ui.info', 'cli_ui.info', (['"""down"""'], {'end': '""" """'}), "('down', end=' ')\n", (496, 513), False, 'import cli_ui\n'), ((526, 541), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (536, 541), False, 'import time\n'), ((554, 568), 'cli_ui.info', 'cli_ui.info', (['x'], {}), '(x)\n', (565, 568), False, 'import cli_ui\n'), ((581, 596), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (591, 596), False, 'import time\n'), ((729, 755), 'cli_ui.info', 'cli_ui.info', (['"""up"""'], {'end': '""" """'}), "('up', end=' ')\n", (740, 755), False, 'import cli_ui\n'), ((768, 783), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (778, 783), False, 'import time\n'), ((796, 810), 'cli_ui.info', 'cli_ui.info', (['x'], {}), '(x)\n', (807, 810), False, 'import cli_ui\n'), ((823, 838), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (833, 838), False, 'import time\n')]
|
from django.urls import path
from api.views.core_views import SubscribeAPIView
from api.views.order_views import BasketItemDeleteAPIView, BasketView, WishlistAPIView, WishlistDeleteAPIView
from api.views.product_views import ProductAPIView, ProductCategoryAPIView
# URL patterns for the API app. Each view is wired up explicitly with
# path(); no DRF router or browsable-API login URLs are used here.
urlpatterns = [
    path('basket/', BasketView.as_view(), name='basket'),
    path('wishlist/', WishlistAPIView.as_view(), name='wishlist'),
    path('wishlist/delete', WishlistDeleteAPIView.as_view(), name='wishlist_delete'),
    path('subscribe/', SubscribeAPIView.as_view(), name='subscribe'),
    path('productcategory/', ProductCategoryAPIView.as_view(), name='product-category'),
    path('product/', ProductAPIView.as_view(), name='product'),
    path('basket-item/delete/', BasketItemDeleteAPIView.as_view(), name='basket-item-delete'),
]
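# A minimal sketch of how this module might be mounted at project level
# (the module path 'api.urls' is assumed, not shown in the original):
# from django.urls import include, path
# urlpatterns = [path('api/', include('api.urls'))]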
|
[
"api.views.product_views.ProductAPIView.as_view",
"api.views.core_views.SubscribeAPIView.as_view",
"api.views.product_views.ProductCategoryAPIView.as_view",
"api.views.order_views.BasketItemDeleteAPIView.as_view",
"api.views.order_views.WishlistAPIView.as_view",
"api.views.order_views.BasketView.as_view",
"api.views.order_views.WishlistDeleteAPIView.as_view"
] |
[((497, 517), 'api.views.order_views.BasketView.as_view', 'BasketView.as_view', ([], {}), '()\n', (515, 517), False, 'from api.views.order_views import BasketItemDeleteAPIView, BasketView, WishlistAPIView, WishlistDeleteAPIView\n'), ((558, 583), 'api.views.order_views.WishlistAPIView.as_view', 'WishlistAPIView.as_view', ([], {}), '()\n', (581, 583), False, 'from api.views.order_views import BasketItemDeleteAPIView, BasketView, WishlistAPIView, WishlistDeleteAPIView\n'), ((632, 663), 'api.views.order_views.WishlistDeleteAPIView.as_view', 'WishlistDeleteAPIView.as_view', ([], {}), '()\n', (661, 663), False, 'from api.views.order_views import BasketItemDeleteAPIView, BasketView, WishlistAPIView, WishlistDeleteAPIView\n'), ((714, 740), 'api.views.core_views.SubscribeAPIView.as_view', 'SubscribeAPIView.as_view', ([], {}), '()\n', (738, 740), False, 'from api.views.core_views import SubscribeAPIView\n'), ((791, 823), 'api.views.product_views.ProductCategoryAPIView.as_view', 'ProductCategoryAPIView.as_view', ([], {}), '()\n', (821, 823), False, 'from api.views.product_views import ProductAPIView, ProductCategoryAPIView\n'), ((874, 898), 'api.views.product_views.ProductAPIView.as_view', 'ProductAPIView.as_view', ([], {}), '()\n', (896, 898), False, 'from api.views.product_views import ProductAPIView, ProductCategoryAPIView\n'), ((950, 983), 'api.views.order_views.BasketItemDeleteAPIView.as_view', 'BasketItemDeleteAPIView.as_view', ([], {}), '()\n', (981, 983), False, 'from api.views.order_views import BasketItemDeleteAPIView, BasketView, WishlistAPIView, WishlistDeleteAPIView\n')]
|
from robber import expect
from robber.explanation import Explanation
from robber.matchers.base import Base
class Called(Base):
"""
expect(mock).to.be.called()
"""
def matches(self):
try:
return self.actual.called
except AttributeError:
raise TypeError('{actual} is not a mock'.format(actual=self.actual))
@property
def explanation(self):
return Explanation(self.actual, self.is_negative, 'be called')
expect.register('called', Called)
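# Usage sketch (assumes a unittest.mock.Mock; not part of the original file):
# from unittest.mock import Mock
# m = Mock()
# m()
# expect(m).to.be.called()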
|
[
"robber.expect.register",
"robber.explanation.Explanation"
] |
[((479, 512), 'robber.expect.register', 'expect.register', (['"""called"""', 'Called'], {}), "('called', Called)\n", (494, 512), False, 'from robber import expect\n'), ((421, 476), 'robber.explanation.Explanation', 'Explanation', (['self.actual', 'self.is_negative', '"""be called"""'], {}), "(self.actual, self.is_negative, 'be called')\n", (432, 476), False, 'from robber.explanation import Explanation\n')]
|
import os
import h5py
import torch
import numpy as np
import scipy
import json
class CorresPondenceNet(torch.utils.data.Dataset):
def __init__(self, cfg, flag='train'):
super().__init__()
with open(os.path.join(cfg['data_path'], 'name2id.json'), 'r') as f:
self.name2id = json.load(f)
        try:
            self.catg = self.name2id[cfg['class_name'].capitalize()]
        except KeyError:
            raise ValueError(f"Unknown class name: {cfg['class_name']}")
self.task = cfg['task_type']
with h5py.File(os.path.join(cfg['data_path'], 'corr_mean_dist_geo', '{}_mean_distance.h5'.format(self.catg)), 'r') as f:
self.mean_distance = f['mean_distance'][:]
if self.task == 'embedding':
self.users = {}
self.pcds = []
self.keypoints = []
self.num_annos = 0
filename = os.path.join(
cfg['data_path'], '{}.h5'.format(self.catg))
with h5py.File(filename, 'r') as f:
self.pcds = f['point_clouds'][:]
self.keypoints = f['keypoints'][:]
self.mesh_names = f['mesh_names'][:]
num_train = int(self.pcds.shape[0] * 0.7)
num_divide = int(self.pcds.shape[0] * 0.85)
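            # 70% train / 15% val / 15% test split along the sample axis.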
if flag == 'train':
self.pcds = self.pcds[:num_train]
self.keypoints = self.keypoints[:num_train]
self.mesh_names = self.mesh_names[:num_train]
elif flag == 'val':
self.pcds = self.pcds[num_train:num_divide]
self.keypoints = self.keypoints[num_train:num_divide]
self.mesh_names = self.mesh_names[num_train:num_divide]
elif flag == 'test':
self.pcds = self.pcds[num_divide:]
self.keypoints = self.keypoints[num_divide:]
self.mesh_names = self.mesh_names[num_divide:]
else:
raise ValueError
self.num_annos = self.pcds.shape[0]
else:
raise ValueError
def __getitem__(self, item):
if self.task == 'embedding':
pcd = self.pcds[item]
keypoint_index = np.array(self.keypoints[item], dtype=np.int32)
return torch.tensor(pcd).float(), torch.tensor(keypoint_index).int(), torch.tensor(self.mean_distance).float(), 0
else:
raise ValueError
def __len__(self):
return self.num_annos
|
[
"h5py.File",
"json.load",
"numpy.array",
"os.path.join",
"torch.tensor"
] |
[((306, 318), 'json.load', 'json.load', (['f'], {}), '(f)\n', (315, 318), False, 'import json\n'), ((2186, 2232), 'numpy.array', 'np.array', (['self.keypoints[item]'], {'dtype': 'np.int32'}), '(self.keypoints[item], dtype=np.int32)\n', (2194, 2232), True, 'import numpy as np\n'), ((219, 265), 'os.path.join', 'os.path.join', (["cfg['data_path']", '"""name2id.json"""'], {}), "(cfg['data_path'], 'name2id.json')\n", (231, 265), False, 'import os\n'), ((954, 978), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (963, 978), False, 'import h5py\n'), ((2252, 2269), 'torch.tensor', 'torch.tensor', (['pcd'], {}), '(pcd)\n', (2264, 2269), False, 'import torch\n'), ((2279, 2307), 'torch.tensor', 'torch.tensor', (['keypoint_index'], {}), '(keypoint_index)\n', (2291, 2307), False, 'import torch\n'), ((2315, 2347), 'torch.tensor', 'torch.tensor', (['self.mean_distance'], {}), '(self.mean_distance)\n', (2327, 2347), False, 'import torch\n')]
|
import os
import time
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from util import epoch_time
import torch.optim as optim
from model.neural_network import RandomlyWiredNeuralNetwork
from data.data_util import fetch_dataloader, test_voc, test_imagenet
SEED = 981126
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
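# Deterministic cuDNN kernels improve reproducibility at some speed cost.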
class Trainer:
def __init__(self, num_epoch, lr,
batch_size, num_node,
p, k, m, channel,
in_channels, path,
graph_mode, dataset,
is_small_regime,
checkpoint_path, load):
super(Trainer, self).__init__()
self.params = {'num_epoch': num_epoch,
'batch_size': batch_size,
'lr': lr,
'node_num': num_node,
'p': p,
'k': k,
'm': m,
'in_channels': in_channels,
'channel': channel,
'classes': 21 if dataset == 'voc' else 1000,
'graph_mode': graph_mode,
'load': load,
'path': path,
'dataset': dataset,
'is_small_regime': is_small_regime,
'checkpoint_path': checkpoint_path
}
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.train_data, self.val_data, self.test_data = fetch_dataloader(
self.params['dataset'],
self.params['path'],
self.params['batch_size'])
self.rwnn = RandomlyWiredNeuralNetwork(
self.params['channel'],
self.params['in_channels'],
self.params['p'],
self.params['k'],
self.params['m'],
self.params['graph_mode'],
self.params['classes'],
self.params['node_num'],
self.params['checkpoint_path'],
self.params['load'],
self.params['is_small_regime']
).to(self.device)
self.optimizer = optim.SGD(
self.rwnn.parameters(), self.params['lr'], 0.9, weight_decay=5e-5)
self.best_loss = float('inf')
self.step_num = 0
if load:
checkpoint = torch.load(os.path.join(
self.params['checkpoint_path'], 'train.tar'))
self.rwnn.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(
checkpoint['optimizer_state_dict'])
self.epoch = checkpoint['epoch']
self.best_loss = checkpoint['best_loss']
self.scheduler = checkpoint['scheduler']
self.step_num = checkpoint['step_num']
else:
self.epoch = 0
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, self.params['num_epoch'])
self.criterion = nn.CrossEntropyLoss()
pytorch_total_params = sum(p.numel() for p in self.rwnn.parameters())
print(f"Number of parameters {pytorch_total_params}")
def train(self):
print("\nbegin training...")
for epoch in range(self.epoch, self.params['num_epoch']):
print(
f"\nEpoch: {epoch+1} out of {self.params['num_epoch']}, step: {self.step_num}")
start_time = time.perf_counter()
epoch_loss, step = train_loop(
self.train_data, self.rwnn, self.optimizer, self.criterion, self.device)
val_loss = val_loop(self.val_data, self.rwnn,
self.criterion, self.device)
if val_loss < self.best_loss:
self.best_loss = val_loss
with open(os.path.join(self.params['checkpoint_path'], 'best_model.txt'), 'w') as f:
f.write(
f"epoch: {epoch+1}, 'validation loss: {val_loss}, step: {self.step_num}")
torch.save(
self.rwnn,
os.path.join(self.params['checkpoint_path'], 'best.pt'))
if (epoch + 1) % 15 == 0:
if self.params['dataset'] == 'voc':
test_voc(self.test_data, self.rwnn, self.device)
self.step_num += step
self.scheduler.step()
end_time = time.perf_counter()
minutes, seconds, time_left_min, time_left_sec = epoch_time(
end_time-start_time, epoch, self.params['num_epoch'])
torch.save({
'epoch': epoch,
'model_state_dict': self.rwnn.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_loss': self.best_loss,
'scheduler': self.scheduler,
'step_num': self.step_num
}, os.path.join(self.params['checkpoint_path'], 'train.tar'))
print(
f"Train_loss: {round(epoch_loss, 3)} - Val_loss: {round(val_loss, 3)}")
print(
f"Epoch time: {minutes}m {seconds}s - Time left for training: {time_left_min}m {time_left_sec}s")
def train_loop(train_iter, model, optimizer, criterion, device):
epoch_loss = 0
step_num = 0
model.train()
print("Training...")
for src, tgt in tqdm(train_iter):
src = src.to(device)
tgt = tgt.to(device)
optimizer.zero_grad()
logits = model(src)
loss = criterion(logits, tgt)
loss.backward()
optimizer.step()
step_num += 1
epoch_loss += loss.item()
return epoch_loss / len(train_iter), step_num
def val_loop(val_iter, model, criterion, device):
model.eval()
val_loss = 0
with torch.no_grad():
print("Validating...")
for src, tgt in tqdm(val_iter):
src = src.to(device)
tgt = tgt.to(device)
logits = model(src)
loss = criterion(logits, tgt)
val_loss += loss.item()
return val_loss / len(val_iter)
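# Usage sketch (argument values are illustrative, not from the original):
# trainer = Trainer(num_epoch=100, lr=0.1, batch_size=128, num_node=32,
#                   p=0.75, k=4, m=5, channel=78, in_channels=3,
#                   path='data/', graph_mode='WS', dataset='voc',
#                   is_small_regime=True, checkpoint_path='checkpoints/',
#                   load=False)
# trainer.train()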
|
[
"util.epoch_time",
"tqdm.tqdm",
"numpy.random.seed",
"torch.manual_seed",
"data.data_util.fetch_dataloader",
"model.neural_network.RandomlyWiredNeuralNetwork",
"torch.cuda.manual_seed",
"torch.nn.CrossEntropyLoss",
"time.perf_counter",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"data.data_util.test_voc",
"random.seed",
"torch.cuda.is_available",
"torch.no_grad",
"os.path.join"
] |
[((314, 331), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (325, 331), False, 'import random\n'), ((332, 352), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (346, 352), True, 'import numpy as np\n'), ((353, 376), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (370, 376), False, 'import torch\n'), ((377, 405), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (399, 405), False, 'import torch\n'), ((5502, 5518), 'tqdm.tqdm', 'tqdm', (['train_iter'], {}), '(train_iter)\n', (5506, 5518), False, 'from tqdm import tqdm\n'), ((1660, 1753), 'data.data_util.fetch_dataloader', 'fetch_dataloader', (["self.params['dataset']", "self.params['path']", "self.params['batch_size']"], {}), "(self.params['dataset'], self.params['path'], self.params[\n 'batch_size'])\n", (1676, 1753), False, 'from data.data_util import fetch_dataloader, test_voc, test_imagenet\n'), ((3131, 3152), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3150, 3152), True, 'import torch.nn as nn\n'), ((5932, 5947), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5945, 5947), False, 'import torch\n'), ((6005, 6019), 'tqdm.tqdm', 'tqdm', (['val_iter'], {}), '(val_iter)\n', (6009, 6019), False, 'from tqdm import tqdm\n'), ((3009, 3087), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['self.optimizer', "self.params['num_epoch']"], {}), "(self.optimizer, self.params['num_epoch'])\n", (3045, 3087), True, 'import torch.optim as optim\n'), ((3560, 3579), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3577, 3579), False, 'import time\n'), ((4536, 4555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4553, 4555), False, 'import time\n'), ((4618, 4684), 'util.epoch_time', 'epoch_time', (['(end_time - start_time)', 'epoch', "self.params['num_epoch']"], {}), "(end_time - start_time, epoch, self.params['num_epoch'])\n", (4628, 4684), False, 'from util import epoch_time\n'), ((1564, 1589), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1587, 1589), False, 'import torch\n'), ((1807, 2119), 'model.neural_network.RandomlyWiredNeuralNetwork', 'RandomlyWiredNeuralNetwork', (["self.params['channel']", "self.params['in_channels']", "self.params['p']", "self.params['k']", "self.params['m']", "self.params['graph_mode']", "self.params['classes']", "self.params['node_num']", "self.params['checkpoint_path']", "self.params['load']", "self.params['is_small_regime']"], {}), "(self.params['channel'], self.params[\n 'in_channels'], self.params['p'], self.params['k'], self.params['m'],\n self.params['graph_mode'], self.params['classes'], self.params[\n 'node_num'], self.params['checkpoint_path'], self.params['load'], self.\n params['is_small_regime'])\n", (1833, 2119), False, 'from model.neural_network import RandomlyWiredNeuralNetwork\n'), ((2495, 2552), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""train.tar"""'], {}), "(self.params['checkpoint_path'], 'train.tar')\n", (2507, 2552), False, 'import os\n'), ((5034, 5091), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""train.tar"""'], {}), "(self.params['checkpoint_path'], 'train.tar')\n", (5046, 5091), False, 'import os\n'), ((4225, 4280), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""best.pt"""'], {}), "(self.params['checkpoint_path'], 'best.pt')\n", (4237, 4280), False, 'import os\n'), ((4393, 4441), 'data.data_util.test_voc', 'test_voc', (['self.test_data', 'self.rwnn', 'self.device'], {}), '(self.test_data, self.rwnn, self.device)\n', (4401, 4449), False, 'from data.data_util import fetch_dataloader, test_voc, test_imagenet\n'), ((3944, 4006), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""best_model.txt"""'], {}), "(self.params['checkpoint_path'], 'best_model.txt')\n", (3956, 4006), False, 'import os\n')]
|
from copy import deepcopy
from datetime import datetime
from os import makedirs, remove
from os.path import join, isfile, isdir, dirname
import numpy as np
import torch
def append_to_file(file, string):
dir_nm = dirname(file)
if len(dir_nm) > 0 and not isdir(dir_nm):
makedirs(dir_nm)
with open(file, "a") as f:
f.write(string + "\n")
def train_model(model, dataloader, device, criterion, optimizer):
model.train()
for xb, yb in dataloader:
xb, yb = xb.to(device), yb.to(device)
optimizer.zero_grad()
out = model(xb)
if out.ndim > 1 and out.shape[1] == 1:
out = out.squeeze(dim=1)
loss = criterion(out, yb)
loss.backward()
optimizer.step()
def eval_model(model, dataloader, device):
y_pred, y_true = [], []
model.eval()
with torch.no_grad():
for xb, yb in dataloader:
xb = xb.to(device)
out = model(xb)
if out.ndim > 1 and out.shape[1] == 1:
out.squeeze_(dim=1)
y_pred.append(out.detach_().cpu())
y_true.append(yb)
y_pred = torch.cat(y_pred, dim=0)
y_true = torch.cat(y_true, dim=0)
return y_pred, y_true
class FunctionEvaluator:
def __init__(self,
func_lst):
self.func_lst = func_lst
def __call__(self, y_pred, y_true):
res = {}
for fname, func, kwargs in self.func_lst:
res[fname] = func(y_pred, y_true, **kwargs)
return res
class Logger:
def __init__(self, save_path=None):
self.save_path = save_path
def log(self, msg):
print(msg)
if self.save_path is not None:
append_to_file(self.save_path, msg)
class TopNSaver:
def __init__(self, n):
self.n = n
self.dct = {0: None}
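        # Seed with score 0 so the first real score always qualifies for saving.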
def save(self, score, state, save_path):
if any(score > key for key in self.dct) and all(score != key for key in self.dct):
if len(self.dct) >= self.n:
key_to_delete = sorted(list(self.dct.keys()))[0]
if self.dct[key_to_delete] is not None:
try:
remove(self.dct[key_to_delete])
except OSError:
pass
self.dct.pop(key_to_delete)
self.dct[score] = save_path
torch.save(state, save_path)
class Experiment:
def __init__(self,
dl_train,
dl_train_val,
dl_validation,
model,
optimizer,
criterion,
device,
max_epoch,
metrics,
target_metric,
format_str,
init_epoch=0,
scheduler=None,
load_path=None,
save_path=None,
early_stopping=None,
evaluate_freq=1,
):
self._params = locals()
self.dl_train = dl_train
self.dl_train_val = dl_train_val
self.dl_validation = dl_validation
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.max_epoch = max_epoch
self.metric_evaluator = FunctionEvaluator(metrics)
self.target_metric = target_metric
self.format_str = format_str
self.init_epoch = init_epoch
self.scheduler = scheduler
self.load_path = load_path
self.save_path = save_path
self.logger = Logger(join(save_path, "log.txt")) if save_path is not None else Logger()
self.early_stopping = early_stopping
self.evaluate_freq = evaluate_freq
self.top5saver = TopNSaver(10)
self.reset()
def reset(self):
self.results = {
"metrics_train": [],
"metrics_valid": [],
"state_dict": None,
}
self.best_validation_metric = .0
self.model_best_state_dict = None
self.no_score_improvement = 0
self.experiment_start = datetime.now()
self.now = None
def evaluate(self, epoch, step):
# evaluate subset of train set (in eval mode)
y_pred_train, y_true_train = eval_model(model=self.model,
dataloader=self.dl_train_val,
device=self.device)
metrics_train = self.metric_evaluator(y_pred_train, y_true_train)
self.results["metrics_train"].append(metrics_train)
# evaluate validation subset
y_pred_valid, y_true_valid = eval_model(model=self.model,
dataloader=self.dl_validation,
device=self.device)
metrics_valid = self.metric_evaluator(y_pred_valid, y_true_valid)
self.results["metrics_valid"].append(metrics_valid)
val_score = metrics_valid[self.target_metric]
# check if validation score is improved
if val_score > self.best_validation_metric:
self.model_best_state_dict = deepcopy(self.model.state_dict())
self.best_validation_metric = val_score
# reset early stopping counter
self.no_score_improvement = 0
# save best model weights
if self.save_path is not None:
torch.save(self.model_best_state_dict, join(self.save_path, "best_weights.pth"))
else:
self.no_score_improvement += 1
if self.early_stopping is not None and self.no_score_improvement >= self.early_stopping:
self.logger.log("Early stopping at epoch %d, step %d" % (epoch, step))
return True
if self.scheduler is not None:
self.scheduler.step(val_score)
if self.save_path is not None:
# (optional) save model state dict at end of each epoch
self.top5saver.save(val_score,
self.model.state_dict(),
join(self.save_path, "model_state_{}_{}.pth".format(epoch, step)))
# torch.save(self.model.state_dict(), join(self.save_path, "model_state_{}_{}.pth".format(epoch, step)))
# save full experiment state at the end of each epoch
checkpoint = {
'epoch': epoch + 1,
'model_curr_state_dict': self.model.state_dict(),
'model_best_state_dict': self.model_best_state_dict,
'optimizer_state_dict': self.optimizer.state_dict(),
'scheduler_state_dict': None if self.scheduler is None else self.scheduler.state_dict(),
'no_score_improvement': self.no_score_improvement,
'best_validation_metric': self.best_validation_metric,
}
torch.save(checkpoint, join(self.save_path, "full_state.pth"))
metrics_train = dict([(key + "_train", val) for key, val in metrics_train.items()])
metrics_valid = dict([(key + "_valid", val) for key, val in metrics_valid.items()])
time_delta = datetime.now() - self.now
s = self.format_str.format(time_delta=time_delta,
epoch=epoch,
step=step,
max_epoch=self.max_epoch,
**metrics_train,
**metrics_valid)
self.logger.log(s)
self.now = datetime.now()
return False
def train(self, epoch):
        steps = np.round(np.linspace(0, len(self.dl_train), self.evaluate_freq + 1)).astype(int)
steps = steps[1:-1]
step = 1
self.model.train()
for i, (xb, yb) in enumerate(self.dl_train):
xb, yb = xb.to(self.device), yb.to(self.device)
self.optimizer.zero_grad()
out = self.model(xb)
if out.ndim == 2 and out.shape[1] == 1:
out = out.squeeze(dim=1)
loss = self.criterion(out, yb)
loss.backward()
self.optimizer.step()
if i in steps:
res = self.evaluate(epoch, step)
if res:
return True
step += 1
self.model.train()
self.evaluate(epoch, step)
return False
def run(self):
self.reset()
experiment_start = datetime.now()
if self.save_path is not None:
if not isdir(self.save_path):
makedirs(self.save_path)
# dump all args and their values
for key, value in self._params.items():
append_to_file(join(self.save_path, "params.txt"), "{}: {}".format(key, repr(value)))
if self.load_path is not None:
# load full experiment state to continue experiment
load_path = join(self.load_path, "full_state.pth")
if not isfile(load_path):
raise ValueError("Checkpoint file {} does not exist".format(load_path))
checkpoint = torch.load(load_path)
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.model_best_state_dict = checkpoint['model_best_state_dict']
self.model.load_state_dict(checkpoint['model_curr_state_dict'])
self.init_epoch = checkpoint['epoch']
self.best_validation_metric = checkpoint['best_validation_metric']
if self.scheduler is not None:
self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
self.logger.log("Successfully loaded checkpoint.")
self.logger.log(self.format_str)
self.now = datetime.now()
for epoch in range(self.init_epoch, self.max_epoch):
res = self.train(epoch)
if res:
break
self.logger.log("Experiment time: {}".format(datetime.now() - experiment_start))
return self.results
|
[
"os.remove",
"os.makedirs",
"os.path.join",
"os.path.isdir",
"os.path.dirname",
"torch.load",
"torch.cat",
"torch.save",
"os.path.isfile",
"torch.no_grad",
"datetime.datetime.now"
] |
[((219, 232), 'os.path.dirname', 'dirname', (['file'], {}), '(file)\n', (226, 232), False, 'from os.path import join, isfile, isdir, dirname\n'), ((287, 303), 'os.makedirs', 'makedirs', (['dir_nm'], {}), '(dir_nm)\n', (295, 303), False, 'from os import makedirs, remove\n'), ((848, 863), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (861, 863), False, 'import torch\n'), ((1139, 1163), 'torch.cat', 'torch.cat', (['y_pred'], {'dim': '(0)'}), '(y_pred, dim=0)\n', (1148, 1163), False, 'import torch\n'), ((1181, 1205), 'torch.cat', 'torch.cat', (['y_true'], {'dim': '(0)'}), '(y_true, dim=0)\n', (1190, 1205), False, 'import torch\n'), ((4131, 4145), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4143, 4145), False, 'from datetime import datetime\n'), ((7579, 7593), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7591, 7593), False, 'from datetime import datetime\n'), ((8517, 8531), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8529, 8531), False, 'from datetime import datetime\n'), ((9812, 9826), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9824, 9826), False, 'from datetime import datetime\n'), ((264, 277), 'os.path.isdir', 'isdir', (['dir_nm'], {}), '(dir_nm)\n', (269, 277), False, 'from os.path import join, isfile, isdir, dirname\n'), ((2382, 2410), 'torch.save', 'torch.save', (['state', 'save_path'], {}), '(state, save_path)\n', (2392, 2410), False, 'import torch\n'), ((7190, 7204), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7202, 7204), False, 'from datetime import datetime\n'), ((8983, 9021), 'os.path.join', 'join', (['self.load_path', '"""full_state.pth"""'], {}), "(self.load_path, 'full_state.pth')\n", (8987, 9021), False, 'from os.path import join, isfile, isdir, dirname\n'), ((9174, 9195), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (9184, 9195), False, 'import torch\n'), ((3606, 3632), 'os.path.join', 'join', (['save_path', '"""log.txt"""'], {}), "(save_path, 'log.txt')\n", (3610, 3632), False, 'from os.path import join, isfile, isdir, dirname\n'), ((6944, 6982), 'os.path.join', 'join', (['self.save_path', '"""full_state.pth"""'], {}), "(self.save_path, 'full_state.pth')\n", (6948, 6982), False, 'from os.path import join, isfile, isdir, dirname\n'), ((8591, 8612), 'os.path.isdir', 'isdir', (['self.save_path'], {}), '(self.save_path)\n', (8596, 8612), False, 'from os.path import join, isfile, isdir, dirname\n'), ((8630, 8654), 'os.makedirs', 'makedirs', (['self.save_path'], {}), '(self.save_path)\n', (8638, 8654), False, 'from os import makedirs, remove\n'), ((9041, 9058), 'os.path.isfile', 'isfile', (['load_path'], {}), '(load_path)\n', (9047, 9058), False, 'from os.path import join, isfile, isdir, dirname\n'), ((5496, 5536), 'os.path.join', 'join', (['self.save_path', '"""best_weights.pth"""'], {}), "(self.save_path, 'best_weights.pth')\n", (5500, 5536), False, 'from os.path import join, isfile, isdir, dirname\n'), ((8784, 8818), 'os.path.join', 'join', (['self.save_path', '"""params.txt"""'], {}), "(self.save_path, 'params.txt')\n", (8788, 8818), False, 'from os.path import join, isfile, isdir, dirname\n'), ((10020, 10034), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10032, 10034), False, 'from datetime import datetime\n'), ((2189, 2220), 'os.remove', 'remove', (['self.dct[key_to_delete]'], {}), '(self.dct[key_to_delete])\n', (2195, 2220), False, 'from os import makedirs, remove\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import urllib
import urllib2
import json
import datetime
class Error(Exception):
pass
class OpenStack(object):
def __init__(self, url, user, password, region=''):
self.auth_url = url
self.user = user
self.public_url, self.auth_token = self.auth(password, region)
def auth(self, password, region):
auth_url = '/'.join([self.auth_url, 'v2.0/tokens'])
auth_data = json.dumps({
'auth': {
'tenantName': self.user,
'passwordCredentials': {
'username': self.user,
'password': password,
},
},
})
auth_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
request = urllib2.Request(auth_url, auth_data, auth_headers)
auth_reply = json.loads(urllib2.urlopen(request).read())
for service in auth_reply['access']['serviceCatalog']:
if service['name'] == 'nova':
for endpoint in service['endpoints']:
if not region or endpoint['region'] == region:
public_url = endpoint['publicURL']
break
else:
raise Error('Service "nova" not available for this region')
break
else:
raise Error('Service "nova" not available')
auth_token = auth_reply['access']['token']['id']
return public_url, auth_token
def query(self, query, url=None, **kwargs):
query_args = urllib.urlencode(kwargs)
query_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Auth-Project-Id': self.user,
'X-Auth-Token': self.auth_token,
}
query_url = '/'.join([
url or self.public_url,
query,
]) + '?' + query_args
request = urllib2.Request(query_url, headers=query_headers)
return json.loads(urllib2.urlopen(request).read())
def simple_tenant_usage(self, start=None, end=None):
if end is None:
end = datetime.datetime.now()
if start is None:
start = end - datetime.timedelta(hours=24)
return self.query(
'os-simple-tenant-usage',
start=start.strftime('%Y-%m-%dT%H:%M:%S'),
end=end.strftime('%Y-%m-%dT%H:%M:%S'),
)['tenant_usages']
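# Usage sketch (endpoint and credentials are placeholders; Python 2 only):
# cloud = OpenStack('http://keystone.example:5000', 'tenant', 'secret')
# usages = cloud.simple_tenant_usage()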
|
[
"urllib2.Request",
"json.dumps",
"datetime.timedelta",
"urllib.urlencode",
"datetime.datetime.now",
"urllib2.urlopen"
] |
[((593, 715), 'json.dumps', 'json.dumps', (["{'auth': {'tenantName': self.user, 'passwordCredentials': {'username': self\n .user, 'password': password}}}"], {}), "({'auth': {'tenantName': self.user, 'passwordCredentials': {\n 'username': self.user, 'password': password}}})\n", (603, 715), False, 'import json\n'), ((983, 1033), 'urllib2.Request', 'urllib2.Request', (['auth_url', 'auth_data', 'auth_headers'], {}), '(auth_url, auth_data, auth_headers)\n', (998, 1033), False, 'import urllib2\n'), ((1773, 1797), 'urllib.urlencode', 'urllib.urlencode', (['kwargs'], {}), '(kwargs)\n', (1789, 1797), False, 'import urllib\n'), ((2159, 2208), 'urllib2.Request', 'urllib2.Request', (['query_url'], {'headers': 'query_headers'}), '(query_url, headers=query_headers)\n', (2174, 2208), False, 'import urllib2\n'), ((2368, 2391), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2389, 2391), False, 'import datetime\n'), ((2444, 2472), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (2462, 2472), False, 'import datetime\n'), ((1066, 1090), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (1081, 1090), False, 'import urllib2\n'), ((2235, 2259), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (2250, 2259), False, 'import urllib2\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SPDX-FileCopyrightText: Siemens AG, 2020 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__author__ = 'Siemens AG'
import gc
import os
import sys
import time
import base64
import hashlib
import threading
from datetime import datetime
from subprocess import PIPE, Popen
import boto3
import psutil
from botocore.exceptions import ClientError
class ProgressPercentage(object):
"""
Progress percentage printer
"""
def __init__(self, filename, size):
self._filename = filename
self._size = size
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
with self._lock:
self._seen_so_far += bytes_amount
percentage = round((self._seen_so_far / self._size) * 100, 2)
sys.stdout.write(f"Downloading {self._filename}: {percentage}% " +
"done \r")
sys.stdout.flush()
class S3Handler:
"""
Class to handle communications with S3 services
:ivar s3client: S3 client from boto3
:ivar bucket: Name of the bucket to use
:ivar verbose: Verbosity level (0-3)
:ivar temp_download: Path of the location where object from S3 is stored
:ivar restore_partition_size: Size of partition being restored
:ivar split_size: Size in bytes to split tar at
:ivar gzip: True to compress tar with gzip
:ivar storage_class: Storage class of S3 object
:ivar FIVE_HUNDRED_MB: Five hundred MiB in bytes
:ivar FIVE_GB: Five GiB in bytes
"""
FIVE_HUNDRED_MB = 500 * (1024 ** 2)
FIVE_GB = (5 * (1024 ** 3))
def __init__(self, bucket, split_size=5497558138880.0, gzip=False,
storage_class="STANDARD", verbose=0):
"""
Initializer for the class attributes.
Additionally, check if the provided bucket can be accessed.
:param bucket: Bucket to use
:type bucket: string
:param split_size: Split size of tar
:type split_size: float
:param gzip: True to compress tar with gzip
:type gzip: boolean
:param storage_class: Storage class of S3 object
:type storage_class: string
:param verbose: Verbosity level (0-3)
:type verbose: integer
"""
self.s3client = boto3.client('s3')
self.bucket = bucket
        self.__check_bucket_accessibility(bucket)
self.split_size = split_size
self.gzip = gzip
self.storage_class = storage_class
self.verbose = verbose
    def __check_bucket_accessibility(self, bucket):
"""
Check if the bucket can be accessed
:param bucket: Bucket to check
:type bucket: string
:raises Exception: If the bucket can't be accessed
"""
try:
response = self.s3client.head_bucket(Bucket=bucket)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception
except Exception as e:
print(f"Unable to access bucket '{bucket}'", file=sys.stderr)
raise e
def __get_object_count(self, key):
"""
Get the count of objects under given key
This function also assigns value to attribute ``restore_partition_size``.
If the object has meta data ``x-amz-meta-disc-size`` and value if
greater than 1, partition size is assigned to its value.
Otherwise, partition size is assigned value of content length
:param key: Object key to check
:type key: string
:return: Number of objects with ``key`` as the prefix
:rtype: integer
:raises Exception: If the objects can't be accessed
"""
try:
response = self.s3client.list_objects_v2(Bucket=self.bucket,
Prefix=key)
if response['ResponseMetadata']['HTTPStatusCode'] != 200 or 'Contents' not in response:
raise Exception
objects = [o for o in response['Contents']]
response = self.s3client.head_object(Bucket=self.bucket,
Key=objects[0]['Key'])
partition_size = 0
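            # boto3 adds/strips the x-amz-meta- prefix automatically, so the
            # doubled key written in __get_key_uploadid round-trips here as-is
            # (assumed reading; the naming is inconsistent but functional).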
if 'x-amz-meta-disc-size' in response['Metadata']:
partition_size = int(
response['Metadata']['x-amz-meta-disc-size'])
if partition_size < 2:
partition_size = sum([int(o['Size']) for o in objects])
self.restore_partition_size = partition_size
return len(objects)
except Exception as e:
print(f"Unable to access key '{key}' in bucket '{self.bucket}'",
file=sys.stderr)
raise e
def __byte_checksum(self, data):
"""
Calculate the checksum for the given bytes
:param data: Data to calculate checksum for
:type data: byte
:return: The Base64 encoded MD5 checksum
:rtype: string
"""
md_obj = hashlib.md5()
md_obj.update(data)
return base64.b64encode(md_obj.digest()).decode('UTF-8').strip()
def __get_key_uploadid(self, snapshot, size, partno):
"""
Generate the key and uploadid for a snapshot
:param snapshot: Snapshot to get the key for
:type snapshot: dict()
:param size: Size of mounted partition
:type size: integer
:param partno: Part no of the upload (-1 for single part upload)
:type partno: integer
:return: S3 key and uploadid for the snapshot
:rtype: list()
"""
meta_data = dict()
content_type = 'application/x-tar'
timestr = datetime.now().isoformat(timespec='seconds')
created = snapshot['created'].isoformat(timespec='seconds')
name = snapshot['name'].replace(' ', '+').replace('/', '_')
key = f"snap/{name}/{snapshot['id']}-{created}-{timestr}"
meta_data["creation-time"] = snapshot['created'].isoformat()
meta_data["snap-volume-size"] = f"{snapshot['volumesize']} GiB"
if partno == -1:
key = f"{key}.tar"
if self.gzip:
key = f"{key}.gz"
content_type = 'application/gzip'
else:
key = f"{key}-part{partno}.tar"
if self.gzip:
key = f"{key}.gz"
content_type = 'application/gzip'
if size > 1:
meta_data["x-amz-meta-disc-size"] = str(size)
res = self.s3client.create_multipart_upload(
Bucket=self.bucket,
ContentType=content_type,
Key=key,
Metadata=meta_data,
StorageClass=self.storage_class
)
return (key, res['UploadId'])
def initiate_upload(self, snapshot, path, size=0):
"""
Start multipart upload
1. Initialize the variables
1. If the upload can be done in one go, set partno as -1
2. Get the first key and upload id
3. Create a tar process
4. Read a chunk (max 5 GB or available RAM size - 50 MB of overhead or
remaining size before split occurs)
1. Have read enough data for split
1. Finish the upload, reset the counters
2. If more data to read, get new key and upload id.
3. Otherwise break.
2. Calculate new chunk size to be read
3. Read the chunk, update the counters and get the checksum of the
chunk
4. Upload part and add returned Etag to list
4. Finish the upload
If upload fails in between, abort the upload
:param snapshot: Snapshot to be uploaded
:type snapshot: dict()
:param path: Path of the mounted directory
:type path: string
:param size: Size of the partition (attached as meta info)
:type size: integer
"""
uploaded_bytes = 0
if self.split_size >= size:
if self.verbose > 1:
print("Uploading snapshot as a single file as " +
f"{self.split_size} >= {size}")
partno = -1
else:
partno = 1
tar_process = Popen(["tar", "--directory", path, "--create",
"--preserve-permissions", "."], stdout=PIPE)
read_process = tar_process
if self.gzip:
gzip_process = Popen(["gzip", "--to-stdout", "-6"],
stdin=tar_process.stdout, stdout=PIPE)
read_process = gzip_process
more_to_read = True
try:
while more_to_read:
(key, uploadid) = self.__get_key_uploadid(snapshot, size,
partno)
(uploaded_bytes, more_to_read) = self.__read_and_upload_part(
read_process, uploaded_bytes, key, uploadid)
partno += 1
finally:
read_process = None
if self.gzip:
gzip_process.wait()
tar_process.wait()
print()
if self.verbose > 0:
print("Multipart upload finished. Sending complete")
def __read_and_upload_part(self, read_process, uploaded_bytes, key,
upload_id):
"""
Prepare an upload a single part of the tar.
1. Read the data from read_process
2. Upload it as multipart upload
3. Check if there is more data to be uploaded
4. Set the flag and complete the multipart upload
:param read_process: The process to read from
:type read_process: subprocess.Popen
:param uploaded_bytes: No of bytes already uploaded
:type uploaded_bytes: integer
:param key: S3 key
:type key: string
:param upload_id: S3 multipart upload id
:type upload_id: string
:return: No of total bytes uploaded, is there more data to process
:rtype: dict(integer, boolean)
"""
tar_read_bytes = 0
upload_partid = 1
parts_info = list()
more_to_read = True
print(f"Uploading {key} to {self.bucket} bucket")
while True:
free_mem = psutil.virtual_memory().available
if free_mem > self.FIVE_GB: # Maximum part size is 5 GiB
free_mem = self.FIVE_GB
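            # Keep ~500 MiB of headroom so reading the chunk cannot exhaust RAM.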
max_chunk = free_mem - self.FIVE_HUNDRED_MB
if tar_read_bytes + max_chunk > self.split_size:
read_chunk = self.split_size - tar_read_bytes
else:
read_chunk = max_chunk
try:
inline = read_process.stdout.read(read_chunk)
if len(inline) == 0:
# No more data to read
more_to_read = False
break
tar_read_bytes += len(inline)
uploaded_bytes += len(inline)
resp = self.__upload_s3_part(inline, key, upload_partid,
upload_id)
del inline
parts_info.append({
'ETag': resp['ETag'],
'PartNumber': upload_partid
})
if self.verbose > 0:
print(f"Part # {upload_partid}, ", end='')
print("Uploaded " +
str(round(uploaded_bytes / (1024 ** 2), 2)) +
" MiB (total) ", end="\r")
upload_partid += 1
gc.collect()
if (tar_read_bytes >= self.split_size):
# One split upload completed
break
except Exception as e:
print("\nMultipart upload failed. Trying to abort",
file=sys.stderr)
inline = None # Safely drop the data
self.s3client.abort_multipart_upload(
Bucket=self.bucket,
Key=key,
UploadId=upload_id
)
raise e
self.__complete_upload(key, upload_id, parts_info)
return uploaded_bytes, more_to_read
def __upload_s3_part(self, body, key, part_id, upload_id, retry_count=0):
"""
Upload a part of S3 multipart upload.
The function also reties failed calls. Every upload request, if failed,
will be retried 4 times at 4 seconds of intervals.
:param body: Body of the upload
:param key: S3 object key
:type key: string
:param part_id: Upload part ID
:type part_id: int
:param upload_id: Multipart upload's Upload ID
:type upload_id: string
:param retry_count: How many retries have been done.
:type retry_count: int
:return: Response from S3
:raises Exception: If all upload attempt fails
"""
if retry_count > 3:
raise Exception("S3 multipart part upload failed")
try:
return self.s3client.upload_part(
Body=body,
Bucket=self.bucket,
ContentMD5=self.__byte_checksum(body),
Key=key,
PartNumber=part_id,
UploadId=upload_id
)
except ClientError as error:
print(f"Failed: '{error.response['Error']['Message']}'.\nRetrying.",
file=sys.stderr)
time.sleep(4.0)
return self.__upload_s3_part(body, key, part_id, upload_id,
retry_count + 1)
def __complete_upload(self, key, uploadid, partlist, retry_count=0):
"""
Complete a multipart upload
The function also reties failed calls. Every upload request, if failed,
will be retried 4 times at 4 seconds of intervals.
:param key: Key of the upload
:type key: string
:param uploadid: Upload id of the multipart upload
:type uploadid: string
:param partlist: List of uploaded parts
:type partlist: list(dict())
:raises Exception: If all upload attempt fails, abort uploads.
"""
if retry_count > 3:
print("\nMultipart upload failed. Trying to abort",
file=sys.stderr)
self.s3client.abort_multipart_upload(
Bucket=self.bucket,
Key=key,
UploadId=uploadid
)
raise Exception("S3 upload failed")
try:
self.s3client.complete_multipart_upload(
Bucket=self.bucket,
Key=key,
MultipartUpload={
'Parts': partlist
},
UploadId=uploadid
)
except ClientError as error:
print(f"Failed: '{error.response['Error']['Message']}'.\nRetrying.",
file=sys.stderr)
time.sleep(4.0)
self.__complete_upload(key, uploadid, partlist, retry_count + 1)
if self.verbose > 0:
print(f"\nCompleted multipart upload, key: {key}")
def get_object_count_and_size(self, key):
"""
Check if the given key is available and return number of objects under
it.
:param key: Key to check
:type key: string
:return: Number of objects under provided key prefix, size of unpacked
tar
:rtype: tuple(integer, integer)
"""
return (self.__get_object_count(key),
self.restore_partition_size)
def download_key(self, key, partno, restore_dir):
"""
Download the key from S3
Create a temporary path to download the key and start download.
:param key: Key to be downloaded
:type key: string
:param partno: Part number of the key to be downloaded (-1 if there is
only one part)
:type partno: integer
:param restore_dir: Location to store S3 object for restore
:type restore_dir: string
:return: Location of downloaded file and size of restored partition (in
bytes)
:rtype: dict(string, integer)
:raises Exception: If download fails, delete the temp location
"""
response = self.s3client.list_objects_v2(Bucket=self.bucket,
Prefix=key)
keys = [o['Key'] for o in response['Contents']]
download_key_name = None
if partno == -1:
download_key_name = keys[0]
else:
for key in keys:
if f"-part{partno}.tar" in key:
download_key_name = key
break
        if download_key_name is None:
raise Exception(f"Unable to find part '{partno}' under key {key}")
self.temp_download = os.path.join(restore_dir, download_key_name)
os.makedirs(os.path.dirname(self.temp_download), exist_ok=True)
size = self.s3client.head_object(Bucket=self.bucket,
Key=download_key_name)['ContentLength']
progress = ProgressPercentage(key, size)
try:
self.s3client.download_file(self.bucket, download_key_name,
self.temp_download, Callback=progress)
print()
except Exception as e:
print(f"Failed while downloading s3://{self.bucket}/{download_key_name}",
file=sys.stderr)
os.remove(self.temp_download)
raise e
return self.temp_download
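# Usage sketch (bucket name and snapshot dict are placeholders):
# handler = S3Handler('my-backup-bucket', gzip=True, verbose=1)
# handler.initiate_upload({'id': 1, 'name': 'root', 'volumesize': 20,
#                          'created': datetime.now()}, '/mnt/snapshot', size=0)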
|
[
"sys.stdout.write",
"subprocess.Popen",
"hashlib.md5",
"psutil.virtual_memory",
"os.remove",
"boto3.client",
"os.path.dirname",
"datetime.datetime.now",
"time.sleep",
"threading.Lock",
"gc.collect",
"sys.stdout.flush",
"os.path.join"
] |
[((633, 649), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (647, 649), False, 'import threading\n'), ((2340, 2358), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (2352, 2358), False, 'import boto3\n'), ((5063, 5076), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (5074, 5076), False, 'import hashlib\n'), ((8280, 8375), 'subprocess.Popen', 'Popen', (["['tar', '--directory', path, '--create', '--preserve-permissions', '.']"], {'stdout': 'PIPE'}), "(['tar', '--directory', path, '--create', '--preserve-permissions',\n '.'], stdout=PIPE)\n", (8285, 8375), False, 'from subprocess import PIPE, Popen\n'), ((16956, 17000), 'os.path.join', 'os.path.join', (['restore_dir', 'download_key_name'], {}), '(restore_dir, download_key_name)\n', (16968, 17000), False, 'import os\n'), ((846, 923), 'sys.stdout.write', 'sys.stdout.write', (["(f'Downloading {self._filename}: {percentage}% ' + 'done \\r')"], {}), "(f'Downloading {self._filename}: {percentage}% ' + 'done \\r')\n", (862, 923), False, 'import sys\n'), ((965, 983), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (981, 983), False, 'import sys\n'), ((8485, 8560), 'subprocess.Popen', 'Popen', (["['gzip', '--to-stdout', '-6']"], {'stdin': 'tar_process.stdout', 'stdout': 'PIPE'}), "(['gzip', '--to-stdout', '-6'], stdin=tar_process.stdout, stdout=PIPE)\n", (8490, 8560), False, 'from subprocess import PIPE, Popen\n'), ((17021, 17056), 'os.path.dirname', 'os.path.dirname', (['self.temp_download'], {}), '(self.temp_download)\n', (17036, 17056), False, 'import os\n'), ((5743, 5757), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5755, 5757), False, 'from datetime import datetime\n'), ((10316, 10339), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (10337, 10339), False, 'import psutil\n'), ((11618, 11630), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11628, 11630), False, 'import gc\n'), ((13533, 13548), 'time.sleep', 'time.sleep', (['(4.0)'], {}), '(4.0)\n', (13543, 13548), False, 'import time\n'), ((15030, 15045), 'time.sleep', 'time.sleep', (['(4.0)'], {}), '(4.0)\n', (15040, 15045), False, 'import time\n'), ((17612, 17641), 'os.remove', 'os.remove', (['self.temp_download'], {}), '(self.temp_download)\n', (17621, 17641), False, 'import os\n')]
|
import pygame
import math
from functools import reduce
from tower import Tower
from unit import Unit
class BoardState:
def __init__(self, board):
self.unitsDeployed = len(board._units) + board._unitsDestroyed + board._unitsThatReachedGoal
self.towersDeployed = 0
self.towersInUpperLeft = 0
self.towersInUpperRight = 0
self.towersInLowerLeft = 0
self.towersInLowerRight = 0
self.unitsOnLeftSide = 0
self.unitsOnRightSide = 0
for col in range(0, board._width):
for row in range(0, board._height):
                tower = board._towers[col][row]
if not (tower is None):
self.towersDeployed += 1
if tower._x < board._width/2:
if tower._y < board._height/2:
self.towersInUpperLeft += 1
else:
self.towersInLowerLeft += 1
else:
if tower._y < board._height/2:
self.towersInUpperRight += 1
else:
self.towersInLowerRight += 1
for unit in board._units:
if unit._x < board._width/2:
self.unitsOnLeftSide += 1
else:
self.unitsOnRightSide += 1
def normalizedDistToState(self, boardState):
thisVector = []
otherVector = []
        # Compute the L2 norm of each state vector
thisTotal = 0.0
otherTotal = 0.0
for key in self.__dict__:
thisTotal += pow(self.__dict__[key], 2)
otherTotal += pow(boardState[key], 2)
thisTotal = math.sqrt(thisTotal)
otherTotal = math.sqrt(otherTotal)
        # Normalize each vector by its L2 norm
for key in self.__dict__:
if thisTotal > 0:
thisVector.append(self.__dict__[key] / thisTotal)
else:
thisVector.append(0)
if otherTotal > 0:
otherVector.append(boardState[key] / otherTotal)
else:
otherVector.append(0)
        # Return the L1 distance between the two normalized state vectors
dist = 0.0
for i in range(0, len(thisVector)):
dist += abs(thisVector[i] - otherVector[i])
return dist
class Board:
def __init__(self, offset_x, offset_y):
self._offset_x = offset_x
self._offset_y = offset_y
self._cell_size = 32
self._width = 10
self._height = 10
self._towers = [[None for x in range(self._height)] for x in range(self._width)]
self._tower_list = []
self._num_towers = 0
self._num_units = 0 # total that have been deployed this round
self._last_tower = None
self._last_unit = None
self._last_unit_initial_location = None
self._units = []
self._bullets = []
self._unitsThatReachedGoal = 0
self._unitsDestroyed = 0
self._score = 0
self._tick = 0
def hasUnit(self, x, y):
        return any(
            (int(u._lastNode[0]) == x and int(u._lastNode[1]) == y)
            or (u._nextNode and int(u._nextNode[0]) == x and int(u._nextNode[1]) == y)
            for u in self._units
        )
def isInBounds(self, x, y):
return 0 <= x < self._width and 0 <= y < self._height
def hasTower(self, x, y):
if not self.isInBounds(x, y):
return False
        return self._towers[x][y] is not None
def add_tower(self, tower):
if not self.isInBounds(tower._x, tower._y):
return False
if self.hasUnit(tower._x, tower._y):
return False
if not (0 <= tower._x < self._width and 0 <= tower._y < self._height):
return False
if self.hasTower(tower._x, tower._y):
return False
self._towers[tower._x][tower._y] = tower
if not (self.path_exists() and self.unit_path_exists()):
self._towers[tower._x][tower._y] = None
return False
self._tower_list += [tower]
self._last_tower = tower
self._num_towers += 1
return True
def add_unit(self, unit):
if unit._x < 0 or unit._x > self._width-1:
return False
if unit._y != -1:
return False
self._units.append(unit)
self._last_unit = unit
self._last_unit_initial_location = (unit._x, unit._y)
self._num_units += 1
return True
def add_bullet(self, bullet):
self._bullets.append(bullet)
def step(self):
self._tick += 1
for unit in self._units:
unit.step(self)
# Check for updates on all units
for unit in self._units:
if unit._shouldDestroy:
self._unitsDestroyed += 1
self._score += int(unit._y)
self._units.remove(unit)
elif unit._y >= self._height:
self._unitsThatReachedGoal += 1
self._score += 10
unit.setIsAtGoal()
self._units.remove(unit)
# Check for updates on all bullets
for bullet in self._bullets:
bullet.step()
if bullet._shouldDestroy:
self._bullets.remove(bullet)
continue
# Check for out of bounds
if bullet._x > self._width or bullet._x < 0 or bullet._y > self._height+2 or bullet._y < -2:
bullet.setShouldDestroy()
else:
# Check for collisions
for unit in self._units:
if self.has_collision(bullet, unit):
unit.damage(50)
bullet._parent._body_count += 1
bullet.setShouldDestroy()
break
# Updates towers
for i in range(self._width):
for j in range(self._height):
if (self._towers[i][j] is None):
continue
self._towers[i][j].step(self)
# The state of the board at a given step (used by the generator)
def getState(self):
return BoardState(self)
# The score for the game (used by the generator)
def getScore(self):
aliveUnitsTotalDistance = 0
for unit in self._units:
aliveUnitsTotalDistance += int(unit._y)
return self._score + aliveUnitsTotalDistance
def execute(self, action):
if action.name == "PlaceUnitAction":
self.add_unit(Unit(action.x, -1, 0))
elif action.name == "PlaceTowerAction":
self.add_tower(Tower(action.x, action.y))
def distance(self, obj1, obj2):
return math.sqrt(pow(obj1._x - obj2._x, 2) + pow(obj1._y - obj2._y, 2))
def has_collision(self, obj1, obj2):
return self.distance(obj1, obj2) < 0.5
def draw(self, screen):
# Draws grid
line_color = (125, 125, 125)
for x in range(self._offset_x, self._offset_x + self._width * self._cell_size + 1, self._cell_size):
pygame.draw.line(screen, line_color, (x, self._offset_y),
(x, self._offset_y + self._height * self._cell_size))
for y in range(self._offset_y, self._offset_y + self._height * self._cell_size + 1, self._cell_size):
pygame.draw.line(screen, line_color, (self._offset_x, y),
(self._offset_x + self._width * self._cell_size, y))
# Draws mouse tower
# mouse_x, mouse_y = pygame.mouse.get_pos()
# if self.contains_point(mouse_x, mouse_y):
# s = Tower._image_transparent.copy()
# screen.blit(s, self.trunc_screen(mouse_x, mouse_y))
# Draws towers
for i in range(self._width):
for j in range(self._height):
if (self._towers[i][j] is None):
continue
screen.blit(self._towers[i][j]._image, self.grid_to_screen(i, j))
# Draws enemy units
for unit in self._units:
screen.blit(unit._image, self.grid_to_screen(unit._x, unit._y))
# Draws tower bullets
for bullet in self._bullets:
screen.blit(bullet._image, self.grid_to_screen(bullet._x, bullet._y))
def grid_to_screen(self, x, y):
return (x * self._cell_size + self._offset_x, y * self._cell_size + self._offset_y)
def trunc_screen(self, x, y):
return (((int) (x / self._cell_size)) * self._cell_size,
((int) (y / self._cell_size)) * self._cell_size)
def screen_to_grid(self, x, y):
return ((int) ((x - self._offset_x) / self._cell_size), (int) ((y - self._offset_y) / self._cell_size))
def contains_point(self, x, y):
return (self._offset_x <= x < self._offset_x + self._width * self._cell_size
and self._offset_y <= y < self._offset_y + self._height * self._cell_size)
def unit_path_exists(self):
paths = [self.path_from(u._lastNode[0], u._lastNode[1]) is not None for u in self._units if u._y >= 0]
return reduce(lambda u1, u2: u1 and u2, paths, True)
def path_exists(self):
for x in range(len(self._towers[0])):
if self.path_from(x, 0) is None:
continue
return True
return False
def path_from(self, x, y):
if not self.isInBounds(x, y):
return None
if self.hasTower(x, y):
return None
open_nodes = []
nodes = [[(False, -1, (-1, -1)) for x in range(0, self._height)] for x in range(0, self._width)]
nodes[x][y] = (True, 0, None)
open_nodes.append((0, (x, y)))
while len(open_nodes) > 0:
node = open_nodes.pop(0)
            if node[1][1] == self._height - 1:  # reached the goal (bottom) row
path = []
path.append(node[1])
temp = nodes[node[1][0]][node[1][1]]
while temp[2] is not None:
path = [temp[2]] + path
temp = nodes[temp[2][0]][temp[2][1]]
return path
for i in range(max(0, node[1][0] - 1), min(self._width, node[1][0] + 2)):
for j in range(max(0, node[1][1] - 1), min(self._height, node[1][1] + 2)):
if self.hasTower(i, j):
continue
                    if i == node[1][0] and j == node[1][1]:
                        continue  # skip the node itself
                    if i != node[1][0] and j != node[1][1]:
                        continue  # skip diagonal neighbors
if nodes[i][j][0] and node[0] + 1 >= nodes[i][j][1]:
continue
open_nodes.append((node[0] + 1, (i, j)))
nodes[i][j] = (True, node[0] + 1, node[1])
open_nodes.sort(key=lambda n: n[0])
return None
|
[
"pygame.draw.line",
"math.sqrt",
"unit.Unit",
"tower.Tower",
"functools.reduce"
] |
[((1719, 1739), 'math.sqrt', 'math.sqrt', (['thisTotal'], {}), '(thisTotal)\n', (1728, 1739), False, 'import math\n'), ((1761, 1782), 'math.sqrt', 'math.sqrt', (['otherTotal'], {}), '(otherTotal)\n', (1770, 1782), False, 'import math\n'), ((9188, 9233), 'functools.reduce', 'reduce', (['(lambda u1, u2: u1 and u2)', 'paths', '(True)'], {}), '(lambda u1, u2: u1 and u2, paths, True)\n', (9194, 9233), False, 'from functools import reduce\n'), ((7169, 7285), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'line_color', '(x, self._offset_y)', '(x, self._offset_y + self._height * self._cell_size)'], {}), '(screen, line_color, (x, self._offset_y), (x, self.\n _offset_y + self._height * self._cell_size))\n', (7185, 7285), False, 'import pygame\n'), ((7433, 7547), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'line_color', '(self._offset_x, y)', '(self._offset_x + self._width * self._cell_size, y)'], {}), '(screen, line_color, (self._offset_x, y), (self._offset_x +\n self._width * self._cell_size, y))\n', (7449, 7547), False, 'import pygame\n'), ((6630, 6651), 'unit.Unit', 'Unit', (['action.x', '(-1)', '(0)'], {}), '(action.x, -1, 0)\n', (6634, 6651), False, 'from unit import Unit\n'), ((6728, 6753), 'tower.Tower', 'Tower', (['action.x', 'action.y'], {}), '(action.x, action.y)\n', (6733, 6753), False, 'from tower import Tower\n')]
|
import trimesh
import numpy as np
import cv2
import copy
import pickle
import torch
import pdb
def depth2normal(depth, f_pix_x, f_pix_y=None):
'''
To compute a normal map from the depth map
Input:
- depth: torch.Tensor (H, W)
- f_pix_x: K[0, 0]
- f_pix_y: K[1, 1]
Return:
- normal: torch.Tensor (H, W, 3)
'''
if f_pix_y is None:
f_pix_y = f_pix_x
h, w = depth.shape
eps = 1e-12
bg_flag = (depth > 1e5) | (depth == 0)
depth[bg_flag] = 0.0
depth_left, depth_right, depth_up, depth_down = torch.zeros(h, w), torch.zeros(h, w), torch.zeros(h, w), torch.zeros(h, w)
if depth.get_device() != -1:
device_id = depth.get_device()
depth_left, depth_right, depth_up, depth_down = depth_left.to(device_id), depth_right.to(device_id), depth_up.to(device_id), depth_down.to(device_id)
depth_left[:, 1:w-1] = depth[:, :w-2].clone()
depth_right[:, 1:w-1] = depth[:, 2:].clone()
depth_up[1:h-1, :] = depth[:h-2, :].clone()
depth_down[1:h-1, :] = depth[2:, :].clone()
dzdx = (depth_right - depth_left) * f_pix_x / 2.0
dzdy = (depth_down - depth_up) * f_pix_y / 2.0
normal = torch.stack([dzdx, dzdy, -torch.ones_like(dzdx)]).permute(1, 2, 0)
normal_length = torch.norm(normal, p=2, dim=2)
    normal = normal / (normal_length + eps)[:, :, None]
normal[bg_flag] = 0.0
return normal
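# Example usage (a minimal sketch; the focal length below is a made-up value):
#   depth = torch.rand(480, 640) * 5.0            # synthetic depth map
#   normal = depth2normal(depth, f_pix_x=500.0)   # (480, 640, 3) unit normals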
def quad2rotation(quad):
'''
input: torch.Tensor (4)
'''
bs = quad.shape[0]
qr, qi, qj, qk = quad[:,0], quad[:,1], quad[:,2], quad[:,3]
rot_mat = torch.zeros(bs, 3, 3).to(quad.get_device())
rot_mat[:,0,0] = 1 - 2 * (qj ** 2 + qk ** 2)
rot_mat[:,0,1] = 2 * (qi * qj - qk * qr)
rot_mat[:,0,2] = 2 * (qi * qk + qj * qr)
rot_mat[:,1,0] = 2 * (qi * qj + qk * qr)
rot_mat[:,1,1] = 1 - 2 * (qi ** 2 + qk ** 2)
rot_mat[:,1,2] = 2 * (qj * qk - qi * qr)
rot_mat[:,2,0] = 2 * (qi * qk - qj * qr)
rot_mat[:,2,1] = 2 * (qj * qk + qi * qr)
rot_mat[:,2,2] = 1 - 2 * (qi ** 2 + qj ** 2)
return rot_mat
def get_camera_from_tensor(inputs):
N = len(inputs.shape)
if N == 1:
inputs = inputs.unsqueeze(0)
quad, T = inputs[:,:4], inputs[:,4:]
R = quad2rotation(quad)
RT = torch.cat([R, T[:,:,None]], 2)
if N == 1:
RT = RT[0]
return RT
def get_tensor_from_camera(RT):
gpu_id = -1
if type(RT) == torch.Tensor:
        if RT.get_device() != -1:
            gpu_id = RT.get_device()  # remember the source device before moving to CPU
            RT = RT.detach().cpu()
RT = RT.numpy()
from mathutils import Matrix
R, T = RT[:,:3], RT[:,3]
rot = Matrix(R)
quad = rot.to_quaternion()
tensor = np.concatenate([quad, T], 0)
tensor = torch.from_numpy(tensor).float()
if gpu_id != -1:
tensor = tensor.to(gpu_id)
return tensor
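# Round-trip sketch relating the two helpers above: a 3x4 [R|t] pose maps to a
# 7-vector (quaternion + translation) and back. Note quad2rotation moves its
# output to quad.get_device(), so this assumes CUDA tensors:
#   RT = torch.eye(3, 4).cuda()
#   vec = get_tensor_from_camera(RT)       # ~ tensor([1., 0., 0., 0., 0., 0., 0.])
#   RT_back = get_camera_from_tensor(vec)  # recovers the original pose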
def downsize_camera_intrinsic(intrinsic, factor):
'''
Input:
- intrinsic type: np.array (3,3)
- factor int
'''
img_h, img_w = int(2 * intrinsic[1,2]), int(2 * intrinsic[0,2])
img_h_new, img_w_new = img_h / factor, img_w / factor
if (img_h_new - round(img_h_new)) > 1e-12 or (img_w_new - round(img_w_new)) > 1e-12:
raise ValueError('The image size {0} should be divisible by the factor {1}.'.format((img_h, img_w), factor))
intrinsic_new = copy.deepcopy(intrinsic)
intrinsic_new[0,:] = intrinsic[0,:] / factor
intrinsic_new[1,:] = intrinsic[1,:] / factor
return intrinsic_new
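# Example (sketch): halving a 640x480 intrinsic matrix.
#   K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
#   K_half = downsize_camera_intrinsic(K, 2)  # focal lengths and principal point halved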
def sample_points_from_mesh(mesh, N=30000):
'''
Return:
-- points: np.array (N, 3)
'''
points = trimesh.sample.sample_surface(mesh, N)[0]
return points
def transform_point_cloud(points):
'''
solve the mismatch between the point cloud coordinate and the mesh obj.
'''
points_new = copy.deepcopy(points)
points_new[:,1] = -points[:,2]
points_new[:,2] = points[:,1]
return points_new
def read_pickle(fname):
with open(fname, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
def save_render_output(render_output, fname):
depth_rendered, normal_rendered, valid_mask_rendered, _ = render_output
output = {}
output['depth'] = depth_rendered.detach().cpu().numpy()
output['normal'] = normal_rendered.detach().cpu().numpy()
output['valid_mask'] = valid_mask_rendered.detach().cpu().numpy()
save_pkl(output, fname)
def save_pkl(data, fname):
with open(fname, 'wb') as f:
pickle.dump(data, f)
|
[
"torch.ones_like",
"copy.deepcopy",
"trimesh.sample.sample_surface",
"pickle.dump",
"torch.norm",
"torch.cat",
"pickle.load",
"torch.zeros",
"mathutils.Matrix",
"numpy.concatenate",
"torch.from_numpy"
] |
[((1268, 1298), 'torch.norm', 'torch.norm', (['normal'], {'p': '(2)', 'dim': '(2)'}), '(normal, p=2, dim=2)\n', (1278, 1298), False, 'import torch\n'), ((2244, 2276), 'torch.cat', 'torch.cat', (['[R, T[:, :, None]]', '(2)'], {}), '([R, T[:, :, None]], 2)\n', (2253, 2276), False, 'import torch\n'), ((2607, 2616), 'mathutils.Matrix', 'Matrix', (['R'], {}), '(R)\n', (2613, 2616), False, 'from mathutils import Matrix\n'), ((2661, 2689), 'numpy.concatenate', 'np.concatenate', (['[quad, T]', '(0)'], {}), '([quad, T], 0)\n', (2675, 2689), True, 'import numpy as np\n'), ((3298, 3322), 'copy.deepcopy', 'copy.deepcopy', (['intrinsic'], {}), '(intrinsic)\n', (3311, 3322), False, 'import copy\n'), ((3768, 3789), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (3781, 3789), False, 'import copy\n'), ((561, 578), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (572, 578), False, 'import torch\n'), ((580, 597), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (591, 597), False, 'import torch\n'), ((599, 616), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (610, 616), False, 'import torch\n'), ((618, 635), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (629, 635), False, 'import torch\n'), ((3563, 3601), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['mesh', 'N'], {}), '(mesh, N)\n', (3592, 3601), False, 'import trimesh\n'), ((3954, 3987), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3965, 3987), False, 'import pickle\n'), ((4433, 4453), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (4444, 4453), False, 'import pickle\n'), ((1571, 1592), 'torch.zeros', 'torch.zeros', (['bs', '(3)', '(3)'], {}), '(bs, 3, 3)\n', (1582, 1592), False, 'import torch\n'), ((2703, 2727), 'torch.from_numpy', 'torch.from_numpy', (['tensor'], {}), '(tensor)\n', (2719, 2727), False, 'import torch\n'), ((1207, 1228), 'torch.ones_like', 'torch.ones_like', (['dzdx'], {}), '(dzdx)\n', (1222, 1228), False, 'import torch\n')]
|
import display
import board
import robot
import config as conf
import sys
import pygame
from pygame.locals import *
import time
class Checkers:
def __init__(self):
self.display = display.Display()
self.board = board.Board()
self.set_difficulty = 0
self.turn = None
self.valid_moves = []
self.curr_piece = None
self.Rbt_noMove = 0
self.Hum_noMove = 0
self._window()
def _window(self):
'''
Initialize a GUI window
'''
pygame.init()
pygame.display.set_caption("Smart Checkers Robot")
def _changeTurn(self):
'''
Change self.turn to another player
Reset variables
'''
self.turn = "black" if self.turn == "white" else "white"
self.curr_piece = None
self.valid_moves = []
def _has_move(self, player):
'''
Valid move check
        Return whether the current player has a move (bool)
'''
        for i in range(6):
            for j in range(6):
grid = self.board.checkerBoard[i][j]
if grid.color == "B"\
and grid.piece\
and grid.piece.player == player\
and self.board.valid_moves([i, j]):
return True
return False
def _check_winner(self):
'''
Check which player wins the game
Print out the message
'''
if self.board.white_piece_Num > self.board.black_piece_Num:
self.display.show_msg("HaHa You Lose! Click Right Key to Restart")
pygame.display.update()
elif self.board.white_piece_Num < self.board.black_piece_Num:
self.display.show_msg("Congratulation, You Win! Click Right Key to Restart")
pygame.display.update()
else:
self.display.show_msg("Draw! Click Right Key to Restart")
pygame.display.update()
def _restart(self):
'''
Restart game, reset all global variables
'''
self.display = display.Display()
self.board = board.Board()
self.turn = None
self.valid_moves = []
self.curr_piece = None
self.Rbt_noMove = 0
self.Hum_noMove = 0
self.set_difficulty = 0
self._window()
def _choose_AI_level(self):
'''
        Keep monitoring pygame event until an AI level is chosen
'''
self.display.show_msg("Choose AI level: Easy-1, Mid-2, Hard-3")
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.unicode == '1':
self.robot = robot.Robot(conf.DEPTH_EASY)
self.set_difficulty = 1
elif event.unicode == '2':
self.robot = robot.Robot(conf.DEPTH_MID)
self.set_difficulty = 1
elif event.unicode == '3':
self.robot = robot.Robot(conf.DEPTH_HARD)
self.set_difficulty = 1
def _choose_first_move(self):
'''
        Keep monitoring pygame event until the first player is chosen
'''
self.display.show_msg("Who goes first? 1: You / 2: AI")
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.unicode == '1':
self.turn = "black"
self.display = display.Display()
elif event.unicode == '2':
self.turn = "white"
self.display = display.Display()
def game(self):
'''
        Checker rules and actions
Loop this function until exit
'''
def check_move():
'''
            Check whether a valid move exists for the current player
            If it doesn't exist, set self.Rbt_noMove/self.Hum_noMove to 1, then change self.turn
            If it does exist, reset self.Rbt_noMove/self.Hum_noMove to 0, and continue
            If neither player has a move, game over. Call function _check_winner().
'''
if self.Hum_noMove and self.Rbt_noMove:
self._check_winner()
if pygame.mouse.get_pressed() == conf.RIGHTKEY:
self._restart()
else: # if current player has move, reset Rbt_noMove and Hum_noMove
if self._has_move(self.turn):
if self.turn == "black":
self.Rbt_noMove = 0
elif self.turn == "white":
self.Hum_noMove = 0
else: # if current player cannot move, change turn
if self.turn == "black":
self.Hum_noMove = 1
self._changeTurn()
elif self.turn == "white":
self.Rbt_noMove = 1
self._changeTurn()
# At the beginning of game, select difficulty
# and which player moves first
if self.turn is None:
if self.set_difficulty == 0: # choose AI level
self._choose_AI_level()
else: # choose who goes first
self._choose_first_move()
# Check whether current player has move
# If yes, continue. Otherwise, change turn.
check_move()
if self.turn == "white":
action = self.robot.choose_move(self.board) # choose action
if action:
time.sleep(0.5)
piece, move = action
if abs(piece[0] - move[0]) == 2: # capture move
                    self.board.remove([(piece[0] + move[0]) // 2, (piece[1] + move[1]) // 2])
self.board.move(piece, move)
self.curr_piece = move
time.sleep(0.5)
self._changeTurn()
# Human move
check_move()
self.mouse = self.display.mouse_to_grid(pygame.mouse.get_pos())
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == MOUSEBUTTONDOWN:
jump_step = self.board.check_jump(self.turn)
grid = self.board.checkerBoard[self.mouse[0]][self.mouse[1]]
# select piece
if grid.piece and grid.piece.player == self.turn:
if jump_step == []:
self.curr_piece = self.mouse
self.valid_moves = self.board.valid_moves(self.curr_piece)
else:
if self.mouse in jump_step:
self.curr_piece = self.mouse
self.valid_moves = self.board.valid_moves(self.curr_piece)
# choose a move
elif self.curr_piece and self.mouse in self.valid_moves:
self.board.move(self.curr_piece, self.mouse)
# if capture, then remove an adversary piece
if abs(self.curr_piece[0] - self.mouse[0]) == 2:
                        piece_rmv = ((self.curr_piece[0]+self.mouse[0])//2, (self.curr_piece[1]+self.mouse[1])//2)
self.board.remove(piece_rmv)
self._changeTurn()
def main(self):
while True:
self.game()
            self.display.update_board(self.board, self.curr_piece, self.valid_moves)
checkers = Checkers()
checkers.main()
|
[
"pygame.quit",
"robot.Robot",
"pygame.mouse.get_pressed",
"pygame.event.get",
"pygame.init",
"time.sleep",
"display.Display",
"board.Board",
"pygame.display.update",
"pygame.mouse.get_pos",
"pygame.display.set_caption",
"sys.exit"
] |
[((186, 203), 'display.Display', 'display.Display', ([], {}), '()\n', (201, 203), False, 'import display\n'), ((219, 232), 'board.Board', 'board.Board', ([], {}), '()\n', (230, 232), False, 'import board\n'), ((454, 467), 'pygame.init', 'pygame.init', ([], {}), '()\n', (465, 467), False, 'import pygame\n'), ((470, 520), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Smart Checkers Robot"""'], {}), "('Smart Checkers Robot')\n", (496, 520), False, 'import pygame\n'), ((1706, 1723), 'display.Display', 'display.Display', ([], {}), '()\n', (1721, 1723), False, 'import display\n'), ((1739, 1752), 'board.Board', 'board.Board', ([], {}), '()\n', (1750, 1752), False, 'import board\n'), ((2080, 2103), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2101, 2103), False, 'import pygame\n'), ((2120, 2138), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2136, 2138), False, 'import pygame\n'), ((2711, 2734), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2732, 2734), False, 'import pygame\n'), ((2751, 2769), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2767, 2769), False, 'import pygame\n'), ((4900, 4918), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4916, 4918), False, 'import pygame\n'), ((1315, 1338), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1336, 1338), False, 'import pygame\n'), ((4860, 4882), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4880, 4882), False, 'import pygame\n'), ((1487, 1510), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1508, 1510), False, 'import pygame\n'), ((1586, 1609), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1607, 1609), False, 'import pygame\n'), ((2170, 2183), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2181, 2183), False, 'import pygame\n'), ((2188, 2198), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2196, 2198), False, 'import sys\n'), ((2801, 2814), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2812, 2814), False, 'import pygame\n'), ((2819, 2829), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2827, 2829), False, 'import sys\n'), ((4510, 4525), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4520, 4525), False, 'import time\n'), ((4744, 4759), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4754, 4759), False, 'import time\n'), ((4951, 4964), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4962, 4964), False, 'import pygame\n'), ((4969, 4979), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4977, 4979), False, 'import sys\n'), ((2276, 2304), 'robot.Robot', 'robot.Robot', (['conf.DEPTH_EASY'], {}), '(conf.DEPTH_EASY)\n', (2287, 2304), False, 'import robot\n'), ((2934, 2951), 'display.Display', 'display.Display', ([], {}), '()\n', (2949, 2951), False, 'import display\n'), ((3538, 3564), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (3562, 3564), False, 'import pygame\n'), ((2383, 2410), 'robot.Robot', 'robot.Robot', (['conf.DEPTH_MID'], {}), '(conf.DEPTH_MID)\n', (2394, 2410), False, 'import robot\n'), ((3028, 3045), 'display.Display', 'display.Display', ([], {}), '()\n', (3043, 3045), False, 'import display\n'), ((2489, 2517), 'robot.Robot', 'robot.Robot', (['conf.DEPTH_HARD'], {}), '(conf.DEPTH_HARD)\n', (2500, 2517), False, 'import robot\n')]
|
from pathlib import Path
from typing import Callable
from typing import Iterable
from typing import Tuple
from typing import Union
from django.conf import settings
from django.utils.autoreload import autoreload_started
from django.utils.autoreload import StatReloader
from .camel_case import camel_to_underscore
from .camel_case import camelize
from .camel_case import underscore_to_camel
from .camel_case import underscoreize
from .date import python_to_django_date_format
from .get_firstparty_apps import get_firstparty_apps
__all__ = [
'camel_to_underscore',
'camelize',
'get_firstparty_apps',
'underscore_to_camel',
'underscoreize',
'python_to_django_date_format',
'retry_fn',
]
def retry_fn(fn: Callable, allowable_exceptions: Tuple, retry_count: int=5):
"""
Call fn, retrying if exception type in allowable_exceptions is raised up to retry_count times
"""
for i in range(0, retry_count):
try:
return fn()
except allowable_exceptions:
if i == retry_count - 1:
raise
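# Example (illustrative; `flaky_call` is a hypothetical zero-arg callable):
#   result = retry_fn(flaky_call, (ConnectionError, TimeoutError), retry_count=3)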
def add_autoreload_extra_files(extra_files: Iterable[Union[str, Path]]):
if not settings.DEBUG:
return
try:
from werkzeug.serving import is_running_from_reloader
except ImportError:
is_running_from_reloader = None
if is_running_from_reloader and is_running_from_reloader():
# we're running from the main runserver_plus process
if not hasattr(settings, 'RUNSERVER_PLUS_EXTRA_FILES'):
settings.RUNSERVER_PLUS_EXTRA_FILES = []
settings.RUNSERVER_PLUS_EXTRA_FILES += extra_files
else:
# either:
# - we're using the runserver (django) server
# - we're running from a child runserver_plus thread. If this is the case
# then the django autoreload signal will do nothing: working as intended
def add_watched_files(sender: StatReloader, **kwargs):
sender.extra_files.update([Path(p) for p in extra_files])
autoreload_started.connect(add_watched_files)
|
[
"pathlib.Path",
"werkzeug.serving.is_running_from_reloader",
"django.utils.autoreload.autoreload_started.connect"
] |
[((1367, 1393), 'werkzeug.serving.is_running_from_reloader', 'is_running_from_reloader', ([], {}), '()\n', (1391, 1393), False, 'from werkzeug.serving import is_running_from_reloader\n'), ((2027, 2072), 'django.utils.autoreload.autoreload_started.connect', 'autoreload_started.connect', (['add_watched_files'], {}), '(add_watched_files)\n', (2053, 2072), False, 'from django.utils.autoreload import autoreload_started\n'), ((1987, 1994), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (1991, 1994), False, 'from pathlib import Path\n')]
|
#!/bin/python3
"""Parse final xml and return an error if there are failures.
"""
import sys
from xml.dom.minidom import parse
dom = parse(sys.argv[1])
errors = 0
failures = 0
for nodes in dom.childNodes:
    num_attrs = nodes.attributes.length
    for node in range(num_attrs):
attr = nodes.attributes.item(node)
print(f" {attr.name} : {attr.value}")
if attr.name == "failures":
failures = int(attr.value)
if attr.name == "errors":
errors = int(attr.value)
print(f"Recorded {errors} errors and {failures} failures in {sys.argv[1]}")
if errors == 0 and failures == 0:
print("Passed, no errors.")
sys.exit(0)
else:
print("Failed, there are errors in the test run.")
sys.exit(1)
|
[
"xml.dom.minidom.parse",
"sys.exit"
] |
[((135, 153), 'xml.dom.minidom.parse', 'parse', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (140, 153), False, 'from xml.dom.minidom import parse\n'), ((652, 663), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (660, 663), False, 'import sys\n'), ((729, 740), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (737, 740), False, 'import sys\n')]
|
import logging
import requests
logger = logging.getLogger(__name__)
# https://developer.twitter.com/en/docs/authentication/oauth-2-0/user-access-token
class TwitterProvider:
def __init__(self, configs):
self.client_id = configs['client_id']
self.redirect_uri = configs['redirect_uri']
self.scope = configs['scope']
self.accept_users = configs['accept_users']
def get_authorize_uri(self, session):
uri = (f'https://twitter.com/i/oauth2/authorize?response_type=code&'
+ f'client_id={self.client_id}&redirect_uri={self.redirect_uri}&'
+ f'scope={self.scope}&state={session.state}&'
+ f'code_challenge={session.code_challenge}&'
+ f'code_challenge_method={session.code_challenge_method}')
logger.info(uri)
return uri
def get_token(self, args, session):
if not {'state', 'code'}.issubset(args.keys()):
return False
if args['state'] != session.state:
return False
resp = requests.post('https://api.twitter.com/2/oauth2/token',
data={
'code': args['code'],
'grant_type': 'authorization_code',
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'code_verifier': session.code_verifier
}
)
logger.info(resp.json())
session.set_expires_in(resp.json()['expires_in'])
session.access_token = resp.json()['access_token']
session.refresh_token = resp.json()['refresh_token']
return True
def check_user(self, session):
resp = requests.get('https://api.twitter.com/2/users/me',
headers={
'Authorization': f'Bearer {session.access_token}'
}
)
logger.info(resp.json())
if resp.json()['data']['id'] not in self.accept_users:
return False
return True
def refresh_token(self, session):
resp = requests.post('https://api.twitter.com/2/oauth2/token',
data={
'refresh_token': session.refresh_token,
'grant_type': 'refresh_token',
'client_id': self.client_id
}
)
logger.info(resp.json())
session.set_expires_in(resp.json()['expires_in'])
session.access_token = resp.json()['access_token']
session.refresh_token = resp.json()['refresh_token']
return True
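# Example wiring (a sketch; all values are placeholders, and `session` is assumed
# to expose the state / code_challenge / code_verifier attributes used above):
#   provider = TwitterProvider({
#       'client_id': 'YOUR_CLIENT_ID',
#       'redirect_uri': 'https://example.com/callback',
#       'scope': 'tweet.read users.read offline.access',
#       'accept_users': ['12345'],
#   })
#   authorize_url = provider.get_authorize_uri(session)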
|
[
"requests.get",
"requests.post",
"logging.getLogger"
] |
[((43, 70), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (60, 70), False, 'import logging\n'), ((1041, 1280), 'requests.post', 'requests.post', (['"""https://api.twitter.com/2/oauth2/token"""'], {'data': "{'code': args['code'], 'grant_type': 'authorization_code', 'client_id':\n self.client_id, 'redirect_uri': self.redirect_uri, 'code_verifier':\n session.code_verifier}"}), "('https://api.twitter.com/2/oauth2/token', data={'code': args[\n 'code'], 'grant_type': 'authorization_code', 'client_id': self.\n client_id, 'redirect_uri': self.redirect_uri, 'code_verifier': session.\n code_verifier})\n", (1054, 1280), False, 'import requests\n'), ((1666, 1781), 'requests.get', 'requests.get', (['"""https://api.twitter.com/2/users/me"""'], {'headers': "{'Authorization': f'Bearer {session.access_token}'}"}), "('https://api.twitter.com/2/users/me', headers={'Authorization':\n f'Bearer {session.access_token}'})\n", (1678, 1781), False, 'import requests\n'), ((2027, 2198), 'requests.post', 'requests.post', (['"""https://api.twitter.com/2/oauth2/token"""'], {'data': "{'refresh_token': session.refresh_token, 'grant_type': 'refresh_token',\n 'client_id': self.client_id}"}), "('https://api.twitter.com/2/oauth2/token', data={\n 'refresh_token': session.refresh_token, 'grant_type': 'refresh_token',\n 'client_id': self.client_id})\n", (2040, 2198), False, 'import requests\n')]
|
# safecracker
from safecracker import safecracker as sc
from safecracker.safe import zip as safe
from safecracker.tools import mask as sct
from examples import testing
# get safe
safe = safe.Safe("examples/safe/easy.zip")
# get safecracker
pwg = sct.PasswordGenerator("-d", max_len=3) # just digits
safecracker = sc.Safecracker(PasswordGenerator=pwg)
# crack
testing.test(safe, safecracker, ["hello world.txt"])
|
[
"examples.testing.test",
"safecracker.tools.mask.PasswordGenerator",
"safecracker.safecracker.Safecracker",
"safecracker.safe.zip.Safe"
] |
[((187, 222), 'safecracker.safe.zip.Safe', 'safe.Safe', (['"""examples/safe/easy.zip"""'], {}), "('examples/safe/easy.zip')\n", (196, 222), True, 'from safecracker.safe import zip as safe\n'), ((248, 286), 'safecracker.tools.mask.PasswordGenerator', 'sct.PasswordGenerator', (['"""-d"""'], {'max_len': '(3)'}), "('-d', max_len=3)\n", (269, 286), True, 'from safecracker.tools import mask as sct\n'), ((316, 353), 'safecracker.safecracker.Safecracker', 'sc.Safecracker', ([], {'PasswordGenerator': 'pwg'}), '(PasswordGenerator=pwg)\n', (330, 353), True, 'from safecracker import safecracker as sc\n'), ((363, 415), 'examples.testing.test', 'testing.test', (['safe', 'safecracker', "['hello world.txt']"], {}), "(safe, safecracker, ['hello world.txt'])\n", (375, 415), False, 'from examples import testing\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test Fraunhofer estimation.
"""
import numpy as np
from shabanipy.jj.fraunhofer.estimation import guess_current_distribution
def create_fraunhofer_like():
fields = np.linspace(-1, 1, 1001)
return fields, np.abs(np.sinc(8 * (fields - 0.1)))
def create_squid_like():
fields = np.linspace(-1, 1, 1001)
return (
fields,
2 + np.cos(8 * np.pi * (fields + 0.1)) * np.sinc(0.1 * (fields + 0.1)),
)
def validate_fraunhofer(offset, first_node, amplitude, c_dis):
np.testing.assert_almost_equal(offset, 0.1)
assert abs(first_node + 0.025) < 0.05
np.testing.assert_almost_equal(amplitude, 1.0)
np.testing.assert_array_equal(c_dis, np.ones(5) / 20)
def validate_squid(offset, first_node, amplitude, c_dis):
np.testing.assert_almost_equal(offset, -0.1)
assert abs(first_node - 0.025) < 0.05
np.testing.assert_almost_equal(amplitude, 3.0)
np.testing.assert_array_equal(c_dis, np.array([0.625, 0, 0, 0, 0.625]))
def test_guess_current_distribution_fraunhofer():
"""Test identifying a fraunhofer like pattern.
"""
fields, fraunhofer_like_ics = create_fraunhofer_like()
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, fraunhofer_like_ics, 5, 4
)
validate_fraunhofer(offsets, first_nodes, amplitudes, c_dis)
def test_guess_current_distribution_squid():
"""Test identifying a SQUID like pattern.
"""
fields, squid_like_ics = create_squid_like()
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, squid_like_ics, 5, 4
)
validate_squid(offsets, first_nodes, amplitudes, c_dis)
def test_guess_current_distribution_too_small_data():
"""Test handling data which do not comport enough points.
"""
fields = np.linspace(-1, 1, 201)
fraunhofer_like_ics = np.abs(np.sinc(2 * (fields - 0.1)))
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, fraunhofer_like_ics, 5, 4
)
np.testing.assert_almost_equal(offsets, 0.1)
assert amplitudes == 1.0
def test_2D_inputs():
"""Test that we can handle properly 2D inputs."""
fields_f, fraunhofer_like_ics = create_fraunhofer_like()
fields_s, squid_like_ics = create_squid_like()
# 2D inputs
fields = np.empty((2, len(fields_f)))
fields[0] = fields_f
fields[1] = fields_s
ics = np.empty_like(fields)
ics[0] = fraunhofer_like_ics
ics[1] = squid_like_ics
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, ics, 5, 4
)
for o, f, a, cd, validator in zip(
offsets, first_nodes, amplitudes, c_dis, (validate_fraunhofer, validate_squid)
):
validator(o, f, a, cd)
def test_3D_inputs():
"""Test that we can handle properly 3D inputs."""
fields_f, fraunhofer_like_ics = create_fraunhofer_like()
fields_s, squid_like_ics = create_squid_like()
# 3D inputs
fields = np.empty((2, 2, len(fields_f)))
fields[0, :] = fields_f
fields[1, :] = fields_s
ics = np.empty_like(fields)
ics[0, :] = fraunhofer_like_ics
ics[1, :] = squid_like_ics
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, ics, 5, 4
)
for o, f, a, cd, validator in zip(
offsets, first_nodes, amplitudes, c_dis, (validate_fraunhofer, validate_squid)
):
validator(o[0], f[0], a[0], cd[0])
validator(o[1], f[1], a[1], cd[1])
|
[
"numpy.testing.assert_almost_equal",
"shabanipy.jj.fraunhofer.estimation.guess_current_distribution",
"numpy.empty_like",
"numpy.ones",
"numpy.sinc",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] |
[((556, 580), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1001)'], {}), '(-1, 1, 1001)\n', (567, 580), True, 'import numpy as np\n'), ((676, 700), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1001)'], {}), '(-1, 1, 1001)\n', (687, 700), True, 'import numpy as np\n'), ((885, 928), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['offset', '(0.1)'], {}), '(offset, 0.1)\n', (915, 928), True, 'import numpy as np\n'), ((975, 1021), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['amplitude', '(1.0)'], {}), '(amplitude, 1.0)\n', (1005, 1021), True, 'import numpy as np\n'), ((1144, 1188), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['offset', '(-0.1)'], {}), '(offset, -0.1)\n', (1174, 1188), True, 'import numpy as np\n'), ((1235, 1281), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['amplitude', '(3.0)'], {}), '(amplitude, 3.0)\n', (1265, 1281), True, 'import numpy as np\n'), ((1576, 1637), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'fraunhofer_like_ics', '(5)', '(4)'], {}), '(fields, fraunhofer_like_ics, 5, 4)\n', (1602, 1637), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((1916, 1972), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'squid_like_ics', '(5)', '(4)'], {}), '(fields, squid_like_ics, 5, 4)\n', (1942, 1972), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((2188, 2211), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(201)'], {}), '(-1, 1, 201)\n', (2199, 2211), True, 'import numpy as np\n'), ((2321, 2382), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'fraunhofer_like_ics', '(5)', '(4)'], {}), '(fields, fraunhofer_like_ics, 5, 4)\n', (2347, 2382), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((2401, 2445), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['offsets', '(0.1)'], {}), '(offsets, 0.1)\n', (2431, 2445), True, 'import numpy as np\n'), ((2785, 2806), 'numpy.empty_like', 'np.empty_like', (['fields'], {}), '(fields)\n', (2798, 2806), True, 'import numpy as np\n'), ((2915, 2960), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'ics', '(5)', '(4)'], {}), '(fields, ics, 5, 4)\n', (2941, 2960), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((3459, 3480), 'numpy.empty_like', 'np.empty_like', (['fields'], {}), '(fields)\n', (3472, 3480), True, 'import numpy as np\n'), ((3595, 3640), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'ics', '(5)', '(4)'], {}), '(fields, ics, 5, 4)\n', (3621, 3640), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((1323, 1356), 'numpy.array', 'np.array', (['[0.625, 0, 0, 0, 0.625]'], {}), '([0.625, 0, 0, 0, 0.625])\n', (1331, 1356), True, 'import numpy as np\n'), ((2245, 2272), 'numpy.sinc', 'np.sinc', (['(2 * (fields - 0.1))'], {}), '(2 * (fields - 0.1))\n', (2252, 2272), True, 'import numpy as np\n'), ((607, 634), 'numpy.sinc', 'np.sinc', (['(8 * (fields - 0.1))'], {}), '(8 * (fields - 0.1))\n', (614, 634), True, 'import numpy as np\n'), ((1063, 1073), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1070, 1073), True, 'import numpy as np\n'), ((742, 776), 'numpy.cos', 'np.cos', (['(8 * np.pi * (fields + 0.1))'], {}), '(8 * np.pi * (fields + 0.1))\n', (748, 776), True, 'import numpy as np\n'), ((779, 808), 'numpy.sinc', 'np.sinc', (['(0.1 * (fields + 0.1))'], {}), '(0.1 * (fields + 0.1))\n', (786, 808), True, 'import numpy as np\n')]
|
import re
from collections import defaultdict
from django.db.models import OneToOneRel
from django.core.exceptions import ValidationError
from django.conf import settings
from django.db import IntegrityError
from peeringdb import resource
import peeringdb_server.models as models
from django_peeringdb.client_adaptor.backend import (
Backend as BaseBackend,
reftag_to_cls,
)
__version__ = "1.0"
class Backend(BaseBackend):
"""
Custom tailored peeringdb_server backend for the
peeringdb client we can use to sync data from
another peeringdb server instance.
We can inherit most of the official django-peeringdb
    Backend, however we need to bind resources to the peeringdb
models and fix some issues with validation and relationships.
"""
# map peeringdb_server models to peeringdb client resources
RESOURCE_MAP = {
resource.Facility: models.Facility,
resource.InternetExchange: models.InternetExchange,
resource.InternetExchangeFacility: models.InternetExchangeFacility,
resource.InternetExchangeLan: models.IXLan,
resource.InternetExchangeLanPrefix: models.IXLanPrefix,
resource.Network: models.Network,
resource.NetworkContact: models.NetworkContact,
resource.NetworkFacility: models.NetworkFacility,
resource.NetworkIXLan: models.NetworkIXLan,
resource.Organization: models.Organization,
}
def get_resource(self, cls):
"""
Override this so it doesn't hard fail on a non
existing resource. As sync will try to obtain resources
for relationships in peeringdb_server that aren't
really resources (sponsorships, partnerships etc.)
"""
return self.CONCRETE_MAP.get(cls)
@reftag_to_cls
def get_fields(self, concrete):
"""
        Sync currently doesn't support OneToOne relationships
and none of the ones that exist in peeringdb_server
are relevant to the data we want to sync.
However they still get processed, causing errors.
Here we make sure to not process OneToOneRel relationships
"""
_fields = super(Backend, self).get_fields(concrete)
fields = []
for field in _fields:
if isinstance(field, OneToOneRel):
continue
fields.append(field)
return fields
def set_relation_many_to_many(self, obj, field_name, objs):
"""
Sync will try to process sponsorship_set off of `org`
and run into an error, so we make sure to ignore it
        when handling many-to-many relationships during sync
"""
if field_name in ["sponsorship_set"]:
return
return super(Backend, self).set_relation_many_to_many(obj, field_name, objs)
def clean(self, obj):
"""
We override the object validation here to handle
common validation issues that exist in the official production
        db, where validators are set, but data has not yet been
fixed retroactively.
These instances are:
- info_prefixes4 on networks (adjust data)
- info_prefixes6 on networks (adjust data)
- overlapping prefixes on ixlan prefixes (skip validation)
- invalid prefix length on ixlan prefixes (skip validation)
- ipaddr4 out of prefix address space on netixlans (skip validation)
- ipaddr6 out of prefix address space on netixlans (skip validation)
"""
if isinstance(obj, models.Network):
obj.info_prefixes4 = min(
obj.info_prefixes4, settings.DATA_QUALITY_MAX_PREFIX_V4_LIMIT
)
obj.info_prefixes6 = min(
obj.info_prefixes6, settings.DATA_QUALITY_MAX_PREFIX_V6_LIMIT
)
obj.clean_fields()
obj.validate_unique()
if not isinstance(
obj, (models.IXLanPrefix, models.NetworkIXLan, models.NetworkFacility)
):
obj.clean()
def save(self, obj):
if obj.HandleRef.tag == "ix":
obj.save(create_ixlan=False)
else:
obj.save()
def detect_uniqueness_error(self, exc):
"""
Parse error, and if it describes any violations of a uniqueness constraint,
return the corresponding fields, else None
"""
pattern = r"(\w+) with this (\w+) already exists"
fields = []
if isinstance(exc, IntegrityError):
return self._detect_integrity_error(exc)
assert isinstance(exc, ValidationError), TypeError
error_dict = getattr(exc, "error_dict", getattr(exc, "message_dict", {}))
for name, err in error_dict.items():
if re.search(pattern, str(err)):
fields.append(name)
return fields or None
def detect_missing_relations(self, obj, exc):
"""
Parse error messages and collect the missing-relationship errors
as a dict of Resource -> {id set}
"""
missing = defaultdict(set)
error_dict = getattr(exc, "error_dict", getattr(exc, "message_dict", {}))
for name, err in error_dict.items():
# check if it was a relationship that doesnt exist locally
pattern = r".+ with id (\d+) does not exist.+"
m = re.match(pattern, str(err))
if m:
field = obj._meta.get_field(name)
res = self.get_resource(field.related_model)
missing[res].add(int(m.group(1)))
return missing
|
[
"collections.defaultdict"
] |
[((5039, 5055), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (5050, 5055), False, 'from collections import defaultdict\n')]
|
# Copyright (C) 2013-2014 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains PrioritizingThreadPool, an implementation of an
interface similar to that of concurrent.futures.Executor. See:
https://docs.python.org/dev/library/concurrent.futures.html
"""
import collections
import concurrent.futures
import threading
# Monkeypatch ThreadPoolExecutor with relevant logic from the patch for
# Python issue 16284. See:
#
# <http://bugs.python.org/issue16284>
# <http://hg.python.org/cpython/rev/70cef0a160cf/>
#
# We may need to apply the relevant parts of the patches to
# ProcessPoolExecutor and multiprocessing.Queue if we ever start using
# those, too.
def _non_leaky_worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if concurrent.futures.thread._shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except BaseException:
concurrent.futures.thread._base.LOGGER.critical('Exception in worker', exc_info=True)
def _chain_result(outer_future):
"""Returns a callable that can be supplied to Future.add_done_callback
to propagate a future's result to outer_future.
"""
def f(inner_future):
try:
result = inner_future.result()
except BaseException as e:
outer_future.set_exception(e)
else:
outer_future.set_result(result)
return f
concurrent.futures.thread._worker = _non_leaky_worker
def _run_callable_with_postamble(postamble, callable_, *args, **kwargs):
"""Returns a callable of no args that invokes callable_ (with the
specified args and kwargs) and then invokes postamble (with no
args).
The callable returns the result of (or exception thrown by)
callable_.
"""
def fn():
try:
return callable_(*args, **kwargs)
finally:
postamble()
return fn
class PrioritizingThreadPool(object):
"""Presents an abstraction similar to that of
concurrent.futures.Executor except that multiple clients may write
their tasks to separate queues (which may be distinguished by any
hashable object). Tasks are handled by different threads (in the
same process) simultaneously. The tasks in each queue are processed
in order; tasks written to different queues are processed as
follows:
When a task is submitted using submit_to_queue the client may
specify a priority_fn to go along with that task. Each time a worker
thread is ready to start a task, the priority_fn of each candidate
task (the head of each queue) is called, and the task that returns
the lowest value is chosen. (This is more generic than a priority
queue in that the priority value of each task is not a static value
that must be submitted at the time that the task is enqueued.)
When a task is enqueued, we return a Future for the result of that
task.
"""
def __init__(self, max_workers):
self._pool = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
self._tasks = threading.Semaphore(max_workers)
self._queue_lock = threading.Lock()
# Invariant: self._queues is a mapping of queue_id to a NONEMPTY
# list of Futures representing yet-unscheduled items in that
# queue. (This invariant may only be violated by threads that
# are holding _queue_lock.)
#
# Each Future is the future we gave to the client, augmented
# with:
# (1) a field "args" containing a tuple
# (callable, args, kwargs), and
# (2) a field "priority_fn" with the priority function for that
# task.
self._queues = {}
def _submit_one(self, callable_, *args, **kwargs):
"""Starts the next task (which, when complete, will, in turn, start one
more task when finished, which will, in turn, etc.). Returns a
future object corresponding to the newly started task.
Thread safety note: assumes that the caller has already reserved
a worker using self._tasks.
"""
def postamble():
self._tasks.release()
self._maybe_schedule_task()
return self._pool.submit(_run_callable_with_postamble(postamble, callable_, *args, **kwargs))
def _maybe_schedule_task(self):
"""Starts a task if there is an available worker to serve it.
Thread safe.
"""
if self._tasks.acquire(blocking=False):
# Atomically remove the item from the queue and feed it to
# the ThreadPoolExecutor.
self._queue_lock.acquire()
try:
outer_future = self._next()
except StopIteration:
# Oops, there is in fact no task to be served, so we
# won't be tying up a worker after all.
self._tasks.release()
else:
callable_, args, kwargs = outer_future.args
inner_future = self._submit_one(callable_, *args, **kwargs)
# Now that we have the real future (inner_future), chain
# its result to what we provided to our client
inner_future.add_done_callback(_chain_result(outer_future))
finally:
self._queue_lock.release()
def _next(self):
"""Pop the highest priority task.
Returns the Future corresponding to that task (and removes it
from the queue of items to be scheduled), or raises
StopIteration if no tasks are available.
Thread safety note: assumes the caller is holding
self._queue_lock (the caller will probably also want to hold the
same lock while scheduling the result of this method, so as to
make the pop+schedule operation atomic).
"""
        if self._queue_lock.acquire(False):
            self._queue_lock.release()  # don't leak the lock we just grabbed
            raise AssertionError('Expected _queue_lock to be held here')
queue_ids = list(self._queues.keys())
if not queue_ids:
raise StopIteration()
# Find the queue whose head item has the lowest priority value
best_queue_id = None
best_priority_value = None
for candidate_queue_id in queue_ids:
selected_queue = self._queues[candidate_queue_id]
if not len(selected_queue):
raise AssertionError('Invariant violation: queue %r is empty' % (candidate_queue_id,))
head_of_queue = selected_queue[0]
priority_value = head_of_queue.priority_fn() if head_of_queue.priority_fn else 0
if best_queue_id is None or priority_value < best_priority_value:
best_queue_id = candidate_queue_id
best_priority_value = priority_value
queue_id = best_queue_id
assert queue_id is not None
next_task = self._queues[queue_id].popleft()
if len(self._queues[queue_id]) == 0:
del self._queues[queue_id]
return next_task
def submit(self, callable_, *args, **kwargs):
"""For compatibility with code that was previously using
ThreadPoolExecutor directly, provides a similar interface to the
submit method of that class.
Requests submitted in this way have a priority of 0 and go into
a single default queue.
Returns a Future corresponding to the specified task.
"""
return self.submit_to_queue('', None, callable_, *args, **kwargs)
def submit_to_queue(self, queue_id, priority_fn, callable_, *args, **kwargs):
"""Adds a new task to the end of the specified queue.
Returns a Future corresponding to the specified task.
:param queue_id: indicates which queue this request should go at
the end of
:param priority_fn: a function of no args. Whenever a worker is
available, the task whose priority_fn returns the lowest value
is selected. None may also be provided in which case the
priority_fn is considered to return 0.
"""
if queue_id is None:
# In _next, None is used as a sentinel value
raise AssertionError('queue_id may not be None')
outer_future = concurrent.futures._base.Future()
outer_future.priority_fn = priority_fn
outer_future.args = (callable_, args, kwargs)
with self._queue_lock:
if queue_id not in self._queues:
self._queues[queue_id] = collections.deque()
self._queues[queue_id].append(outer_future)
# Start the task now if there is a worker that can serve it.
self._maybe_schedule_task()
return outer_future
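# Example usage (a sketch; `slow_task` and `fast_task` are hypothetical callables):
#   pool = PrioritizingThreadPool(max_workers=4)
#   f1 = pool.submit_to_queue('batch', lambda: 1, slow_task)
#   f2 = pool.submit_to_queue('interactive', lambda: 0, fast_task)  # lower value wins
#   f1.result(); f2.result()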
|
[
"threading.Lock",
"threading.Semaphore",
"collections.deque"
] |
[((4220, 4252), 'threading.Semaphore', 'threading.Semaphore', (['max_workers'], {}), '(max_workers)\n', (4239, 4252), False, 'import threading\n'), ((4280, 4296), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4294, 4296), False, 'import threading\n'), ((9615, 9634), 'collections.deque', 'collections.deque', ([], {}), '()\n', (9632, 9634), False, 'import collections\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ecstasy
import oauth2client.file
import pyperclip
import pytest
import requests
import threading
try:
from Queue import Queue
except ImportError:
from queue import Queue
from collections import namedtuple
import tests.paths
import lnk.googl.link
VERSION = 1
KEY = '<KEY>'
API = 'https://www.googleapis.com/urlshortener'
LOCK = threading.Lock()
QUEUE = Queue()
def shorten(url='http://python.org'):
headers = {'content-type': 'application/json'}
data = '{{"longUrl": "{0}"}}'.format(url)
params = dict(key=KEY)
response = requests.post('{0}/v{1}/url'.format(API, VERSION),
headers=headers,
data=data,
params=params)
return response.json()['id']
def shorten_fmt(destination):
url = QUEUE.get()
short = shorten(url)
formatted = '{0} => {1}'.format(url, short)
LOCK.acquire()
destination.append(formatted)
LOCK.release()
def expand(url='http://goo.gl/Euc5', version=VERSION):
response = requests.get('{0}/v{1}/url'.format(API, version),
params=dict(shortUrl=url, key=KEY))
return response.json()['longUrl']
def expand_fmt(destination):
url = QUEUE.get()
expanded = expand(url)
formatted = '{0} => {1}'.format(url, expanded)
LOCK.acquire()
destination.append(formatted)
LOCK.release()
@pytest.fixture(scope='module')
def fixture():
Fixture = namedtuple('Fixture', [
'link',
'long',
'short',
'bold',
'long_to_short',
'short_to_long'
])
link = lnk.googl.link.Link(raw=True)
link.credentials = oauth2client.file.Storage(tests.paths.CREDENTIALS_PATH)
url = 'https://www.github.com/goldsborough/lnk'
short = shorten(url)
bold = ecstasy.beautify('<{0}>'.format(short), ecstasy.Style.Bold)
long_to_short = '{0} => {1}'.format(url, short)
short_to_long = '{0} => {1}'.format(short, url)
return Fixture(link, url, short, bold, long_to_short, short_to_long)
def test_copy_copies_to_clipboard_if_copy_true(fixture):
fixture.link.copy(True, fixture.short)
assert pyperclip.paste() == fixture.short
def test_copy_copies_only_first_url(fixture):
assert fixture.link.already_copied
fixture.link.copy(True, 'a')
fixture.link.copy(True, 'b')
fixture.link.copy(True, 'c')
assert pyperclip.paste() == fixture.short
def test_copy_copies_to_clipboard_if_copy_false(fixture):
pyperclip.copy('original')
fixture.link.copy(False, fixture.short)
assert pyperclip.paste() == 'original'
def test_copy_makes_copied_url_bold(fixture):
fixture.link.already_copied = False
returned_url = fixture.link.copy(True, fixture.short)
assert returned_url == fixture.bold
def test_get_long_expands_well(fixture):
result = fixture.link.get_long(fixture.short)
assert result == fixture.long
def test_get_short_shortens_well(fixture):
short = fixture.link.get_short(fixture.long)
result = fixture.link.get_long(short)
assert result == fixture.long
def test_shorten_formats_well(fixture):
result = []
fixture.link.queue.put(fixture.long)
fixture.link.shorten(result, False)
result = result[0].split()
assert result[0] == fixture.long
assert result[1] == '=>'
assert result[2].startswith('https://goo.gl/')
def test_expand_formats_well(fixture):
result = []
fixture.link.queue.put(fixture.short)
fixture.link.expand(result, False)
assert result[0] == fixture.short_to_long
def test_shorten_urls_works_for_single_url(fixture):
result = fixture.link.shorten_urls(False, True, [fixture.long])
result = result[0].split()
assert result[0] == fixture.long
assert result[1] == '=>'
expanded = fixture.link.get_long(result[2])
assert expanded == result[0]
def test_shorten_urls_works_for_many_urls(fixture):
urls = [
'http://facebook.com/',
'http://google.com/',
'http://python.org/'
]
result = fixture.link.shorten_urls(False, True, urls)
expected = []
threads = []
for url in urls:
QUEUE.put(url)
thread = threading.Thread(target=shorten_fmt, args=(expected,))
thread.daemon = True
threads.append(thread)
thread.start()
for thread in threads:
thread.join(timeout=10)
print(sorted(result), sorted(expected))
for got, wanted in zip(sorted(result), sorted(expected)):
got = got.split()
wanted = wanted.split()
assert got[0] == wanted[0]
assert got[1] == wanted[1] == '=>'
expanded = fixture.link.get_long(got[2])
assert expanded == got[0] == wanted[0]
def test_expand_urls_works_for_single_url(fixture):
result = fixture.link.expand_urls(False, [fixture.short])
assert result[0] == fixture.short_to_long
def test_expand_urls_works_for_many_urls(fixture):
urls = [
'http://goo.gl/Up0wrT',
'http://goo.gl/4Nuepy',
'http://goo.gl/bYm2EP'
]
result = fixture.link.expand_urls(False, urls)
expected = []
threads = []
for url in urls:
QUEUE.put(url)
thread = threading.Thread(target=expand_fmt, args=(expected,))
thread.daemon = True
thread.start()
threads.append(thread)
for thread in threads:
thread.join(timeout=10)
print(result, expected)
assert sorted(result) == sorted(expected)
def test_shorten_urls_warns_about_url_without_protocol(fixture, capsys):
fixture.link.shorten_urls(False, False, ['google.com'])
out = capsys.readouterr()
assert out
assert out[0].startswith("\aWarning: Prepending 'http://' to")
def test_fetch_works(fixture):
result = fixture.link.fetch(False,
True,
[fixture.short],
[fixture.long],
False)
expected = [fixture.long_to_short, fixture.short_to_long]
for got, wanted in zip(sorted(result), sorted(expected)):
got = got.split()
wanted = wanted.split()
assert got[0] == wanted[0]
assert got[1] == wanted[1] == '=>'
if got[0] == fixture.long:
expanded = fixture.link.get_long(got[2])
assert expanded == got[0] == wanted[0]
def test_fetch_correct_output_if_raw_false_pretty_false(fixture):
fixture.link.raw = False
result = fixture.link.fetch(False,
True,
[fixture.short],
[fixture.long],
False)
expected = '\n'.join([fixture.short_to_long, fixture.long_to_short])
	assert result == expected
|
[
"threading.Thread",
"pyperclip.paste",
"pytest.fixture",
"threading.Lock",
"collections.namedtuple",
"pyperclip.copy",
"queue.Queue"
] |
[((388, 404), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (402, 404), False, 'import threading\n'), ((413, 420), 'queue.Queue', 'Queue', ([], {}), '()\n', (418, 420), False, 'from queue import Queue\n'), ((1302, 1332), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1316, 1332), False, 'import pytest\n'), ((1359, 1453), 'collections.namedtuple', 'namedtuple', (['"""Fixture"""', "['link', 'long', 'short', 'bold', 'long_to_short', 'short_to_long']"], {}), "('Fixture', ['link', 'long', 'short', 'bold', 'long_to_short',\n 'short_to_long'])\n", (1369, 1453), False, 'from collections import namedtuple\n'), ((2312, 2338), 'pyperclip.copy', 'pyperclip.copy', (['"""original"""'], {}), "('original')\n", (2326, 2338), False, 'import pyperclip\n'), ((1997, 2014), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (2012, 2014), False, 'import pyperclip\n'), ((2216, 2233), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (2231, 2233), False, 'import pyperclip\n'), ((2389, 2406), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (2404, 2406), False, 'import pyperclip\n'), ((3879, 3933), 'threading.Thread', 'threading.Thread', ([], {'target': 'shorten_fmt', 'args': '(expected,)'}), '(target=shorten_fmt, args=(expected,))\n', (3895, 3933), False, 'import threading\n'), ((4773, 4826), 'threading.Thread', 'threading.Thread', ([], {'target': 'expand_fmt', 'args': '(expected,)'}), '(target=expand_fmt, args=(expected,))\n', (4789, 4826), False, 'import threading\n')]
|
import string
import random
from logging import Logger
from pyspark.sql.session import SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StructType
from datalakebundle.table.upsert.UpsertQueryCreator import UpsertQueryCreator
from datalakebundle.delta.DeltaStorage import DeltaStorage
class DataWriter:
def __init__(
self,
logger: Logger,
spark: SparkSession,
delta_storage: DeltaStorage,
upsert_query_creator: UpsertQueryCreator,
):
self.__logger = logger
self.__spark = spark
self.__delta_storage = delta_storage
self.__upsert_query_creator = upsert_query_creator
def append(self, df: DataFrame, full_table_name: str, schema: StructType, options: dict):
# insertInto() requires dataframe columns order to match schema columns order
df.select([field.name for field in schema.fields]).write.options(**options).insertInto(full_table_name, overwrite=False)
def overwrite(self, df: DataFrame, full_table_name: str, partition_by: list, options: dict):
self.__delta_storage.overwrite_data(df, full_table_name, partition_by, options)
def upsert(self, df: DataFrame, full_table_name: str, schema: StructType, primary_key: list):
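        # Register the incoming dataframe under a unique temp view name so the
        # generated upsert (MERGE) statement can reference it as its source,
        # then drop the view again whether or not the SQL succeeds.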
temp_source_table = (
f"upsert_{full_table_name.replace('.', '__')}_{''.join(random.choice(string.ascii_lowercase) for _ in range(6))}"
)
df.createOrReplaceTempView(temp_source_table)
upsert_sql_statement = self.__upsert_query_creator.create(full_table_name, schema, primary_key, temp_source_table)
try:
self.__spark.sql(upsert_sql_statement)
except BaseException:
raise
finally:
self.__spark.catalog.dropTempView(temp_source_table)
|
[
"random.choice"
] |
[((1377, 1414), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (1390, 1414), False, 'import random\n')]
|
"""
Author :
<NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from keras import backend as K
from tqdm.keras import TqdmCallback
from scipy.stats import spearmanr
from tensorflow.keras import Input
from tensorflow.keras import optimizers
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model
from statistics import mean
from sklearn.utils import shuffle
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
import pandas as pd
import datetime
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau ,Callback,TensorBoard
from keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras import applications
import PIL
from keras.activations import softmax,sigmoid
import h5py
from PIL import Image
from keras.layers import Layer
from scipy.stats import spearmanr,pearsonr
import sklearn
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D ,Dense,Concatenate ,Dropout ,Input,concatenate,Conv2D,Reshape,GlobalMaxPooling2D,Flatten,GlobalAveragePooling2D,AveragePooling2D,Lambda,MaxPooling2D,TimeDistributed, Bidirectional, LSTM
import argparse
import random
from tqdm import tqdm
tf.keras.backend.clear_session()
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
#os.environ['CUDA_VISIBLE_DEVICES']=""
def data_generator(data,batch_size=16):
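    # Yields (X, y) batches of precomputed feature maps (30 frames x 25 patches
    # x 2560 channels) and their quality labels, loaded lazily from .npy files.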
num_samples = len(data)
random.shuffle(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
y_train[i,:] = y_train[i,:]
yield X_train, y_train
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
# 4-parameter logistic function
logisticPart = 1 + np.exp(np.negative(np.divide(X - bayta3, np.abs(bayta4))))
yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
return yhat
'''
def data_generator_1(data,batch_size=4):
num_samples = len(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
yield X_train
def data_generator_2(data,batch_size=1):
num_samples = len(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
yield y_train
'''
def build_model(batch_shape, model_final):
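    # Wrap the pretrained spatial-pooling module in TimeDistributed so it runs
    # once per frame, then stack two bidirectional LSTMs for temporal
    # aggregation ahead of the final regression head.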
model = models.Sequential()
model.add(TimeDistributed(model_final,input_shape = batch_shape))
model.add(Bidirectional(LSTM(64,return_sequences=True,kernel_initializer='random_normal',
recurrent_initializer='random_normal',
dropout=0.4,recurrent_dropout=0)))
model.add(Bidirectional(LSTM(64,return_sequences=True,
kernel_initializer='random_normal',
recurrent_initializer='random_normal', dropout=0.4,recurrent_dropout=0)))
model.add(Flatten())
model.add(Dense(256,activation='relu', kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001)))
model.add(layers.Dropout(rate=0.5))
model.add(layers.Dense(1))
model.add(layers.Activation('linear'))
model.compile(optimizer=optimizers.Adam(),loss='mse',metrics=['mae'])
model.summary()
return model
def data_prepare():
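    # Pair every feature file in ./features_X with the label file of the same
    # name in ./features_y and return the sorted list of (X, y) path pairs.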
x = os.listdir('features_X')
li = []
for i in range(len(x)):
tem = []
x_f = './features_X/' + x[i]
y_f = './features_y/' + x[i]
tem.append(x_f)
tem.append(y_f)
li.append(tem)
li.sort()
return (li)
if __name__ == '__main__':
parser = argparse.ArgumentParser("End2End_train")
parser.add_argument('-nf',
'--num_frames',
default=30,
type=int,
help='Number of cropped frames per video.'
)
parser.add_argument('-m',
'--pretrained_model',
default='/models/res-bi-sp_koniq.h5',
type=str,
help='path to pretrained spatial pooling module.'
)
parser.add_argument('-b',
'--batch_size',
default=16,
type=int,
help='batch_size.'
)
if not os.path.exists('./models'):
os.makedirs('./models')
args = parser.parse_args()
md = ModelCheckpoint(filepath='./models/trained_model.h5',monitor='val_loss', mode='min',save_weights_only=True,save_best_only=True,verbose=1)
rd = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20,min_lr=1e-7, verbose=2, mode='min')
ear = EarlyStopping(monitor='val_loss',mode ='min', patience=80, verbose=2,restore_best_weights=False)
callbacks_k = [md,rd,TqdmCallback(verbose=2),ear]
li = data_prepare()
li.sort()
num_patch = 25
nb = args.num_frames
batch_size = args.batch_size
sp_pretrained = args.pretrained_model
sep = int(len(li)/5)
train_l = li[0:sep*4]
test_l = li[sep*4:]
train_gen = data_generator(train_l,batch_size= batch_size)
val_gen = data_generator(test_l,batch_size= batch_size)
In = Input((nb,num_patch,2048))
model = load_model(sp_pretrained)
for layer in model.layers:
layer.trainable = True
model_final = Model(inputs=model.input,outputs=model.layers[-3].output )
model = build_model((nb,num_patch,2048), model_final)
history = model.fit_generator(train_gen,steps_per_epoch = int(len(train_l)/ batch_size),
epochs=200,validation_data=val_gen,validation_steps =
int(len(test_l)/batch_size) ,verbose=0,callbacks=callbacks_k)
|
[
"keras.models.load_model",
"numpy.load",
"numpy.abs",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"random.shuffle",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"os.path.exists",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"keras.callbacks.ReduceLROnPlateau",
"numpy.divide",
"tensorflow.keras.layers.Dropout",
"keras.callbacks.ModelCheckpoint",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.models.Model",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.layers.TimeDistributed",
"os.listdir",
"os.makedirs",
"numpy.zeros",
"keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.LSTM",
"tqdm.keras.TqdmCallback"
] |
[((1330, 1362), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1360, 1362), True, 'import tensorflow as tf\n'), ((1559, 1579), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (1573, 1579), False, 'import random\n'), ((3628, 3647), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (3645, 3647), False, 'from tensorflow.keras import models\n'), ((4530, 4554), 'os.listdir', 'os.listdir', (['"""features_X"""'], {}), "('features_X')\n", (4540, 4554), False, 'import os\n'), ((4781, 4821), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""End2End_train"""'], {}), "('End2End_train')\n", (4804, 4821), False, 'import argparse\n'), ((5380, 5525), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""./models/trained_model.h5"""', 'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'verbose': '(1)'}), "(filepath='./models/trained_model.h5', monitor='val_loss',\n mode='min', save_weights_only=True, save_best_only=True, verbose=1)\n", (5395, 5525), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, TensorBoard\n'), ((5524, 5627), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(20)', 'min_lr': '(1e-07)', 'verbose': '(2)', 'mode': '"""min"""'}), "(monitor='val_loss', factor=0.5, patience=20, min_lr=1e-07,\n verbose=2, mode='min')\n", (5541, 5627), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, TensorBoard\n'), ((5629, 5730), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'patience': '(80)', 'verbose': '(2)', 'restore_best_weights': '(False)'}), "(monitor='val_loss', mode='min', patience=80, verbose=2,\n restore_best_weights=False)\n", (5642, 5730), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, TensorBoard\n'), ((6105, 6133), 'tensorflow.keras.layers.Input', 'Input', (['(nb, num_patch, 2048)'], {}), '((nb, num_patch, 2048))\n', (6110, 6133), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((6141, 6166), 'keras.models.load_model', 'load_model', (['sp_pretrained'], {}), '(sp_pretrained)\n', (6151, 6166), False, 'from keras.models import load_model\n'), ((6235, 6293), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'model.layers[-3].output'}), '(inputs=model.input, outputs=model.layers[-3].output)\n', (6240, 6293), False, 'from tensorflow.keras.models import Model\n'), ((2329, 2369), 'numpy.divide', 'np.divide', (['(bayta1 - bayta2)', 'logisticPart'], {}), '(bayta1 - bayta2, logisticPart)\n', (2338, 2369), True, 'import numpy as np\n'), ((3660, 3713), 'tensorflow.keras.layers.TimeDistributed', 'TimeDistributed', (['model_final'], {'input_shape': 'batch_shape'}), '(model_final, input_shape=batch_shape)\n', (3675, 3713), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((4166, 4175), 
'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4173, 4175), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((4301, 4325), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (4315, 4325), False, 'from tensorflow.keras import layers\n'), ((4340, 4355), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4352, 4355), False, 'from tensorflow.keras import layers\n'), ((4369, 4396), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""linear"""'], {}), "('linear')\n", (4386, 4396), False, 'from tensorflow.keras import layers\n'), ((5290, 5316), 'os.path.exists', 'os.path.exists', (['"""./models"""'], {}), "('./models')\n", (5304, 5316), False, 'import os\n'), ((5320, 5343), 'os.makedirs', 'os.makedirs', (['"""./models"""'], {}), "('./models')\n", (5331, 5343), False, 'import os\n'), ((5748, 5771), 'tqdm.keras.TqdmCallback', 'TqdmCallback', ([], {'verbose': '(2)'}), '(verbose=2)\n', (5760, 5771), False, 'from tqdm.keras import TqdmCallback\n'), ((1803, 1839), 'numpy.zeros', 'np.zeros', (['(batch_size, 30, 25, 2560)'], {}), '((batch_size, 30, 25, 2560))\n', (1811, 1839), True, 'import numpy as np\n'), ((1860, 1885), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (1868, 1885), True, 'import numpy as np\n'), ((3744, 3888), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'kernel_initializer': '"""random_normal"""', 'recurrent_initializer': '"""random_normal"""', 'dropout': '(0.4)', 'recurrent_dropout': '(0)'}), "(64, return_sequences=True, kernel_initializer='random_normal',\n recurrent_initializer='random_normal', dropout=0.4, recurrent_dropout=0)\n", (3748, 3888), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((3946, 4090), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'kernel_initializer': '"""random_normal"""', 'recurrent_initializer': '"""random_normal"""', 'dropout': '(0.4)', 'recurrent_dropout': '(0)'}), "(64, return_sequences=True, kernel_initializer='random_normal',\n recurrent_initializer='random_normal', dropout=0.4, recurrent_dropout=0)\n", (3950, 4090), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((4425, 4442), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (4440, 4442), False, 'from tensorflow.keras import optimizers\n'), ((1958, 1986), 'numpy.load', 'np.load', (['batch_samples[i][0]'], {}), '(batch_samples[i][0])\n', (1965, 1986), True, 'import numpy as np\n'), ((2016, 2044), 'numpy.load', 'np.load', (['batch_samples[i][1]'], {}), '(batch_samples[i][1])\n', (2023, 2044), True, 'import numpy as np\n'), ((4238, 4286), 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (4272, 4286), True, 'import tensorflow as tf\n'), ((2293, 2307), 'numpy.abs', 'np.abs', 
(['bayta4'], {}), '(bayta4)\n', (2299, 2307), True, 'import numpy as np\n')]
|
import logging
import json
import requests
import time
import websocket
from platypush.config import Config
from platypush.message import Message
from platypush.message.event.pushbullet import PushbulletEvent
from .. import Backend
class PushbulletBackend(Backend):
def __init__(self, token, device, **kwargs):
super().__init__(**kwargs)
self.token = token
self.device_name = device
self.pb_device_id = self.get_device_id()
self._last_received_msg = {
'request' : { 'body': None, 'time': None },
'response' : { 'body': None, 'time': None },
'event' : { 'body': None, 'time': None },
}
def _get_latest_push(self):
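        # Ask the Pushbullet API for active pushes modified in the last five
        # seconds and return the most recent one (or an empty dict).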
t = int(time.time()) - 5
try:
response = requests.get(
u'https://api.pushbullet.com/v2/pushes',
headers = { 'Access-Token': self.token },
params = {
'modified_after': str(t),
'active' : 'true',
'limit' : 1,
}
)
response = response.json()
except Exception as e:
logging.exception(e)
raise e
if 'pushes' in response and response['pushes']:
return response['pushes'][0]
else:
return {}
def _should_skip_last_received_msg(self, msg):
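        # Deduplicate: skip a message that is identical to the previous one of
        # the same type and arrived within two seconds of it.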
if not isinstance(msg, dict): return True # We received something weird
is_duplicate=False
last_msg = self._last_received_msg[msg['type']]
if last_msg:
msg = Message.parse(msg)
if str(msg) == str(last_msg['body']) \
and time.time() - last_msg['time'] <= 2:
# Duplicate message sent on the Pushbullet socket within
# two seconds, ignore it
logging.debug('Ignoring duplicate message received on the socket')
is_duplicate = True
self._last_received_msg[msg['type']] = {
'body': msg, 'time': time.time()
}
return is_duplicate
def on_push(self):
def _f(ws, data):
try:
# Parse the push
try:
                    data = json.loads(data) if isinstance(data, str) else data
except Exception as e:
logging.exception(e)
return
# If it's a push, get it
if data['type'] == 'tickle' and data['subtype'] == 'push':
push = self._get_latest_push()
elif data['type'] == 'push':
push = data['push']
else: return # Not a push notification
# Post an event, useful to react on mobile notifications if
# you enabled notification mirroring on your PushBullet app
event = PushbulletEvent(**push)
self.on_message(event)
if 'body' not in push: return
logging.debug('Received push: {}'.format(push))
body = push['body']
try: body = json.loads(body)
except ValueError as e: return # Some other non-JSON push
if not self._should_skip_last_received_msg(body):
self.on_message(body)
except Exception as e:
logging.exception(e)
return
return _f
def on_error(self):
def _f(ws, e):
logging.exception(e)
logging.info('Restarting PushBullet backend')
ws.close()
self._init_socket()
return _f
def _init_socket(self):
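        # Open the Pushbullet realtime event stream over a websocket.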
self.ws = websocket.WebSocketApp(
'wss://stream.pushbullet.com/websocket/' + self.token,
on_message = self.on_push(),
on_error = self.on_error())
def get_device_id(self):
response = requests.get(
u'https://api.pushbullet.com/v2/devices',
headers = { 'Access-Token': self.token },
).json()
devices = [dev for dev in response['devices'] if 'nickname' in dev
and dev['nickname'] == self.device_name]
if not devices:
raise RuntimeError('No such Pushbullet device: {}'
.format(self.device_name))
return devices[0]['iden']
def send_message(self, msg):
requests.post(
u'https://api.pushbullet.com/v2/pushes',
headers = { 'Access-Token': self.token },
json = {
'type': 'note',
'device_iden': self.pb_device_id,
'body': str(msg)
}
).json()
def on_stop(self):
self.ws.close()
def run(self):
super().run()
self._init_socket()
logging.info('Initialized Pushbullet backend - device_id: {}'
.format(self.device_name))
self.ws.run_forever()
# vim:sw=4:ts=4:et:
|
[
"logging.exception",
"logging.debug",
"json.loads",
"platypush.message.event.pushbullet.PushbulletEvent",
"platypush.message.Message.parse",
"time.time",
"logging.info",
"requests.get"
] |
[((1607, 1625), 'platypush.message.Message.parse', 'Message.parse', (['msg'], {}), '(msg)\n', (1620, 1625), False, 'from platypush.message import Message\n'), ((2054, 2065), 'time.time', 'time.time', ([], {}), '()\n', (2063, 2065), False, 'import time\n'), ((3512, 3532), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (3529, 3532), False, 'import logging\n'), ((3545, 3590), 'logging.info', 'logging.info', (['"""Restarting PushBullet backend"""'], {}), "('Restarting PushBullet backend')\n", (3557, 3590), False, 'import logging\n'), ((731, 742), 'time.time', 'time.time', ([], {}), '()\n', (740, 742), False, 'import time\n'), ((1175, 1195), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (1192, 1195), False, 'import logging\n'), ((1868, 1934), 'logging.debug', 'logging.debug', (['"""Ignoring duplicate message received on the socket"""'], {}), "('Ignoring duplicate message received on the socket')\n", (1881, 1934), False, 'import logging\n'), ((2898, 2921), 'platypush.message.event.pushbullet.PushbulletEvent', 'PushbulletEvent', ([], {}), '(**push)\n', (2913, 2921), False, 'from platypush.message.event.pushbullet import PushbulletEvent\n'), ((3933, 4030), 'requests.get', 'requests.get', (['u"""https://api.pushbullet.com/v2/devices"""'], {'headers': "{'Access-Token': self.token}"}), "(u'https://api.pushbullet.com/v2/devices', headers={\n 'Access-Token': self.token})\n", (3945, 4030), False, 'import requests\n'), ((3137, 3153), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (3147, 3153), False, 'import json\n'), ((3389, 3409), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (3406, 3409), False, 'import logging\n'), ((1701, 1712), 'time.time', 'time.time', ([], {}), '()\n', (1710, 1712), False, 'import time\n'), ((2253, 2269), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2263, 2269), False, 'import json\n'), ((2364, 2384), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2381, 2384), False, 'import logging\n')]
|
import random
from fineract.objects.group import Group
number = random.randint(0, 10000)
def test_create_group(fineract):
group = Group.create(fineract.request_handler, 'Test ' + str(number), 1)
assert isinstance(group, Group)
def test_get_group_by_name(fineract):
group = Group.get_group_by_name(fineract.request_handler, 'Test ' + str(number))
assert isinstance(group, Group)
def test_add_member_to_group(fineract):
client = fineract.get_client(1)
group = Group.get_group_by_name(fineract.request_handler, 'Test ' + str(number))
assert group.add_members([client.id])
def test_remove_member_from_group(fineract):
client = fineract.get_client(1)
group = Group.get_group_by_name(fineract.request_handler, 'Test ' + str(number))
assert group.remove_members([client.id])
|
[
"random.randint"
] |
[((66, 90), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (80, 90), False, 'import random\n')]
|
#!/usr/bin/env python3
import argparse
note_name = [
"FX_C_0", "FX_Cs0", "FX_D_0", "FX_Ds0", "FX_E_0", "FX_F_0", "FX_Fs0", "FX_G_0", "FX_Gs0", "FX_A_0", "FX_As0", "FX_B_0",
"FX_C_1", "FX_Cs1", "FX_D_1", "FX_Ds1", "FX_E_1", "FX_F_1", "FX_Fs1", "FX_G_1", "FX_Gs1", "FX_A_1", "FX_As1", "FX_B_1",
"FX_C_2", "FX_Cs2", "FX_D_2", "FX_Ds2", "FX_E_2", "FX_F_2", "FX_Fs2", "FX_G_2", "FX_Gs2", "FX_A_2", "FX_As2", "FX_B_2",
"FX_C_3", "FX_Cs3", "FX_D_3", "FX_Ds3", "FX_E_3", "FX_F_3", "FX_Fs3", "FX_G_3", "FX_Gs3", "FX_A_3", "FX_As3", "FX_B_3",
"FX_C_4", "FX_Cs4", "FX_D_4", "FX_Ds4", "FX_E_4", "FX_F_4", "FX_Fs4", "FX_G_4", "FX_Gs4", "FX_A_4", "FX_As4", "FX_B_4",
"FX_C_5", "FX_Cs5", "FX_D_5", "FX_Ds5", "FX_E_5", "FX_F_5", "FX_Fs5", "FX_G_5", "FX_Gs5", "FX_A_5", "FX_As5", "FX_B_5"]
buffer_array_length = [0] * 32
buffer_array_ch2pan = [0] * 32
buffer_array_ch2vol = [0] * 32
buffer_array_ch2duty = [0] * 32
buffer_array_ch2note = [0] * 32
buffer_array_ch4pan = [0] * 32
buffer_array_ch4vol = [0] * 32
buffer_array_ch4freq = [0] * 32
channels_used_string = [""] * 52
channels_used_string[0] = "No channels (...Empty?)"
channels_used_string[3] = "Noise channel"
channels_used_string[48] = "Duty channel 2"
channels_used_string[51] = "Duty channel 2 & Noise channel"
buffer_length = 0
buffer_channels_used = 0
buffer_priority = 0
def update_c_header(file, leng, b):
return """/*
""" + file + """
Sound Effect File.
Info:
Length : """ + str(leng) + """
Bank : """ + str(b) + """
Priority : """ + str(buffer_priority) + """
Channels used : """ + channels_used_string[buffer_channels_used] + """
This file was generated by hammer2cbt
*/
"""
def clamp(n, smallest, largest):
return max(smallest, min(n, largest))
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help = "Input FX Hammer .sav file", type = str)
parser.add_argument("-o", "--output", help = "Output folder for .c/.h files", type = str)
parser.add_argument("-b", "--bank", help = "Optional bank number", type = int)
parser.add_argument("-dp", "--dpath", help = "Optional path to CBT-FX (F.E '-dp include/cbtfx.h', default is 'cbtfx.h')", type = str)
parser.add_argument("-na", "--name", help = "Optional effect name (Default is 'SFX_' Followed by the effect number)", type = str)
parser.add_argument("-nu", "--number", help = "Effect number to export", type = int)
parser.add_argument("-lr", "--invert", help = "Invert FX Hammer pan values (Editor is inverted)")
args = parser.parse_args()
input_sav = args.input
output_folder = ""
if args.output:
output_folder = args.output + "/"
FXHammer_sfx = 0
if args.number:
FXHammer_sfx = args.number
buffer_name = "SFX_" + (("{0:X}").format(FXHammer_sfx)).zfill(2)
if args.name:
buffer_name = args.name
buffer_bank = 0
if args.bank:
buffer_bank = args.bank
default_path = "cbtfx.h"
if args.dpath:
default_path = args.dpath
FXHammer_file = open(input_sav, "rb")
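# Each effect is a 256-byte record starting at offset 0x400: up to 32 rows of
# 8 bytes each (length, ch2 pan/vol/duty/note, ch4 pan/vol/freq).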
FXHammer_file.seek(0x400 + (FXHammer_sfx * 256))
buffer_length = 0
for x in range(32):
buffer_array_length[x] = int.from_bytes(FXHammer_file.read(1), "big")
if buffer_array_length[x] == 0:
buffer_length += 1 # This is to avoid adding 1 to the length in the driver and save some space
break
buffer_array_ch2pan[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 1
buffer_array_ch2vol[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 4
buffer_array_ch2duty[x] = (int.from_bytes(FXHammer_file.read(1), "big") >> 6)
buffer_array_ch2note[x] = (int.from_bytes(FXHammer_file.read(1), "big") - 0x40) / 2
buffer_array_ch4pan[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 3
buffer_array_ch4vol[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 4
buffer_array_ch4freq[x] = int.from_bytes(FXHammer_file.read(1), "big")
if args.invert:
buffer_array_ch2pan[x] =((buffer_array_ch2pan[x] & 0x0F) << 4 | (buffer_array_ch2pan[x] & 0xF0) >> 4)
buffer_array_ch4pan[x] =((buffer_array_ch4pan[x] & 0x0F) << 4 | (buffer_array_ch4pan[x] & 0xF0) >> 4)
buffer_length += 1
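# Per-effect metadata tables: channels-used byte at 0x300 + n, priority byte at 0x200 + n.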
FXHammer_file.seek(0x300 + FXHammer_sfx)
buffer_channels_used = int.from_bytes(FXHammer_file.read(1), "big")
FXHammer_file.seek(0x200 + FXHammer_sfx)
buffer_priority = int.from_bytes(FXHammer_file.read(1), "big")
FXHammer_file.close()
C_file_out = open(output_folder + buffer_name + ".c", "w")
if buffer_bank > 0:
C_file_out.write("#pragma bank " + str(buffer_bank) + "\n")
C_file_out.write(update_c_header(buffer_name, buffer_length, buffer_bank))
C_file_out.write("""#define """ + buffer_name + """_Length """ + str(buffer_length) + """\n#define """ + buffer_name + """_Bank """ + str(buffer_bank) + """\n#define """ + buffer_name + """_CH_Used """ + str(buffer_channels_used << 2 | buffer_channels_used) + """\n#define """ + buffer_name + """_Priority """ + str(buffer_priority))
C_file_out.write("\n#define CBTFX_PLAY_" + buffer_name + " CBTFX_init(&" + buffer_name + "[0][0], " + str(buffer_length) + ", " + str(buffer_priority) + ", " + str(buffer_channels_used << 2 | buffer_channels_used) + ")")
C_file_out.write('\n#include "' + default_path + '"\n')
if buffer_bank > 0:
C_file_out.write("\nconst void __at(" + str(buffer_bank) +") __bank_" + buffer_name +";")
C_file_out.write("\nconst unsigned char " + buffer_name +"[" + str(buffer_length) +"][CBTFX_LENGTH] = {\n")
for x in range(0, 32):
if buffer_array_length[x] == 0:
break
C_file_out.write(" CBTFX_FRAME(" + str(buffer_array_length[x]) + ", " + str(buffer_array_ch2pan[x]) + ", " + str(buffer_array_ch2vol[x]) + ", " + str(buffer_array_ch2duty[x]) + ", " + str(note_name[int(buffer_array_ch2note[x])]) + ", " + str(buffer_array_ch4pan[x]) + ", " + str(buffer_array_ch4vol[x])+ ", " + str(buffer_array_ch4freq[x]) +")")
if not buffer_array_length[int(clamp(x + 1, 0, 32))] == 0:
C_file_out.write(",\n")
else:
break
C_file_out.write("\n};")
C_file_out.close()
H_file_out = open(output_folder + buffer_name + ".h", "w")
H_file_out.write(update_c_header(buffer_name, buffer_length, buffer_bank))
H_file_out.write("#ifndef __" + buffer_name + "_h_INCLUDE\n")
H_file_out.write("#define __" + buffer_name + "_h_INCLUDE\n")
H_file_out.write("""#define """ + buffer_name + """_Length """ + str(buffer_length) + """\n#define """ + buffer_name + """_Bank """ + str(buffer_bank) + """\n#define """ + buffer_name + """_CH_Used """ + str(buffer_channels_used << 2 | buffer_channels_used) + """\n#define """ + buffer_name + """_Priority """ + str(buffer_priority))
H_file_out.write("\n#define CBTFX_PLAY_" + buffer_name + " CBTFX_init(&" + buffer_name + "[0][0], " + str(buffer_length) + ", " + str(buffer_priority) + ", " + str(buffer_channels_used << 2 | buffer_channels_used) + ")")
if buffer_bank > 0:
H_file_out.write("\nextern const void __bank_" + buffer_name +";")
H_file_out.write("\nextern const unsigned char " + buffer_name +"[" + str(buffer_length) +"][CBTFX_LENGTH];\n")
H_file_out.write("#endif")
H_file_out.close()
|
[
"argparse.ArgumentParser"
] |
[((1738, 1763), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1761, 1763), False, 'import argparse\n')]
|
#!/usr/bin/env python
# coding: utf-8
import smtplib
import datetime
from dateutil.parser import parse
from flask_mail import Mail, Message
from app.auxiliary.query_tools import calc_time
from app.logger import Logger
def sendEmail(email, msg_body):
from app.fl_app import application
mail = Mail(application)
with application.app_context():
msg = Message('Response from Trading Project', recipients=[email])
msg.body = msg_body
try:
mail.send(msg)
except (smtplib.SMTPHeloError, smtplib.SMTPRecipientsRefused,
smtplib.SMTPSenderRefused, smtplib.SMTPDataError):
pass
def celeryLogFailAndEmail(task_id, start_time, email, ex_name):
message = f"Your request was failed. To know why - you can make a status request with your task ID: {task_id}"
sendEmail(email, message)
Logger.info(f"Response: Celery task failed. task_id: <{task_id}>; exc_name: <{ex_name}>; "
f"time: <{calc_time(start_time)} ms>")
def celeryLogSuccessAndEmail(task_id, start_time, email, result):
message = f"Your request successed! The result is:\n" \
f"{result}"
sendEmail(email, message)
Logger.info(f"Response: Query successed. query_id: <{task_id}>; "
f"time: <{calc_time(start_time)} ms>")
def getDateAndTimeByKey(parameters, dt_key, task_id):
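    # Parse the optional "date<dt_key>" and "time<dt_key>" request parameters;
    # malformed values are logged and treated as absent.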
dateValue = None
timeValue = None
if parameters.get("".join(["date", dt_key])):
try:
dateValue = parse(parameters.get("".join(["date", dt_key]))).date()
except ValueError:
Logger.warn(f"Bad date format. Continue running task without it. task_id: {task_id} \n"
f"key=<{''.join(['date', dt_key])}>; value=<{parameters.get(''.join(['date', dt_key]))}>")
if parameters.get("".join(["time", dt_key])):
try:
timeValue = parse(" ".join(["1970-01-01", parameters.get("".join(["time", dt_key]))])).time()
except ValueError:
Logger.warn(f"Bad time format. Continue running task without it. task_id: {task_id} \n"
f"key=<{''.join(['time', dt_key])}>; value=<{parameters.get(''.join(['time', dt_key]))}>")
Logger.debug(dateValue)
Logger.debug(timeValue)
return (dateValue, timeValue)
def addWhereToExpression(expr, tickers, dateFrom, timeFrom, dateTo, timeTo):
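    # Append a WHERE clause: an optional OR-list of tickers, AND-combined with
    # an optional date/time window split into "days in between", "left edge"
    # and "right edge" conditions.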
expr = " ".join([expr, "WHERE "])
expr = "".join([expr, "("])
if tickers:
expr = "".join([expr, "("])
for i in range(len(tickers)):
if i != 0:
expr = " ".join([expr, "OR", f"ticker='{tickers[i]}'"])
else:
expr = "".join([expr, f"ticker='{tickers[i]}'"])
expr = "".join([expr, ")"])
if dateFrom or dateTo or timeFrom or timeTo:
expr = " ".join([expr, "AND", "("])
needOR = False
needAND = False
if (dateFrom and not dateTo and timeTo) or \
(dateTo and not dateFrom and timeFrom):
return None
if dateFrom and not timeFrom:
timeFrom = datetime.time(0, 0, 0)
if dateTo and not timeTo:
timeTo = datetime.time(23, 59, 59)
Logger.debug(dateFrom)
Logger.debug(dateTo)
Logger.debug(timeFrom)
Logger.debug(timeTo)
if dateFrom and dateTo:
if dateFrom > dateTo or (dateFrom == dateTo and timeFrom > timeTo):
return None
        # days strictly between dateFrom and dateTo
if not dateFrom or not dateTo or dateTo != dateFrom:
if dateFrom or dateTo:
expr = "".join([expr, "("])
if dateFrom:
expr = "".join([expr, f"date>'{dateFrom.strftime('%Y-%m-%d')}'"])
needAND = True
if dateTo:
if needAND:
expr = " ".join([expr, "AND"])
expr = " ".join([expr, f"date<'{dateTo.strftime('%Y-%m-%d')}'"])
if dateFrom or dateTo:
expr = "".join([expr, ")"])
needOR = True
needAND = False
        # left edge: the dateFrom day itself
if dateFrom or timeFrom:
if needOR:
expr = " ".join([expr, "OR"])
needOR = False
expr = " ".join([expr, "("])
if dateFrom:
expr = "".join([expr, f"date='{dateFrom.strftime('%Y-%m-%d')}'"])
needAND = True
if timeFrom:
if needAND:
expr = " ".join([expr, "AND"])
needAND = False
expr = " ".join([expr, f"time>='{timeFrom.strftime('%H:%M:%S')}'"])
needAND = True
if dateFrom and dateTo and dateTo == dateFrom:
if needAND:
expr = " ".join([expr, "AND"])
needAND = False
expr = " ".join([expr, f"time<='{timeTo.strftime('%H:%M:%S')}'"])
needAND = True
if dateFrom or timeFrom:
expr = "".join([expr, ")"])
needOR = True
needAND = False
        # right edge: the dateTo day itself
if not dateFrom or not dateTo or dateTo != dateFrom:
if dateTo or timeTo:
if needOR:
expr = " ".join([expr, "OR"])
needOR = False
expr = " ".join([expr, "("])
if dateTo:
expr = "".join([expr, f"date='{dateTo.strftime('%Y-%m-%d')}'"])
needAND = True
if timeTo:
if needAND:
expr = " ".join([expr, "AND"])
expr = " ".join([expr, f"time<='{timeTo.strftime('%H:%M:%S')}'"])
if dateTo or timeTo:
expr = "".join([expr, ")"])
needOR = True
needAND = False
expr = " ".join([expr, ")"])
expr = "".join([expr, ")"])
return expr
|
[
"app.fl_app.application.app_context",
"flask_mail.Message",
"app.auxiliary.query_tools.calc_time",
"flask_mail.Mail",
"app.logger.Logger.debug",
"datetime.time"
] |
[((303, 320), 'flask_mail.Mail', 'Mail', (['application'], {}), '(application)\n', (307, 320), False, 'from flask_mail import Mail, Message\n'), ((2225, 2248), 'app.logger.Logger.debug', 'Logger.debug', (['dateValue'], {}), '(dateValue)\n', (2237, 2248), False, 'from app.logger import Logger\n'), ((2253, 2276), 'app.logger.Logger.debug', 'Logger.debug', (['timeValue'], {}), '(timeValue)\n', (2265, 2276), False, 'from app.logger import Logger\n'), ((330, 355), 'app.fl_app.application.app_context', 'application.app_context', ([], {}), '()\n', (353, 355), False, 'from app.fl_app import application\n'), ((371, 431), 'flask_mail.Message', 'Message', (['"""Response from Trading Project"""'], {'recipients': '[email]'}), "('Response from Trading Project', recipients=[email])\n", (378, 431), False, 'from flask_mail import Mail, Message\n'), ((3210, 3232), 'app.logger.Logger.debug', 'Logger.debug', (['dateFrom'], {}), '(dateFrom)\n', (3222, 3232), False, 'from app.logger import Logger\n'), ((3241, 3261), 'app.logger.Logger.debug', 'Logger.debug', (['dateTo'], {}), '(dateTo)\n', (3253, 3261), False, 'from app.logger import Logger\n'), ((3270, 3292), 'app.logger.Logger.debug', 'Logger.debug', (['timeFrom'], {}), '(timeFrom)\n', (3282, 3292), False, 'from app.logger import Logger\n'), ((3301, 3321), 'app.logger.Logger.debug', 'Logger.debug', (['timeTo'], {}), '(timeTo)\n', (3313, 3321), False, 'from app.logger import Logger\n'), ((3096, 3118), 'datetime.time', 'datetime.time', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3109, 3118), False, 'import datetime\n'), ((3175, 3200), 'datetime.time', 'datetime.time', (['(23)', '(59)', '(59)'], {}), '(23, 59, 59)\n', (3188, 3200), False, 'import datetime\n'), ((986, 1007), 'app.auxiliary.query_tools.calc_time', 'calc_time', (['start_time'], {}), '(start_time)\n', (995, 1007), False, 'from app.auxiliary.query_tools import calc_time\n'), ((1295, 1316), 'app.auxiliary.query_tools.calc_time', 'calc_time', (['start_time'], {}), '(start_time)\n', (1304, 1316), False, 'from app.auxiliary.query_tools import calc_time\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['OrganizationArgs', 'Organization']
@pulumi.input_type
class OrganizationArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
twitter: Optional[pulumi.Input[str]] = None,
website: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Organization resource.
:param pulumi.Input[str] description: Description string
:param pulumi.Input[str] logo: Logo URL
:param pulumi.Input[str] name: The name of the Organization
:param pulumi.Input[str] twitter: Twitter handle
:param pulumi.Input[str] website: Website link
"""
if description is not None:
pulumi.set(__self__, "description", description)
if logo is not None:
pulumi.set(__self__, "logo", logo)
if name is not None:
pulumi.set(__self__, "name", name)
if twitter is not None:
pulumi.set(__self__, "twitter", twitter)
if website is not None:
pulumi.set(__self__, "website", website)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description string
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def logo(self) -> Optional[pulumi.Input[str]]:
"""
Logo URL
"""
return pulumi.get(self, "logo")
@logo.setter
def logo(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logo", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Organization
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def twitter(self) -> Optional[pulumi.Input[str]]:
"""
Twitter handle
"""
return pulumi.get(self, "twitter")
@twitter.setter
def twitter(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "twitter", value)
@property
@pulumi.getter
def website(self) -> Optional[pulumi.Input[str]]:
"""
Website link
"""
return pulumi.get(self, "website")
@website.setter
def website(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "website", value)
@pulumi.input_type
class _OrganizationState:
def __init__(__self__, *,
created: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
twitter: Optional[pulumi.Input[str]] = None,
updated: Optional[pulumi.Input[str]] = None,
website: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Organization resources.
:param pulumi.Input[str] description: Description string
:param pulumi.Input[str] logo: Logo URL
:param pulumi.Input[str] name: The name of the Organization
:param pulumi.Input[str] twitter: Twitter handle
:param pulumi.Input[str] website: Website link
"""
if created is not None:
pulumi.set(__self__, "created", created)
if description is not None:
pulumi.set(__self__, "description", description)
if logo is not None:
pulumi.set(__self__, "logo", logo)
if name is not None:
pulumi.set(__self__, "name", name)
if twitter is not None:
pulumi.set(__self__, "twitter", twitter)
if updated is not None:
pulumi.set(__self__, "updated", updated)
if website is not None:
pulumi.set(__self__, "website", website)
@property
@pulumi.getter
def created(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "created")
@created.setter
def created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description string
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def logo(self) -> Optional[pulumi.Input[str]]:
"""
Logo URL
"""
return pulumi.get(self, "logo")
@logo.setter
def logo(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logo", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Organization
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def twitter(self) -> Optional[pulumi.Input[str]]:
"""
Twitter handle
"""
return pulumi.get(self, "twitter")
@twitter.setter
def twitter(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "twitter", value)
@property
@pulumi.getter
def updated(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "updated")
@updated.setter
def updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated", value)
@property
@pulumi.getter
def website(self) -> Optional[pulumi.Input[str]]:
"""
Website link
"""
return pulumi.get(self, "website")
@website.setter
def website(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "website", value)
class Organization(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
twitter: Optional[pulumi.Input[str]] = None,
website: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides a resource to manage an organization in Equinix Metal.
## Example Usage
```python
import pulumi
import pulumi_equinix_metal as equinix_metal
# Create a new Project
tf_organization1 = equinix_metal.Organization("tfOrganization1", description="quux")
```
## Import
This resource can be imported using an existing organization ID
```sh
$ pulumi import equinix-metal:index/organization:Organization metal_organization {existing_organization_id}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description string
:param pulumi.Input[str] logo: Logo URL
:param pulumi.Input[str] name: The name of the Organization
:param pulumi.Input[str] twitter: Twitter handle
:param pulumi.Input[str] website: Website link
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[OrganizationArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides a resource to manage an organization in Equinix Metal.
## Example Usage
```python
import pulumi
import pulumi_equinix_metal as equinix_metal
# Create a new Project
tf_organization1 = equinix_metal.Organization("tfOrganization1", description="quux")
```
## Import
This resource can be imported using an existing organization ID
```sh
$ pulumi import equinix-metal:index/organization:Organization metal_organization {existing_organization_id}
```
:param str resource_name: The name of the resource.
:param OrganizationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OrganizationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
twitter: Optional[pulumi.Input[str]] = None,
website: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OrganizationArgs.__new__(OrganizationArgs)
__props__.__dict__["description"] = description
__props__.__dict__["logo"] = logo
__props__.__dict__["name"] = name
__props__.__dict__["twitter"] = twitter
__props__.__dict__["website"] = website
__props__.__dict__["created"] = None
__props__.__dict__["updated"] = None
super(Organization, __self__).__init__(
'equinix-metal:index/organization:Organization',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
twitter: Optional[pulumi.Input[str]] = None,
updated: Optional[pulumi.Input[str]] = None,
website: Optional[pulumi.Input[str]] = None) -> 'Organization':
"""
Get an existing Organization resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description string
:param pulumi.Input[str] logo: Logo URL
:param pulumi.Input[str] name: The name of the Organization
:param pulumi.Input[str] twitter: Twitter handle
:param pulumi.Input[str] website: Website link
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _OrganizationState.__new__(_OrganizationState)
__props__.__dict__["created"] = created
__props__.__dict__["description"] = description
__props__.__dict__["logo"] = logo
__props__.__dict__["name"] = name
__props__.__dict__["twitter"] = twitter
__props__.__dict__["updated"] = updated
__props__.__dict__["website"] = website
return Organization(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def created(self) -> pulumi.Output[str]:
return pulumi.get(self, "created")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description string
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def logo(self) -> pulumi.Output[Optional[str]]:
"""
Logo URL
"""
return pulumi.get(self, "logo")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Organization
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def twitter(self) -> pulumi.Output[Optional[str]]:
"""
Twitter handle
"""
return pulumi.get(self, "twitter")
@property
@pulumi.getter
def updated(self) -> pulumi.Output[str]:
return pulumi.get(self, "updated")
@property
@pulumi.getter
def website(self) -> pulumi.Output[Optional[str]]:
"""
Website link
"""
return pulumi.get(self, "website")
|
[
"pulumi.get",
"pulumi.ResourceOptions",
"pulumi.set"
] |
[((1727, 1758), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (1737, 1758), False, 'import pulumi\n'), ((1855, 1893), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (1865, 1893), False, 'import pulumi\n'), ((2035, 2059), 'pulumi.get', 'pulumi.get', (['self', '"""logo"""'], {}), "(self, 'logo')\n", (2045, 2059), False, 'import pulumi\n'), ((2142, 2173), 'pulumi.set', 'pulumi.set', (['self', '"""logo"""', 'value'], {}), "(self, 'logo', value)\n", (2152, 2173), False, 'import pulumi\n'), ((2335, 2359), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (2345, 2359), False, 'import pulumi\n'), ((2442, 2473), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (2452, 2473), False, 'import pulumi\n'), ((2624, 2651), 'pulumi.get', 'pulumi.get', (['self', '"""twitter"""'], {}), "(self, 'twitter')\n", (2634, 2651), False, 'import pulumi\n'), ((2740, 2774), 'pulumi.set', 'pulumi.set', (['self', '"""twitter"""', 'value'], {}), "(self, 'twitter', value)\n", (2750, 2774), False, 'import pulumi\n'), ((2923, 2950), 'pulumi.get', 'pulumi.get', (['self', '"""website"""'], {}), "(self, 'website')\n", (2933, 2950), False, 'import pulumi\n'), ((3039, 3073), 'pulumi.set', 'pulumi.set', (['self', '"""website"""', 'value'], {}), "(self, 'website', value)\n", (3049, 3073), False, 'import pulumi\n'), ((4676, 4703), 'pulumi.get', 'pulumi.get', (['self', '"""created"""'], {}), "(self, 'created')\n", (4686, 4703), False, 'import pulumi\n'), ((4792, 4826), 'pulumi.set', 'pulumi.set', (['self', '"""created"""', 'value'], {}), "(self, 'created', value)\n", (4802, 4826), False, 'import pulumi\n'), ((4985, 5016), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (4995, 5016), False, 'import pulumi\n'), ((5113, 5151), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (5123, 5151), False, 'import pulumi\n'), ((5293, 5317), 'pulumi.get', 'pulumi.get', (['self', '"""logo"""'], {}), "(self, 'logo')\n", (5303, 5317), False, 'import pulumi\n'), ((5400, 5431), 'pulumi.set', 'pulumi.set', (['self', '"""logo"""', 'value'], {}), "(self, 'logo', value)\n", (5410, 5431), False, 'import pulumi\n'), ((5593, 5617), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (5603, 5617), False, 'import pulumi\n'), ((5700, 5731), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (5710, 5731), False, 'import pulumi\n'), ((5882, 5909), 'pulumi.get', 'pulumi.get', (['self', '"""twitter"""'], {}), "(self, 'twitter')\n", (5892, 5909), False, 'import pulumi\n'), ((5998, 6032), 'pulumi.set', 'pulumi.set', (['self', '"""twitter"""', 'value'], {}), "(self, 'twitter', value)\n", (6008, 6032), False, 'import pulumi\n'), ((6136, 6163), 'pulumi.get', 'pulumi.get', (['self', '"""updated"""'], {}), "(self, 'updated')\n", (6146, 6163), False, 'import pulumi\n'), ((6252, 6286), 'pulumi.set', 'pulumi.set', (['self', '"""updated"""', 'value'], {}), "(self, 'updated', value)\n", (6262, 6286), False, 'import pulumi\n'), ((6435, 6462), 'pulumi.get', 'pulumi.get', (['self', '"""website"""'], {}), "(self, 'website')\n", (6445, 6462), False, 'import pulumi\n'), ((6551, 6585), 'pulumi.set', 'pulumi.set', (['self', '"""website"""', 'value'], {}), "(self, 'website', value)\n", (6561, 6585), False, 'import pulumi\n'), ((13062, 13089), 
'pulumi.get', 'pulumi.get', (['self', '"""created"""'], {}), "(self, 'created')\n", (13072, 13089), False, 'import pulumi\n'), ((13249, 13280), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (13259, 13280), False, 'import pulumi\n'), ((13423, 13447), 'pulumi.get', 'pulumi.get', (['self', '"""logo"""'], {}), "(self, 'logo')\n", (13433, 13447), False, 'import pulumi\n'), ((13600, 13624), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (13610, 13624), False, 'import pulumi\n'), ((13776, 13803), 'pulumi.get', 'pulumi.get', (['self', '"""twitter"""'], {}), "(self, 'twitter')\n", (13786, 13803), False, 'import pulumi\n'), ((13898, 13925), 'pulumi.get', 'pulumi.get', (['self', '"""updated"""'], {}), "(self, 'updated')\n", (13908, 13925), False, 'import pulumi\n'), ((14075, 14102), 'pulumi.get', 'pulumi.get', (['self', '"""website"""'], {}), "(self, 'website')\n", (14085, 14102), False, 'import pulumi\n'), ((1198, 1246), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1208, 1246), False, 'import pulumi\n'), ((1288, 1322), 'pulumi.set', 'pulumi.set', (['__self__', '"""logo"""', 'logo'], {}), "(__self__, 'logo', logo)\n", (1298, 1322), False, 'import pulumi\n'), ((1364, 1398), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1374, 1398), False, 'import pulumi\n'), ((1443, 1483), 'pulumi.set', 'pulumi.set', (['__self__', '"""twitter"""', 'twitter'], {}), "(__self__, 'twitter', twitter)\n", (1453, 1483), False, 'import pulumi\n'), ((1528, 1568), 'pulumi.set', 'pulumi.set', (['__self__', '"""website"""', 'website'], {}), "(__self__, 'website', website)\n", (1538, 1568), False, 'import pulumi\n'), ((4028, 4068), 'pulumi.set', 'pulumi.set', (['__self__', '"""created"""', 'created'], {}), "(__self__, 'created', created)\n", (4038, 4068), False, 'import pulumi\n'), ((4117, 4165), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (4127, 4165), False, 'import pulumi\n'), ((4207, 4241), 'pulumi.set', 'pulumi.set', (['__self__', '"""logo"""', 'logo'], {}), "(__self__, 'logo', logo)\n", (4217, 4241), False, 'import pulumi\n'), ((4283, 4317), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (4293, 4317), False, 'import pulumi\n'), ((4362, 4402), 'pulumi.set', 'pulumi.set', (['__self__', '"""twitter"""', 'twitter'], {}), "(__self__, 'twitter', twitter)\n", (4372, 4402), False, 'import pulumi\n'), ((4447, 4487), 'pulumi.set', 'pulumi.set', (['__self__', '"""updated"""', 'updated'], {}), "(__self__, 'updated', updated)\n", (4457, 4487), False, 'import pulumi\n'), ((4532, 4572), 'pulumi.set', 'pulumi.set', (['__self__', '"""website"""', 'website'], {}), "(__self__, 'website', website)\n", (4542, 4572), False, 'import pulumi\n'), ((10096, 10120), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (10118, 10120), False, 'import pulumi\n'), ((12461, 12490), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (12483, 12490), False, 'import pulumi\n')]
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
print(e)
import unittest
import pytest # for pytest filterwarnings
import numpy as np
from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder
from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder
from sionna.fec.polar.decoding import Polar5GDecoder
from sionna.fec.crc import CRCEncoder
from sionna.fec.utils import GaussianPriorSource
from sionna.utils import BinarySource
from sionna.fec.polar.utils import generate_5g_ranking
class TestPolarDecodingSC(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
        # frozen vector too long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n)
        # n not a power of 2
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n+1)
# test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
PolarSCDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output equals all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
            c = -10. * np.ones([bs, p[1]])  # all-zero codeword as BPSK logits (no noise)
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
            # no infinity
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero llr input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
            # no infty
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
def test_identity(self):
"""test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise; c in {0,1} -> LLR in {-20,+20}
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
        # decode 2D Tensor
        c = dec(b).numpy()
        # decode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
        # both versions should yield the same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and xla is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarSCDecoder(frozen_pos, n)
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
dec = PolarSCDecoder(frozen_pos, n)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_dtype_flexible(self):
"""Test that output_dtype can be flexible."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarSCDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex-valued inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarSCDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecodingSCL(unittest.TestCase):
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
# frozen vec to long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCLDecoder(frozen_pos, n)
# n not a pow of 2
# frozen vec to long
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCLDecoder(frozen_pos, n+1)
# also test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
PolarSCLDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCLDecoder(frozen_pos, 64, output_dtype=tf.complex64)
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_output_dim(self):
"""Test that output dims are correct (=n) and output is the all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
# use_hybrid, use_fast_scl, cpu_only, use_scatter
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, p[1]])
u = dec(c).numpy()
# check shape
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
# also test different list sizes
n = 32
k = 16
frozen_pos, _ = generate_5g_ranking(k, n)
list_sizes = [1, 2, 8, 32]
for list_size in list_sizes:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, n])
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==k)
# also check that all-zero input yields all-zero
u_hat = np.zeros([bs, k])
self.assertTrue(np.array_equal(u, u_hat))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)"""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
                        # no infty
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
                        # no infty
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_identity(self):
"""Test that info bits can be recovered if no noise is added."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = BinarySource()
# use_hybrid, use_fast_scl, cpu_only, use_scatter
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
enc = PolarEncoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 200.*(2.*c-1) # demod BPSK without noise
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(),
u_hat.numpy()))
# also test different list sizes
n = 32
k = 16
crc_degree = "CRC11"
frozen_pos, _ = generate_5g_ranking(k, n)
enc = PolarEncoder(frozen_pos, n)
enc_crc = CRCEncoder(crc_degree)
u = source([bs, k-enc_crc.crc_length])
u_crc = enc_crc(u)
c = enc(u_crc)
        llr_ch = 200.*(2.*c-1) # demod BPSK without noise
list_sizes = [1, 2, 8, 32]
for list_size in list_sizes:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter,
crc_degree=crc_degree)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u_crc.numpy(),
u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 16
n = 32
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs,n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,n])
model(b2)
model.summary()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_multi_dimensional(self):
"""Test against multi-dimensional input shapes.
As reshaping is done before calling the actual decoder, no exhaustive
testing against all decoder options is required.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCLDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
        # decode 2D Tensor
        c = dec(b).numpy()
        # decode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
        # both versions should yield the same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 78
n = 128
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and XLA is supported."""
bs = 10
k = 16
n = 32
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
crc_degrees = [None, "CRC11"]
for crc_degree in crc_degrees:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
dec = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter,
crc_degree=crc_degree)
# test that for arbitrary input only binary values are
# returned
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
if not cpu_only: # cpu only does not support XLA
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
Also verifies that all decoding options yield same results.
Remark: results are for SC only, i.e., list_size=1.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=1,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_hybrid_scl(self):
"""Verify hybrid SC decoding option.
Remark: XLA is currently not supported.
"""
bs = 10
n = 32
k = 16
crc_degree = "CRC11"
list_sizes = [1, 2, 8, 32]
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
enc = PolarEncoder(frozen_pos, n)
enc_crc = CRCEncoder(crc_degree)
k_crc = enc_crc.crc_length
u = source([bs, k-k_crc])
u_crc = enc_crc(u)
c = enc(u_crc)
        llr_ch = 20.*(2.*c-1) # demod BPSK without noise
for list_size in list_sizes:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_hybrid_sc=True,
crc_degree=crc_degree)
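            # hybrid SC first runs plain SC decoding and only falls back to
            # SCL for codewords whose CRC check fails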
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u_crc.numpy(), u_hat.numpy()))
# verify that graph can be executed
@tf.function
def run_graph(u):
return dec(u)
u = source([bs, n])
# execute the graph twice
x = run_graph(u).numpy()
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
def test_dtype_flexible(self):
"""Test that output_dtype is variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarSCLDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex-valued inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarSCLDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecodingBP(unittest.TestCase):
"""Test Polar BP decoder."""
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
# frozen vec to long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarBPDecoder(frozen_pos, n)
# n not a pow of 2
# frozen vec to long
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarBPDecoder(frozen_pos, n+1)
        # also test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
PolarBPDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarBPDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output is all-zero
codeword."""
# batch size
bs = 10
# (k, n)
param_valid = [[1, 32],[10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for hard_out in [True, False]:
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarBPDecoder(frozen_pos,
p[1],
hard_out=hard_out)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, p[1]])
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
if hard_out:
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_identity(self):
"""Test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarBPDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarBPDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes."""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarBPDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
        # decode 2D Tensor
        c = dec(b).numpy()
        # decode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
        # both versions should yield the same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarBPDecoder(frozen_pos, n)
b = source([1, 15, n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_numerics(self):
"""Test for numerical stability with large llrs and many iterations.
"""
bs = 100
k = 120
n = 256
num_iter = 200
for hard_out in [False, True]:
frozen_pos, _ = generate_5g_ranking(k, n)
source = GaussianPriorSource()
dec = PolarBPDecoder(frozen_pos,
n,
hard_out=hard_out,
num_iter=num_iter)
b = source([[bs,n], 0.001]) # very large llrs
c = dec(b).numpy()
# all values are finite (not nan and not inf)
self.assertTrue(np.sum(np.abs(1 - np.isfinite(c)))==0)
def test_tf_fun(self):
"""Test that graph mode works and XLA is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 32
n = 64
num_iter = 10
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarBPDecoder(frozen_pos, n, num_iter=num_iter)
# test that for arbitrary input only 0,1 values are returned
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
x = run_graph(u).numpy()
# Currently not supported
# run same test for XLA (jit_compile=True)
#u = source([bs, n])
#x = run_graph_xla(u).numpy()
#x = run_graph_xla(u).numpy()
#u = source([bs+1, n])
#x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against Numpy reference implementation.
Test hard and soft output.
"""
def boxplus_np(x, y):
"""Check node update (boxplus) for LLRs in numpy.
See [Stimming_LLR]_ and [Hashemi_SSCL]_ for detailed equations.
"""
x_in = np.maximum(np.minimum(x, llr_max), -llr_max)
y_in = np.maximum(np.minimum(y, llr_max), -llr_max)
# avoid division for numerical stability
llr_out = np.log(1 + np.exp(x_in + y_in))
llr_out -= np.log(np.exp(x_in) + np.exp(y_in))
return llr_out
def decode_bp(llr_ch, n_iter, frozen_pos, info_pos):
n = llr_ch.shape[-1]
bs = llr_ch.shape[0]
n_stages = int(np.log2(n))
msg_r = np.zeros([bs, n_stages+1, n])
msg_l = np.zeros([bs, n_stages+1, n])
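            # msg_r/msg_l hold the right-/left-propagating messages of the
            # BP factor graph; stage 0 faces the info/frozen bits and
            # stage n_stages faces the channel LLRs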
# init llr_ch
msg_l[:, n_stages, :] = -1*llr_ch.numpy()
# init frozen positions with infty
msg_r[:, 0, frozen_pos] = llr_max
# and decode
for iter in range(n_iter):
# update r messages
for s in range(n_stages):
# calc indices
ind_range = np.arange(int(n/2))
ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
ind_2 = ind_1 + 2**s
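                    # ind_1/ind_2 address the two butterfly inputs
                    # (2**s apart) that are connected at stage s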
# load messages
l1_in = msg_l[:, s+1, ind_1]
l2_in = msg_l[:, s+1, ind_2]
r1_in = msg_r[:, s, ind_1]
r2_in = msg_r[:, s, ind_2]
# r1_out
msg_r[:, s+1, ind_1] = boxplus_np(r1_in, l2_in + r2_in)
# r2_out
msg_r[:, s+1, ind_2] = boxplus_np(r1_in, l1_in) + r2_in
# update l messages
for s in range(n_stages-1, -1, -1):
ind_range = np.arange(int(n/2))
ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
ind_2 = ind_1 + 2**s
l1_in = msg_l[:, s+1, ind_1]
l2_in = msg_l[:, s+1, ind_2]
r1_in = msg_r[:, s, ind_1]
r2_in = msg_r[:, s, ind_2]
# l1_out
msg_l[:, s, ind_1] = boxplus_np(l1_in, l2_in + r2_in)
# l2_out
msg_l[:, s, ind_2] = boxplus_np(r1_in, l1_in) + l2_in
# recover u_hat
u_hat_soft = msg_l[:, 0, info_pos]
u_hat = 0.5 * (1 - np.sign(u_hat_soft))
return u_hat, u_hat_soft
# generate llr_ch
noise_var = 0.3
num_iters = [5, 10, 20, 40]
llr_max = 19.3
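        # clipping value applied in the check-node update and used to mark
        # frozen positions as perfectly known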
bs = 100
n = 128
k = 64
frozen_pos, info_pos = generate_5g_ranking(k, n)
for num_iter in num_iters:
source = GaussianPriorSource()
llr_ch = source([[bs, n], noise_var])
# and decode
dec_bp = PolarBPDecoder(frozen_pos, n,
hard_out=True, num_iter=num_iter)
dec_bp_soft = PolarBPDecoder(frozen_pos, n,
hard_out=False, num_iter=num_iter)
u_hat_bp = dec_bp(llr_ch).numpy()
            u_hat_bp_soft = dec_bp_soft(llr_ch).numpy()
# and run BP decoder
u_hat_ref, u_hat_ref_soft = decode_bp(llr_ch,
num_iter,
frozen_pos,
info_pos)
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_bp, u_hat_ref))
self.assertTrue(np.allclose(-u_hat_bp_soft,
u_hat_ref_soft,
rtol=5e-2,
atol=5e-3))
def test_dtype_flexible(self):
"""Test that output dtype is variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarBPDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarBPDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecoding5G(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid input values.
Note: consistency of code parameters is already checked by the encoder.
"""
enc = Polar5GEncoder(40, 60)
with self.assertRaises(AssertionError):
Polar5GDecoder(enc, dec_type=1)
with self.assertRaises(ValueError):
Polar5GDecoder(enc, dec_type="ABC")
with self.assertRaises(AssertionError):
Polar5GDecoder("SC")
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_identity_de_ratematching(self):
"""Test that info bits can be recovered if no noise is added and
dimensions are correct."""
bs = 10
# (k,n)
param_valid = [[12, 32], [20, 32], [100, 257], [123, 897],
[1013, 1088]]
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for p in param_valid:
for dec_type in dec_types:
source = BinarySource()
enc = Polar5GEncoder(p[0], p[1])
dec = Polar5GDecoder(enc, dec_type=dec_type)
u = source([bs, p[0]])
c = enc(u)
self.assertTrue(c.numpy().shape[-1]==p[1])
                llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 145
source = BinarySource()
enc = Polar5GEncoder(k, n)
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = Polar5GDecoder(enc, dec_type=dec_type)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs,n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,n])
model(b2)
model.summary()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_multi_dimensional(self):
"""Test against arbitrary shapes."""
k = 120
n = 237
enc = Polar5GEncoder(k, n)
source = BinarySource()
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
dec = Polar5GDecoder(enc, dec_type=dec_type)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
            # decode 2D Tensor
            c = dec(b).numpy()
            # decode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
            # both versions should yield the same result
self.assertTrue(np.array_equal(c, c_res))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 95
n = 145
enc = Polar5GEncoder(k, n)
source = GaussianPriorSource()
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
dec = Polar5GDecoder(enc, dec_type=dec_type)
llr = source([[1,4,n], 0.5])
llr_rep = tf.tile(llr, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(llr_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that tf.function decorator works
include xla compiler test."""
bs = 10
k = 45
n = 67
enc = Polar5GEncoder(k, n)
source = GaussianPriorSource()
# hybSCL does not support graph mode!
dec_types = ["SC", "SCL", "BP"]
for dec_type in dec_types:
print(dec_type)
dec = Polar5GDecoder(enc, dec_type=dec_type)
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
# test that for arbitrary input only binary values are returned
u = source([[bs, n], 0.5])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([[bs+1, n], 0.5])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
# BP does currently not support XLA
if dec_type != "BP":
u = source([[bs, n], 0.5])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([[bs+1, n], 0.5])
x = run_graph_xla(u).numpy()
def test_dtype_flexible(self):
"""Test that output dtype can be variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
enc = Polar5GEncoder(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = Polar5GDecoder(enc, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = Polar5GDecoder(enc, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
|
[
"numpy.load",
"numpy.array_equal",
"sionna.fec.polar.utils.generate_5g_ranking",
"numpy.allclose",
"tensorflow.reshape",
"tensorflow.zeros_like",
"numpy.ones",
"numpy.isnan",
"sionna.fec.polar.decoding.Polar5GDecoder",
"numpy.arange",
"numpy.exp",
"sionna.fec.polar.decoding.PolarSCLDecoder",
"sionna.fec.polar.decoding.PolarSCDecoder",
"sys.path.append",
"tensorflow.keras.Input",
"sionna.fec.polar.encoding.PolarEncoder",
"numpy.isfinite",
"tensorflow.cast",
"sionna.fec.polar.encoding.Polar5GEncoder",
"numpy.minimum",
"numpy.log2",
"numpy.isneginf",
"sionna.fec.polar.decoding.PolarBPDecoder",
"tensorflow.config.experimental.set_memory_growth",
"numpy.isinf",
"numpy.mod",
"tensorflow.keras.Model",
"tensorflow.tile",
"sionna.utils.BinarySource",
"sionna.fec.crc.CRCEncoder",
"sionna.fec.utils.GaussianPriorSource",
"tensorflow.config.set_visible_devices",
"tensorflow.config.list_physical_devices",
"numpy.zeros",
"tensorflow.zeros",
"numpy.where",
"numpy.sign",
"tensorflow.function",
"pytest.mark.filterwarnings"
] |
[((267, 305), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (298, 305), True, 'import tensorflow as tf\n'), ((9161, 9228), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (9187, 9228), False, 'import pytest\n'), ((10308, 10375), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (10334, 10375), False, 'import pytest\n'), ((12882, 12949), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (12908, 12949), False, 'import pytest\n'), ((14689, 14756), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (14715, 14756), False, 'import pytest\n'), ((18282, 18349), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (18308, 18349), False, 'import pytest\n'), ((22085, 22152), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (22111, 22152), False, 'import pytest\n'), ((37736, 37803), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (37762, 37803), False, 'import pytest\n'), ((38730, 38797), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (38756, 38797), False, 'import pytest\n'), ((39548, 39615), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (39574, 39615), False, 'import pytest\n'), ((40416, 40483), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (40442, 40483), False, 'import pytest\n'), ((213, 235), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (228, 235), False, 'import sys\n'), ((426, 477), 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['gpus[gpu_num]', '"""GPU"""'], {}), "(gpus[gpu_num], 'GPU')\n", (455, 477), True, 'import tensorflow as tf\n'), ((537, 598), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[gpu_num]', '(True)'], {}), '(gpus[gpu_num], True)\n', (577, 598), True, 'import tensorflow as tf\n'), ((1323, 1339), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (1332, 1339), True, 'import numpy as np\n'), ((1538, 1563), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (1557, 1563), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((3129, 3150), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (3148, 3150), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((4831, 4845), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (4843, 4845), False, 'from sionna.utils import 
BinarySource\n'), ((4870, 4895), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (4889, 4895), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((4913, 4954), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (4927, 4954), True, 'import tensorflow as tf\n'), ((5023, 5063), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (5037, 5063), True, 'import tensorflow as tf\n'), ((5381, 5406), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (5400, 5406), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((5424, 5438), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (5436, 5438), False, 'from sionna.utils import BinarySource\n'), ((5453, 5482), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (5467, 5482), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((5529, 5556), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (5539, 5556), True, 'import tensorflow as tf\n'), ((5724, 5751), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (5734, 5751), True, 'import tensorflow as tf\n'), ((6042, 6067), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (6061, 6067), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((6085, 6099), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (6097, 6099), False, 'from sionna.utils import BinarySource\n'), ((6114, 6143), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (6128, 6143), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((6190, 6212), 'tensorflow.tile', 'tf.tile', (['b', '[bs, 1, 1]'], {}), '(b, [bs, 1, 1])\n', (6197, 6212), True, 'import tensorflow as tf\n'), ((6558, 6587), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (6569, 6587), True, 'import tensorflow as tf\n'), ((6710, 6724), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (6722, 6724), False, 'from sionna.utils import BinarySource\n'), ((6749, 6774), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (6768, 6774), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((6789, 6818), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (6803, 6818), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((8285, 8306), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (8304, 8306), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((8331, 8356), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (8350, 8356), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((8920, 8974), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {'output_dtype': 'tf.float32'}), '(frozen_pos, n, output_dtype=tf.float32)\n', (8934, 8974), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, 
PolarSCLDecoder, PolarBPDecoder\n'), ((9393, 9409), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (9402, 9409), True, 'import numpy as np\n'), ((9609, 9634), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (9628, 9634), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((11808, 11833), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (11827, 11833), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((13172, 13193), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (13191, 13193), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((14974, 14988), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (14986, 14988), False, 'from sionna.utils import BinarySource\n'), ((16059, 16084), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (16078, 16084), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((16099, 16126), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (16111, 16126), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((16145, 16167), 'sionna.fec.crc.CRCEncoder', 'CRCEncoder', (['crc_degree'], {}), '(crc_degree)\n', (16155, 16167), False, 'from sionna.fec.crc import CRCEncoder\n'), ((18649, 18674), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (18668, 18674), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((18692, 18706), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (18704, 18706), False, 'from sionna.utils import BinarySource\n'), ((18721, 18751), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (18736, 18751), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((18798, 18825), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (18808, 18825), True, 'import tensorflow as tf\n'), ((18993, 19020), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (19003, 19020), True, 'import tensorflow as tf\n'), ((19309, 19334), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (19328, 19334), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((19352, 19366), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (19364, 19366), False, 'from sionna.utils import BinarySource\n'), ((20244, 20258), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (20256, 20258), False, 'from sionna.utils import BinarySource\n'), ((20283, 20308), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (20302, 20308), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((23910, 23935), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (23929, 23935), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((23953, 23967), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (23965, 23967), False, 'from sionna.utils import BinarySource\n'), ((23982, 24009), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (23994, 24009), False, 
'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((24028, 24050), 'sionna.fec.crc.CRCEncoder', 'CRCEncoder', (['crc_degree'], {}), '(crc_degree)\n', (24038, 24050), False, 'from sionna.fec.crc import CRCEncoder\n'), ((25157, 25178), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (25176, 25178), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((25203, 25228), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (25222, 25228), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((25793, 25848), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'output_dtype': 'tf.float32'}), '(frozen_pos, n, output_dtype=tf.float32)\n', (25808, 25848), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26165, 26181), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (26174, 26181), True, 'import numpy as np\n'), ((26380, 26405), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (26399, 26405), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((28865, 28879), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (28877, 28879), False, 'from sionna.utils import BinarySource\n'), ((28904, 28929), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (28923, 28929), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((28947, 28988), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (28961, 28988), True, 'import tensorflow as tf\n'), ((29057, 29097), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (29071, 29097), True, 'import tensorflow as tf\n'), ((29406, 29431), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (29425, 29431), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((29449, 29463), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (29461, 29463), False, 'from sionna.utils import BinarySource\n'), ((29478, 29507), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (29492, 29507), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((29554, 29581), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (29564, 29581), True, 'import tensorflow as tf\n'), ((29749, 29776), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (29759, 29776), True, 'import tensorflow as tf\n'), ((30067, 30092), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (30086, 30092), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((30110, 30124), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (30122, 30124), False, 'from sionna.utils import BinarySource\n'), ((30139, 30168), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (30153, 30168), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((30217, 30239), 'tensorflow.tile', 'tf.tile', (['b', '[bs, 1, 1]'], {}), '(b, [bs, 1, 1])\n', 
(30224, 30239), True, 'import tensorflow as tf\n'), ((31316, 31345), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (31327, 31345), True, 'import tensorflow as tf\n'), ((31488, 31502), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (31500, 31502), False, 'from sionna.utils import BinarySource\n'), ((31527, 31552), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (31546, 31552), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((31567, 31615), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'num_iter': 'num_iter'}), '(frozen_pos, n, num_iter=num_iter)\n', (31581, 31615), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((35087, 35112), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (35106, 35112), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((36392, 36413), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (36411, 36413), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((36438, 36463), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (36457, 36463), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((37020, 37074), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'output_dtype': 'tf.float32'}), '(frozen_pos, n, output_dtype=tf.float32)\n', (37034, 37074), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((37382, 37404), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['(40)', '(60)'], {}), '(40, 60)\n', (37396, 37404), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((38969, 38983), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (38981, 38983), False, 'from sionna.utils import BinarySource\n'), ((38998, 39018), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (39012, 39018), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((39747, 39767), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (39761, 39767), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((39785, 39799), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (39797, 39799), False, 'from sionna.utils import BinarySource\n'), ((40663, 40683), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (40677, 40683), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((40701, 40722), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (40720, 40722), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((41319, 41339), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (41333, 41339), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((41357, 41378), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (41376, 41378), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((42648, 42669), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (42667, 42669), False, 'from sionna.fec.utils import 
GaussianPriorSource\n'), ((42684, 42704), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (42698, 42704), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((43251, 43295), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'output_dtype': 'tf.float32'}), '(enc, output_dtype=tf.float32)\n', (43265, 43295), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((1398, 1427), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (1412, 1427), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((1624, 1657), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', '(n + 1)'], {}), '(frozen_pos, n + 1)\n', (1638, 1657), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((1875, 1906), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (1894, 1906), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((1919, 1951), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (1933, 1951), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((2066, 2093), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['(32)', '(64)'], {}), '(32, 64)\n', (2085, 2093), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((2106, 2163), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', '(64)'], {'output_dtype': 'tf.complex64'}), '(frozen_pos, 64, output_dtype=tf.complex64)\n', (2120, 2163), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((2502, 2533), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (2521, 2533), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((2551, 2583), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (2565, 2583), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((2832, 2852), 'numpy.zeros', 'np.zeros', (['[bs, p[0]]'], {}), '([bs, p[0]])\n', (2840, 2852), True, 'import numpy as np\n'), ((3210, 3241), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (3229, 3241), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((3259, 3291), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (3273, 3291), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((3688, 3708), 'tensorflow.zeros', 'tf.zeros', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (3696, 3708), True, 'import tensorflow as tf\n'), ((4263, 4277), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (4275, 4277), False, 'from sionna.utils import BinarySource\n'), ((4306, 4337), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (4325, 4337), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((4355, 4385), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, 
p[1])\n', (4367, 4385), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((4404, 4436), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (4418, 4436), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((4969, 4998), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (4983, 4998), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((5824, 5848), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (5838, 5848), True, 'import numpy as np\n'), ((7540, 7575), 'numpy.load', 'np.load', (["(ref_path + f + '_Avec.npy')"], {}), "(ref_path + f + '_Avec.npy')\n", (7547, 7575), True, 'import numpy as np\n'), ((7597, 7631), 'numpy.load', 'np.load', (["(ref_path + f + '_Lch.npy')"], {}), "(ref_path + f + '_Lch.npy')\n", (7604, 7631), True, 'import numpy as np\n'), ((7652, 7687), 'numpy.load', 'np.load', (["(ref_path + f + '_uhat.npy')"], {}), "(ref_path + f + '_uhat.npy')\n", (7659, 7687), True, 'import numpy as np\n'), ((7890, 7919), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (7904, 7919), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((8886, 8904), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (8899, 8904), True, 'import tensorflow as tf\n'), ((9468, 9498), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (9483, 9498), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((9695, 9729), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', '(n + 1)'], {}), '(frozen_pos, n + 1)\n', (9710, 9729), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((9952, 9983), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (9971, 9983), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((9995, 10028), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (10010, 10028), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((10143, 10170), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['(32)', '(64)'], {}), '(32, 64)\n', (10162, 10170), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((10183, 10241), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', '(64)'], {'output_dtype': 'tf.complex64'}), '(frozen_pos, 64, output_dtype=tf.complex64)\n', (10198, 10241), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((10770, 10801), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (10789, 10801), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((13253, 13284), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (13272, 13284), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((15106, 15137), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 
'p[1]'], {}), '(p[0], p[1])\n', (15125, 15137), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((15156, 15186), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (15168, 15186), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((19093, 19117), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (19107, 19117), True, 'import numpy as np\n'), ((22556, 22591), 'numpy.load', 'np.load', (["(ref_path + f + '_Avec.npy')"], {}), "(ref_path + f + '_Avec.npy')\n", (22563, 22591), True, 'import numpy as np\n'), ((22613, 22647), 'numpy.load', 'np.load', (["(ref_path + f + '_Lch.npy')"], {}), "(ref_path + f + '_Lch.npy')\n", (22620, 22647), True, 'import numpy as np\n'), ((22668, 22703), 'numpy.load', 'np.load', (["(ref_path + f + '_uhat.npy')"], {}), "(ref_path + f + '_uhat.npy')\n", (22675, 22703), True, 'import numpy as np\n'), ((24283, 24381), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': 'list_size', 'use_hybrid_sc': '(True)', 'crc_degree': 'crc_degree'}), '(frozen_pos, n, list_size=list_size, use_hybrid_sc=True,\n crc_degree=crc_degree)\n', (24298, 24381), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((25759, 25777), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (25772, 25777), True, 'import tensorflow as tf\n'), ((26240, 26269), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (26254, 26269), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26466, 26499), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', '(n + 1)'], {}), '(frozen_pos, n + 1)\n', (26480, 26499), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26722, 26753), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (26741, 26753), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((26765, 26797), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (26779, 26797), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26912, 26939), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['(32)', '(64)'], {}), '(32, 64)\n', (26931, 26939), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((26952, 27009), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', '(64)'], {'output_dtype': 'tf.complex64'}), '(frozen_pos, 64, output_dtype=tf.complex64)\n', (26966, 27009), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((28296, 28310), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (28308, 28310), False, 'from sionna.utils import BinarySource\n'), ((28339, 28370), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (28358, 28370), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((28389, 28419), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (28401, 28419), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((28438, 
28470), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (28452, 28470), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((29003, 29032), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (29017, 29032), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((29849, 29873), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (29863, 29873), True, 'import numpy as np\n'), ((30669, 30694), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (30688, 30694), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((30716, 30737), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (30735, 30737), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((30756, 30823), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'hard_out': 'hard_out', 'num_iter': 'num_iter'}), '(frozen_pos, n, hard_out=hard_out, num_iter=num_iter)\n', (30770, 30823), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((33045, 33076), 'numpy.zeros', 'np.zeros', (['[bs, n_stages + 1, n]'], {}), '([bs, n_stages + 1, n])\n', (33053, 33076), True, 'import numpy as np\n'), ((33095, 33126), 'numpy.zeros', 'np.zeros', (['[bs, n_stages + 1, n]'], {}), '([bs, n_stages + 1, n])\n', (33103, 33126), True, 'import numpy as np\n'), ((35171, 35192), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (35190, 35192), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((35290, 35353), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'hard_out': '(True)', 'num_iter': 'num_iter'}), '(frozen_pos, n, hard_out=True, num_iter=num_iter)\n', (35304, 35353), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((35416, 35480), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'hard_out': '(False)', 'num_iter': 'num_iter'}), '(frozen_pos, n, hard_out=False, num_iter=num_iter)\n', (35430, 35480), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((36986, 37004), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (36999, 37004), True, 'import tensorflow as tf\n'), ((37465, 37496), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': '(1)'}), '(enc, dec_type=1)\n', (37479, 37496), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((37553, 37588), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': '"""ABC"""'}), "(enc, dec_type='ABC')\n", (37567, 37588), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((37649, 37669), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['"""SC"""'], {}), "('SC')\n", (37663, 37669), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((39126, 39167), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (39140, 39167), True, 'import tensorflow as tf\n'), ((39253, 39293), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', 
(39267, 39293), True, 'import tensorflow as tf\n'), ((39904, 39942), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (39918, 39942), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((39997, 40024), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (40007, 40024), True, 'import tensorflow as tf\n'), ((40216, 40243), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (40226, 40243), True, 'import tensorflow as tf\n'), ((40827, 40865), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (40841, 40865), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((40930, 40954), 'tensorflow.tile', 'tf.tile', (['llr', '[bs, 1, 1]'], {}), '(llr, [bs, 1, 1])\n', (40937, 40954), True, 'import tensorflow as tf\n'), ((41547, 41585), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (41561, 41585), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((41686, 41715), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (41697, 41715), True, 'import tensorflow as tf\n'), ((43217, 43235), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (43230, 43235), True, 'import tensorflow as tf\n'), ((2607, 2626), 'numpy.ones', 'np.ones', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (2614, 2626), True, 'import numpy as np\n'), ((2881, 2905), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, u_hat)\n', (2895, 2905), True, 'import numpy as np\n'), ((6346, 6384), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', (6360, 6384), True, 'import numpy as np\n'), ((8089, 8120), 'numpy.array_equal', 'np.array_equal', (['u_hat_tf', 'u_hat'], {}), '(u_hat_tf, u_hat)\n', (8103, 8120), True, 'import numpy as np\n'), ((8580, 8599), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (8587, 8599), True, 'import tensorflow as tf\n'), ((8623, 8673), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {'output_dtype': 'dt_out'}), '(frozen_pos, n, output_dtype=dt_out)\n', (8637, 8673), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((25452, 25471), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (25459, 25471), True, 'import tensorflow as tf\n'), ((25495, 25546), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'output_dtype': 'dt_out'}), '(frozen_pos, n, output_dtype=dt_out)\n', (25510, 25546), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((27410, 27441), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (27429, 27441), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((27463, 27514), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'p[1]'], {'hard_out': 'hard_out'}), '(frozen_pos, p[1], hard_out=hard_out)\n', (27477, 27514), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((30373, 30411), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', 
(30387, 30411), True, 'import numpy as np\n'), ((32564, 32586), 'numpy.minimum', 'np.minimum', (['x', 'llr_max'], {}), '(x, llr_max)\n', (32574, 32586), True, 'import numpy as np\n'), ((32628, 32650), 'numpy.minimum', 'np.minimum', (['y', 'llr_max'], {}), '(y, llr_max)\n', (32638, 32650), True, 'import numpy as np\n'), ((33012, 33022), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (33019, 33022), True, 'import numpy as np\n'), ((35981, 36016), 'numpy.array_equal', 'np.array_equal', (['u_hat_bp', 'u_hat_ref'], {}), '(u_hat_bp, u_hat_ref)\n', (35995, 36016), True, 'import numpy as np\n'), ((36046, 36112), 'numpy.allclose', 'np.allclose', (['(-u_hat_bp_soft)', 'u_hat_ref_soft'], {'rtol': '(0.05)', 'atol': '(0.005)'}), '(-u_hat_bp_soft, u_hat_ref_soft, rtol=0.05, atol=0.005)\n', (36057, 36112), True, 'import numpy as np\n'), ((36687, 36706), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (36694, 36706), True, 'import tensorflow as tf\n'), ((36730, 36780), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'output_dtype': 'dt_out'}), '(frozen_pos, n, output_dtype=dt_out)\n', (36744, 36780), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((38238, 38252), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (38250, 38252), False, 'from sionna.utils import BinarySource\n'), ((38275, 38301), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (38289, 38301), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((38324, 38362), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (38338, 38362), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((39186, 39224), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (39200, 39224), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((40324, 40348), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (40338, 40348), True, 'import numpy as np\n'), ((42928, 42947), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (42935, 42947), True, 'import tensorflow as tf\n'), ((42971, 43011), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'output_dtype': 'dt_out'}), '(enc, output_dtype=dt_out)\n', (42985, 43011), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((3491, 3503), 'numpy.isnan', 'np.isnan', (['u1'], {}), '(u1)\n', (3499, 3503), True, 'import numpy as np\n'), ((3565, 3577), 'numpy.isinf', 'np.isinf', (['u1'], {}), '(u1)\n', (3573, 3577), True, 'import numpy as np\n'), ((3616, 3631), 'numpy.isneginf', 'np.isneginf', (['u1'], {}), '(u1)\n', (3627, 3631), True, 'import numpy as np\n'), ((3817, 3829), 'numpy.isnan', 'np.isnan', (['u2'], {}), '(u2)\n', (3825, 3829), True, 'import numpy as np\n'), ((3891, 3903), 'numpy.isinf', 'np.isinf', (['u2'], {}), '(u2)\n', (3899, 3903), True, 'import numpy as np\n'), ((3942, 3957), 'numpy.isneginf', 'np.isneginf', (['u2'], {}), '(u2)\n', (3953, 3957), True, 'import numpy as np\n'), ((7722, 7738), 'numpy.where', 'np.where', (['(A == 0)'], {}), '(A == 0)\n', (7730, 7738), True, 'import numpy as np\n'), ((7773, 7789), 'numpy.where', 'np.where', (['(A == 1)'], {}), '(A == 1)\n', (7781, 7789), True, 'import numpy as np\n'), ((17467, 17481), 
'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (17479, 17481), False, 'from sionna.utils import BinarySource\n'), ((17518, 17543), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (17537, 17543), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((17573, 17614), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (17587, 17614), True, 'import tensorflow as tf\n'), ((17939, 17979), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (17953, 17979), True, 'import tensorflow as tf\n'), ((19529, 19634), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, use_fast_scl=use_fast_scl, cpu_only=cpu_only,\n use_scatter=use_scatter)\n', (19544, 19634), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((19849, 19871), 'tensorflow.tile', 'tf.tile', (['b', '[bs, 1, 1]'], {}), '(b, [bs, 1, 1])\n', (19856, 19871), True, 'import tensorflow as tf\n'), ((22738, 22754), 'numpy.where', 'np.where', (['(A == 0)'], {}), '(A == 0)\n', (22746, 22754), True, 'import numpy as np\n'), ((22789, 22805), 'numpy.where', 'np.where', (['(A == 1)'], {}), '(A == 1)\n', (22797, 22805), True, 'import numpy as np\n'), ((27671, 27690), 'numpy.ones', 'np.ones', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (27678, 27690), True, 'import numpy as np\n'), ((27910, 27930), 'numpy.zeros', 'np.zeros', (['[bs, p[0]]'], {}), '([bs, p[0]])\n', (27918, 27930), True, 'import numpy as np\n'), ((32748, 32767), 'numpy.exp', 'np.exp', (['(x_in + y_in)'], {}), '(x_in + y_in)\n', (32754, 32767), True, 'import numpy as np\n'), ((32799, 32811), 'numpy.exp', 'np.exp', (['x_in'], {}), '(x_in)\n', (32805, 32811), True, 'import numpy as np\n'), ((32814, 32826), 'numpy.exp', 'np.exp', (['y_in'], {}), '(y_in)\n', (32820, 32826), True, 'import numpy as np\n'), ((34840, 34859), 'numpy.sign', 'np.sign', (['u_hat_soft'], {}), '(u_hat_soft)\n', (34847, 34859), True, 'import numpy as np\n'), ((41106, 41144), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', (41120, 41144), True, 'import numpy as np\n'), ((10980, 11089), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, p[1], use_fast_scl=use_fast_scl, cpu_only=\n cpu_only, use_scatter=use_scatter)\n', (10995, 11089), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((11625, 11645), 'numpy.zeros', 'np.zeros', (['[bs, p[0]]'], {}), '([bs, p[0]])\n', (11633, 11645), True, 'import numpy as np\n'), ((12084, 12211), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': 'list_size', 'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, list_size=list_size, use_fast_scl=\n use_fast_scl, cpu_only=cpu_only, use_scatter=use_scatter)\n', (12099, 12211), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((12732, 12749), 'numpy.zeros', 'np.zeros', (['[bs, k]'], {}), '([bs, k])\n', (12740, 12749), True, 'import numpy as np\n'), ((13463, 
13572), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, p[1], use_fast_scl=use_fast_scl, cpu_only=\n cpu_only, use_scatter=use_scatter)\n', (13478, 13572), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((14267, 14287), 'tensorflow.zeros', 'tf.zeros', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (14275, 14287), True, 'import tensorflow as tf\n'), ((15484, 15593), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, p[1], use_fast_scl=use_fast_scl, cpu_only=\n cpu_only, use_scatter=use_scatter)\n', (15499, 15593), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((16572, 16727), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': 'list_size', 'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter', 'crc_degree': 'crc_degree'}), '(frozen_pos, n, list_size=list_size, use_fast_scl=\n use_fast_scl, cpu_only=cpu_only, use_scatter=use_scatter, crc_degree=\n crc_degree)\n', (16587, 16727), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((17641, 17746), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, use_fast_scl=use_fast_scl, cpu_only=cpu_only,\n use_scatter=use_scatter)\n', (17656, 17746), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((20681, 20710), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (20692, 20710), True, 'import tensorflow as tf\n'), ((20829, 20957), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter', 'crc_degree': 'crc_degree'}), '(frozen_pos, n, use_fast_scl=use_fast_scl, cpu_only=cpu_only,\n use_scatter=use_scatter, crc_degree=crc_degree)\n', (20844, 20957), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((23066, 23184), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': '(1)', 'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, list_size=1, use_fast_scl=use_fast_scl,\n cpu_only=cpu_only, use_scatter=use_scatter)\n', (23081, 23184), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((27967, 27991), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, u_hat)\n', (27981, 27991), True, 'import numpy as np\n'), ((33575, 33600), 'numpy.mod', 'np.mod', (['ind_range', '(2 ** s)'], {}), '(ind_range, 2 ** s)\n', (33581, 33600), True, 'import numpy as np\n'), ((34266, 34291), 'numpy.mod', 'np.mod', (['ind_range', '(2 ** s)'], {}), '(ind_range, 2 ** s)\n', (34272, 34291), True, 'import numpy as np\n'), ((11360, 11379), 'numpy.ones', 'np.ones', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (11367, 11379), True, 'import numpy as np\n'), ((11686, 11710), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, 
u_hat)\n', (11700, 11710), True, 'import numpy as np\n'), ((12511, 12527), 'numpy.ones', 'np.ones', (['[bs, n]'], {}), '([bs, n])\n', (12518, 12527), True, 'import numpy as np\n'), ((12790, 12814), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, u_hat)\n', (12804, 12814), True, 'import numpy as np\n'), ((20053, 20091), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', (20067, 20091), True, 'import numpy as np\n'), ((23603, 23634), 'numpy.array_equal', 'np.array_equal', (['u_hat_tf', 'u_hat'], {}), '(u_hat_tf, u_hat)\n', (23617, 23634), True, 'import numpy as np\n'), ((14014, 14026), 'numpy.isnan', 'np.isnan', (['u1'], {}), '(u1)\n', (14022, 14026), True, 'import numpy as np\n'), ((14112, 14124), 'numpy.isinf', 'np.isinf', (['u1'], {}), '(u1)\n', (14120, 14124), True, 'import numpy as np\n'), ((14175, 14190), 'numpy.isneginf', 'np.isneginf', (['u1'], {}), '(u1)\n', (14186, 14190), True, 'import numpy as np\n'), ((14444, 14456), 'numpy.isnan', 'np.isnan', (['u2'], {}), '(u2)\n', (14452, 14456), True, 'import numpy as np\n'), ((14542, 14554), 'numpy.isinf', 'np.isinf', (['u2'], {}), '(u2)\n', (14550, 14554), True, 'import numpy as np\n'), ((14605, 14620), 'numpy.isneginf', 'np.isneginf', (['u2'], {}), '(u2)\n', (14616, 14620), True, 'import numpy as np\n'), ((31119, 31133), 'numpy.isfinite', 'np.isfinite', (['c'], {}), '(c)\n', (31130, 31133), True, 'import numpy as np\n')]
|
#
# Bentobox
# SDK - Specifications
# Graph Specifications
#
from collections import OrderedDict
from bento.ecs.graph import (
    GraphComponent,
    GraphEntity,
    GraphNode,
    wrap_const,
)
from bento.spec.ecs import ComponentDef, EntityDef
from bento.example.specs import Position, Speed
from bento.protos.graph_pb2 import Node
from bento.protos.references_pb2 import AttributeRef
from bento.utils import to_str_attr, to_yaml_proto
def test_graph_ecs_entity():
components = [GraphComponent.from_def(entity_id=1, component_def=Position)]
entity = GraphEntity(components=components, entity_id=1)
# check Entity's components accessible via `.components`
assert [c.component_name for c in entity.components] == [Position.name]
# check component accessible by name using [] notation
position = entity[Position]
assert isinstance(position, GraphComponent)
def test_graph_ecs_entity_from_def():
entity_id = 1
car = GraphEntity.from_def(
entity_def=EntityDef(components=[Position.name], entity_id=1),
component_defs=[Position],
)
assert car.id == entity_id
# check Entity's components accessible via `.components`
assert [c.component_name for c in car.components] == [Position.name]
# test that we cannnot create from EntityDef with unset id
has_error = False
try:
GraphEntity.from_def(EntityDef([]), [])
except ValueError:
has_error = True
assert has_error
def test_graph_ecs_entity_update_input_outputs():
    # test that use_input_outputs() propagates the input and output dicts to components
entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
car = GraphEntity.from_def(
entity_def=EntityDef(components=[Position, Speed], entity_id=1),
component_defs=[Position, Speed],
)
car.use_input_outputs(inputs, outputs)
    # get/set should propagate retrieve/mutate nodes to the inputs and outputs dicts
    car_pos_x = car[Position].x
    car[Position].x = 1
    car_speed_x = car[Speed].x
    car[Speed].x = 2
    pos_attr_ref = AttributeRef(
        entity_id=entity_id, component=Position.name, attribute="x"
    )
    speed_attr_ref = AttributeRef(
        entity_id=entity_id, component=Speed.name, attribute="x"
    )
pos_expected_input = Node(retrieve_op=Node.Retrieve(retrieve_attr=pos_attr_ref))
pos_expected_output = Node(
mutate_op=Node.Mutate(mutate_attr=pos_attr_ref, to_node=wrap_const(1))
)
    assert inputs[to_str_attr(pos_attr_ref)] == pos_expected_input
    assert outputs[to_str_attr(pos_attr_ref)] == pos_expected_output
speed_expected_input = Node(retrieve_op=Node.Retrieve(retrieve_attr=speed_attr_ref))
speed_expected_output = Node(
mutate_op=Node.Mutate(mutate_attr=speed_attr_ref, to_node=wrap_const(2))
)
    assert inputs[to_str_attr(speed_attr_ref)] == speed_expected_input
    assert outputs[to_str_attr(speed_attr_ref)] == speed_expected_output
def test_graph_ecs_component_from_def():
entity_id = 1
position = GraphComponent.from_def(entity_id, Position)
assert position.component_name == Position.name
# test that we cannnot create from ComponentDef with unset name
has_error = False
try:
GraphComponent.from_def(entity_id, ComponentDef("", {}))
except ValueError:
has_error = True
assert has_error
def test_graph_ecs_component_get_attr():
entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
position = GraphComponent.from_def(entity_id, Position)
position.use_input_outputs(inputs, outputs)
# check that getting an attribute from a component returns a GraphNode
# wrapping a Retrieve node that retrieves the attribute
pos_x = position.x
attr_ref = AttributeRef(entity_id=entity_id, component=Position.name, attribute="x")
expected_node = Node(retrieve_op=Node.Retrieve(retrieve_attr=attr_ref))
assert pos_x.node == expected_node
    # check that the component records the retrieve node in the inputs dict
assert inputs[to_str_attr(attr_ref)].node == expected_node
# check that retrieving the same attribute only records it once
    pos_x_again = position.x
assert len(inputs) == 1
def test_graph_ecs_component_get_attr_preserve_set_graph():
entity_id = 1
position = GraphComponent.from_def(entity_id, Position)
# check that getting an attribute from a component preserves
    # any graph that has already been built by set_attr()
position.x = 2
pos_x = position.x
expected_node = GraphNode.wrap(2)
assert to_yaml_proto(pos_x.node) == to_yaml_proto(expected_node.node) # type: ignore
def test_graph_ecs_component_set_attr_node():
entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
position = GraphComponent.from_def(entity_id, Position)
position.use_input_outputs(inputs, outputs)
pos_x = position.x
position.y = 10
    # check that setting an attribute to a graph node records the expected output node
position.y = pos_x
attr_ref = AttributeRef(
entity_id=entity_id,
component=Position.name,
attribute="y",
)
expected_node = Node(
mutate_op=Node.Mutate(
mutate_attr=attr_ref,
to_node=pos_x.node,
)
)
assert outputs[to_str_attr(attr_ref)].node == expected_node
# check that setting attribute only takes the last definition
# the first definition should be ignored since the attribute is redefined
assert len(outputs) == 1
def test_graph_ecs_component_set_attr_native_value():
entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
position = GraphComponent.from_def(entity_id, Position)
position.use_input_outputs(inputs, outputs)
    # check that setting an attribute to a native value records the expected output node
position.y = 3
attr_ref = AttributeRef(
entity_id=entity_id,
component=Position.name,
attribute="y",
)
expected_node = Node(
mutate_op=Node.Mutate(
mutate_attr=attr_ref,
to_node=wrap_const(3),
)
)
assert outputs[to_str_attr(attr_ref)] == expected_node
def test_graph_ecs_component_set_attr_ignore_self_assign():
entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
position = GraphComponent.from_def(entity_id, Position)
position.use_input_outputs(inputs, outputs)
    # test that self-assignment is ignored, since we are setting the attribute to itself
    position.x = position.x
assert len(outputs) == 0
def test_graph_ecs_component_aug_assign_node():
entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
position = GraphComponent.from_def(entity_id, Position)
position.use_input_outputs(inputs, outputs)
    # check augmented assignment flags the attribute (position.y) as both input and output
position.y += 30
attr_ref = AttributeRef(
entity_id=entity_id,
component=Position.name,
attribute="y",
)
expected_input = Node(retrieve_op=Node.Retrieve(retrieve_attr=attr_ref))
expected_output = Node(
mutate_op=Node.Mutate(
mutate_attr=attr_ref,
to_node=Node(
add_op=Node.Add(
x=expected_input,
y=wrap_const(30),
)
),
)
)
assert len(inputs) == 1
assert inputs[to_str_attr(attr_ref)] == expected_input
assert len(outputs) == 1
assert outputs[to_str_attr(attr_ref)] == expected_output
def test_graph_ecs_node_wrap():
wrap_cases = [
["w", GraphNode(node=wrap_const("w"))],
[wrap_const(1), GraphNode(node=wrap_const(1))],
[GraphNode(node=wrap_const(True)), GraphNode(node=wrap_const(True))],
]
for val, expected in wrap_cases:
assert GraphNode.wrap(val) == expected
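# Editor's sketch (not part of the original suite; the helper name is
# hypothetical): the recording pattern the tests above exercise, end to end.
# Reads on a component register Retrieve nodes in `inputs`; writes register
# Mutate nodes in `outputs`, both keyed by the stringified AttributeRef.
def _demo_recording_pattern():
    inputs, outputs = OrderedDict(), OrderedDict()
    position = GraphComponent.from_def(1, Position)
    position.use_input_outputs(inputs, outputs)
    position.y = position.x + 1  # one retrieve (x), one mutate (y)
    assert len(inputs) == 1 and len(outputs) == 1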
|
[
"bento.protos.graph_pb2.Node.Mutate",
"bento.spec.ecs.ComponentDef",
"bento.ecs.graph.wrap_const",
"bento.ecs.graph.GraphNode.wrap",
"bento.ecs.graph.GraphEntity",
"bento.ecs.graph.GraphComponent.from_def",
"bento.spec.ecs.EntityDef",
"bento.utils.to_yaml_proto",
"bento.utils.to_str_attr",
"bento.protos.references_pb2.AttributeRef",
"collections.OrderedDict",
"bento.protos.graph_pb2.Node.Retrieve"
] |
[((606, 653), 'bento.ecs.graph.GraphEntity', 'GraphEntity', ([], {'components': 'components', 'entity_id': '(1)'}), '(components=components, entity_id=1)\n', (617, 653), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((2100, 2173), 'bento.protos.references_pb2.AttributeRef', 'AttributeRef', ([], {'entity_id': 'entity_id', 'component': 'Position.name', 'attribute': '"""x"""'}), "(entity_id=entity_id, component=Position.name, attribute='x')\n", (2112, 2173), False, 'from bento.protos.references_pb2 import AttributeRef\n'), ((2209, 2282), 'bento.protos.references_pb2.AttributeRef', 'AttributeRef', ([], {'entity_id': 'entity_id', 'component': 'Position.name', 'attribute': '"""x"""'}), "(entity_id=entity_id, component=Position.name, attribute='x')\n", (2221, 2282), False, 'from bento.protos.references_pb2 import AttributeRef\n'), ((3065, 3109), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (3088, 3109), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((3518, 3562), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (3541, 3562), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((3786, 3859), 'bento.protos.references_pb2.AttributeRef', 'AttributeRef', ([], {'entity_id': 'entity_id', 'component': 'Position.name', 'attribute': '"""x"""'}), "(entity_id=entity_id, component=Position.name, attribute='x')\n", (3798, 3859), False, 'from bento.protos.references_pb2 import AttributeRef\n'), ((4303, 4347), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (4326, 4347), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((4536, 4553), 'bento.ecs.graph.GraphNode.wrap', 'GraphNode.wrap', (['(2)'], {}), '(2)\n', (4550, 4553), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((4772, 4816), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (4795, 4816), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((5013, 5086), 'bento.protos.references_pb2.AttributeRef', 'AttributeRef', ([], {'entity_id': 'entity_id', 'component': 'Position.name', 'attribute': '"""y"""'}), "(entity_id=entity_id, component=Position.name, attribute='y')\n", (5025, 5086), False, 'from bento.protos.references_pb2 import AttributeRef\n'), ((5630, 5674), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (5653, 5674), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((5831, 5904), 'bento.protos.references_pb2.AttributeRef', 'AttributeRef', ([], {'entity_id': 'entity_id', 'component': 'Position.name', 'attribute': '"""y"""'}), "(entity_id=entity_id, component=Position.name, attribute='y')\n", (5843, 5904), False, 'from bento.protos.references_pb2 import AttributeRef\n'), ((6279, 6323), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (6302, 6323), False, 'from bento.ecs.graph import 
GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((6640, 6684), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', (['entity_id', 'Position'], {}), '(entity_id, Position)\n', (6663, 6684), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((6860, 6933), 'bento.protos.references_pb2.AttributeRef', 'AttributeRef', ([], {'entity_id': 'entity_id', 'component': 'Position.name', 'attribute': '"""y"""'}), "(entity_id=entity_id, component=Position.name, attribute='y')\n", (6872, 6933), False, 'from bento.protos.references_pb2 import AttributeRef\n'), ((531, 591), 'bento.ecs.graph.GraphComponent.from_def', 'GraphComponent.from_def', ([], {'entity_id': '(1)', 'component_def': 'Position'}), '(entity_id=1, component_def=Position)\n', (554, 591), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((1675, 1688), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1686, 1688), False, 'from collections import OrderedDict\n'), ((1690, 1703), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1701, 1703), False, 'from collections import OrderedDict\n'), ((3474, 3487), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3485, 3487), False, 'from collections import OrderedDict\n'), ((3489, 3502), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3500, 3502), False, 'from collections import OrderedDict\n'), ((4565, 4590), 'bento.utils.to_yaml_proto', 'to_yaml_proto', (['pos_x.node'], {}), '(pos_x.node)\n', (4578, 4590), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((4594, 4627), 'bento.utils.to_yaml_proto', 'to_yaml_proto', (['expected_node.node'], {}), '(expected_node.node)\n', (4607, 4627), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((4728, 4741), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4739, 4741), False, 'from collections import OrderedDict\n'), ((4743, 4756), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4754, 4756), False, 'from collections import OrderedDict\n'), ((5586, 5599), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5597, 5599), False, 'from collections import OrderedDict\n'), ((5601, 5614), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5612, 5614), False, 'from collections import OrderedDict\n'), ((6235, 6248), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6246, 6248), False, 'from collections import OrderedDict\n'), ((6250, 6263), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6261, 6263), False, 'from collections import OrderedDict\n'), ((6596, 6609), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6607, 6609), False, 'from collections import OrderedDict\n'), ((6611, 6624), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6622, 6624), False, 'from collections import OrderedDict\n'), ((1039, 1089), 'bento.spec.ecs.EntityDef', 'EntityDef', ([], {'components': '[Position.name]', 'entity_id': '(1)'}), '(components=[Position.name], entity_id=1)\n', (1048, 1089), False, 'from bento.spec.ecs import ComponentDef, EntityDef\n'), ((1421, 1434), 'bento.spec.ecs.EntityDef', 'EntityDef', (['[]'], {}), '([])\n', (1430, 1434), False, 'from bento.spec.ecs import ComponentDef, EntityDef\n'), ((1755, 1807), 'bento.spec.ecs.EntityDef', 'EntityDef', ([], {'components': '[Position, Speed]', 'entity_id': '(1)'}), '(components=[Position, Speed], entity_id=1)\n', (1764, 1807), 
False, 'from bento.spec.ecs import ComponentDef, EntityDef\n'), ((2340, 2381), 'bento.protos.graph_pb2.Node.Retrieve', 'Node.Retrieve', ([], {'retrieve_attr': 'pos_attr_ref'}), '(retrieve_attr=pos_attr_ref)\n', (2353, 2381), False, 'from bento.protos.graph_pb2 import Node\n'), ((2518, 2543), 'bento.utils.to_str_attr', 'to_str_attr', (['pos_attr_ref'], {}), '(pos_attr_ref)\n', (2529, 2543), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((2586, 2611), 'bento.utils.to_str_attr', 'to_str_attr', (['pos_attr_ref'], {}), '(pos_attr_ref)\n', (2597, 2611), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((2680, 2723), 'bento.protos.graph_pb2.Node.Retrieve', 'Node.Retrieve', ([], {'retrieve_attr': 'speed_attr_ref'}), '(retrieve_attr=speed_attr_ref)\n', (2693, 2723), False, 'from bento.protos.graph_pb2 import Node\n'), ((2864, 2891), 'bento.utils.to_str_attr', 'to_str_attr', (['speed_attr_ref'], {}), '(speed_attr_ref)\n', (2875, 2891), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((2936, 2963), 'bento.utils.to_str_attr', 'to_str_attr', (['speed_attr_ref'], {}), '(speed_attr_ref)\n', (2947, 2963), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((3304, 3324), 'bento.spec.ecs.ComponentDef', 'ComponentDef', (['""""""', '{}'], {}), "('', {})\n", (3316, 3324), False, 'from bento.spec.ecs import ComponentDef, EntityDef\n'), ((3897, 3934), 'bento.protos.graph_pb2.Node.Retrieve', 'Node.Retrieve', ([], {'retrieve_attr': 'attr_ref'}), '(retrieve_attr=attr_ref)\n', (3910, 3934), False, 'from bento.protos.graph_pb2 import Node\n'), ((5162, 5215), 'bento.protos.graph_pb2.Node.Mutate', 'Node.Mutate', ([], {'mutate_attr': 'attr_ref', 'to_node': 'pos_x.node'}), '(mutate_attr=attr_ref, to_node=pos_x.node)\n', (5173, 5215), False, 'from bento.protos.graph_pb2 import Node\n'), ((6097, 6118), 'bento.utils.to_str_attr', 'to_str_attr', (['attr_ref'], {}), '(attr_ref)\n', (6108, 6118), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((7003, 7040), 'bento.protos.graph_pb2.Node.Retrieve', 'Node.Retrieve', ([], {'retrieve_attr': 'attr_ref'}), '(retrieve_attr=attr_ref)\n', (7016, 7040), False, 'from bento.protos.graph_pb2 import Node\n'), ((7365, 7386), 'bento.utils.to_str_attr', 'to_str_attr', (['attr_ref'], {}), '(attr_ref)\n', (7376, 7386), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((7454, 7475), 'bento.utils.to_str_attr', 'to_str_attr', (['attr_ref'], {}), '(attr_ref)\n', (7465, 7475), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((7606, 7619), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(1)'], {}), '(1)\n', (7616, 7619), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((7790, 7809), 'bento.ecs.graph.GraphNode.wrap', 'GraphNode.wrap', (['val'], {}), '(val)\n', (7804, 7809), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((4044, 4065), 'bento.utils.to_str_attr', 'to_str_attr', (['attr_ref'], {}), '(attr_ref)\n', (4055, 4065), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((5276, 5297), 'bento.utils.to_str_attr', 'to_str_attr', (['attr_ref'], {}), '(attr_ref)\n', (5287, 5297), False, 'from bento.utils import to_str_attr, to_yaml_proto\n'), ((2479, 2492), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(1)'], {}), '(1)\n', (2489, 2492), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((2825, 2838), 
'bento.ecs.graph.wrap_const', 'wrap_const', (['(2)'], {}), '(2)\n', (2835, 2838), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((6047, 6060), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(3)'], {}), '(3)\n', (6057, 6060), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((7578, 7593), 'bento.ecs.graph.wrap_const', 'wrap_const', (['"""w"""'], {}), "('w')\n", (7588, 7593), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((7636, 7649), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(1)'], {}), '(1)\n', (7646, 7649), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((7677, 7693), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(True)'], {}), '(True)\n', (7687, 7693), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((7711, 7727), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(True)'], {}), '(True)\n', (7721, 7727), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n'), ((7254, 7268), 'bento.ecs.graph.wrap_const', 'wrap_const', (['(30)'], {}), '(30)\n', (7264, 7268), False, 'from bento.ecs.graph import GraphComponent, GraphEntity, GraphNode, to_str_attr, wrap_const\n')]
|
import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
# Resnet Blocks
class ResnetBlockFC(nn.Module):
''' Fully connected ResNet Block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
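# Editor's usage sketch (illustrative, not from the original module): because
# fc_1's weight is zero-initialized, a fresh block's residual branch only
# contributes fc_1's bias, so the block starts out close to its shortcut map.
def _demo_resnet_block_fc():
    block = ResnetBlockFC(size_in=128, size_out=64)
    out = block(torch.randn(16, 128))
    assert out.shape == (16, 64)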
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
        c_dim (int): dimension of latent conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm'):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
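# Editor's usage sketch (illustrative): unlike ResnetBlockFC, this block is
# conditional: forward() takes features x of shape (B, C, T) together with a
# latent code c of shape (B, c_dim) that drives the conditional batch norms.
def _demo_cresnet_block_conv1d():
    block = CResnetBlockConv1d(c_dim=32, size_in=64)
    x = torch.randn(8, 64, 100)
    c = torch.randn(8, 32)
    assert block(x, c).shape == x.shape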
class ResnetBlockConv1d(nn.Module):
''' 1D-Convolutional ResNet block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_h=None, size_out=None):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.bn_0 = nn.BatchNorm1d(size_in)
self.bn_1 = nn.BatchNorm1d(size_h)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(self.bn_0(x)))
dx = self.fc_1(self.actvn(self.bn_1(net)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
# Utility modules
class AffineLayer(nn.Module):
''' Affine layer class.
Args:
        c_dim (int): dimension of latent conditioned code c
dim (int): input dimension
'''
def __init__(self, c_dim, dim=3):
super().__init__()
self.c_dim = c_dim
self.dim = dim
# Submodules
self.fc_A = nn.Linear(c_dim, dim * dim)
self.fc_b = nn.Linear(c_dim, dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.fc_A.weight)
nn.init.zeros_(self.fc_b.weight)
with torch.no_grad():
self.fc_A.bias.copy_(torch.eye(3).view(-1))
self.fc_b.bias.copy_(torch.tensor([0., 0., 2.]))
def forward(self, x, p):
assert(x.size(0) == p.size(0))
assert(p.size(2) == self.dim)
batch_size = x.size(0)
A = self.fc_A(x).view(batch_size, 3, 3)
b = self.fc_b(x).view(batch_size, 1, 3)
out = p @ A + b
return out
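# Note: reset_parameters() zeroes both weight matrices, so at initialization
# A is the 3x3 identity and b = [0, 0, 2] for every conditioning input x,
# i.e. the layer starts as the fixed map p -> p + [0, 0, 2]. Both the bias
# setup and forward() hardcode dim == 3.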
class _RunningBatchNorm(nn.modules.batchnorm._NormBase):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super().__init__(
num_features, eps, momentum, affine, track_running_stats)
def forward(self, input: Tensor) -> Tensor:
self._check_input_dim(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
return F.batch_norm(
input,
            # unlike torch's stock _BatchNorm, the buffers are always passed here
self.running_mean,
self.running_var,
self.weight, self.bias, bn_training, exponential_average_factor, self.eps)
class RunningBatchNorm1d(_RunningBatchNorm):
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # nn.GroupNorm1d does not exist in torch.nn; nn.GroupNorm is the
            # dimension-agnostic layer (num_groups=1 here is an assumption)
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
elif norm_method == 'no_norm':
self.bn = nn.Identity()
elif norm_method == 'running_batch_norm':
self.bn = RunningBatchNorm1d(f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
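# Editor's usage sketch (illustrative, not from the original module): at
# initialization conv_gamma outputs ones and conv_beta zeros, so CBatchNorm1d
# starts out exactly as plain (non-affine) batch norm until training moves
# the conditioning convolutions away from their reset values.
def _demo_cbatchnorm1d():
    cbn = CBatchNorm1d(c_dim=32, f_dim=64)
    x = torch.randn(8, 64, 100)
    c = torch.randn(8, 32)
    assert cbn(x, c).shape == x.shape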
|
[
"torch.nn.functional.batch_norm",
"torch.nn.ReLU",
"torch.eye",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d",
"torch.nn.GroupNorm1d",
"torch.nn.InstanceNorm1d",
"torch.nn.init.zeros_",
"torch.nn.Linear",
"torch.nn.init.ones_",
"torch.nn.Identity",
"torch.no_grad",
"torch.tensor"
] |
[((701, 727), 'torch.nn.Linear', 'nn.Linear', (['size_in', 'size_h'], {}), '(size_in, size_h)\n', (710, 727), True, 'import torch.nn as nn\n'), ((748, 775), 'torch.nn.Linear', 'nn.Linear', (['size_h', 'size_out'], {}), '(size_h, size_out)\n', (757, 775), True, 'import torch.nn as nn\n'), ((797, 806), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (804, 806), True, 'import torch.nn as nn\n'), ((989, 1021), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.fc_1.weight'], {}), '(self.fc_1.weight)\n', (1003, 1021), True, 'import torch.nn as nn\n'), ((2197, 2226), 'torch.nn.Conv1d', 'nn.Conv1d', (['size_in', 'size_h', '(1)'], {}), '(size_in, size_h, 1)\n', (2206, 2226), True, 'import torch.nn as nn\n'), ((2247, 2277), 'torch.nn.Conv1d', 'nn.Conv1d', (['size_h', 'size_out', '(1)'], {}), '(size_h, size_out, 1)\n', (2256, 2277), True, 'import torch.nn as nn\n'), ((2299, 2308), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2306, 2308), True, 'import torch.nn as nn\n'), ((2494, 2526), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.fc_1.weight'], {}), '(self.fc_1.weight)\n', (2508, 2526), True, 'import torch.nn as nn\n'), ((3379, 3402), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['size_in'], {}), '(size_in)\n', (3393, 3402), True, 'import torch.nn as nn\n'), ((3423, 3445), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['size_h'], {}), '(size_h)\n', (3437, 3445), True, 'import torch.nn as nn\n'), ((3467, 3496), 'torch.nn.Conv1d', 'nn.Conv1d', (['size_in', 'size_h', '(1)'], {}), '(size_in, size_h, 1)\n', (3476, 3496), True, 'import torch.nn as nn\n'), ((3517, 3547), 'torch.nn.Conv1d', 'nn.Conv1d', (['size_h', 'size_out', '(1)'], {}), '(size_h, size_out, 1)\n', (3526, 3547), True, 'import torch.nn as nn\n'), ((3569, 3578), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3576, 3578), True, 'import torch.nn as nn\n'), ((3765, 3797), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.fc_1.weight'], {}), '(self.fc_1.weight)\n', (3779, 3797), True, 'import torch.nn as nn\n'), ((4411, 4438), 'torch.nn.Linear', 'nn.Linear', (['c_dim', '(dim * dim)'], {}), '(c_dim, dim * dim)\n', (4420, 4438), True, 'import torch.nn as nn\n'), ((4459, 4480), 'torch.nn.Linear', 'nn.Linear', (['c_dim', 'dim'], {}), '(c_dim, dim)\n', (4468, 4480), True, 'import torch.nn as nn\n'), ((4554, 4586), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.fc_A.weight'], {}), '(self.fc_A.weight)\n', (4568, 4586), True, 'import torch.nn as nn\n'), ((4595, 4627), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.fc_b.weight'], {}), '(self.fc_b.weight)\n', (4609, 4627), True, 'import torch.nn as nn\n'), ((7028, 7164), 'torch.nn.functional.batch_norm', 'F.batch_norm', (['input', 'self.running_mean', 'self.running_var', 'self.weight', 'self.bias', 'bn_training', 'exponential_average_factor', 'self.eps'], {}), '(input, self.running_mean, self.running_var, self.weight, self.\n bias, bn_training, exponential_average_factor, self.eps)\n', (7040, 7164), True, 'import torch.nn.functional as F\n'), ((8032, 8058), 'torch.nn.Conv1d', 'nn.Conv1d', (['c_dim', 'f_dim', '(1)'], {}), '(c_dim, f_dim, 1)\n', (8041, 8058), True, 'import torch.nn as nn\n'), ((8084, 8110), 'torch.nn.Conv1d', 'nn.Conv1d', (['c_dim', 'f_dim', '(1)'], {}), '(c_dim, f_dim, 1)\n', (8093, 8110), True, 'import torch.nn as nn\n'), ((8751, 8789), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.conv_gamma.weight'], {}), '(self.conv_gamma.weight)\n', (8765, 8789), True, 'import torch.nn as nn\n'), ((8798, 8835), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.conv_beta.weight'], {}), 
'(self.conv_beta.weight)\n', (8812, 8835), True, 'import torch.nn as nn\n'), ((8844, 8879), 'torch.nn.init.ones_', 'nn.init.ones_', (['self.conv_gamma.bias'], {}), '(self.conv_gamma.bias)\n', (8857, 8879), True, 'import torch.nn as nn\n'), ((8888, 8923), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.conv_beta.bias'], {}), '(self.conv_beta.bias)\n', (8902, 8923), True, 'import torch.nn as nn\n'), ((915, 955), 'torch.nn.Linear', 'nn.Linear', (['size_in', 'size_out'], {'bias': '(False)'}), '(size_in, size_out, bias=False)\n', (924, 955), True, 'import torch.nn as nn\n'), ((2417, 2460), 'torch.nn.Conv1d', 'nn.Conv1d', (['size_in', 'size_out', '(1)'], {'bias': '(False)'}), '(size_in, size_out, 1, bias=False)\n', (2426, 2460), True, 'import torch.nn as nn\n'), ((3687, 3730), 'torch.nn.Conv1d', 'nn.Conv1d', (['size_in', 'size_out', '(1)'], {'bias': '(False)'}), '(size_in, size_out, 1, bias=False)\n', (3696, 3730), True, 'import torch.nn as nn\n'), ((4641, 4656), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4654, 4656), False, 'import torch\n'), ((8173, 8208), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['f_dim'], {'affine': '(False)'}), '(f_dim, affine=False)\n', (8187, 8208), True, 'import torch.nn as nn\n'), ((4747, 4776), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 2.0]'], {}), '([0.0, 0.0, 2.0])\n', (4759, 4776), False, 'import torch\n'), ((8276, 8314), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', (['f_dim'], {'affine': '(False)'}), '(f_dim, affine=False)\n', (8293, 8314), True, 'import torch.nn as nn\n'), ((8379, 8414), 'torch.nn.GroupNorm1d', 'nn.GroupNorm1d', (['f_dim'], {'affine': '(False)'}), '(f_dim, affine=False)\n', (8393, 8414), True, 'import torch.nn as nn\n'), ((4691, 4703), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (4700, 4703), False, 'import torch\n'), ((8476, 8489), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (8487, 8489), True, 'import torch.nn as nn\n')]
|
import pytest
import responses
import status
from django.urls import reverse
from apps.tickets.models import Purchase
pytestmark = pytest.mark.django_db
def test_event_list(admin_client, event):
url = reverse('tickets:event_list')
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert 'cart' in response.context
assert event in response.context['events']
def test_event_detail(admin_client, event):
url = reverse('tickets:event_detail', args=[event.id])
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert 'cart' in response.context
assert response.context['event'] == event
def test_cart_detail(admin_client):
url = reverse('tickets:cart_detail')
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert 'cart' in response.context
def test_cart_clear(admin_client, admin_user, cart_item):
url = reverse('tickets:cart_clear')
cart = cart_item.cart
assert cart.cart_items.count() == 1
response = admin_client.post(url, follow=True)
assert response.status_code == status.HTTP_200_OK
assert cart.cart_items.count() == 0
def test_cart_add_item(admin_client, cart, ticket):
url = reverse('tickets:cart_add_item')
assert cart.cart_items.count() == 0
response = admin_client.post(url, {'ticket': ticket.id, 'quantity': 1}, follow=True)
assert response.status_code == status.HTTP_200_OK
assert cart.cart_items.count() == 1
@responses.activate
def test_purchase_create(admin_client, cart_item, pagseguro_checkout_response):
responses.add(
responses.POST,
'https://ws.sandbox.pagseguro.uol.com.br/v2/checkout',
body=pagseguro_checkout_response,
status=200
)
url = reverse('tickets:purchase_create')
response = admin_client.post(url, follow=True)
assert response.status_code == status.HTTP_200_OK
purchase = Purchase.objects.filter(user=cart_item.cart.user).first()
assert purchase.status == 'pending'
assert purchase.pagseguro_redirect_url
def test_purchase_list(admin_client, purchase):
url = reverse('tickets:purchase_list')
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert purchase in response.context['purchases']
def test_purchase_detail(admin_client, purchase):
url = reverse('tickets:purchase_detail', args=[purchase.id])
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.context['purchase'] == purchase
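# Editor's note (assumption, not taken from the repository): the
# `pagseguro_checkout_response` fixture mocked above is expected to hold the
# XML body PagSeguro's sandbox returns from POST /v2/checkout, roughly
#   <checkout><code>...</code><date>...</date></checkout>
# where <code> is the checkout code from which `pagseguro_redirect_url`
# is built.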
|
[
"django.urls.reverse",
"responses.add",
"apps.tickets.models.Purchase.objects.filter"
] |
[((209, 238), 'django.urls.reverse', 'reverse', (['"""tickets:event_list"""'], {}), "('tickets:event_list')\n", (216, 238), False, 'from django.urls import reverse\n'), ((471, 519), 'django.urls.reverse', 'reverse', (['"""tickets:event_detail"""'], {'args': '[event.id]'}), "('tickets:event_detail', args=[event.id])\n", (478, 519), False, 'from django.urls import reverse\n'), ((743, 773), 'django.urls.reverse', 'reverse', (['"""tickets:cart_detail"""'], {}), "('tickets:cart_detail')\n", (750, 773), False, 'from django.urls import reverse\n'), ((973, 1002), 'django.urls.reverse', 'reverse', (['"""tickets:cart_clear"""'], {}), "('tickets:cart_clear')\n", (980, 1002), False, 'from django.urls import reverse\n'), ((1278, 1310), 'django.urls.reverse', 'reverse', (['"""tickets:cart_add_item"""'], {}), "('tickets:cart_add_item')\n", (1285, 1310), False, 'from django.urls import reverse\n'), ((1640, 1779), 'responses.add', 'responses.add', (['responses.POST', '"""https://ws.sandbox.pagseguro.uol.com.br/v2/checkout"""'], {'body': 'pagseguro_checkout_response', 'status': '(200)'}), "(responses.POST,\n 'https://ws.sandbox.pagseguro.uol.com.br/v2/checkout', body=\n pagseguro_checkout_response, status=200)\n", (1653, 1779), False, 'import responses\n'), ((1819, 1853), 'django.urls.reverse', 'reverse', (['"""tickets:purchase_create"""'], {}), "('tickets:purchase_create')\n", (1826, 1853), False, 'from django.urls import reverse\n'), ((2175, 2207), 'django.urls.reverse', 'reverse', (['"""tickets:purchase_list"""'], {}), "('tickets:purchase_list')\n", (2182, 2207), False, 'from django.urls import reverse\n'), ((2414, 2468), 'django.urls.reverse', 'reverse', (['"""tickets:purchase_detail"""'], {'args': '[purchase.id]'}), "('tickets:purchase_detail', args=[purchase.id])\n", (2421, 2468), False, 'from django.urls import reverse\n'), ((1974, 2023), 'apps.tickets.models.Purchase.objects.filter', 'Purchase.objects.filter', ([], {'user': 'cart_item.cart.user'}), '(user=cart_item.cart.user)\n', (1997, 2023), False, 'from apps.tickets.models import Purchase\n')]
|
from scipy.integrate import odeint
from scipy.optimize import fsolve
import numpy as np
import itertools
import matplotlib.pyplot as plt
from colorlines import colorline
class PhaseDiagram:
def __init__(self, system):
self.system = system
self.fig, self.ax = plt.subplots(1, 1)
def steady_states(self, search_space, discretization=5):
linspaces = [np.linspace(axis[0], axis[1], discretization) for axis in search_space]
guesses = list(itertools.product(*linspaces))
ss_system = lambda x: self.system(x, 0)
results = []
for guess in guesses:
            calc_result, infodict, ier, mesg = fsolve(ss_system, guess, full_output=True)
            if ier == 1:  # fsolve signals success only with ier == 1
if len(results) == 0:
results.append(calc_result)
else:
new_guess = True
for result in results:
if all(np.isclose(calc_result, result, atol=1e-2)):
new_guess = False
if new_guess:
results.append(calc_result)
else:
                print('convergence failure:', mesg)
return results
def plot_trajectory(self, x0, time_sequence, ax, fade=0.1, linewidth=1):
        r = odeint(self.system, x0, time_sequence)
colorline(x=r[:,0], y=r[:,1], ax=ax, cmap='bone_r', fade=fade, linewidth=linewidth)
# plt.plot(r[:,0], r[:,1])
def random_paths(self, n, time_sequence, x_rand_interval, y_rand_interval, fade=0.1, linewidth=1):
self.fig.subplots_adjust(
top=0.981,
bottom=0.043,
left=0.029,
right=0.981,
hspace=0.2,
wspace=0.2
)
for _ in range(n):
x_random = np.random.uniform(x_rand_interval[0], x_rand_interval[1])
y_random = np.random.uniform(y_rand_interval[0], y_rand_interval[1])
self.plot_trajectory([x_random, y_random], time_sequence=time_sequence, ax=self.ax, fade=fade, linewidth=linewidth)
plt.show()
def f(x, t):
y = np.zeros(shape=2)
y[0] = x[0] - x[1]*x[0]
y[1] = x[0]*x[1] - x[1]
return y
PD = PhaseDiagram(f)
steady_states = PD.steady_states(search_space=[[-10,40],[-10,40]])
print(steady_states)
time_sequence = np.linspace(0.1, 2.5, 1000)
PD.random_paths(n=150, time_sequence=time_sequence, x_rand_interval=[-.4, 1.5], y_rand_interval=[0, 2], fade=1.0)
# PD.fig.savefig('PD1.png', dpi=300)
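# Worked check (added by the editor): for this Lotka-Volterra-like system,
# x' = x(1 - y) and y' = y(x - 1), so the only steady states are (0, 0) and
# (1, 1); steady_states() should recover both, up to fsolve tolerance, from
# the grid of initial guesses.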
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"scipy.integrate.odeint",
"numpy.zeros",
"scipy.optimize.fsolve",
"numpy.isclose",
"numpy.linspace",
"itertools.product",
"matplotlib.pyplot.subplots",
"colorlines.colorline"
] |
[((2448, 2475), 'numpy.linspace', 'np.linspace', (['(0.1)', '(2.5)', '(1000)'], {}), '(0.1, 2.5, 1000)\n', (2459, 2475), True, 'import numpy as np\n'), ((2227, 2244), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2)'}), '(shape=2)\n', (2235, 2244), True, 'import numpy as np\n'), ((324, 342), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (336, 342), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1424), 'scipy.integrate.odeint', 'odeint', (['f', 'x0', 'time_sequence'], {}), '(f, x0, time_sequence)\n', (1402, 1424), False, 'from scipy.integrate import odeint\n'), ((1434, 1524), 'colorlines.colorline', 'colorline', ([], {'x': 'r[:, 0]', 'y': 'r[:, 1]', 'ax': 'ax', 'cmap': '"""bone_r"""', 'fade': 'fade', 'linewidth': 'linewidth'}), "(x=r[:, 0], y=r[:, 1], ax=ax, cmap='bone_r', fade=fade, linewidth=\n linewidth)\n", (1443, 1524), False, 'from colorlines import colorline\n'), ((2189, 2199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2197, 2199), True, 'import matplotlib.pyplot as plt\n'), ((429, 474), 'numpy.linspace', 'np.linspace', (['axis[0]', 'axis[1]', 'discretization'], {}), '(axis[0], axis[1], discretization)\n', (440, 474), True, 'import numpy as np\n'), ((527, 556), 'itertools.product', 'itertools.product', (['*linspaces'], {}), '(*linspaces)\n', (544, 556), False, 'import itertools\n'), ((719, 761), 'scipy.optimize.fsolve', 'fsolve', (['ss_system', 'guess'], {'full_output': '(True)'}), '(ss_system, guess, full_output=True)\n', (725, 761), False, 'from scipy.optimize import fsolve\n'), ((1911, 1968), 'numpy.random.uniform', 'np.random.uniform', (['x_rand_interval[0]', 'x_rand_interval[1]'], {}), '(x_rand_interval[0], x_rand_interval[1])\n', (1928, 1968), True, 'import numpy as np\n'), ((1993, 2050), 'numpy.random.uniform', 'np.random.uniform', (['y_rand_interval[0]', 'y_rand_interval[1]'], {}), '(y_rand_interval[0], y_rand_interval[1])\n', (2010, 2050), True, 'import numpy as np\n'), ((1030, 1072), 'numpy.isclose', 'np.isclose', (['calc_result', 'result'], {'atol': '(0.01)'}), '(calc_result, result, atol=0.01)\n', (1040, 1072), True, 'import numpy as np\n')]
|
import pytest
from ocdskit.cli.__main__ import main
from tests import assert_command, assert_command_error, path
def test_command(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', path('release-schema.json')],
'mapping-sheet.csv')
def test_command_no_deprecated(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', '--no-deprecated', path('release-schema.json')],
'mapping-sheet_no-deprecated.csv')
def test_command_order_by(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', '--order-by', 'path', path('release-schema.json')],
'mapping-sheet_order-by.csv')
@pytest.mark.vcr()
def test_command_extension(capsys, monkeypatch):
url = 'https://github.com/open-contracting-extensions/ocds_lots_extension/archive/v1.1.4.zip'
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', path('release-schema.json'), '--extension', url],
'mapping-sheet_extension.csv')
def test_command_extension_field(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', '--extension-field', 'extension',
path('release-schema.json')],
'mapping-sheet_extension-field.csv')
@pytest.mark.vcr()
def test_command_extension_and_extension_field(capsys, monkeypatch):
url = 'https://github.com/open-contracting-extensions/ocds_lots_extension/archive/v1.1.4.zip'
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', '--extension-field', 'extension',
path('release-schema.json'), '--extension', url],
'mapping-sheet_extension_extension-field.csv')
@pytest.mark.vcr()
def test_command_extension_and_extension_field_and_language(capsys, monkeypatch):
url = 'https://extensions.open-contracting.org/es/extensions/lots/master/'
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', '--extension-field', 'extension',
path('release-schema.json'), '--extension', url, '--language', 'es'],
'mapping-sheet_extension_extension-field_language.csv')
@pytest.mark.vcr()
def test_command_extension_and_extension_field_location(capsys, monkeypatch):
url = 'https://github.com/open-contracting-extensions/ocds_location_extension/archive/v1.1.4.zip'
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--infer-required', '--extension-field', 'extension',
path('release-schema.json'), '--extension', url],
'mapping-sheet_extension_extension-field_location.csv')
def test_command_oc4ids(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', path('project-schema.json')],
'mapping-sheet_oc4ids.csv')
def test_command_bods(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', '--order-by', 'path', path('bods/person-statement.json')],
'mapping-sheet_bods.csv')
def test_command_sedl(capsys, monkeypatch):
assert_command(capsys, monkeypatch, main,
['mapping-sheet', path('sedl-schema.json')],
'mapping-sheet_sedl.csv')
def test_command_order_by_nonexistent(capsys, monkeypatch, caplog):
assert_command_error(capsys, monkeypatch, main, ['mapping-sheet', '--order-by',
'nonexistent', path('release-schema.json')])
assert len(caplog.records) == 1
assert caplog.records[0].levelname == 'CRITICAL'
assert caplog.records[0].message == "the column 'nonexistent' doesn't exist – did you make a typo?"
|
[
"pytest.mark.vcr",
"tests.path"
] |
[((846, 863), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (861, 863), False, 'import pytest\n'), ((1518, 1535), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (1533, 1535), False, 'import pytest\n'), ((1980, 1997), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (1995, 1997), False, 'import pytest\n'), ((2465, 2482), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (2480, 2482), False, 'import pytest\n'), ((258, 285), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (262, 285), False, 'from tests import assert_command, assert_command_error, path\n'), ((505, 532), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (509, 532), False, 'from tests import assert_command, assert_command_error, path\n'), ((764, 791), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (768, 791), False, 'from tests import assert_command, assert_command_error, path\n'), ((1115, 1142), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (1119, 1142), False, 'from tests import assert_command, assert_command_error, path\n'), ((1429, 1456), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (1433, 1456), False, 'from tests import assert_command, assert_command_error, path\n'), ((1861, 1888), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (1865, 1888), False, 'from tests import assert_command, assert_command_error, path\n'), ((2317, 2344), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (2321, 2344), False, 'from tests import assert_command, assert_command_error, path\n'), ((2821, 2848), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (2825, 2848), False, 'from tests import assert_command, assert_command_error, path\n'), ((3077, 3104), 'tests.path', 'path', (['"""project-schema.json"""'], {}), "('project-schema.json')\n", (3081, 3104), False, 'from tests import assert_command, assert_command_error, path\n'), ((3305, 3339), 'tests.path', 'path', (['"""bods/person-statement.json"""'], {}), "('bods/person-statement.json')\n", (3309, 3339), False, 'from tests import assert_command, assert_command_error, path\n'), ((3516, 3540), 'tests.path', 'path', (['"""sedl-schema.json"""'], {}), "('sedl-schema.json')\n", (3520, 3540), False, 'from tests import assert_command, assert_command_error, path\n'), ((3810, 3837), 'tests.path', 'path', (['"""release-schema.json"""'], {}), "('release-schema.json')\n", (3814, 3837), False, 'from tests import assert_command, assert_command_error, path\n')]
|
from sqlalchemy import Column, ForeignKey, Integer, String, Enum
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
import psycopg2
Base = declarative_base()
# define database tables
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key = True)
fname = Column(String(250), nullable = False)
lname = Column(String(250), nullable = False)
email = Column(String(250))
status = Column(String(10))
class Race(Base):
__tablename__ = 'race'
id = Column(Integer, primary_key = True)
name = Column(String(100), nullable = False)
class Character(Base):
__tablename__ = 'character'
id = Column(Integer, primary_key = True)
person_id = Column(Integer, ForeignKey('person.id'))
name = Column(String(250), nullable = False)
race_id = Column(Integer, ForeignKey('race.id'))
concept = Column(String(250), nullable = True)
person = relationship(Person)
race = relationship(Race)
class Faction(Base):
__tablename__ = 'faction'
id = Column(Integer, primary_key = True)
race_id = Column(Integer, ForeignKey('race.id'))
name = Column(String(100), nullable = False)
race = relationship(Race)
class Ability(Base):
__tablename__ = 'ability'
id = Column(Integer, primary_key = True)
type = Column(String(100), nullable = False)
name = Column(String(100), nullable = False)
class Attribute(Base):
__tablename__ = 'attribute'
id = Column(Integer, primary_key = True)
name = Column(String(100), nullable = False)
engine = create_engine('postgresql://charsheet:4ab62xxc@localhost/charsheet')
Base.metadata.create_all(engine)
|
[
"sqlalchemy.String",
"sqlalchemy.ForeignKey",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column",
"sqlalchemy.create_engine"
] |
[((222, 240), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (238, 240), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((1539, 1607), 'sqlalchemy.create_engine', 'create_engine', (["'postgresql://charsheet:4ab62xxc@localhost/charsheet'"], {}), "('postgresql://charsheet:4ab62xxc@localhost/charsheet')\n", (1552, 1607), False, 'from sqlalchemy import create_engine\n'), ((319, 352), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (325, 352), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((556, 589), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (562, 589), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((697, 730), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (703, 730), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((941, 961), 'sqlalchemy.orm.relationship', 'relationship', (['Person'], {}), '(Person)\n', (953, 961), False, 'from sqlalchemy.orm import relationship\n'), ((970, 988), 'sqlalchemy.orm.relationship', 'relationship', (['Race'], {}), '(Race)\n', (982, 988), False, 'from sqlalchemy.orm import relationship\n'), ((1044, 1077), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1050, 1077), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1184, 1202), 'sqlalchemy.orm.relationship', 'relationship', (['Race'], {}), '(Race)\n', (1196, 1202), False, 'from sqlalchemy.orm import relationship\n'), ((1258, 1291), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1264, 1291), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1446, 1479), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1452, 1479), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((371, 382), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (377, 382), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((418, 429), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (424, 429), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((465, 476), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (471, 476), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((495, 505), 'sqlalchemy.String', 'String', (['(10)'], {}), '(10)\n', (501, 505), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((607, 618), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (613, 618), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((762, 785), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""person.id"""'], {}), "('person.id')\n", (772, 785), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((802, 813), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (808, 813), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((860, 881), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""race.id"""'], {}), "('race.id')\n", (870, 881), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((901, 912), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (907, 912), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1107, 1128), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""race.id"""'], {}), "('race.id')\n", (1117, 1128), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1145, 1156), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1151, 1156), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1309, 1320), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1315, 1320), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1355, 1366), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1361, 1366), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n'), ((1497, 1508), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1503, 1508), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Enum\n')]
|
import json
from tervis.environment import CurrentEnvironment
from tervis.auth import Auth
from tervis.producer import Producer
from tervis.exceptions import ApiError, PayloadTooLarge, ClientReadFailed, \
ClientBlacklisted
from tervis.web import Endpoint, ApiResponse, get_remote_addr
from tervis.filter import Filter
from libtervis.event import normalize_event
from libtervis.exceptions import ValidationError
class SubmitEventEndpoint(Endpoint):
url_path = '/events/{project_id}'
env = CurrentEnvironment()
auth = Auth()
producer = Producer()
filter = Filter()
async def get_allowed_origins(self):
return await self.filter.get_allowed_origins()
async def accept_event(self):
max_json_packet = self.env.get_config(
'apiserver.limits.max_json_packet')
line = await self.op.req.content.readline()
if not line:
return
try:
line = line.decode('utf-8')
if len(line) > max_json_packet:
raise PayloadTooLarge('JSON event above maximum size')
return normalize_event(json.loads(line))
except IOError as e:
raise ClientReadFailed(str(e))
except ValidationError as e:
raise ApiError(e.message)
async def post(self):
remote_addr = get_remote_addr(self.env, self.op.req)
if remote_addr is not None \
and await self.filter.ip_is_blacklisted(remote_addr):
raise ClientBlacklisted('The ip address of the client is '
'blacklisted for event submission')
errors = []
events = 0
while True:
try:
event = await self.accept_event()
if event is None:
break
await self.producer.produce_event(
self.auth.project_id, event, self.auth.timestamp)
events += 1
except ApiError as e:
errors.append(e.to_json())
return ApiResponse({
'errors': errors,
'events': events,
})
|
[
"tervis.web.get_remote_addr",
"tervis.auth.Auth",
"json.loads",
"tervis.exceptions.ClientBlacklisted",
"tervis.environment.CurrentEnvironment",
"tervis.web.ApiResponse",
"tervis.exceptions.PayloadTooLarge",
"tervis.exceptions.ApiError",
"tervis.producer.Producer",
"tervis.filter.Filter"
] |
[((504, 524), 'tervis.environment.CurrentEnvironment', 'CurrentEnvironment', ([], {}), '()\n', (522, 524), False, 'from tervis.environment import CurrentEnvironment\n'), ((536, 542), 'tervis.auth.Auth', 'Auth', ([], {}), '()\n', (540, 542), False, 'from tervis.auth import Auth\n'), ((558, 568), 'tervis.producer.Producer', 'Producer', ([], {}), '()\n', (566, 568), False, 'from tervis.producer import Producer\n'), ((582, 590), 'tervis.filter.Filter', 'Filter', ([], {}), '()\n', (588, 590), False, 'from tervis.filter import Filter\n'), ((1327, 1365), 'tervis.web.get_remote_addr', 'get_remote_addr', (['self.env', 'self.op.req'], {}), '(self.env, self.op.req)\n', (1342, 1365), False, 'from tervis.web import Endpoint, ApiResponse, get_remote_addr\n'), ((2040, 2089), 'tervis.web.ApiResponse', 'ApiResponse', (["{'errors': errors, 'events': events}"], {}), "({'errors': errors, 'events': events})\n", (2051, 2089), False, 'from tervis.web import Endpoint, ApiResponse, get_remote_addr\n'), ((1486, 1576), 'tervis.exceptions.ClientBlacklisted', 'ClientBlacklisted', (['"""The ip address of the client is blacklisted for event submission"""'], {}), "(\n 'The ip address of the client is blacklisted for event submission')\n", (1503, 1576), False, 'from tervis.exceptions import ApiError, PayloadTooLarge, ClientReadFailed, ClientBlacklisted\n'), ((1029, 1077), 'tervis.exceptions.PayloadTooLarge', 'PayloadTooLarge', (['"""JSON event above maximum size"""'], {}), "('JSON event above maximum size')\n", (1044, 1077), False, 'from tervis.exceptions import ApiError, PayloadTooLarge, ClientReadFailed, ClientBlacklisted\n'), ((1113, 1129), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1123, 1129), False, 'import json\n'), ((1258, 1277), 'tervis.exceptions.ApiError', 'ApiError', (['e.message'], {}), '(e.message)\n', (1266, 1277), False, 'from tervis.exceptions import ApiError, PayloadTooLarge, ClientReadFailed, ClientBlacklisted\n')]
|
import os
import errno
import threading
import queue
from collections import namedtuple
import jinja2
from PIL import Image
import tesserocr
import sd3.gfx
import sd3.text_table
_Char = namedtuple("_Char", ["idx", "char", "img_path"])
_WorkDesc = namedtuple("_WorkDesc", ["idx", "tile"])
_WorkRes = namedtuple("_WorkRes", ["idx", "char"])
_HTML_RESIZE_FACTOR = 2
_OCR_RESIZE_FACTOR = 5
_FIRST_CHAR_IDX = 0x20
_JPN_CHAR_START = 0x5F
_JPN_TESSEROCR_ID = "jpn"
_ENG_TESSEROCR_ID = "eng"
def _tile_to_char(tile, lang):
char_img = tile.to_img()
new_dim = (char_img.width * _OCR_RESIZE_FACTOR,
char_img.height * _OCR_RESIZE_FACTOR)
char_img = char_img.resize(new_dim, Image.LANCZOS)
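    # psm=10: Tesseract page segmentation mode "treat the image as a single character"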
return tesserocr.image_to_text(char_img, lang=lang, psm=10)
class _Worker:
def __init__(self, work_queue, result_queue):
self.work_queue = work_queue
self.result_queue = result_queue
def __call__(self):
while True:
work_desc = self.work_queue.get()
if work_desc is None:
break
if work_desc.idx >= _JPN_CHAR_START:
char = _tile_to_char(work_desc.tile, _JPN_TESSEROCR_ID)
else:
char = _tile_to_char(work_desc.tile, _ENG_TESSEROCR_ID)
if char:
char = char[0]
else:
char = "???"
print("%04X=%s" % (work_desc.idx, char))
self.result_queue.put(_WorkRes(work_desc.idx, char))
def generate(rom, output_path):
# Prepare communication tools
work_queue = queue.Queue()
result_queue = queue.Queue()
# Create and start workers
worker_list = []
thread_count = len(os.sched_getaffinity(0))
for _ in range(thread_count):
worker = _Worker(work_queue, result_queue)
t = threading.Thread(target=worker)
t.start()
worker_list.append(t)
# Dispatch tiles to decode
font_reader = sd3.gfx.FontReader(rom)
for idx, tile in font_reader.read_char_gen():
work_queue.put(_WorkDesc(idx, tile))
# Add a None work for each worker
for _ in worker_list:
work_queue.put(None)
for worker in worker_list:
worker.join()
print("Workers stopped")
# Gather results
decoded_dict = {}
while not result_queue.empty():
work_res = result_queue.get(block=False)
idx = work_res.idx + _FIRST_CHAR_IDX
decoded_dict[idx] = work_res.char
# Flush result in a file
out = open(output_path, "w")
for idx in sorted(decoded_dict.keys()):
out.write("%04X=%s\n" % (idx, decoded_dict[idx]))
out.close()
def _load_jina_template(name):
template_dir = os.path.dirname(os.path.abspath(__file__))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
trim_blocks=True, lstrip_blocks=True)
return env.get_template(name)
def generate_html(rom, tbl_path, out_folder):
char_list = []
tbl = sd3.text_table.Table()
tbl.load(tbl_path)
# Create output folder
try:
os.makedirs(out_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Read font
font_reader = sd3.gfx.FontReader(rom)
for idx, tile in font_reader.read_char_gen():
# Write file
img_name = "char_%04X.png" % idx
img_path = os.path.join(out_folder, img_name)
# Get and resize tile
img = tile.to_img()
new_dim = (img.width * _HTML_RESIZE_FACTOR,
img.height * _HTML_RESIZE_FACTOR)
img = img.resize(new_dim, Image.LANCZOS)
img.save(img_path)
idx += _FIRST_CHAR_IDX
char_list.append(_Char(idx, tbl.decode_char(idx), img_name))
# Generate html
template = _load_jina_template("jap_html_table.template")
rendered = template.render(char_list=char_list)
# Write output file
output_index = os.path.join(out_folder, "index.html")
with open(output_index, 'w') as out:
out.write(rendered)
|
[
"threading.Thread",
"os.path.abspath",
"os.sched_getaffinity",
"os.makedirs",
"tesserocr.image_to_text",
"jinja2.FileSystemLoader",
"collections.namedtuple",
"os.path.join",
"queue.Queue"
] |
[((187, 235), 'collections.namedtuple', 'namedtuple', (['"""_Char"""', "['idx', 'char', 'img_path']"], {}), "('_Char', ['idx', 'char', 'img_path'])\n", (197, 235), False, 'from collections import namedtuple\n'), ((248, 288), 'collections.namedtuple', 'namedtuple', (['"""_WorkDesc"""', "['idx', 'tile']"], {}), "('_WorkDesc', ['idx', 'tile'])\n", (258, 288), False, 'from collections import namedtuple\n'), ((300, 340), 'collections.namedtuple', 'namedtuple', (['"""_WorkDesc"""', "['idx', 'char']"], {}), "('_WorkDesc', ['idx', 'char'])\n", (310, 340), False, 'from collections import namedtuple\n'), ((724, 776), 'tesserocr.image_to_text', 'tesserocr.image_to_text', (['char_img'], {'lang': 'lang', 'psm': '(10)'}), '(char_img, lang=lang, psm=10)\n', (747, 776), False, 'import tesserocr\n'), ((1585, 1598), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1596, 1598), False, 'import queue\n'), ((1618, 1631), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1629, 1631), False, 'import queue\n'), ((3946, 3984), 'os.path.join', 'os.path.join', (['out_folder', '"""index.html"""'], {}), "(out_folder, 'index.html')\n", (3958, 3984), False, 'import os\n'), ((1708, 1731), 'os.sched_getaffinity', 'os.sched_getaffinity', (['(0)'], {}), '(0)\n', (1728, 1731), False, 'import os\n'), ((1830, 1861), 'threading.Thread', 'threading.Thread', ([], {'target': 'worker'}), '(target=worker)\n', (1846, 1861), False, 'import threading\n'), ((2725, 2750), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2740, 2750), False, 'import os\n'), ((3097, 3120), 'os.makedirs', 'os.makedirs', (['out_folder'], {}), '(out_folder)\n', (3108, 3120), False, 'import os\n'), ((3390, 3424), 'os.path.join', 'os.path.join', (['out_folder', 'img_name'], {}), '(out_folder, img_name)\n', (3402, 3424), False, 'import os\n'), ((2788, 2825), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['template_dir'], {}), '(template_dir)\n', (2811, 2825), False, 'import jinja2\n')]
|
from itertools import (chain,
combinations)
import pytest
from ground.base import (Context,
Relation)
from ground.hints import Contour
from hypothesis import given
from bentley_ottmann.planar import contour_self_intersects
from tests.utils import (contour_to_edges,
pop_left_vertex,
reverse_contour,
reverse_contour_coordinates)
from . import strategies
@given(strategies.contours)
def test_basic(contour: Contour) -> None:
result = contour_self_intersects(contour)
assert isinstance(result, bool)
@given(strategies.triangular_contours)
def test_base_case(context: Context, contour: Contour) -> None:
result = contour_self_intersects(contour)
left_vertex, mid_vertex, right_vertex = sorted(contour.vertices)
assert result is context.segment_contains_point(
context.segment_cls(left_vertex, right_vertex), mid_vertex)
@given(strategies.non_triangular_contours)
def test_step(context: Context, contour: Contour) -> None:
first_vertex, rest_contour = pop_left_vertex(contour)
rest_vertices = rest_contour.vertices
result = contour_self_intersects(rest_contour)
next_result = contour_self_intersects(contour)
first_edge = context.segment_cls(first_vertex, rest_vertices[0])
last_edge = context.segment_cls(rest_vertices[-1], first_vertex)
rest_edges = contour_to_edges(rest_contour)
overlap_relations = (Relation.COMPONENT, Relation.COMPOSITE,
Relation.EQUAL, Relation.OVERLAP)
assert (next_result
is (result
and len(rest_vertices) > 2
and (any(context.segments_relation(rest_edges[index],
rest_edges[other_index])
is not Relation.DISJOINT
for index in range(len(rest_edges) - 1)
for other_index
in chain(range(index - 1),
range(index + 2, len(rest_edges) - 1)))
or any(context.segments_relation(edge, other_edge)
in overlap_relations
for edge, other_edge
in combinations(rest_edges[:-1], 2)))
or any(context.segments_relation(first_edge, edge)
is not Relation.DISJOINT
for edge in rest_edges[1:-1])
or any(context.segments_relation(last_edge, edge)
is not Relation.DISJOINT
for edge in rest_edges[:-2])
or len(rest_vertices) > 1
and (context.segments_relation(first_edge, rest_edges[0])
in overlap_relations
or context.segments_relation(first_edge, last_edge)
in overlap_relations
or context.segments_relation(last_edge, rest_edges[0])
in overlap_relations)))
@given(strategies.contours)
def test_reversed(contour: Contour) -> None:
result = contour_self_intersects(contour)
assert result is contour_self_intersects(reverse_contour(contour))
@given(strategies.contours)
def test_reversed_coordinates(contour: Contour) -> None:
result = contour_self_intersects(contour)
assert result is contour_self_intersects(reverse_contour_coordinates(contour))
@given(strategies.degenerate_contours)
def test_degenerate_contour(contour: Contour) -> None:
with pytest.raises(ValueError):
contour_self_intersects(contour)
|
[
"bentley_ottmann.planar.contour_self_intersects",
"tests.utils.contour_to_edges",
"itertools.combinations",
"pytest.raises",
"hypothesis.given",
"tests.utils.pop_left_vertex",
"tests.utils.reverse_contour",
"tests.utils.reverse_contour_coordinates"
] |
[((482, 508), 'hypothesis.given', 'given', (['strategies.contours'], {}), '(strategies.contours)\n', (487, 508), False, 'from hypothesis import given\n'), ((637, 674), 'hypothesis.given', 'given', (['strategies.triangular_contours'], {}), '(strategies.triangular_contours)\n', (642, 674), False, 'from hypothesis import given\n'), ((983, 1024), 'hypothesis.given', 'given', (['strategies.non_triangular_contours'], {}), '(strategies.non_triangular_contours)\n', (988, 1024), False, 'from hypothesis import given\n'), ((3083, 3109), 'hypothesis.given', 'given', (['strategies.contours'], {}), '(strategies.contours)\n', (3088, 3109), False, 'from hypothesis import given\n'), ((3276, 3302), 'hypothesis.given', 'given', (['strategies.contours'], {}), '(strategies.contours)\n', (3281, 3302), False, 'from hypothesis import given\n'), ((3493, 3530), 'hypothesis.given', 'given', (['strategies.degenerate_contours'], {}), '(strategies.degenerate_contours)\n', (3498, 3530), False, 'from hypothesis import given\n'), ((564, 596), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['contour'], {}), '(contour)\n', (587, 596), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((752, 784), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['contour'], {}), '(contour)\n', (775, 784), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((1117, 1141), 'tests.utils.pop_left_vertex', 'pop_left_vertex', (['contour'], {}), '(contour)\n', (1132, 1141), False, 'from tests.utils import contour_to_edges, pop_left_vertex, reverse_contour, reverse_contour_coordinates\n'), ((1198, 1235), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['rest_contour'], {}), '(rest_contour)\n', (1221, 1235), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((1254, 1286), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['contour'], {}), '(contour)\n', (1277, 1286), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((1443, 1473), 'tests.utils.contour_to_edges', 'contour_to_edges', (['rest_contour'], {}), '(rest_contour)\n', (1459, 1473), False, 'from tests.utils import contour_to_edges, pop_left_vertex, reverse_contour, reverse_contour_coordinates\n'), ((3168, 3200), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['contour'], {}), '(contour)\n', (3191, 3200), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((3373, 3405), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['contour'], {}), '(contour)\n', (3396, 3405), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((3595, 3620), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3608, 3620), False, 'import pytest\n'), ((3630, 3662), 'bentley_ottmann.planar.contour_self_intersects', 'contour_self_intersects', (['contour'], {}), '(contour)\n', (3653, 3662), False, 'from bentley_ottmann.planar import contour_self_intersects\n'), ((3247, 3271), 'tests.utils.reverse_contour', 'reverse_contour', (['contour'], {}), '(contour)\n', (3262, 3271), False, 'from tests.utils import contour_to_edges, pop_left_vertex, reverse_contour, reverse_contour_coordinates\n'), ((3452, 3488), 'tests.utils.reverse_contour_coordinates', 'reverse_contour_coordinates', (['contour'], {}), '(contour)\n', (3479, 3488), False, 'from tests.utils import contour_to_edges, pop_left_vertex, reverse_contour, reverse_contour_coordinates\n'), ((2317, 2349), 'itertools.combinations', 'combinations', (['rest_edges[:-1]', '(2)'], {}), '(rest_edges[:-1], 2)\n', (2329, 2349), False, 'from itertools import chain, combinations\n')]
|
import torch
import torch.nn as nn
from torchvision import models
import numpy as np
from torch.autograd import Variable
import os
class Model:
def __init__(self, key = 'abnormal'):
self.INPUT_DIM = 224
self.MAX_PIXEL_VAL = 255
self.MEAN = 58.09
self.STDDEV = 49.73
self.model_ab=MRI_alex(False)
if key == 'abnormal':
self.model_ab.load_state_dict(torch.load(r"models/abnormal.pt", map_location='cpu'))
elif key =='acl':
self.model_ab.load_state_dict(torch.load(r"models/acl.pt", map_location='cpu'))
else:
self.model_ab.load_state_dict(torch.load(r"models/men.pt", map_location='cpu'))
self.model_ab.cuda()
def preprocess(self, series):
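        # Center-crop each slice to INPUT_DIM, min-max scale intensities to
        # [0, MAX_PIXEL_VAL], standardize with MEAN/STDDEV, then repeat the
        # single channel 3 times for the AlexNet backbones.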
pad = int((series.shape[2] - self.INPUT_DIM)/2)
series = series[:,pad:-pad,pad:-pad]
series = (series-np.min(series))/(np.max(series)-np.min(series))*self.MAX_PIXEL_VAL
series = (series - self.MEAN) / self.STDDEV
series = np.stack((series,)*3, axis=1)
series_float = torch.FloatTensor(series)
return series_float
def study(self, axial_path, sagit_path, coron_path):
vol_axial = np.load(axial_path)
vol_sagit = np.load(sagit_path)
vol_coron = np.load(coron_path)
vol_axial_tensor = self.preprocess(vol_axial)
vol_sagit_tensor = self.preprocess(vol_sagit)
vol_coron_tensor = self.preprocess(vol_coron)
return {"axial": vol_axial_tensor,
"sagit": vol_sagit_tensor,
"coron": vol_coron_tensor}
def predict(self, model, tensors, abnormality_prior=None):
vol_axial = tensors["axial"].cuda()
vol_sagit = tensors["sagit"].cuda()
vol_coron = tensors["coron"].cuda()
vol_axial = Variable(vol_axial)
vol_sagit = Variable(vol_sagit)
vol_coron = Variable(vol_coron)
logit = model.forward(vol_axial, vol_sagit, vol_coron)
pred = torch.sigmoid(logit)
pred_npy = pred.data.cpu().numpy()[0][0]
if abnormality_prior:
pred_npy = pred_npy * abnormality_prior
return pred_npy
    def get_prediction(self, axial_path, sagit_path, coron_path):
        return self.predict(self.model_ab, self.study(axial_path, sagit_path, coron_path))
class MRI_alex(nn.Module):
def __init__(self, training=True):
super().__init__()
self.axial_net = models.alexnet(pretrained=training)
self.sagit_net = models.alexnet(pretrained=training)
self.coron_net = models.alexnet(pretrained=training)
self.gap_axial = nn.AdaptiveAvgPool2d(1)
self.gap_sagit = nn.AdaptiveAvgPool2d(1)
self.gap_coron = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(3*256, 1)
return
def forward(self,vol_axial, vol_sagit, vol_coron):
vol_axial = torch.squeeze(vol_axial, dim=0)
vol_sagit = torch.squeeze(vol_sagit, dim=0)
vol_coron = torch.squeeze(vol_coron, dim=0)
vol_axial = self.axial_net.features(vol_axial)
vol_sagit = self.sagit_net.features(vol_sagit)
vol_coron = self.coron_net.features(vol_coron)
vol_axial = self.gap_axial(vol_axial).view(vol_axial.size(0), -1)
x = torch.max(vol_axial, 0, keepdim=True)[0]
vol_sagit = self.gap_sagit(vol_sagit).view(vol_sagit.size(0), -1)
y = torch.max(vol_sagit, 0, keepdim=True)[0]
vol_coron = self.gap_coron(vol_coron).view(vol_coron.size(0), -1)
z = torch.max(vol_coron, 0, keepdim=True)[0]
w = torch.cat((x, y, z), 1)
out = self.classifier(w)
return out
|
[
"numpy.stack",
"torch.nn.AdaptiveAvgPool2d",
"numpy.load",
"torch.autograd.Variable",
"torch.load",
"torchvision.models.alexnet",
"torch.FloatTensor",
"torch.cat",
"torch.squeeze",
"torch.sigmoid",
"numpy.min",
"torch.max",
"numpy.max",
"torch.nn.Linear"
] |
[((1031, 1062), 'numpy.stack', 'np.stack', (['((series,) * 3)'], {'axis': '(1)'}), '((series,) * 3, axis=1)\n', (1039, 1062), True, 'import numpy as np\n'), ((1084, 1109), 'torch.FloatTensor', 'torch.FloatTensor', (['series'], {}), '(series)\n', (1101, 1109), False, 'import torch\n'), ((1220, 1239), 'numpy.load', 'np.load', (['axial_path'], {}), '(axial_path)\n', (1227, 1239), True, 'import numpy as np\n'), ((1260, 1279), 'numpy.load', 'np.load', (['sagit_path'], {}), '(sagit_path)\n', (1267, 1279), True, 'import numpy as np\n'), ((1300, 1319), 'numpy.load', 'np.load', (['coron_path'], {}), '(coron_path)\n', (1307, 1319), True, 'import numpy as np\n'), ((1835, 1854), 'torch.autograd.Variable', 'Variable', (['vol_axial'], {}), '(vol_axial)\n', (1843, 1854), False, 'from torch.autograd import Variable\n'), ((1875, 1894), 'torch.autograd.Variable', 'Variable', (['vol_sagit'], {}), '(vol_sagit)\n', (1883, 1894), False, 'from torch.autograd import Variable\n'), ((1915, 1934), 'torch.autograd.Variable', 'Variable', (['vol_coron'], {}), '(vol_coron)\n', (1923, 1934), False, 'from torch.autograd import Variable\n'), ((2013, 2033), 'torch.sigmoid', 'torch.sigmoid', (['logit'], {}), '(logit)\n', (2026, 2033), False, 'import torch\n'), ((2448, 2483), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'training'}), '(pretrained=training)\n', (2462, 2483), False, 'from torchvision import models\n'), ((2509, 2544), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'training'}), '(pretrained=training)\n', (2523, 2544), False, 'from torchvision import models\n'), ((2570, 2605), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'training'}), '(pretrained=training)\n', (2584, 2605), False, 'from torchvision import models\n'), ((2631, 2654), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2651, 2654), True, 'import torch.nn as nn\n'), ((2680, 2703), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2700, 2703), True, 'import torch.nn as nn\n'), ((2729, 2752), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2749, 2752), True, 'import torch.nn as nn\n'), ((2779, 2800), 'torch.nn.Linear', 'nn.Linear', (['(3 * 256)', '(1)'], {}), '(3 * 256, 1)\n', (2788, 2800), True, 'import torch.nn as nn\n'), ((2894, 2925), 'torch.squeeze', 'torch.squeeze', (['vol_axial'], {'dim': '(0)'}), '(vol_axial, dim=0)\n', (2907, 2925), False, 'import torch\n'), ((2946, 2977), 'torch.squeeze', 'torch.squeeze', (['vol_sagit'], {'dim': '(0)'}), '(vol_sagit, dim=0)\n', (2959, 2977), False, 'import torch\n'), ((2998, 3029), 'torch.squeeze', 'torch.squeeze', (['vol_coron'], {'dim': '(0)'}), '(vol_coron, dim=0)\n', (3011, 3029), False, 'import torch\n'), ((3588, 3611), 'torch.cat', 'torch.cat', (['(x, y, z)', '(1)'], {}), '((x, y, z), 1)\n', (3597, 3611), False, 'import torch\n'), ((3281, 3318), 'torch.max', 'torch.max', (['vol_axial', '(0)'], {'keepdim': '(True)'}), '(vol_axial, 0, keepdim=True)\n', (3290, 3318), False, 'import torch\n'), ((3408, 3445), 'torch.max', 'torch.max', (['vol_sagit', '(0)'], {'keepdim': '(True)'}), '(vol_sagit, 0, keepdim=True)\n', (3417, 3445), False, 'import torch\n'), ((3535, 3572), 'torch.max', 'torch.max', (['vol_coron', '(0)'], {'keepdim': '(True)'}), '(vol_coron, 0, keepdim=True)\n', (3544, 3572), False, 'import torch\n'), ((422, 474), 'torch.load', 'torch.load', (['"""models/abnormal.pt"""'], {'map_location': '"""cpu"""'}), "('models/abnormal.pt', map_location='cpu')\n", (432, 474), False, 'import torch\n'), ((545, 592), 'torch.load', 'torch.load', (['"""models/acl.pt"""'], {'map_location': '"""cpu"""'}), "('models/acl.pt', map_location='cpu')\n", (555, 592), False, 'import torch\n'), ((651, 698), 'torch.load', 'torch.load', (['"""models/men.pt"""'], {'map_location': '"""cpu"""'}), "('models/men.pt', map_location='cpu')\n", (661, 698), False, 'import torch\n'), ((895, 909), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (901, 909), True, 'import numpy as np\n'), ((912, 926), 'numpy.max', 'np.max', (['series'], {}), '(series)\n', (918, 926), True, 'import numpy as np\n'), ((927, 941), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (933, 941), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import utils
utils.check_version((3,7))
utils.clear()
print('Hello, my name is <NAME>')
print('My favorite game is Bioshock Infinite')
print('My only concern is getting back into the groove of coding for this class')
print('I just want to learn more about what goes into creating the things I love and more about the industry in general.')
print('stackoverflow number: user:12003507')
print('github url: https://github.com/17jrb17')
|
[
"utils.clear",
"utils.check_version"
] |
[((38, 65), 'utils.check_version', 'utils.check_version', (['(3, 7)'], {}), '((3, 7))\n', (57, 65), False, 'import utils\n'), ((65, 78), 'utils.clear', 'utils.clear', ([], {}), '()\n', (76, 78), False, 'import utils\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-10 16:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contact', '0003_contactcaptchaformfield_hide_label'),
]
operations = [
migrations.RenameField(
model_name='contact',
old_name='first_names',
new_name='name',
),
migrations.RemoveField(
model_name='contact',
name='last_names',
),
migrations.RemoveField(
model_name='contactcaptchaformfield',
name='hide_label',
),
migrations.AddField(
model_name='contactmessage',
name='subject',
field=models.CharField(default='', max_length=254, verbose_name='Subject'),
preserve_default=False,
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.migrations.RenameField"
] |
[((317, 406), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""contact"""', 'old_name': '"""first_names"""', 'new_name': '"""name"""'}), "(model_name='contact', old_name='first_names',\n new_name='name')\n", (339, 406), False, 'from django.db import migrations, models\n'), ((459, 522), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""contact"""', 'name': '"""last_names"""'}), "(model_name='contact', name='last_names')\n", (481, 522), False, 'from django.db import migrations, models\n'), ((567, 646), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""contactcaptchaformfield"""', 'name': '"""hide_label"""'}), "(model_name='contactcaptchaformfield', name='hide_label')\n", (589, 646), False, 'from django.db import migrations, models\n'), ((799, 867), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(254)', 'verbose_name': '"""Subject"""'}), "(default='', max_length=254, verbose_name='Subject')\n", (815, 867), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
import scrapy
import urllib
import os
from scrapy.http import Request
from scrapy.selector import Selector
from crawl_good_softwares.items import CrawlGoodSoftwaresItem
class TestSpiderSpider(scrapy.Spider):
name = "firehorse_scrapy_software_spider"
start_urls = ['http://www.filehorse.com/popular/',
'http://www.filehorse.com/latest/',
'http://www.filehorse.com/software-benchmarking/',
'http://www.filehorse.com/software-compression-and-backup/']
current_page = 1
max_page = 6
headers = {
'Connection': 'keep - alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
}
def __init__(self, *a, **kw):
while self.current_page < self.max_page:
self.current_page = self.current_page + 1
link = 'http://www.filehorse.com/popular/page-' + str(self.current_page)
link2 = 'http://www.filehorse.com/latest/page-' + str(self.current_page)
link3 = 'http://www.filehorse.com/software-benchmarking/page-' + str(self.current_page)
link4 = 'http://www.filehorse.com/software-compression-and-backup/page-' + str(self.current_page)
self.start_urls.append(link)
self.start_urls.append(link2)
self.start_urls.append(link3)
self.start_urls.append(link4)
        super(TestSpiderSpider, self).__init__(*a, **kw)
def parse(self, response):
selector = Selector(response)
software_links = selector.xpath('//div[@class="cat_dl_btn"]/a/@href').extract()
if 0 < len(software_links):
for link in software_links:
yield Request(link, callback=self.parse, headers=self.headers)
else:
final_download_url = selector.xpath('//a[@id="download_url"]/@href').extract()
if 0 == len(final_download_url):
yield Request(response.url + 'download/', callback=self.parse, headers=self.headers)
else:
item = CrawlGoodSoftwaresItem()
item['link'] = final_download_url[0]
yield item
|
[
"crawl_good_softwares.items.CrawlGoodSoftwaresItem",
"scrapy.selector.Selector",
"scrapy.http.Request"
] |
[((1580, 1598), 'scrapy.selector.Selector', 'Selector', (['response'], {}), '(response)\n', (1588, 1598), False, 'from scrapy.selector import Selector\n'), ((2134, 2158), 'crawl_good_softwares.items.CrawlGoodSoftwaresItem', 'CrawlGoodSoftwaresItem', ([], {}), '()\n', (2156, 2158), False, 'from crawl_good_softwares.items import CrawlGoodSoftwaresItem\n'), ((1785, 1841), 'scrapy.http.Request', 'Request', (['link'], {'callback': 'self.parse', 'headers': 'self.headers'}), '(link, callback=self.parse, headers=self.headers)\n', (1792, 1841), False, 'from scrapy.http import Request\n'), ((2014, 2092), 'scrapy.http.Request', 'Request', (["(response.url + 'download/')"], {'callback': 'self.parse', 'headers': 'self.headers'}), "(response.url + 'download/', callback=self.parse, headers=self.headers)\n", (2021, 2092), False, 'from scrapy.http import Request\n')]
|
import sys
def write(s):
"""write s to stdout"""
s = s.replace('\n', '\r\n')
sys.stdout.write(s)
sys.stdout.flush()
def make_target(options):
return write
def free_target():
pass
|
[
"sys.stdout.write",
"sys.stdout.flush"
] |
[((90, 109), 'sys.stdout.write', 'sys.stdout.write', (['s'], {}), '(s)\n', (106, 109), False, 'import sys\n'), ((114, 132), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (130, 132), False, 'import sys\n')]
|
'''
Copyright 2017, United States Government, as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.
The pyCMR platform is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
'''
import os
import unittest
import xml.etree.ElementTree as ET
from ..pyCMR import CMR, Collection, Granule
class TestCMRIntegration(unittest.TestCase):
@classmethod
def setUpClass(cls):
configFilePath = "pyCMRConfig.cfg"
cls.cmr = CMR(configFilePath)
cls._test_collection_path = os.path.abspath(os.curdir) + "/pyCMR/tests/fixtures/test-collection.xml" #os.path.join(os.curdir, 'tests', 'fixtures', 'test-collection.xml')
cls._test_granule_path = os.path.abspath(os.curdir) + "/pyCMR/tests/fixtures/test-granule.xml" #os.path.join(os.curdir, 'tests', 'fixtures', 'test-granule.xml')
cls._test_collection_name = 'PYCMR TEST COLLECTION'
cls._test_granule_name = 'PYCMR_TEST_GRANULE.hd5'
def collection_search(self):
results = self.cmr.searchCollection()
# Make sure that the XML response was actually parsed
self.assertTrue(isinstance(results[0], Collection))
self.assertTrue('concept-id' in results[0].keys())
self.assertTrue('Collection' in results[0].keys())
def granule_search(self):
results = self.cmr.searchGranule()
self.assertTrue(isinstance(results[0], Granule))
self.assertTrue('concept-id' in results[0].keys())
self.assertTrue('Granule' in results[0].keys())
def collection_ingest(self):
result = self.cmr.ingestCollection(self._test_collection_path)
# If ingest wasn't successful, the above would've thrown a 4XX error
# But just to be sure, let's check that there was a result in the returned XML
# Otherwise, the top-level tag would be `<errors>`
parsed = ET.XML(result)
self.assertTrue(parsed.tag == 'result')
def granule_ingest(self):
result = self.cmr.ingestGranule(self._test_granule_path)
parsed = ET.XML(result)
self.assertTrue(parsed.tag == 'result')
def collection_update(self):
result = self.cmr.updateCollection(self._test_collection_path)
parsed = ET.XML(result)
self.assertTrue(parsed.tag == 'result')
def granule_update(self):
result = self.cmr.updateGranule(self._test_granule_path)
parsed = ET.XML(result)
self.assertTrue(parsed.tag == 'result')
def granule_delete(self):
result = self.cmr.deleteGranule(self._test_granule_name)
# Confirm that a tombstone was returned
parsed = ET.XML(result)
self.assertTrue(parsed.tag == 'result')
def collection_delete(self):
result = self.cmr.deleteCollection(self._test_collection_name)
parsed = ET.XML(result)
self.assertTrue(parsed.tag == 'result')
def test_monolith(self):
'''
Since these are order-sensitive integration tests,
wrap them in a monolithic test, so that they run in the proper order
and stop after a single failure (without having to specify `failfast`)
https://stackoverflow.com/questions/5387299/python-unittest-testcase-execution-order
'''
for test_name in [
'collection_search',
'granule_search',
'collection_ingest',
'granule_ingest',
'collection_update',
'granule_update',
'granule_delete',
'collection_delete'
]:
test = getattr(self, test_name)
test()
def test_search_limit(self):
''' Make sure that the correct number of items are returned by searches '''
results = self.cmr.searchCollection(limit=3)
self.assertTrue(len(results) == 3)
results = self.cmr.searchGranule(limit=91)
self.assertTrue(len(results) == 91)
|
[
"os.path.abspath",
"xml.etree.ElementTree.XML"
] |
[((2360, 2374), 'xml.etree.ElementTree.XML', 'ET.XML', (['result'], {}), '(result)\n', (2366, 2374), True, 'import xml.etree.ElementTree as ET\n'), ((2536, 2550), 'xml.etree.ElementTree.XML', 'ET.XML', (['result'], {}), '(result)\n', (2542, 2550), True, 'import xml.etree.ElementTree as ET\n'), ((2721, 2735), 'xml.etree.ElementTree.XML', 'ET.XML', (['result'], {}), '(result)\n', (2727, 2735), True, 'import xml.etree.ElementTree as ET\n'), ((2897, 2911), 'xml.etree.ElementTree.XML', 'ET.XML', (['result'], {}), '(result)\n', (2903, 2911), True, 'import xml.etree.ElementTree as ET\n'), ((3121, 3135), 'xml.etree.ElementTree.XML', 'ET.XML', (['result'], {}), '(result)\n', (3127, 3135), True, 'import xml.etree.ElementTree as ET\n'), ((3306, 3320), 'xml.etree.ElementTree.XML', 'ET.XML', (['result'], {}), '(result)\n', (3312, 3320), True, 'import xml.etree.ElementTree as ET\n'), ((1020, 1046), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (1035, 1046), False, 'import os\n'), ((1195, 1221), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (1210, 1221), False, 'import os\n')]
|
import os
REDIS_CONFIG = {
'host': os.getenv('REDING_REDIS_HOST', 'localhost'),
'port': int(os.getenv('REDING_REDIS_PORT', 6379)),
'db': int(os.getenv('REDING_REDIS_DB', 0)),
}
DAEMON_CONFIG = {
'host': os.getenv('REDING_DAEMON_HOST', '0.0.0.0'),
'port': int(os.getenv('REDING_DAEMON_PORT', 5000)),
}
KEY_CONFIG = {
'prefix': 'rating',
'subject': 'user',
'object': 'app',
'subjects': 'users',
'objects': 'apps'
}
PAGINATION_DEFAULT_OFFSET = 0
PAGINATION_DEFAULT_SIZE = 10
__all__ = (
'REDIS_CONFIG',
'DAEMON_CONFIG',
'KEY_CONFIG',
)
|
[
"os.getenv"
] |
[((40, 83), 'os.getenv', 'os.getenv', (['"""REDING_REDIS_HOST"""', '"""localhost"""'], {}), "('REDING_REDIS_HOST', 'localhost')\n", (49, 83), False, 'import os\n'), ((221, 263), 'os.getenv', 'os.getenv', (['"""REDING_DAEMON_HOST"""', '"""0.0.0.0"""'], {}), "('REDING_DAEMON_HOST', '0.0.0.0')\n", (230, 263), False, 'import os\n'), ((101, 137), 'os.getenv', 'os.getenv', (['"""REDING_REDIS_PORT"""', '(6379)'], {}), "('REDING_REDIS_PORT', 6379)\n", (110, 137), False, 'import os\n'), ((154, 185), 'os.getenv', 'os.getenv', (['"""REDING_REDIS_DB"""', '(0)'], {}), "('REDING_REDIS_DB', 0)\n", (163, 185), False, 'import os\n'), ((281, 318), 'os.getenv', 'os.getenv', (['"""REDING_DAEMON_PORT"""', '(5000)'], {}), "('REDING_DAEMON_PORT', 5000)\n", (290, 318), False, 'import os\n')]
|
from scipy import optimize
import matplotlib.pyplot as plt
import numpy as np
x = np.array([1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=float)
y = np.array([5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59,
84.47, 98.36, 112.25, 126.14, 140.03])
# one input sequence, 4 unknown parameters, 2 piecewise functions
def piecewise_linear(x, x0, y0, k1, k2):
# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0
# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0
return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0,
lambda x:k2*x + y0-k2*x0])
def gauss(mean, scale, x=np.linspace(1,22,22), sigma=4):
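    # unnormalized Gaussian bump of height `scale`, centered at `mean`, width `sigma`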
return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))
# # 用已有的 (x, y) 去拟合 piecewise_linear 分段函数
# p , e = optimize.curve_fit(piecewise_linear, x, y)
# xd = np.linspace(0, 15, 100)
# plt.plot(x, y, "o")
# plt.plot(xd, piecewise_linear(xd, *p))
# plt.savefig('123.png')
xi = np.linspace(1,22,22)
information_matrix = np.zeros((22))
x = [1, 13]
for i in range(len(x)):
information_matrix += gauss(x[i],1)
# plt.plot(xi, information_matrix)
plt.plot(xi, information_matrix)
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.square",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.piecewise"
] |
[((87, 166), 'numpy.array', 'np.array', (['[1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'], {'dtype': 'float'}), '([1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=float)\n', (95, 166), True, 'import numpy as np\n'), ((172, 276), 'numpy.array', 'np.array', (['[5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59, 84.47, 98.36, 112.25, \n 126.14, 140.03]'], {}), '([5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59, 84.47, 98.36, \n 112.25, 126.14, 140.03])\n', (180, 276), True, 'import numpy as np\n'), ((935, 957), 'numpy.linspace', 'np.linspace', (['(1)', '(22)', '(22)'], {}), '(1, 22, 22)\n', (946, 957), True, 'import numpy as np\n'), ((978, 990), 'numpy.zeros', 'np.zeros', (['(22)'], {}), '(22)\n', (986, 990), True, 'import numpy as np\n'), ((1107, 1139), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'information_matrix'], {}), '(xi, information_matrix)\n', (1115, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1149, 1151), True, 'import matplotlib.pyplot as plt\n'), ((451, 557), 'numpy.piecewise', 'np.piecewise', (['x', '[x < x0, x >= x0]', '[lambda x: k1 * x + y0 - k1 * x0, lambda x: k2 * x + y0 - k2 * x0]'], {}), '(x, [x < x0, x >= x0], [lambda x: k1 * x + y0 - k1 * x0, lambda\n x: k2 * x + y0 - k2 * x0])\n', (463, 557), True, 'import numpy as np\n'), ((605, 627), 'numpy.linspace', 'np.linspace', (['(1)', '(22)', '(22)'], {}), '(1, 22, 22)\n', (616, 627), True, 'import numpy as np\n'), ((665, 684), 'numpy.square', 'np.square', (['(x - mean)'], {}), '(x - mean)\n', (674, 684), True, 'import numpy as np\n')]
|
from unittest.mock import MagicMock
import pytest
from pyspark.sql import SparkSession
from prefect.tasks.sodaspark import SodaSparkScan
class TestSodaSparkScan:
def test_construction_provide_scan_and_df(self):
expected_scan_def = "/foo/bar.yaml"
expected_df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=expected_scan_def, df=expected_df)
assert soda_spark_scan_task.scan_def == expected_scan_def
assert soda_spark_scan_task.df == expected_df
def test_construction_no_scan_and_df(self):
soda_spark_scan_task = SodaSparkScan()
assert soda_spark_scan_task.scan_def is None
assert soda_spark_scan_task.df is None
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_no_scan(self):
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(df=df)
with pytest.raises(ValueError) as exc:
soda_spark_scan_task.run()
assert "scan_def cannot be None" in str(exc)
def test_run_no_df(self):
soda_spark_scan_task = SodaSparkScan(scan_def="/foo/bar.yaml")
with pytest.raises(ValueError) as exc:
soda_spark_scan_task.run()
assert "df cannot be None" in str(exc)
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_invalid_scan(self, monkeypatch):
scan_def = "invalid scan definition"
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
with pytest.raises(AttributeError):
soda_spark_scan_task.run()
def test_run_invalid_df(self, monkeypatch):
scan_def = """
table_name: demodata
metrics:
- row_count
- max
- min_length
tests:
- row_count > 0
"""
df = "not a valid df"
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
with pytest.raises(AttributeError):
soda_spark_scan_task.run()
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_valid_scan_and_df_with_measurements(self):
scan_def = """
table_name: demodata
metrics:
- row_count
- max
- min_length
tests:
- row_count > 0
"""
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
res = soda_spark_scan_task.run()
assert hasattr(res, "measurements")
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_valid_scan_and_df_with_errors(self):
scan_def = """
table_name: demodata
metrics:
- row_count
- max
- min_length
tests:
- row_count == 0
"""
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
res = soda_spark_scan_task.run()
assert hasattr(res, "errors")
|
[
"pytest.raises",
"prefect.tasks.sodaspark.SodaSparkScan",
"pyspark.sql.SparkSession.builder.getOrCreate"
] |
[((450, 507), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'scan_def': 'expected_scan_def', 'df': 'expected_df'}), '(scan_def=expected_scan_def, df=expected_df)\n', (463, 507), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((709, 724), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {}), '()\n', (722, 724), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((1112, 1132), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'df': 'df'}), '(df=df)\n', (1125, 1132), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((1335, 1374), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'scan_def': '"""/foo/bar.yaml"""'}), "(scan_def='/foo/bar.yaml')\n", (1348, 1374), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((1857, 1896), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'scan_def': 'scan_def', 'df': 'df'}), '(scan_def=scan_def, df=df)\n', (1870, 1896), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((2265, 2304), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'scan_def': 'scan_def', 'df': 'df'}), '(scan_def=scan_def, df=df)\n', (2278, 2304), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((2877, 2916), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'scan_def': 'scan_def', 'df': 'df'}), '(scan_def=scan_def, df=df)\n', (2890, 2916), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((3487, 3526), 'prefect.tasks.sodaspark.SodaSparkScan', 'SodaSparkScan', ([], {'scan_def': 'scan_def', 'df': 'df'}), '(scan_def=scan_def, df=df)\n', (3500, 3526), False, 'from prefect.tasks.sodaspark import SodaSparkScan\n'), ((1147, 1172), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1160, 1172), False, 'import pytest\n'), ((1388, 1413), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1401, 1413), False, 'import pytest\n'), ((1910, 1939), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1923, 1939), False, 'import pytest\n'), ((2318, 2347), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2331, 2347), False, 'import pytest\n'), ((286, 320), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (318, 320), False, 'from pyspark.sql import SparkSession\n'), ((948, 982), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (980, 982), False, 'from pyspark.sql import SparkSession\n'), ((1693, 1727), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (1725, 1727), False, 'from pyspark.sql import SparkSession\n'), ((2713, 2747), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (2745, 2747), False, 'from pyspark.sql import SparkSession\n'), ((3323, 3357), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (3355, 3357), False, 'from pyspark.sql import SparkSession\n')]
|
# %%
"""
<NAME> loves French baguettes. A French baguette is 1 metre
long. In one gulp <NAME> swallows a piece of random length,
uniformly distributed on the interval [0; 1]. To eat the whole
baguette, the boa needs a random number N of gulps.
Estimate P(N=2), P(N=3), E(N)
"""
# %%
import numpy as np
import pandas as pd
from random import uniform
# %%
uniform(a=0, b=1)
list(range(7))
# %%
def eat_baget():
"""
    Simulator of <NAME>.
    Returns the number of bites it took to eat one baguette.
"""
n_ukusov = 0
baget = 1
while baget > 0:
zaglot = uniform(a=0, b=1)
baget -= zaglot
n_ukusov += 1
return n_ukusov
# %%
eat_baget()
# %%
n_exp = 1000
udaff_life = [eat_baget() for i in range(n_exp)]
udaff_life
EN_hat = np.mean(udaff_life)
EN_hat
PNeq2_hat = udaff_life.count(2) / n_exp
PNeq2_hat
PNeq3_hat = udaff_life.count(3) / n_exp
PNeq3_hat
# %%
"""
<NAME> tosses a die until the first six.
Let the random variable N be the number of tosses.
Event A: only even faces came up during the tosses.
Estimate P(N=2), P(N=3), E(N)
Estimate P(A), P(N=2|A), P(A|N=2), P(A OR N=2), P(A AND N=2)
"""
# %%
from random import randint
# %%
randint(a=1, b=2)
# %%
7 // 2
# %%
7 % 2
def throw_until_six():
"""
    Toss a die until the first six.
    Count the tosses, and track whether only even numbers came up.
    Returns: (number of tosses, True/False)
"""
n_broskov = 0
tolko_chet = True
    brosok = -1  # dummy toss, just to enter the loop
while brosok < 6:
brosok = randint(1, 6)
n_broskov += 1
if brosok % 2 == 1:
tolko_chet = False
return (n_broskov, tolko_chet)
# %%
throw_until_six()
n_exp = 1000
throw_list = [throw_until_six() for i in range(n_exp)]
throw_list
# %%
throw_df = pd.DataFrame(throw_list, columns=['n_throw', 'only_even'])
throw_df.describe()
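# %%
# A minimal sketch (not in the original): estimating the requested
# probabilities and the expectation from the simulated sample in throw_df.
EN_hat = throw_df['n_throw'].mean()
PNeq2_hat = (throw_df['n_throw'] == 2).mean()
PNeq3_hat = (throw_df['n_throw'] == 3).mean()
PA_hat = throw_df['only_even'].mean()
PNeq2_given_A_hat = (throw_df.loc[throw_df['only_even'], 'n_throw'] == 2).mean()
PA_given_Neq2_hat = throw_df.loc[throw_df['n_throw'] == 2, 'only_even'].mean()
PA_and_Neq2_hat = ((throw_df['n_throw'] == 2) & throw_df['only_even']).mean()
PA_or_Neq2_hat = PA_hat + PNeq2_hat - PA_and_Neq2_hat  # inclusion-exclusion
EN_hat, PNeq2_hat, PNeq3_hat, PA_hat, PNeq2_given_A_hat, PA_given_Neq2_hat, PA_and_Neq2_hat, PA_or_Neq2_hat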
# %%
"""
On the eve of a war, the Cruel Tyrant of a very large country issued
a decree. From now on a family receives a cash bonus for every
newborn boy, but if a second girl is born into a family, the whole
family is killed. The poor citizens of the country are terrified and
desperately need money, so every family will keep having children
until its first girl is born.
a) What will the average number of children per family be?
b) What will the share of boys in the country be?
c) What will the average share of boys in a random family be?
d) On average, how many boys are there in a randomly chosen family?
"""
|
[
"pandas.DataFrame",
"numpy.mean",
"random.randint",
"random.uniform"
] |
[((381, 398), 'random.uniform', 'uniform', ([], {'a': '(0)', 'b': '(1)'}), '(a=0, b=1)\n', (388, 398), False, 'from random import uniform\n'), ((795, 814), 'numpy.mean', 'np.mean', (['udaff_life'], {}), '(udaff_life)\n', (802, 814), True, 'import numpy as np\n'), ((1218, 1235), 'random.randint', 'randint', ([], {'a': '(1)', 'b': '(2)'}), '(a=1, b=2)\n', (1225, 1235), False, 'from random import randint\n'), ((1862, 1920), 'pandas.DataFrame', 'pd.DataFrame', (['throw_list'], {'columns': "['n_throw', 'only_even']"}), "(throw_list, columns=['n_throw', 'only_even'])\n", (1874, 1920), True, 'import pandas as pd\n'), ((604, 621), 'random.uniform', 'uniform', ([], {'a': '(0)', 'b': '(1)'}), '(a=0, b=1)\n', (611, 621), False, 'from random import uniform\n'), ((1610, 1623), 'random.randint', 'randint', (['(1)', '(6)'], {}), '(1, 6)\n', (1617, 1623), False, 'from random import randint\n')]
|
import json
import pytest
from django.conf import settings as test_settings
from rest_framework import status
from rest_framework.request import ForcedAuthentication
from rest_framework.test import APIClient
from shipchain_common.utils import random_id
from shipchain_common.test_utils import get_jwt, mocked_rpc_response
from apps.authentication import passive_credentials_auth
from apps.shipments.models import Shipment, Device
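# Monkey-patch DRF's ForcedAuthentication (below) so force_authenticate can
# pass a dummy 'JWT dummy' header through without real token parsing.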
def fake_get_raw_token(self, header):
return header.split()[1]
def fake_get_header(self, request):
return b'JWT dummy'
ForcedAuthentication.get_raw_token = fake_get_raw_token
ForcedAuthentication.get_header = fake_get_header
USER_ID = random_id()
ORGANIZATION_ID = random_id()
VAULT_ID = random_id()
TRANSACTION_HASH = 'txHash'
DEVICE_ID = random_id()
@pytest.fixture(scope='session')
def token():
return get_jwt(username='<EMAIL>', sub=USER_ID, organization_id=ORGANIZATION_ID)
@pytest.fixture(scope='session')
def user(token):
return passive_credentials_auth(token)
@pytest.fixture(scope='session')
def api_client(user, token):
client = APIClient()
client.force_authenticate(user=user, token=token)
return client
@pytest.fixture
def mocked_engine_rpc(mocker):
mocker.patch('apps.shipments.rpc.Load110RPCClient.create_vault', return_value=(VAULT_ID, 's3://fake-vault-uri/'))
mocker.patch('apps.shipments.rpc.Load110RPCClient.add_shipment_data', return_value={'hash': TRANSACTION_HASH})
mocked_cst = mocker.patch('apps.shipments.rpc.Load110RPCClient.create_shipment_transaction',
return_value=('version', {}))
mocked_cst.__qualname__ = 'ShipmentRPCClient.create_shipment_transaction'
mocker.patch('apps.shipments.rpc.Load110RPCClient.sign_transaction', return_value=('version', {}))
mocked_uvht = mocker.patch('apps.shipments.rpc.Load110RPCClient.set_vault_hash_tx', return_value={})
mocked_uvht.__qualname__ = 'ShipmentRPCClient.set_vault_hash_tx'
mocker.patch('apps.shipments.rpc.Load110RPCClient.send_transaction', return_value={
"blockHash": "0xccb595947a121e37df8bf689c3f88c6d9c7fb56070c9afda38551540f9e231f7",
"blockNumber": 15,
"contractAddress": None,
"cumulativeGasUsed": 138090,
"from": "0x13b1eebb31a1aa2ecaa2ad9e7455df2f717f2143",
"gasUsed": 138090,
"logs": [],
"logsBloom": "0x0000000000",
"status": True,
"to": "0x25ff5dc79a7c4e34254ff0f4a19d69e491201dd3",
"transactionHash": TRANSACTION_HASH,
"transactionIndex": 0
})
@pytest.fixture
def mocked_iot_api(mocker):
return mocker.patch('apps.shipments.iot_client.DeviceAWSIoTClient.update_shadow', return_value=mocked_rpc_response(
{'data': {'shipmentId': 'dunno yet', 'shipmentState': 'dunno yet'}}))
@pytest.yield_fixture
def http_pretty():
import httpretty
httpretty.enable()
yield httpretty
httpretty.disable()
@pytest.fixture
def mocked_profiles(http_pretty):
profiles_ids = {
"shipper_wallet_id": random_id(),
"carrier_wallet_id": random_id(),
"storage_credentials_id": random_id()
}
http_pretty.register_uri(http_pretty.GET,
f"{test_settings.PROFILES_URL}/api/v1/wallet/{profiles_ids['shipper_wallet_id']}/",
body=json.dumps({'good': 'good'}), status=status.HTTP_200_OK)
http_pretty.register_uri(http_pretty.GET,
f"{test_settings.PROFILES_URL}/api/v1/wallet/{profiles_ids['carrier_wallet_id']}/",
body=json.dumps({'good': 'good'}), status=status.HTTP_200_OK)
http_pretty.register_uri(http_pretty.GET,
f"{test_settings.PROFILES_URL}/api/v1/storage_credentials/{profiles_ids['storage_credentials_id']}/",
body=json.dumps({'good': 'good'}), status=status.HTTP_200_OK)
return profiles_ids
@pytest.fixture
def shipment(mocked_engine_rpc, mocked_iot_api):
return Shipment.objects.create(vault_id=VAULT_ID,
carrier_wallet_id=random_id(),
shipper_wallet_id=random_id(),
storage_credentials_id=random_id(),
owner_id=USER_ID)
@pytest.fixture
def shipment_with_device(shipment):
shipment.device = Device.objects.create(id=DEVICE_ID)
shipment.save()
shipment.refresh_from_db(fields=('device',))
return shipment
|
[
"shipchain_common.test_utils.mocked_rpc_response",
"pytest.fixture",
"apps.shipments.models.Device.objects.create",
"json.dumps",
"apps.authentication.passive_credentials_auth",
"shipchain_common.test_utils.get_jwt",
"httpretty.disable",
"httpretty.enable",
"shipchain_common.utils.random_id",
"rest_framework.test.APIClient"
] |
[((683, 694), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (692, 694), False, 'from shipchain_common.utils import random_id\n'), ((713, 724), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (722, 724), False, 'from shipchain_common.utils import random_id\n'), ((736, 747), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (745, 747), False, 'from shipchain_common.utils import random_id\n'), ((788, 799), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (797, 799), False, 'from shipchain_common.utils import random_id\n'), ((803, 834), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (817, 834), False, 'import pytest\n'), ((936, 967), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (950, 967), False, 'import pytest\n'), ((1031, 1062), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1045, 1062), False, 'import pytest\n'), ((859, 932), 'shipchain_common.test_utils.get_jwt', 'get_jwt', ([], {'username': '"""<EMAIL>"""', 'sub': 'USER_ID', 'organization_id': 'ORGANIZATION_ID'}), "(username='<EMAIL>', sub=USER_ID, organization_id=ORGANIZATION_ID)\n", (866, 932), False, 'from shipchain_common.test_utils import get_jwt, mocked_rpc_response\n'), ((996, 1027), 'apps.authentication.passive_credentials_auth', 'passive_credentials_auth', (['token'], {}), '(token)\n', (1020, 1027), False, 'from apps.authentication import passive_credentials_auth\n'), ((1105, 1116), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1114, 1116), False, 'from rest_framework.test import APIClient\n'), ((2883, 2901), 'httpretty.enable', 'httpretty.enable', ([], {}), '()\n', (2899, 2901), False, 'import httpretty\n'), ((2926, 2945), 'httpretty.disable', 'httpretty.disable', ([], {}), '()\n', (2943, 2945), False, 'import httpretty\n'), ((4401, 4436), 'apps.shipments.models.Device.objects.create', 'Device.objects.create', ([], {'id': 'DEVICE_ID'}), '(id=DEVICE_ID)\n', (4422, 4436), False, 'from apps.shipments.models import Shipment, Device\n'), ((3048, 3059), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (3057, 3059), False, 'from shipchain_common.utils import random_id\n'), ((3090, 3101), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (3099, 3101), False, 'from shipchain_common.utils import random_id\n'), ((3137, 3148), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (3146, 3148), False, 'from shipchain_common.utils import random_id\n'), ((2716, 2808), 'shipchain_common.test_utils.mocked_rpc_response', 'mocked_rpc_response', (["{'data': {'shipmentId': 'dunno yet', 'shipmentState': 'dunno yet'}}"], {}), "({'data': {'shipmentId': 'dunno yet', 'shipmentState':\n 'dunno yet'}})\n", (2735, 2808), False, 'from shipchain_common.test_utils import get_jwt, mocked_rpc_response\n'), ((3349, 3377), 'json.dumps', 'json.dumps', (["{'good': 'good'}"], {}), "({'good': 'good'})\n", (3359, 3377), False, 'import json\n'), ((3599, 3627), 'json.dumps', 'json.dumps', (["{'good': 'good'}"], {}), "({'good': 'good'})\n", (3609, 3627), False, 'import json\n'), ((3867, 3895), 'json.dumps', 'json.dumps', (["{'good': 'good'}"], {}), "({'good': 'good'})\n", (3877, 3895), False, 'import json\n'), ((4122, 4133), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (4131, 4133), False, 'from shipchain_common.utils import random_id\n'), ((4188, 4199), 
'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (4197, 4199), False, 'from shipchain_common.utils import random_id\n'), ((4259, 4270), 'shipchain_common.utils.random_id', 'random_id', ([], {}), '()\n', (4268, 4270), False, 'from shipchain_common.utils import random_id\n')]
|
from setuptools import setup, Extension
import sys, os
# explode if environment isn't correct, as set in CIBW_ENVIRONMENT
CIBW_TEST_VAR = os.environ.get('CIBW_TEST_VAR')
CIBW_TEST_VAR_2 = os.environ.get('CIBW_TEST_VAR_2')
PATH = os.environ.get('PATH')
if CIBW_TEST_VAR != 'a b c':
raise Exception('CIBW_TEST_VAR should equal "a b c". It was "%s"' % CIBW_TEST_VAR)
if CIBW_TEST_VAR_2 != '1':
raise Exception('CIBW_TEST_VAR_2 should equal "1". It was "%s"' % CIBW_TEST_VAR_2)
if '/opt/cibw_test_path' not in PATH:
raise Exception('PATH should contain "/opt/cibw_test_path". It was "%s"' % PATH)
if '$PATH' in PATH:
raise Exception('$PATH should be expanded in PATH. It was "%s"' % PATH)
setup(
name="spam",
ext_modules=[Extension('spam', sources=['spam.c'])],
version="0.1.0",
)
|
[
"os.environ.get",
"setuptools.Extension"
] |
[((139, 170), 'os.environ.get', 'os.environ.get', (['"""CIBW_TEST_VAR"""'], {}), "('CIBW_TEST_VAR')\n", (153, 170), False, 'import sys, os\n'), ((189, 222), 'os.environ.get', 'os.environ.get', (['"""CIBW_TEST_VAR_2"""'], {}), "('CIBW_TEST_VAR_2')\n", (203, 222), False, 'import sys, os\n'), ((230, 252), 'os.environ.get', 'os.environ.get', (['"""PATH"""'], {}), "('PATH')\n", (244, 252), False, 'import sys, os\n'), ((746, 783), 'setuptools.Extension', 'Extension', (['"""spam"""'], {'sources': "['spam.c']"}), "('spam', sources=['spam.c'])\n", (755, 783), False, 'from setuptools import setup, Extension\n')]
|
# Copyright 2019 <NAME>
# Licensed under the MIT License
import asyncio
import time
from aiohttp import request
URLS = [
"https://2019.northbaypython.org",
"https://duckduckgo.com",
"https://jreese.sh",
"https://news.ycombinator.com",
"https://python.org",
]
# Coroutines with aiohttp
async def fetch(url: str) -> str:
async with request("GET", url) as r:
return await r.text("utf-8")
async def main():
coros = [fetch(url) for url in URLS]
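    # gather runs all fetches concurrently and returns results in input order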
results = await asyncio.gather(*coros)
for result in results:
print(f"{result[:20]!r}")
if __name__ == "__main__":
asyncio.run(main())
|
[
"asyncio.gather",
"aiohttp.request"
] |
[((360, 379), 'aiohttp.request', 'request', (['"""GET"""', 'url'], {}), "('GET', url)\n", (367, 379), False, 'from aiohttp import request\n'), ((504, 526), 'asyncio.gather', 'asyncio.gather', (['*coros'], {}), '(*coros)\n', (518, 526), False, 'import asyncio\n')]
|
import tbf.utils as utils
class HarnessCreator(object):
def _get_vector_read_method(self):
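        # Returns C source for parse_inp(): it strips a trailing newline, then
        # tries to parse the line as unsigned long long, signed long long, or
        # long double, and returns a 16-byte buffer holding the parsed value.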
return b"""char * parse_inp(char * __inp_var) {
unsigned int input_length = strlen(__inp_var)-1;
/* Remove '\\n' at end of input */
if (__inp_var[input_length] == '\\n') {
__inp_var[input_length] = '\\0';
}
char * parseEnd;
char * value_pointer = malloc(16);
unsigned long long intVal = strtoull(__inp_var, &parseEnd, 0);
if (*parseEnd != 0) {
long long sintVal = strtoll(__inp_var, &parseEnd, 0);
if (*parseEnd != 0) {
long double floatVal = strtold(__inp_var, &parseEnd);
if (*parseEnd != 0) {
fprintf(stderr, "Can't parse input: '%s' (failing at '%s')\\n", __inp_var, parseEnd);
abort();
} else {
memcpy(value_pointer, &floatVal, 16);
}
} else {
memcpy(value_pointer, &sintVal, 8);
}
} else {
memcpy(value_pointer, &intVal, 8);
}
return value_pointer;
}\n\n"""
def __init__(self):
self.repr_type = b"__repr"
def _get_preamble(self):
preamble = ''
preamble += utils.EXTERNAL_DECLARATIONS
preamble += "\n"
preamble += utils.get_assume_method() + "\n"
preamble = preamble.encode()
preamble += self._get_vector_read_method()
return preamble
def _get_error_definition(self, method_name):
definition = 'void {0}() {{\n'.format(method_name)
definition += ' fprintf(stderr, \"{0}\\n\");\n'.format(
utils.ERROR_STRING)
definition += ' exit(1);\n}\n\n'
return definition.encode()
def _get_nondet_method_definitions(self, nondet_methods, test_vector):
definitions = b''
if test_vector is not None:
definitions += b'unsigned int access_counter = 0;\n\n'
for method in nondet_methods:
definitions += utils.get_method_head(method['name'], method['type'],
method['params']).encode()
definitions += b' {\n'
if method['type'] != 'void':
definitions += " unsigned int inp_size = 3000;\n".encode()
definitions += " char * inp_var = malloc(inp_size);\n".encode(
)
if test_vector is None: # Build generic harness
definitions += " fgets(inp_var, inp_size, stdin);\n".encode(
)
else:
definitions += " switch(access_counter) {\n".encode()
for idx, item in enumerate(test_vector.vector):
if type(item['value']) is bytes:
value = item['value']
else:
value = item['value'].encode()
# yapf: disable
definitions += b''.join([
b'case ', str(idx).encode(),
b': strcpy(inp_var, "', value, b'"); break;\n'
])
# yapf: enable
definitions += b" default: abort();\n"
definitions += b" }\n"
definitions += b" access_counter++;\n"
definitions += b''.join([
b' return *((', method['type'].encode(),
b' *) parse_inp(inp_var));\n'
])
definitions += b'}\n\n'
return definitions
def create_harness(self, nondet_methods, error_method, test_vector=None):
harness = b''
harness += self._get_preamble()
if error_method:
harness += self._get_error_definition(error_method)
harness += self._get_nondet_method_definitions(nondet_methods,
test_vector)
return harness
|
[
"tbf.utils.get_method_head",
"tbf.utils.get_assume_method"
] |
[((1231, 1256), 'tbf.utils.get_assume_method', 'utils.get_assume_method', ([], {}), '()\n', (1254, 1256), True, 'import tbf.utils as utils\n'), ((1934, 2005), 'tbf.utils.get_method_head', 'utils.get_method_head', (["method['name']", "method['type']", "method['params']"], {}), "(method['name'], method['type'], method['params'])\n", (1955, 2005), True, 'import tbf.utils as utils\n')]
|
from django.urls import path, re_path
from declaracion.views import (DeclaracionFormView, DatosCurricularesView,
DatosEncargoActualView, ExperienciaLaboralView,
ConyugeDependientesView,
DatosCurricularesDelete,
ExperienciaLaboralDeleteView,
ConyugeDependientesDeleteView,DeclaracionFiscalFormView,
DeclaracionFiscalDelete,listaMunicipios,DomiciliosViews, ParejaView,ParejaDeleteView)
from django.views.generic import TemplateView
from django.conf.urls import url
urlpatterns = [
re_path(r'^declaracion-fiscal/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DeclaracionFiscalFormView.as_view(),
name='declaracion-fiscal'),
re_path(r'^declaracion-fiscal/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/borrar/(?P<pk>\d+)/$',
DeclaracionFiscalDelete.as_view(),
name='declaracion-fiscal-borrar'),
re_path(r'^informacion-general/(?P<cat_tipos_declaracion>[0-9])/$', DeclaracionFormView.as_view(),
name='informacion-general'),
re_path(r'^informacion-general/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DeclaracionFormView.as_view(),
name='informacion-general'),
re_path(r'^direccion/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DomiciliosViews.as_view(),
name='direccion'),
re_path(r'^datos-curriculares/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DatosCurricularesView.as_view(),
name='datos-curriculares'),
re_path(r'^datos-curriculares/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/agregar/$',
DatosCurricularesView.as_view(), {'agregar': True},
name='datos-curriculares-agregar'),
re_path(r'^datos-curriculares/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/editar/(?P<pk>\d+)/$',
DatosCurricularesView.as_view(),
name='datos-curriculares-editar'),
re_path(r'^datos-curriculares/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/borrar/(?P<pk>\d+)/$',
DatosCurricularesDelete.as_view(),
name='datos-curriculares-borrar'),
re_path(r'^datos-del-encargo-actual/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DatosEncargoActualView.as_view(),
name='datos-del-encargo-actual'),
re_path(r'^experiencia-laboral/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
ExperienciaLaboralView.as_view(),
name='experiencia-laboral'),
re_path(r'^experiencia-laboral/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/agregar/$',
ExperienciaLaboralView.as_view(), {'agregar': True},
name='experiencia-laboral-agregar'),
re_path(r'^experiencia-laboral/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/editar/(?P<pk>\d+)/$',
ExperienciaLaboralView.as_view(),
name='experiencia-laboral-editar'),
re_path(r'^experiencia-laboral/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/borrar/(?P<pk>\d+)/$',
ExperienciaLaboralDeleteView.as_view(),
name='experiencia-laboral-borrar'),
re_path(r'^dependientes-economicos/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
ConyugeDependientesView.as_view(),
name='dependientes-economicos'),
re_path(r'^dependientes-economicos/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/agregar/$',
ConyugeDependientesView.as_view(), {'agregar': True},
name='dependientes-economicos-agregar'),
re_path(r'^dependientes-economicos/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/editar/(?P<pk>\d+)/$',
ConyugeDependientesView.as_view(),
name='dependientes-economicos-editar'),
re_path(r'^dependientes-economicos/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/borrar/(?P<pk>\d+)/$',
ConyugeDependientesDeleteView.as_view(),
name='dependientes-economicos-borrar'),
re_path(r'^datos-pareja/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
ParejaView.as_view(),
name='datos-pareja'),
url(r'^ajax/lista_municipios/$', listaMunicipios, name='lista_municipios'),
]
|
[
"declaracion.views.ConyugeDependientesDeleteView.as_view",
"declaracion.views.DeclaracionFiscalFormView.as_view",
"declaracion.views.DatosCurricularesView.as_view",
"declaracion.views.DeclaracionFormView.as_view",
"declaracion.views.DeclaracionFiscalDelete.as_view",
"declaracion.views.ExperienciaLaboralView.as_view",
"declaracion.views.ConyugeDependientesView.as_view",
"declaracion.views.ParejaView.as_view",
"declaracion.views.DomiciliosViews.as_view",
"django.conf.urls.url",
"declaracion.views.ExperienciaLaboralDeleteView.as_view",
"declaracion.views.DatosCurricularesDelete.as_view",
"declaracion.views.DatosEncargoActualView.as_view"
] |
[((5013, 5086), 'django.conf.urls.url', 'url', (['"""^ajax/lista_municipios/$"""', 'listaMunicipios'], {'name': '"""lista_municipios"""'}), "('^ajax/lista_municipios/$', listaMunicipios, name='lista_municipios')\n", (5016, 5086), False, 'from django.conf.urls import url\n'), ((807, 842), 'declaracion.views.DeclaracionFiscalFormView.as_view', 'DeclaracionFiscalFormView.as_view', ([], {}), '()\n', (840, 842), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((1044, 1077), 'declaracion.views.DeclaracionFiscalDelete.as_view', 'DeclaracionFiscalDelete.as_view', ([], {}), '()\n', (1075, 1077), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((1203, 1232), 'declaracion.views.DeclaracionFormView.as_view', 'DeclaracionFormView.as_view', ([], {}), '()\n', (1230, 1232), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((1414, 1443), 'declaracion.views.DeclaracionFormView.as_view', 'DeclaracionFormView.as_view', ([], {}), '()\n', (1441, 1443), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((1619, 1644), 'declaracion.views.DomiciliosViews.as_view', 'DomiciliosViews.as_view', ([], {}), '()\n', (1642, 1644), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((1819, 1850), 'declaracion.views.DatosCurricularesView.as_view', 'DatosCurricularesView.as_view', ([], {}), '()\n', (1848, 1850), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((2041, 2072), 'declaracion.views.DatosCurricularesView.as_view', 'DatosCurricularesView.as_view', ([], {}), '()\n', (2070, 2072), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, 
ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((2301, 2332), 'declaracion.views.DatosCurricularesView.as_view', 'DatosCurricularesView.as_view', ([], {}), '()\n', (2330, 2332), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((2541, 2574), 'declaracion.views.DatosCurricularesDelete.as_view', 'DatosCurricularesDelete.as_view', ([], {}), '()\n', (2572, 2574), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((2775, 2807), 'declaracion.views.DatosEncargoActualView.as_view', 'DatosEncargoActualView.as_view', ([], {}), '()\n', (2805, 2807), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((2998, 3030), 'declaracion.views.ExperienciaLaboralView.as_view', 'ExperienciaLaboralView.as_view', ([], {}), '()\n', (3028, 3030), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((3223, 3255), 'declaracion.views.ExperienciaLaboralView.as_view', 'ExperienciaLaboralView.as_view', ([], {}), '()\n', (3253, 3255), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((3486, 3518), 'declaracion.views.ExperienciaLaboralView.as_view', 'ExperienciaLaboralView.as_view', ([], {}), '()\n', (3516, 3518), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((3729, 3767), 'declaracion.views.ExperienciaLaboralDeleteView.as_view', 'ExperienciaLaboralDeleteView.as_view', ([], {}), '()\n', (3765, 3767), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, 
DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((3964, 3997), 'declaracion.views.ConyugeDependientesView.as_view', 'ConyugeDependientesView.as_view', ([], {}), '()\n', (3995, 3997), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((4198, 4231), 'declaracion.views.ConyugeDependientesView.as_view', 'ConyugeDependientesView.as_view', ([], {}), '()\n', (4229, 4231), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((4470, 4503), 'declaracion.views.ConyugeDependientesView.as_view', 'ConyugeDependientesView.as_view', ([], {}), '()\n', (4501, 4503), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((4722, 4761), 'declaracion.views.ConyugeDependientesDeleteView.as_view', 'ConyugeDependientesDeleteView.as_view', ([], {}), '()\n', (4759, 4761), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n'), ((4951, 4971), 'declaracion.views.ParejaView.as_view', 'ParejaView.as_view', ([], {}), '()\n', (4969, 4971), False, 'from declaracion.views import DeclaracionFormView, DatosCurricularesView, DatosEncargoActualView, ExperienciaLaboralView, ConyugeDependientesView, DatosCurricularesDelete, ExperienciaLaboralDeleteView, ConyugeDependientesDeleteView, DeclaracionFiscalFormView, DeclaracionFiscalDelete, listaMunicipios, DomiciliosViews, ParejaView, ParejaDeleteView\n')]
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from django.shortcuts import render
from heartbeat.models import MonitorAgent, InstanceUUID
from trigger.models import Trigger
from heartbeat.serializers import MonitorAgentSerializer
from rest_framework import generics
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
import datetime
def get_config(agent):
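    # Build {instance uuid: [formatted triggers]} for every UUID this agent owns.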
uuids = agent.instances_uuid.all()
config = {}
for uuid in uuids:
config[uuid.uuid] = [trigger.format_dict() for trigger in Trigger.objects.filter(instance_uuid=uuid.uuid)]
return config
class MonitorAgentCreateView(generics.ListCreateAPIView):
queryset = MonitorAgent.objects.all()
serializer_class = MonitorAgentSerializer
def post(self, request, *args, **kwargs):
agent_id = request.data.get('id', None)
hostname = request.data.get('hostname', None)
uuids = request.data.get('uuids', None)
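        # Three request shapes: a known agent syncing its UUID list, a known
        # agent's plain heartbeat, and an unknown agent registering by hostname.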
if agent_id and uuids is not None:
config = None
try:
agent = MonitorAgent.objects.get(id=agent_id)
except MonitorAgent.DoesNotExist:
return Response({'id': None, 'config': config})
if agent.update_status:
config = get_config(agent)
agent.update_status = False
agent.status = True
agent.save()
instances_uuid = agent.instances_uuid.all()
instances_uuid_list = [instance_uuid.uuid for instance_uuid in instances_uuid]
for instance_uuid in instances_uuid:
if instance_uuid.uuid not in uuids:
instance_uuid.delete()
for uuid in uuids:
if uuid not in instances_uuid_list:
instance_uuid = InstanceUUID(uuid=uuid, agent=agent)
instance_uuid.save()
return Response({'id': agent_id, 'config': config})
elif agent_id and uuids is None:
config = None
try:
agent = MonitorAgent.objects.get(id=agent_id)
except MonitorAgent.DoesNotExist:
return Response({'id': None, 'config': config})
if agent.update_status:
config = get_config(agent)
agent.update_status = False
agent.status = True
agent.update_time = datetime.datetime.now()
agent.save()
return Response({'id': agent_id, 'config': config})
elif not agent_id and hostname:
try:
agent = MonitorAgent.objects.get(hostname=hostname)
except MonitorAgent.DoesNotExist:
agent = MonitorAgent(hostname=hostname, status=0)
agent.save()
if uuids is not None:
instances_uuid = agent.instances_uuid.all()
for instance_uuid in instances_uuid:
if instance_uuid.uuid not in uuids:
instance_uuid.delete()
for uuid in uuids:
try:
InstanceUUID.objects.get(uuid=uuid)
except InstanceUUID.DoesNotExist:
instance_uuid = InstanceUUID(uuid=uuid, agent=agent)
instance_uuid.save()
config = get_config(agent)
agent.update_status = False
agent.status = True
agent.save()
return Response({'id': agent.id, 'config': config})
else:
return Response({'id': None, 'config': None})
class InstanceUUIDRetrieveView(generics.RetrieveAPIView):
queryset = InstanceUUID.objects.all()
|
[
"heartbeat.models.MonitorAgent",
"heartbeat.models.InstanceUUID.objects.get",
"heartbeat.models.InstanceUUID",
"trigger.models.Trigger.objects.filter",
"rest_framework.response.Response",
"heartbeat.models.MonitorAgent.objects.get",
"datetime.datetime.now",
"heartbeat.models.InstanceUUID.objects.all",
"heartbeat.models.MonitorAgent.objects.all"
] |
[((690, 716), 'heartbeat.models.MonitorAgent.objects.all', 'MonitorAgent.objects.all', ([], {}), '()\n', (714, 716), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((3677, 3703), 'heartbeat.models.InstanceUUID.objects.all', 'InstanceUUID.objects.all', ([], {}), '()\n', (3701, 3703), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((1910, 1954), 'rest_framework.response.Response', 'Response', (["{'id': agent_id, 'config': config}"], {}), "({'id': agent_id, 'config': config})\n", (1918, 1954), False, 'from rest_framework.response import Response\n'), ((547, 594), 'trigger.models.Trigger.objects.filter', 'Trigger.objects.filter', ([], {'instance_uuid': 'uuid.uuid'}), '(instance_uuid=uuid.uuid)\n', (569, 594), False, 'from trigger.models import Trigger\n'), ((1071, 1108), 'heartbeat.models.MonitorAgent.objects.get', 'MonitorAgent.objects.get', ([], {'id': 'agent_id'}), '(id=agent_id)\n', (1095, 1108), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((2399, 2422), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2420, 2422), False, 'import datetime\n'), ((2468, 2512), 'rest_framework.response.Response', 'Response', (["{'id': agent_id, 'config': config}"], {}), "({'id': agent_id, 'config': config})\n", (2476, 2512), False, 'from rest_framework.response import Response\n'), ((1178, 1218), 'rest_framework.response.Response', 'Response', (["{'id': None, 'config': config}"], {}), "({'id': None, 'config': config})\n", (1186, 1218), False, 'from rest_framework.response import Response\n'), ((1812, 1848), 'heartbeat.models.InstanceUUID', 'InstanceUUID', ([], {'uuid': 'uuid', 'agent': 'agent'}), '(uuid=uuid, agent=agent)\n', (1824, 1848), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((2063, 2100), 'heartbeat.models.MonitorAgent.objects.get', 'MonitorAgent.objects.get', ([], {'id': 'agent_id'}), '(id=agent_id)\n', (2087, 2100), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((3484, 3528), 'rest_framework.response.Response', 'Response', (["{'id': agent.id, 'config': config}"], {}), "({'id': agent.id, 'config': config})\n", (3492, 3528), False, 'from rest_framework.response import Response\n'), ((3563, 3601), 'rest_framework.response.Response', 'Response', (["{'id': None, 'config': None}"], {}), "({'id': None, 'config': None})\n", (3571, 3601), False, 'from rest_framework.response import Response\n'), ((2170, 2210), 'rest_framework.response.Response', 'Response', (["{'id': None, 'config': config}"], {}), "({'id': None, 'config': config})\n", (2178, 2210), False, 'from rest_framework.response import Response\n'), ((2594, 2637), 'heartbeat.models.MonitorAgent.objects.get', 'MonitorAgent.objects.get', ([], {'hostname': 'hostname'}), '(hostname=hostname)\n', (2618, 2637), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((2708, 2749), 'heartbeat.models.MonitorAgent', 'MonitorAgent', ([], {'hostname': 'hostname', 'status': '(0)'}), '(hostname=hostname, status=0)\n', (2720, 2749), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((3115, 3150), 'heartbeat.models.InstanceUUID.objects.get', 'InstanceUUID.objects.get', ([], {'uuid': 'uuid'}), '(uuid=uuid)\n', (3139, 3150), False, 'from heartbeat.models import MonitorAgent, InstanceUUID\n'), ((3245, 3281), 'heartbeat.models.InstanceUUID', 'InstanceUUID', ([], {'uuid': 'uuid', 'agent': 'agent'}), '(uuid=uuid, agent=agent)\n', (3257, 3281), False, 'from heartbeat.models import MonitorAgent, 
InstanceUUID\n')]
|
import pandas as pd
import numpy as np
from matplotlib.collections import PatchCollection, LineCollection
from descartes.patch import PolygonPatch
try:
import geopandas # noqa: F401
except ImportError:
HAS_GEOPANDAS = False
else:
HAS_GEOPANDAS = True
from ..doctools import document
from ..exceptions import PlotnineError
from ..utils import to_rgba, SIZE_FACTOR
from .geom import geom
from .geom_point import geom_point
@document
class geom_map(geom):
"""
Draw map feature
    The map features are drawn without any special projections.
{usage}
Parameters
----------
{common_parameters}
Notes
-----
    This geom is best suited for plotting a shapefile read into a
    geopandas dataframe. The dataframe should have a ``geometry``
    column.
"""
DEFAULT_AES = {'alpha': 1, 'color': '#111111', 'fill': '#333333',
'linetype': 'solid', 'shape': 'o', 'size': 0.5,
'stroke': 0.5}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False}
REQUIRED_AES = {'geometry'}
legend_geom = 'polygon'
def __init__(self, mapping=None, data=None, **kwargs):
if not HAS_GEOPANDAS:
raise PlotnineError(
"geom_map requires geopandas. "
"Please install geopandas."
)
geom.__init__(self, mapping, data, **kwargs)
# Almost all geodataframes loaded from shapefiles
# have a geometry column.
if 'geometry' not in self.mapping:
self.mapping['geometry'] = 'geometry'
def setup_data(self, data):
if not len(data):
return data
# Remove any NULL geometries, and remember
# All the non-Null shapes in a shapefile are required to be
# of the same shape type.
bool_idx = np.array([g is not None for g in data['geometry']])
if not np.all(bool_idx):
data = data.loc[bool_idx]
# Add polygon limits. Scale training uses them
try:
bounds = data['geometry'].bounds
except AttributeError:
# The geometry is not a GeoSeries
# Bounds calculation is extracted from
# geopandas.base.GeoPandasBase.bounds
bounds = pd.DataFrame(
np.array([x.bounds for x in data['geometry']]),
columns=['xmin', 'ymin', 'xmax', 'ymax'],
index=data.index)
else:
bounds.rename(
columns={
'minx': 'xmin',
'maxx': 'xmax',
'miny': 'ymin',
'maxy': 'ymax'
},
inplace=True)
data = pd.concat([data, bounds], axis=1)
return data
def draw_panel(self, data, panel_params, coord, ax, **params):
if not len(data):
return data
data.loc[data['color'].isnull(), 'color'] = 'none'
data.loc[data['fill'].isnull(), 'fill'] = 'none'
data['fill'] = to_rgba(data['fill'], data['alpha'])
geom_type = data.geometry.iloc[0].geom_type
if geom_type in ('Polygon', 'MultiPolygon'):
data['size'] *= SIZE_FACTOR
patches = [PolygonPatch(g) for g in data['geometry']]
coll = PatchCollection(
patches,
edgecolor=data['color'],
facecolor=data['fill'],
linestyle=data['linetype'],
linewidth=data['size'],
zorder=params['zorder'],
)
ax.add_collection(coll)
elif geom_type == 'Point':
# Extract point coordinates from shapely geom
# and plot with geom_point
arr = np.array([list(g.coords)[0] for g in data['geometry']])
data['x'] = arr[:, 0]
data['y'] = arr[:, 1]
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
gdata.is_copy = None
geom_point.draw_group(
gdata, panel_params, coord, ax, **params)
elif geom_type == 'LineString':
data['size'] *= SIZE_FACTOR
data['color'] = to_rgba(data['color'], data['alpha'])
segments = [list(g.coords) for g in data['geometry']]
coll = LineCollection(
segments,
edgecolor=data['color'],
linewidth=data['size'],
linestyle=data['linetype'],
zorder=params['zorder'])
ax.add_collection(coll)
|
[
"matplotlib.collections.LineCollection",
"descartes.patch.PolygonPatch",
"numpy.array",
"matplotlib.collections.PatchCollection",
"pandas.concat",
"numpy.all"
] |
[((1860, 1913), 'numpy.array', 'np.array', (["[(g is not None) for g in data['geometry']]"], {}), "([(g is not None) for g in data['geometry']])\n", (1868, 1913), True, 'import numpy as np\n'), ((2741, 2774), 'pandas.concat', 'pd.concat', (['[data, bounds]'], {'axis': '(1)'}), '([data, bounds], axis=1)\n', (2750, 2774), True, 'import pandas as pd\n'), ((1927, 1943), 'numpy.all', 'np.all', (['bool_idx'], {}), '(bool_idx)\n', (1933, 1943), True, 'import numpy as np\n'), ((3321, 3480), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'edgecolor': "data['color']", 'facecolor': "data['fill']", 'linestyle': "data['linetype']", 'linewidth': "data['size']", 'zorder': "params['zorder']"}), "(patches, edgecolor=data['color'], facecolor=data['fill'],\n linestyle=data['linetype'], linewidth=data['size'], zorder=params['zorder']\n )\n", (3336, 3480), False, 'from matplotlib.collections import PatchCollection, LineCollection\n'), ((3259, 3274), 'descartes.patch.PolygonPatch', 'PolygonPatch', (['g'], {}), '(g)\n', (3271, 3274), False, 'from descartes.patch import PolygonPatch\n'), ((2326, 2372), 'numpy.array', 'np.array', (["[x.bounds for x in data['geometry']]"], {}), "([x.bounds for x in data['geometry']])\n", (2334, 2372), True, 'import numpy as np\n'), ((4372, 4502), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'edgecolor': "data['color']", 'linewidth': "data['size']", 'linestyle': "data['linetype']", 'zorder': "params['zorder']"}), "(segments, edgecolor=data['color'], linewidth=data['size'],\n linestyle=data['linetype'], zorder=params['zorder'])\n", (4386, 4502), False, 'from matplotlib.collections import PatchCollection, LineCollection\n')]
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import random
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
class DataLoader:
def __init__(self, config, logger):
# Initialize Dataloader Configuration
logging.info('[DATALOADER]: Initializing Spectrometer Dataloader')
self.config = config
self.dl_cfg = self.config['dataloader']
# Initialize PRNG Seed Values
if self.dl_cfg.enable_seed:
random.seed(self.dl_cfg.seed)
np.random.seed(self.dl_cfg.seed)
# Load Dataset
logging.info('[DATALOADER]: Loading Dataset Files')
logging.info('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)
# Preprocess Data
raw_data = open(self.dl_cfg.data_path, 'r').read().split('\n\n')
data = []
for row in raw_data:
if len(row) == 0: continue
row = row.replace('(', '').replace(')', '').strip()
row = row.replace('\n', ' ').split(' ')[2:]
row = list(map(lambda x: float(x), row))
if row[0] == 0: row[0] = 1.0 # Replace Encoding
data.append(row)
self.data = np.array(data)
logging.info('[DATALOADER]: > Loaded: ' + self.dl_cfg.data_path + '\t' + \
'Data Shape: ' + str(self.data.shape))
def get_data(self):
# Initialize Crossfold Validation
if self.dl_cfg.crossval.stratified:
kf = StratifiedKFold(n_splits=self.dl_cfg.crossval.folds,
shuffle=self.dl_cfg.crossval.shuffle,
random_state=self.dl_cfg.crossval.random_state)
else:
kf = KFold(n_splits=self.dl_cfg.crossval.folds,
shuffle=self.dl_cfg.crossval.shuffle,
random_state=self.dl_cfg.crossval.random_state)
        # StratifiedKFold.split requires the class labels; column 0 holds them.
        for fold, (train_index, test_index) in enumerate(
                kf.split(self.data, self.data[:, 0].astype(int))):
# Initialize Data Features and Labels
X_train = self.data[train_index, 1:]
y_train = self.data[train_index, 0].astype(int) - 1
X_test = self.data[test_index, 1:]
y_test = self.data[test_index, 0].astype(int) - 1
# Set Dataloader Attributes
self.num_class = len(np.unique(y_train))
yield fold, X_train, y_train, X_test, y_test
|
[
"numpy.random.seed",
"sklearn.model_selection.KFold",
"logging.info",
"random.seed",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"numpy.unique"
] |
[((854, 920), 'logging.info', 'logging.info', (['"""[DATALOADER]: Initializing Spectrometer Dataloader"""'], {}), "('[DATALOADER]: Initializing Spectrometer Dataloader')\n", (866, 920), False, 'import logging\n'), ((1192, 1243), 'logging.info', 'logging.info', (['"""[DATALOADER]: Loading Dataset Files"""'], {}), "('[DATALOADER]: Loading Dataset Files')\n", (1204, 1243), False, 'import logging\n'), ((1252, 1322), 'logging.info', 'logging.info', (["('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)"], {}), "('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)\n", (1264, 1322), False, 'import logging\n'), ((1793, 1807), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1801, 1807), True, 'import numpy as np\n'), ((1085, 1114), 'random.seed', 'random.seed', (['self.dl_cfg.seed'], {}), '(self.dl_cfg.seed)\n', (1096, 1114), False, 'import random\n'), ((1127, 1159), 'numpy.random.seed', 'np.random.seed', (['self.dl_cfg.seed'], {}), '(self.dl_cfg.seed)\n', (1141, 1159), True, 'import numpy as np\n'), ((2079, 2222), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'self.dl_cfg.crossval.folds', 'shuffle': 'self.dl_cfg.crossval.shuffle', 'random_state': 'self.dl_cfg.crossval.random_state'}), '(n_splits=self.dl_cfg.crossval.folds, shuffle=self.dl_cfg.\n crossval.shuffle, random_state=self.dl_cfg.crossval.random_state)\n', (2094, 2222), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((2315, 2448), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.dl_cfg.crossval.folds', 'shuffle': 'self.dl_cfg.crossval.shuffle', 'random_state': 'self.dl_cfg.crossval.random_state'}), '(n_splits=self.dl_cfg.crossval.folds, shuffle=self.dl_cfg.crossval.\n shuffle, random_state=self.dl_cfg.crossval.random_state)\n', (2320, 2448), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((2916, 2934), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2925, 2934), True, 'import numpy as np\n')]
|
#!/home/roberto/anaconda3/envs/tensorflow/bin/python
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import sys
import multiprocessing
import time
import pandas as pd
import tensorflow as tf
import cv2
from utils import detector_utils as detector_utils
from utils import label_map_util
class FasterRCNN(multiprocessing.Process):
def __init__(self, input_pipe, kcf_pipe, gpu_id, num_classes, jump, video_name, player, model_name):
multiprocessing.Process.__init__(self)
self.input_pipe = input_pipe
self.kcf_pipe = kcf_pipe
self.gpu_id = gpu_id
self.num_classes = num_classes
self.jump = jump
self.video_name = video_name
self.player = player
self.model_name = model_name
def run(self):
cwd_path = os.getcwd()
path_to_ckpt = os.path.join(cwd_path, self.model_name,'frozen_inference_graph.pb')
path_to_labels = os.path.join(cwd_path,'training','labelmap.pbtxt')
path_to_video = os.path.join(cwd_path,self.video_name)
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(self.gpu_id)
label_map = label_map_util.load_labelmap(path_to_labels)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_ckpt, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess = tf.Session(config=config, graph=detection_graph)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
print(detection_classes)
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
video = cv2.VideoCapture(path_to_video)
num_iter = 0
        while video.isOpened():
_, frame = video.read()
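            # Run the detector only on every `jump`-th frame of the video.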
if not (num_iter % self.jump):
if frame is None:
break
frame_expanded = np.expand_dims(frame, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
(box, score) = self.best_score_box(boxes, scores, classes)
# Send info to both threads
self.input_pipe.send((num_iter, box, score))
self.kcf_pipe.send((num_iter, box, score))
num_iter+=1
return
def best_score_box(self, boxes, scores, classes):
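        # Restrict to detections of the tracked player's class and return the
        # box/score pair with the highest confidence among them.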
pos_max = np.where(scores==np.amax(scores[np.where(classes==self.player)]))
return boxes[pos_max], scores[pos_max]
|
[
"os.getcwd",
"utils.label_map_util.create_category_index",
"utils.label_map_util.load_labelmap",
"tensorflow.GraphDef",
"tensorflow.Session",
"numpy.expand_dims",
"utils.label_map_util.convert_label_map_to_categories",
"cv2.VideoCapture",
"tensorflow.ConfigProto",
"numpy.where",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"multiprocessing.Process.__init__",
"os.path.join"
] |
[((996, 1034), 'multiprocessing.Process.__init__', 'multiprocessing.Process.__init__', (['self'], {}), '(self)\n', (1028, 1034), False, 'import multiprocessing\n'), ((1340, 1351), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1349, 1351), False, 'import os\n'), ((1375, 1443), 'os.path.join', 'os.path.join', (['cwd_path', 'self.model_name', '"""frozen_inference_graph.pb"""'], {}), "(cwd_path, self.model_name, 'frozen_inference_graph.pb')\n", (1387, 1443), False, 'import os\n'), ((1468, 1520), 'os.path.join', 'os.path.join', (['cwd_path', '"""training"""', '"""labelmap.pbtxt"""'], {}), "(cwd_path, 'training', 'labelmap.pbtxt')\n", (1480, 1520), False, 'import os\n'), ((1543, 1582), 'os.path.join', 'os.path.join', (['cwd_path', 'self.video_name'], {}), '(cwd_path, self.video_name)\n', (1555, 1582), False, 'import os\n'), ((1682, 1726), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['path_to_labels'], {}), '(path_to_labels)\n', (1710, 1726), False, 'from utils import label_map_util\n'), ((1748, 1867), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'self.num_classes', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n self.num_classes, use_display_name=True)\n', (1794, 1867), False, 'from utils import label_map_util\n'), ((1888, 1936), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1924, 1936), False, 'from utils import label_map_util\n'), ((1972, 1982), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1980, 1982), True, 'import tensorflow as tf\n'), ((2983, 3014), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path_to_video'], {}), '(path_to_video)\n', (2999, 3014), False, 'import cv2\n'), ((2053, 2066), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2064, 2066), True, 'import tensorflow as tf\n'), ((2317, 2333), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2331, 2333), True, 'import tensorflow as tf\n'), ((2473, 2521), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config', 'graph': 'detection_graph'}), '(config=config, graph=detection_graph)\n', (2483, 2521), True, 'import tensorflow as tf\n'), ((2084, 2118), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['path_to_ckpt', '"""rb"""'], {}), "(path_to_ckpt, 'rb')\n", (2098, 2118), True, 'import tensorflow as tf\n'), ((2252, 2294), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2271, 2294), True, 'import tensorflow as tf\n'), ((3252, 3281), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (3266, 3281), True, 'import numpy as np\n'), ((3914, 3946), 'numpy.where', 'np.where', (['(classes == self.player)'], {}), '(classes == self.player)\n', (3922, 3946), True, 'import numpy as np\n')]
|
""" ReshapeBarcodeWindow Class """
import tkinter
import copy
import cv2
from kalmus.tkinter_windows.gui_utils import update_graph, resource_path
class ReshapeBarcodeWindow():
"""
ReshapeBarcodeWindow Class
    GUI window for the user to reshape the selected barcode into the desired shape
"""
def __init__(self, barcode_1, barcode_2, axes, canvas):
"""
Initialize
:param barcode_1: The barcode 1
:param barcode_2: The barcode 2
:param axes: The display axes in the MainWindow of the kalmus
:param canvas: The display canvas in the MainWindow of the kalmus
"""
self.barcode_1 = barcode_1
self.barcode_2 = barcode_2
self.axes = axes
self.canvas = canvas
# Initialize the window
self.window = tkinter.Tk()
self.window.wm_title("Reshape/Resize Barcode Config")
self.window.iconbitmap(resource_path("kalmus_icon.ico"))
# Reshape/Resize option
self.config_option = tkinter.StringVar(self.window)
self.config_option.set("Reshape") # initialize
# Prompt for the resize parameters specification
params_label = tkinter.Label(self.window, text="Config Params: ")
params_label.grid(row=0, column=0, columnspan=1, sticky=tkinter.W)
# Label (text) prompt and entry for user to specify the resize parameters
column_length_label = tkinter.Label(self.window, text="Frames per Column: ")
column_length_label.grid(row=1, column=0, sticky=tkinter.W)
self.column_length_entry = tkinter.Entry(self.window, textvariable="-1", width=5)
self.column_length_entry.grid(row=1, column=1, padx=15)
self.resize_x_label = tkinter.Label(self.window, text="Scale Width by (ratio): ")
self.resize_x_label.grid(row=2, column=0, sticky=tkinter.W)
self.resize_x_entry = tkinter.Entry(self.window, textvariable="-2", width=5, state="disabled")
self.resize_x_entry.grid(row=2, column=1, padx=15)
self.resize_y_label = tkinter.Label(self.window, text="Scale Height by (ratio): ")
self.resize_y_label.grid(row=3, column=0, sticky=tkinter.W)
self.resize_y_entry = tkinter.Entry(self.window, textvariable="-3", width=5, state="disabled")
self.resize_y_entry.grid(row=3, column=1, padx=15)
# Label prompt for displaying the width and height of the currently selected barcode
self.size_label = tkinter.Label(self.window, text="Current Width = {:d}\nCurrent Height = {:d}"
.format(self.barcode_1.get_barcode().shape[1],
self.barcode_1.get_barcode().shape[0]))
self.size_label.grid(row=4, column=0, columnspan=1)
# Button to process the resize
self.process_button = tkinter.Button(self.window, text="Process", command=self.reshape_resize_barcode)
self.process_button.grid(row=4, column=2, sticky=tkinter.W)
# Label prompt for the Resize type selection
config_label = tkinter.Label(self.window, text="Config options: ")
config_label.grid(row=0, column=2, columnspan=1)
# Radio button for selecting the resize type
radio_reshape = tkinter.Radiobutton(self.window, text="Reshape", variable=self.config_option,
value="Reshape", anchor='w',
command=self.reshape)
radio_reshape.grid(row=1, column=2, sticky=tkinter.W)
radio_reshape.select()
radio_scaling = tkinter.Radiobutton(self.window, text="Scaling", variable=self.config_option,
value="Scaling", anchor='w',
command=self.scale)
radio_scaling.grid(row=2, column=2, sticky=tkinter.W)
radio_resize = tkinter.Radiobutton(self.window, text="Resize", variable=self.config_option,
value="Resize", anchor='w',
command=self.resize)
radio_resize.grid(row=3, column=2, sticky=tkinter.W)
# Label prompt for selecting which barcode to resize
which_barcode_label = tkinter.Label(self.window, text="Barcode: ")
which_barcode_label.grid(row=0, column=3, columnspan=1)
# Option variable
self.barcode_option = tkinter.StringVar(self.window)
self.barcode_option.set("Barcode 1")
# Radio button for selecting which barcode to resize
radio_barcode_1 = tkinter.Radiobutton(self.window, text="Barcode 1", variable=self.barcode_option,
value="Barcode 1", command=self.update_size_label)
radio_barcode_1.grid(row=1, column=3)
radio_barcode_1.select()
radio_barcode_2 = tkinter.Radiobutton(self.window, text="Barcode 2", variable=self.barcode_option,
value="Barcode 2", command=self.update_size_label)
radio_barcode_2.grid(row=2, column=3)
def update_size_label(self):
"""
Update the size label if the currently selected barcode is changed
"""
if self.barcode_option.get() == "Barcode 1":
text = "Current Width = {:d}\nCurrent Height = {:d}".format(
self.barcode_1.get_barcode().shape[1], self.barcode_1.get_barcode().shape[0])
elif self.barcode_option.get() == "Barcode 2":
text = "Current Width = {:d}\nCurrent Height = {:d}".format(
self.barcode_2.get_barcode().shape[1], self.barcode_2.get_barcode().shape[0])
self.size_label['text'] = text
def reshape(self):
"""
        Enable or disable the input parameter entries when the Reshape radio button is selected
"""
self.column_length_entry.config(state='normal')
self.resize_x_entry.config(state='disabled')
self.resize_y_entry.config(state='disabled')
def scale(self):
"""
        Enable or disable the input parameter entries and update the corresponding text
        when the Scaling radio button is selected
"""
self.resize_x_label['text'] = "Scale Width by (ratio): "
self.resize_y_label['text'] = "Scale Height by (ratio): "
self.column_length_entry.config(state='disabled')
self.resize_x_entry.config(state='normal')
self.resize_y_entry.config(state='normal')
def resize(self):
"""
        Enable or disable the input parameter entries and update the corresponding text
        when the Resize radio button is selected
"""
self.resize_x_label['text'] = "Resize Width to (pixels): "
self.resize_y_label['text'] = "Resize Height to (pixels): "
self.column_length_entry.config(state='disabled')
self.resize_x_entry.config(state='normal')
self.resize_y_entry.config(state='normal')
def reshape_resize_barcode(self):
"""
Reshape or resize the barcode using the given parameters
"""
# Get the reshape/resize type from the user selection
option = self.config_option.get()
# Get which barcode to reshape/resize
if self.barcode_option.get() == "Barcode 1":
barcode = self.barcode_1
elif self.barcode_option.get() == "Barcode 2":
barcode = self.barcode_2
# Save the current barcode size
old_barcode_size = barcode.get_barcode().shape[0] * barcode.get_barcode().shape[1]
# Reshape/resize the currently selected barcode using the given type with parameters
if option == "Reshape":
frames_per_column_str = self.column_length_entry.get()
# Check if the reshape parameter is given
# If not given, return and do not process the reshape
if len(frames_per_column_str) == 0:
return
frames_per_column = int(frames_per_column_str)
barcode.reshape_barcode(frames_per_column)
self.update_scale_factor(barcode, old_barcode_size)
self.updated_new_barcode()
elif option == "Resize":
barcode_shape = barcode.get_barcode().shape
resize_x, resize_y = self._check_resize_entry(barcode_shape[1], barcode_shape[0])
if resize_x is None:
return
resized_barcode = cv2.resize(barcode.get_barcode(),
dsize=(int(resize_x), int(resize_y)),
interpolation=cv2.INTER_NEAREST)
barcode.barcode = resized_barcode
self.update_scale_factor(barcode, old_barcode_size)
self.updated_new_barcode()
elif option == "Scaling":
resize_x, resize_y = self._check_resize_entry(1, 1)
if resize_x is None:
return
resized_barcode = cv2.resize(barcode.get_barcode(),
dsize=(0, 0),
fx=float(resize_x),
fy=float(resize_y),
interpolation=cv2.INTER_NEAREST)
barcode.barcode = resized_barcode
self.update_scale_factor(barcode, old_barcode_size)
self.updated_new_barcode()
# Quit the window
self.window.destroy()
def _check_resize_entry(self, default_x, default_y):
"""
Check if the resize parameter is given
        If one of the parameters is not given, assume that dimension is unchanged
If both are not given, return and do not process the resize.
:param default_x: Default x dimension
:param default_y: Default y dimension
:return: Processed resize x and y parameters from the user input
"""
resize_x_str = self.resize_x_entry.get()
resize_y_str = self.resize_y_entry.get()
if len(resize_x_str) == 0 and len(resize_y_str) == 0:
return None, None
if len(resize_x_str) == 0:
resize_x = str(default_x)
else:
resize_x = resize_x_str
if len(resize_y_str) == 0:
resize_y = str(default_y)
else:
resize_y = resize_y_str
return resize_x, resize_y
def update_scale_factor(self, barcode, old_barcode_size):
"""
Update the scale factor of the barcode
:param barcode: The barcode to update
:param old_barcode_size: The old size of that barcode
"""
barcode.scale_factor *= (old_barcode_size / (barcode.get_barcode().shape[0] * barcode.get_barcode().shape[1]))
def updated_new_barcode(self):
"""
        Update the resized/reshaped barcode in the MainWindow of kalmus
"""
# Clear the display axes
self.axes[0][0].cla()
self.axes[1][0].cla()
self.axes[0][1].cla()
self.axes[1][1].cla()
# Update the displayed barcode and redraw the canvas
if self.barcode_1.get_barcode().shape[1] > self.barcode_2.get_barcode().shape[1]:
temp = copy.deepcopy(self.barcode_1)
self.barcode_1.__dict__ = self.barcode_2.__dict__.copy()
self.barcode_2.__dict__ = temp.__dict__.copy()
update_graph(barcode_1=self.barcode_1, barcode_2=self.barcode_2, axes=self.axes)
# Redraw the canvas
self.canvas.draw()
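# Hedged illustration (not part of the original class): update_scale_factor
# keeps scale_factor * pixel_count invariant across resizes. _FakeBarcode is a
# hypothetical stub with only the attributes the method touches; numpy is
# imported locally just for this sketch, and all numbers are illustrative.
class _FakeBarcode:
    def __init__(self, barcode):
        self.barcode = barcode      # the colour array, shape (rows, cols, 3)
        self.scale_factor = 1.0
    def get_barcode(self):
        return self.barcode
def _demo_update_scale_factor():
    import numpy
    b = _FakeBarcode(numpy.zeros((10, 40, 3)))
    old_size = 10 * 40
    b.barcode = numpy.zeros((10, 20, 3))   # halve the pixel count...
    # update_scale_factor never reads self, so it can be called unbound here
    ReshapeBarcodeWindow.update_scale_factor(None, b, old_size)
    assert b.scale_factor == 2.0            # ...and the scale factor doubles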
|
[
"tkinter.StringVar",
"kalmus.tkinter_windows.gui_utils.update_graph",
"copy.deepcopy",
"tkinter.Button",
"kalmus.tkinter_windows.gui_utils.resource_path",
"tkinter.Entry",
"tkinter.Radiobutton",
"tkinter.Label",
"tkinter.Tk"
] |
[((816, 828), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (826, 828), False, 'import tkinter\n'), ((1018, 1048), 'tkinter.StringVar', 'tkinter.StringVar', (['self.window'], {}), '(self.window)\n', (1035, 1048), False, 'import tkinter\n'), ((1186, 1236), 'tkinter.Label', 'tkinter.Label', (['self.window'], {'text': '"""Config Params: """'}), "(self.window, text='Config Params: ')\n", (1199, 1236), False, 'import tkinter\n'), ((1425, 1479), 'tkinter.Label', 'tkinter.Label', (['self.window'], {'text': '"""Frames per Column: """'}), "(self.window, text='Frames per Column: ')\n", (1438, 1479), False, 'import tkinter\n'), ((1584, 1638), 'tkinter.Entry', 'tkinter.Entry', (['self.window'], {'textvariable': '"""-1"""', 'width': '(5)'}), "(self.window, textvariable='-1', width=5)\n", (1597, 1638), False, 'import tkinter\n'), ((1734, 1796), 'tkinter.Label', 'tkinter.Label', (['self.window'], {'text': '"""Scale Width by (ratio): """'}), "(self.window, text='Scale Width by (ratio): ')\n", (1747, 1796), False, 'import tkinter\n'), ((1896, 1968), 'tkinter.Entry', 'tkinter.Entry', (['self.window'], {'textvariable': '"""-2"""', 'width': '(5)', 'state': '"""disabled"""'}), "(self.window, textvariable='-2', width=5, state='disabled')\n", (1909, 1968), False, 'import tkinter\n'), ((2059, 2122), 'tkinter.Label', 'tkinter.Label', (['self.window'], {'text': '"""Scale Height by (ratio): """'}), "(self.window, text='Scale Height by (ratio): ')\n", (2072, 2122), False, 'import tkinter\n'), ((2222, 2294), 'tkinter.Entry', 'tkinter.Entry', (['self.window'], {'textvariable': '"""-3"""', 'width': '(5)', 'state': '"""disabled"""'}), "(self.window, textvariable='-3', width=5, state='disabled')\n", (2235, 2294), False, 'import tkinter\n'), ((2857, 2942), 'tkinter.Button', 'tkinter.Button', (['self.window'], {'text': '"""Process"""', 'command': 'self.reshape_resize_barcode'}), "(self.window, text='Process', command=self.reshape_resize_barcode\n )\n", (2871, 2942), False, 'import tkinter\n'), ((3083, 3134), 'tkinter.Label', 'tkinter.Label', (['self.window'], {'text': '"""Config options: """'}), "(self.window, text='Config options: ')\n", (3096, 3134), False, 'import tkinter\n'), ((3270, 3403), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.window'], {'text': '"""Reshape"""', 'variable': 'self.config_option', 'value': '"""Reshape"""', 'anchor': '"""w"""', 'command': 'self.reshape'}), "(self.window, text='Reshape', variable=self.\n config_option, value='Reshape', anchor='w', command=self.reshape)\n", (3289, 3403), False, 'import tkinter\n'), ((3605, 3736), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.window'], {'text': '"""Scaling"""', 'variable': 'self.config_option', 'value': '"""Scaling"""', 'anchor': '"""w"""', 'command': 'self.scale'}), "(self.window, text='Scaling', variable=self.\n config_option, value='Scaling', anchor='w', command=self.scale)\n", (3624, 3736), False, 'import tkinter\n'), ((3906, 4035), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.window'], {'text': '"""Resize"""', 'variable': 'self.config_option', 'value': '"""Resize"""', 'anchor': '"""w"""', 'command': 'self.resize'}), "(self.window, text='Resize', variable=self.config_option,\n value='Resize', anchor='w', command=self.resize)\n", (3925, 4035), False, 'import tkinter\n'), ((4271, 4315), 'tkinter.Label', 'tkinter.Label', (['self.window'], {'text': '"""Barcode: """'}), "(self.window, text='Barcode: ')\n", (4284, 4315), False, 'import tkinter\n'), ((4437, 4467), 'tkinter.StringVar', 'tkinter.StringVar', (['self.window'], {}), 
'(self.window)\n', (4454, 4467), False, 'import tkinter\n'), ((4601, 4737), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.window'], {'text': '"""Barcode 1"""', 'variable': 'self.barcode_option', 'value': '"""Barcode 1"""', 'command': 'self.update_size_label'}), "(self.window, text='Barcode 1', variable=self.\n barcode_option, value='Barcode 1', command=self.update_size_label)\n", (4620, 4737), False, 'import tkinter\n'), ((4885, 5021), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.window'], {'text': '"""Barcode 2"""', 'variable': 'self.barcode_option', 'value': '"""Barcode 2"""', 'command': 'self.update_size_label'}), "(self.window, text='Barcode 2', variable=self.\n barcode_option, value='Barcode 2', command=self.update_size_label)\n", (4904, 5021), False, 'import tkinter\n'), ((11368, 11453), 'kalmus.tkinter_windows.gui_utils.update_graph', 'update_graph', ([], {'barcode_1': 'self.barcode_1', 'barcode_2': 'self.barcode_2', 'axes': 'self.axes'}), '(barcode_1=self.barcode_1, barcode_2=self.barcode_2, axes=self.axes\n )\n', (11380, 11453), False, 'from kalmus.tkinter_windows.gui_utils import update_graph, resource_path\n'), ((922, 954), 'kalmus.tkinter_windows.gui_utils.resource_path', 'resource_path', (['"""kalmus_icon.ico"""'], {}), "('kalmus_icon.ico')\n", (935, 954), False, 'from kalmus.tkinter_windows.gui_utils import update_graph, resource_path\n'), ((11200, 11229), 'copy.deepcopy', 'copy.deepcopy', (['self.barcode_1'], {}), '(self.barcode_1)\n', (11213, 11229), False, 'import copy\n')]
|
import tempfile
import shutil
import os
import inspect
from lib import BaseTest
class AddRepo1Test(BaseTest):
"""
add package to local repo: .deb file
"""
fixtureCmds = [
"aptly repo create -comment=Repo1 -distribution=squeeze repo1",
]
runCmd = "aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo1", "repo_show")
# check pool
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo2Test(BaseTest):
"""
add package to local repo: .dsc file
"""
fixtureCmds = [
"aptly repo create -comment=Repo2 -distribution=squeeze repo2",
]
runCmd = "aptly repo add repo2 ${files}/pyspi_0.6.1-1.3.dsc ${files}/pyspi-0.6.1-1.3.stripped.dsc"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo2", "repo_show")
# check pool
self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo3Test(BaseTest):
"""
add package to local repo: directory
"""
fixtureCmds = [
"aptly repo create -comment=Repo3 -distribution=squeeze repo3",
]
runCmd = "aptly repo add repo3 ${files}"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo3", "repo_show")
# check pool
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo4Test(BaseTest):
"""
add package to local repo: complex directory + remove
"""
fixtureCmds = [
"aptly repo create -comment=Repo4 -distribution=squeeze repo4",
]
runCmd = "aptly repo add -remove-files repo4 "
def prepare(self):
super(AddRepo4Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "01"), 0o755)
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0o755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "libboost-program-options-dev_1.49.0.1_i386.deb"),
os.path.join(self.tempSrcDir, "01"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
os.path.join(self.tempSrcDir, "02", "03", "other.file"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo4", "repo_show")
# check pool
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
path = os.path.join(self.tempSrcDir, "01", "libboost-program-options-dev_1.49.0.1_i386.deb")
if os.path.exists(path):
raise Exception("path %s shouldn't exist" % (path, ))
path = os.path.join(self.tempSrcDir, "02", "03", "pyspi_0.6.1.orig.tar.gz")
if os.path.exists(path):
raise Exception("path %s shouldn't exist" % (path, ))
path = os.path.join(self.tempSrcDir, "02", "03", "other.file")
if not os.path.exists(path):
raise Exception("path %s doesn't exist" % (path, ))
shutil.rmtree(self.tempSrcDir)
class AddRepo5Test(BaseTest):
"""
add package to local repo: some source files missing
"""
fixtureCmds = [
"aptly repo create -comment=Repo5 -distribution=squeeze repo5",
]
runCmd = "aptly repo add repo5 "
expectedCode = 1
def outputMatchPrepare(self, s):
return s.replace(self.tempSrcDir, "")
def prepare(self):
super(AddRepo5Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0o755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo5", "repo_show")
shutil.rmtree(self.tempSrcDir)
class AddRepo6Test(BaseTest):
"""
add package to local repo: missing file
"""
fixtureCmds = [
"aptly repo create -comment=Repo6 -distribution=squeeze repo6",
]
runCmd = "aptly repo add repo6 no-such-file"
expectedCode = 1
class AddRepo7Test(BaseTest):
"""
add package to local repo: missing repo
"""
runCmd = "aptly repo add repo7 ${files}"
expectedCode = 1
class AddRepo8Test(BaseTest):
"""
add package to local repo: conflict in packages
"""
fixtureCmds = [
"aptly repo create -comment=Repo8 -distribution=squeeze repo8",
"aptly repo add repo8 ${files}/pyspi_0.6.1-1.3.dsc",
]
runCmd = "aptly repo add repo8 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
expectedCode = 1
def outputMatchPrepare(self, s):
return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo8", "repo_show")
class AddRepo9Test(BaseTest):
"""
add package to local repo: conflict in files
"""
fixtureCmds = [
"aptly repo create -comment=Repo9 -distribution=squeeze repo9",
]
runCmd = "aptly repo add repo9 ${files}/pyspi_0.6.1-1.3.dsc"
gold_processor = BaseTest.expand_environ
expectedCode = 1
def outputMatchPrepare(self, s):
return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
def prepare(self):
super(AddRepo9Test, self).prepare()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool/64/06/"))
with open(os.path.join(os.environ["HOME"], ".aptly", "pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz"), "w") as f:
f.write("abcd")
class AddRepo10Test(BaseTest):
"""
add package to local repo: double import
"""
fixtureCmds = [
"aptly repo create -comment=Repo10 -distribution=squeeze repo10",
"aptly repo add repo10 ${files}",
]
runCmd = "aptly repo add repo10 ${files}/pyspi_0.6.1-1.3.dsc"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo10", "repo_show")
class AddRepo11Test(BaseTest):
"""
add package to local repo: conflict in packages + -force-replace
"""
fixtureCmds = [
"aptly repo create -comment=Repo11 -distribution=squeeze repo11",
"aptly repo add repo11 ${files}/pyspi_0.6.1-1.3.dsc",
]
runCmd = "aptly repo add -force-replace repo11 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
def outputMatchPrepare(self, s):
return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo11", "repo_show")
class AddRepo12Test(BaseTest):
"""
add package to local repo: .udeb file
"""
fixtureCmds = [
"aptly repo create -comment=Repo12 -distribution=squeeze repo12",
]
runCmd = "aptly repo add repo12 ${udebs}/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo12", "repo_show")
# check pool
self.check_exists('pool/ef/ae/69921b97494e40437712053b60a5_dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
class AddRepo13Test(BaseTest):
"""
add package to local repo: .udeb and .deb files
"""
fixtureCmds = [
"aptly repo create -comment=Repo13 -distribution=squeeze repo13",
]
runCmd = "aptly repo add repo13 ${udebs} ${files}"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo13", "repo_show")
# check pool
self.check_exists('pool/ef/ae/69921b97494e40437712053b60a5_dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
class AddRepo14Test(BaseTest):
"""
add same package to local repo twice and make sure the file doesn't get truncated.
"""
fixtureCmds = [
"aptly repo create -comment=Repo14 -distribution=squeeze repo14",
"aptly repo add repo14 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb",
"aptly publish repo -distribution=test1 -skip-signing repo14"
]
runCmd = "aptly repo add repo14 $aptlyroot/public/pool/"
def check(self):
super(AddRepo14Test, self).check()
# check pool
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo15Test(BaseTest):
"""
add package with wrong case in stanza and missing fields
"""
fixtureCmds = [
"aptly repo create -comment=Repo15 -distribution=squeeze repo15",
]
runCmd = "aptly repo add repo15 ${testfiles}"
expectedCode = 1
def outputMatchPrepare(self, s):
return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
class AddRepo16Test(BaseTest):
"""
add package to local repo: some source files missing, but already in the pool
"""
fixtureCmds = [
"aptly repo create repo1",
"aptly repo create repo2",
"aptly repo add repo1 ${files}"
]
runCmd = "aptly repo add repo2 "
def outputMatchPrepare(self, s):
return s.replace(self.tempSrcDir, "")
def prepare(self):
super(AddRepo16Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0o755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo2", "repo_show")
shutil.rmtree(self.tempSrcDir)
|
[
"os.path.exists",
"tempfile.mkdtemp",
"shutil.rmtree",
"os.path.join",
"inspect.getsourcefile"
] |
[((2674, 2692), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2690, 2692), False, 'import tempfile\n'), ((4372, 4461), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""01"""', '"""libboost-program-options-dev_1.49.0.1_i386.deb"""'], {}), "(self.tempSrcDir, '01',\n 'libboost-program-options-dev_1.49.0.1_i386.deb')\n", (4384, 4461), False, 'import os\n'), ((4469, 4489), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4483, 4489), False, 'import os\n'), ((4572, 4640), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""', '"""pyspi_0.6.1.orig.tar.gz"""'], {}), "(self.tempSrcDir, '02', '03', 'pyspi_0.6.1.orig.tar.gz')\n", (4584, 4640), False, 'import os\n'), ((4652, 4672), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4666, 4672), False, 'import os\n'), ((4756, 4811), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""', '"""other.file"""'], {}), "(self.tempSrcDir, '02', '03', 'other.file')\n", (4768, 4811), False, 'import os\n'), ((4922, 4952), 'shutil.rmtree', 'shutil.rmtree', (['self.tempSrcDir'], {}), '(self.tempSrcDir)\n', (4935, 4952), False, 'import shutil\n'), ((5393, 5411), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5409, 5411), False, 'import tempfile\n'), ((6012, 6042), 'shutil.rmtree', 'shutil.rmtree', (['self.tempSrcDir'], {}), '(self.tempSrcDir)\n', (6025, 6042), False, 'import shutil\n'), ((12207, 12225), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12223, 12225), False, 'import tempfile\n'), ((12826, 12856), 'shutil.rmtree', 'shutil.rmtree', (['self.tempSrcDir'], {}), '(self.tempSrcDir)\n', (12839, 12856), False, 'import shutil\n'), ((2713, 2748), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""01"""'], {}), "(self.tempSrcDir, '01')\n", (2725, 2748), False, 'import os\n'), ((2777, 2818), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (2789, 2818), False, 'import os\n'), ((2991, 3026), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""01"""'], {}), "(self.tempSrcDir, '01')\n", (3003, 3026), False, 'import os\n'), ((3164, 3205), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (3176, 3205), False, 'import os\n'), ((3347, 3388), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (3359, 3388), False, 'import os\n'), ((3530, 3571), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (3542, 3571), False, 'import os\n'), ((3713, 3768), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""', '"""other.file"""'], {}), "(self.tempSrcDir, '02', '03', 'other.file')\n", (3725, 3768), False, 'import os\n'), ((4827, 4847), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4841, 4847), False, 'import os\n'), ((5432, 5473), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (5444, 5473), False, 'import os\n'), ((5619, 5660), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (5631, 5660), False, 'import os\n'), ((5802, 5843), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (5814, 5843), False, 'import os\n'), ((7917, 7974), 'os.path.join', 
'os.path.join', (["os.environ['HOME']", '""".aptly"""', '"""pool/64/06/"""'], {}), "(os.environ['HOME'], '.aptly', 'pool/64/06/')\n", (7929, 7974), False, 'import os\n'), ((12246, 12287), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (12258, 12287), False, 'import os\n'), ((12433, 12474), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (12445, 12474), False, 'import os\n'), ((12616, 12657), 'os.path.join', 'os.path.join', (['self.tempSrcDir', '"""02"""', '"""03"""'], {}), "(self.tempSrcDir, '02', '03')\n", (12628, 12657), False, 'import os\n'), ((7994, 8107), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '""".aptly"""', '"""pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz"""'], {}), "(os.environ['HOME'], '.aptly',\n 'pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')\n", (8006, 8107), False, 'import os\n'), ((2877, 2908), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (2898, 2908), False, 'import inspect\n'), ((3077, 3108), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (3098, 3108), False, 'import inspect\n'), ((3256, 3287), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (3277, 3287), False, 'import inspect\n'), ((3439, 3470), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (3460, 3470), False, 'import inspect\n'), ((3622, 3653), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (3643, 3653), False, 'import inspect\n'), ((5532, 5563), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (5553, 5563), False, 'import inspect\n'), ((5711, 5742), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (5732, 5742), False, 'import inspect\n'), ((7046, 7077), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (7067, 7077), False, 'import inspect\n'), ((7780, 7811), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (7801, 7811), False, 'import inspect\n'), ((9181, 9212), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (9202, 9212), False, 'import inspect\n'), ((11675, 11706), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (11696, 11706), False, 'import inspect\n'), ((12346, 12377), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (12367, 12377), False, 'import inspect\n'), ((12525, 12556), 'inspect.getsourcefile', 'inspect.getsourcefile', (['BaseTest'], {}), '(BaseTest)\n', (12546, 12556), False, 'import inspect\n'), ((6911, 6948), 'inspect.getsourcefile', 'inspect.getsourcefile', (['self.__class__'], {}), '(self.__class__)\n', (6932, 6948), False, 'import inspect\n'), ((7645, 7682), 'inspect.getsourcefile', 'inspect.getsourcefile', (['self.__class__'], {}), '(self.__class__)\n', (7666, 7682), False, 'import inspect\n'), ((9046, 9083), 'inspect.getsourcefile', 'inspect.getsourcefile', (['self.__class__'], {}), '(self.__class__)\n', (9067, 9083), False, 'import inspect\n'), ((11540, 11577), 'inspect.getsourcefile', 'inspect.getsourcefile', (['self.__class__'], {}), '(self.__class__)\n', (11561, 11577), False, 'import inspect\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/util/landmarks_smoothing_calculator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_options_pb2 as mediapipe_dot_framework_dot_calculator__options__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/util/landmarks_smoothing_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_pb=_b('\n?mediapipe/calculators/util/landmarks_smoothing_calculator.proto\x12\tmediapipe\x1a,mediapipe/framework/calculator_options.proto\"\xb9\x03\n#LandmarksSmoothingCalculatorOptions\x12L\n\tno_filter\x18\x01 \x01(\x0b\x32\x37.mediapipe.LandmarksSmoothingCalculatorOptions.NoFilterH\x00\x12X\n\x0fvelocity_filter\x18\x02 \x01(\x0b\x32=.mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilterH\x00\x1a\n\n\x08NoFilter\x1am\n\x0eVelocityFilter\x12\x16\n\x0bwindow_size\x18\x01 \x01(\x05:\x01\x35\x12\x1a\n\x0evelocity_scale\x18\x02 \x01(\x02:\x02\x31\x30\x12\'\n\x18min_allowed_object_scale\x18\x03 \x01(\x02:\x05\x31\x65-062]\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x85\xb4\xa5\x9b\x01 \x01(\x0b\x32..mediapipe.LandmarksSmoothingCalculatorOptionsB\x10\n\x0e\x66ilter_options')
,
dependencies=[mediapipe_dot_framework_dot_calculator__options__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER = _descriptor.Descriptor(
name='NoFilter',
full_name='mediapipe.LandmarksSmoothingCalculatorOptions.NoFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=332,
serialized_end=342,
)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER = _descriptor.Descriptor(
name='VelocityFilter',
full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_size', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.window_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='velocity_scale', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.velocity_scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(10),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_allowed_object_scale', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.min_allowed_object_scale', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-06),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=344,
serialized_end=453,
)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS = _descriptor.Descriptor(
name='LandmarksSmoothingCalculatorOptions',
full_name='mediapipe.LandmarksSmoothingCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='no_filter', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.no_filter', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='velocity_filter', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.velocity_filter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.ext', index=0,
number=325671429, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[_LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER, _LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='filter_options', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.filter_options',
index=0, containing_type=None, fields=[]),
],
serialized_start=125,
serialized_end=566,
)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER.containing_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER.containing_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['no_filter'].message_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['velocity_filter'].message_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options'].fields.append(
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['no_filter'])
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['no_filter'].containing_oneof = _LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options']
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options'].fields.append(
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['velocity_filter'])
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['velocity_filter'].containing_oneof = _LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options']
DESCRIPTOR.message_types_by_name['LandmarksSmoothingCalculatorOptions'] = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
LandmarksSmoothingCalculatorOptions = _reflection.GeneratedProtocolMessageType('LandmarksSmoothingCalculatorOptions', (_message.Message,), dict(
NoFilter = _reflection.GeneratedProtocolMessageType('NoFilter', (_message.Message,), dict(
DESCRIPTOR = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER,
__module__ = 'mediapipe.calculators.util.landmarks_smoothing_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.LandmarksSmoothingCalculatorOptions.NoFilter)
))
,
VelocityFilter = _reflection.GeneratedProtocolMessageType('VelocityFilter', (_message.Message,), dict(
DESCRIPTOR = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER,
__module__ = 'mediapipe.calculators.util.landmarks_smoothing_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter)
))
,
DESCRIPTOR = _LANDMARKSSMOOTHINGCALCULATOROPTIONS,
__module__ = 'mediapipe.calculators.util.landmarks_smoothing_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.LandmarksSmoothingCalculatorOptions)
))
_sym_db.RegisterMessage(LandmarksSmoothingCalculatorOptions)
_sym_db.RegisterMessage(LandmarksSmoothingCalculatorOptions.NoFilter)
_sym_db.RegisterMessage(LandmarksSmoothingCalculatorOptions.VelocityFilter)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_LANDMARKSSMOOTHINGCALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor.OneofDescriptor",
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.Descriptor",
"mediapipe.framework.calculator_options_pb2.CalculatorOptions.RegisterExtension"
] |
[((530, 556), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (554, 556), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1820, 2186), 'google.protobuf.descriptor.Descriptor', '_descriptor.Descriptor', ([], {'name': '"""NoFilter"""', 'full_name': '"""mediapipe.LandmarksSmoothingCalculatorOptions.NoFilter"""', 'filename': 'None', 'file': 'DESCRIPTOR', 'containing_type': 'None', 'fields': '[]', 'extensions': '[]', 'nested_types': '[]', 'enum_types': '[]', 'options': 'None', 'is_extendable': '(False)', 'syntax': '"""proto2"""', 'extension_ranges': '[]', 'oneofs': '[]', 'serialized_start': '(332)', 'serialized_end': '(342)'}), "(name='NoFilter', full_name=\n 'mediapipe.LandmarksSmoothingCalculatorOptions.NoFilter', filename=None,\n file=DESCRIPTOR, containing_type=None, fields=[], extensions=[],\n nested_types=[], enum_types=[], options=None, is_extendable=False,\n syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=332,\n serialized_end=342)\n", (1842, 2186), True, 'from google.protobuf import descriptor as _descriptor\n'), ((8322, 8479), 'mediapipe.framework.calculator_options_pb2.CalculatorOptions.RegisterExtension', 'mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension', (["_LANDMARKSSMOOTHINGCALCULATOROPTIONS.extensions_by_name['ext']"], {}), "(\n _LANDMARKSSMOOTHINGCALCULATOROPTIONS.extensions_by_name['ext'])\n", (8410, 8479), True, 'from mediapipe.framework import calculator_options_pb2 as mediapipe_dot_framework_dot_calculator__options__pb2\n'), ((2468, 2821), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""window_size"""', 'full_name': '"""mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.window_size"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(True)', 'default_value': '(5)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='window_size', full_name=\n 'mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.window_size',\n index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True,\n default_value=5, message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None)\n", (2495, 2821), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4104, 4444), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""no_filter"""', 'full_name': '"""mediapipe.LandmarksSmoothingCalculatorOptions.no_filter"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='no_filter', full_name=\n 'mediapipe.LandmarksSmoothingCalculatorOptions.no_filter', index=0,\n number=1, type=11, cpp_type=10, label=1, has_default_value=False,\n default_value=None, message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None)\n", (4131, 4444), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4469, 4822), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""velocity_filter"""', 'full_name': 
'"""mediapipe.LandmarksSmoothingCalculatorOptions.velocity_filter"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='velocity_filter', full_name=\n 'mediapipe.LandmarksSmoothingCalculatorOptions.velocity_filter', index=\n 1, number=2, type=11, cpp_type=10, label=1, has_default_value=False,\n default_value=None, message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None)\n", (4496, 4822), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4866, 5202), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""ext"""', 'full_name': '"""mediapipe.LandmarksSmoothingCalculatorOptions.ext"""', 'index': '(0)', 'number': '(325671429)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(True)', 'extension_scope': 'None', 'options': 'None'}), "(name='ext', full_name=\n 'mediapipe.LandmarksSmoothingCalculatorOptions.ext', index=0, number=\n 325671429, type=11, cpp_type=10, label=1, has_default_value=False,\n default_value=None, message_type=None, enum_type=None, containing_type=\n None, is_extension=True, extension_scope=None, options=None)\n", (4893, 5202), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5462, 5637), 'google.protobuf.descriptor.OneofDescriptor', '_descriptor.OneofDescriptor', ([], {'name': '"""filter_options"""', 'full_name': '"""mediapipe.LandmarksSmoothingCalculatorOptions.filter_options"""', 'index': '(0)', 'containing_type': 'None', 'fields': '[]'}), "(name='filter_options', full_name=\n 'mediapipe.LandmarksSmoothingCalculatorOptions.filter_options', index=0,\n containing_type=None, fields=[])\n", (5489, 5637), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import objax
from jax import vmap, grad, jacrev
import jax.numpy as np
from jax.scipy.linalg import cholesky, cho_factor
from .utils import inv, solve, gaussian_first_derivative_wrt_mean, gaussian_second_derivative_wrt_mean
from numpy.polynomial.hermite import hermgauss
import numpy as onp
import itertools
class Cubature(objax.Module):
def __init__(self, dim=None):
if dim is None: # dimension of cubature not known upfront
self.store = False
else: # dimension known, store sigma points and weights
self.store = True
self.x, self.w = self.get_cubature_points_and_weights(dim)
def __call__(self, dim):
if self.store:
return self.x, self.w
else:
return self.get_cubature_points_and_weights(dim)
def get_cubature_points_and_weights(self, dim):
raise NotImplementedError
class GaussHermite(Cubature):
def __init__(self, dim=None, num_cub_points=20):
self.num_cub_points = num_cub_points
super().__init__(dim)
def get_cubature_points_and_weights(self, dim):
return gauss_hermite(dim, self.num_cub_points)
class UnscentedThirdOrder(Cubature):
def get_cubature_points_and_weights(self, dim):
return symmetric_cubature_third_order(dim)
class UnscentedFifthOrder(Cubature):
def get_cubature_points_and_weights(self, dim):
return symmetric_cubature_fifth_order(dim)
class Unscented(UnscentedFifthOrder):
pass
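# Hedged usage sketch (not part of the original module): a Cubature object
# constructed with `dim` caches its sigma points and weights, while one built
# without `dim` recomputes them on every call. Numbers are illustrative.
def _demo_cubature_classes():
    cub = GaussHermite(dim=1, num_cub_points=10)  # points cached in __init__
    x, w = cub(1)                                 # returns the stored pair
    assert x.shape == (1, 10)
    assert abs(float(np.sum(w)) - 1.0) < 1e-4    # weights are normalised
    x3, w3 = UnscentedThirdOrder()(2)             # recomputed: 2*dim + 1 = 5 points
    assert x3.shape == (2, 5)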
def mvhermgauss(H: int, D: int):
"""
This function is adapted from GPflow: https://github.com/GPflow/GPflow
Return the evaluation locations 'xn', and weights 'wn' for a multivariate
Gauss-Hermite quadrature.
The outputs can be used to approximate the following type of integral:
    int exp(-xᵀx)*f(x) dx ~ sum_i w[i]*f(x[i,:])
:param H: Number of Gauss-Hermite evaluation points.
:param D: Number of input dimensions. Needs to be known at call-time.
:return: eval_locations 'x' (H**DxD), weights 'w' (H**D)
"""
gh_x, gh_w = hermgauss(H)
x = np.array(list(itertools.product(*(gh_x,) * D))) # H**DxD
w = np.prod(np.array(list(itertools.product(*(gh_w,) * D))), 1) # H**D
return x, w
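# Hedged sketch (not part of the original module): sanity check of mvhermgauss.
# For f ≡ 1 the rule integrates the weight function itself, ∫ exp(-xᵀx) dx = π^(D/2).
def _demo_mvhermgauss():
    x, w = mvhermgauss(10, 2)
    assert x.shape == (100, 2) and w.shape == (100,)       # H**D points in D dims
    assert abs(float(np.sum(w)) - float(np.pi)) < 1e-4     # π for D = 2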
def gauss_hermite(dim=1, num_quad_pts=20):
"""
    Return sigma-points and weights for Gauss-Hermite cubature
"""
# sigma_pts, weights = hermgauss(num_quad_pts) # Gauss-Hermite sigma points and weights
sigma_pts, weights = mvhermgauss(num_quad_pts, dim)
sigma_pts = np.sqrt(2) * sigma_pts.T
    weights = weights.T * np.pi ** (-0.5 * dim)  # scale weights by π^(-dim/2)
return sigma_pts, weights
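# Hedged sketch (not part of the original module): after the √2 and π^(-dim/2)
# rescaling above, the rule computes expectations under 𝓝(0, I), so the
# standard-normal moments E[x²] = 1 and E[x⁴] = 3 should be recovered.
def _demo_gauss_hermite():
    pts, w = gauss_hermite(dim=1, num_quad_pts=20)
    assert abs(float(np.sum(w * pts[0] ** 2)) - 1.0) < 1e-3
    assert abs(float(np.sum(w * pts[0] ** 4)) - 3.0) < 1e-3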
def symmetric_cubature_third_order(dim=1, kappa=None):
"""
    Return sigma-points and weights for the symmetric cubature rule of order 3.
Uses 2dim+1 sigma-points
"""
if kappa is None:
# kappa = 1 - dim
kappa = 0 # CKF
w0 = kappa / (dim + kappa)
wm = 1 / (2 * (dim + kappa))
u = onp.sqrt(dim + kappa)
if (dim == 1) and (kappa == 0):
weights = onp.array([w0, wm, wm])
sigma_pts = onp.array([0., u, -u])
# sigma_pts = onp.array([-u, 0., u])
# weights = onp.array([wm, w0, wm])
elif (dim == 2) and (kappa == 0):
weights = onp.array([w0, wm, wm, wm, wm])
sigma_pts = onp.block([[0., u, 0., -u, 0.],
[0., 0., u, 0., -u]])
elif (dim == 3) and (kappa == 0):
weights = onp.array([w0, wm, wm, wm, wm, wm, wm])
sigma_pts = onp.block([[0., u, 0., 0., -u, 0., 0.],
[0., 0., u, 0., 0., -u, 0.],
[0., 0., 0., u, 0., 0., -u]])
else:
weights = onp.concatenate([onp.array([[kappa / (dim + kappa)]]), wm * onp.ones([1, 2*dim])], axis=1)
sigma_pts = onp.sqrt(dim + kappa) * onp.block([onp.zeros([dim, 1]), onp.eye(dim), -onp.eye(dim)])
return sigma_pts, weights
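# Hedged sketch (not part of the original module): the third-order rule uses
# 2*dim + 1 points and matches the moments of 𝓝(0, I) up to order 3.
def _demo_third_order():
    pts, w = symmetric_cubature_third_order(dim=3)
    assert pts.shape == (3, 7) and abs(w.sum() - 1.0) < 1e-9
    assert abs(float(onp.sum(w * pts[0] ** 2)) - 1.0) < 1e-9   # E[x²] = 1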
def symmetric_cubature_fifth_order(dim=1):
"""
    Return sigma-points and weights for the symmetric cubature rule of order 5
Uses 2(dim**2)+1 sigma-points
"""
# The weights and sigma-points from McNamee & Stenger
I0 = 1
I2 = 1
I4 = 3
I22 = 1
u = onp.sqrt(I4 / I2)
A0 = I0 - dim * (I2 / I4) ** 2 * (I4 - 0.5 * (dim - 1) * I22)
A1 = 0.5 * (I2 / I4) ** 2 * (I4 - (dim - 1) * I22)
A2 = 0.25 * (I2 / I4) ** 2 * I22
# we implement specific cases manually to save compute
if dim == 1:
weights = onp.array([A0, A1, A1])
sigma_pts = onp.array([0., u, -u])
elif dim == 2:
weights = onp.array([A0, A1, A1, A1, A1, A2, A2, A2, A2])
sigma_pts = onp.block([[0., u, -u, 0., 0., u, -u, u, -u],
[0., 0., 0., u, -u, u, -u, -u, u]])
elif dim == 3:
weights = onp.array([A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2])
sigma_pts = onp.block([[0., u, -u, 0., 0., 0., 0., u, -u, u, -u, u, -u, u, -u, 0., 0., 0., 0.],
[0., 0., 0., u, -u, 0., 0., u, -u, -u, u, 0., 0., 0., 0., u, -u, u, -u],
[0., 0., 0., 0., 0., u, -u, 0., 0., 0., 0., u, -u, -u, u, u, -u, -u, u]])
else:
# general case
U0 = sym_set(dim, [])
U1 = sym_set(dim, [u])
U2 = sym_set(dim, [u, u])
sigma_pts = onp.concatenate([U0, U1, U2], axis=1)
weights = onp.concatenate([A0 * onp.ones(U0.shape[1]),
A1 * onp.ones(U1.shape[1]),
A2 * onp.ones(U2.shape[1])])
return sigma_pts, weights
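# Hedged sketch (not part of the original module): the fifth-order rule uses
# 2*dim² + 1 points and additionally matches the fourth moment E[x⁴] = 3.
def _demo_fifth_order():
    pts, w = symmetric_cubature_fifth_order(dim=2)
    assert pts.shape == (2, 9) and abs(w.sum() - 1.0) < 1e-9
    assert abs(float(onp.sum(w * pts[0] ** 4)) - 3.0) < 1e-9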
def sym_set(n, gen=None):
if (gen is None) or (len(gen) == 0):
U = onp.zeros([n, 1])
else:
lengen = len(gen)
if lengen == 1:
U = onp.zeros([n, 2 * n])
elif lengen == 2:
U = onp.zeros([n, 2 * n * (n - 1)])
else:
raise NotImplementedError
ind = 0
for i in range(n):
u = onp.zeros(n)
u[i] = gen[0]
if lengen > 1:
if abs(gen[0] - gen[1]) < 1e-10:
V = sym_set(n-i-1, gen[1:])
for j in range(V.shape[1]):
u[i+1:] = V[:, j]
U[:, 2*ind] = u
U[:, 2*ind + 1] = -u
ind += 1
else:
raise NotImplementedError
# V = sym_set(n-1, gen[1:])
# for j in range(V.shape[1]):
# u[:i-1, i+1:] = V[:, j]
# U = onp.concatenate([U, u, -u])
# ind += 1
else:
U[:, 2*i] = u
U[:, 2*i+1] = -u
return U
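# Hedged sketch (not part of the original module): sym_set enumerates fully
# symmetric point sets; [] gives the origin, [u] the 2n signed axis points,
# and [u, u] the 2n(n-1) signed pair points used by the fifth-order rule.
def _demo_sym_set():
    assert sym_set(3).shape == (3, 1)
    assert sym_set(3, [1.0]).shape == (3, 6)
    assert sym_set(3, [1.0, 1.0]).shape == (3, 12)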
def variational_expectation_cubature(likelihood, y, post_mean, post_cov, cubature=None):
"""
Computes the "variational expectation" via cubature, i.e. the
expected log-likelihood, and its derivatives w.r.t. the posterior mean
E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
:param likelihood: the likelihood model
:param y: observed data (yₙ) [scalar]
:param post_mean: posterior mean (mₙ) [scalar]
:param post_cov: posterior variance (vₙ) [scalar]
:param cubature: the function to compute sigma points and weights to use during cubature
:return:
exp_log_lik: the expected log likelihood, E[log p(yₙ|fₙ)] [scalar]
dE_dm: derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
d2E_dm2: second derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
"""
if cubature is None:
x, w = gauss_hermite(post_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(post_mean.shape[0])
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
    sigma_points = cholesky(post_cov, lower=True) @ np.atleast_2d(x) + post_mean
# pre-compute wᵢ log p(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_log_likelihood_eval = w * likelihood.evaluate_log_likelihood(y, sigma_points)
# Compute expected log likelihood via cubature:
# E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
exp_log_lik = np.sum(
weighted_log_likelihood_eval
)
# Compute first derivative via cubature:
# dE[log p(yₙ|fₙ)]/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fsigᵢ)
invv = np.diag(post_cov)[:, None] ** -1
dE_dm = np.sum(
invv * (sigma_points - post_mean)
* weighted_log_likelihood_eval, axis=-1
)[:, None]
# Compute second derivative via cubature (deriv. w.r.t. var = 0.5 * 2nd deriv. w.r.t. mean):
# dE[log p(yₙ|fₙ)]/dvₙ = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fsigᵢ)
dE_dv = np.sum(
(0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv)
* weighted_log_likelihood_eval, axis=-1
)
dE_dv = np.diag(dE_dv)
d2E_dm2 = 2 * dE_dv
return exp_log_lik, dE_dm, d2E_dm2
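# Hedged usage sketch (not part of the original module). _StubGaussianLik is a
# hypothetical stand-in exposing the three methods the cubature routines in
# this file assume of a likelihood object; with additive Gaussian noise every
# quantity below has a closed form to check against. Numbers are illustrative.
class _StubGaussianLik:
    """y | f ~ 𝓝(f, variance), additive Gaussian noise."""
    variance = 0.5
    def evaluate_log_likelihood(self, y, f):
        return -0.5 * (np.log(2 * np.pi * self.variance) + (y - f) ** 2 / self.variance)
    def evaluate_likelihood(self, y, f):
        return np.exp(self.evaluate_log_likelihood(y, f))
    def conditional_moments(self, f):
        return f, self.variance * np.ones_like(f)
def _demo_variational_expectation():
    y, m, v = np.array([[0.5]]), np.array([[0.0]]), np.array([[1.0]])
    E, dE_dm, d2E_dm2 = variational_expectation_cubature(_StubGaussianLik(), y, m, v)
    # closed form: E[log p(y|f)] = -0.5 log(2πσ²) - ((y-m)² + v) / (2σ²)
    closed_form = -0.5 * np.log(2 * np.pi * 0.5) - (0.25 + 1.0) / (2 * 0.5)
    assert abs(float(E) - float(closed_form)) < 1e-3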
def log_density_cubature(likelihood, y, mean, cov, cubature=None):
"""
    Computes the log partition function via cubature:
    logZₙ = log ∫ p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
:param likelihood: the likelihood model
:param y: observed data (yₙ) [scalar]
:param mean: cavity mean (mₙ) [scalar]
:param cov: cavity covariance (cₙ) [scalar]
:param cubature: the function to compute sigma points and weights to use during cubature
:return:
lZ: the log density, logZₙ [scalar]
"""
if cubature is None:
x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(mean.shape[0])
    cav_cho, low = cho_factor(cov, lower=True)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
sigma_points = cav_cho @ np.atleast_2d(x) + mean
# pre-compute wᵢ p(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_likelihood_eval = w * likelihood.evaluate_likelihood(y, sigma_points)
# Compute partition function via cubature:
# Zₙ = ∫ p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
Z = np.sum(
weighted_likelihood_eval, axis=-1
)
lZ = np.log(np.maximum(Z, 1e-8))
return lZ
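# Hedged sketch reusing _StubGaussianLik from above: with additive Gaussian
# noise the marginal is Gaussian, so logZₙ = log 𝓝(yₙ|mₙ, vₙ + σ²).
def _demo_log_density():
    y, m, v = np.array([[0.5]]), np.array([[0.0]]), np.array([[1.0]])
    lZ = log_density_cubature(_StubGaussianLik(), y, m, v)
    s2 = 1.0 + 0.5  # v + σ²
    closed_form = -0.5 * np.log(2 * np.pi * s2) - 0.25 / (2 * s2)
    assert abs(float(lZ) - float(closed_form)) < 1e-3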
def moment_match_cubature(likelihood, y, cav_mean, cav_cov, power=1.0, cubature=None):
"""
TODO: N.B. THIS VERSION ALLOWS MULTI-DIMENSIONAL MOMENT MATCHING, BUT CAN BE UNSTABLE
Perform moment matching via cubature.
Moment matching involves computing the log partition function, logZₙ, and its derivatives w.r.t. the cavity mean
logZₙ = log ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
with EP power a.
:param likelihood: the likelihood model
:param y: observed data (yₙ) [scalar]
:param cav_mean: cavity mean (mₙ) [scalar]
:param cav_cov: cavity covariance (cₙ) [scalar]
:param power: EP power / fraction (a) [scalar]
:param cubature: the function to compute sigma points and weights to use during cubature
:return:
lZ: the log partition function, logZₙ [scalar]
        dlZ: first derivative of logZₙ w.r.t. mₙ [scalar]
        d2lZ: second derivative of logZₙ w.r.t. mₙ [scalar]
"""
if cubature is None:
x, w = gauss_hermite(cav_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(cav_mean.shape[0])
    cav_cho, low = cho_factor(cav_cov, lower=True)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
sigma_points = cav_cho @ np.atleast_2d(x) + cav_mean
# pre-compute wᵢ pᵃ(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_likelihood_eval = w * likelihood.evaluate_likelihood(y, sigma_points) ** power
# Compute partition function via cubature:
# Zₙ = ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ pᵃ(yₙ|fsigᵢ)
Z = np.sum(
weighted_likelihood_eval, axis=-1
)
lZ = np.log(np.maximum(Z, 1e-8))
Zinv = 1.0 / np.maximum(Z, 1e-8)
# Compute derivative of partition function via cubature:
# dZₙ/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fsigᵢ)
d1 = vmap(
gaussian_first_derivative_wrt_mean, (1, None, None, 1)
)(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
dZ = np.sum(d1, axis=0)
# dlogZₙ/dmₙ = (dZₙ/dmₙ) / Zₙ
dlZ = Zinv * dZ
# Compute second derivative of partition function via cubature:
# d²Zₙ/dmₙ² = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fsigᵢ)
d2 = vmap(
gaussian_second_derivative_wrt_mean, (1, None, None, 1)
)(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
d2Z = np.sum(d2, axis=0)
# d²logZₙ/dmₙ² = d[(dZₙ/dmₙ) / Zₙ]/dmₙ
# = (d²Zₙ/dmₙ² * Zₙ - (dZₙ/dmₙ)²) / Zₙ²
# = d²Zₙ/dmₙ² / Zₙ - (dlogZₙ/dmₙ)²
d2lZ = -dlZ @ dlZ.T + Zinv * d2Z
return lZ, dlZ, d2lZ
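# Hedged sketch reusing _StubGaussianLik from above: with power a = 1 the EP
# moment match against a Gaussian likelihood has the same closed form as the
# log density, and dlZ is its first derivative w.r.t. the cavity mean mₙ.
def _demo_moment_match():
    y, m, v = np.array([[0.5]]), np.array([[0.0]]), np.array([[1.0]])
    lZ, dlZ, d2lZ = moment_match_cubature(_StubGaussianLik(), y, m, v, power=1.0)
    s2 = 1.0 + 0.5  # v + σ²
    closed_form = -0.5 * np.log(2 * np.pi * s2) - 0.25 / (2 * s2)
    assert abs(float(lZ) - float(closed_form)) < 1e-3
    assert abs(float(np.squeeze(dlZ)) - 0.5 / s2) < 1e-3   # (y - m) / (v + σ²)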
# def statistical_linear_regression_cubature(likelihood, mean, cov, cubature=None):
# """
# Perform statistical linear regression (SLR) using cubature.
# We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω).
# TODO: this currently assumes an additive noise model (ok for our current applications), make more general
# """
# if cubature is None:
# x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
# else:
# x, w = cubature(mean.shape[0])
# # fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
# sigma_points = cholesky(cov) @ np.atleast_2d(x) + mean
# lik_expectation, lik_covariance = likelihood.conditional_moments(sigma_points)
# # Compute muₙ via cubature:
# # muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
# mu = np.sum(
# w * lik_expectation, axis=-1
# )[:, None]
# # Compute variance S via cubature:
# # S = ∫ [(E[yₙ|fₙ]-muₙ) (E[yₙ|fₙ]-muₙ)' + Cov[yₙ|fₙ]] 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-muₙ) (E[yₙ|fsigᵢ]-muₙ)' + Cov[yₙ|fₙ]]
# # TODO: allow for multi-dim cubature
# S = np.sum(
# w * ((lik_expectation - mu) * (lik_expectation - mu) + lik_covariance), axis=-1
# )[:, None]
# # Compute cross covariance C via cubature:
# # C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-muₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-muₙ)'
# C = np.sum(
# w * (sigma_points - mean) * (lik_expectation - mu), axis=-1
# )[:, None]
# # compute equivalent likelihood noise, omega
# omega = S - C.T @ solve(cov, C)
# # Compute derivative of z via cubature:
# # d_mu = ∫ E[yₙ|fₙ] vₙ⁻¹ (fₙ-mₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] vₙ⁻¹ (fsigᵢ-mₙ)
# prec = inv(cov)
# d_mu = np.sum(
# # w * lik_expectation * (solve(cov, sigma_points - mean)), axis=-1
# w * lik_expectation * (prec @ (sigma_points - mean)), axis=-1
# )[None, :]
# # Second derivative:
# # d2_mu = -∫ E[yₙ|fₙ] vₙ⁻¹ 𝓝(fₙ|mₙ,vₙ) dfₙ + ∫ E[yₙ|fₙ] (vₙ⁻¹ (fₙ-mₙ))² 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] ((vₙ⁻¹ (fsigᵢ-mₙ))² - vₙ⁻¹)
# d2_mu = np.sum(
# w * lik_expectation * (prec @ (sigma_points - mean) ** 2 - prec), axis=-1
# )[None, :]
# return mu, omega, d_mu, d2_mu
def statistical_linear_regression_cubature(likelihood, mean, cov, cubature=None):
"""
Perform statistical linear regression (SLR) using cubature.
We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω).
TODO: this currently assumes an additive noise model (ok for our current applications), make more general
"""
mu, omega = expected_conditional_mean(likelihood, mean, cov, cubature)
dmu_dm = expected_conditional_mean_dm(likelihood, mean, cov, cubature)
d2mu_dm2 = expected_conditional_mean_dm2(likelihood, mean, cov, cubature)
return mu.reshape(-1, 1), omega, dmu_dm.reshape(1, -1), d2mu_dm2
# return mu.reshape(-1, 1), omega, dmu_dm[None], np.swapaxes(d2mu_dm2, axis1=0, axis2=2)
def expected_conditional_mean(likelihood, mean, cov, cubature=None):
"""
Compute Eq[E[y|f]] = ∫ Ey[p(y|f)] 𝓝(f|mean,cov) dfₙ
"""
if cubature is None:
x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(mean.shape[0])
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
sigma_points = cholesky(cov) @ np.atleast_2d(x) + mean
lik_expectation, lik_covariance = likelihood.conditional_moments(sigma_points)
# Compute muₙ via cubature:
# muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
mu = np.sum(
w * lik_expectation, axis=-1
)[:, None]
S = np.sum(
# w * ((lik_expectation - mu) @ (lik_expectation - mu).T + lik_covariance), axis=-1 # TODO: CHECK MULTI-DIM
w * ((lik_expectation - mu) ** 2 + lik_covariance), axis=-1
)[:, None]
# Compute cross covariance C via cubature:
# C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-muₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-muₙ)'
C = np.sum(
w * (sigma_points - mean) * (lik_expectation - mu), axis=-1
)[:, None]
# compute equivalent likelihood noise, omega
omega = S - C.T @ solve(cov, C)
return np.squeeze(mu), omega
def expected_conditional_mean_dm(likelihood, mean, cov, cubature=None):
"""
"""
dmu_dm, _ = grad(expected_conditional_mean, argnums=1, has_aux=True)(likelihood, mean, cov, cubature)
return np.squeeze(dmu_dm)
def expected_conditional_mean_dm2(likelihood, mean, cov, cubature=None):
"""
"""
d2mu_dm2 = jacrev(expected_conditional_mean_dm, argnums=1)(likelihood, mean, cov, cubature)
return d2mu_dm2
def predict_cubature(likelihood, mean_f, var_f, cubature=None):
"""
predict in data space given predictive mean and var of the latent function
"""
if cubature is None:
x, w = gauss_hermite(mean_f.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(mean_f.shape[0])
chol_f, low = cho_factor(var_f)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to latent dist.
sigma_points = chol_f @ np.atleast_2d(x) + mean_f
# Compute moments via cubature:
# E[y] = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fₙ]
# E[y^2] = ∫ (Cov[yₙ|fₙ] + E[yₙ|fₙ]^2) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (Cov[yₙ|fₙ] + E[yₙ|fₙ]^2)
conditional_expectation, conditional_covariance = likelihood.conditional_moments(sigma_points)
expected_y = np.sum(w * conditional_expectation, axis=-1)
expected_y_squared = np.sum(w * (conditional_covariance + conditional_expectation ** 2), axis=-1)
# Cov[y] = E[y^2] - E[y]^2
covariance_y = expected_y_squared - expected_y ** 2
return expected_y, covariance_y
|
[
"numpy.polynomial.hermite.hermgauss",
"jax.numpy.atleast_2d",
"numpy.ones",
"jax.numpy.squeeze",
"itertools.product",
"jax.numpy.diag",
"jax.numpy.sum",
"jax.vmap",
"jax.numpy.maximum",
"jax.scipy.linalg.cho_factor",
"numpy.concatenate",
"numpy.block",
"jax.jacrev",
"jax.scipy.linalg.cholesky",
"numpy.zeros",
"numpy.array",
"jax.grad",
"numpy.eye",
"jax.numpy.sqrt",
"numpy.sqrt"
] |
[((2062, 2074), 'numpy.polynomial.hermite.hermgauss', 'hermgauss', (['H'], {}), '(H)\n', (2071, 2074), False, 'from numpy.polynomial.hermite import hermgauss\n'), ((2977, 2998), 'numpy.sqrt', 'onp.sqrt', (['(dim + kappa)'], {}), '(dim + kappa)\n', (2985, 2998), True, 'import numpy as onp\n'), ((4231, 4248), 'numpy.sqrt', 'onp.sqrt', (['(I4 / I2)'], {}), '(I4 / I2)\n', (4239, 4248), True, 'import numpy as onp\n'), ((8243, 8279), 'jax.numpy.sum', 'np.sum', (['weighted_log_likelihood_eval'], {}), '(weighted_log_likelihood_eval)\n', (8249, 8279), True, 'import jax.numpy as np\n'), ((8917, 9035), 'jax.numpy.sum', 'np.sum', (['((0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv) *\n weighted_log_likelihood_eval)'], {'axis': '(-1)'}), '((0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv) *\n weighted_log_likelihood_eval, axis=-1)\n', (8923, 9035), True, 'import jax.numpy as np\n'), ((9066, 9080), 'jax.numpy.diag', 'np.diag', (['dE_dv'], {}), '(dE_dv)\n', (9073, 9080), True, 'import jax.numpy as np\n'), ((9783, 9798), 'jax.scipy.linalg.cho_factor', 'cho_factor', (['cov'], {}), '(cov)\n', (9793, 9798), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((10155, 10196), 'jax.numpy.sum', 'np.sum', (['weighted_likelihood_eval'], {'axis': '(-1)'}), '(weighted_likelihood_eval, axis=-1)\n', (10161, 10196), True, 'import jax.numpy as np\n'), ((11438, 11457), 'jax.scipy.linalg.cho_factor', 'cho_factor', (['cav_cov'], {}), '(cav_cov)\n', (11448, 11457), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((11840, 11881), 'jax.numpy.sum', 'np.sum', (['weighted_likelihood_eval'], {'axis': '(-1)'}), '(weighted_likelihood_eval, axis=-1)\n', (11846, 11881), True, 'import jax.numpy as np\n'), ((12300, 12318), 'jax.numpy.sum', 'np.sum', (['d1'], {'axis': '(0)'}), '(d1, axis=0)\n', (12306, 12318), True, 'import jax.numpy as np\n'), ((12736, 12754), 'jax.numpy.sum', 'np.sum', (['d2'], {'axis': '(0)'}), '(d2, axis=0)\n', (12742, 12754), True, 'import jax.numpy as np\n'), ((17507, 17525), 'jax.numpy.squeeze', 'np.squeeze', (['dmu_dm'], {}), '(dmu_dm)\n', (17517, 17525), True, 'import jax.numpy as np\n'), ((18080, 18097), 'jax.scipy.linalg.cho_factor', 'cho_factor', (['var_f'], {}), '(var_f)\n', (18090, 18097), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((18544, 18588), 'jax.numpy.sum', 'np.sum', (['(w * conditional_expectation)'], {'axis': '(-1)'}), '(w * conditional_expectation, axis=-1)\n', (18550, 18588), True, 'import jax.numpy as np\n'), ((18614, 18690), 'jax.numpy.sum', 'np.sum', (['(w * (conditional_covariance + conditional_expectation ** 2))'], {'axis': '(-1)'}), '(w * (conditional_covariance + conditional_expectation ** 2), axis=-1)\n', (18620, 18690), True, 'import jax.numpy as np\n'), ((2522, 2532), 'jax.numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2529, 2532), True, 'import jax.numpy as np\n'), ((3053, 3076), 'numpy.array', 'onp.array', (['[w0, wm, wm]'], {}), '([w0, wm, wm])\n', (3062, 3076), True, 'import numpy as onp\n'), ((3097, 3120), 'numpy.array', 'onp.array', (['[0.0, u, -u]'], {}), '([0.0, u, -u])\n', (3106, 3120), True, 'import numpy as onp\n'), ((4501, 4524), 'numpy.array', 'onp.array', (['[A0, A1, A1]'], {}), '([A0, A1, A1])\n', (4510, 4524), True, 'import numpy as onp\n'), ((4545, 4568), 'numpy.array', 'onp.array', (['[0.0, u, -u]'], {}), '([0.0, u, -u])\n', (4554, 4568), True, 'import numpy as onp\n'), ((5714, 5731), 'numpy.zeros', 'onp.zeros', (['[n, 1]'], {}), '([n, 1])\n', (5723, 5731), True, 'import numpy as 
onp\n'), ((8532, 8617), 'jax.numpy.sum', 'np.sum', (['(invv * (sigma_points - post_mean) * weighted_log_likelihood_eval)'], {'axis': '(-1)'}), '(invv * (sigma_points - post_mean) * weighted_log_likelihood_eval,\n axis=-1)\n', (8538, 8617), True, 'import jax.numpy as np\n'), ((10227, 10247), 'jax.numpy.maximum', 'np.maximum', (['Z', '(1e-08)'], {}), '(Z, 1e-08)\n', (10237, 10247), True, 'import jax.numpy as np\n'), ((11912, 11932), 'jax.numpy.maximum', 'np.maximum', (['Z', '(1e-08)'], {}), '(Z, 1e-08)\n', (11922, 11932), True, 'import jax.numpy as np\n'), ((11950, 11970), 'jax.numpy.maximum', 'np.maximum', (['Z', '(1e-08)'], {}), '(Z, 1e-08)\n', (11960, 11970), True, 'import jax.numpy as np\n'), ((12146, 12206), 'jax.vmap', 'vmap', (['gaussian_first_derivative_wrt_mean', '(1, None, None, 1)'], {}), '(gaussian_first_derivative_wrt_mean, (1, None, None, 1))\n', (12150, 12206), False, 'from jax import vmap, grad, jacrev\n'), ((12580, 12641), 'jax.vmap', 'vmap', (['gaussian_second_derivative_wrt_mean', '(1, None, None, 1)'], {}), '(gaussian_second_derivative_wrt_mean, (1, None, None, 1))\n', (12584, 12641), False, 'from jax import vmap, grad, jacrev\n'), ((16661, 16697), 'jax.numpy.sum', 'np.sum', (['(w * lik_expectation)'], {'axis': '(-1)'}), '(w * lik_expectation, axis=-1)\n', (16667, 16697), True, 'import jax.numpy as np\n'), ((16729, 16796), 'jax.numpy.sum', 'np.sum', (['(w * ((lik_expectation - mu) ** 2 + lik_covariance))'], {'axis': '(-1)'}), '(w * ((lik_expectation - mu) ** 2 + lik_covariance), axis=-1)\n', (16735, 16796), True, 'import jax.numpy as np\n'), ((17091, 17158), 'jax.numpy.sum', 'np.sum', (['(w * (sigma_points - mean) * (lik_expectation - mu))'], {'axis': '(-1)'}), '(w * (sigma_points - mean) * (lik_expectation - mu), axis=-1)\n', (17097, 17158), True, 'import jax.numpy as np\n'), ((17278, 17292), 'jax.numpy.squeeze', 'np.squeeze', (['mu'], {}), '(mu)\n', (17288, 17292), True, 'import jax.numpy as np\n'), ((17406, 17462), 'jax.grad', 'grad', (['expected_conditional_mean'], {'argnums': '(1)', 'has_aux': '(True)'}), '(expected_conditional_mean, argnums=1, has_aux=True)\n', (17410, 17462), False, 'from jax import vmap, grad, jacrev\n'), ((17632, 17679), 'jax.jacrev', 'jacrev', (['expected_conditional_mean_dm'], {'argnums': '(1)'}), '(expected_conditional_mean_dm, argnums=1)\n', (17638, 17679), False, 'from jax import vmap, grad, jacrev\n'), ((2097, 2130), 'itertools.product', 'itertools.product', (['*((gh_x,) * D)'], {}), '(*((gh_x,) * D))\n', (2114, 2130), False, 'import itertools\n'), ((3265, 3296), 'numpy.array', 'onp.array', (['[w0, wm, wm, wm, wm]'], {}), '([w0, wm, wm, wm, wm])\n', (3274, 3296), True, 'import numpy as onp\n'), ((3317, 3376), 'numpy.block', 'onp.block', (['[[0.0, u, 0.0, -u, 0.0], [0.0, 0.0, u, 0.0, -u]]'], {}), '([[0.0, u, 0.0, -u, 0.0], [0.0, 0.0, u, 0.0, -u]])\n', (3326, 3376), True, 'import numpy as onp\n'), ((4605, 4652), 'numpy.array', 'onp.array', (['[A0, A1, A1, A1, A1, A2, A2, A2, A2]'], {}), '([A0, A1, A1, A1, A1, A2, A2, A2, A2])\n', (4614, 4652), True, 'import numpy as onp\n'), ((4673, 4765), 'numpy.block', 'onp.block', (['[[0.0, u, -u, 0.0, 0.0, u, -u, u, -u], [0.0, 0.0, 0.0, u, -u, u, -u, -u, u]]'], {}), '([[0.0, u, -u, 0.0, 0.0, u, -u, u, -u], [0.0, 0.0, 0.0, u, -u, u, \n -u, -u, u]])\n', (4682, 4765), True, 'import numpy as onp\n'), ((5809, 5830), 'numpy.zeros', 'onp.zeros', (['[n, 2 * n]'], {}), '([n, 2 * n])\n', (5818, 5830), True, 'import numpy as onp\n'), ((6017, 6029), 'numpy.zeros', 'onp.zeros', (['n'], {}), '(n)\n', (6026, 6029), 
True, 'import numpy as onp\n'), ((7890, 7908), 'jax.scipy.linalg.cholesky', 'cholesky', (['post_cov'], {}), '(post_cov)\n', (7898, 7908), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((7911, 7927), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (7924, 7927), True, 'import jax.numpy as np\n'), ((8487, 8504), 'jax.numpy.diag', 'np.diag', (['post_cov'], {}), '(post_cov)\n', (8494, 8504), True, 'import jax.numpy as np\n'), ((9894, 9910), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (9907, 9910), True, 'import jax.numpy as np\n'), ((11553, 11569), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (11566, 11569), True, 'import jax.numpy as np\n'), ((16429, 16442), 'jax.scipy.linalg.cholesky', 'cholesky', (['cov'], {}), '(cov)\n', (16437, 16442), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((16445, 16461), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (16458, 16461), True, 'import jax.numpy as np\n'), ((18192, 18208), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (18205, 18208), True, 'import jax.numpy as np\n'), ((2171, 2204), 'itertools.product', 'itertools.product', (['*((gh_w,) * D)'], {}), '(*((gh_w,) * D))\n', (2188, 2204), False, 'import itertools\n'), ((3459, 3498), 'numpy.array', 'onp.array', (['[w0, wm, wm, wm, wm, wm, wm]'], {}), '([w0, wm, wm, wm, wm, wm, wm])\n', (3468, 3498), True, 'import numpy as onp\n'), ((3519, 3637), 'numpy.block', 'onp.block', (['[[0.0, u, 0.0, 0.0, -u, 0.0, 0.0], [0.0, 0.0, u, 0.0, 0.0, -u, 0.0], [0.0, \n 0.0, 0.0, u, 0.0, 0.0, -u]]'], {}), '([[0.0, u, 0.0, 0.0, -u, 0.0, 0.0], [0.0, 0.0, u, 0.0, 0.0, -u, \n 0.0], [0.0, 0.0, 0.0, u, 0.0, 0.0, -u]])\n', (3528, 3637), True, 'import numpy as onp\n'), ((4823, 4914), 'numpy.array', 'onp.array', (['[A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2]'], {}), '([A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2,\n A2, A2, A2])\n', (4832, 4914), True, 'import numpy as onp\n'), ((4931, 5200), 'numpy.block', 'onp.block', (['[[0.0, u, -u, 0.0, 0.0, 0.0, 0.0, u, -u, u, -u, u, -u, u, -u, 0.0, 0.0, 0.0,\n 0.0], [0.0, 0.0, 0.0, u, -u, 0.0, 0.0, u, -u, -u, u, 0.0, 0.0, 0.0, 0.0,\n u, -u, u, -u], [0.0, 0.0, 0.0, 0.0, 0.0, u, -u, 0.0, 0.0, 0.0, 0.0, u, \n -u, -u, u, u, -u, -u, u]]'], {}), '([[0.0, u, -u, 0.0, 0.0, 0.0, 0.0, u, -u, u, -u, u, -u, u, -u, 0.0,\n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, u, -u, 0.0, 0.0, u, -u, -u, u, 0.0, 0.0,\n 0.0, 0.0, u, -u, u, -u], [0.0, 0.0, 0.0, 0.0, 0.0, u, -u, 0.0, 0.0, 0.0,\n 0.0, u, -u, -u, u, u, -u, -u, u]])\n', (4940, 5200), True, 'import numpy as onp\n'), ((5373, 5410), 'numpy.concatenate', 'onp.concatenate', (['[U0, U1, U2]'], {'axis': '(1)'}), '([U0, U1, U2], axis=1)\n', (5388, 5410), True, 'import numpy as onp\n'), ((5873, 5904), 'numpy.zeros', 'onp.zeros', (['[n, 2 * n * (n - 1)]'], {}), '([n, 2 * n * (n - 1)])\n', (5882, 5904), True, 'import numpy as onp\n'), ((3830, 3851), 'numpy.sqrt', 'onp.sqrt', (['(dim + kappa)'], {}), '(dim + kappa)\n', (3838, 3851), True, 'import numpy as onp\n'), ((3736, 3772), 'numpy.array', 'onp.array', (['[[kappa / (dim + kappa)]]'], {}), '([[kappa / (dim + kappa)]])\n', (3745, 3772), True, 'import numpy as onp\n'), ((3779, 3801), 'numpy.ones', 'onp.ones', (['[1, 2 * dim]'], {}), '([1, 2 * dim])\n', (3787, 3801), True, 'import numpy as onp\n'), ((3865, 3884), 'numpy.zeros', 'onp.zeros', (['[dim, 1]'], {}), '([dim, 1])\n', (3874, 3884), True, 'import numpy as onp\n'), ((3886, 3898), 'numpy.eye', 'onp.eye', 
(['dim'], {}), '(dim)\n', (3893, 3898), True, 'import numpy as onp\n'), ((5451, 5472), 'numpy.ones', 'onp.ones', (['U0.shape[1]'], {}), '(U0.shape[1])\n', (5459, 5472), True, 'import numpy as onp\n'), ((5514, 5535), 'numpy.ones', 'onp.ones', (['U1.shape[1]'], {}), '(U1.shape[1])\n', (5522, 5535), True, 'import numpy as onp\n'), ((5577, 5598), 'numpy.ones', 'onp.ones', (['U2.shape[1]'], {}), '(U2.shape[1])\n', (5585, 5598), True, 'import numpy as onp\n'), ((3901, 3913), 'numpy.eye', 'onp.eye', (['dim'], {}), '(dim)\n', (3908, 3913), True, 'import numpy as onp\n')]
|
import pytest
from cqc.util import parse_cqc_message
from cqc.pythonLib import CQCConnection, qubit
from cqc.pythonLib import CQCMixConnection
from cqc.cqcHeader import (
CQCCmdHeader,
CQCHeader,
CQCType,
CQC_CMD_H,
CQC_CMD_NEW,
CQC_CMD_RELEASE,
)
from utilities import get_header
from test_cases_cqcconnection.flush import (
commands_to_apply_flush, get_expected_headers_flush
)
from test_cases_cqcconnection.cqc_mix import (
commands_to_apply_bit_flip_code,
get_expected_headers_bit_flip_code,
commands_to_apply_simple_mix,
get_expected_headers_simple_mix,
commands_to_apply_mix_with_factory,
get_expected_headers_mix_with_factory,
commands_to_apply_mix_if_else,
get_expected_headers_mix_if_else,
commands_to_apply_mix_nested_if_else,
get_expected_headers_mix_nested_if_else,
)
def get_expected_headers_simple_h():
"""What headers we expect"""
hdr_tp_cmd = get_header(
CQCHeader,
version=2,
tp=CQCType.COMMAND,
app_id=0,
length=CQCCmdHeader.HDR_LENGTH,
)
hdr_cmd_new = get_header(
CQCCmdHeader,
qubit_id=0,
instr=CQC_CMD_NEW,
notify=True,
action=False,
block=True,
)
hdr_cmd_h = get_header(
CQCCmdHeader,
qubit_id=1,
instr=CQC_CMD_H,
notify=True,
action=False,
block=True,
)
hdr_cmd_release = get_header(
CQCCmdHeader,
qubit_id=1,
instr=CQC_CMD_RELEASE,
notify=True,
action=False,
block=True,
)
expected_headers = [
hdr_tp_cmd + hdr_cmd_new,
hdr_tp_cmd + hdr_cmd_h,
hdr_tp_cmd + hdr_cmd_release,
]
return expected_headers
def commands_to_apply_simple_h(cqc):
"""What to do with the CQCConnection"""
q = qubit(cqc)
q.H()
@pytest.mark.parametrize("conn_type, commands_to_apply, get_expected_headers", [
(CQCConnection, commands_to_apply_simple_h, get_expected_headers_simple_h),
(CQCConnection, commands_to_apply_flush, get_expected_headers_flush),
(CQCMixConnection, commands_to_apply_bit_flip_code, get_expected_headers_bit_flip_code),
(CQCMixConnection, commands_to_apply_simple_mix, get_expected_headers_simple_mix),
(CQCMixConnection, commands_to_apply_mix_with_factory, get_expected_headers_mix_with_factory),
(CQCMixConnection, commands_to_apply_mix_if_else, get_expected_headers_mix_if_else),
(CQCMixConnection, commands_to_apply_mix_nested_if_else, get_expected_headers_mix_nested_if_else),
])
def test_commands(conn_type, commands_to_apply, get_expected_headers, monkeypatch, mock_socket, mock_read_message):
# logging.getLogger().setLevel(logging.DEBUG)
with conn_type("Test", socket_address=('localhost', 8000), use_classical_communication=False) as cqc:
commands_to_apply(cqc)
expected_messages = get_expected_headers()
send_calls = list(filter(lambda call: call.name == 'send', cqc._s.calls))
sent_messages = [call.args[0] for call in send_calls]
full_msg = {}
# Parse and print what we expect and what we got
for name, messages in zip(["EXPECTED", "GOT"], [expected_messages, sent_messages]):
print("\n{}:".format(name))
for msg in messages:
print('[')
for hdr in parse_cqc_message(msg):
print(" {}".format(hdr))
print('\n]')
full_msg[name] = b''.join([msg for msg in messages])
# Check if full messages are equal
assert full_msg["EXPECTED"] == full_msg["GOT"]
for got, expected in zip(sent_messages, expected_messages):
        # A None entry lets the test omit a specific expected header while still checking the total count
if expected is not None:
assert got == expected
|
[
"cqc.pythonLib.qubit",
"pytest.mark.parametrize",
"utilities.get_header",
"cqc.util.parse_cqc_message"
] |
[((1880, 2602), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""conn_type, commands_to_apply, get_expected_headers"""', '[(CQCConnection, commands_to_apply_simple_h, get_expected_headers_simple_h),\n (CQCConnection, commands_to_apply_flush, get_expected_headers_flush), (\n CQCMixConnection, commands_to_apply_bit_flip_code,\n get_expected_headers_bit_flip_code), (CQCMixConnection,\n commands_to_apply_simple_mix, get_expected_headers_simple_mix), (\n CQCMixConnection, commands_to_apply_mix_with_factory,\n get_expected_headers_mix_with_factory), (CQCMixConnection,\n commands_to_apply_mix_if_else, get_expected_headers_mix_if_else), (\n CQCMixConnection, commands_to_apply_mix_nested_if_else,\n get_expected_headers_mix_nested_if_else)]'], {}), "('conn_type, commands_to_apply, get_expected_headers',\n [(CQCConnection, commands_to_apply_simple_h,\n get_expected_headers_simple_h), (CQCConnection, commands_to_apply_flush,\n get_expected_headers_flush), (CQCMixConnection,\n commands_to_apply_bit_flip_code, get_expected_headers_bit_flip_code), (\n CQCMixConnection, commands_to_apply_simple_mix,\n get_expected_headers_simple_mix), (CQCMixConnection,\n commands_to_apply_mix_with_factory,\n get_expected_headers_mix_with_factory), (CQCMixConnection,\n commands_to_apply_mix_if_else, get_expected_headers_mix_if_else), (\n CQCMixConnection, commands_to_apply_mix_nested_if_else,\n get_expected_headers_mix_nested_if_else)])\n", (1903, 2602), False, 'import pytest\n'), ((946, 1045), 'utilities.get_header', 'get_header', (['CQCHeader'], {'version': '(2)', 'tp': 'CQCType.COMMAND', 'app_id': '(0)', 'length': 'CQCCmdHeader.HDR_LENGTH'}), '(CQCHeader, version=2, tp=CQCType.COMMAND, app_id=0, length=\n CQCCmdHeader.HDR_LENGTH)\n', (956, 1045), False, 'from utilities import get_header\n'), ((1107, 1206), 'utilities.get_header', 'get_header', (['CQCCmdHeader'], {'qubit_id': '(0)', 'instr': 'CQC_CMD_NEW', 'notify': '(True)', 'action': '(False)', 'block': '(True)'}), '(CQCCmdHeader, qubit_id=0, instr=CQC_CMD_NEW, notify=True, action\n =False, block=True)\n', (1117, 1206), False, 'from utilities import get_header\n'), ((1274, 1371), 'utilities.get_header', 'get_header', (['CQCCmdHeader'], {'qubit_id': '(1)', 'instr': 'CQC_CMD_H', 'notify': '(True)', 'action': '(False)', 'block': '(True)'}), '(CQCCmdHeader, qubit_id=1, instr=CQC_CMD_H, notify=True, action=\n False, block=True)\n', (1284, 1371), False, 'from utilities import get_header\n'), ((1445, 1547), 'utilities.get_header', 'get_header', (['CQCCmdHeader'], {'qubit_id': '(1)', 'instr': 'CQC_CMD_RELEASE', 'notify': '(True)', 'action': '(False)', 'block': '(True)'}), '(CQCCmdHeader, qubit_id=1, instr=CQC_CMD_RELEASE, notify=True,\n action=False, block=True)\n', (1455, 1547), False, 'from utilities import get_header\n'), ((1856, 1866), 'cqc.pythonLib.qubit', 'qubit', (['cqc'], {}), '(cqc)\n', (1861, 1866), False, 'from cqc.pythonLib import CQCConnection, qubit\n'), ((3347, 3369), 'cqc.util.parse_cqc_message', 'parse_cqc_message', (['msg'], {}), '(msg)\n', (3364, 3369), False, 'from cqc.util import parse_cqc_message\n')]
|
from sqlalchemy import Column
from sqlalchemy.types import JSON, Text, Boolean, TIMESTAMP, BigInteger
from sqlalchemy.dialects import postgresql as psql
from steampipe_alchemy.mixins import FormatMixins
from steampipe_alchemy import Base
class AwsVpcRoute(Base, FormatMixins):
__tablename__ = 'aws_vpc_route'
destination_ipv6_cidr_block = Column('destination_ipv6_cidr_block', psql.CIDR, nullable=True)
destination_cidr_block = Column('destination_cidr_block', psql.CIDR, nullable=True)
akas = Column('akas', JSON, nullable=True)
destination_prefix_list_id = Column('destination_prefix_list_id', Text, nullable=True)
egress_only_internet_gateway_id = Column('egress_only_internet_gateway_id', Text, nullable=True)
gateway_id = Column('gateway_id', Text, nullable=True)
instance_id = Column('instance_id', Text, nullable=True)
instance_owner_id = Column('instance_owner_id', Text, nullable=True)
local_gateway_id = Column('local_gateway_id', Text, nullable=True)
nat_gateway_id = Column('nat_gateway_id', Text, nullable=True)
network_interface_id = Column('network_interface_id', Text, nullable=True)
transit_gateway_id = Column('transit_gateway_id', Text, nullable=True)
vpc_peering_connection_id = Column('vpc_peering_connection_id', Text, nullable=True)
origin = Column('origin', Text, nullable=True)
title = Column('title', Text, primary_key=True, nullable=True)
partition = Column('partition', Text, nullable=True)
region = Column('region', Text, nullable=True)
route_table_id = Column('route_table_id', Text, nullable=True)
account_id = Column('account_id', Text, nullable=True)
state = Column('state', Text, nullable=True)
carrier_gateway_id = Column('carrier_gateway_id', Text, nullable=True)
|
[
"sqlalchemy.Column"
] |
[((349, 412), 'sqlalchemy.Column', 'Column', (['"""destination_ipv6_cidr_block"""', 'psql.CIDR'], {'nullable': '(True)'}), "('destination_ipv6_cidr_block', psql.CIDR, nullable=True)\n", (355, 412), False, 'from sqlalchemy import Column\n'), ((442, 500), 'sqlalchemy.Column', 'Column', (['"""destination_cidr_block"""', 'psql.CIDR'], {'nullable': '(True)'}), "('destination_cidr_block', psql.CIDR, nullable=True)\n", (448, 500), False, 'from sqlalchemy import Column\n'), ((512, 547), 'sqlalchemy.Column', 'Column', (['"""akas"""', 'JSON'], {'nullable': '(True)'}), "('akas', JSON, nullable=True)\n", (518, 547), False, 'from sqlalchemy import Column\n'), ((581, 638), 'sqlalchemy.Column', 'Column', (['"""destination_prefix_list_id"""', 'Text'], {'nullable': '(True)'}), "('destination_prefix_list_id', Text, nullable=True)\n", (587, 638), False, 'from sqlalchemy import Column\n'), ((677, 739), 'sqlalchemy.Column', 'Column', (['"""egress_only_internet_gateway_id"""', 'Text'], {'nullable': '(True)'}), "('egress_only_internet_gateway_id', Text, nullable=True)\n", (683, 739), False, 'from sqlalchemy import Column\n'), ((757, 798), 'sqlalchemy.Column', 'Column', (['"""gateway_id"""', 'Text'], {'nullable': '(True)'}), "('gateway_id', Text, nullable=True)\n", (763, 798), False, 'from sqlalchemy import Column\n'), ((817, 859), 'sqlalchemy.Column', 'Column', (['"""instance_id"""', 'Text'], {'nullable': '(True)'}), "('instance_id', Text, nullable=True)\n", (823, 859), False, 'from sqlalchemy import Column\n'), ((884, 932), 'sqlalchemy.Column', 'Column', (['"""instance_owner_id"""', 'Text'], {'nullable': '(True)'}), "('instance_owner_id', Text, nullable=True)\n", (890, 932), False, 'from sqlalchemy import Column\n'), ((956, 1003), 'sqlalchemy.Column', 'Column', (['"""local_gateway_id"""', 'Text'], {'nullable': '(True)'}), "('local_gateway_id', Text, nullable=True)\n", (962, 1003), False, 'from sqlalchemy import Column\n'), ((1025, 1070), 'sqlalchemy.Column', 'Column', (['"""nat_gateway_id"""', 'Text'], {'nullable': '(True)'}), "('nat_gateway_id', Text, nullable=True)\n", (1031, 1070), False, 'from sqlalchemy import Column\n'), ((1098, 1149), 'sqlalchemy.Column', 'Column', (['"""network_interface_id"""', 'Text'], {'nullable': '(True)'}), "('network_interface_id', Text, nullable=True)\n", (1104, 1149), False, 'from sqlalchemy import Column\n'), ((1175, 1224), 'sqlalchemy.Column', 'Column', (['"""transit_gateway_id"""', 'Text'], {'nullable': '(True)'}), "('transit_gateway_id', Text, nullable=True)\n", (1181, 1224), False, 'from sqlalchemy import Column\n'), ((1257, 1313), 'sqlalchemy.Column', 'Column', (['"""vpc_peering_connection_id"""', 'Text'], {'nullable': '(True)'}), "('vpc_peering_connection_id', Text, nullable=True)\n", (1263, 1313), False, 'from sqlalchemy import Column\n'), ((1327, 1364), 'sqlalchemy.Column', 'Column', (['"""origin"""', 'Text'], {'nullable': '(True)'}), "('origin', Text, nullable=True)\n", (1333, 1364), False, 'from sqlalchemy import Column\n'), ((1377, 1431), 'sqlalchemy.Column', 'Column', (['"""title"""', 'Text'], {'primary_key': '(True)', 'nullable': '(True)'}), "('title', Text, primary_key=True, nullable=True)\n", (1383, 1431), False, 'from sqlalchemy import Column\n'), ((1448, 1488), 'sqlalchemy.Column', 'Column', (['"""partition"""', 'Text'], {'nullable': '(True)'}), "('partition', Text, nullable=True)\n", (1454, 1488), False, 'from sqlalchemy import Column\n'), ((1502, 1539), 'sqlalchemy.Column', 'Column', (['"""region"""', 'Text'], {'nullable': '(True)'}), "('region', Text, 
nullable=True)\n", (1508, 1539), False, 'from sqlalchemy import Column\n'), ((1561, 1606), 'sqlalchemy.Column', 'Column', (['"""route_table_id"""', 'Text'], {'nullable': '(True)'}), "('route_table_id', Text, nullable=True)\n", (1567, 1606), False, 'from sqlalchemy import Column\n'), ((1624, 1665), 'sqlalchemy.Column', 'Column', (['"""account_id"""', 'Text'], {'nullable': '(True)'}), "('account_id', Text, nullable=True)\n", (1630, 1665), False, 'from sqlalchemy import Column\n'), ((1678, 1714), 'sqlalchemy.Column', 'Column', (['"""state"""', 'Text'], {'nullable': '(True)'}), "('state', Text, nullable=True)\n", (1684, 1714), False, 'from sqlalchemy import Column\n'), ((1740, 1789), 'sqlalchemy.Column', 'Column', (['"""carrier_gateway_id"""', 'Text'], {'nullable': '(True)'}), "('carrier_gateway_id', Text, nullable=True)\n", (1746, 1789), False, 'from sqlalchemy import Column\n')]
|
from rest_framework import viewsets
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.authentication import SessionAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from django.core import exceptions
from django.db.models import Q
from rest_framework.mixins import (CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from apps.equipment.serializes.basicinfor_serialize import *
from apps.equipment.filters.basicinfor_filters import *
from apps.commonFunction import StandardResultsSetPagination
class EquipmentAuditRecordView(ListModelMixin,RetrieveModelMixin, viewsets.GenericViewSet):
"""
    Operation records for the current app
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
filter_class = EquipmentAuditRecordFilters
search_fields = ["uri", "uri_id"]
ordering_fields = ["id","update_time"]
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication]
    permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]  # IsOwnerOrReadOnly is a permission class, not an authenticator
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "list":
return EquipmentAuditRecordSerialize_List
elif self.action == "retrieve":
return EquipmentAuditRecordSerialize_Retrieve
return EquipmentAuditRecordSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:  # superusers can see everything
            return EquipmentAuditRecordModel.objects.all().order_by("-id")
        user = self.request.user.username
        condtions1 = {'user__iexact': user}  # ordinary users can only see their own records
        if self.action == "list":  # list action
            if not self.request.user.has_perm('equipment.view_equipmentauditrecordmodel'):  # the current user lacks the list permission
                raise exceptions.PermissionDenied
        if self.action == "retrieve":  # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_equipmentauditrecordmodel'):  # the current user lacks the detail permission
                raise exceptions.PermissionDenied
return EquipmentAuditRecordModel.objects.filter(Q(**condtions1))
class EquipmentAlterRecordView(CreateModelMixin, viewsets.GenericViewSet):
"""
    Audit records for the current app
"""
queryset = EquipmentAlterRecordModel.objects.all().order_by("-id")
serializer_class = EquipmentAlterRecordSerialize_Create
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication]
    permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]
class EquipmentImageView(CreateModelMixin,viewsets.GenericViewSet):
"""
    Image items for the current app
"""
queryset = EquipmentImageModel.objects.all().order_by("-id")
serializer_class = EquipmentImageSerialize_Create
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication]
    permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]
class EquipmentFileView(CreateModelMixin, viewsets.GenericViewSet):
"""
    File items for the current app
"""
queryset = EquipmentFileModel.objects.all().order_by("-id")
serializer_class = EquipmentFileSerialize_Create
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication]
    permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]
class EquipmentVendorDefinitionView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Equipment vendor definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
filter_class = EquipmentVendorDefinitionFilters
search_fields = ["name","code","company_name","company_abbre"]
ordering_fields = ["id","update_time"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return EquipmentVendorDefinitionSerialize_Create
elif self.action == "list":
return EquipmentVendorDefinitionSerialize_List
elif self.action == "retrieve":
return EquipmentVendorDefinitionSerialize_Retrieve
elif self.action == "update":
return EquipmentVendorDefinitionSerialize_Update
elif self.action == "partial_update":
return EquipmentVendorDefinitionSerialize_Partial
return EquipmentVendorDefinitionSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return EquipmentVendorDefinitionModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_equipmentvendordefinitionmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_equipmentvendordefinitionmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
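        # Note: Q(**{}) built from a blanked dict is an empty Q, which Django's
        # `|` operator treats as an identity element, so emptying a condtionsN
        # dict above simply drops that visibility clause from the filter below.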
return EquipmentVendorDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class PartsTypeDefinitionView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Parts type definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
filter_class = PartsTypeDefinitionFilters
search_fields = ["name","code"]
ordering_fields = ["id","update_time"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return PartsTypeDefinitionSerialize_Create
elif self.action == "list":
return PartsTypeDefinitionSerialize_List
elif self.action == "retrieve":
return PartsTypeDefinitionSerialize_Retrieve
elif self.action == "update":
return PartsTypeDefinitionSerialize_Update
elif self.action == "partial_update":
return PartsTypeDefinitionSerialize_Partial
return PartsTypeDefinitionSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return PartsTypeDefinitionModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_partstypedefinitionmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_partstypedefinitionmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
return PartsTypeDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class PartsTypeDefinitionViews(ListModelMixin,viewsets.GenericViewSet):
"""
    Parts type hierarchy
"""
serializer_class = PartsTypeDefinitionSerialize_First
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
def get_queryset(self):
if (self.request.user.is_superuser or self.request.user.has_perm('equipment.view_partstypedefinitionmodel')):
return PartsTypeDefinitionModel.objects.filter(classes="一级类别")
else:
raise exceptions.PermissionDenied
class PartsInforDefinitionView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Parts information definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
filter_class = PartsInforDefinitionFilters
search_fields = ["name","code",]
ordering_fields = ["id","update_time"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return PartsInforDefinitionSerialize_Create
elif self.action == "list":
return PartsInforDefinitionSerialize_List
elif self.action == "retrieve":
return PartsInforDefinitionSerialize_Retrieve
elif self.action == "update":
return PartsInforDefinitionSerialize_Update
elif self.action == "partial_update":
return PartsInforDefinitionSerialize_Partial
return PartsInforDefinitionSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return PartsInforDefinitionModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_partsinfordefinitionmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_partsinfordefinitionmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
return PartsInforDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class EquipmentTypeDefinitionView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Equipment type definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
filter_class = EquipmentTypeDefinitionFilters
search_fields = ["name","code",]
ordering_fields = ["id","update_time"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return EquipmentTypeDefinitionSerialize_Create
elif self.action == "list":
return EquipmentTypeDefinitionSerialize_List
elif self.action == "retrieve":
return EquipmentTypeDefinitionSerialize_Retrieve
elif self.action == "update":
return EquipmentTypeDefinitionSerialize_Update
elif self.action == "partial_update":
return EquipmentTypeDefinitionSerialize_Partial
return EquipmentTypeDefinitionSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return EquipmentTypeDefinitionModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_equipmenttypedefinitionmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_equipmenttypedefinitionmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
return EquipmentTypeDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class EquipmentTypeDefinitionViews(ListModelMixin,viewsets.GenericViewSet):
"""
    Equipment type hierarchy
"""
serializer_class = EquipmentTypeDefinitionSerialize_First
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
def get_queryset(self):
if (self.request.user.is_superuser or self.request.user.has_perm('equipment.view_equipmenttypedefinitionmodel')):
return EquipmentTypeDefinitionModel.objects.filter(classes="一级类别")
else:
raise exceptions.PermissionDenied
class EquipmentAccountView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Equipment account (ledger) definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
filter_class = EquipmentAccountFilters
search_fields = ["name","code",]
ordering_fields = ["id","update_time","dataOfActivation","dataOfPurchase"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return EquipmentAccountSerialize_Create
elif self.action == "list":
return EquipmentAccountSerialize_List
elif self.action == "retrieve":
return EquipmentAccountSerialize_Retrieve
elif self.action == "update":
return EquipmentAccountSerialize_Update
elif self.action == "partial_update":
return EquipmentAccountSerialize_Partial
return EquipmentAccountSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return EquipmentAccountModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_equipmentaccountmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_equipmentaccountmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
return EquipmentAccountModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class MaintainRecordTypeDefinitionView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Maintenance record type definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
filter_class = MaintainRecordTypeDefinitionFilters
search_fields = ["name","code"]
ordering_fields = ["id","update_time"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return MaintainRecordTypeDefinitionSerialize_Create
elif self.action == "list":
return MaintainRecordTypeDefinitionSerialize_List
elif self.action == "retrieve":
return MaintainRecordTypeDefinitionSerialize_Retrieve
elif self.action == "update":
return MaintainRecordTypeDefinitionSerialize_Update
elif self.action == "partial_update":
return MaintainRecordTypeDefinitionSerialize_Partial
return MaintainRecordTypeDefinitionSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return MaintainRecordTypeDefinitionModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_maintainrecordtypedefinitionmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_maintainrecordtypedefinitionmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
return MaintainRecordTypeDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class MaintainRecordTypeDefinitionViews(ListModelMixin,viewsets.GenericViewSet):
"""
    Maintenance record type hierarchy
"""
serializer_class = MaintainRecordTypeDefinitionSerialize_First
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
def get_queryset(self):
if (self.request.user.is_superuser or self.request.user.has_perm('equipment.view_maintainrecordtypedefinitionmodel')):
return MaintainRecordTypeDefinitionModel.objects.filter(classes="一级类别")
else:
raise exceptions.PermissionDenied
class EquipmentBoardView(CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin,
viewsets.GenericViewSet):
"""
    Equipment board (dashboard) definition
"""
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
    filter_class = EquipmentBoardFilters
search_fields = ["name","code"]
ordering_fields = ["id","update_time"]
authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication,]
permission_classes = [IsAuthenticated, ]
    # Override the serializer lookup: return a different serializer class per action
def get_serializer_class(self):
if self.action == "create":
return EquipmentBoardSerialize_Create
elif self.action == "list":
return EquipmentBoardSerialize_List
elif self.action == "retrieve":
return EquipmentBoardSerialize_Retrieve
elif self.action == "update":
return EquipmentBoardSerialize_Update
elif self.action == "partial_update":
return EquipmentBoardSerialize_Partial
return EquipmentBoardSerialize_List
    # Override the queryset lookup: scope the visible data per action
    def get_queryset(self):
        if self.request.user.is_superuser:
            return EquipmentBoardModel.objects.all().order_by("-id") # superusers can see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中") # the creator can see records in the new ("新建"), under-review ("审核中") and in-use ("使用中") states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",) # the auditor can see records in the under-review and in-use states
                      }
        condtions3 = {'state__in': ("使用中",) # other users can only see in-use records
                      }
        if self.action == "list": # list action
            if not self.request.user.has_perm('equipment.view_equipmentboardmodel'): # the current user lacks the list permission
                condtions3 = {} # without the list permission, ordinary users see no list data
        if self.action == "retrieve": # retrieve (detail) action
            if not self.request.user.has_perm('equipment.read_equipmentboardmodel'): # the current user lacks the detail permission
                condtions3 = {} # without the detail permission, ordinary users see no detail data
        if self.action == "update": # update action
            condtions2 = {}
            condtions3 = {} # only the creator may update
        if self.action == "partial_update": # partial-update action
            condtions3 = {} # only the creator and the auditor may partially update
return EquipmentBoardModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
|
[
"django.db.models.Q"
] |
[((2530, 2545), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (2531, 2545), False, 'from django.db.models import Q\n'), ((6415, 6430), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (6416, 6430), False, 'from django.db.models import Q\n'), ((9076, 9091), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (9077, 9091), False, 'from django.db.models import Q\n'), ((12334, 12349), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (12335, 12349), False, 'from django.db.models import Q\n'), ((15047, 15062), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (15048, 15062), False, 'from django.db.models import Q\n'), ((18309, 18324), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (18310, 18324), False, 'from django.db.models import Q\n'), ((21080, 21095), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (21081, 21095), False, 'from django.db.models import Q\n'), ((24296, 24311), 'django.db.models.Q', 'Q', ([], {}), '(**condtions3)\n', (24297, 24311), False, 'from django.db.models import Q\n'), ((6379, 6394), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (6380, 6394), False, 'from django.db.models import Q\n'), ((6397, 6412), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (6398, 6412), False, 'from django.db.models import Q\n'), ((9040, 9055), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (9041, 9055), False, 'from django.db.models import Q\n'), ((9058, 9073), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (9059, 9073), False, 'from django.db.models import Q\n'), ((12298, 12313), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (12299, 12313), False, 'from django.db.models import Q\n'), ((12316, 12331), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (12317, 12331), False, 'from django.db.models import Q\n'), ((15011, 15026), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (15012, 15026), False, 'from django.db.models import Q\n'), ((15029, 15044), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (15030, 15044), False, 'from django.db.models import Q\n'), ((18273, 18288), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (18274, 18288), False, 'from django.db.models import Q\n'), ((18291, 18306), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (18292, 18306), False, 'from django.db.models import Q\n'), ((21044, 21059), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (21045, 21059), False, 'from django.db.models import Q\n'), ((21062, 21077), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (21063, 21077), False, 'from django.db.models import Q\n'), ((24260, 24275), 'django.db.models.Q', 'Q', ([], {}), '(**condtions1)\n', (24261, 24275), False, 'from django.db.models import Q\n'), ((24278, 24293), 'django.db.models.Q', 'Q', ([], {}), '(**condtions2)\n', (24279, 24293), False, 'from django.db.models import Q\n')]
|
from RandomGenerator.randomInt import randomInt
from numpy import random
def randomIntSeed(start, end, seed):
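    # Snapshot the global NumPy RNG state so seeding below does not perturb the
    # caller's random stream; the state is restored in the finally block.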
state = random.get_state()
random.seed(seed)
try:
randIntSeeded = randomInt(start, end)
return randIntSeeded
finally:
random.set_state(state)
|
[
"numpy.random.get_state",
"numpy.random.seed",
"RandomGenerator.randomInt.randomInt",
"numpy.random.set_state"
] |
[((124, 142), 'numpy.random.get_state', 'random.get_state', ([], {}), '()\n', (140, 142), False, 'from numpy import random\n'), ((147, 164), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (158, 164), False, 'from numpy import random\n'), ((198, 219), 'RandomGenerator.randomInt.randomInt', 'randomInt', (['start', 'end'], {}), '(start, end)\n', (207, 219), False, 'from RandomGenerator.randomInt import randomInt\n'), ((270, 293), 'numpy.random.set_state', 'random.set_state', (['state'], {}), '(state)\n', (286, 293), False, 'from numpy import random\n')]
|
from flask_restplus import fields
from api.restplus import api
model_score = api.model('Model Score', {
'algorithm': fields.String(required=True, description='Model name'),
'source_url': fields.String(required=True, description='Source URL'),
'field_names': fields.String(required=True, description='Field Names'),
})
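# Usage sketch (hypothetical, not part of this module): a Resource method would
# typically be decorated with @api.marshal_with(model_score) to serialise its
# responses with this schema.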
|
[
"flask_restplus.fields.String"
] |
[((123, 177), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(True)', 'description': '"""Model name"""'}), "(required=True, description='Model name')\n", (136, 177), False, 'from flask_restplus import fields\n'), ((197, 251), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(True)', 'description': '"""Source URL"""'}), "(required=True, description='Source URL')\n", (210, 251), False, 'from flask_restplus import fields\n'), ((272, 327), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(True)', 'description': '"""Field Names"""'}), "(required=True, description='Field Names')\n", (285, 327), False, 'from flask_restplus import fields\n')]
|
from functools import partial
from music.abstract_source import AbstractSource
from music.music_util import file_detail, get_file_info
class FileSource(AbstractSource):
"""
An audio source from a file.
"""
__slots__ = ('file_path', 'title')
def __init__(self, file_path: str):
"""
:param file_path: the file path.
"""
self.title, genre, artist, album, length = get_file_info(file_path)
self.file_path = file_path
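        # partial(...) captures the metadata now but defers the actual call, so
        # AbstractSource can render the detail string later on demand (the exact
        # timing is up to AbstractSource, which is not shown here).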
super().__init__(
partial(file_detail, self.title, genre, artist, album, length)
)
def __str__(self):
return self.title
def clean_up(self):
del self.title
del self.file_path
async def true_name(self) -> str:
"""
See `AbstractSource.true_name`
"""
return self.file_path
|
[
"functools.partial",
"music.music_util.get_file_info"
] |
[((418, 442), 'music.music_util.get_file_info', 'get_file_info', (['file_path'], {}), '(file_path)\n', (431, 442), False, 'from music.music_util import file_detail, get_file_info\n'), ((516, 578), 'functools.partial', 'partial', (['file_detail', 'self.title', 'genre', 'artist', 'album', 'length'], {}), '(file_detail, self.title, genre, artist, album, length)\n', (523, 578), False, 'from functools import partial\n')]
|
from numpy import random, pi
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
Ntrials, Nhits = 1_000_000, 0
for n in range(Ntrials):
    x, y, z = random.uniform(-1, 1, 3) # draw 3 samples, each uniformly distributed over (-1,1)
if x**2 + y**2 + z**2 < 1:
Nhits += 1
print("Monte Carlo estimator of V(3): %.5f" % ((2**3)*(Nhits / Ntrials)))
print("Actual value of V(3) up to 5 decimal digits: %.5f" % (4*pi/3))
print("The relative error is %.5f%%" % (100 * abs((2**3)*(Nhits / Ntrials) - (4*pi/3))))
|
[
"numpy.random.uniform"
] |
[((171, 195), 'numpy.random.uniform', 'random.uniform', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (185, 195), False, 'from numpy import random, pi\n')]
|
'''InputHandler is an extension to add "Input commands" to bottery views
Usage:
On an Application:
app = App()
input = InputHandler(app)
On Patterns:
hang_user_pattern_input = HangUserPattern(input_example)
input.set_hang(hang_user_pattern_input, 'project')
patterns = [
hang_user_pattern_input,
        Pattern('project', input_example),
    ]
On a View:
# This block will be executed on first call
if not app.input_queue:
app.hang_in(message)
app.input(message, 'name', 'Enter Project Name:')
app.input(message, 'language', 'Enter Project Language: ',
['python2', 'python3'])
return app.print_next_input(message) # To return first message of queue
# On next calls, this block wil be executed
stay, response = app.next_input_queue(message)
if stay:
return response # Contains message from Input Command
# Queue ended, now you could save resulting Project and exit view
    app.hang_out(message, 'project')  # hang_out needs the hang pattern (see set_hang above)
return 'Project created: ' + response # Response contains user entries
'''
from collections import OrderedDict
from gettext import gettext as _  # assumed: `_` is used below but was never defined in the original
class InputHandler:
'''Adds Input Command to views'''
def __init__(self, app):
self.hang = dict()
self.input_queue = dict()
self.user_inputs = dict()
self._app = app
def set_hang(self, hang, hang_pattern):
self.hang[hang_pattern] = hang
def hang_in(self, message):
'''Used in conjunction with HangUserPattern. Mantains app on the view'''
self.hang[message.text].activate_hang(message)
self.user_inputs[message.user.id] = dict()
def hang_out(self, message, hang_pattern):
self.hang[hang_pattern].deactivate_hang(message)
self.user_inputs.pop(message.user.id, None)
def input(self, message, name, prompt, valid_values=None):
'''Adds a input message to the dict'''
if not self.input_queue.get(message.user.id, None):
self.input_queue[message.user.id] = OrderedDict()
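            # OrderedDict keeps prompts in insertion order; next_input_queue
            # consumes them FIFO via popitem(last=False).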
self.user_inputs[message.user.id] = dict()
user_input_dict = self.input_queue[message.user.id]
user_input_dict[name] = (prompt, valid_values)
def print_next_input(self, message):
user_input_dict = self.input_queue[message.user.id]
if not user_input_dict:
return _('No messages on the input command queue')
actual_prompt, _valid_values = list(user_input_dict.values())[0]
print(actual_prompt)
return actual_prompt
def next_input_queue(self, message):
'''Validates user input, saves user input.
Returns a "stay" flag to say if views mantains hang or not
Returns actual prompt OR
a dict of name:user_input on end of prompts'''
user_input_dict = self.input_queue[message.user.id]
if not user_input_dict:
return False, _('No messages on the input command queue')
_actual_prompt, valid_values = list(user_input_dict.values())[0]
if valid_values:
if message.text not in valid_values:
                # if validation fails, stay on the current item
return True, _('Enter a Valid Value: ') + ' '.join(valid_values)
user_inputs = self.user_inputs[message.user.id]
name = list(user_input_dict.keys())[0]
user_inputs[name] = message.text
user_input_dict.popitem(last=False)
if not user_input_dict:
# Ended! Return what user entered
return False, user_inputs
next_prompt, valid_values = list(user_input_dict.values())[0]
if valid_values:
next_prompt = next_prompt + ' - ' + ' '.join(valid_values)
return True, next_prompt
|
[
"collections.OrderedDict"
] |
[((1981, 1994), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1992, 1994), False, 'from collections import OrderedDict\n')]
|
from ipywidgets.widgets import HTML, Button
from tornado.ioloop import IOLoop
from IPython import display
import time
from traitlets import Bool
class LoopDecorator(object):
""" Runs the wrapped function in a certain interval until the user presses
the stop button. """
def __init__(self, button, interval=1.0):
self.button = button
self.interval = interval
def __call__(self, func):
display.display(self.button)
self.last_time = None
self.wrapped = func
self.iterate()
return None
def iterate(self):
if self.button.clicked:
return
loop = IOLoop.current()
t = time.time()
self.wrapped()
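        # schedule the next call: subtract this call's runtime from the interval, clamped to [0.01, interval] seconds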
wait = self.interval
        wait = min(self.interval - (time.time() - t), wait)
wait = max(0.01, wait)
wait = min(self.interval, wait)
loop.call_later(wait, self.iterate)
class StopButton(Button):
""" A modified Button which as a "clicked" Attribute. """
clicked = Bool(False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.clicked = False
self.on_click(self._on_click)
def _on_click(self, *args, **kwargs):
self.disabled = True
self.clicked = True
def loop(self, interval=1.0):
""" Returns a loop decorator with this button. """
decorator = LoopDecorator(self, interval)
return decorator
|
[
"traitlets.Bool",
"tornado.ioloop.IOLoop.current",
"IPython.display.display",
"time.time"
] |
[((1031, 1042), 'traitlets.Bool', 'Bool', (['(False)'], {}), '(False)\n', (1035, 1042), False, 'from traitlets import Bool\n'), ((438, 466), 'IPython.display.display', 'display.display', (['self.button'], {}), '(self.button)\n', (453, 466), False, 'from IPython import display\n'), ((662, 678), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (676, 678), False, 'from tornado.ioloop import IOLoop\n'), ((691, 702), 'time.time', 'time.time', ([], {}), '()\n', (700, 702), False, 'import time\n'), ((790, 801), 'time.time', 'time.time', ([], {}), '()\n', (799, 801), False, 'import time\n')]
|
import cv2
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
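# encode output.avi with the DIVX codec at 20 FPS, 640x480 (frame size must match what we write)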
out = cv2.VideoWriter('output.avi',fourcc,20.0,(640,480))
#cap.isOpened() returns True if the camera is available (or the file name is correct) and False otherwise
while cap.isOpened():
    ret, frame = cap.read()  # ret is True when a frame was grabbed successfully; frame holds the captured image
if ret:
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # would convert the frame to grayscale
#cv2.imshow('frame',frame)
out.write(frame)
cv2.imshow('video',frame)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
|
[
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.VideoWriter",
"cv2.destroyAllWindows"
] |
[((19, 38), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (35, 38), False, 'import cv2\n'), ((48, 79), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (70, 79), False, 'import cv2\n'), ((86, 141), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.avi"""', 'fourcc', '(20.0)', '(640, 480)'], {}), "('output.avi', fourcc, 20.0, (640, 480))\n", (101, 141), False, 'import cv2\n'), ((752, 775), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (773, 775), False, 'import cv2\n'), ((590, 616), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (600, 616), False, 'import cv2\n'), ((636, 652), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (647, 652), False, 'import cv2\n')]
|
# Author: <NAME>
# License: BSD
import warnings
from nilearn.input_data import NiftiMasker
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
from os.path import expanduser, join
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from joblib import Memory, dump
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from modl.datasets import fetch_adhd
from modl.decomposition.fmri import fMRIDictFact
from modl.decomposition.stability import mean_amari_discrepency
from modl.plotting.fmri import display_maps
from nilearn.datasets import fetch_atlas_smith_2009
from modl.utils.system import get_cache_dirs
batch_size = 200
learning_rate = .92
method = 'masked'
step_size = 0.01
reduction_ = 8
alpha = 1e-3
n_epochs = 4
verbose = 15
n_jobs = 70
smoothing_fwhm = 6
components_list = [20, 40, 80, 120, 200, 300, 500]
n_runs = 20
dict_init = fetch_atlas_smith_2009().rsn20
dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=2, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
mem = Memory(location=get_cache_dirs()[0])
masker = NiftiMasker(mask_img=mask).fit()
def fit_single(train_imgs, test_imgs, n_components, random_state):
dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
method=method,
step_size=step_size,
mask=mask,
memory=mem,
memory_level=2,
verbose=verbose,
n_epochs=n_epochs,
n_jobs=1,
random_state=random_state,
n_components=n_components,
positive=True,
learning_rate=learning_rate,
batch_size=batch_size,
reduction=reduction_,
alpha=alpha,
callback=None,
)
dict_fact.fit(train_imgs, confounds=train_confounds)
score = dict_fact.score(test_imgs)
return dict_fact.components_, score
def fit_many_runs(train_imgs, test_imgs, components_list, n_runs=10, n_jobs=1):
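    # one fixed seed per run (all derived from seed 0) so the repeated fits are reproducible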
random_states = check_random_state(0).randint(0, int(1e7), size=n_runs)
cached_fit = mem.cache(fit_single)
res = Parallel(n_jobs=n_jobs)(delayed(cached_fit)(
train_imgs, test_imgs, n_components, random_state)
for n_components in components_list
for random_state in random_states
)
components, scores = zip(*res)
shape = (len(components_list), len(random_states))
components = np.array(components).reshape(shape).tolist()
scores = np.array(scores).reshape(shape).tolist()
discrepencies = []
var_discrepencies = []
best_components = []
for n_components, these_components, these_scores in zip(components_list,
components,
scores):
discrepency, var_discrepency = mean_amari_discrepency(
these_components)
best_estimator = these_components[np.argmin(these_scores)]
        discrepencies.append(discrepency)
var_discrepencies.append(var_discrepency)
best_components.append(best_estimator)
discrepencies = np.array(discrepencies)
var_discrepencies = np.array(var_discrepencies)
best_components = np.array(best_components)
components = best_components[np.argmin(discrepencies)]
return discrepencies, var_discrepencies, components
output_dir = expanduser('~/output_drago4/modl/fmri_stability2')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
discrepencies, var_discrepencies, components = fit_many_runs(
train_imgs, test_imgs,
components_list,
n_jobs=n_jobs,
n_runs=n_runs)
components_img = masker.inverse_transform(components)
components_img.to_filename(
join(output_dir, 'components.nii.gz'))
dump((components_list, discrepencies, var_discrepencies),
join(output_dir, 'discrepencies.pkl'))
fig = plt.figure()
display_maps(fig, components_img)
plt.savefig(join(output_dir, 'components.pdf'))
fig, ax = plt.subplots(1, 1)
ax.fill_between(components_list, discrepencies - var_discrepencies,
discrepencies + var_discrepencies, alpha=0.5)
ax.plot(components_list, discrepencies, marker='o')
ax.set_xlabel('Number of components')
ax.set_ylabel('Mean Amari discrepency')
sns.despine(fig)
fig.suptitle('Stability selection using DL')
plt.savefig(join(output_dir, 'discrepencies.pdf'))
|
[
"sklearn.utils.check_random_state",
"modl.decomposition.fmri.fMRIDictFact",
"sklearn.model_selection.train_test_split",
"numpy.argmin",
"matplotlib.pyplot.figure",
"modl.plotting.fmri.display_maps",
"os.path.join",
"os.path.exists",
"nilearn.datasets.fetch_atlas_smith_2009",
"nilearn.input_data.NiftiMasker",
"matplotlib.pyplot.subplots",
"modl.datasets.fetch_adhd",
"modl.decomposition.stability.mean_amari_discrepency",
"modl.utils.system.get_cache_dirs",
"os.makedirs",
"warnings.filterwarnings",
"seaborn.despine",
"numpy.array",
"joblib.Parallel",
"joblib.delayed",
"os.path.expanduser"
] |
[((93, 155), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (116, 155), False, 'import warnings\n'), ((1015, 1040), 'modl.datasets.fetch_adhd', 'fetch_adhd', ([], {'n_subjects': '(40)'}), '(n_subjects=40)\n', (1025, 1040), False, 'from modl.datasets import fetch_adhd\n'), ((1092, 1143), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(2)', 'random_state': '(0)'}), '(data, test_size=2, random_state=0)\n', (1108, 1143), False, 'from sklearn.model_selection import train_test_split\n'), ((3957, 4007), 'os.path.expanduser', 'expanduser', (['"""~/output_drago4/modl/fmri_stability2"""'], {}), "('~/output_drago4/modl/fmri_stability2')\n", (3967, 4007), False, 'from os.path import expanduser, join\n'), ((4456, 4468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4466, 4468), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4502), 'modl.plotting.fmri.display_maps', 'display_maps', (['fig', 'components_img'], {}), '(fig, components_img)\n', (4481, 4502), False, 'from modl.plotting.fmri import display_maps\n'), ((4561, 4579), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4573, 4579), True, 'import matplotlib.pyplot as plt\n'), ((4840, 4856), 'seaborn.despine', 'sns.despine', (['fig'], {}), '(fig)\n', (4851, 4856), True, 'import seaborn as sns\n'), ((973, 997), 'nilearn.datasets.fetch_atlas_smith_2009', 'fetch_atlas_smith_2009', ([], {}), '()\n', (995, 997), False, 'from nilearn.datasets import fetch_atlas_smith_2009\n'), ((1425, 1779), 'modl.decomposition.fmri.fMRIDictFact', 'fMRIDictFact', ([], {'smoothing_fwhm': 'smoothing_fwhm', 'method': 'method', 'step_size': 'step_size', 'mask': 'mask', 'memory': 'mem', 'memory_level': '(2)', 'verbose': 'verbose', 'n_epochs': 'n_epochs', 'n_jobs': '(1)', 'random_state': 'random_state', 'n_components': 'n_components', 'positive': '(True)', 'learning_rate': 'learning_rate', 'batch_size': 'batch_size', 'reduction': 'reduction_', 'alpha': 'alpha', 'callback': 'None'}), '(smoothing_fwhm=smoothing_fwhm, method=method, step_size=\n step_size, mask=mask, memory=mem, memory_level=2, verbose=verbose,\n n_epochs=n_epochs, n_jobs=1, random_state=random_state, n_components=\n n_components, positive=True, learning_rate=learning_rate, batch_size=\n batch_size, reduction=reduction_, alpha=alpha, callback=None)\n', (1437, 1779), False, 'from modl.decomposition.fmri import fMRIDictFact\n'), ((3702, 3725), 'numpy.array', 'np.array', (['discrepencies'], {}), '(discrepencies)\n', (3710, 3725), True, 'import numpy as np\n'), ((3750, 3777), 'numpy.array', 'np.array', (['var_discrepencies'], {}), '(var_discrepencies)\n', (3758, 3777), True, 'import numpy as np\n'), ((3800, 3825), 'numpy.array', 'np.array', (['best_components'], {}), '(best_components)\n', (3808, 3825), True, 'import numpy as np\n'), ((4016, 4042), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (4030, 4042), False, 'import os\n'), ((4048, 4071), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4059, 4071), False, 'import os\n'), ((4308, 4345), 'os.path.join', 'join', (['output_dir', '"""components.nii.gz"""'], {}), "(output_dir, 'components.nii.gz')\n", (4312, 4345), False, 'from os.path import expanduser, join\n'), ((4410, 4447), 'os.path.join', 'join', (['output_dir', '"""discrepencies.pkl"""'], {}), "(output_dir, 'discrepencies.pkl')\n", (4414, 4447), False, 'from os.path import expanduser, join\n'), ((4515, 4549), 'os.path.join', 'join', (['output_dir', '"""components.pdf"""'], {}), "(output_dir, 'components.pdf')\n", (4519, 4549), False, 'from os.path import expanduser, join\n'), ((4914, 4951), 'os.path.join', 'join', (['output_dir', '"""discrepencies.pdf"""'], {}), "(output_dir, 'discrepencies.pdf')\n", (4918, 4951), False, 'from os.path import expanduser, join\n'), ((1307, 1333), 'nilearn.input_data.NiftiMasker', 'NiftiMasker', ([], {'mask_img': 'mask'}), '(mask_img=mask)\n', (1318, 1333), False, 'from nilearn.input_data import NiftiMasker\n'), ((2600, 2623), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (2608, 2623), False, 'from joblib import Parallel, delayed\n'), ((3417, 3457), 'modl.decomposition.stability.mean_amari_discrepency', 'mean_amari_discrepency', (['these_components'], {}), '(these_components)\n', (3439, 3457), False, 'from modl.decomposition.stability import mean_amari_discrepency\n'), ((3859, 3883), 'numpy.argmin', 'np.argmin', (['discrepencies'], {}), '(discrepencies)\n', (3868, 3883), True, 'import numpy as np\n'), ((1277, 1293), 'modl.utils.system.get_cache_dirs', 'get_cache_dirs', ([], {}), '()\n', (1291, 1293), False, 'from modl.utils.system import get_cache_dirs\n'), ((2494, 2515), 'sklearn.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (2512, 2515), False, 'from sklearn.utils import check_random_state\n'), ((3513, 3536), 'numpy.argmin', 'np.argmin', (['these_scores'], {}), '(these_scores)\n', (3522, 3536), True, 'import numpy as np\n'), ((2624, 2643), 'joblib.delayed', 'delayed', (['cached_fit'], {}), '(cached_fit)\n', (2631, 2643), False, 'from joblib import Parallel, delayed\n'), ((2985, 3005), 'numpy.array', 'np.array', (['components'], {}), '(components)\n', (2993, 3005), True, 'import numpy as np\n'), ((3043, 3059), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3051, 3059), True, 'import numpy as np\n')]
|
"""Some additional filters that don't belong to any specific category."""
import json
from typing import Any
from typing import Optional
from typing import Mapping
from liquid.context import get_item
from liquid.filter import liquid_filter
from liquid.filter import with_context
from liquid.filter import with_environment
from liquid import Environment
from liquid import Context
class JSON:
"""Serialize objects as a JSON (JavaScript Object Notation) formatted string.
Args:
default: A 'default' function passed to json.dumps. This function is called
in the event that the JSONEncoder does not know how to serialize an object.
"""
name = "json"
def __init__(self, default: Any = None):
self.default = default
@liquid_filter
def __call__(self, obj: object) -> str:
return json.dumps(obj, default=self.default)
@with_context
@with_environment
class Translate:
"""Replace translation keys with strings for the current locale.
Tries to read the locale from the current template context, falling back to
"default" if the key "locale" does not exist.
Args:
locales: A mapping of locale name to translation key mapping. If locales
is `None`, the default, the translation key will be returned unchanged.
"""
name = "t"
def __init__(self, locales: Optional[Mapping[str, Mapping[str, object]]] = None):
self.locales: Mapping[str, Mapping[str, object]] = locales or {}
@liquid_filter
def __call__(
self,
key: object,
*,
context: Context,
environment: Environment,
**kwargs: Any,
) -> str:
locale = context.resolve("locale", default="default")
translations: Mapping[str, object] = self.locales.get(locale, {})
key = str(key)
path = key.split(".")
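        # dotted keys are split so nested mappings can be addressed, e.g. "a.b" -> translations["a"]["b"]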
val = get_item(translations, *path, default=key) # type: ignore
return environment.from_string(val).render(**kwargs)
|
[
"liquid.context.get_item",
"json.dumps"
] |
[((846, 883), 'json.dumps', 'json.dumps', (['obj'], {'default': 'self.default'}), '(obj, default=self.default)\n', (856, 883), False, 'import json\n'), ((1881, 1923), 'liquid.context.get_item', 'get_item', (['translations', '*path'], {'default': 'key'}), '(translations, *path, default=key)\n', (1889, 1923), False, 'from liquid.context import get_item\n')]
|
from functools import wraps
from .helper import abline
from . import plt
def can_set_title(function):
@wraps(function)
def set_title(*args, **kwargs):
title = kwargs.pop('title', None)
r = function(*args, **kwargs)
if title:
ax = plt.gca()
ax.set_title(title)
return r
return set_title
def can_set_xlabel(function):
@wraps(function)
def set_xlabel(*args, **kwargs):
xlabel = kwargs.pop('xlabel', None)
r = function(*args, **kwargs)
if xlabel:
ax = plt.gca()
ax.set_xlabel(xlabel)
return r
return set_xlabel
def can_set_ylabel(function):
@wraps(function)
def set_ylabel(*args, **kwargs):
ylabel = kwargs.pop('ylabel', None)
r = function(*args, **kwargs)
if ylabel:
ax = plt.gca()
ax.set_ylabel(ylabel)
return r
return set_ylabel
def can_create_figure(function):
@wraps(function)
def create_figure(*args, **kwargs):
if not kwargs.get('ax'):
figsize = kwargs.pop('figsize', None)
_, kwargs['ax'] = plt.subplots(figsize=figsize)
return function(*args, **kwargs)
return create_figure
def can_set_equal_scale(function):
@wraps(function)
def set_equal_scale(*args, **kwargs):
equal_scale = kwargs.pop('equal_scale', False)
r = function(*args, **kwargs)
if equal_scale:
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
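            # expand both axes to the union of their ranges so one data unit spans the same length on x and y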
new_min = min(xlim[0], ylim[0])
new_max = max(xlim[1], ylim[1])
assert (new_min, new_max) == ax.set_xlim(new_min, new_max)
assert (new_min, new_max) == ax.set_ylim(new_min, new_max)
return r
return set_equal_scale
def can_add_abline(function):
@wraps(function)
def _abline(*args, **kwargs):
add_abline = kwargs.pop('abline', False)
r = function(*args, **kwargs)
if add_abline:
abline()
return r
return _abline
def tight_layout(function):
@wraps(function)
def tight_layout(*args, **kwargs):
r = function(*args, **kwargs)
plt.tight_layout()
return r
return tight_layout
|
[
"functools.wraps"
] |
[((109, 124), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (114, 124), False, 'from functools import wraps\n'), ((393, 408), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (398, 408), False, 'from functools import wraps\n'), ((684, 699), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (689, 699), False, 'from functools import wraps\n'), ((978, 993), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (983, 993), False, 'from functools import wraps\n'), ((1286, 1301), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (1291, 1301), False, 'from functools import wraps\n'), ((1866, 1881), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (1871, 1881), False, 'from functools import wraps\n'), ((2118, 2133), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (2123, 2133), False, 'from functools import wraps\n')]
|
import os
import json
import codingame
import discord
from discord.ext import commands
with open("./config/config.json", "r") as cjson:
config = json.load(cjson)
with open("./config/db.json", "r") as dbjson:
db = json.load(dbjson)
intents = discord.Intents.default()
bot = commands.Bot(command_prefix=config["prefix"], intents=intents)
bot.config = config
bot.codingame_client = codingame.Client(is_async=True)
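# auto-load every cog module found in ./cogs (dropping the .py suffix)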
for file in os.listdir('cogs'):
if file.endswith('.py'):
bot.load_extension(f"cogs.{file[:-3]}")
bot.run(config["token"])
|
[
"codingame.Client",
"discord.Intents.default",
"json.load",
"discord.ext.commands.Bot",
"os.listdir"
] |
[((252, 277), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (275, 277), False, 'import discord\n'), ((284, 346), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': "config['prefix']", 'intents': 'intents'}), "(command_prefix=config['prefix'], intents=intents)\n", (296, 346), False, 'from discord.ext import commands\n'), ((391, 422), 'codingame.Client', 'codingame.Client', ([], {'is_async': '(True)'}), '(is_async=True)\n', (407, 422), False, 'import codingame\n'), ((436, 454), 'os.listdir', 'os.listdir', (['"""cogs"""'], {}), "('cogs')\n", (446, 454), False, 'import os\n'), ((151, 167), 'json.load', 'json.load', (['cjson'], {}), '(cjson)\n', (160, 167), False, 'import json\n'), ((223, 240), 'json.load', 'json.load', (['dbjson'], {}), '(dbjson)\n', (232, 240), False, 'import json\n')]
|
import time
import datetime
from Status.logList import log
from Message.sendEmail import send_email
from Message.sendMessage import send_message
from Scheduler.dataAnalysis import analysis
from Update.getData import getCurrentData_torxiong
def getTime():
    # time.localtime(time.time()) returns a struct_time with these fields:
    # tm_sec    seconds, range [0, 59]
    # tm_min    minutes, range [0, 59]
    # tm_hour   hours, range [0, 23]
    # tm_mday   day of the month, range [1, 31]
    # tm_mon    month, range [1, 12] in Python (the C struct counts from 0)
    # tm_year   full year in Python (the C struct stores year - 1900)
    # tm_wday   weekday, range [0, 6] with 0 = Monday
    # tm_yday   day of the year, range [1, 366] in Python (the C struct uses [0, 365])
    # tm_isdst  DST flag: positive when DST is in effect, 0 when not, negative when unknown
return time.localtime(time.time())
def dormancy(to_time):
pass
def min_sleep(startTime, endTime):
    '''Sleep from startTime until endTime (both "%Y-%m-%d %H:%M" strings)'''
    # normalize the format by appending a seconds field
    startTime1 = startTime + ':00'
    endTime1 = endTime + ':00'
    # parse into datetime objects
startTime2 = datetime.datetime.strptime(startTime1, "%Y-%m-%d %H:%M:%S")
endTime2 = datetime.datetime.strptime(endTime1, "%Y-%m-%d %H:%M:%S")
    seconds = (endTime2 - startTime2).seconds
    # .seconds only covers the hours/minutes/seconds part of the delta and ignores whole
    # days, so it is wrong whenever the two timestamps fall on different days
    total_seconds = (endTime2 - startTime2).total_seconds()
    # .total_seconds() gives the exact difference, converted to seconds
# print(total_seconds)
# mins = total_seconds / 60
log.update("(子线程:巡航模块):即将休眠,将于{}重新工作".format(endTime2))
time.sleep(total_seconds)
return True
# return int(mins)
# if __name__ == "__main__":
# startTime_1 = '2019-07-28 00:00'
# endTime_1 = '2019-07-29 00:00'
# fenNum = minNums(startTime_1, endTime_1)
# print(fenNum)
def time_in_work():
    '''Return whether the market is currently open'''
    # trading-session boundary times
morning_start_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '9:30', '%Y-%m-%d%H:%M')
morning_end_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '11:30', '%Y-%m-%d%H:%M')
afternoon_start_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '13:00', '%Y-%m-%d%H:%M')
afternoon_end_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '15:00', '%Y-%m-%d%H:%M')
    # current time
    now_time = datetime.datetime.now()
    # check whether the current time falls inside a trading session
if morning_end_time > now_time > morning_start_time:
return True
elif afternoon_end_time > now_time > afternoon_start_time:
return True
elif afternoon_start_time > now_time > morning_end_time:
return "REST"
else:
return False
def daily_tracking(stock_code, mins):  # stock code and polling frequency in minutes
now = time.localtime(time.time())
log.update("(子线程:巡航模块):今日任务初始化成功")
    if now.tm_wday < 5:  # weekday (tm_wday: 0 = Monday, ..., 6 = Sunday)
log.update("(子线程:巡航模块):当前为工作日")
if time_in_work() == "REST": # 午休时间
log.update("(子线程:巡航模块):当前已到午休时间")
now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
end_time = "{}-{}-{} 13:00".format(now.tm_year, now.tm_mon, now.tm_mday)
min_sleep(now_str_time, end_time)
        elif time_in_work():  # market is open
log.update("(子线程:巡航模块):当前为正常交易时间")
log.update("(子线程:巡航模块):启用数据获取模块Update.getData与数据分析模块Scheduler.dataAnalyse")
log.update("(子线程:巡航模块):当前持续监测中,数据获取频率:{}分钟/次".format(mins))
while time_in_work():
data = getCurrentData_torxiong(stock_code)
analysis_result = analysis(data)
if analysis_result:
send_message(analysis_result)
# if email_title:
# send_email(email_title, email_html)
                time.sleep(60 * mins)  # sleep for mins minutes before the next poll
        elif now.tm_hour > 15:  # after 16:00, market closed
log.update("(子线程:巡航模块):结束休眠,正在等待开盘")
time.sleep(60 * 5)
        elif now.tm_hour < 9:  # before today's open
log.update("(子线程:巡航模块):等待开市")
now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday)
min_sleep(now_str_time, end_time)
        else:  # already closed for the day
log.update("(子线程:巡航模块):已休市")
now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday+1)
min_sleep(now_str_time, end_time)
    else:  # weekend
log.update("(子线程:巡航模块):今日为周六")
        if now.tm_wday == 5:  # Saturday
now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday+2)
min_sleep(now_str_time, end_time)
        if now.tm_wday == 6:  # Sunday
log.update("(子线程:巡航模块):今日为周五")
now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday+1)
min_sleep(now_str_time, end_time)
def timing(stock_code):
while True:
daily_tracking(stock_code, 5)
|
[
"Message.sendMessage.send_message",
"Scheduler.dataAnalysis.analysis",
"time.time",
"time.sleep",
"datetime.datetime.strptime",
"Status.logList.log.update",
"datetime.datetime.now",
"Update.getData.getCurrentData_torxiong"
] |
[((1031, 1090), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['startTime1', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(startTime1, '%Y-%m-%d %H:%M:%S')\n", (1057, 1090), False, 'import datetime\n'), ((1106, 1163), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['endTime1', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(endTime1, '%Y-%m-%d %H:%M:%S')\n", (1132, 1163), False, 'import datetime\n'), ((1498, 1523), 'time.sleep', 'time.sleep', (['total_seconds'], {}), '(total_seconds)\n', (1508, 1523), False, 'import time\n'), ((2298, 2321), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2319, 2321), False, 'import datetime\n'), ((2714, 2748), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):今日任务初始化成功"""'], {}), "('(子线程:巡航模块):今日任务初始化成功')\n", (2724, 2748), False, 'from Status.logList import log\n'), ((814, 825), 'time.time', 'time.time', ([], {}), '()\n', (823, 825), False, 'import time\n'), ((2697, 2708), 'time.time', 'time.time', ([], {}), '()\n', (2706, 2708), False, 'import time\n'), ((2794, 2825), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):当前为工作日"""'], {}), "('(子线程:巡航模块):当前为工作日')\n", (2804, 2825), False, 'from Status.logList import log\n'), ((4548, 4578), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):今日为周六"""'], {}), "('(子线程:巡航模块):今日为周六')\n", (4558, 4578), False, 'from Status.logList import log\n'), ((2882, 2915), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):当前已到午休时间"""'], {}), "('(子线程:巡航模块):当前已到午休时间')\n", (2892, 2915), False, 'from Status.logList import log\n'), ((4905, 4935), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):今日为周五"""'], {}), "('(子线程:巡航模块):今日为周五')\n", (4915, 4935), False, 'from Status.logList import log\n'), ((3209, 3243), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):当前为正常交易时间"""'], {}), "('(子线程:巡航模块):当前为正常交易时间')\n", (3219, 3243), False, 'from Status.logList import log\n'), ((3256, 3331), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):启用数据获取模块Update.getData与数据分析模块Scheduler.dataAnalyse"""'], {}), "('(子线程:巡航模块):启用数据获取模块Update.getData与数据分析模块Scheduler.dataAnalyse')\n", (3266, 3331), False, 'from Status.logList import log\n'), ((3461, 3496), 'Update.getData.getCurrentData_torxiong', 'getCurrentData_torxiong', (['stock_code'], {}), '(stock_code)\n', (3484, 3496), False, 'from Update.getData import getCurrentData_torxiong\n'), ((3531, 3545), 'Scheduler.dataAnalysis.analysis', 'analysis', (['data'], {}), '(data)\n', (3539, 3545), False, 'from Scheduler.dataAnalysis import analysis\n'), ((3740, 3761), 'time.sleep', 'time.sleep', (['(60 * mins)'], {}), '(60 * mins)\n', (3750, 3761), False, 'import time\n'), ((3823, 3859), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):结束休眠,正在等待开盘"""'], {}), "('(子线程:巡航模块):结束休眠,正在等待开盘')\n", (3833, 3859), False, 'from Status.logList import log\n'), ((3872, 3890), 'time.sleep', 'time.sleep', (['(60 * 5)'], {}), '(60 * 5)\n', (3882, 3890), False, 'import time\n'), ((1865, 1888), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1886, 1888), False, 'import datetime\n'), ((1978, 2001), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1999, 2001), False, 'import datetime\n'), ((2096, 2119), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2117, 2119), False, 'import datetime\n'), ((2212, 2235), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2233, 2235), False, 'import datetime\n'), ((3602, 3631), 'Message.sendMessage.send_message', 'send_message', (['analysis_result'], {}), '(analysis_result)\n', (3614, 3631), False, 'from Message.sendMessage import send_message\n'), ((3942, 3971), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):等待开市"""'], {}), "('(子线程:巡航模块):等待开市')\n", (3952, 3971), False, 'from Status.logList import log\n'), ((4250, 4278), 'Status.logList.log.update', 'log.update', (['"""(子线程:巡航模块):已休市"""'], {}), "('(子线程:巡航模块):已休市')\n", (4260, 4278), False, 'from Status.logList import log\n')]
|
"""
Create the component-specific README files by concatenating `description.md` and
a generated description of the metadata.
"""
import json
from pycldf.terms import Terms
from csvw.metadata import Table
from cldfspec.util import REPO_DIR
def run(args):
for p in REPO_DIR.joinpath('components').glob('*/*.json'):
readme = p.parent.joinpath('description.md').read_text(encoding='utf8')
cols = table2markdown(Table.fromvalue(json.loads(p.read_text(encoding='utf8'))))
p.parent.joinpath('README.md').write_text(readme + '\n' + cols, encoding='utf8')
def cardinality(col, term):
#
# FIXME: move to pycldf
#
res = None
if term:
# Make sure, cardinality is consistent with the ontology:
tcol = term.to_column()
res = term.cardinality
assert (res == 'multivalued' and tcol.separator) or \
(res == 'singlevalued' and not tcol.separator) or \
(res is None), 'y'
assert bool(col.separator) == bool(tcol.separator), 'x'
# Make sure, cardinality is consistent with separator spec:
card = col.common_props.get('dc:extent')
assert (card == 'multivalued' and col.separator) or \
(card == 'singlevalued' and not col.separator) or \
(card is None), 'z'
return res or card or 'unspecified'
def colrow(col, pk, TERMS):
dt = '`{}`'.format(col.datatype.base if col.datatype else 'string')
if col.separator:
dt = 'list of {} (separated by `{}`)'.format(dt, col.separator)
desc = col.common_props.get('dc:description', '').replace('\n', ' ')
term = None
if col.propertyUrl:
term = TERMS.by_uri.get(col.propertyUrl.uri)
card = cardinality(col, term)
if (not desc) and term:
desc = term.comment(one_line=True)
pk = pk or []
if col.name in pk:
desc = (desc + '<br>') if desc else desc
desc += 'Primary key'
if term and term.references:
desc = (desc + '<br>') if desc else desc
desc += 'References {}'.format(term.references)
return ' | '.join([
'[{}]({})'.format(col.name, col.propertyUrl)
if col.propertyUrl else '`{}`'.format(col.name),
dt,
card,
desc,
])
def table2markdown(table):
res = []
res.append('## [{}]({}): `{}`\n'.format(
table.common_props['dc:conformsTo'].split('#')[1],
table.common_props['dc:conformsTo'],
table.url.string,
))
if table.common_props.get('dc:description'):
res.append(table.common_props['dc:description'] + '\n')
res.append('Name/Property | Datatype | Cardinality | Description')
res.append(' --- | --- | --- | --- ')
TERMS = Terms(REPO_DIR / 'terms.rdf')
for col in table.tableSchema.columns:
res.append(colrow(col, table.tableSchema.primaryKey, TERMS))
return '\n'.join(res)
|
[
"cldfspec.util.REPO_DIR.joinpath",
"pycldf.terms.Terms"
] |
[((2706, 2735), 'pycldf.terms.Terms', 'Terms', (["(REPO_DIR / 'terms.rdf')"], {}), "(REPO_DIR / 'terms.rdf')\n", (2711, 2735), False, 'from pycldf.terms import Terms\n'), ((271, 302), 'cldfspec.util.REPO_DIR.joinpath', 'REPO_DIR.joinpath', (['"""components"""'], {}), "('components')\n", (288, 302), False, 'from cldfspec.util import REPO_DIR\n')]
|
import sqlite3
import sys
def get_pk_key(databasename, name, value):
connect_sqlite3 = sqlite3.connect(databasename)
cursor_sqlite3 = connect_sqlite3.cursor()
if name == "meals":
meal_name = ""
# 1) breakfast 2) brunch 3) lunch 4) supper
if int(value) == 1:
meal_name = "breakfast"
if int(value) == 2:
meal_name = "brunch"
if int(value) == 3:
meal_name = "lunch"
if int(value) == 4:
meal_name = "supper"
sqlite_insert_with_param = "SELECT meal_id FROM meals WHERE meal_name = '{}'".format(meal_name)
if name == "measures":
sqlite_insert_with_param = "SELECT measure_id FROM measures WHERE measure_name = '{}'".format(value)
if name == "ingredients":
sqlite_insert_with_param = "SELECT ingredient_id FROM ingredients WHERE ingredient_name like '%{}%'".format(value)
if name == "recipes":
sqlite_insert_with_param = "SELECT recipe_id FROM recipes WHERE recipe_name = '{}'".format(value)
cursor_sqlite3.execute(sqlite_insert_with_param)
results = cursor_sqlite3.fetchone()
if connect_sqlite3:
connect_sqlite3.close()
# meals_id doesn't exists
if results is None:
return 0
# meals_id
return results[0]
def validate_parameters(measure, ingredient, databasename):
# not pass
if not get_pk_key(databasename, "measures", measure):
print("The measure is not conclusive!")
return 0
# not pass
if not get_pk_key(databasename, "ingredients", ingredient):
print("The ingredient is not conclusive!")
return 0
# pass
return 1
def insert_value(name, databasename, value1, value2, meals):
try:
connect = sqlite3.connect(databasename)
connect.execute("PRAGMA foreign_keys = 1")
cursor = connect.cursor()
sqlite_insert_with_param = ""
if name == "measures":
sqlite_insert_with_param = "INSERT INTO measures (measure_name) VALUES ('{}');".format(value1)
if name == "ingredients":
sqlite_insert_with_param = "INSERT INTO ingredients (ingredient_name) VALUES ('{}');".format(value1)
if name == "meals":
sqlite_insert_with_param = "INSERT INTO meals (meal_name) VALUES ('{}');".format(value1)
if name == "recipes":
sqlite_insert_with_param = "INSERT INTO recipes (recipe_name, recipe_description) VALUES ('{}', '{}');"\
.format(value1, value2)
result = cursor.execute(sqlite_insert_with_param).lastrowid
connect.commit()
if name == "recipes":
for meal in meals:
# serve
meal_id = get_pk_key(databasename, "meals", meal)
sqlite_insert_with_param = "INSERT INTO serve (meal_id, recipe_id) VALUES ({}, {});"\
.format(meal_id, result)
cursor.execute(sqlite_insert_with_param)
connect.commit()
while True:
input_value = input("Input quantity of ingredient <press enter to stop>:").split()
if input_value == "" or len(input_value) == 0:
break
if len(input_value) == 2:
quantity = input_value[0]
measure = ""
ingredient = input_value[1]
if len(input_value) == 3:
quantity = input_value[0]
measure = input_value[1]
ingredient = input_value[2]
if validate_parameters(measure, ingredient, databasename) == 0:
continue
measure_id = get_pk_key(databasename, "measures", measure)
ingredients_id = get_pk_key(databasename, "ingredients", ingredient)
#quantity
sqlite_insert_with_param = "INSERT INTO quantity (quantity, recipe_id, measure_id, ingredient_id) " \
"VALUES ({}, {}, {}, {});".format(quantity, result, measure_id, ingredients_id)
print(" ### quantity " + sqlite_insert_with_param)
cursor.execute(sqlite_insert_with_param)
connect.commit()
except sqlite3.Error as error:
print("Failed to insert Python variable into sqlite table", error)
finally:
if connect:
connect.close()
def populate_db(data, databasename):
for name, keys in data.items():
for value in keys:
insert_value(name, databasename, value, "", "")
def create_db(arg):
try:
connect = sqlite3.connect(arg)
connect.execute("PRAGMA foreign_keys = 1")
cursor = connect.cursor()
cursor.execute("DROP TABLE IF EXISTS measures;")
cursor.execute('''CREATE TABLE IF NOT EXISTS measures (measure_id integer not null primary key autoincrement,
measure_name text unique);''')
cursor.execute("DROP TABLE IF EXISTS ingredients;")
cursor.execute('''CREATE TABLE IF NOT EXISTS ingredients (ingredient_id integer not null primary key autoincrement,
ingredient_name text not null unique);''')
cursor.execute("DROP TABLE IF EXISTS meals;")
cursor.execute('''CREATE TABLE IF NOT EXISTS meals (meal_id integer not null primary key autoincrement,
meal_name text not null unique);''')
cursor.execute("DROP TABLE IF EXISTS recipes;")
cursor.execute('''CREATE TABLE IF NOT EXISTS recipes (recipe_id integer not null primary key autoincrement,
recipe_name text not null,
recipe_description text);''')
cursor.execute("DROP TABLE IF EXISTS serve;")
cursor.execute('''CREATE TABLE IF NOT EXISTS serve (serve_id integer not null primary key autoincrement,
meal_id integer not null,
recipe_id integer not null,
FOREIGN KEY(meal_id) REFERENCES meals(meal_id),
FOREIGN KEY(recipe_id) REFERENCES recipes(recipe_id));''')
cursor.execute("DROP TABLE IF EXISTS quantity ;")
cursor.execute('''CREATE TABLE IF NOT EXISTS quantity (quantity_id integer not null primary key autoincrement,
quantity integer not null,
recipe_id integer not null,
measure_id integer not null,
ingredient_id integer not null,
FOREIGN KEY(recipe_id) REFERENCES recipes(recipe_id),
FOREIGN KEY(measure_id) REFERENCES measures(measure_id),
FOREIGN KEY(ingredient_id) REFERENCES ingredients(ingredient_id));''')
connect.commit()
except sqlite3.Error as error:
print("Failed to insert Python variable into sqlite table", error)
finally:
if connect:
connect.close()
def check_recipes(database, ingredients, meals):
"""
cur = conn.cursor()
cur.execute("SELECT * FROM tasks WHERE priority=?", (priority,))
rows = cur.fetchall()
for row in rows:
print(row)
"""
connect = sqlite3.connect(database)
cursor = connect.cursor()
ingredients_sql = ""
for value in ingredients:
ingredients_sql += "'" + value + "',"
if ingredients_sql != "":
ingredients_sql += "''"
meals_sql = ""
for value in meals:
meals_sql += "'" + value + "',"
if meals_sql != "":
meals_sql += "''"
# ----- first sql
sql_with_param = "SELECT recipes.recipe_id, recipes.recipe_name FROM recipes WHERE recipes.recipe_id IN ( \
SELECT serve.recipe_id FROM serve, meals WHERE \
serve.meal_id = meals.meal_id AND meals.meal_name IN ({}));" \
.format(meals_sql)
cursor.execute(sql_with_param)
recipes_list = []
rows = cursor.fetchall()
for row in rows:
recipes_list.append(row)
# ----- second sql
sql_with_param = "SELECT quantity.recipe_id,ingredients.ingredient_name FROM quantity, ingredients \
WHERE quantity.ingredient_id = ingredients.ingredient_id \
AND ingredients.ingredient_name in ({});" \
.format(ingredients_sql)
cursor.execute(sql_with_param)
ingredients_list = []
rows = cursor.fetchall()
for row in rows:
ingredients_list.append(row)
    length = len(ingredients)
    recipes = []
    for x, y in recipes_list:
        result = [i for i, j in ingredients_list if i == x]
        if len(result) == length:
            recipes.append(y)
if len(recipes) > 0:
print("Recipes selected for you: " + " and ".join(recipes))
else:
print("There are no such recipes in the database.")
connect.close()
if __name__ == "__main__":
data = {"meals": ("breakfast", "brunch", "lunch", "supper"),
"ingredients": ("milk", "cacao", "strawberry", "blueberry", "blackberry", "sugar"),
"measures": ("ml", "g", "l", "cup", "tbsp", "tsp", "dsp", "")}
args = sys.argv
if len(args) == 4:
# database name
database = str(args[1])
# --ingredients
if args[2].split("=")[0] == "--ingredients":
ingredients = args[2].split("=")[1].split(",")
#print("ingredients : " + str(ingredients))
# --meals
if args[3].split("=")[0] == "--meals":
meals = args[3].split("=")[1].split(",")
#print("meals : " + str(meals))
check_recipes(database, ingredients, meals)
elif len(args) == 2:
database = str(args[1])
create_db(database)
populate_db(data, database)
print("Pass the empty recipe name to exit.")
while True:
print("Recipe name:")
name = input()
if name == "":
break
print("Recipe description:")
description = input()
if description == "":
break
print("1) breakfast 2) brunch 3) lunch 4) supper")
print("When the dish can be served:")
serve = input().split()
insert_value("recipes", database, name, description, serve)
|
[
"sqlite3.connect"
] |
[((98, 127), 'sqlite3.connect', 'sqlite3.connect', (['databasename'], {}), '(databasename)\n', (113, 127), False, 'import sqlite3\n'), ((8216, 8241), 'sqlite3.connect', 'sqlite3.connect', (['database'], {}), '(database)\n', (8231, 8241), False, 'import sqlite3\n'), ((1833, 1862), 'sqlite3.connect', 'sqlite3.connect', (['databasename'], {}), '(databasename)\n', (1848, 1862), False, 'import sqlite3\n'), ((4973, 4993), 'sqlite3.connect', 'sqlite3.connect', (['arg'], {}), '(arg)\n', (4988, 4993), False, 'import sqlite3\n')]
|
"""Test code snippets embedded in the docs.
Reference: https://sybil.readthedocs.io/en/latest/use.html#pytest
"""
from doctest import NORMALIZE_WHITESPACE
from os import chdir, getcwd
from shutil import rmtree
from tempfile import mkdtemp
import pytest
from sybil import Sybil
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip
@pytest.fixture(scope="module")
def tempdir():
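    # run the doctests inside a throwaway working directory, restoring the previous cwd afterwards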
path = mkdtemp()
cwd = getcwd()
try:
chdir(path)
yield path
finally:
chdir(cwd)
rmtree(path)
pytest_collect_file = Sybil(
parsers=[DocTestParser(optionflags=NORMALIZE_WHITESPACE), skip],
pattern="*.rst",
fixtures=["tempdir"],
).pytest()
|
[
"sybil.parsers.doctest.DocTestParser",
"os.getcwd",
"pytest.fixture",
"tempfile.mkdtemp",
"shutil.rmtree",
"os.chdir"
] |
[((367, 397), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (381, 397), False, 'import pytest\n'), ((424, 433), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (431, 433), False, 'from tempfile import mkdtemp\n'), ((444, 452), 'os.getcwd', 'getcwd', ([], {}), '()\n', (450, 452), False, 'from os import chdir, getcwd\n'), ((470, 481), 'os.chdir', 'chdir', (['path'], {}), '(path)\n', (475, 481), False, 'from os import chdir, getcwd\n'), ((522, 532), 'os.chdir', 'chdir', (['cwd'], {}), '(cwd)\n', (527, 532), False, 'from os import chdir, getcwd\n'), ((541, 553), 'shutil.rmtree', 'rmtree', (['path'], {}), '(path)\n', (547, 553), False, 'from shutil import rmtree\n'), ((598, 645), 'sybil.parsers.doctest.DocTestParser', 'DocTestParser', ([], {'optionflags': 'NORMALIZE_WHITESPACE'}), '(optionflags=NORMALIZE_WHITESPACE)\n', (611, 645), False, 'from sybil.parsers.doctest import DocTestParser\n')]
|
import pyaem
import unittest
class TestHandlers(unittest.TestCase):
def test_auth_fail(self):
response = {
'http_code': 401,
'body': 'some body'
}
try:
pyaem.handlers.auth_fail(response)
self.fail('An exception should have been raised')
except pyaem.PyAemException as exception:
self.assertEqual(exception.code, 401)
self.assertEqual(exception.message, 'Authentication failed - incorrect username and/or password')
self.assertEqual(exception.response, response)
def test_method_not_allowed(self):
response = {
'http_code': 405,
'body': '<html><body><title>some error message</title></body></html>'
}
try:
pyaem.handlers.method_not_allowed(response)
self.fail('An exception should have been raised')
except pyaem.PyAemException as exception:
self.assertEqual(exception.code, 405)
self.assertEqual(exception.message, 'some error message')
self.assertEqual(exception.response, response)
def test_unexpected(self):
response = {
'http_code': 500,
'body': 'some unexpected server error'
}
try:
pyaem.handlers.unexpected(response)
self.fail('An exception should have been raised')
except pyaem.PyAemException as exception:
self.assertEqual(exception.code, 500)
self.assertEqual(
exception.message, 'Unexpected response\nhttp code: 500\nbody:\nsome unexpected server error')
self.assertEqual(exception.response, response)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pyaem.handlers.method_not_allowed",
"pyaem.handlers.auth_fail",
"pyaem.handlers.unexpected"
] |
[((1734, 1749), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1747, 1749), False, 'import unittest\n'), ((221, 255), 'pyaem.handlers.auth_fail', 'pyaem.handlers.auth_fail', (['response'], {}), '(response)\n', (245, 255), False, 'import pyaem\n'), ((798, 841), 'pyaem.handlers.method_not_allowed', 'pyaem.handlers.method_not_allowed', (['response'], {}), '(response)\n', (831, 841), False, 'import pyaem\n'), ((1304, 1339), 'pyaem.handlers.unexpected', 'pyaem.handlers.unexpected', (['response'], {}), '(response)\n', (1329, 1339), False, 'import pyaem\n')]
|
"""
Copyright 2021 Objectiv B.V.
"""
import typing
from typing import Dict, TypeVar, Tuple, List, Optional, Mapping, Hashable, Union
from sqlalchemy.engine import Dialect
from bach.expression import Expression, get_variable_tokens, VariableToken
from bach.types import value_to_dtype, get_series_type_from_dtype
from sql_models.util import quote_identifier
from sql_models.model import CustomSqlModelBuilder, SqlModel, Materialization, SqlModelSpec
from sql_models.constants import NotSet, not_set
T = TypeVar('T', bound='SqlModelSpec')
TBachSqlModel = TypeVar('TBachSqlModel', bound='BachSqlModel')
if typing.TYPE_CHECKING:
from bach.dataframe import DtypeNamePair
class BachSqlModel(SqlModel[T]):
"""
SqlModel with meta information about the columns that it produces.
This additional information needs to be specifically set at model instantiation, it cannot be deduced
from the sql.
The column information is not used for sql generation, but can be used by other code
interacting with the models. The information is not reflected in the `hash`, as it doesn't matter for
the purpose of sql generation.
"""
def __init__(
self,
model_spec: T,
placeholders: Mapping[str, Hashable],
references: Mapping[str, 'SqlModel'],
materialization: Materialization,
materialization_name: Optional[str],
column_expressions: Dict[str, Expression],
) -> None:
"""
Similar to :py:meth:`SqlModel.__init__()`. With one additional parameter: column_expressions,
a mapping between the names of the columns and expressions
that this model's query will return in the correct order.
"""
self._column_expressions = column_expressions
super().__init__(
model_spec=model_spec,
placeholders=placeholders,
references=references,
materialization=materialization,
materialization_name=materialization_name,
)
@property
def columns(self) -> Tuple[str, ...]:
""" Columns returned by the query of this model, in order."""
return tuple(self._column_expressions.keys())
@property
def column_expressions(self) -> Dict[str, Expression]:
""" Mapping containing the expression used per column."""
return self._column_expressions
def copy_override(
self: TBachSqlModel,
*,
model_spec: T = None,
placeholders: Mapping[str, Hashable] = None,
references: Mapping[str, 'SqlModel'] = None,
materialization: Materialization = None,
materialization_name: Union[Optional[str], NotSet] = not_set,
column_expressions: Dict[str, Expression] = None
) -> TBachSqlModel:
"""
Similar to super class's implementation, but adds optional 'columns' parameter
"""
materialization_name_value = (
self.materialization_name if materialization_name is not_set else materialization_name
)
return self.__class__(
model_spec=self.model_spec if model_spec is None else model_spec,
placeholders=self.placeholders if placeholders is None else placeholders,
references=self.references if references is None else references,
materialization=self.materialization if materialization is None else materialization,
materialization_name=materialization_name_value,
column_expressions=self.column_expressions if column_expressions is None else column_expressions
)
@classmethod
def from_sql_model(cls, sql_model: SqlModel, column_expressions: Dict[str, Expression]) -> 'BachSqlModel':
""" From any SqlModel create a BachSqlModel with the given column definitions. """
return cls(
model_spec=sql_model.model_spec,
placeholders=sql_model.placeholders,
references=sql_model.references,
materialization=sql_model.materialization,
materialization_name=sql_model.materialization_name,
column_expressions=column_expressions,
)
@classmethod
def _get_placeholders(
cls,
dialect: Dialect,
variables: Dict['DtypeNamePair', Hashable],
expressions: List[Expression],
) -> Dict[str, str]:
filtered_variables = filter_variables(variables, expressions)
return get_variable_values_sql(dialect, filtered_variables)
class SampleSqlModel(BachSqlModel):
"""
A custom SqlModel that simply does select * from a table. In addition to that, this class stores an
extra property: previous.
The previous property is not used in the generated sql at all, but can be used to track a previous
SqlModel. This is useful for how we implemented sampling, as that effectively inserts a sql-model in the
graph that has no regular reference to the previous node in the graph. By storing the previous node
here, we can later still reconstruct what the actual previous node was with some custom logic.
See the DataFrame.sample() implementation for more information
"""
def __init__(
self,
model_spec: T,
placeholders: Mapping[str, Hashable],
references: Mapping[str, 'SqlModel'],
materialization: Materialization,
materialization_name: Optional[str],
column_expressions: Dict[str, Expression],
previous: BachSqlModel,
) -> None:
self.previous = previous
super().__init__(
model_spec=model_spec,
placeholders=placeholders,
references=references,
materialization=materialization,
materialization_name=materialization_name,
column_expressions=column_expressions,
)
def copy_override(
self: 'SampleSqlModel',
*,
model_spec: T = None,
placeholders: Mapping[str, Hashable] = None,
references: Mapping[str, 'SqlModel'] = None,
materialization: Materialization = None,
materialization_name: Union[Optional[str], NotSet] = not_set,
column_expressions: Dict[str, Expression] = None,
previous: BachSqlModel = None
) -> 'SampleSqlModel':
"""
Similar to super class's implementation, but adds optional 'previous' parameter
"""
materialization_name_value = \
self.materialization_name if materialization_name is not_set else materialization_name
return self.__class__(
model_spec=self.model_spec if model_spec is None else model_spec,
placeholders=self.placeholders if placeholders is None else placeholders,
references=self.references if references is None else references,
materialization=self.materialization if materialization is None else materialization,
materialization_name=materialization_name_value,
column_expressions=self.column_expressions if column_expressions is None else column_expressions,
previous=self.previous if previous is None else previous
)
@staticmethod
def get_instance(
*,
dialect: Dialect,
table_name: str,
previous: BachSqlModel,
column_expressions: Dict[str, Expression],
name: str = 'sample_node',
) -> 'SampleSqlModel':
""" Helper function to instantiate a SampleSqlModel """
sql = 'SELECT * FROM {table_name}'
return SampleSqlModel(
model_spec=CustomSqlModelBuilder(sql=sql, name=name),
placeholders={'table_name': quote_identifier(dialect, table_name)},
references={},
materialization=Materialization.CTE,
materialization_name=None,
column_expressions=column_expressions,
previous=previous
)
class CurrentNodeSqlModel(BachSqlModel):
@staticmethod
def get_instance(
*,
dialect: Dialect,
name: str,
column_names: Tuple[str, ...],
column_exprs: List[Expression],
distinct: bool,
where_clause: Optional[Expression],
group_by_clause: Optional[Expression],
having_clause: Optional[Expression],
order_by_clause: Optional[Expression],
limit_clause: Expression,
previous_node: BachSqlModel,
variables: Dict['DtypeNamePair', Hashable],
) -> 'CurrentNodeSqlModel':
columns_str = ', '.join(expr.to_sql(dialect) for expr in column_exprs)
distinct_stmt = ' distinct ' if distinct else ''
where_str = where_clause.to_sql(dialect) if where_clause else ''
group_by_str = group_by_clause.to_sql(dialect) if group_by_clause else ''
having_str = having_clause.to_sql(dialect) if having_clause else ''
order_by_str = order_by_clause.to_sql(dialect) if order_by_clause else ''
limit_str = limit_clause.to_sql(dialect) if limit_clause else ''
sql = (
f"select {distinct_stmt}{columns_str} \n"
f"from {{{{prev}}}} \n"
f"{where_str} \n"
f"{group_by_str} \n"
f"{having_str} \n"
f"{order_by_str} \n"
f"{limit_str} \n"
)
# Add all references found in the Expressions to self.references
nullable_expressions = [where_clause, group_by_clause, having_clause, order_by_clause, limit_clause]
all_expressions = column_exprs + [expr for expr in nullable_expressions if expr is not None]
references = construct_references({'prev': previous_node}, all_expressions)
return CurrentNodeSqlModel(
model_spec=CustomSqlModelBuilder(sql=sql, name=name),
placeholders=BachSqlModel._get_placeholders(dialect, variables, all_expressions),
references=references,
materialization=Materialization.CTE,
materialization_name=None,
column_expressions={name: expr for name, expr in zip(column_names, column_exprs)},
)
def construct_references(
base_references: Mapping[str, 'SqlModel'],
expressions: List['Expression']
) -> Dict[str, 'SqlModel']:
"""
Create a dictionary of references consisting of the base_references and all references found in the
expressions.
Will raise an exception if there are references with the same name that reference different models.
"""
result: Dict[str, SqlModel] = {}
for expr in expressions:
references = expr.get_references()
_check_reference_conflicts(result, references)
result.update(references)
_check_reference_conflicts(base_references, result)
result.update(base_references)
return result
def _check_reference_conflicts(left: Mapping[str, 'SqlModel'], right: Mapping[str, 'SqlModel']) -> None:
"""
Util function: Check that two dicts with references don't have conflicting values.
"""
for ref_name, model in right.items():
if left.get(ref_name) not in (None, model):
# This should never happen, if other code doesn't mess up.
# We have this check as a backstop assertion to fail early
raise Exception(f'Encountered reference {ref_name} before, but with a different value: '
f'{left.get(ref_name)} != {model}')
def filter_variables(
variable_values: Dict['DtypeNamePair', Hashable],
filter_expressions: List['Expression']
) -> Dict['DtypeNamePair', Hashable]:
"""
Util function: Return a copy of the variable_values, with only the variables for which there is a
VariableToken in the filter_expressions.
"""
available_tokens = get_variable_tokens(filter_expressions)
dtype_names = {token.dtype_name for token in available_tokens}
return {dtype_name: value for dtype_name, value in variable_values.items() if dtype_name in dtype_names}
def get_variable_values_sql(
dialect: Dialect,
variable_values: Dict['DtypeNamePair', Hashable]
) -> Dict[str, str]:
"""
Take a dictionary with variable_values and return a dict with the full variable names and the values
as sql.
The sql assumes it will be used as values for SqlModels's placeholders. i.e. It will not be format
escaped, unlike if it would be used directly into SqlModel.sql in which case it would be escaped twice.
The sql will be proper sql tho, with identifier, strings, etc. properly quoted and escaped.
:param variable_values: Mapping of variable to value.
:return: Dictionary mapping full variable name to sql literal
"""
result = {}
for dtype_name, value in variable_values.items():
dtype, name = dtype_name
value_dtype = value_to_dtype(value)
if dtype != value_dtype: # should never happen
            raise Exception(f'Dtype of value {value}, {value_dtype} does not match registered dtype {dtype}')
placeholder_name = VariableToken.dtype_name_to_placeholder_name(dtype=dtype, name=name)
series_type = get_series_type_from_dtype(dtype)
expr = series_type.value_to_literal(dialect=dialect, value=value, dtype=dtype)
double_escaped_sql = expr.to_sql(dialect)
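        # value_to_literal() output is double-escaped for SqlModel formatting; two .format() passes strip both escape levels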
sql = double_escaped_sql.format().format()
result[placeholder_name] = sql
return result
|
[
"sql_models.util.quote_identifier",
"bach.types.get_series_type_from_dtype",
"sql_models.model.CustomSqlModelBuilder",
"bach.expression.get_variable_tokens",
"bach.expression.VariableToken.dtype_name_to_placeholder_name",
"typing.TypeVar",
"bach.types.value_to_dtype"
] |
[((505, 539), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""SqlModelSpec"""'}), "('T', bound='SqlModelSpec')\n", (512, 539), False, 'from typing import Dict, TypeVar, Tuple, List, Optional, Mapping, Hashable, Union\n'), ((556, 602), 'typing.TypeVar', 'TypeVar', (['"""TBachSqlModel"""'], {'bound': '"""BachSqlModel"""'}), "('TBachSqlModel', bound='BachSqlModel')\n", (563, 602), False, 'from typing import Dict, TypeVar, Tuple, List, Optional, Mapping, Hashable, Union\n'), ((11696, 11735), 'bach.expression.get_variable_tokens', 'get_variable_tokens', (['filter_expressions'], {}), '(filter_expressions)\n', (11715, 11735), False, 'from bach.expression import Expression, get_variable_tokens, VariableToken\n'), ((12738, 12759), 'bach.types.value_to_dtype', 'value_to_dtype', (['value'], {}), '(value)\n', (12752, 12759), False, 'from bach.types import value_to_dtype, get_series_type_from_dtype\n'), ((12947, 13015), 'bach.expression.VariableToken.dtype_name_to_placeholder_name', 'VariableToken.dtype_name_to_placeholder_name', ([], {'dtype': 'dtype', 'name': 'name'}), '(dtype=dtype, name=name)\n', (12991, 13015), False, 'from bach.expression import Expression, get_variable_tokens, VariableToken\n'), ((13038, 13071), 'bach.types.get_series_type_from_dtype', 'get_series_type_from_dtype', (['dtype'], {}), '(dtype)\n', (13064, 13071), False, 'from bach.types import value_to_dtype, get_series_type_from_dtype\n'), ((7537, 7578), 'sql_models.model.CustomSqlModelBuilder', 'CustomSqlModelBuilder', ([], {'sql': 'sql', 'name': 'name'}), '(sql=sql, name=name)\n', (7558, 7578), False, 'from sql_models.model import CustomSqlModelBuilder, SqlModel, Materialization, SqlModelSpec\n'), ((9671, 9712), 'sql_models.model.CustomSqlModelBuilder', 'CustomSqlModelBuilder', ([], {'sql': 'sql', 'name': 'name'}), '(sql=sql, name=name)\n', (9692, 9712), False, 'from sql_models.model import CustomSqlModelBuilder, SqlModel, Materialization, SqlModelSpec\n'), ((7620, 7657), 'sql_models.util.quote_identifier', 'quote_identifier', (['dialect', 'table_name'], {}), '(dialect, table_name)\n', (7636, 7657), False, 'from sql_models.util import quote_identifier\n')]
|
"""
Tetris in Python for Natural Selection
"""
from nat_selection.agent import Agent as NatAgent
from nat_selection.model import Model
import time
from enviorment.tetris import Tetris
env = Tetris({'reduced_grid': 0, 'reduced_shapes': 0}, 'Genetic algorithm')
def main():
agent = NatAgent(cores=4)
generations = 1000
#candidate = agent.train(generations)
candidate = Model([-0.8995652940240592, 0.06425443268253492, -0.3175211096545741, -0.292974392382306])
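    # hard-coded weights, presumably from a previous agent.train() run (see the commented line above)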
while True:
score = 0
state, reward, done, info = env.reset()
while not done:
action = candidate.best(env)
for a in action:
env.render()
#time.sleep(0.1)
state, reward, done, info = env.step(a)
score += reward
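        print(f'Game over, score: {score}')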
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
finally:
env.quit()
|
[
"nat_selection.agent.Agent",
"enviorment.tetris.Tetris",
"nat_selection.model.Model"
] |
[((191, 260), 'enviorment.tetris.Tetris', 'Tetris', (["{'reduced_grid': 0, 'reduced_shapes': 0}", '"""Genetic algorithm"""'], {}), "({'reduced_grid': 0, 'reduced_shapes': 0}, 'Genetic algorithm')\n", (197, 260), False, 'from enviorment.tetris import Tetris\n'), ((287, 304), 'nat_selection.agent.Agent', 'NatAgent', ([], {'cores': '(4)'}), '(cores=4)\n', (295, 304), True, 'from nat_selection.agent import Agent as NatAgent\n'), ((388, 483), 'nat_selection.model.Model', 'Model', (['[-0.8995652940240592, 0.06425443268253492, -0.3175211096545741, -\n 0.292974392382306]'], {}), '([-0.8995652940240592, 0.06425443268253492, -0.3175211096545741, -\n 0.292974392382306])\n', (393, 483), False, 'from nat_selection.model import Model\n')]
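To retrain rather than replay the hard-coded weights, the commented training path above can be restored — a sketch, assuming agent.train returns the fittest Model:

agent = NatAgent(cores=4)
candidate = agent.train(generations)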
|
#!/usr/bin/env python
#
# Copyright 2001-2004 by <NAME>. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the standalone Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests thread safety.
Copyright (C) 2001-2004 <NAME>. All Rights Reserved.
"""
import logging, logging.handlers, threading, random
import _thread
logging.raiseExceptions = 1
NUM_THREADS = 10
LOOP_COUNT = 10000
LOG_MESSAGES = [
(logging.DEBUG, "%3d This is a %s message", "debug"),
(logging.INFO, "%3d This is an %s message", "informational"),
(logging.WARNING, "%3d This is a %s message", "warning"),
(logging.ERROR, "%3d This is an %s message", "error"),
(logging.CRITICAL, "%3d This is a %s message", "critical"),
]
LOG_NAMES = ["A", "A.B", "A.B.C", "A.B.C.D"]
def doLog(num):
logger = logging.getLogger('')
logger.info("*** thread %s started (%d)", _thread.get_ident(), num)
for i in range(LOOP_COUNT):
logger = logging.getLogger(random.choice(LOG_NAMES))
a = random.choice(LOG_MESSAGES)
args = a[0:2] + (num,) + a[2:]
        logger.log(*args)
def test():
f = logging.Formatter("%(asctime)s %(levelname)-9s %(name)-8s %(thread)5s %(message)s")
root = logging.getLogger('')
root.setLevel(logging.DEBUG)
h = logging.FileHandler('thread.log', 'w')
root.addHandler(h)
h.setFormatter(f)
h = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
root.addHandler(h)
threads = []
for i in range(NUM_THREADS):
threads.append(threading.Thread(target=doLog, args=(len(threads),)))
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
test()
|
[
"_thread.get_ident",
"logging.FileHandler",
"logging.handlers.SocketHandler",
"random.choice",
"logging.Formatter",
"logging.getLogger"
] |
[((1788, 1809), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (1805, 1809), False, 'import logging, logging.handlers, threading, random\n'), ((2100, 2188), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-9s %(name)-8s %(thread)5s %(message)s"""'], {}), "(\n '%(asctime)s %(levelname)-9s %(name)-8s %(thread)5s %(message)s')\n", (2117, 2188), False, 'import logging, logging.handlers, threading, random\n'), ((2195, 2216), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (2212, 2216), False, 'import logging, logging.handlers, threading, random\n'), ((2258, 2296), 'logging.FileHandler', 'logging.FileHandler', (['"""thread.log"""', '"""w"""'], {}), "('thread.log', 'w')\n", (2277, 2296), False, 'import logging, logging.handlers, threading, random\n'), ((2350, 2441), 'logging.handlers.SocketHandler', 'logging.handlers.SocketHandler', (['"""localhost"""', 'logging.handlers.DEFAULT_TCP_LOGGING_PORT'], {}), "('localhost', logging.handlers.\n DEFAULT_TCP_LOGGING_PORT)\n", (2380, 2441), False, 'import logging, logging.handlers, threading, random\n'), ((1856, 1875), '_thread.get_ident', '_thread.get_ident', ([], {}), '()\n', (1873, 1875), False, 'import _thread\n'), ((1987, 2014), 'random.choice', 'random.choice', (['LOG_MESSAGES'], {}), '(LOG_MESSAGES)\n', (2000, 2014), False, 'import logging, logging.handlers, threading, random\n'), ((1949, 1973), 'random.choice', 'random.choice', (['LOG_NAMES'], {}), '(LOG_NAMES)\n', (1962, 1973), False, 'import logging, logging.handlers, threading, random\n')]
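The SocketHandler half of this harness needs a TCP log server on logging.handlers.DEFAULT_TCP_LOGGING_PORT (9020). A minimal companion receiver, sketched after the stdlib logging cookbook:

import logging, pickle, socketserver, struct

class LogRecordHandler(socketserver.StreamRequestHandler):
    def handle(self):
        while True:
            header = self.connection.recv(4)  # 4-byte big-endian length prefix
            if len(header) < 4:
                break
            length, = struct.unpack('>L', header)
            record = logging.makeLogRecord(pickle.loads(self.rfile.read(length)))
            logging.getLogger(record.name).handle(record)

socketserver.TCPServer(('localhost', 9020), LogRecordHandler).serve_forever()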
|
import json
import smtplib
from socket import gaierror
import datetime as dt
port = 2525
smtp_server = "smtp.mailtrap.io"
login = "eb<PASSWORD>" # paste your login generated by Mailtrap
password = "<PASSWORD>" # paste your password generated by Mailtrap
sender = "<EMAIL>"
receiver = "<EMAIL>"
const = """\
Subject: Received alarm
To: {receiver}
From: {sender}
"""
last_send = dt.datetime(2000, 1, 1)
def send_mail(msg: str):
try:
with smtplib.SMTP(smtp_server, port) as server:
server.login(login, password)
server.sendmail(sender, receiver, msg)
print('Sent')
except (gaierror, ConnectionRefusedError):
print('Failed to connect to the server. Bad connection settings?')
except smtplib.SMTPServerDisconnected:
print('Failed to connect to the server. Wrong user/password?')
except smtplib.SMTPException as e:
print('SMTP error occurred: ' + str(e))
def handle_json(msg):
structure = json.loads(msg)
global last_send
current_time = dt.datetime.now()
seconds_from_last_call = (current_time - last_send).seconds
    is_alarm = "alarm" in structure
    if not is_alarm or seconds_from_last_call < 60:
return
last_send = current_time
time = dt.datetime.strptime(structure["time"][:-3], "%Y-%m-%dT%H:%M:%S.%f").strftime("%Y-%m-%d %H:%M:%S")
    message = const.format(receiver=receiver, sender=sender) + "Alarm sent by " + str(structure["uid"]) + " at " + time + \
"\nPulse: " + str(int(structure["pulse"])) + \
"\nTemperature: " + str(round(structure["temp"], 2))
send_mail(message)
def lambda_handler(event, context):
print(event)
handle_json(json.dumps(event))
return {
'statusCode': 200,
'body': json.dumps(event)
}
|
[
"json.loads",
"smtplib.SMTP",
"json.dumps",
"datetime.datetime",
"datetime.datetime.strptime",
"datetime.datetime.now"
] |
[((384, 407), 'datetime.datetime', 'dt.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (395, 407), True, 'import datetime as dt\n'), ((979, 994), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (989, 994), False, 'import json\n'), ((1036, 1053), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1051, 1053), True, 'import datetime as dt\n'), ((1670, 1687), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (1680, 1687), False, 'import json\n'), ((1745, 1762), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (1755, 1762), False, 'import json\n'), ((457, 488), 'smtplib.SMTP', 'smtplib.SMTP', (['smtp_server', 'port'], {}), '(smtp_server, port)\n', (469, 488), False, 'import smtplib\n'), ((1265, 1333), 'datetime.datetime.strptime', 'dt.datetime.strptime', (["structure['time'][:-3]", '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(structure['time'][:-3], '%Y-%m-%dT%H:%M:%S.%f')\n", (1285, 1333), True, 'import datetime as dt\n')]
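A local smoke test with a made-up event payload — the keys mirror exactly what handle_json reads; every value is invented:

if __name__ == "__main__":
    lambda_handler({
        "uid": 7,
        "time": "2021-05-01T12:30:00.000000+00",  # last 3 chars are stripped before parsing
        "pulse": 72.4,
        "temp": 36.61,
        "alarm": True,
    }, None)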
|
#
# (C) 2014-2017 <NAME>
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import unittest
from ffws.parser import LightCycler480 as lc
class TestLightCycler480(unittest.TestCase):
def test_file_loader(self):
data = lc.file_loader("./raw/instruments/LightCycler480.txt")
plate1 = data["plates"][0]
self.assertEqual(plate1["plateId"], "Plate1")
self.assertEqual(plate1["layerIndex"], 0)
self.assertEqual(plate1["wellValues"][0], "NaN")
self.assertEqual(plate1["wellValues"][3], "NaN")
self.assertEqual(plate1["wellValues"][22], 9.83E-1)
self.assertEqual(plate1["wellValues"][361], 1.98E3)
self.assertEqual(plate1["wellValues"][383], "NaN")
|
[
"ffws.parser.LightCycler480.file_loader"
] |
[((260, 314), 'ffws.parser.LightCycler480.file_loader', 'lc.file_loader', (['"""./raw/instruments/LightCycler480.txt"""'], {}), "('./raw/instruments/LightCycler480.txt')\n", (274, 314), True, 'from ffws.parser import LightCycler480 as lc\n')]
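The same parser call outside unittest — an ad-hoc sketch, assuming the fixture file is present (the 384-well count is inferred from index 383 above):

from ffws.parser import LightCycler480 as lc

data = lc.file_loader("./raw/instruments/LightCycler480.txt")
for plate in data["plates"]:
    print(plate["plateId"], plate["layerIndex"], len(plate["wellValues"]))  # e.g. Plate1 0 384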
|
__author__ = 'Ranjith'
import os
from .utils import find_elements_for_element
from .actions import Action
from ..common_utils import get_user_home_dir
from ..exceptions import InvalidArgumentError
from ..downloader import download_url
class File(Action):
def __init__(self, driver, locator=None, element=None, wait_time=10, visible=False):
super().__init__(driver, locator, element, wait_time, visible)
self._downloaded_files = None
self._is_download_complete = False
def upload(self, filename=""):
if not os.path.isfile(filename):
raise InvalidArgumentError(f"{filename} is not a file. Please provide valid filename for upload")
self.element.send_keys(filename)
return self
#TODO: requires work - link not found, more testing
def download(self, directory=None, as_filename=None, asynch=True, unzip=False, del_zipfile=False, add_to_ospath=False):
#flag reset
self._is_download_complete = False
self._downloaded_files = None
if not directory:
directory = get_user_home_dir()
if not os.path.isdir(directory):
raise InvalidArgumentError(f"{directory} is not a directory. Please provide valid directory for download")
link = self.href
if link:
self._download_file(link, directory, as_filename, asynch, unzip, del_zipfile, add_to_ospath)
else:
links = self._get_child_links()
#TODO: not a good solution, think of a better way to resolve this
for l in links:
self._download_file(l, directory, None, asynch, unzip, del_zipfile, add_to_ospath)
return self
def _download_file(self, link, directory, as_filename, asynch, unzip, del_zipfile, add_to_ospath):
try:
download_url(url=link, to_dir=directory, download_filename=as_filename, asynch=asynch,
unzip=unzip, del_zipfile=del_zipfile, add_to_ospath=add_to_ospath, callback=self._callback)
except Exception:
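            # assumption: failures are deliberately swallowed so one bad link does not
            # abort the rest; _is_download_complete then stays False for this file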
pass
def _get_child_links(self):
child_links_xpath = "xpath=.//a"
links = find_elements_for_element(self.element, child_links_xpath)
return links
@property
def is_download_complete(self):
return self._is_download_complete
@property
def downloaded_files(self):
return self._downloaded_files
def _callback(self, files):
self._is_download_complete = True
if self._downloaded_files:
if files:
self._downloaded_files += files
else:
self._downloaded_files = files
|
[
"os.path.isdir",
"os.path.isfile"
] |
[((557, 581), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (571, 581), False, 'import os\n'), ((1125, 1149), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (1138, 1149), False, 'import os\n')]
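A hypothetical usage sketch — the locators and paths are made up; File only needs a selenium-style driver and an element exposing send_keys/href:

upload_input = File(driver, locator="xpath=//input[@type='file']")
upload_input.upload("/tmp/report.pdf")

link = File(driver, locator="xpath=//a[@class='download']")
link.download(directory="/tmp/downloads", asynch=False)
print(link.is_download_complete, link.downloaded_files)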
|
from django.db import models
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
class Channel(models.Model):
name = models.CharField(max_length=50)
slug = models.CharField(max_length=50)
BID_TYPES_CHOICES = (
("CPC", "CPC"),
("CPM", "CPM"),
("CPA", "CPA"),
("CPV", "CPV"),
("CPI", "CPI"),
)
bid_types = ArrayField(
models.CharField(choices=BID_TYPES_CHOICES, max_length=3, default="CPM"),
)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.bid_types = list(map(str.upper, self.bid_types))
super(Channel, self).save(*args, **kwargs)
class Campaign(models.Model):
name = models.CharField(max_length=50)
channel = models.ForeignKey(Channel, on_delete=models.CASCADE)
bid = models.FloatField(blank=True)
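    # note: blank=True without null=True allows empty form input, but the DB column still requires a value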
bid_type = models.CharField(max_length=3)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.bid_type = self.bid_type.upper()
if self.bid_type not in self.channel.bid_types:
raise ValidationError("Please, specify one of the bid types that belongs to the selected channel: {}"
.format(self.channel.bid_types))
super(Campaign, self).save(*args, **kwargs)
|
[
"django.db.models.CharField",
"django.db.models.FloatField",
"django.db.models.ForeignKey"
] |
[((177, 208), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (193, 208), False, 'from django.db import models\n'), ((220, 251), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (236, 251), False, 'from django.db import models\n'), ((815, 846), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (831, 846), False, 'from django.db import models\n'), ((861, 913), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Channel'], {'on_delete': 'models.CASCADE'}), '(Channel, on_delete=models.CASCADE)\n', (878, 913), False, 'from django.db import models\n'), ((924, 953), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)'}), '(blank=True)\n', (941, 953), False, 'from django.db import models\n'), ((969, 999), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (985, 999), False, 'from django.db import models\n'), ((441, 513), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'BID_TYPES_CHOICES', 'max_length': '(3)', 'default': '"""CPM"""'}), "(choices=BID_TYPES_CHOICES, max_length=3, default='CPM')\n", (457, 513), False, 'from django.db import models\n')]
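A quick sketch of the validation path (names and numbers are illustrative):

channel = Channel.objects.create(name="Search", slug="search", bid_types=["cpc", "cpm"])
# Channel.save() upper-cases the list, so channel.bid_types == ["CPC", "CPM"]

Campaign(name="Spring", channel=channel, bid=1.5, bid_type="cpm").save()  # ok
Campaign(name="Video", channel=channel, bid=0.2, bid_type="cpv").save()  # raises ValidationError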
|
from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.coding import Coding
from fhir.resources.identifier import Identifier
from fhir.resources.medication import Medication, MedicationIngredient
from fhir.resources.meta import Meta
from fhir.resources.quantity import Quantity
from fhir.resources.ratio import Ratio
from structlog import get_logger
from ahd2fhir.utils.fhir_utils import sha256_of_identifier
log = get_logger()
MEDICATION_PROFILE = (
"https://www.medizininformatik-initiative.de/"
+ "fhir/core/modul-medikation/StructureDefinition/Medication"
)
def get_medication_from_annotation(annotation):
medication = Medication.construct()
drug = annotation["drugs"][0]
if drug.get("ingredient") is None:
return None
# Medication Meta
medication.meta = Meta.construct()
medication.meta.profile = [MEDICATION_PROFILE]
# Medication Code
codes = []
if "Abdamed-Averbis" in str(drug["ingredient"]["source"]):
system = "http://fhir.de/CodeSystem/dimdi/atc"
codes = str(drug["ingredient"]["conceptId"]).split("-")
elif "RxNorm" in str(drug["ingredient"]["source"]):
system = "http://www.nlm.nih.gov/research/umls/rxnorm"
codes.append(str(drug["ingredient"]["conceptId"]))
else:
system = ""
med_code = CodeableConcept.construct()
med_code.coding = []
for code in codes:
med_coding = Coding.construct()
med_coding.system = system
med_coding.display = drug["ingredient"]["dictCanon"]
med_coding.code = code
med_code.coding.append(med_coding)
medication.code = med_code
# Medication Ingredient
ingredient = MedicationIngredient.construct()
medication.ingredient = [ingredient]
ingredient.itemCodeableConcept = CodeableConcept.construct()
ingredient.itemCodeableConcept.coding = [Coding()]
ingredient.itemCodeableConcept.coding[0].display = drug["ingredient"]["dictCanon"]
ingredient.itemCodeableConcept.coding[0].system = system
medication_identifier_system = (
"https://fhir.miracum.org/nlp/identifiers/"
+ f"{annotation['type'].replace('.', '-').lower()}"
)
medication.identifier = [Identifier()]
medication.identifier[0].value = drug["ingredient"]["dictCanon"]
medication.identifier[0].system = medication_identifier_system
medication.id = sha256_of_identifier(medication.identifier[0])
if (
"strength" not in drug
or drug["strength"] is None
or "value" not in drug["strength"]
or "unit" not in drug["strength"]
or drug["strength"]["value"] is None
or drug["strength"]["unit"] is None
):
return medication
strength = Ratio.construct()
numerator = Quantity.construct()
numerator.value = drug["strength"]["value"]
numerator.unit = drug["strength"]["unit"]
strength.numerator = numerator
medication.identifier[0].value = (
drug["ingredient"]["dictCanon"]
+ "_"
+ str(drug["strength"]["value"])
+ drug["strength"]["unit"]
)
medication.id = sha256_of_identifier(medication.identifier[0])
if "doseForm" not in annotation or annotation["doseForm"] is None:
return medication
denominator = Quantity.construct()
denominator.value = 1
denominator.unit = annotation["doseForm"]["dictCanon"]
strength.denominator = denominator
ingredient.strength = strength
medication.identifier[0].value = (
drug["ingredient"]["dictCanon"]
+ "_"
+ str(drug["strength"]["value"])
+ drug["strength"]["unit"]
+ "_"
+ annotation["doseForm"]["dictCanon"]
)
medication.id = sha256_of_identifier(medication.identifier[0])
return medication
|
[
"structlog.get_logger",
"ahd2fhir.utils.fhir_utils.sha256_of_identifier",
"fhir.resources.medication.Medication.construct",
"fhir.resources.coding.Coding.construct",
"fhir.resources.coding.Coding",
"fhir.resources.quantity.Quantity.construct",
"fhir.resources.medication.MedicationIngredient.construct",
"fhir.resources.meta.Meta.construct",
"fhir.resources.identifier.Identifier",
"fhir.resources.ratio.Ratio.construct",
"fhir.resources.codeableconcept.CodeableConcept.construct"
] |
[((441, 453), 'structlog.get_logger', 'get_logger', ([], {}), '()\n', (451, 453), False, 'from structlog import get_logger\n'), ((664, 686), 'fhir.resources.medication.Medication.construct', 'Medication.construct', ([], {}), '()\n', (684, 686), False, 'from fhir.resources.medication import Medication, MedicationIngredient\n'), ((826, 842), 'fhir.resources.meta.Meta.construct', 'Meta.construct', ([], {}), '()\n', (840, 842), False, 'from fhir.resources.meta import Meta\n'), ((1338, 1365), 'fhir.resources.codeableconcept.CodeableConcept.construct', 'CodeableConcept.construct', ([], {}), '()\n', (1363, 1365), False, 'from fhir.resources.codeableconcept import CodeableConcept\n'), ((1702, 1734), 'fhir.resources.medication.MedicationIngredient.construct', 'MedicationIngredient.construct', ([], {}), '()\n', (1732, 1734), False, 'from fhir.resources.medication import Medication, MedicationIngredient\n'), ((1814, 1841), 'fhir.resources.codeableconcept.CodeableConcept.construct', 'CodeableConcept.construct', ([], {}), '()\n', (1839, 1841), False, 'from fhir.resources.codeableconcept import CodeableConcept\n'), ((2401, 2447), 'ahd2fhir.utils.fhir_utils.sha256_of_identifier', 'sha256_of_identifier', (['medication.identifier[0]'], {}), '(medication.identifier[0])\n', (2421, 2447), False, 'from ahd2fhir.utils.fhir_utils import sha256_of_identifier\n'), ((2748, 2765), 'fhir.resources.ratio.Ratio.construct', 'Ratio.construct', ([], {}), '()\n', (2763, 2765), False, 'from fhir.resources.ratio import Ratio\n'), ((2783, 2803), 'fhir.resources.quantity.Quantity.construct', 'Quantity.construct', ([], {}), '()\n', (2801, 2803), False, 'from fhir.resources.quantity import Quantity\n'), ((3129, 3175), 'ahd2fhir.utils.fhir_utils.sha256_of_identifier', 'sha256_of_identifier', (['medication.identifier[0]'], {}), '(medication.identifier[0])\n', (3149, 3175), False, 'from ahd2fhir.utils.fhir_utils import sha256_of_identifier\n'), ((3293, 3313), 'fhir.resources.quantity.Quantity.construct', 'Quantity.construct', ([], {}), '()\n', (3311, 3313), False, 'from fhir.resources.quantity import Quantity\n'), ((3731, 3777), 'ahd2fhir.utils.fhir_utils.sha256_of_identifier', 'sha256_of_identifier', (['medication.identifier[0]'], {}), '(medication.identifier[0])\n', (3751, 3777), False, 'from ahd2fhir.utils.fhir_utils import sha256_of_identifier\n'), ((1435, 1453), 'fhir.resources.coding.Coding.construct', 'Coding.construct', ([], {}), '()\n', (1451, 1453), False, 'from fhir.resources.coding import Coding\n'), ((1887, 1895), 'fhir.resources.coding.Coding', 'Coding', ([], {}), '()\n', (1893, 1895), False, 'from fhir.resources.coding import Coding\n'), ((2230, 2242), 'fhir.resources.identifier.Identifier', 'Identifier', ([], {}), '()\n', (2240, 2242), False, 'from fhir.resources.identifier import Identifier\n')]
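A hypothetical minimal annotation exercising the RxNorm branch — the field names mirror the lookups in the function; all values are invented:

annotation = {
    "type": "de.averbis.types.health.Medication",  # only used to derive the identifier system
    "drugs": [{
        "ingredient": {
            "source": "RxNorm",
            "conceptId": "161",
            "dictCanon": "Acetaminophen",
        },
        "strength": {"value": 500.0, "unit": "mg"},
    }],
    "doseForm": {"dictCanon": "Tablet"},
}
medication = get_medication_from_annotation(annotation)
print(medication.id, medication.ingredient[0].strength.numerator.value)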
|
"""
Plot the training progress data collected by the Monitor.
"""
import csv
import logging
import os
import sys

import matplotlib.pyplot as plt
from microtbs_rl.utils.common_utils import *
from microtbs_rl.utils.monitor import Monitor
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
          'darkgreen', 'tan', 'salmon', 'gold', 'violet', 'darkred', 'darkblue', 'yellow']
logger = logging.getLogger(os.path.basename(__file__))
def main():
init_logger(os.path.basename(__file__))
_ = [
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'dqn_v0'),
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'dqn_v3_inception'),
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v0'),
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v1'),
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v2_inception'),
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v4_10steps_097'),
get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'openai_dqn'),
]
experiments = [
get_experiment_name('MicroTbs-CollectWithTerrain-v1', 'a2c_v0'),
get_experiment_name('MicroTbs-CollectWithTerrain-v1', 'a2c_v1'),
get_experiment_name('MicroTbs-CollectWithTerrain-v2', 'a2c_v5'),
]
_ = [
get_experiment_name('MicroTbs-CollectPartiallyObservable-v1', 'a2c_v2'),
get_experiment_name('MicroTbs-CollectPartiallyObservable-v1', 'a2c_v3'),
get_experiment_name('MicroTbs-CollectPartiallyObservable-v3', 'a2c_v5'),
]
for i in range(len(experiments)):
experiment = experiments[i]
x, y = [], []
stats_filename = Monitor.stats_filename(experiment)
with open(stats_filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
x.append(int(row[0]))
y.append(float(row[1]))
skip_coeff = max(1, len(x) // 200)
x_filtered, y_filtered = [], []
for j in range(len(x)):
if j % skip_coeff == 0:
x_filtered.append(x[j])
y_filtered.append(y[j])
x = x_filtered
y = y_filtered
logger.info('Plotting %s...', experiment)
plt.plot(x, y, color=COLORS[i], label=experiment)
plt.title('Reward over time')
plt.xlabel('Training step (batch #)')
plt.ylabel('Mean reward')
plt.legend()
plt.tight_layout()
plt.show()
if __name__ == '__main__':
sys.exit(main())
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"csv.reader",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"microtbs_rl.utils.monitor.Monitor.stats_filename",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] |
[((2407, 2436), 'matplotlib.pyplot.title', 'plt.title', (['"""Reward over time"""'], {}), "('Reward over time')\n", (2416, 2436), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training step (batch #)"""'], {}), "('Training step (batch #)')\n", (2451, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2508), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean reward"""'], {}), "('Mean reward')\n", (2493, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2513, 2525), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2523, 2525), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2548), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2546, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2561, 2563), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1816), 'microtbs_rl.utils.monitor.Monitor.stats_filename', 'Monitor.stats_filename', (['experiment'], {}), '(experiment)\n', (1804, 1816), False, 'from microtbs_rl.utils.monitor import Monitor\n'), ((2352, 2401), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'COLORS[i]', 'label': 'experiment'}), '(x, y, color=COLORS[i], label=experiment)\n', (2360, 2401), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1903), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1894, 1903), False, 'import csv\n')]
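For reference, the csv.reader loop above assumes Monitor writes a plain two-column stats file of (training step, mean reward) rows — hypothetical contents:

0,-2.31
100,-1.87
200,-1.42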
|
from airflow import models, settings
from airflow.contrib.auth.backends.password_auth import PasswordUser
user = PasswordUser(models.User())
user.username = 'admin'
user.email = '<EMAIL>'
user.password = '<PASSWORD>'
#user.superuser = '1'
session = settings.Session()
session.add(user)
session.commit()
session.close()
exit()
|
[
"airflow.models.User",
"airflow.settings.Session"
] |
[((263, 281), 'airflow.settings.Session', 'settings.Session', ([], {}), '()\n', (279, 281), False, 'from airflow import models, settings\n'), ((140, 153), 'airflow.models.User', 'models.User', ([], {}), '()\n', (151, 153), False, 'from airflow import models, settings\n')]
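This script targets Airflow 1.x password authentication; it only takes effect with the matching webserver settings in airflow.cfg:

[webserver]
authenticate = True
auth_backend = airflow.contrib.auth.backends.password_auth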
|
import requests
from apps.core.models import UserProfile
from django.conf import settings
from social.exceptions import AuthFailed
USER_INFO_LI_REQUEST_URL = ('https://api.linkedin.com/v1/people/~:('
'id,'
'firstName,'
'lastName,'
'emailAddress,'
'pictureUrl,'
'publicProfileUrl)'
'?oauth2_access_token={}'
'&format=json')
def update_or_create_user_profile(backend, user, response, *args, **kwargs):
li_access_token = response.get('access_token')
li_resp = requests.get(USER_INFO_LI_REQUEST_URL.format(li_access_token))
li_resp_json = li_resp.json()
li_email = li_resp_json.get('emailAddress')
if li_email not in settings.VALID_EMAILS:
raise AuthFailed(backend, 'This is not a whitelisted email')
user_profile, created = UserProfile.objects.get_or_create(user=user)
user_profile.li_email = li_email
user_profile.li_first_name = li_resp_json.get('firstName')
user_profile.li_last_name = li_resp_json.get('lastName')
user_profile.li_picture_url = li_resp_json.get('pictureUrl')
user_profile.li_profile_url = li_resp_json.get('publicProfileUrl')
user_profile.save()
|
[
"apps.core.models.UserProfile.objects.get_or_create",
"social.exceptions.AuthFailed"
] |
[((984, 1028), 'apps.core.models.UserProfile.objects.get_or_create', 'UserProfile.objects.get_or_create', ([], {'user': 'user'}), '(user=user)\n', (1017, 1028), False, 'from apps.core.models import UserProfile\n'), ((900, 954), 'social.exceptions.AuthFailed', 'AuthFailed', (['backend', '"""This is not a whitelisted email"""'], {}), "(backend, 'This is not a whitelisted email')\n", (910, 954), False, 'from social.exceptions import AuthFailed\n')]
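A sketch of wiring the function into the python-social-auth pipeline — the default entries are abridged, and the module path of the custom step is hypothetical:

SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.user.user_details',
    'apps.core.pipeline.update_or_create_user_profile',
)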
|