gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings are substituted by git during 'git archive'.
    # setup.py/versioneer.py greps for the variable names, so each one
    # must stay on a line of its own. _version.py just calls this.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances carry plain attributes (VCS, style, tag_prefix,
    parentdir_prefix, versionfile_source, verbose) that are assigned
    by get_config() below.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were baked in when 'setup.py versioneer' generated
    # this _version.py file.
    cfg = VersioneerConfig()
    for attr, value in [("VCS", "git"),
                        ("style", "pep440"),
                        ("tag_prefix", ""),
                        ("parentdir_prefix", ""),
                        ("versionfile_source", "tohu/_version.py"),
                        ("verbose", False)]:
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow between the version-discovery strategies in
    get_versions(): each strategy raises this to mean "try the next one".
    """
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s), trying each name in `commands` in turn.

    Returns (stdout, returncode).  Returns (None, None) when no command
    could be launched at all, and (None, returncode) on a non-zero exit.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([candidate] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                continue  # this name isn't installed; try the next one
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Walks up to two extra
    directory levels looking for a suitably named parent; raises
    NotThisMethod when none is found.
    """
    tried = []
    level = 0
    while level < 3:
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # step up one directory
        level += 1
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # The code embedded in _version.py can just fetch these keyword values.
    # When used from setup.py we don't want to import _version.py, so the
    # expanded values are pulled out with a regexp instead.  This function
    # is not used from _version.py itself.
    keywords = {}
    markers = {"git_refnames =": "refnames",
               "git_full =": "full",
               "git_date =": "date"}
    try:
        fobj = open(versionfile_abs, "r")
        for line in fobj.readlines():
            stripped = line.strip()
            for marker, key in markers.items():
                if stripped.startswith(marker):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        fobj.close()
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` maps "refnames"/"full"/"date" to the strings that
    git-archive expanded into the tarball.  Returns a version dict, or
    raises NotThisMethod when the keywords are missing or unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # the substitution never happened: not a git-archive tarball
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when `root` is
    not inside a git work tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False is used by run_command, so the full .cmd/.exe names
        # are needed on windows
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): assumes 'git show' succeeds (rev-parse worked above);
    # if run_command returned (None, rc) the .strip() would raise.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    Used when appending local-version segments so PEP 440's single-"+"
    rule is respected.

    BUG FIX: pieces["closest-tag"] is set to None (not missing) when there
    are no tags, so the old `pieces.get("closest-tag", "")` could hand None
    to the `in` operator and raise TypeError.  `or ""` covers both the
    missing-key and explicit-None cases.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing is tagged anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if tag and not pieces["distance"]:
        return tag
    base = tag if tag else "0"  # exception #1 uses "0" as the base
    return base + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        out = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        return out + "+g%s" % pieces["short"]
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        out += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        out += plus_or_dot(pieces)
        out += "g%s" % pieces["short"]
    return out
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if not (pieces["distance"] or pieces["dirty"]):
            return rendered  # exactly on a clean tag
        rendered += ".post%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        out = pieces["short"]  # exception #1
    elif pieces["distance"]:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        out = tag
    return out + "-dirty" if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # exception #1
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # discovery failed; surface the error instead of a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {"pep440": render_pep440,
                 "pep440-pre": render_pep440_pre,
                 "pep440-post": render_pep440_post,
                 "pep440-old": render_pep440_old,
                 "git-describe": render_git_describe,
                 "git-describe-long": render_git_describe_long}
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on the
    working tree, then the parent-directory name; falls back to a
    "0+unknown" dict with an error message.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        # strategy 1: git-archive keyword substitution
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ (frozen interpreter) and no expanded keywords
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        # strategy 2: ask git directly
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        # strategy 3: version embedded in the unpacked directory name
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # every strategy failed
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.conf import settings
from django.test import override_settings
from ci import models
import tempfile
import shutil
import os
import json
import subprocess
def base_git_config(authorized_users=None,
                    post_job_status=False,
                    post_event_summary=False,
                    failed_but_allowed_label_name=None,
                    recipe_label_activation=None,
                    recipe_label_activation_additive=None,
                    remote_update=False,
                    install_webhook=False,
                    host_type=None,
                    icon_class="",
                    remove_pr_label_prefix=None,
                    pr_wip_prefix=None,
                    hostname="dummy_git_server",
                    repo_settings=None,
                    ):
    """Build a dummy git-server config dict for tests.

    BUG FIXES:
    - the list/dict parameters previously used mutable default arguments,
      which are shared across calls; None sentinels are used instead.
    - `remove_pr_label_prefix` was accepted but ignored (the returned dict
      hard-coded ["PR: [TODO]"]); the parameter is now honored.  The
      default value is unchanged, so existing callers see the same output.
    """
    if authorized_users is None:
        authorized_users = []
    if recipe_label_activation is None:
        recipe_label_activation = {}
    if recipe_label_activation_additive is None:
        recipe_label_activation_additive = {}
    if remove_pr_label_prefix is None:
        remove_pr_label_prefix = ["PR: [TODO]"]
    if pr_wip_prefix is None:
        pr_wip_prefix = ["WIP:", "[WIP]"]
    return {"api_url": "https://<api_url>",
            "html_url": "https://<html_url>",
            "hostname": hostname,
            "authorized_users": authorized_users,
            "post_job_status": post_job_status,
            "post_event_summary": post_event_summary,
            "failed_but_allowed_label_name": failed_but_allowed_label_name,
            "recipe_label_activation": recipe_label_activation,
            "recipe_label_activation_additive": recipe_label_activation_additive,
            "remove_pr_label_prefix": remove_pr_label_prefix,
            "remote_update": remote_update,
            "install_webhook": install_webhook,
            "type": host_type,
            "icon_class": icon_class,
            "pr_wip_prefix": pr_wip_prefix,
            "civet_base_url": "https://dummy_civet_server",
            "repository_settings": repo_settings,
            }
def github_config(**kwargs):
    """Git config dict preset for a GitHub-type server."""
    return base_git_config(host_type=settings.GITSERVER_GITHUB,
                           icon_class="dummy github class",
                           **kwargs)
def gitlab_config(**kwargs):
    """Git config dict preset for a GitLab-type server."""
    return base_git_config(host_type=settings.GITSERVER_GITLAB,
                           icon_class="dummy gitlab class",
                           **kwargs)
def bitbucket_config(**kwargs):
    """Git config dict preset for a BitBucket-type server (adds v1/v2 API urls)."""
    config = base_git_config(host_type=settings.GITSERVER_BITBUCKET,
                             icon_class="dummy bitbucket class",
                             **kwargs)
    for key in ("api1_url", "api2_url"):
        config[key] = config["api_url"]
    return config
def create_git_server(name='dummy_git_server', host_type=settings.GITSERVER_GITHUB):
    """Get or create a GitServer model of the given host type."""
    return models.GitServer.objects.get_or_create(host_type=host_type, name=name)[0]
def default_labels():
    """Default mapping of label name to file-path regex."""
    labels = {}
    labels["DOCUMENTATION"] = "^docs/"
    labels["TUTORIAL"] = "^tutorials/"
    labels["EXAMPLES"] = "^examples/"
    return labels
def simulate_login(session, user):
    """
    Helper function to simulate signing in to github
    """
    browser_session = session  # copying to a variable is required
    auth = user.server.auth()
    auth.set_browser_session_from_user(browser_session, user)
    browser_session.save()
def create_user(name='testUser', server=None):
    """Get or create a GitUser, making a default server when none is given."""
    srv = server or create_git_server()
    user, _ = models.GitUser.objects.get_or_create(name=name, server=srv)
    return user
def create_user_with_token(name='testUser', server=None):
    """Create a user whose token field holds a random bearer token."""
    user = create_user(name, server=server)
    # the token isn't the build key but just use it for the random number
    token = {'access_token': models.generate_build_key(),
             'token_type': 'bearer',
             'scope': ["scope"]}
    user.token = json.dumps(token)
    user.save()
    return user
def get_owner():
    """Return the conventional repository-owner user used by the tests."""
    return create_user(name='testmb')
def create_repo(name='testRepo', user=None, server=None):
    """Get or create a Repository owned by `user` (created when missing)."""
    owner = user or create_user_with_token(server=server)
    repo, _ = models.Repository.objects.get_or_create(name=name, user=owner)
    return repo
def create_branch(name='testBranch', user=None, repo=None):
    """Get or create a Branch on `repo` (created when missing)."""
    repository = repo or create_repo(user=user)
    branch, _ = models.Branch.objects.get_or_create(name=name, repository=repository)
    return branch
def create_commit(branch=None, user=None, sha='1234'):
    """Get or create a Commit on `branch` (created when missing)."""
    the_branch = branch or create_branch(user=user)
    commit, _ = models.Commit.objects.get_or_create(branch=the_branch, sha=sha)
    return commit
def get_test_user(server=None):
    """Create user 'testmb01' with a repo, branch and commit attached."""
    user = create_user_with_token(name='testmb01', server=server)
    repo = create_repo(name='repo01', user=user)
    create_commit(branch=create_branch(name='branch01', repo=repo), sha='sha01')
    return user
def create_event(user=None, commit1='1234', commit2='2345', branch1=None, branch2=None, cause=models.Event.PULL_REQUEST):
    """Get or create an Event whose head/base commits are built as needed."""
    build_user = user or create_user_with_token()
    head = create_commit(user=build_user, branch=branch1, sha=commit1)
    base = create_commit(user=build_user, branch=branch2, sha=commit2)
    event, _ = models.Event.objects.get_or_create(head=head, base=base,
                                                  cause=cause, build_user=build_user)
    return event
def create_pr(title='testTitle', number=1, url='http', repo=None, server=None):
    """Get or create a PullRequest on `repo` (created when missing)."""
    repository = repo or create_repo(server=server)
    pr, _ = models.PullRequest.objects.get_or_create(
        repository=repository, number=number, title=title, url=url)
    return pr
def create_build_config(name='testBuildConfig'):
    """Get or create a BuildConfig by name."""
    config, _ = models.BuildConfig.objects.get_or_create(name=name)
    return config
def create_recipe(name='testRecipe', user=None, repo=None, cause=models.Recipe.CAUSE_PULL_REQUEST, branch=None, current=True):
    """Get or create an active, private Recipe with a default build config."""
    owner = user or create_user_with_token()
    repository = repo or create_repo(user=owner)
    recipe = models.Recipe.objects.get_or_create(
        name=name,
        display_name=name,
        build_user=owner,
        repository=repository,
        private=True,
        active=True,
        cause=cause,
        filename=name,
    )[0]
    recipe.build_configs.add(create_build_config())
    # branch/current aren't part of the lookup key; always (re)set them
    recipe.branch = branch
    recipe.current = current
    recipe.save()
    return recipe
def create_step(name='testStep', filename='default.sh', recipe=None, position=0):
    """Get or create a Step on `recipe` (created when missing)."""
    the_recipe = recipe or create_recipe()
    step, _ = models.Step.objects.get_or_create(
        recipe=the_recipe, name=name, position=position, filename=filename)
    return step
def create_recipe_environment(name='testEnv', value='testValue', recipe=None):
    """Get or create a RecipeEnvironment variable on `recipe`."""
    the_recipe = recipe or create_recipe()
    env, _ = models.RecipeEnvironment.objects.get_or_create(
        name=name, value=value, recipe=the_recipe)
    return env
def create_recipe_dependency(recipe=None, depends_on=None):
    """Make `recipe` depend on `depends_on`, creating either when missing."""
    parent = recipe or create_recipe(name="recipe1")
    child = depends_on or create_recipe(name="recipe2")
    parent.depends_on.add(child)
    return parent, child
def create_step_environment(name='testEnv', value='testValue', step=None):
    """Get or create a StepEnvironment variable on `step`."""
    the_step = step or create_step()
    env, _ = models.StepEnvironment.objects.get_or_create(
        step=the_step, name=name, value=value)
    return env
def create_job(recipe=None, event=None, config=None, user=None):
    """Get or create a Job, filling in recipe/event/config defaults."""
    job_recipe = recipe or create_recipe(user=user)
    job_event = event or create_event(user=user)
    job_config = config or job_recipe.build_configs.first()
    job, _ = models.Job.objects.get_or_create(config=job_config,
                                              recipe=job_recipe,
                                              event=job_event)
    return job
def update_job(job, status=None, complete=None, ready=None, active=None, invalidated=None, client=None, created=None):
    """Set any non-None fields on `job` and save it; None means untouched."""
    updates = {"status": status,
               "complete": complete,
               "ready": ready,
               "active": active,
               "invalidated": invalidated,
               "client": client,
               "created": created}
    for field, value in updates.items():
        if value is not None:
            setattr(job, field, value)
    job.save()
def create_prestepsource(filename="default.sh", recipe=None):
    """Get or create a PreStepSource for `recipe` (created when missing)."""
    the_recipe = recipe or create_recipe()
    source, _ = models.PreStepSource.objects.get_or_create(
        recipe=the_recipe, filename=filename)
    return source
def create_client(name='testClient', ip='127.0.0.1'):
    """Get or create a Client record."""
    return models.Client.objects.get_or_create(name=name, ip=ip)[0]
def create_step_result(status=models.JobStatus.NOT_STARTED, step=None, job=None, name="step result", position=0):
    """Get or create a StepResult mirroring a step's fields, then set status."""
    the_job = job or create_job()
    the_step = step or create_step(recipe=the_job.recipe, name=name, position=position)
    result = models.StepResult.objects.get_or_create(
        job=the_job, name=the_step.name, position=the_step.position,
        abort_on_failure=the_step.abort_on_failure, filename=the_step.filename)[0]
    result.status = status
    result.save()
    return result
def create_osversion(name="Linux", version="1", other="other"):
    """Get or create an OSVersion record."""
    return models.OSVersion.objects.get_or_create(name=name, version=version, other=other)[0]
def create_loadedmodule(name="module"):
    """Get or create a LoadedModule by name."""
    return models.LoadedModule.objects.get_or_create(name=name)[0]
def create_badge(name="badge", repo=None):
    """Get or create a RepositoryBadge on `repo` (created when missing)."""
    repository = repo or create_repo()
    badge, _ = models.RepositoryBadge.objects.get_or_create(
        name=name, repository=repository)
    return badge
def _add_git_file(dirname, name):
    """Create a file named `name` (containing its own name) and `git add` it."""
    path = os.path.join(dirname, name)
    with open(path, 'w') as fh:
        fh.write(name)
    subprocess.check_output(["git", "add", path], cwd=dirname)
def create_recipe_scripts_dir():
    """Make a temp git repo containing two committed shell scripts."""
    scripts_dir = tempfile.mkdtemp()
    subprocess.check_output(["git", "init"], cwd=scripts_dir)
    for script in ('1.sh', '2.sh'):
        _add_git_file(scripts_dir, script)
    subprocess.check_output(["git", "commit", "-m", "'Initial data'"], cwd=scripts_dir)
    return scripts_dir
def create_recipe_dir():
    """Make a temp dir populated as a recipe checkout (see create_recipes)."""
    base_dir = tempfile.mkdtemp()
    create_recipes(base_dir)
    return base_dir
class RecipeDir(object):
    """Temporary recipe checkout that doubles as a context manager.

    Constructing an instance makes a temp dir, points
    settings.RECIPE_BASE_DIR at it and populates it via create_recipes();
    __exit__ deletes the tree.
    """
    def __init__(self):
        self.name = tempfile.mkdtemp()
        # global override; note it is NOT restored when the instance exits
        settings.RECIPE_BASE_DIR = self.name
        create_recipes(self.name)
    def __repr__(self):
        return self.name
    # NOTE(review): override_settings here only applies while __enter__
    # itself runs, not for the body of the with-block — confirm this is
    # the intended behavior.
    @override_settings(RECIPE_BASE_DIR="")
    def __enter__(self):
        return self.name
    def __exit__(self, exc, value, tb):
        # remove the temp tree regardless of success/failure in the block
        shutil.rmtree(self.name)
def create_recipes(recipe_dir):
    """Turn `recipe_dir` into a git repo with scripts/, recipes/ and a README."""
    subprocess.check_output(["git", "init"], cwd=recipe_dir)
    scripts_dir = os.path.join(recipe_dir, "scripts")
    for sub in ("scripts", "recipes"):
        os.mkdir(os.path.join(recipe_dir, sub))
    for script in ('1.sh', '2.sh'):
        _add_git_file(scripts_dir, script)
    _add_git_file(recipe_dir, 'README.md')
    subprocess.check_output(["git", "commit", "-m", "'Initial data'"], cwd=recipe_dir)
    return recipe_dir
class RequestInResponse(object):
    """Stand-in for the request object attached to a Response."""
    def __init__(self):
        self.url = "someurl"
        self.method = "HTTP METHOD"


class Response(object):
    """Minimal stand-in for an HTTP response object used by the tests."""
    def __init__(self, json_data=None, content=None, use_links=False, status_code=200, do_raise=False):
        self.status_code = status_code
        self.do_raise = do_raise
        self.reason = "some reason"
        # pagination: expose a 'next' link only when requested
        self.links = {'next': {'url': 'next_url'}} if use_links else []
        self.json_data = json_data
        self.content = content
        self.request = RequestInResponse()

    def json(self):
        """Return the canned JSON payload."""
        return self.json_data

    def raise_for_status(self):
        """Raise on error status codes, or always when do_raise is set."""
        if self.do_raise or self.status_code >= 400:
            raise Exception("Bad status!")
def create_test_jobs():
    """
    Create 4 jobs.
    j0 -> j1, j2 -> j3
    """
    r0 = create_recipe(name="r0")
    others = [create_recipe(name=rn, user=r0.build_user, repo=r0.repository)
              for rn in ("r1", "r2", "r3")]
    r1, r2, r3 = others
    # diamond dependency: r0 feeds r1 and r2, which both feed r3
    r1.depends_on.add(r0)
    r2.depends_on.add(r0)
    r3.depends_on.add(r1)
    r3.depends_on.add(r2)
    ev = create_event(user=r0.build_user)
    jobs = tuple(create_job(recipe=r, event=ev) for r in (r0, r1, r2, r3))
    for job in jobs:
        create_step_result(job=job)
    return jobs
| |
'''
This is the final wavelength calibration code that implements the
grating equation to calculate the wavelength dispersion.
The input should just be the name of a comparison lamp, taken from argv.
The functions depend on some hard-coded data, "WaveList" and "Parameters", which
are project specific.
Written by Jesus Meza, UNC. March 2016. Upgrades by Josh Fuchs.
Use:
>>> python WaveCal.py lamp_spec.ms.fits
:INPUTS:
lamp_spec.ms.fits: string, 1D Fe lamp spectrum
:OPTIONAL:
ZZCeti_spectrum.ms.fits: string, parameters written to header of this image if supplied when prompted
:OUTPUTS:
wtFe*fits: lamp spectrum with wavelength calibration parameters written to header
w*fits: ZZ Ceti spectrum with wavelength calibration parameters written to header
wavecal_ZZCETINAME_DATE.txt: saved parameters for diagnostics. ZZCETINAME is name of the ZZ Ceti spectrum supplied. DATE is the current date and time. Columns are: fitted wavelengths, residuals, wavelengths, flux, lambdas fit, wavelengths, sky flux, fit to line for recentering
To do:
- Add method to reject lines after refitting
'''
# ==========================================================================
# Imports # ================================================================
# ==========================================================================
import ReduceSpec_tools as rt
import numpy as np
#import pyfits as fits
import astropy.io.fits as fits
import scipy.signal as sg
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit, fsolve
import mpfit
import datetime
import os
# ==========================================================================
# Data # ===================================================================
# ==========================================================================
# Grating Eq Parameters
# Parameters = [fr, fd, fl, zPnt]
# fr = fringe density of grating
# fd = camera angle correction factor
# fl = focal length
# zPnt = zero-point pixel offset
# NOTE(review): an "fa" (grating-angle fudge) factor appears in older
# legends but is not present in these 4-element lists — confirm.
# As close to the red set up as I currently have (930_20_40)
Param_930_20_40= [92.668, 0.973, 377190., 1859.]
# As close to the blue set up as I currently have (930_12_24)
Param_930_12_24= [92.517, 0.962, 377190., 1836.]
# ==========================================================================
# Pixel-Wavelength List #
# WaveList == array (2, number of lines)
# WaveList[0] == pixels
# WaveList[1] == Wavelengths
# As close to the red setup (20_35.2) as I currently have #
'''
These are the original lists. Below we have selected the lines that can consistently be fit by a Gaussian. These are kept for archival purposes.
WaveList_Fe_930_20_40= np.array([ [1155.1, 102.964, 142.88, 276.362, 438.35, 532.819,
631.475, 755.5, 798.185, 831.719, 1062.43, 1086.89,
1249.02, 1316.94, 1475.64, 1566.37, 1762.42,
1910, 2053.55, 2072.75, 2168.64, 2179.14,
2271.52, 2318.23, 2347.65, 2370.04, 2417.91,
2645.82, 2672.71, 3385.8, 3562.11, 3620.55,
3765.62, 3913.77, 3935.87, 4016.2, 4034.6],
[6043.223, 6466.5526, 6483.0825, 6538.112, 6604.8534,
6643.6976, 5875.618, 6684.2929, 6752.8335, 6766.6117,
6861.2688, 6871.2891, 6937.6642, 6965.4307,
7030.2514, 7067.2181, 7147.0416, 7206.9804,
7265.1724, 7272.9359, 7311.7159, 7316.005,
7353.293, 7372.1184, 7383.9805, 7392.9801,
7412.3368, 7503.8691, 7514.6518, 7798.5604,
7868.1946, 7891.075, 7948.1964, 8006.1567,
8014.7857, 8046.1169, 8053.3085] ])
# As close to the blue setup (12_24) as I currently have #
WaveList_Fe_930_12_24= np.array([ [34.6852, 431.795, 451.264, 942.966, 1057.76,
1174.6, 1194.97, 1315.35, 1381.1, 1444.58,
1457.81, 1538.65, 1544.11, 1630.61, 1682.99,
1713.34, 1726.03, 1779.61, 1886.38, 1893.19,
1959.28, 1968.19, 1980.81, 2018.27, 2078.23,
2088.47, 2132.53, 2194.03, 2210.85, 2279.64,
2361.34, 2443.06, 2468.22, 2515.14, 2630.53,
2795.45, 2807.86, 2817.11, 2886.45, 2985.15,
3085.52, 3162.56, 3184.41, 3367.86, 3493.34,
3602.21, 3795.76, 3845.65, 3907.57],
[3561.0304, 3729.3087, 3737.1313, 3946.0971,
3994.7918, 4044.4179, 4052.9208, 4103.9121,
4131.7235, 4158.5905, 4164.1795, 4198.3036,
4200.6745, 4237.2198, 4259.3619, 4271.7593,
4277.5282, 4300.1008, 4345.168, 4348.064,
4375.9294, 4379.6668, 4385.0566, 4400.9863,
4426.0011, 4430.189, 4448.8792, 4474.7594,
4481.8107, 4510.7332, 4545.0519, 4579.3495,
4589.8978, 4609.5673, 4657.9012, 4726.8683,
4732.0532, 4735.9058, 4764.8646, 4806.0205,
4847.8095, 4879.8635, 4889.0422, 4965.0795,
5017.1628, 5062.0371, 5141.7827, 5162.2846,
5187.7462] ])
'''
WaveList_Fe_930_12_24= np.array([ [431.795, 1057.76, 1194.97, 1315.35,
1381.1, 1444.58, 1630.61,
1682.99, 1726.03, 1779.61,
1893.19, 2132.53, 2210.85,
2279.64, 2361.34, 2443.06, 2468.22,
2515.14, 2630.53, 2795.45, 2886.45,
2985.15, 3085.52, 3162.56,
3367.86, 3795.76, 3845.65,
3907.57],
[3729.3087, 3994.7918, 4052.9208, 4103.9121,
4131.7235, 4158.5905, 4237.2198,
4259.3619, 4277.5282, 4300.1008,
4348.064, 4448.8792, 4481.8107,
4510.7332, 4545.0519, 4579.3495, 4589.8978,
4609.5673, 4657.9012, 4726.8683, 4764.8646,
4806.0205, 4847.8095, 4879.8635,
4965.0795, 5141.7827, 5162.2846,
5187.7462] ])
WaveList_Fe_930_20_40= np.array([ [1155.1, 102.964, 142.88, 276.362, 438.35,
532.819, 631.475, 798.185, 831.719,
1062.43, 1086.89, 1249.02, 1316.94,
1475.64, 1762.42, 1910.0],
[6043.223, 6466.5526, 6483.0825, 6538.112, 6604.8534,
6643.6976, 5875.618, 6752.8335, 6766.6117,
6861.2688, 6871.2891, 6937.6642, 6965.4307,
7030.2514, 7147.0416, 7206.9804] ])
# ==========================================================================
# Functions # ==============================================================
# ==========================================================================
def DispCalc(Pixels, alpha, theta, fr, fd, fl, zPnt):
    # Grating equation: convert pixel positions to wavelengths (Angstroms)
    # using the fitted spectrograph parameters and angle setup.
    # Inputs:
    #   Pixels = sequence of pixel numbers
    #   alpha  = grating angle (degrees)
    #   theta  = camera angle (degrees)
    #   fr     = fringe density of the grating
    #   fd     = camera-angle correction factor
    #   fl     = focal length
    #   zPnt   = zero-point pixel
    # Returns a list of wavelengths, one per input pixel.
    deg2rad = np.pi / 180.
    sin_alpha = np.sin(alpha * deg2rad)
    Wavelengths = []
    for pixel in Pixels:
        beta = np.arctan((pixel - zPnt) * 15. / fl) + (fd * theta * deg2rad) - (alpha * deg2rad)
        Wavelengths.append((10**6.) * (np.sin(beta) + sin_alpha) / fr)
    return Wavelengths
# ===========================================================================
def PixCalc(Wavelenghts, alpha, theta, fr, fd, fl, zPnt):
    # Inverse grating equation: convert wavelengths (Angstroms) back to
    # pixel numbers using the fitted parameters and angle setup.
    # Inputs mirror DispCalc; Wavelenghts is a sequence of wavelengths.
    # Returns a list of (fractional) pixel numbers.
    deg2rad = np.pi / 180.
    sin_alpha = np.sin(alpha * deg2rad)
    Pixels = []
    for wavelength in Wavelenghts:
        beta = np.arcsin(((wavelength * fr / 1000000.0)) - sin_alpha)
        pixel = np.tan((beta + (alpha * deg2rad)) - (fd * theta * deg2rad)) * (fl / 15.) + zPnt
        Pixels.append(pixel)
    return Pixels
# ===========================================================================
def Gauss(x,a,c,w,b):
    # Gaussian with constant baseline evaluated at x.
    #   a = amplitude, c = center, w = RMS width, b = baseline offset.
    # Returns the Gaussian value at x.
    exponent = (-(x - c)**2.) / (2. * w**2.)
    return a * np.exp(exponent) + b
# ===========================================================================
def CrossCorr(lamp_data):
    # Cross-correlate a lamp spectrum with a unit-amplitude Gaussian of
    # RMS width 3 centered on each pixel in turn; returns the accumulated
    # correlation value per pixel.
    npix = np.size(lamp_data)
    pixel_axis = np.arange(npix)
    correlation = np.zeros(npix)
    print ("\nCross Correlateing")
    for center in range(0, npix):
        kernel = [Gauss(pixel_axis[k], 1., center, 3., 0.) for k in range(0, npix)]
        correlation = correlation + kernel * lamp_data
    return correlation
# ===========================================================================
def PeakFind(data):
    # Locate emission peaks in a 1-D spectrum.
    # Candidate peaks come from a continuous-wavelet-transform search over
    # kernel widths 1.0-4.9; adjacent candidate pixels are merged (keeping
    # the larger index), and only candidates rising above 2 standard
    # deviations of the data are kept.
    # Returns (peaks_x, peaks_y): pixel indices and data values of peaks.
    print("\nFinding Peaks")
    widths= np.arange(1.,5.,.1)
    maybe= sg.find_peaks_cwt(data, widths)
    n= np.size(maybe)
    prob= []
    for i in range(0,n):
        # BUG FIX: the original evaluated maybe[0]-maybe[-1] on the first
        # iteration (Python negative-index wrap-around); only merge
        # genuinely adjacent candidate pairs.
        if i > 0 and (maybe[i]-maybe[i-1])==1:
            prob.append( np.max([maybe[i],maybe[i-1]]) )
        else:
            prob.append(maybe[i])
    peaks_x= []
    peaks_y= []
    std= np.std(data)
    for p in prob:
        if data[p]>2.0*std:
            peaks_x.append(p)
            peaks_y.append(data[p])
    return peaks_x, peaks_y
# ===========================================================================
def fit_Gauss(X,Y):
    # Fit the Gaussian-plus-baseline model (see Gauss) to the points (X, Y).
    # Initial guesses: half the peak height, location of the maximum, a
    # fixed seed width, and the mean level as the baseline.
    # Returns the fitted parameters [amplitude, center, width, baseline].
    amp0 = np.max(Y) / 2.0
    center0 = X[np.argmax(Y)]
    width0 = 3.0 * 0.42
    base0 = np.mean(Y)
    fitted, covariance = curve_fit(Gauss, X, Y, p0=[amp0, center0, width0, base0], maxfev=1000)
    return fitted
# ===========================================================================
def find_peak_centers(peak_w, Wavelen, Counts):
    # For each approximate peak wavelength in peak_w, fit a Gaussian to the
    # 18-sample window of (Wavelen, Counts) around it and collect the fitted
    # line centers.  Wavelen must be a list (index() is used for lookup).
    list_centers= []
    for approx in peak_w:
        idx = Wavelen.index(approx)  # position of this peak within Wavelen
        window_w = Wavelen[idx-9:idx+9]
        window_c = Counts[idx-9:idx+9]
        amp, cent, width, base = fit_Gauss(window_w, window_c)
        list_centers.append(cent)
        # Sample the fitted Gaussian across the window (plotting of this
        # diagnostic curve is disabled, but the evaluation is kept).
        X = np.arange(window_w[0], window_w[-1], (window_w[-1]-window_w[0])/50.0)
        Y = [Gauss(x, amp, cent, width, base) for x in X]
    return list_centers
# ===========================================================================
def onclick(event):
    # Matplotlib button-press callback: record the click position in the
    # module-level coords list and draw a vertical marker at the clicked x.
    global ix, iy, coords, ax
    ix, iy = event.xdata, event.ydata
    ax.axvline(x=ix, color='k', linewidth='2')
    fig.canvas.draw()
    coords.append((ix, iy))
# ===========================================================================
def find_near(p, in_data):
    # Return the element of in_data whose value is closest to p.
    def distance(candidate):
        return abs(candidate - p)
    return min(in_data, key=distance)
# ===========================================================================
def fit_Grating_Eq(known_pix, known_wave, alpha, theta, Param,plotalot=False):
    # Least-squares fit of the grating-equation parameters (line density,
    # camera fudge factor, zero-point pixel) so that the known wavelengths
    # map onto the measured pixel centers.  Param supplies the initial
    # [fr, fd, fl, zPnt]; fl is held fixed (published via the global `fl`
    # so Predict_Pixel can see it).  Writes diagnostics into the module
    # global `savearray` and returns (fitted parameters, RMS residual).
    # Model # =============================================
    def Beta_Calc(w,a, FR, TF):
        # Diffraction angle for wavelength w at grating angle a (degrees).
        beta = np.arcsin( (w*FR/1000000.0) - np.sin(a*np.pi/180.) )
        return beta
    def Predict_Pixel(X, FR, TF, ZPNT):
        # Forward model: (alpha, wavelength, theta) -> pixel number.
        # NOTE: reads the enclosing global `fl` (focal length).
        a, w, t = X
        pPixel = np.tan((Beta_Calc(w,a, FR, TF) + (a*np.pi/180.)) - (TF*t*np.pi/180.)) * (fl/15.) + ZPNT
        return pPixel
    # Curve Fitting # =====================================
    Alpha= np.ones(np.shape(known_wave))*alpha
    Theta= np.ones(np.shape(known_wave))*theta
    global fl
    fr, fd, fl, zPnt= Param
    p0= [fr, fd, zPnt]
    xdata= [Alpha, known_wave, Theta]
    Par, Covar = curve_fit(Predict_Pixel, xdata, known_pix, p0, maxfev= 10000)
    # Print Results # =====================================
    print '\nFitted Parameters:'
    print '\nLine Density= %s \nCam. Fudge= %s \nZero Pt. = %s' % (Par[0], Par[1], Par[2])
    print '\nConstants: \nFocal Length = %s' % (fl)
    #print '\nCovarince Matrix: \n%s' % Covar
    # Variance of Parameters # ===============================
    # Calculated from Covariance matrix
    # Each parameter's sigma is sqrt(P . C . P^T) with P a unit selector.
    Varience = []
    for i in range (0,len(Par)):
        P = np.zeros(len(Par))
        P[i] = 1
        Pt= np.transpose(P)
        C = np.asarray(Covar)
        SigSq = np.dot(P, np.dot(C,Pt) )
        Varience.append( np.sqrt(SigSq) )
    print "\nVariance of Parameters: \n%s" % Varience
    # Plot Residuals # =====================================
    # NOTE(review): relies on Python 2 zip() returning a list -- confirm
    # before porting to Python 3 (len()/indexing of a py3 zip fails).
    X = zip(Alpha,known_wave,Theta)
    # Xb = zip(Alpha,known_wave)
    N = len(X)
    pPixel = [Predict_Pixel(X[n], Par[0],Par[1],Par[2]) for n in range(0,N)]
    Res = [known_pix[n]-pPixel[n] for n in range(0,N)]
    # print '\nResiduals:\n %s' % Res
    rmsfit = np.sqrt(np.mean([n**2. for n in Res]))
    print '\nRMS = %s' % rmsfit
    if plotalot:
        plt.scatter(known_wave, Res, color='r', marker='+')
        plt.grid()
        plt.ylim( min(Res)*2., max(Res)*2.)
        plt.title('Least Squares Fit Residuals')
        plt.ylabel('Pixels')
        plt.xlabel('Wavelength')
        plt.show()
    # Record wavelengths and residuals for the diagnostic dump file.
    savearray[0:len(known_wave),0] = known_wave
    savearray[0:len(Res),1] = Res
    return Par, rmsfit
# ===========================================================================
def gaussmpfit(x,p):
    # Single Gaussian with constant baseline, parameterised for mpfit:
    # p = [amplitude, center, sigma, baseline].
    z = (x - p[1]) / (np.sqrt(2) * p[2])
    return p[3] + p[0] * np.exp(-z**2.)
# ===========================================================================
def fitgauss(p,fjac=None,x=None,y=None,err=None):
    # mpfit residual function for the single-Gaussian model gaussmpfit.
    # p holds the model parameters; fjac=None means mpfit will not request
    # analytic partial derivatives.  Returns [status, weighted residuals].
    residuals = (y - gaussmpfit(x, p)) / err
    return [0, residuals]
#===========================================
#Single pseudogaussian plus cubic for continuum
def pseudogausscubic(x,p):
    # Pseudo-Gaussian line profile on top of a cubic continuum.
    # p = [c0, c1, c2, amplitude, center, width, power, c3]; the continuum
    # is c0 + c1*x + c2*x^2 + c3*x^3, and the line uses |x-center| raised
    # to the p[6] power (p[6]=2 recovers a plain Gaussian).
    continuum = p[0]*1. + p[1]*x + p[2]*x**2. + p[7]*x**3.
    profile = p[3]*np.exp(-(np.abs(x-p[4])/(np.sqrt(2.)*p[5]))**p[6])
    return continuum + profile
def fitpseudogausscubic(p,fjac=None,x=None, y=None, err=None):
    # mpfit residual function for the pseudo-Gaussian + cubic model.
    # fjac=None means mpfit will not request analytic derivatives.
    # Returns [status, weighted residuals].
    residuals = (y - pseudogausscubic(x, p)) / err
    return [0, residuals]
# ===========================================================================
def newzeropoint(x):
    # Root function for fsolve in WaveShift: evaluates the wavelength error
    # of the reference line (global newlambda) when the zero-point pixel is
    # x.  Relies on module globals set beforehand by calibrate_now/WaveShift:
    # bestpixel, parm (focal length in parm[2]), n_fd, n_fr, theta, alpha.
    beta = np.arctan( (bestpixel-x)*15./parm[2] ) + (n_fd*theta*np.pi/180.) - (alpha*np.pi/180.)
    out = newlambda - (10**6.)*( np.sin(beta) + np.sin(alpha*np.pi/180.) )/n_fr
    return out
# ===========================================================================
def WaveShift(specname,zzceti,plotall):
    #Calculates a new zero point for a spectrum based on a skyline
    # Fits one spectral line (H-beta/H-alpha for ZZ Cetis, or a user-chosen
    # line otherwise), then solves the grating equation for the zero-point
    # pixel that puts the fitted center at the known wavelength.
    # Uses module globals n_fr, n_fd, n_zPnt, parm set by calibrate_now and
    # sets globals newlambda/bestpixel consumed by newzeropoint.
    # Returns the new zero-point pixel as a float.
    spec_data= fits.getdata(specname)
    dataval = spec_data[0,0,:]    # flux slice
    sigmaval = spec_data[3,0,:]   # per-pixel uncertainties
    spec_header= fits.getheader(specname)
    global alpha, theta
    alpha= float( spec_header["GRT_TARG"] )
    theta= float( spec_header["CAM_TARG"] )
    trim_sec= spec_header["CCDSEC"]
    trim_offset= float( trim_sec[1:len(trim_sec)-1].split(':')[0] )-1
    try:
        bining= float( spec_header["PARAM18"] )
    except:
        bining= float( spec_header["PG3_2"] )
    nx= np.size(spec_data[0])
    Pixels= bining*(np.arange(0,nx,1)+trim_offset)
    WDwave = DispCalc(Pixels, alpha, theta, n_fr, n_fd, parm[2], n_zPnt)
    #Select whether to fit a Balmer line or choose a different line
    #selectline = raw_input('Is this a ZZ Ceti? (yes/no): ')
    pix = range(len(dataval)) #This sets up an array of pixel numbers
    if zzceti == 'yes':
        if 'blue' in specname.lower():
            #Recenter the observed data to match the models by fitting beta and gamma
            bfitlow = 1300 #4680
            bfithi = 1750 #5040
            fitpixels = np.asarray(pix[bfitlow:bfithi+1])
            fitsigmas = sigmaval[bfitlow:bfithi+1]
            fitval = dataval[bfitlow:bfithi+1]
            best = np.zeros(8)
            # Seed the cubic continuum from five points near the window edges.
            xes = np.array([pix[bfitlow],pix[bfitlow+10],pix[bfitlow+20],pix[bfithi-10],pix[bfithi]])
            yes = np.array([dataval[bfitlow],dataval[bfitlow+10],dataval[bfitlow+20],dataval[bfithi-10],dataval[bfithi]])
            bp = np.polyfit(xes,yes,3)
            bpp = np.poly1d(bp)
            best[0] = bp[3]
            best[1] = bp[2]
            best[2] = bp[1]
            best[7] = bp[0]
            best[4] = pix[np.min(np.where(fitval == fitval.min()))] + bfitlow
            best[3] = np.min(dataval[bfitlow:bfithi+1]) - bpp(best[4]) #depth of line relative to continuum
            bhalfmax = bpp(best[4]) + best[3]/2.5
            bdiff = np.abs(fitval-bhalfmax)
            blowidx = bdiff[np.where(fitpixels < best[4])].argmin()
            bhighidx = bdiff[np.where(fitpixels > best[4])].argmin() + len(bdiff[np.where(fitpixels < best[4])])
            best[5] = (fitpixels[bhighidx] - fitpixels[blowidx]) / (2.*np.sqrt(2.*np.log(2.))) #convert FWHM to sigma
            best[6] = 1.0 #how much of a pseudo-gaussian
            bfa = {'x':fitpixels, 'y':fitval, 'err':fitsigmas}
            bparams = mpfit.mpfit(fitpseudogausscubic,best,functkw=bfa,maxiter=3000,ftol=1e-16,xtol=1e-10,quiet=True)
            line_center = bparams.params[4]
            line_fit = pseudogausscubic(fitpixels,bparams.params)
            known_wavelength = 4862.0
        if 'red' in specname.lower():
            #Recenter the observed data to match the models by fitting beta and gamma
            rfitlow = 940 #6380
            rfithi = 1400 #6760
            fitpixels = np.asarray(pix[rfitlow:rfithi+1])
            fitsigmas = sigmaval[rfitlow:rfithi+1]
            fitval = dataval[rfitlow:rfithi+1]
            rest = np.zeros(8)
            xes = np.array([pix[rfitlow],pix[rfitlow+10],pix[rfitlow+20],pix[rfithi-10],pix[rfithi]])
            yes = np.array([dataval[rfitlow],dataval[rfitlow+10],dataval[rfitlow+20],dataval[rfithi-10],dataval[rfithi]])
            rp = np.polyfit(xes,yes,3)
            rpp = np.poly1d(rp)
            rest[0] = rp[3]
            rest[1] = rp[2]
            rest[2] = rp[1]
            rest[7] = rp[0]
            rest[4] = pix[np.min(np.where(fitval == fitval.min()))] + rfitlow
            rest[3] = np.min(dataval[rfitlow:rfithi+1]) - rpp(rest[4]) #depth of line relative to continuum
            rhalfmax = rpp(rest[4]) + rest[3]/3.
            rdiff = np.abs(fitval-rhalfmax)
            rlowidx = rdiff[np.where(fitpixels < rest[4])].argmin()
            rhighidx = rdiff[np.where(fitpixels > rest[4])].argmin() + len(rdiff[np.where(fitpixels < rest[4])])
            rest[5] = (fitpixels[rhighidx] - fitpixels[rlowidx]) / (2.*np.sqrt(2.*np.log(2.))) #convert FWHM to sigma
            rest[6] = 1.0 #how much of a pseudo-gaussian
            rfa = {'x':fitpixels, 'y':fitval, 'err':fitsigmas}
            rparams = mpfit.mpfit(fitpseudogausscubic,rest,functkw=rfa,maxiter=3000,ftol=1e-16,xtol=1e-10,quiet=True)
            line_center = rparams.params[4]
            line_fit = pseudogausscubic(fitpixels,rparams.params)
            known_wavelength = 6564.6
    elif zzceti == 'no':
        #Plot the spectrum and allow user to set fit width
        global ax, fig, coords
        fig = plt.figure(1)
        ax = fig.add_subplot(111)
        ax.plot(pix,dataval)
        coords= []
        cid = fig.canvas.mpl_connect('button_press_event', onclick)
        plt.xlabel('Pixels')
        plt.title('Click on both sides of line you want to fit')
        plt.show()
        fitlow= find_near(coords[0][0], pix) # Nearest pixel to click cordinates
        fithi= find_near(coords[1][0], pix) # Nearest pixel to click cordinates
        fitpixels = np.asarray(pix[fitlow:fithi+1])
        fitsigmas = sigmaval[fitlow:fithi+1]
        fitval = dataval[fitlow:fithi+1]
        guess = np.zeros(4)
        guess[3] = np.mean(fitval) #continuum
        guess[0] = guess[3] - np.min(fitval) #amplitude
        guess[1] = fitpixels[fitval.argmin()] #central pixel
        guess[2] = 4. #guess at sigma
        fa = {'x':fitpixels, 'y':fitval, 'err':fitsigmas}
        lineparams = mpfit.mpfit(fitgauss,guess,functkw=fa,quiet=True)
        line_center = lineparams.params[1]
        line_fit = gaussmpfit(fitpixels,lineparams.params)
        known_wavelength = float(raw_input('Wavelength of line: '))
    print 'Gaussian center at pixel ',line_center
    if plotall:
        plt.clf()
        plt.hold('on')
        plt.plot(fitpixels,line_fit,'r')
        plt.plot(fitpixels,fitval,'b')
        plt.axvline(x=line_center,color='r')
        plt.hold('off')
        plt.show()
    # Record the fit window and fitted profile for the diagnostic dump file.
    savearray[0:len(fitpixels),5] = fitpixels
    savearray[0:len(fitval),6] = fitval
    savearray[0:len(line_fit),7] = line_fit
    #Take this fit and determine the new zero point
    global newlambda
    newlambda = known_wavelength
    global bestpixel
    bestpixel = bining*(line_center +trim_offset)
    guess = n_zPnt
    newzero = fsolve(newzeropoint,guess,xtol=1e-12)
    newzPnt = float(newzero)
    WDwave2 = DispCalc(Pixels, alpha, theta, n_fr, n_fd, parm[2], newzPnt)
    #plt.plot(WDwave2,spec_data[2,0,:])
    #plt.show()
    return newzPnt
# ===========================================================================
# Code ======================================================================
# ===========================================================================
# Get Lamps # ==============================================================
def calibrate_now(lamp,zz_specname,fit_zpoint,zzceti,offset_file,plotall=True):
    # Main wavelength-calibration driver.
    #   lamp        = FITS lamp spectrum to calibrate
    #   zz_specname = optional science spectrum to receive the solution
    #   fit_zpoint  = 'yes' to re-derive the zero point from a line (WaveShift)
    #   zzceti      = 'yes'/'no', forwarded to WaveShift
    #   offset_file = optional text file with blue/red wavelength offsets
    #   plotall     = show diagnostic plots
    # Writes 'w'+lamp (and 'w'+zz_specname) with the fitted grating-equation
    # parameters in the header, plus a wavecal_*.txt diagnostic dump.
    # Read Lamp Data and Header #
    lamp_data= fits.getdata(lamp)
    lamp_header= fits.getheader(lamp)
    # Check number of image slices, and select the spectra #
    if lamp_header["NAXIS"]== 2:
        lamp_spec= lamp_data[0]
    elif lamp_header["NAXIS"]== 3:
        lamp_spec= lamp_data[0][0]
    else:
        print ("\nDont know which data to unpack.")
        print ("Check the array dimensions\n")
    # plt.figure(1)
    # plt.plot(lamp_spec)
    # plt.title('Raw')
    # plt.show()
    # Find the pixel number offset due to trim reindexing #
    trim_sec= lamp_header["CCDSEC"]
    trim_offset= float( trim_sec[1:len(trim_sec)-1].split(':')[0] )-1
    # Find Bining #
    try:
        bining= float( lamp_header["PARAM18"] )
    except:
        bining= float( lamp_header["PG3_2"] )
    # Get Pixel Numbers #
    nx= np.size(lamp_spec)
    Pixels= bining*(np.arange(0,nx,1)+trim_offset)
    # Select Set of Parameters to use #
    # (parm is global because newzeropoint/WaveShift read it later.)
    global parm
    if lamp.lower().__contains__('red'):
        parm= Param_930_20_40
        line_list= WaveList_Fe_930_20_40
    elif lamp.lower().__contains__('blue'):
        parm= Param_930_12_24
        line_list= WaveList_Fe_930_12_24
    else:
        print "Could not detect setup!"
    # Calculate Initial Guess Solution # ========================================
    alpha= float( lamp_header["GRT_TARG"] )
    theta= float( lamp_header["CAM_TARG"] )
    Wavelengths= DispCalc(Pixels, alpha, theta, parm[0], parm[1], parm[2], parm[3])
    # Ask for offset # ===========================================================
    print offset_file
    if offset_file:
        print 'Using offset file: ', offset_file
        offsets = np.genfromtxt(offset_file,dtype='d')
        if offsets.size == 1:
            offsets = np.array([offsets])
        #print offsets
        # Convention: first entry is the blue offset, second is the red.
        if 'blue' in lamp.lower():
            offset = offsets[0]
        elif 'red' in lamp.lower():
            offset = offsets[1]
        Wavelengths= [w+offset for w in Wavelengths]
    else:
        # Plot Dispersion #
        plt.figure(1)
        plt.plot(Wavelengths, lamp_spec)
        plt.hold('on')
        for line in line_list[1]:
            if (Wavelengths[0] <= line <= Wavelengths[-1]):
                plt.axvline(line, color= 'r', linestyle= '--')
        plt.title("Initial Dispersion Inspection Graph. \nClose to Calculate Offset")
        plt.xlabel("Wavelengths")
        plt.ylabel("Counts")
        plt.hold('off')
        plt.show()
        print "\nWould You like to set Offset?"
        yn= raw_input('yes/no? >>> ')
        #yn= 'yes'
        if yn== 'yes':
            # Interactive offset: user clicks a known line, then its peak.
            global ax, fig, coords
            fig = plt.figure(1)
            ax = fig.add_subplot(111)
            ax.plot(Wavelengths, lamp_spec)
            plt.hold('on')
            for line in line_list[1]:
                if (Wavelengths[0] <= line <= Wavelengths[-1]):
                    plt.axvline(line, color= 'r', linestyle= '--')
            plt.title("First click known line(red), then click coresponding peak near center\n Then close graph.")
            plt.xlabel("Wavelengths (Ang.)")
            plt.ylabel("Counts")
            if lamp.__contains__('blue'):
                plt.xlim(4700.,4900.)
            elif lamp.__contains__('red'):
                plt.xlim(6920.,7170.)
            plt.hold('off')
            coords= []
            cid = fig.canvas.mpl_connect('button_press_event', onclick)
            plt.show()
            k_line= find_near(coords[0][0], line_list[1]) # Nearest line to click cordinates
            k_peak= find_near(coords[1][0], Wavelengths) # Nearest Peak to click cordinates
            i_peak= Wavelengths.index(k_peak)
            X= Wavelengths[i_peak-7:i_peak+7]
            Y= lamp_spec[i_peak-7:i_peak+7]
            amp, center, width, b= fit_Gauss(X,Y)
            offset= (k_line-center)
            ##########
            #Save the offset
            print '\n Would you like to save the offset?'
            save_offset = raw_input('yes/no? >>> ')
            if save_offset == 'yes':
                print 'Saving offset to offsets.txt'
                g = open('offsets.txt','a')
                g.write(str(offset) + '\n')
                g.close()
            ##########
            Wavelengths= [w+offset for w in Wavelengths]
            plt.figure(1)
            plt.plot(Wavelengths, lamp_spec)
            plt.hold('on')
            for line in line_list[1]:
                if (Wavelengths[0] <= line <= Wavelengths[-1]):
                    plt.axvline(line, color= 'r', linestyle= '--')
            plt.title("Offset Applied.")
            plt.xlabel("Wavelengths (Ang.)")
            plt.ylabel("Counts")
            plt.hold('off')
            plt.show()
        else:
            offset = 0.
    # Ask Refit # ===============================================================
    # Refit loop: repeats once more without the first blue line if the RMS
    # of the blue solution exceeds 1 pixel.
    yn= 'yes'
    while yn== 'yes':
        #print "\nWould you like to refit and recalculate dispersion?"
        #yn= raw_input('yes/no? >>> ')
        yn = 'yes'
        if yn== 'yes' :
            #print "\nOffset to apply to Grating Angle?"
            #alpha_offset= float( raw_input('Offset Value? >>>') )
            alpha_offset = 0.
            #alpha= alpha + alpha_offset
            '''
            #Uncomment this part if you would like to select lines to use by hand. Otherwise, all lines in the above line lists are used.
            fig = plt.figure(1)
            ax = fig.add_subplot(111)
            ax.plot(Wavelengths, lamp_spec)
            plt.hold('on')
            lines_in_range= []
            for line in line_list[1]:
                if (Wavelengths[0] <= line <= Wavelengths[-1]):
                    lines_in_range.append(line)
                    plt.axvline(line, color= 'r', linestyle= '--')
            plt.title("Click on The Peaks You Want to Use to Refit \n Then close graph.")
            plt.xlim([np.min(lines_in_range)-50, np.max(lines_in_range)+50])
            plt.ylim([np.min(lamp_spec)-100, np.max(lamp_spec)/2])
            plt.xlabel("Wavelengths (Ang.)")
            plt.ylabel("Counts")
            plt.hold('off')
            coords= []
            cid = fig.canvas.mpl_connect('button_press_event', onclick)
            plt.show()
            '''
            ###n_pnt, n_cor= np.shape(coords)
            ###coord_x= [coords[i][0] for i in range(0,n_pnt)]
            coord_x = line_list[1] #Use all lines in the line lists for the refitting.
            n_pnt = len(coord_x)
            peak_x= []
            for i in range(0,n_pnt):
                x= find_near(coord_x[i], Wavelengths)
                peak_x.append(x)
            centers_in_wave= find_peak_centers(peak_x, Wavelengths, lamp_spec)
            # Remove the applied offset, then map centers back to pixels.
            centers_in_wave= [w-offset for w in centers_in_wave]
            centers_in_pix= PixCalc(centers_in_wave, alpha, theta, parm[0], parm[1], parm[2], parm[3])
            known_waves= []
            for i in range(0,n_pnt):
                x= find_near(coord_x[i], line_list[1])
                known_waves.append(x)
            #Create array to save data for diagnostic purposes
            global savearray, n_fr, n_fd, n_zPnt
            savearray = np.zeros([len(Wavelengths),8])
            #n_fr, n_fd, n_zPnt= fit_Grating_Eq(centers_in_pix, known_waves, alpha, theta, parm)
            par, rmsfit = fit_Grating_Eq(centers_in_pix, known_waves, alpha, theta, parm,plotalot=plotall)
            n_fr, n_fd, n_zPnt = par
            n_Wavelengths= DispCalc(Pixels, alpha-alpha_offset, theta, n_fr, n_fd, parm[2], n_zPnt)
            if plotall:
                plt.figure(1)
                plt.plot(n_Wavelengths, lamp_spec)
                plt.hold('on')
                for line in line_list[1]:
                    if (n_Wavelengths[0] <= line <= n_Wavelengths[-1]):
                        plt.axvline(line, color= 'r', linestyle= '--')
                plt.title("Refitted Solution")
                plt.xlabel("Wavelengths (Ang.)")
                plt.ylabel("Counts")
                plt.hold('off')
            savearray[0:len(n_Wavelengths),2] = n_Wavelengths
            savearray[0:len(lamp_spec),3] = lamp_spec
            savearray[0:len(np.array(line_list[1])),4] = np.array(line_list[1])
            '''
            plt.figure(2)
            Diff= [ (Wavelengths[i]-n_Wavelengths[i]) for i in range(0,np.size(Wavelengths)) ]
            plt.plot(Diff, '.')
            plt.title("Diffence between old and new solution.")
            plt.xlabel("Pixel")
            plt.ylabel("old-new Wavelength (Ang.)")
            '''
            plt.show()
            if ('blue' in lamp.lower()) and (rmsfit > 1.0):
                coord_list_short = line_list[0][1:]
                wave_list_short = line_list[1][1:]
                line_list = np.array([coord_list_short,wave_list_short])
                print 'Refitting without first line.'
                yn = 'yes'
            else:
                yn = 'no' #Don't refit again
    # Save parameters in header and write file #
    #print "\nWrite solution to header?"
    #yn= raw_input("yes/no? >>>")
    print '\n Writing solution to header'
    yn = 'yes'
    if yn== "yes":
        newname = 'w'+lamp
        mylist = [True for f in os.listdir('.') if f == newname]
        exists = bool(mylist)
        clob = False
        if exists:
            print 'File %s already exists.' % newname
            nextstep = raw_input('Do you want to overwrite or designate a new name (overwrite/new)? ')
            if nextstep == 'overwrite':
                clob = True
                exists = False
            elif nextstep == 'new':
                newname = raw_input('New file name: ')
                exists = False
        else:
            exists = False
        rt.Fix_Header(lamp_header)
        lamp_header.append( ('LINDEN', n_fr,'Line Desity for Grating Eq.'),
                           useblanks= True, bottom= True )
        lamp_header.append( ('CAMFUD', n_fd,'Camera Angle Correction Factor for Grat. Eq.'),
                           useblanks= True, bottom= True )
        lamp_header.append( ('FOCLEN', parm[2],'Focal Length for Grat Eq.'),
                           useblanks= True, bottom= True )
        lamp_header.append( ('ZPOINT', n_zPnt,'Zero Point Pixel for Grat Eq.'),
                           useblanks= True, bottom= True )
        lamp_header.append( ('RMSWAVE',rmsfit, 'RMS from Wavelength Calib.'),
                           useblanks= True, bottom= True )
        NewHdu = fits.PrimaryHDU(data= lamp_data, header= lamp_header)
        NewHdu.writeto(newname, output_verify='warn', clobber= clob)
    #Save parameters to ZZ Ceti spectrum#
    #print "\nWrite solution to header of another spectrum?"
    #yn= raw_input("yes/no? >>>")
    if zz_specname:
        #specname = raw_input("Filename: ")
        #fitspectrum = raw_input('Would you like to fit a new zero point using a spectral line? (yes/no) ')
        if fit_zpoint == 'yes':
            newzeropoint = WaveShift(zz_specname,zzceti,plotall)
        else:
            newzeropoint = n_zPnt
        spec_data= fits.getdata(zz_specname)
        spec_header= fits.getheader(zz_specname)
        rt.Fix_Header(spec_header)
        spec_header.append( ('LINDEN', n_fr,'Line Desity for Grating Eq.'),
                           useblanks= True, bottom= True )
        spec_header.append( ('CAMFUD', n_fd,'Camera Angle Correction Factor for Grat. Eq.'),
                           useblanks= True, bottom= True )
        spec_header.append( ('FOCLEN', parm[2],'Focal Length for Grat Eq.'),
                           useblanks= True, bottom= True )
        spec_header.append( ('ZPOINT', newzeropoint,'Zero Point Pixel for Grat Eq.'),
                           useblanks= True, bottom= True )
        spec_header.append( ('RMSWAVE',rmsfit, 'RMS from Wavelength Calib.'),
                           useblanks= True, bottom= True )
        NewspecHdu = fits.PrimaryHDU(data= spec_data, header= spec_header)
        newname = 'w'+zz_specname
        mylist = [True for f in os.listdir('.') if f == newname]
        exists = bool(mylist)
        clob = False
        if exists:
            print 'File %s already exists.' % newname
            nextstep = raw_input('Do you want to overwrite or designate a new name (overwrite/new)? ')
            if nextstep == 'overwrite':
                clob = True
                exists = False
            elif nextstep == 'new':
                newname = raw_input('New file name: ')
                exists = False
        else:
            exists = False
        NewspecHdu.writeto(newname, output_verify='warn', clobber= clob)
        #Save arrays for diagnostics
        now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
        endpoint = '.ms'
        with open('wavecal_' + zz_specname[4:zz_specname.find(endpoint)] + '_' + now + '.txt','a') as handle:
            header = lamp + ',' + zz_specname + '\n First 2 columns: fitted wavelengths, residuals \n Next 3 columns: wavelengths, flux, lambdas fit \n Final 3 columns: wavelengths, sky flux, fit to line for recentering'
            np.savetxt(handle,savearray,fmt='%f',header = header)
# ==========================================================================
if __name__ == '__main__':
    # Command-line entry point: expects the lamp filename as the single
    # argument, then interactively collects the remaining options.
    from sys import argv
    script, lamp = argv
    print "\nWrite solution to header of another spectrum?"
    yn= raw_input("yes/no? >>>")
    if yn == 'yes':
        zz_specname = raw_input("Filename: ")
        fit_zpoint = raw_input('Would you like to fit a new zero point using a spectral line? (yes/no) ')
        zzceti = raw_input('Is this a ZZ Ceti? (yes/no): ')
    else:
        # No science spectrum: skip zero-point fitting entirely.
        zz_specname = None
        zzceti = 'no'
        fit_zpoint = 'no'
    print "\nDoes an offset file already exist?"
    yn_off= raw_input("yes/no? >>>")
    if yn_off == 'yes':
        offset_file = raw_input("Filename: ")
    else:
        offset_file = None
    calibrate_now(lamp,zz_specname,fit_zpoint,zzceti,offset_file,plotall=True)
| |
'''
Project: Prism
Author: Kain Liu
'''
from PIL import Image
from cossim import cossim
from sketch import sketch
from numpy import *
import time
import os, sys
import scipy.spatial.distance as dis
# total number of pictures in the dataset (24k)
population = 24000
# number of random projection vectors used when sketching
rv_number = 256
# line indices (see id2line) of the sample images used in the experiments
samples = [ 2158, 7418, 7757, 9824, 22039,
            16336, 7463, 4595, 20159, 17348,
            19166, 23112, 16678, 2084, 11398,
            19557, 14867, 5437, 13122, 20811]
'''
Generate a signature based on colour information
'''
def color_sig(file, seg = 4):
print file
try:
im = Image.open(file)
print(im.format, im.size, im.mode)
except:
print "Unable to load image!"
w, h = im.size
colors = im.getcolors(w*h)
color_counter = {}
def cut(x, n=16):
return x / (256 / n)
for color in colors:
key = []
for x in color[1]:
key.append(cut(x, seg))
key = str(key)
color_counter.setdefault(key, []).append(color[0])
hash_result = []
# loop throught rgb colors
for r in range(0, seg):
for g in range(0, seg):
for b in range(0, seg):
key = str([r, g, b])
if key in color_counter:
val = sum(color_counter[key])
else:
val = 0
# optional: ignore background color which is black
'''
if r == 0 and g == 0 and b == 0:
val = 0
'''
# optional: ignore the color takes up too much weight
'''
if val > 10000:
val = 0
'''
hash_result.append(val)
return hash_result
'''
calculate which size is the best choice for bins
'''
def bin_size():
    # Experiment: compare candidate per-channel bin counts by computing
    # cosine similarities between two objects' image collections (A=251,
    # B=255, three camera views each) and printing a LaTeX table per size.
    for i in (2, 4, 8, 16, 32, 64):
        # compare image collections of two objects
        a1 = color_sig('dataset/251_l3c1.png', i)
        a2 = color_sig('dataset/251_l3c2.png', i)
        a3 = color_sig('dataset/251_l3c3.png', i)
        b1 = color_sig('dataset/255_l3c1.png', i)
        b2 = color_sig('dataset/255_l3c2.png', i)
        b3 = color_sig('dataset/255_l3c3.png', i)
        # generate a latex table
        print "====== i:", i, " ======"
        print '& $A_1$ &',cossim_3(a1, a1), '&',cossim_3(a1, a2), '&',cossim_3(a1, a3), '&',cossim_3(a1, b1), '&',cossim_3(a1, b2), '&',cossim_3(a1, b3), '\\\\ \cline{2-8}'
        print '& $A_2$ &',cossim_3(a2, a1), '&',cossim_3(a2, a2), '&',cossim_3(a2, a3), '&',cossim_3(a2, b1), '&',cossim_3(a2, b2), '&',cossim_3(a2, b3), '\\\\ \cline{2-8}'
        print '& $A_3$ &',cossim_3(a3, a1), '&',cossim_3(a3, a2), '&',cossim_3(a3, a3), '&',cossim_3(a3, b1), '&',cossim_3(a3, b2), '&',cossim_3(a3, b3), '\\\\ \cline{2-8}'
        print '& $B_1$ &',cossim_3(b1, a1), '&',cossim_3(b1, a2), '&',cossim_3(b1, a3), '&',cossim_3(b1, b1), '&',cossim_3(b1, b2), '&',cossim_3(b1, b3), '\\\\ \cline{2-8}'
        print '& $B_2$ &',cossim_3(b2, a1), '&',cossim_3(b2, a2), '&',cossim_3(b2, a3), '&',cossim_3(b2, b1), '&',cossim_3(b2, b2), '&',cossim_3(b2, b3), '\\\\ \cline{2-8}'
        print '& $B_3$ &',cossim_3(b3, a1), '&',cossim_3(b3, a2), '&',cossim_3(b3, a3), '&',cossim_3(b3, b1), '&',cossim_3(b3, b2), '&',cossim_3(b3, b3), '\\\\ \cline{2-8}'
def sig(start = 1, end = 1000):
    # Generate colour signatures for all images of objects [start, end] and
    # write them, one image per line, to result/sig.txt (space-separated
    # integers).  Line order matches id2line: 8 layers x 3 cameras per object.
    # NOTE: `file` shadows the builtin and time.clock() is Python-2-era.
    file = open("result/sig.txt", "w")
    t0 = time.clock()
    for i in range(start, end + 1):
        for j in range(1, 9):
            for k in range(1, 4):
                h = color_sig(id2path(i, j, k))
                # strip list punctuation so each line is plain numbers
                file.write(str(h).replace(",","").replace("[","").replace("]",""))
                file.write("\n")
        print "{0} of {1}".format(i, end - start + 1)
    file.close()
    print "sig.txt finish."
    print time.clock() - t0, "seconds in generating signatures"
def matrix():
    # Estimate similarity via sketches: the Gram matrix of the sketch
    # columns approximates pairwise similarity.  Saves the rows for the
    # sample images to result/matrix-sample.txt.
    t0 = time.clock()
    sketches = open_sketch()
    # sketch has #vectors rows and #image columns
    # every row is the result multiplied by one random vector
    result = dot(sketches.transpose(), sketches)
    # save result
    print time.clock() - t0, "seconds in generating matrix"
    m = zeros([len(samples), population])
    for i in range(len(samples)):
        m[i] = result[samples[i]]
    savetxt('result/matrix-sample.txt', m, fmt='%i')
def cos():
    # Compute the exact cosine similarity between every sample image and
    # the entire population, and save the matrix (one row per sample) to
    # result/similarity-sample.txt.
    signatures = open_sig()
    sims = zeros([len(samples), population])
    for row, sample_id in enumerate(samples):
        for col in range(0, population):
            sims[row][col] = cossim_3(signatures[sample_id], signatures[col])
    savetxt('result/similarity-sample.txt', sims, fmt='%.3f')
def sketch():
t0 = time.clock()
m = open_sig()
print "signature matrix size is {0} x {1}".format(m.shape[0], m.shape[1])
sketches = sketch(m, rv_number)
print "sketch matrix size is {0} x {1}".format(sketches.shape[0], sketches.shape[1])
print time.clock() - t0, "seconds in generating sketches"
savetxt('result/sketch.txt', sketches, fmt='%d')
def similar(i, j, k):
    # Find the 32 nearest neighbours of image (object i, layer j, camera k)
    # by scoring sketch agreement against every other image, and print the
    # resulting (i, j, k) triples.
    # only calculate all pairs of given image with rest images
    line = id2line(i, j, k)
    sketches = open_sketch()
    t0 = time.clock()
    '''
    def nested_loop(sketches):
        h = len(sketches)
        w = len(sketches[0])
        _r = []
        for i in range(0, w):
            intersection = 0
            for k in range(0, h):
                if sketches[k][i] == sketches[k][line]:
                    intersection += 1
            _r.append(round(
                float(intersection) / float(w),
                4
            ))
        return _r
    pre_sim = nested_loop(sketches)
    '''
    def transpose_dot(sketches):
        # Score every column against the query column in one dot product.
        # NOTE(review): unlike the commented nested_loop above this score is
        # not normalised by the sketch length -- confirm rankings agree.
        result = dot(sketches.transpose()[line], sketches)
        return result
    pre_sim = transpose_dot(sketches)
    # get top n
    # argsort(line)[-n:] #last n elements
    # [::-1] # reverse
    n = 32
    top_n = argsort(pre_sim)[-n:][::-1]
    result = []
    path = []
    for top in top_n:
        di = line2id(top)
        result.append( di )
        path.append( id2path(di[0],di[1],di[2]) )
    print time.clock() - t0, "seconds in finding similar items"
    print result
def similar_all():
    # For every image in the population, find its 32 nearest neighbours by
    # sketch agreement and append a MongoDB insert statement per image to
    # result/all-to-mongodb.txt.
    def transpose_dot(_sketches, _line):
        # Score every column against column _line in one dot product.
        result = dot(_sketches.transpose()[_line], _sketches)
        return result
    # only calculate all pairs of given image with rest images
    file = open("result/all-to-mongodb.txt", "w")
    sketches = open_sketch()
    t00 = time.clock()
    for i in range(0, population):
        t0 = time.clock()
        pre_sim = transpose_dot(sketches, i)
        # get top n
        # argsort(line)[-n:] #last n elements
        # [::-1] # reverse
        n = 32
        top_n = argsort(pre_sim)[-n:][::-1]
        result = []
        path = []
        for top in top_n:
            di = line2id(top)
            result.append( di )
            path.append( id2path(di[0],di[1],di[2]) )
        print i, ' : ', time.clock() - t0, "s"
        # print result
        # Mongodb insert similar neighbors for each picture
        # print(i, path)
        file.write("db.similarPic.insert({{ id: {} , neighbors: {} }})".format(i, path))
        file.write("\n")
    print "Total {}s".format(time.clock() - t00)
    file.close()
'''
loader functions
'''
def open_sig():
t0 = time.clock()
m = loadtxt("result/sig.txt")
print time.clock() - t0, "seconds in opening signatures"
return m
# def open_matrix():
# t0 = time.clock()
# m = loadtxt("('result/matrix.txt")
# print time.clock() - t0, "seconds in opening signatures"
# return m.shape
def open_sketch():
t0 = time.clock()
m = loadtxt("result/sketch.txt")
print time.clock() - t0, "seconds in opening sketches"
return m
'''
helper functions
'''
def id2path(i, j, k):
    # Dataset path for object i, layer j, camera k.
    template = "dataset/{0}/{0}_l{1}c{2}.png"
    return template.format(i, j, k)
def id2line(i, j, k):
    # Map 1-based (object, layer, camera) to a 0-based line index;
    # each object has 8 layers x 3 cameras = 24 images.
    per_object = (i - 1) * 24
    per_layer = (j - 1) * 3
    return per_object + per_layer + (k - 1)
def line2id(line):
    # Inverse of id2line: map a 0-based line index back to the 1-based
    # (object, layer, camera) triple.
    # BUG FIX: use explicit floor division so the arithmetic is correct
    # under Python 3 as well (`/` on ints yields a float there); the
    # result is unchanged under Python 2.
    a = line // 24 + 1
    b = line % 24 // 3 + 1
    c = line % 24 % 3 + 1
    return a, b, c
def cossim_3(x, y):
    # Cosine similarity of x and y rounded to 3 decimal places.
    similarity = cossim(x, y)
    return round(similarity, 3)
'''
main function
'''
if __name__ == "__main__":
    # Dispatch on the first command-line argument; no argument prints help.
    c = sys.argv[1] if len(sys.argv) > 1 else ""
    if c == "sig":
        sig()
    elif c == "sketch":
        if len(sys.argv) > 2:
            rv = int(sys.argv[2])
        else:
            rv = 256
        print 'INFO: ', rv, ' random vectors'
        # NOTE(review): sketch() is defined above without parameters --
        # confirm it accepts the rv argument.
        sketch(rv)
    elif c == "cos":
        cos()
    elif c == "matrix":
        matrix()
    elif c == "similar":
        if len(sys.argv) > 4:
            similar(
                int(sys.argv[2]),
                int(sys.argv[3]),
                int(sys.argv[4])
            )
        else:
            print 'ERROR: Please identify the picture id.'
    elif c == "all":
        similar_all()
    elif c == 'lsh':
        # NOTE(review): lsh_all is not defined anywhere in this module --
        # this branch raises NameError; confirm where it should come from.
        lsh_all()
    elif c == 'bin_size':
        bin_size()
    else:
        print '''
        Welcome to Prism.
        Options:
        * sig : generate Signatures based on the colours distribution.
        * sketch : generate Sketches based on Signatures.
        * cos : calculate the Cosine Similarity between samples and all population.
        * matrix : calculate the similarity matrix based on Sketeches
        * similar : find similar candidates for one image
        * all : find similar candidates for all images, generate a mongdb sql as output
        * bin_size : experiments to optimize bin size
        '''
| |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import os
import tct
import sys
#
import os
# Read the toolchain communication files: params (from argv), facts and
# milestones, plus the running result object this tool appends to.
params = tct.readjson(sys.argv[1])
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
toolname = params['toolname']
toolname_pure = params['toolname_pure']
toolchain_name = facts['toolchain_name']
workdir = params['workdir']
# Ensure result['loglist'] exists and keep a local alias for logging.
loglist = result['loglist'] = result.get('loglist', [])
exitcode = CONTINUE = 0

# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
    tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])

# ==================================================
# Helper functions
# --------------------------------------------------
deepget = tct.deepget
def lookup(D, *keys, **kwdargs):
    """Fetch a (possibly nested) value from D via deepget and log the access."""
    value = deepget(D, *keys, **kwdargs)
    loglist.append((keys, value))
    return value
# ==================================================
# define
# --------------------------------------------------
# first: placeholders for every value this tool may report as a milestone
checksum_file = None
checksum_old = None
checksum_time = None
checksum_time_decoded = None
composerjson = None
documentation_folder_exists = None
email_user_receivers_exlude_list = lookup(milestones, 'email_user_receivers_exlude_list', default=[])
emails_found_in_projects = None
emails_user_from_project = None
has_settingscfg = None
has_settingsyml = None
localization_has_localization = None
NAMING = milestones.get('NAMING', {})
settingscfg_file = None
settingsyml_file = None
# second: make sure the NAMING dict carries its explanatory meta entry
NAMING['meta'] = NAMING.get('meta', 'Here we keep names and values that are looking good')
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
    loglist.append('CHECK PARAMS')
    # TheProject (the project root folder) is required for all work below.
    TheProject = lookup(milestones, 'TheProject')
    if not (TheProject):
        exitcode = 22
        reason = 'Bad PARAMS or nothing to do'
    if exitcode == CONTINUE:
        loglist.append('PARAMS are ok')
    else:
        loglist.append(reason)
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
    import datetime
    import glob
    import re
    # Inspect the project tree: documentation folder, settings files and
    # any Documentation/Localization.* folders.
    documentation_folder = os.path.join(TheProject, 'Documentation')
    documentation_folder_exists = os.path.isdir(documentation_folder)
    settingscfg_file = os.path.join(TheProject, 'Documentation/Settings.cfg')
    has_settingscfg = os.path.exists(settingscfg_file)
    settingsyml_file = os.path.join(TheProject, 'Documentation/Settings.yml')
    has_settingsyml = os.path.exists(settingsyml_file)
    pattern = TheProject + '/Documentation/Localization.*'
    loglist.append({'pattern': pattern})
    folders = glob.glob(pattern)
    loglist.append({'folders': folders})
    localization_has_localization = len(folders) > 0

if exitcode == CONTINUE:
    # Read the previous build checksum (if any) and its modification time.
    makedir = milestones['makedir']
    checksum_file = os.path.join(makedir, 'build.checksum')
    checksum_old = None
    checksum_time = None
    if os.path.exists(checksum_file):
        with open(checksum_file, 'r') as f1:
            checksum_old = f1.read()
        checksum_old = checksum_old.strip()
        checksum_time = int(os.path.getmtime(checksum_file))

if exitcode == CONTINUE:
    # Load composer.json if the project provides one.
    composerjson = None
    composerjson_file = os.path.join(TheProject, 'composer.json')
    if os.path.exists(composerjson_file):
        composerjson = tct.readjson(composerjson_file)

if exitcode == CONTINUE:
    # Scan well-known project files for email addresses.
    emails_found_in_projects = []
    for fname in sorted(['ext_emconf.php', 'Documentation/Index.rst']):
        fpath = os.path.join(TheProject, fname)
        if os.path.exists(fpath):
            with open(fpath, 'r') as f1:
                data = f1.read()
            # emails
            findings = re.findall(r'[\w\.-]+@[\w\.-]+', data, re.MULTILINE)
            if findings:
                # deduplicate while keeping sorted order
                unique = []
                for emadr in sorted(findings):
                    if emadr not in unique:
                        unique.append(emadr)
                emails_found_in_projects.append((fname, unique))
    # project_name
    # project_version

if exitcode == CONTINUE:
    # Collect user emails, skipping the exclude list (case-insensitive).
    excluded_emails_lowercase = [e.lower() for e in email_user_receivers_exlude_list]
    emails_user_from_project = []
    if emails_found_in_projects:
        for filename, emails in emails_found_in_projects:
            for email in emails:
                email_lower = email.lower()
                if email_lower not in excluded_emails_lowercase:
                    if email_lower not in emails_user_from_project:
                        emails_user_from_project.append(email_lower)

if exitcode == CONTINUE:
    # Derive "nice" names for the project and its build artifacts.
    NAMING['project_name'] = milestones.get('buildsettings', {}).get('project', 'PROJECT')
    NAMING['project_version'] = milestones.get('buildsettings', {}).get('version', 'VERSION')
    NAMING['project_language'] = milestones.get('buildsettings', {}).get('package_language', 'default').lower()
    NAMING['pdf_name'] = ('manual.' +
                          NAMING['project_name'] + '-' +
                          NAMING['project_version'] + '.pdf'
                          )
    NAMING['package_name'] = (
        NAMING['project_name'] + '-' +
        NAMING['project_version'] + '-' +
        NAMING['project_language'] + '.zip'
    )
# ==================================================
# Set MILESTONE
# --------------------------------------------------
def decode_timestamp(unixtime):
return datetime.datetime.fromtimestamp(unixtime).strftime('%Y-%m-%d %H:%M')
# Collect only the milestones that actually have a value.
D = {}
if documentation_folder_exists:
    D['documentation_folder'] = documentation_folder
if has_settingscfg:
    D['has_settingscfg'] = has_settingscfg
    D['settingscfg_file'] = settingscfg_file
if has_settingsyml:
    D['has_settingsyml'] = has_settingsyml
    D['settingsyml_file'] = settingsyml_file
if localization_has_localization:
    D['localization_has_localization'] = localization_has_localization
if checksum_file:
    D['checksum_file'] = checksum_file
if checksum_old:
    D['checksum_old'] = checksum_old
if checksum_time:
    D['checksum_time'] = checksum_time
    D['checksum_time_decoded'] = decode_timestamp(checksum_time)
if emails_found_in_projects:
    D['emails_found_in_projects'] = emails_found_in_projects
if composerjson:
    D['composerjson'] = composerjson
if 'always':  # non-empty string literal: NAMING is reported unconditionally
    D['NAMING'] = NAMING
if emails_user_from_project:
    D['emails_user_from_project'] = emails_user_from_project
result['MILESTONES'].append(D)

# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)

# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
| |
# -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
'''
Run MODIS based flood detection algorithms on many lakes at a single time
and log the results compared with the permanent water mask.
'''
import logging
logging.basicConfig(level=logging.ERROR)

# Make the cmt package importable: if it is not on sys.path yet, add the
# parent directory of this script and retry the import.
try:
    import cmt.ee_authenticate
except:
    import sys
    import os.path
    sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
    import cmt.ee_authenticate

# Authenticate with Earth Engine before anything else touches ee.
cmt.ee_authenticate.initialize()

import sys
import time
import os
import csv
import ee
import numpy
import traceback

import cmt.domain
import cmt.util.processManyLakes
import cmt.modis.flood_algorithms
import cmt.util.evaluation
import cmt.util.miscUtilities
from cmt.util.processManyLakes import LakeDataLoggerBase
class LoggingClass(LakeDataLoggerBase):
    '''Log MODIS flood detection results for a lake compared with the permanent water mask'''

    def __init__(self, logDirectory, ee_lake, lake_name):
        '''Open and prep the output file'''
        # Base class init function
        LakeDataLoggerBase.__init__(self, logDirectory, ee_lake, lake_name)
        # Get the file path
        filePrefix = LakeDataLoggerBase.computeLakePrefix(self)
        self.logFolder = filePrefix + os.path.sep
        self.logPath = os.path.join(self.logFolder, 'MODIS_log.csv')
        # Read in any existing data from the file
        self.entryList = LoggingClass.readAllEntries(self.logPath)

    def __del__(self):
        '''On destruction write out the file to disk'''
        # NOTE(review): relying on __del__ for persistence is fragile; Python
        # does not guarantee __del__ runs at interpreter shutdown.
        if self.entryList:
            self.writeAllEntries()

    def getLakeDirectory(self):
        '''The folder where the log is written'''
        return self.logFolder

    def saveResultsImage(self, classifiedImage, ee_bounds, imageName, cloudMask, waterMask, resolution=30):
        '''Records a diagnostic image to the log directory'''
        if not os.path.exists(self.logFolder):  # Create folder if it does not exist
            os.mkdir(self.logFolder)
        # Currently we are not using the modis image
        # Red channel is detected, blue channel is water mask, green is constant zero.
        mergedImage = classifiedImage.addBands(ee.Image(0)).addBands(waterMask)
        mergedImage = mergedImage.Or(cloudMask)  # TODO: Make sure this is working!
        vis_params = {'min': 0, 'max': 1}  # Binary image data
        imagePath = os.path.join(self.logFolder, imageName + '.tif')
        return cmt.util.miscUtilities.downloadEeImage(mergedImage, ee_bounds, resolution, imagePath, vis_params)
        #return cmt.util.miscUtilities.downloadEeImage(cloudRgb, ee_bounds, resolution, imagePath, vis_params)

    def saveModisImage(self, modisImage, ee_bounds, imageName):
        '''Record the input MODIS image to the log directory'''
        if not os.path.exists(self.logFolder):  # Create folder if it does not exist
            os.mkdir(self.logFolder)
        imagePath = os.path.join(self.logFolder, imageName)
        vis_params = {'min': 0, 'max': 8000, 'bands': ['sur_refl_b01', 'sur_refl_b02', 'sur_refl_b06']}
        if not os.path.exists(imagePath):  # Don't overwrite this image
            # NOTE(review): returns None when the image already exists.
            return cmt.util.miscUtilities.downloadEeImage(modisImage, ee_bounds, 250, imagePath, vis_params)

    def findRecordByDate(self, date):
        '''Searches for a record with a particular date and returns it'''
        try:
            return self.entryList[date]
        except:
            return None

    def addDataRecord(self, dataRecord):
        '''Adds a new record to the log'''
        # Records are keyed by their 'date' entry.
        key = dataRecord['date']
        self.entryList[key] = dataRecord

    @staticmethod
    def dictToLine(dataRecord):
        '''Converts an input data record dictionary to a line of text'''
        # Add the fixed elements
        s = dataRecord['date']+', '+dataRecord['satellite']
        # Add all the algorithm results
        for k in dataRecord:
            if k in ['date', 'satellite']:  # Don't double-write these
                continue
            v = dataRecord[k]
            if v == False:  # Log invalid data
                s += ', '+k+', NA, NA, NA'
            else:  # Log valid data: Algorithm, precision, recall, eval_resolution
                s += ', '+k+', '+str(v[0])+', '+str(v[1])+', '+str(v[2])
        return (s + '\n')

    @staticmethod
    def lineToDict(line):
        '''Extract the information from a single line in the log file in to a dictionary object'''
        MAX_ALGS = 15  # Used for sanity check
        NUM_HEADER_VALS = 2  # Date, MODIS
        ELEMENTS_PER_ALGORITHM = 4  # (alg name, precision, recall, eval_res)
        thisDict = dict()
        parts = line.split(',')
        # NOTE(review): '/' is integer division under Python 2 (this file uses
        # print statements); it would yield a float in Python 3 and break range().
        numAlgs = (len(parts) - NUM_HEADER_VALS) / ELEMENTS_PER_ALGORITHM  # Date, MODIS, (alg name, precision, recall, eval_res)...
        if numAlgs > MAX_ALGS:  # Error checking
            print line
            raise Exception('Error: Too many algorithms found!')
        thisDict['date' ] = parts[0]
        thisDict['satellite'] = parts[1]
        for i in range(numAlgs):  # Loop through each algorithm
            startIndex = i*ELEMENTS_PER_ALGORITHM + NUM_HEADER_VALS
            algName = parts[startIndex].strip()
            if (parts[startIndex+1].strip() == 'NA'):  # Check if this was logged as a failure
                thisDict[algName] = False
            else:  # Get the successful log results
                precision = float(parts[startIndex+1])
                recall = float(parts[startIndex+2])
                evalRes = float(parts[startIndex+3])
                thisDict[algName] = (precision, recall, evalRes)  # Store the pair of results for the algorithm
        return thisDict

    @staticmethod
    def readAllEntries(logPath):
        '''Reads the entire contents of the log file into a list of dictionaries'''
        # Return an empty dict if the file does not exist
        if not os.path.exists(logPath):
            return dict()
        outputDict = dict()
        # NOTE(review): fileHandle is never closed on the return path below.
        fileHandle = open(logPath, 'r')
        line = fileHandle.readline()  # Skip header line
        while True:
            # Read the next line
            line = fileHandle.readline()
            # If we hit the end of the file return the dictionary
            if not line:
                return outputDict
            # Put all the parts of the line into a dictionary
            thisDict = LoggingClass.lineToDict(line)
            # Put this dict into an output dictionary
            key = thisDict['date']
            outputDict[key] = thisDict
        raise Exception('Should never get here!')

    def writeAllEntries(self):
        '''Dump all the added records to a file on disk'''
        if not os.path.exists(self.logFolder):  # Create folder if it does not exist
            os.mkdir(self.logFolder)
        # Open the file for writing, clobbering any existing file
        fileHandle = open(self.logPath, 'w')
        # Write the header
        fileHandle.write('date, satellite, algorithm, precision, recall, evaluation_resolution\n')
        # Write all the data
        for key in self.entryList:
            line = self.dictToLine(self.entryList[key])
            fileHandle.write(line)
        fileHandle.close()
        return True
#from cmt.mapclient_qt import centerMap, addToMap
#centerMap(-119, 38, 11)
# Constants used to describe how to treat an algorithm result
KEEP = 0                # Existing results will be preserved. Recompute if no entry for data.
RECOMPUTE = 1           # Set an algorithm to this to force recomputation of all results!
RECOMPUTE_IF_FALSE = 2  # Recompute results if we don't have valid results
def getAlgorithmList():
    '''Return the list of available algorithms as (code, name, recompute policy) tuples'''
    alg = cmt.modis.flood_algorithms
    # Currently disabled entries (kept for reference): DEM_THRESHOLD, DNNS,
    # DNNS_REVISED, DNNS_DEM, DIFFERENCE_HISTORY.
    return [
        (alg.EVI,             'EVI',                  KEEP),
        (alg.XIAO,            'XIAO',                 KEEP),
        (alg.DIFF_LEARNED,    'Difference',           KEEP),
        (alg.CART,            'CART',                 KEEP),
        (alg.SVM,             'SVM',                  KEEP),
        (alg.RANDOM_FORESTS,  'Random Forests',       KEEP),
        (alg.DNNS_DIFF,       'DNNS Diff',            KEEP),
        (alg.DNNS_DIFF_DEM,   'DNNS Diff DEM',        KEEP),
        (alg.DART_LEARNED,    'Dartmouth',            KEEP),
        (alg.MARTINIS_TREE,   'Martinis Tree',        KEEP),
        (alg.MODNDWI_LEARNED, 'Mod NDWI',             KEEP),
        (alg.FAI_LEARNED,     'Floating Algae Index', KEEP),
        (alg.ADABOOST,        'Adaboost',             KEEP),
        (alg.ADABOOST_DEM,    'Adaboost DEM',         KEEP),
    ]
def needToComputeAlgorithm(currentResults, algInfo):
    '''Return true if we should compute this algorithm'''
    name = algInfo[1]
    mode = algInfo[2]
    # Always recompute when forced, or when we have no stored result at all.
    if mode == RECOMPUTE:
        return True
    if name not in currentResults:
        return True
    # Otherwise only recompute invalid (False) results when the policy asks for it.
    return (mode == RECOMPUTE_IF_FALSE) and (currentResults[name] == False)
def processing_function(bounds, image, image_date, logger):
    '''Detect water using multiple MODIS algorithms and compare to the permanent water mask'''
    # Define a list of all the algorithms we want to test
    algorithmList = getAlgorithmList()
    waterResults = dict()  # This is where results will be stored

    # First check if we have already processed this data
    existingResults = logger.findRecordByDate(image_date)
    if existingResults:
        # Go ahead and load the existing results into the output dictionary
        waterResults = existingResults
        # Check if we already have all the results we need
        needToRedo = False
        for a in algorithmList:
            # Check conditions to recompute this algorithm result
            if needToComputeAlgorithm(waterResults, a):
                needToRedo = True
                break
        if not needToRedo:  # If we have everything we need, just return it.
            print 'Nothing new to compute'
            return waterResults

    # If we made it to here then we need to run at least one algorithm.
    MAX_CLOUD_PERCENTAGE = 0.05

    # Needed to change EE formats for later function calls
    eeDate = ee.Date(image_date)
    rectBounds = cmt.util.miscUtilities.unComputeRectangle(bounds.bounds())

    # First check the input image for clouds.  If there are too many just raise an exception.
    # NOTE(review): cmt.modis.modis_utilities is not imported explicitly at the
    # top of this file; confirm the cmt.modis package exposes it on import.
    cloudPercentage = cmt.modis.modis_utilities.getCloudPercentage(image, rectBounds)
    if cloudPercentage > MAX_CLOUD_PERCENTAGE:
        cmt.util.processManyLakes.addLakeToBadList(logger.getLakeName(), logger.getBaseDirectory(), image_date)
        raise Exception('Input image has too many cloud pixels!')

    # Get the cloud mask and apply it to the input image
    cloudMask = cmt.modis.modis_utilities.getModisBadPixelMask(image)
    maskedImage = image.mask(cloudMask.Not())  # TODO: Verify this is having an effect!

    # Check if the data is all zero
    onCount = maskedImage.select('sur_refl_b01').reduceRegion(ee.Reducer.sum(), bounds, 4000).getInfo()['sur_refl_b01']
    print 'onCount = ' + str(onCount)
    if onCount < 10:
        cmt.util.processManyLakes.addLakeToBadList(logger.getLakeName(), logger.getBaseDirectory(), image_date)
        raise Exception('Masked image is blank!')

    # Save the input image
    imageName = 'input_modis_' + str(image_date)
    logger.saveModisImage(image, rectBounds, imageName)

    # Get the permanent water mask
    # - We change the band name to make this work with the evaluation function call further down
    waterMask = ee.Image("MODIS/MOD44W/MOD44W_005_2000_02_24").select(['water_mask'], ['b1'])

    # Pick a training image.  We just use the same lake one year in the past.
    trainingStart = eeDate.advance(-1.0, 'year')
    trainingEnd = eeDate.advance(10.0, 'day')

    # Fetch a MODIS image for training
    print 'Retrieving training data...'
    modisTrainingCollection = cmt.util.processManyLakes.get_image_collection_modis(bounds, trainingStart, trainingEnd)
    modisTrainingList = modisTrainingCollection.toList(100)
    modisTrainingInfo = modisTrainingList.getInfo()

    # Find the first image with a low cloud percentage
    trainingImage = None
    for i in range(len(modisTrainingInfo)):
        thisImage = ee.Image(modisTrainingList.get(i))
        cloudPercentage = cmt.modis.modis_utilities.getCloudPercentage(thisImage, rectBounds)
        if cloudPercentage < MAX_CLOUD_PERCENTAGE:
            trainingImage = thisImage
            break
    if not trainingImage:
        raise Exception('Could not find a training image for date ' + str(image_date))

    # Generate a pair of train/test domain files for this lake
    training_date = cmt.util.processManyLakes.get_image_date(trainingImage.getInfo())
    testDomainPath, trainDomainPath = cmt.util.miscUtilities.writeDomainFilePair(logger.getLakeName(), bounds,
                                        ee.Date(image_date), ee.Date(training_date), logger.getLakeDirectory())

    # Load the domains using the standard domain class
    fakeDomain = cmt.domain.Domain(testDomainPath)
    trainingDomain = cmt.domain.Domain(trainDomainPath)

    # Loop through each algorithm
    for a in algorithmList:
        algName = a[1]
        # Skip this iteration if we don't need to recompute this algorithm
        if not needToComputeAlgorithm(waterResults, a):
            continue
        try:
            print 'Running algorithm ' + algName
            # Call function to generate the detected water map
            detectedWater = cmt.modis.flood_algorithms.detect_flood(fakeDomain, a[0])[1]
            #addToMap(detectedWater, {'min': 0, 'max': 1}, a[1], False)

            # Save image of results so we can look at them later
            # - Try at a high resolution and if that fails try a lower resolution
            imageName = 'alg_' + algName.replace(' ', '_') +'_'+ str(image_date)
            FULL_DEBUG_IMAGE_RESOLUTION = 250  # Pixel resolution in meters
            REDUCED_DEBUG_IMAGE_RESOLUTION = 1000
            try:  # High res output
                logger.saveResultsImage(detectedWater, rectBounds, imageName, cloudMask, waterMask, FULL_DEBUG_IMAGE_RESOLUTION)
            except:
                print 'Retrying download at lower resolution.'
                try:  # Low res output
                    logger.saveResultsImage(detectedWater, rectBounds, imageName, cloudMask, waterMask, REDUCED_DEBUG_IMAGE_RESOLUTION)
                except Exception,e:
                    print 'Saving results image failed with exception --> ' + str(e)

            print 'Evaluating detection results...'
            # Compare the detection result to the water mask
            isFractional = False  # Currently not using fractional evaluation, but maybe we should for DNSS-DEM
            (precision, recall, evalRes, noTruthEval) = cmt.util.evaluation.evaluate_approach(detectedWater, waterMask, rectBounds, isFractional)

            # Store the results for this algorithm
            print 'Evaluation results: ' + str(precision) + ' ' + str(recall) +' at resolution ' + str(evalRes)
            waterResults[algName] = (precision, recall, evalRes, noTruthEval)
        except Exception,e:  # Handle any failure that prevents us from obtaining results
            traceback.print_exc(file=sys.stdout)
            print 'Processing results failed with exception --> ' + str(e)
            waterResults[algName] = False  # Mark this as a failure

    # Return the results for each algorithm
    # NOTE(review): freshly computed results carry no 'date' key here, but
    # LoggingClass.addDataRecord expects one — presumably added by the caller.
    waterResults['satellite'] = 'MODIS'
    return waterResults
def compileLakeResults(resultsFolder):
    '''Compiles a single csv file comparing algorithm results across lakes'''
    # Ignore lakes which did not have this many good days for
    MIN_GOOD_DATES = 1

    # Get a list of the algorithms to read
    algorithmList = getAlgorithmList()

    # Create the output file
    outputPath = os.path.join(resultsFolder, 'compiledLogs.csv')
    outputHandle = open(outputPath, 'w')
    print 'Writing composite log file: ' + outputPath

    # Write a header line
    headerLine = 'lake_name'
    for a in algorithmList:
        headerLine += ', '+ str(a[1]) +'_precision, '+ str(a[1]) +'_recall, '+ str(a[1]) +'_eval_res'
    outputHandle.write(headerLine + '\n')

    # Define local helper function
    def prListStats(prList):
        '''Compute the mean and std of a list of precision/recall value pairs'''
        pList = []
        rList = []
        eList = []
        for i in prList:  # Sum the values
            pList.append(i[0])  # Precision
            rList.append(i[1])  # Recall
            eList.append(i[2])  # EvalRes
        return (numpy.mean(pList), numpy.mean(rList), numpy.mean(eList),
                numpy.std(pList), numpy.std(rList), numpy.std(eList))

    # Loop through the directories, one per lake
    algStats = dict()
    for d in os.listdir(resultsFolder):
        thisFolder = os.path.join(resultsFolder, d)
        print thisFolder
        if not (os.path.isdir(thisFolder)):  # Skip non-folders
            continue
        # Skip the directory if it does not contain MODIS_log.csv
        logPath = os.path.join(thisFolder, 'MODIS_log.csv')
        if not os.path.exists(logPath):
            continue
        print 'Reading log file ' + logPath

        # Read in the contents of the log file
        dateResultsDict = LoggingClass.readAllEntries(logPath)

        # For each algorithm...
        statsDict = dict()
        for a in algorithmList:
            alg = a[1]  # Name of the current algorithm
            # Compute the mean precision and recall across all dates for this lake
            prList = []
            for key in dateResultsDict:
                dateResult = dateResultsDict[key]
                try:
                    # Get all the values for this algorithm
                    precision, recall, evalRes = dateResult[alg]
                    prList.append( (precision, recall, evalRes) )
                except:  # This should handle all cases where we don't have data
                    print 'WARNING: Missing results for algorithm ' + alg + ' for lake ' + d
            # Only record something if we got at least one result from the algorithm
            if len(prList) >= MIN_GOOD_DATES:
                # Call local helper function to get the mean precision and recall values
                statsDict[alg] = prListStats(prList)
                # Add the means for this algorithm to a list spanning all lakes
                if alg in algStats:  # Add to existing list
                    algStats[alg].append(statsDict[alg])
                else:  # Start a new list
                    algStats[alg] = [statsDict[alg]]

        # Build the next line of the output file
        thisLine = d  # The line starts with the lake name
        for a in algorithmList:
            alg = a[1]
            try:
                # Add precision, recall, and evaluation resolution
                thisLine += ', '+ str(statsDict[alg][0]) +', '+ str(statsDict[alg][1]) +', '+ str(statsDict[alg][2])
            except:
                thisLine += ', NA, NA, NA'  # Flag the results as no data!
                print 'WARNING: Missing results for algorithm ' + alg + ' for lake ' + d
        outputHandle.write(thisLine + '\n')

    # Add a final summary line containing the means for each algorithm across lakes
    meanSummaries = 'Mean'
    stdSummaries = 'Standard Deviation'
    for a in algorithmList:
        algName = a[1]
        if algName in algStats:  # Extract results
            (pMean, rMean, eMean, pStd, rStd, eStd) = prListStats(algStats[a[1]])
        else:  # No results for this data
            (pMean, rMean, eMean, pStd, rStd, eStd) = ('NA', 'NA', 'NA', 'NA', 'NA', 'NA')
        meanSummaries += (', '+ str(pMean) +', '+ str(rMean) +', '+ str(eMean))
        stdSummaries += (', '+ str(pStd ) +', '+ str(rStd ) +', '+ str(eStd ))
    outputHandle.write('\n')  # Skip a line
    outputHandle.write(meanSummaries + '\n')
    outputHandle.write(stdSummaries + '\n')
    outputHandle.write(headerLine)  # For convenience reprint the header line at the bottom
    outputHandle.close()  # All finished!
    print 'Finished writing log file'
    return 0
#======================================================================================================
def main():
    # Check for the compile logs input flag and if found just compile the logs
    try:
        pos = sys.argv.index('--compile-logs')
    except:  # Otherwise call the main argument handling function from the supporting file
        return cmt.util.processManyLakes.main(processing_function, LoggingClass, cmt.util.processManyLakes.get_image_collection_modis)

    # Compile flag found, just compile the logs.
    # The folder to compile must be the argument right after the flag.
    try:
        dataFolder = sys.argv[pos+1]
    except:
        print 'The data folder must follow "--compile-logs"'
        return 0
    return compileLakeResults(dataFolder)

if __name__ == "__main__":
    sys.exit(main())
| |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from neo4j.data import (
Graph,
Node,
Record,
)
# python -m pytest -s -v tests/unit/test_record.py
def test_record_equality():
    # Records with identical keys and values compare equal; differing values do not.
    record1 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    record2 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    record3 = Record(zip(["name", "empire"], ["Stefan", "Das Deutschland"]))
    assert record1 == record2
    assert record1 != record3
    assert record2 != record3


def test_record_hashing():
    # Hashes agree for equal records and differ for these unequal ones.
    record1 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    record2 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    record3 = Record(zip(["name", "empire"], ["Stefan", "Das Deutschland"]))
    assert hash(record1) == hash(record2)
    assert hash(record1) != hash(record3)
    assert hash(record2) != hash(record3)
def test_record_iter():
    # Iterating a Record yields its values (not its keys).
    a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    assert list(a_record.__iter__()) == ["Nigel", "The British Empire"]


def test_record_as_dict():
    a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    assert dict(a_record) == {"name": "Nigel", "empire": "The British Empire"}


def test_record_as_list():
    a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    assert list(a_record) == ["Nigel", "The British Empire"]


def test_record_len():
    # NOTE(review): shadowed by the parametrized test_record_len defined later
    # in this module, so pytest never collects this definition.
    a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    assert len(a_record) == 2


def test_record_repr():
    # NOTE(review): shadowed by the parametrized test_record_repr defined later
    # in this module, so pytest never collects this definition.
    a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"]))
    assert repr(a_record) == "<Record name='Nigel' empire='The British Empire'>"


def test_record_data():
    # data() accepts key names and indexes; unknown names map to None,
    # out-of-range indexes raise IndexError.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.data() == {"name": "Alice", "age": 33, "married": True}
    assert r.data("name") == {"name": "Alice"}
    assert r.data("age", "name") == {"age": 33, "name": "Alice"}
    assert r.data("age", "name", "shoe size") == {"age": 33, "name": "Alice", "shoe size": None}
    assert r.data(0, "name") == {"name": "Alice"}
    assert r.data(0) == {"name": "Alice"}
    assert r.data(1, 0) == {"age": 33, "name": "Alice"}
    with pytest.raises(IndexError):
        _ = r.data(1, 0, 999)
def test_record_keys():
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.keys() == ["name", "age", "married"]


def test_record_values():
    # values() accepts names and indexes; unknown names map to None,
    # out-of-range indexes raise IndexError.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.values() == ["Alice", 33, True]
    assert r.values("name") == ["Alice"]
    assert r.values("age", "name") == [33, "Alice"]
    assert r.values("age", "name", "shoe size") == [33, "Alice", None]
    assert r.values(0, "name") == ["Alice", "Alice"]
    assert r.values(0) == ["Alice"]
    assert r.values(1, 0) == [33, "Alice"]
    with pytest.raises(IndexError):
        _ = r.values(1, 0, 999)


def test_record_items():
    # items() mirrors values() but returns (key, value) pairs.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.items() == [("name", "Alice"), ("age", 33), ("married", True)]
    assert r.items("name") == [("name", "Alice")]
    assert r.items("age", "name") == [("age", 33), ("name", "Alice")]
    assert r.items("age", "name", "shoe size") == [("age", 33), ("name", "Alice"), ("shoe size", None)]
    assert r.items(0, "name") == [("name", "Alice"), ("name", "Alice")]
    assert r.items(0) == [("name", "Alice")]
    assert r.items(1, 0) == [("age", 33), ("name", "Alice")]
    with pytest.raises(IndexError):
        _ = r.items(1, 0, 999)
def test_record_index():
    # index() maps names and indexes to positions; unknown name -> KeyError,
    # out-of-range index -> IndexError, other types -> TypeError.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.index("name") == 0
    assert r.index("age") == 1
    assert r.index("married") == 2
    with pytest.raises(KeyError):
        _ = r.index("shoe size")
    assert r.index(0) == 0
    assert r.index(1) == 1
    assert r.index(2) == 2
    with pytest.raises(IndexError):
        _ = r.index(3)
    with pytest.raises(TypeError):
        _ = r.index(None)


def test_record_value():
    # value() returns the first field by default; missing keys/indexes give
    # None or the supplied default.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.value() == "Alice"
    assert r.value("name") == "Alice"
    assert r.value("age") == 33
    assert r.value("married") is True
    assert r.value("shoe size") is None
    assert r.value("shoe size", 6) == 6
    assert r.value(0) == "Alice"
    assert r.value(1) == 33
    assert r.value(2) is True
    assert r.value(3) is None
    assert r.value(3, 6) == 6
    with pytest.raises(TypeError):
        _ = r.value(None)


def test_record_value_kwargs():
    # Same behavior as test_record_value but via keyword arguments.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r.value() == "Alice"
    assert r.value(key="name") == "Alice"
    assert r.value(key="age") == 33
    assert r.value(key="married") is True
    assert r.value(key="shoe size") is None
    assert r.value(key="shoe size", default=6) == 6
    assert r.value(key=0) == "Alice"
    assert r.value(key=1) == 33
    assert r.value(key=2) is True
    assert r.value(key=3) is None
    assert r.value(key=3, default=6) == 6


def test_record_contains():
    # Membership tests against the record's values.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert "Alice" in r
    assert 33 in r
    assert True in r
    assert 7.5 not in r
    # NOTE(review): this exercises index(None), not containment — duplicates
    # the check in test_record_index; possibly a copy/paste leftover.
    with pytest.raises(TypeError):
        _ = r.index(None)
def test_record_from_dict():
    # A Record can be constructed directly from a mapping.
    r = Record({"name": "Alice", "age": 33})
    assert r["name"] == "Alice"
    assert r["age"] == 33


def test_record_get_slice():
    # Slicing returns a sub-Record with the corresponding keys and values.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert Record(zip(["name", "age"], ["Alice", 33])) == r[0:2]


def test_record_get_by_index():
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r[0] == "Alice"


def test_record_get_by_name():
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r["name"] == "Alice"


def test_record_get_by_out_of_bounds_index():
    # Out-of-bounds subscripting yields None rather than raising.
    r = Record(zip(["name", "age", "married"], ["Alice", 33, True]))
    assert r[9] is None


def test_record_get_item():
    # Unknown key -> KeyError; unsupported key type -> TypeError.
    r = Record(zip(["x", "y"], ["foo", "bar"]))
    assert r["x"] == "foo"
    assert r["y"] == "bar"
    with pytest.raises(KeyError):
        _ = r["z"]
    with pytest.raises(TypeError):
        _ = r[object()]
@pytest.mark.parametrize("len_", (0, 1, 2, 42))
def test_record_len(len_):
    # NOTE(review): redefines test_record_len from earlier in this module;
    # the earlier definition is shadowed and never collected by pytest.
    r = Record(("key_%i" % i, "val_%i" % i) for i in range(len_))
    assert len(r) == len_


@pytest.mark.parametrize("len_", range(3))
def test_record_repr(len_):
    # NOTE(review): likewise shadows the earlier test_record_repr definition.
    r = Record(("key_%i" % i, "val_%i" % i) for i in range(len_))
    assert repr(r)
# Each case supplies the raw key/value pairs, the keys to select (empty tuple
# means "all fields"), and the expected plain-dict serialization.
@pytest.mark.parametrize(("raw", "keys", "serialized"), (
    (
        zip(["x", "y", "z"], [1, 2, 3]),
        (),
        {"x": 1, "y": 2, "z": 3}
    ),
    (
        # Selection by positional index.
        zip(["x", "y", "z"], [1, 2, 3]),
        (1, 2),
        {"y": 2, "z": 3}
    ),
    (
        # Selection by field name; request order does not affect the output.
        zip(["x", "y", "z"], [1, 2, 3]),
        ("z", "x"),
        {"x": 1, "z": 3}
    ),
    (
        # None values survive serialization.
        zip(["x"], [None]),
        (),
        {"x": None}
    ),
    (
        zip(["x", "y"], [True, False]),
        (),
        {"x": True, "y": False}
    ),
    (
        zip(["x", "y", "z"], [0.0, 1.0, 3.141592653589]),
        (),
        {"x": 0.0, "y": 1.0, "z": 3.141592653589}
    ),
    (
        zip(["x"], ["hello, world"]),
        (),
        {"x": "hello, world"}
    ),
    (
        zip(["x"], [bytearray([1, 2, 3])]),
        (),
        {"x": bytearray([1, 2, 3])}
    ),
    (
        # Containers pass through unchanged.
        zip(["x"], [[1, 2, 3]]),
        (),
        {"x": [1, 2, 3]}
    ),
    (
        zip(["x"], [{"one": 1, "two": 2}]),
        (),
        {"x": {"one": 1, "two": 2}}
    ),
    (
        # Graph nodes serialize to their property dicts.
        zip(["a"], [Node("graph", 42, "Person", {"name": "Alice"})]),
        (),
        {"a": {"name": "Alice"}}
    ),
))
def test_data(raw, keys, serialized):
    """Record.data() serializes to a plain dict, honouring key selection."""
    assert Record(raw).data(*keys) == serialized
def test_data_relationship():
    """A bound relationship serializes as (start-props, type, end-props)."""
    graph = Graph()
    hydrator = Graph.Hydrator(graph)
    alice = hydrator.hydrate_node(1, {"Person"}, {"name": "Alice", "age": 33})
    bob = hydrator.hydrate_node(2, {"Person"}, {"name": "Bob", "age": 44})
    alice_knows_bob = hydrator.hydrate_relationship(1, alice.id, bob.id, "KNOWS",
                                                    {"since": 1999})
    record = Record(zip(["a", "b", "r"], [alice, bob, alice_knows_bob]))
    expected = {
        "a": {"name": "Alice", "age": 33},
        "b": {"name": "Bob", "age": 44},
        "r": (
            {"name": "Alice", "age": 33},
            "KNOWS",
            {"name": "Bob", "age": 44}
        ),
    }
    assert record.data() == expected
def test_data_unbound_relationship():
    """A relationship with unknown end nodes serializes with empty dicts."""
    graph = Graph()
    hydrator = Graph.Hydrator(graph)
    some_one_knows_some_one = hydrator.hydrate_relationship(
        1, 42, 43, "KNOWS", {"since": 1999}
    )
    record = Record(zip(["r"], [some_one_knows_some_one]))
    assert record.data() == {"r": ({}, "KNOWS", {})}
@pytest.mark.parametrize("cyclic", (True, False))
def test_data_path(cyclic):
    """A path serializes as alternating node property dicts and rel types."""
    graph = Graph()
    hydrator = Graph.Hydrator(graph)
    alice = hydrator.hydrate_node(1, {"Person"}, {"name": "Alice", "age": 33})
    bob = hydrator.hydrate_node(2, {"Person"}, {"name": "Bob", "age": 44})
    # In the cyclic case the path returns to its starting node.
    if cyclic:
        carol = alice
    else:
        carol = hydrator.hydrate_node(3, {"Person"}, {"name": "Carol", "age": 55})
    rels = [hydrator.hydrate_unbound_relationship(1, "KNOWS", {"since": 1999}),
            hydrator.hydrate_unbound_relationship(2, "DISLIKES", {})]
    path = hydrator.hydrate_path([alice, bob, carol], rels, [1, 1, -2, 2])
    record = Record(zip(["r"], [path]))
    expected = {
        "r": [dict(alice), "KNOWS", dict(bob), "DISLIKES", dict(carol)]
    }
    assert record.data() == expected
| |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base models for point-cloud based detection."""
from lingvo import compat as tf
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.tasks.car import base_decoder
from lingvo.tasks.car import detection_3d_metrics
from lingvo.tasks.car import transform_util
from lingvo.tasks.car.waymo import waymo_ap_metric
from lingvo.tasks.car.waymo import waymo_metadata
import numpy as np
class WaymoOpenDatasetDecoder(base_decoder.BaseDecoder):
  """A decoder to use for decoding a detector model on Waymo."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'draw_visualizations', False, 'Boolean for whether to draw '
        'visualizations. This is independent of laser_sampling_rate.')
    p.ap_metric = waymo_ap_metric.WaymoAPMetrics.Params(
        waymo_metadata.WaymoMetadata())
    # Fixed missing space in the concatenated help string ("keyis" -> "key is").
    p.Define(
        'extra_ap_metrics', {},
        'Dictionary of extra AP metrics to run in the decoder. The key '
        'is the name of the metric and the value is a sub-class of '
        'APMetric')
    p.Define(
        'save_residuals', False,
        'If True, this expects the residuals and ground-truth to be available '
        'in the decoder output dictionary, and it will save it to the decoder '
        'output file. See decode_include_residuals in PointDetectorBase '
        'for details.')
    return p

  def CreateDecoderMetrics(self):
    """Decoder metrics for WaymoOpenDataset."""
    p = self.params

    waymo_metric_p = p.ap_metric.Copy().Set(cls=waymo_ap_metric.WaymoAPMetrics)
    waymo_metrics = waymo_metric_p.Instantiate()
    class_names = waymo_metrics.metadata.ClassNames()

    # TODO(bencaine,vrv): There's some code smell with this ap_metrics params
    # usage. We create local copies of the params to then instantiate them.
    # Failing to do this risks users editing the params after construction of
    # the object, making each object method call have the potential for side
    # effects.
    # Create a new dictionary with copies of the params converted to objects
    # so we can then add these to the decoder metrics.
    extra_ap_metrics = {}
    for k, metric_p in p.extra_ap_metrics.items():
      extra_ap_metrics[k] = metric_p.Instantiate()

    # A second copy of the metric evaluated on bird's-eye-view (2D) boxes.
    waymo_metric_bev_p = waymo_metric_p.Copy()
    waymo_metric_bev_p.box_type = '2d'
    waymo_metrics_bev = waymo_metric_bev_p.Instantiate()

    # Convert the list of class names to a dictionary mapping class_id -> name.
    class_id_to_name = dict(enumerate(class_names))

    # TODO(vrv): This uses the same top down transform as for KITTI;
    # re-visit these settings since detections can happen all around
    # the car.
    top_down_transform = transform_util.MakeCarToImageTransform(
        pixels_per_meter=32.,
        image_ref_x=512.,
        image_ref_y=1408.,
        flip_axes=True)
    decoder_metrics = py_utils.NestedMap({
        'top_down_visualization':
            (detection_3d_metrics.TopDownVisualizationMetric(
                top_down_transform,
                image_height=1536,
                image_width=1024,
                class_id_to_name=class_id_to_name)),
        'num_samples_in_batch': metrics.AverageMetric(),
        'waymo_metrics': waymo_metrics,
        'waymo_metrics_bev': waymo_metrics_bev,
    })
    # Keys whose metrics get per-frame Update() calls in PostProcessDecodeOut.
    self._update_metrics_class_keys = ['waymo_metrics_bev', 'waymo_metrics']
    for k, metric in extra_ap_metrics.items():
      decoder_metrics[k] = metric
      self._update_metrics_class_keys.append(k)

    decoder_metrics.mesh = detection_3d_metrics.WorldViewer()
    return decoder_metrics

  def ProcessOutputs(self, input_batch, model_outputs):
    """Produce additional decoder outputs for WaymoOpenDataset.

    Args:
      input_batch: A .NestedMap of the inputs to the model.
      model_outputs: A .NestedMap of the outputs of the model, including::
        - per_class_predicted_bboxes: [batch, num_classes, num_boxes, 7] float
          Tensor with per class 3D (7 DOF) bounding boxes.
        - per_class_predicted_bbox_scores: [batch, num_classes, num_boxes] float
          Tensor with per class, per box scores.
        - per_class_valid_mask: [batch, num_classes, num_boxes] masking Tensor
          indicating which boxes were still kept after NMS for each class.

    Returns:
      A NestedMap of additional decoder outputs needed for
      PostProcessDecodeOut.
    """
    del model_outputs
    p = self.params
    input_labels = input_batch.labels
    input_metadata = input_batch.metadata
    # Unique frame id: "<run_segment>_<run_start_offset>".
    source_ids = tf.strings.join([
        input_metadata.run_segment,
        tf.as_string(input_metadata.run_start_offset)
    ],
                                 separator='_')
    ret = py_utils.NestedMap({
        'num_points_in_bboxes': input_batch.labels.bboxes_3d_num_points,
        # Ground truth.
        'bboxes_3d': input_labels.bboxes_3d,
        'bboxes_3d_mask': input_labels.bboxes_3d_mask,
        'labels': input_labels.labels,
        'label_ids': input_labels.label_ids,
        'speed': input_labels.speed,
        'acceleration': input_labels.acceleration,
        # Fill the following in.
        'source_ids': source_ids,
        'difficulties': input_labels.single_frame_detection_difficulties,
        'unfiltered_bboxes_3d_mask': input_labels.unfiltered_bboxes_3d_mask,
        'run_segment': input_metadata.run_segment,
        'run_start_offset': input_metadata.run_start_offset,
        'pose': input_metadata.pose,
    })
    if p.draw_visualizations:
      laser_sample = self._SampleLaserForVisualization(
          input_batch.lasers.points_xyz, input_batch.lasers.points_padding)
      ret.update(laser_sample)
    return ret

  def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
    """Post-processes the decoder outputs."""
    p = self.params
    # Update num_samples_in_batch.
    batch_size, num_classes, num_boxes, _ = (
        dec_out_dict.per_class_predicted_bboxes.shape)
    dec_metrics_dict.num_samples_in_batch.Update(batch_size)

    # Update decoder output by removing z-coordinate, thus reshaping the bboxes
    # to [batch, num_bboxes, 5] to be compatible with
    # TopDownVisualizationMetric.

    # Indices corresponding to the 2D bbox parameters (x, y, dx, dy, phi).
    # Use the builtin `bool`: the `np.bool` alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24, where it raises AttributeError.
    bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=bool)
    bboxes_2d = dec_out_dict.bboxes_3d[..., bbox_2d_idx]
    predicted_bboxes = dec_out_dict.per_class_predicted_bboxes[..., bbox_2d_idx]

    if p.draw_visualizations and dec_out_dict.points_sampled:
      tf.logging.info('Updating sample for top down visualization')
      dec_metrics_dict.mesh.Update(
          py_utils.NestedMap({
              'points_xyz': dec_out_dict.points_xyz,
              'points_padding': dec_out_dict.points_padding,
          }))

      # Flatten our predictions/scores to match the API of the visualization
      # The last dimension of flattened_bboxes is 5 due to the mask
      # above using bbox_2d_idx.
      flattened_bboxes = np.reshape(predicted_bboxes,
                                    [batch_size, num_classes * num_boxes, 5])
      flattened_visualization_weights = np.reshape(
          dec_out_dict.visualization_weights,
          [batch_size, num_classes * num_boxes])
      # Create a label id mask for now to maintain compatibility.
      # TODO(bencaine): Refactor visualizations to reflect new structure.
      flattened_visualization_labels = np.tile(
          np.arange(0, num_classes)[np.newaxis, :, np.newaxis],
          [batch_size, 1, num_boxes])
      flattened_visualization_labels = np.reshape(
          flattened_visualization_labels, [batch_size, num_classes * num_boxes])
      dec_metrics_dict.top_down_visualization.Update(
          py_utils.NestedMap({
              'visualization_labels': flattened_visualization_labels,
              'predicted_bboxes': flattened_bboxes,
              'visualization_weights': flattened_visualization_weights,
              'points_xyz': dec_out_dict.points_xyz,
              'points_padding': dec_out_dict.points_padding,
              'gt_bboxes_2d': bboxes_2d,
              'gt_bboxes_2d_weights': dec_out_dict.bboxes_3d_mask,
              'labels': dec_out_dict.labels,
              'difficulties': dec_out_dict.difficulties,
              'source_ids': dec_out_dict.source_ids,
          }))

    # Update AP metrics.
    # Skip zeroth step decoding.
    if dec_out_dict.global_step == 0:
      return None

    # TODO(bencaine/vrv): Refactor to unify Waymo code and KITTI
    # Returned values are saved in model_dir/decode_* directories.
    output_to_save = []
    for batch_idx in range(batch_size):
      pred_bboxes = dec_out_dict.per_class_predicted_bboxes[batch_idx]
      pred_bbox_scores = dec_out_dict.per_class_predicted_bbox_scores[batch_idx]

      # The current API expects a 'height' matrix to be passed for filtering
      # detections based on height. This is a KITTI-ism that we need to remove,
      # but for now we just give a height of 1. The MinHeight metadata function
      # for non-KITTI datasets should have a threshold lower than this value.
      heights = np.ones((num_classes, num_boxes)).astype(np.float32)
      gt_mask = dec_out_dict.bboxes_3d_mask[batch_idx].astype(bool)
      gt_labels = dec_out_dict.labels[batch_idx][gt_mask]
      gt_bboxes = dec_out_dict.bboxes_3d[batch_idx][gt_mask]
      gt_difficulties = dec_out_dict.difficulties[batch_idx][gt_mask]
      gt_num_points = dec_out_dict.num_points_in_bboxes[batch_idx][gt_mask]
      # Note that this is not used in the KITTI evaluation.
      gt_speed = dec_out_dict.speed[batch_idx][gt_mask]

      # TODO(shlens): Update me
      for metric_key in self._update_metrics_class_keys:
        metric_cls = dec_metrics_dict[metric_key]
        metric_cls.Update(
            dec_out_dict.source_ids[batch_idx],
            py_utils.NestedMap(
                groundtruth_labels=gt_labels,
                groundtruth_bboxes=gt_bboxes,
                groundtruth_difficulties=gt_difficulties,
                groundtruth_num_points=gt_num_points,
                groundtruth_speed=gt_speed,
                detection_scores=pred_bbox_scores,
                detection_boxes=pred_bboxes,
                detection_heights_in_pixels=heights,
            ))

      # We still want to save all ground truth (even if it was filtered
      # in some way) so we use the unfiltered_bboxes_3d_mask here.
      gt_save_mask = dec_out_dict.unfiltered_bboxes_3d_mask[batch_idx].astype(
          bool)
      pd_save_mask = dec_out_dict.per_class_valid_mask[batch_idx] > 0
      class_ids = np.tile(np.arange(num_classes)[:, np.newaxis], [1, num_boxes])
      saved_results = py_utils.NestedMap(
          pose=dec_out_dict.pose[batch_idx],
          frame_id=dec_out_dict.source_ids[batch_idx],
          bboxes=pred_bboxes[pd_save_mask],
          scores=pred_bbox_scores[pd_save_mask],
          gt_labels=dec_out_dict.labels[batch_idx][gt_save_mask],
          gt_label_ids=dec_out_dict.label_ids[batch_idx][gt_save_mask],
          gt_speed=dec_out_dict.speed[batch_idx][gt_save_mask],
          gt_acceleration=dec_out_dict.acceleration[batch_idx][gt_save_mask],
          class_ids=class_ids[pd_save_mask],
          gt_bboxes=dec_out_dict.bboxes_3d[batch_idx][gt_save_mask],
          gt_difficulties=dec_out_dict.difficulties[batch_idx][gt_save_mask],
      )
      if p.save_residuals:
        # The leading shapes of these tensors should match bboxes and scores.
        # These are the underlying tensors that are used to compute score
        # and bboxes.
        saved_results.update({
            'bboxes_gt_residuals':
                dec_out_dict.per_class_gt_residuals[batch_idx][pd_save_mask],
            'bboxes_gt_labels':
                dec_out_dict.per_class_gt_labels[batch_idx][pd_save_mask],
            'bboxes_residuals':
                dec_out_dict.per_class_residuals[batch_idx][pd_save_mask],
            'bboxes_logits':
                dec_out_dict.per_class_logits[batch_idx][pd_save_mask],
            'bboxes_anchor_boxes':
                dec_out_dict.per_class_anchor_boxes[batch_idx][pd_save_mask],
        })
      serialized = self.SaveTensors(saved_results)
      output_to_save += [(dec_out_dict.source_ids[batch_idx], serialized)]
    return output_to_save
| |
from auspex.log import logger
from auspex.config import isnotebook
from auspex.experiment import Experiment, FloatParameter
from auspex.stream import DataStream, DataAxis, SweepAxis, DataStreamDescriptor, InputConnector, OutputConnector
from auspex.instruments import instrument_map
import auspex.filters
import bbndb
import numpy as np
import sys
import os
# On Windows (or when forking is explicitly disabled) fall back to threads.
# Bug fix: `Value` was only imported in the else-branch, so init_instruments'
# `Value('i', 0)` raised NameError on win32/NOFORKING. The multiprocessing
# Value is safe to share between threads as well, so import it in both cases.
if sys.platform == 'win32' or 'NOFORKING' in os.environ:
    from threading import Thread as Process
    from threading import Event
    from multiprocessing import Value
else:
    from multiprocessing import Process
    from multiprocessing import Event
    from multiprocessing import Value
from . import pipeline
import time
import datetime
import json
# Ordering of stream-processing proxy types, from earliest to latest stage in
# a measurement pipeline (raw demodulation through to output).
stream_hierarchy = [
    bbndb.auspex.Demodulate,
    bbndb.auspex.Integrate,
    bbndb.auspex.Average,
    bbndb.auspex.OutputProxy
]
# Maps each bbndb filter-proxy (database) type to the concrete Auspex filter
# class instantiated for it when the experiment's filter graph is built.
filter_map = {
    bbndb.auspex.Demodulate: auspex.filters.Channelizer,
    bbndb.auspex.Average: auspex.filters.Averager,
    bbndb.auspex.Framer: auspex.filters.Framer,
    bbndb.auspex.Integrate: auspex.filters.KernelIntegrator,
    bbndb.auspex.Write: auspex.filters.WriteToFile,
    bbndb.auspex.Buffer: auspex.filters.DataBuffer,
    bbndb.auspex.Display: auspex.filters.Plotter,
    bbndb.auspex.Correlate: auspex.filters.Correlator,
    bbndb.auspex.FidelityKernel: auspex.filters.SingleShotMeasurement
}
# Stream-selector classes keyed by stream type, as defined by auspex.filters.
stream_sel_map = auspex.filters.stream_sel_map
class QubitExperiment(Experiment):
"""Create an `Experiment` with specialized config and run methods for qubit experiments.
Parameters:
meta_file (string)
The filename of the QGL metainfo (*.json) corresponding to the desired
experiment.
averages (int)
The number of shots to take. Results are only actually averaged
if an `Averager` node is present in the processing pipeline.
exp_name (string)
Name of experiment. Used by any writers in pipeline to pick a data container name.
kwargs
Additional keyword arguments passed to the base Auspex `Experiment`
class.
Returns:
experiment instance (`Experiment`)
Returns the initialized Auspex `Experiment`.
Examples:
Creating a simple experiment.
>>> mf = RabiAmp(q1, [-1,0,1])
>>> exp = QubitExperiment(mf, averages=500)
"""
def __init__(self, meta_file, averages=100, exp_name=None, save_chandb=True, **kwargs):
super(QubitExperiment, self).__init__(**kwargs)
if not pipeline.pipelineMgr:
raise Exception("Could not find pipeline manager, have you declared one using PipelineManager()?")
self.cw_mode = False
self.add_date = True # add date to data files?
self.name = exp_name
self.outputs_by_qubit = {}
self.progressbars = None
self.save_chandb = save_chandb
self.create_from_meta(meta_file, averages)
def create_from_meta(self, meta_file, averages):
"""Method called during creation. Implementing a subclass of `QubitExperiment` this method
may be overridden to provide additional functionality. However, this is a complex method, and
it is recommended that the user instead override the `modify_graph` method to provide
custom subclass behavior.
"""
try:
with open(meta_file, 'r') as FID:
meta_info = json.load(FID)
except:
raise Exception(f"Could note process meta info from file {meta_file}")
# Load ChannelLibrary and database information
db_provider = meta_info['database_info']['db_provider']
db_resource_name = meta_info['database_info']['db_resource_name']
library_name = meta_info['database_info']['library_name']
library_id = meta_info['database_info']['library_id']
# Respect separate sessions for channel library and pipeline
self.cl_session = bbndb.get_cl_session()
self.pl_session = bbndb.get_pl_session()
# Load the channel library by ID
self.chan_db = self.cl_session.query(bbndb.qgl.ChannelDatabase).filter_by(id=library_id).first()
all_channels = self.chan_db.channels
all_generators = self.chan_db.generators
all_transmitters = self.chan_db.transmitters
all_receivers = self.chan_db.receivers
all_transceivers = self.chan_db.transceivers
all_qubits = [c for c in all_channels if isinstance(c, bbndb.qgl.Qubit)]
all_measurements = [c for c in all_channels if isinstance(c, bbndb.qgl.Measurement)]
# Restrict to current qubits, channels, etc. involved in this actual experiment
self.controlled_qubits = [c for c in self.chan_db.channels if c.label in meta_info["qubits"]]
self.measurements = [c for c in self.chan_db.channels if c.label in meta_info["measurements"]]
self.measured_qubits = [c for c in self.chan_db.channels if "M-"+c.label in meta_info["measurements"]]
if 'edges' in meta_info:
self.edges = [c for c in self.chan_db.channels if c.label in meta_info["edges"]]
else:
self.edges = []
self.phys_chans = list(set([e.phys_chan for e in self.controlled_qubits + self.measurements + self.edges]))
self.receiver_chans = list(set([e.receiver_chan for e in self.measurements]))
self.slave_trigs = [c for c in self.chan_db.channels if c.label == 'slave_trig']
self.trig_chans = list(set([e.trig_chan.phys_chan for e in self.measurements])) + [c.phys_chan for c in self.slave_trigs]
self.transmitters = list(set([e.phys_chan.transmitter for e in self.controlled_qubits + self.measurements + self.edges + self.slave_trigs]))
self.receivers = list(set([e.receiver_chan.receiver for e in self.measurements]))
self.generators = list(set([q.phys_chan.generator for q in self.measured_qubits + self.controlled_qubits + self.measurements if q.phys_chan.generator]))
self.qubits_by_name = {q.label: q for q in self.measured_qubits + self.controlled_qubits}
# Load the relevant stream selectors from the pipeline.
self.stream_selectors = pipeline.pipelineMgr.get_current_stream_selectors()
if len(self.stream_selectors) == 0:
raise Exception("No filter pipeline has been created. You can try running the create_default_pipeline() method of the Pipeline Manager")
org_stream_selectors = self.stream_selectors
for ss in org_stream_selectors:
labels = ss.label.split('-')
for l in labels:
if l in self.qubits_by_name.keys() and ss not in self.stream_selectors:
self.stream_selectors.append(ss)
continue
# Locate transmitters relying on processors
self.transceivers = list(set([t.transceiver for t in self.transmitters + self.receivers if t.transceiver]))
self.processors = list(set([p for t in self.transceivers for p in t.processors]))
# Determine if the digitizer trigger lives on another transmitter that isn't included already
self.transmitters = list(set([mq.measure_chan.trig_chan.phys_chan.transmitter for mq in self.measured_qubits] + self.transmitters))
# The exception being any instruments that are declared as standalone
self.all_standalone = [i for i in self.chan_db.all_instruments() if i.standalone and i not in self.transmitters + self.receivers + self.generators]
# In case we need to access more detailed foundational information
self.factory = self
# If no pipeline is defined, assumed we want to generate it automatically
if not pipeline.pipelineMgr.meas_graph:
raise Exception("No pipeline has been created, do so automatically using exp_factory.create_default_pipeline()")
#self.create_default_pipeline(self.measured_qubits)
# Add the waveform file info to the qubits
output_chans = self.transmitters + self.transceivers + self.phys_chans + self.trig_chans
for xmit, fname in meta_info['instruments'].items():
awg = [c for c in output_chans if c.label==xmit][0]
awg.sequence_file = fname
# Construct the DataAxis from the meta_info
desc = meta_info["axis_descriptor"]
data_axis = desc[0] # Data will always be the first axis
# ovverride data axis with repeated number of segments
if hasattr(self, "repeats") and self.repeats is not None:
data_axis['points'] = np.tile(data_axis['points'], self.repeats)
# Search for calibration axis, i.e., metadata
axis_names = [d['name'] for d in desc]
if 'calibration' in axis_names:
meta_axis = desc[axis_names.index('calibration')]
# There should be metadata for each cal describing what it is
if len(desc)>1:
metadata = ['data']*len(data_axis['points']) + meta_axis['points']
# Pad the data axis with dummy equidistant x-points for the extra calibration points
avg_step = (data_axis['points'][-1] - data_axis['points'][0])/(len(data_axis['points'])-1)
points = np.append(data_axis['points'], data_axis['points'][-1] + (np.arange(len(meta_axis['points']))+1)*avg_step)
else:
metadata = meta_axis['points'] # data may consist of calibration points only
points = np.arange(len(metadata)) # dummy axis for plotting purposes
# If there's only one segment we can ignore this axis
if len(points) > 1:
self.segment_axis = DataAxis(data_axis['name'], points, unit=data_axis['unit'], metadata=metadata)
else:
# No calibration data, just add a segment axis as long as there is more than one segment
if len(data_axis['points']) > 1:
self.segment_axis = DataAxis(data_axis['name'], data_axis['points'], unit=data_axis['unit'])
# Build a mapping of qubits to self.receivers, construct qubit proxies
# We map by the unique database ID since that is much safer
receiver_chans_by_qubit_label = {}
for m in self.measurements:
q = [c for c in self.chan_db.channels if c.label==m.label[2:]][0]
receiver_chans_by_qubit_label[q.label] = m.receiver_chan
# Now a pipeline exists, so we create Auspex filters from the proxy filters in the db
self.proxy_to_filter = {}
self.filters = []
self.connector_by_sel = {}
self.chan_to_dig = {}
self.chan_to_oc = {}
self.qubit_to_dig = {}
self.qubits_by_output = {}
self.proxy_name_to_instrument = {}
# Create microwave sources and receiver instruments from the database objects.
# We configure the self.receivers later after adding channels.
self.instrument_proxies = self.generators + self.receivers + self.transmitters + self.transceivers + self.all_standalone + self.processors
for t in self.transceivers:
if t.initialize_separately:
self.instrument_proxies.remove(t)
else:
for el in t.transmitters + t.receivers:
self.instrument_proxies.remove(el)
self.instruments = []
for instrument in self.instrument_proxies:
if (hasattr(instrument, 'serial_port') and
instrument.serial_port is not None and
hasattr(instrument, 'dac') and
instrument.dac is not None):
address = (instrument.address, instrument.serial_port, instrument.dac)
else:
address = instrument.address
instr = instrument_map[instrument.model](address, instrument.label) # Instantiate
# For easy lookup
instr.proxy_obj = instrument
instrument._locked = False
instrument.instr = instr # This shouldn't be relied upon
instrument._locked = True
self.proxy_name_to_instrument[instrument.label] = instr
# Add to the experiment's instrument list
self._instruments[instrument.label] = instr
self.instruments.append(instr)
# Add to class dictionary for convenience
if not hasattr(self, instrument.label):
setattr(self, instrument.label, instr)
processed_sels = []
for mq in self.measured_qubits:
# Stream selectors from the pipeline database:
# These contain all information except for the physical channel
mq_stream_sels = [ss for ss in self.stream_selectors if mq.label in ss.label.split("-")]
# Look up the receiver channel
rcv = receiver_chans_by_qubit_label[mq.label]
# Look up the digitizer/transceiver and find the correct stream selector class
transcvr = rcv.receiver.transceiver
if transcvr is not None and transcvr.initialize_separately == False:
dig = rcv.receiver.transceiver
stream_sel_class = stream_sel_map[rcv.receiver.stream_sel]
else:
dig = rcv.receiver
stream_sel_class = stream_sel_map[dig.stream_sel]
# For future lookup of digitizers
self.qubit_to_dig[mq.id] = dig
# Create the stream selectors
for mq_stream_sel in mq_stream_sels:
# ONLY CREATE THE SELECTOR ONCE!
if mq_stream_sel not in processed_sels:
processed_sels.append(mq_stream_sel)
auspex_stream_sel = stream_sel_class(name=f"{rcv.label}-{mq_stream_sel.stream_type}-stream_sel")
mq_stream_sel.channel = rcv.channel
auspex_stream_sel.configure_with_proxy(mq_stream_sel)
auspex_stream_sel.receiver = auspex_stream_sel.proxy = mq_stream_sel
# Construct the channel from the receiver channel
channel = auspex_stream_sel.get_channel(mq_stream_sel)
# Manually set the physical channel
channel.phys_channel = rcv.channel
# Get the base descriptor from the channel
descriptor = auspex_stream_sel.get_descriptor(mq_stream_sel, rcv)
# Update the descriptor based on the number of segments
# The segment axis should already be defined if the sequence
# is greater than length 1
if hasattr(self, "segment_axis"):
descriptor.add_axis(self.segment_axis)
# Add averaging if necessary
if averages > 1:
descriptor.add_axis(DataAxis("averages", range(averages)))
# Add the output connectors to the experiment and set their base descriptor
self.connector_by_sel[mq_stream_sel] = self.add_connector(mq_stream_sel)
self.connector_by_sel[mq_stream_sel].set_descriptor(descriptor)
# Add the channel to the instrument
dig.instr.add_channel(channel)
self.chan_to_dig[channel] = dig.instr
self.chan_to_oc[channel] = self.connector_by_sel[mq_stream_sel]
# Find the number of self.measurements
segments_per_dig = {receiver_chan.receiver: meta_info["receivers"][receiver_chan.label] for receiver_chan in self.receiver_chans
if receiver_chan.label in meta_info["receivers"].keys()}
# Configure receiver instruments from the database objects
# this must be done after adding channels.
for dig in self.receivers:
if dig.transceiver is not None and transcvr.initialize_separately == False:
dig.transceiver._locked = False
dig.transceiver.number_averages = averages
dig.transceiver.number_waveforms = 1
dig.transceiver.number_segments = segments_per_dig[dig]
dig.transceiver._locked = True
else:
dig.number_averages = averages
dig.number_waveforms = 1
dig.number_segments = segments_per_dig[dig]
dig.instr.proxy_obj = dig
# Restrict the graph to the relevant qubits
self.measured_qubit_names = [q.label for q in self.measured_qubits]
self.pl_session.commit()
# Any modifications to be done by subclasses, just a passthrough here
self.modified_graph = self.modify_graph(pipeline.pipelineMgr.meas_graph)
# Compartmentalize the instantiation
self.instantiate_filters(self.modified_graph)
def is_in_measured_qubit_names(self,qubit_name):
labels = []
if qubit_name is not None:
labels = qubit_name.split('-')
for l in labels:
if l in self.measured_qubit_names:
return True
return False
    def instantiate_filters(self, graph):
        """Instantiate Auspex filters from the proxy-filter nodes in `graph` and wire
        them into the experiment's processing graph.

        Only nodes all of whose '-'-separated qubit names are measured are
        realized, so correlators are included only when every participating
        qubit is measured.
        """
        # Configure the individual filter nodes
        for _, dat in graph.nodes(data=True):
            node = dat['node_obj']
            if isinstance(node, bbndb.auspex.FilterProxy):
                if all(name in self.measured_qubit_names for name in node.qubit_name.split('-')):
                    #include correlators only if all participating qubits are measured
                    new_filt = filter_map[type(node)]()
                    new_filt.configure_with_proxy(node)
                    new_filt.proxy = node
                    self.filters.append(new_filt)
                    self.proxy_to_filter[node] = new_filt
                    if isinstance(node, bbndb.auspex.OutputProxy):
                        # Remember which qubit each output (writer/buffer/plot) serves.
                        self.qubits_by_output[new_filt] = node.qubit_name
        # Connect the filters together
        graph_edges = []
        self.pl_session.commit()
        for l1, l2 in graph.edges():
            node1, node2 = graph.nodes[l1]['node_obj'], graph.nodes[l2]['node_obj']
            if (self.is_in_measured_qubit_names(node1.qubit_name) or self.is_in_measured_qubit_names(node1.label)) and self.is_in_measured_qubit_names(node2.qubit_name):
                # NOTE(review): if node1 were neither a FilterProxy nor a
                # StreamSelect, `oc` from a previous iteration would be reused —
                # presumably every edge source is one of the two; confirm.
                if isinstance(node1, bbndb.auspex.FilterProxy):
                    filt1 = self.proxy_to_filter[node1]
                    oc = filt1.output_connectors[graph[l1][l2]["connector_out"]]
                elif isinstance(node1, bbndb.auspex.StreamSelect):
                    oc = self.connector_by_sel[node1]
                filt2 = self.proxy_to_filter[node2]
                ic = filt2.input_connectors[graph[l1][l2]["connector_in"]]
                graph_edges.append([oc, ic])
        # Define the experiment graph
        self.set_graph(graph_edges)
def modify_graph(self, graph):
"""Method called near the end of `create_from_meta` to allow custom manipulation of the filter
pipeline. For example, `CalibrationExperiment` implements a version of `modify_graph` that
selectively removes portions of the graph and creates buffers as needed to perform the desired
calibrations on specific qubits.
"""
return graph
def set_fake_data(self, digitizer_proxy, ideal_data, increment=False, random_mag=0.1):
"""Enabled and use the fake data interface for digitizers in order that auspex can
be run without hardware.
Parameters:
digitizer_proxy (bbndb `Receiver` instance)
The digitizer instrument proxy to be used for fake data generation.
ideal_data (numpy array)
The actual data to be used. If `increment` is False, a 1D array with a single value
per segment is used. The digitizer drivers automatical convert to a integrated, demodulated,
or raw signal depending on the stream type being used. If `increment` is True, then this may be a
2D array, which is incremented through to emulate sweeps such a qubit measurement frequency sweep.
increment (boolean)
Whether or not to step through a 2D data array after to incorporate extra sweeps. The behavior is
defined above.
Examples:
Make sure to set auspex dummy mode at import time.
>>> import auspex.config as config
>>> config.auspex_dummy_mode = True
>>> # Configure channels and pipelines here
>>> amps = np.linspace(-1,1,51)
>>> exp = QubitExperiment(RabiAmp(q1,amps),averages=50)
>>> exp.set_fake_data(digitizer_1, np.cos(np.linspace(0, 2*np.pi,51)))
>>> exp.run_sweeps()
"""
auspex_instr = self.proxy_name_to_instrument[digitizer_proxy.label]
auspex_instr.ideal_data = ideal_data
auspex_instr.increment_ideal_data = increment
auspex_instr.gen_fake_data = True
auspex_instr.fake_data_random_mag = random_mag
def clear_fake_data(self, digitizer_proxy):
"""Disable using fake data interface for a digitizer. Take note that dummy mode may
still be active.
Parameters:
digitizer_proxy (bbndb `Receiver` instance)
The digitizer instrument proxy to be used for fake data generation.
"""
auspex_instr = self.proxy_name_to_instrument[digitizer_proxy.label]
auspex_instr.ideal_data = ideal_data
auspex_instr.gen_fake_data = False
def add_connector(self, stream_selector):
name = stream_selector.qubit_name+'-'+stream_selector.stream_type
logger.debug(f"Adding {name} output connector to experiment.")
oc = OutputConnector(name=name, parent=self)
self.output_connectors[name] = oc
setattr(self, name, oc)
return oc
def init_instruments(self):
for name, instr in self._instruments.items():
instr.configure_with_proxy(instr.proxy_obj)
self.digitizers = [v for _, v in self._instruments.items() if "Digitizer" in v.instrument_type]
self.awgs = [v for _, v in self._instruments.items() if "AWG" in v.instrument_type]
# Swap the master AWG so it is last in the list
try:
master_awg_idx = next(ct for ct,awg in enumerate(self.awgs) if awg.master)
self.awgs[-1], self.awgs[master_awg_idx] = self.awgs[master_awg_idx], self.awgs[-1]
except:
logger.warning("No AWG is specified as the master.")
for gen_proxy in self.generators:
gen_proxy.instr.output = True
# Start socket listening processes, store as keys in a dictionary with exit commands as values
self.dig_listeners = {}
ready = Value('i', 0)
self.dig_run = Event()
self.dig_exit = Event()
for chan, dig in self.chan_to_dig.items():
socket = dig.get_socket(chan)
oc = self.chan_to_oc[chan]
p = Process(target=dig.receive_data, args=(chan, oc, self.dig_exit, ready, self.dig_run))
self.dig_listeners[p] = self.dig_exit
assert None not in self.dig_listeners.keys()
for listener in self.dig_listeners.keys():
listener.start()
while ready.value < len(self.chan_to_dig):
time.sleep(0.1)
if self.cw_mode:
for awg in self.awgs:
awg.run()
def add_instrument_sweep(self, instrument_name, attribute, values, channel=None):
param = FloatParameter() # Create the parameter
param.name = f"{instrument_name} {attribute} {channel}"
instr = self._instruments[instrument_name]
def method(value, channel=channel, instr=instr, prop=attribute):
if channel:
getattr(instr, "set_"+prop)(channel, value)
else:
getattr(instr, "set_"+prop)(value)
param.assign_method(method)
self.add_sweep(param, values) # Create the requested sweep on this parameter
def add_manual_sweep(self, label, prompt, values, channel=None):
param = FloatParameter() # Create the parameter
param.name = label
def method(value):
print(f'Manually set {label} to {value}, then press enter.')
input()
param.assign_method(method)
self.add_sweep(param, values) # Create the requested sweep on this parameter
def add_qubit_sweep(self, qubit, measure_or_control, attribute, values):
"""
Add a *ParameterSweep* to the experiment. Users specify a qubit property that auspex
will try to link back to the relevant instrument. For example::
exp = QubitExpFactory.create(PulsedSpec(q1))
self.add_qubit_sweep(q1, "measure", "frequency", np.linspace(6e9, 6.5e9, 500))
self.run_sweeps()
"""
param = FloatParameter() # Create the parameter
param.name = f"{qubit.label} {measure_or_control} {attribute}"
if measure_or_control not in ["measure", "control"]:
raise ValueError(f"Cannot add sweep for something other than measure or control properties of {qubit}")
if measure_or_control == "measure":
logger.debug(f"Sweeping {qubit} measurement")
thing = list(filter(lambda m: m.label=="M-"+qubit.label, self.measurements))
if len(thing) > 1:
raise ValueError(f"Found more than one measurement for {qubit}")
thing = thing[0]
elif measure_or_control == "control":
logger.debug(f"Sweeping {qubit} control")
thing = qubit
if thing.phys_chan.generator and attribute=="frequency":
# Mixed up to final frequency
name = thing.phys_chan.generator.label
instr = list(filter(lambda x: x.name == name, self._instruments.values()))[0]
method = None
else:
# Direct synthesis
name, chan = thing.phys_chan.label.split("-")[0:2]
instr = self._instruments[name] #list(filter(lambda x: x.name == name, self._instruments.values()))[0]
#special casing for APS2 channel amplitude sweeps... is there a better way to do this?
if isinstance(instr, auspex.instruments.APS2) and attribute=="amplitude":
chan = [1, 2]
def method(value, channel=chan, instr=instr, prop=attribute,thing=thing):
# e.g. keysight.set_amplitude("ch1", 0.5)
try:
getattr(instr, "set_"+prop)(chan, value, thing)
except:
getattr(instr, "set_"+prop)(chan, value)
param.set_pair = (thing.phys_chan.label, attribute)
if method:
# Custom method
param.assign_method(method)
else:
# Get method by name
if hasattr(instr, "set_"+attribute):
param.assign_method(getattr(instr, "set_"+attribute)) # Couple the parameter to the instrument
param.add_post_push_hook(lambda: time.sleep(0.05))
else:
raise ValueError("The instrument {} has no method {}".format(name, "set_"+attribute))
param.set_pair = (instr.name, attribute)
self.add_sweep(param, values) # Create the requested sweep on this parameter
def add_avg_sweep(self, num_averages):
param = IntParameter()
param.name = "sw_avg"
setattr(self, param.name, param)
self._parameters[param.name] = param
self.add_sweep(param, range(num_averages))
def shutdown_instruments(self):
# remove socket listeners
logger.debug("Shutting down instruments")
try:
for awg in self.awgs:
awg.stop()
for dig in self.digitizers:
dig.stop()
for gen_proxy in self.generators:
gen_proxy.instr.output = False
except:
logger.error('Could Not Stop AWGs or Digitizers; Reset Experiment')
for instr in self.instruments:
instr.disconnect()
self.dig_exit.set()
for listener in self.dig_listeners:
listener.join(2)
if listener.is_alive():
logger.debug(f"Terminating listener {listener} aggressively")
listener.terminate()
del listener
import gc
gc.collect()
def final_init(self):
super(QubitExperiment, self).final_init()
# In order to fetch data more easily later
self.outputs_by_qubit = {q.label: [self.proxy_to_filter[dat['node_obj']] for f,dat in self.modified_graph.nodes(data=True) if (isinstance(dat['node_obj'], (bbndb.auspex.Write, bbndb.auspex.Buffer,)) and q.label == dat['node_obj'].qubit_name)] for q in self.measured_qubits}
def init_progress_bars(self):
""" initialize the progress bars."""
self.progressbars = {}
ocs = list(self.output_connectors.values())
if isnotebook():
from ipywidgets import IntProgress, VBox
from IPython.display import display
if len(ocs)>0:
for oc in ocs:
self.progressbars[oc] = IntProgress(min=0, max=oc.output_streams[0].descriptor.num_points(), bar_style='success',
description=f'Digitizer Data {oc.name}:', style={'description_width': 'initial'})
for axis in self.sweeper.axes:
self.progressbars[axis] = IntProgress(min=0, max=axis.num_points(),
description=f'{axis.name}:', style={'description_width': 'initial'})
display(VBox(list(self.progressbars.values())))
else:
from progress.bar import ShadyBar
if len(ocs)>0:
for oc in ocs:
self.progressbars[oc] = ShadyBar(f'Digitizer Data {oc.name}:',
max=oc.output_streams[0].descriptor.num_points())
for axis in self.sweeper.axes:
self.progressbars[axis] = ShadyBar(f"Sweep {axis.name}", max=axis.num_points())
def run(self):
# Begin acquisition before enabling the AWGs
for dig in self.digitizers:
dig.acquire()
dig.last_timestamp.value = datetime.datetime.now().timestamp()
# Set flag to enable acquisition process
self.dig_run.set()
time.sleep(1)
# Start the AWGs
if not self.cw_mode:
for awg in self.awgs:
awg.run()
# Wait for all of the acquisitions to complete
timeout = 10
for dig in self.digitizers:
dig.wait_for_acquisition(self.dig_run, timeout=timeout, ocs=list(self.chan_to_oc.values()), progressbars=self.progressbars)
# Bring everything to a stop
for dig in self.digitizers:
dig.stop()
# Pause the receiver processes so they don't time out
self.dig_run.clear()
# Stop the AWGs
if not self.cw_mode:
for awg in self.awgs:
awg.stop()
| |
"""Tests for the 'setuptools' package"""
from unittest import TestSuite, TestCase, makeSuite, defaultTestLoader
import distutils.core, distutils.cmd
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils.errors import DistutilsSetupError
import setuptools, setuptools.dist
from setuptools import Feature
from distutils.core import Extension
extract_constant, get_module_constant = None, None
from setuptools.depends import *
from distutils.version import StrictVersion, LooseVersion
from distutils.util import convert_path
import sys, os.path
def additional_tests():
import doctest, unittest
suite = unittest.TestSuite((
doctest.DocFileSuite(
'api_tests.txt',
optionflags=doctest.ELLIPSIS, package='pkg_resources',
),
))
if sys.platform == 'win32':
suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt'))
return suite
def makeSetup(**args):
"""Return distribution from 'setup(**args)', without executing commands"""
distutils.core._setup_stop_after = "commandline"
# Don't let system command line leak into tests!
args.setdefault('script_args',['install'])
try:
return setuptools.setup(**args)
finally:
distutils.core_setup_stop_after = None
class DependsTests(TestCase):
def testExtractConst(self):
if not extract_constant: return # skip on non-bytecode platforms
def f1():
global x,y,z
x = "test"
y = z
# unrecognized name
self.assertEqual(extract_constant(f1.func_code,'q', -1), None)
# constant assigned
self.assertEqual(extract_constant(f1.func_code,'x', -1), "test")
# expression assigned
self.assertEqual(extract_constant(f1.func_code,'y', -1), -1)
# recognized name, not assigned
self.assertEqual(extract_constant(f1.func_code,'z', -1), None)
def testFindModule(self):
self.assertRaises(ImportError, find_module, 'no-such.-thing')
self.assertRaises(ImportError, find_module, 'setuptools.non-existent')
f,p,i = find_module('setuptools.tests'); f.close()
def testModuleExtract(self):
if not get_module_constant: return # skip on non-bytecode platforms
from distutils import __version__
self.assertEqual(
get_module_constant('distutils','__version__'), __version__
)
self.assertEqual(
get_module_constant('sys','version'), sys.version
)
self.assertEqual(
get_module_constant('setuptools.tests','__doc__'),__doc__
)
def testRequire(self):
if not extract_constant: return # skip on non-bytecode platforms
req = Require('Distutils','1.0.3','distutils')
self.assertEqual(req.name, 'Distutils')
self.assertEqual(req.module, 'distutils')
self.assertEqual(req.requested_version, '1.0.3')
self.assertEqual(req.attribute, '__version__')
self.assertEqual(req.full_name(), 'Distutils-1.0.3')
from distutils import __version__
self.assertEqual(req.get_version(), __version__)
self.failUnless(req.version_ok('1.0.9'))
self.failIf(req.version_ok('0.9.1'))
self.failIf(req.version_ok('unknown'))
self.failUnless(req.is_present())
self.failUnless(req.is_current())
req = Require('Distutils 3000','03000','distutils',format=LooseVersion)
self.failUnless(req.is_present())
self.failIf(req.is_current())
self.failIf(req.version_ok('unknown'))
req = Require('Do-what-I-mean','1.0','d-w-i-m')
self.failIf(req.is_present())
self.failIf(req.is_current())
req = Require('Tests', None, 'tests', homepage="http://example.com")
self.assertEqual(req.format, None)
self.assertEqual(req.attribute, None)
self.assertEqual(req.requested_version, None)
self.assertEqual(req.full_name(), 'Tests')
self.assertEqual(req.homepage, 'http://example.com')
paths = [os.path.dirname(p) for p in __path__]
self.failUnless(req.is_present(paths))
self.failUnless(req.is_current(paths))
class DistroTests(TestCase):
def setUp(self):
self.e1 = Extension('bar.ext',['bar.c'])
self.e2 = Extension('c.y', ['y.c'])
self.dist = makeSetup(
packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
py_modules=['b.d','x'],
ext_modules = (self.e1, self.e2),
package_dir = {},
)
def testDistroType(self):
self.failUnless(isinstance(self.dist,setuptools.dist.Distribution))
def testExcludePackage(self):
self.dist.exclude_package('a')
self.assertEqual(self.dist.packages, ['b','c'])
self.dist.exclude_package('b')
self.assertEqual(self.dist.packages, ['c'])
self.assertEqual(self.dist.py_modules, ['x'])
self.assertEqual(self.dist.ext_modules, [self.e1, self.e2])
self.dist.exclude_package('c')
self.assertEqual(self.dist.packages, [])
self.assertEqual(self.dist.py_modules, ['x'])
self.assertEqual(self.dist.ext_modules, [self.e1])
# test removals from unspecified options
makeSetup().exclude_package('x')
def testIncludeExclude(self):
# remove an extension
self.dist.exclude(ext_modules=[self.e1])
self.assertEqual(self.dist.ext_modules, [self.e2])
# add it back in
self.dist.include(ext_modules=[self.e1])
self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
# should not add duplicate
self.dist.include(ext_modules=[self.e1])
self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
def testExcludePackages(self):
self.dist.exclude(packages=['c','b','a'])
self.assertEqual(self.dist.packages, [])
self.assertEqual(self.dist.py_modules, ['x'])
self.assertEqual(self.dist.ext_modules, [self.e1])
def testEmpty(self):
dist = makeSetup()
dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
dist = makeSetup()
dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
def testContents(self):
self.failUnless(self.dist.has_contents_for('a'))
self.dist.exclude_package('a')
self.failIf(self.dist.has_contents_for('a'))
self.failUnless(self.dist.has_contents_for('b'))
self.dist.exclude_package('b')
self.failIf(self.dist.has_contents_for('b'))
self.failUnless(self.dist.has_contents_for('c'))
self.dist.exclude_package('c')
self.failIf(self.dist.has_contents_for('c'))
def testInvalidIncludeExclude(self):
self.assertRaises(DistutilsSetupError,
self.dist.include, nonexistent_option='x'
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, nonexistent_option='x'
)
self.assertRaises(DistutilsSetupError,
self.dist.include, packages={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, packages={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.include, ext_modules={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, ext_modules={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.include, package_dir=['q']
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, package_dir=['q']
)
class FeatureTests(TestCase):
def setUp(self):
self.req = Require('Distutils','1.0.3','distutils')
self.dist = makeSetup(
features={
'foo': Feature("foo",standard=True,require_features=['baz',self.req]),
'bar': Feature("bar", standard=True, packages=['pkg.bar'],
py_modules=['bar_et'], remove=['bar.ext'],
),
'baz': Feature(
"baz", optional=False, packages=['pkg.baz'],
scripts = ['scripts/baz_it'],
libraries=[('libfoo','foo/foofoo.c')]
),
'dwim': Feature("DWIM", available=False, remove='bazish'),
},
script_args=['--without-bar', 'install'],
packages = ['pkg.bar', 'pkg.foo'],
py_modules = ['bar_et', 'bazish'],
ext_modules = [Extension('bar.ext',['bar.c'])]
)
def testDefaults(self):
self.failIf(
Feature(
"test",standard=True,remove='x',available=False
).include_by_default()
)
self.failUnless(
Feature("test",standard=True,remove='x').include_by_default()
)
# Feature must have either kwargs, removes, or require_features
self.assertRaises(DistutilsSetupError, Feature, "test")
def testAvailability(self):
self.assertRaises(
DistutilsPlatformError,
self.dist.features['dwim'].include_in, self.dist
)
def testFeatureOptions(self):
dist = self.dist
self.failUnless(
('with-dwim',None,'include DWIM') in dist.feature_options
)
self.failUnless(
('without-dwim',None,'exclude DWIM (default)') in dist.feature_options
)
self.failUnless(
('with-bar',None,'include bar (default)') in dist.feature_options
)
self.failUnless(
('without-bar',None,'exclude bar') in dist.feature_options
)
self.assertEqual(dist.feature_negopt['without-foo'],'with-foo')
self.assertEqual(dist.feature_negopt['without-bar'],'with-bar')
self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim')
self.failIf('without-baz' in dist.feature_negopt)
def testUseFeatures(self):
dist = self.dist
self.assertEqual(dist.with_foo,1)
self.assertEqual(dist.with_bar,0)
self.assertEqual(dist.with_baz,1)
self.failIf('bar_et' in dist.py_modules)
self.failIf('pkg.bar' in dist.packages)
self.failUnless('pkg.baz' in dist.packages)
self.failUnless('scripts/baz_it' in dist.scripts)
self.failUnless(('libfoo','foo/foofoo.c') in dist.libraries)
self.assertEqual(dist.ext_modules,[])
self.assertEqual(dist.require_features, [self.req])
# If we ask for bar, it should fail because we explicitly disabled
# it on the command line
self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar')
def testFeatureWithInvalidRemove(self):
self.assertRaises(
SystemExit, makeSetup, features = {'x':Feature('x', remove='y')}
)
class TestCommandTests(TestCase):
def testTestIsCommand(self):
test_cmd = makeSetup().get_command_obj('test')
self.failUnless(isinstance(test_cmd, distutils.cmd.Command))
def testLongOptSuiteWNoDefault(self):
ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite'])
ts1 = ts1.get_command_obj('test')
ts1.ensure_finalized()
self.assertEqual(ts1.test_suite, 'foo.tests.suite')
def testDefaultSuite(self):
ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test')
ts2.ensure_finalized()
self.assertEqual(ts2.test_suite, 'bar.tests.suite')
def testDefaultWModuleOnCmdLine(self):
ts3 = makeSetup(
test_suite='bar.tests',
script_args=['test','-m','foo.tests']
).get_command_obj('test')
ts3.ensure_finalized()
self.assertEqual(ts3.test_module, 'foo.tests')
self.assertEqual(ts3.test_suite, 'foo.tests.test_suite')
def testConflictingOptions(self):
ts4 = makeSetup(
script_args=['test','-m','bar.tests', '-s','foo.tests.suite']
).get_command_obj('test')
self.assertRaises(DistutilsOptionError, ts4.ensure_finalized)
def testNoSuite(self):
ts5 = makeSetup().get_command_obj('test')
ts5.ensure_finalized()
self.assertEqual(ts5.test_suite, None)
| |
"""
Author : Jay Rambhia
email : jayrambhia777@gmail.com
Git : https://github.com/jayrambhia
gist : https://gist.github.com/jayrambhia
=============================================
Name : deskwid
Repo : DeskWid
Git : https://github.com/jayrambhia/DeskWid
version 0.1
"""
# Copyright (c) 2012 Jay Rambhia
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pygtk
import gtk
from threading import Thread
import gobject
import os
import twitter
import imdb
import time
import deskwidutils
gtk.gdk.threads_init()
class DeskwidWindow:
def __init__(self, api):
self.api = api
self.timeline_flag = False
self.timeline_interval = 2
self.window = gtk.Window()
self.window.set_title("DeskWid")
self.window.set_size_request(1000,700)
self.window.connect("destroy", self.close_application)
self.box = gtk.VBox(False, 2)
self.window.add(self.box)
self.box.show()
self.statusbox = gtk.HBox(False, 2)
self.statusbox.set_size_request(1000,30)
self.box.pack_start(self.statusbox)
self.statusbox.show()
self.statusentry = gtk.Entry()
self.statusentry.set_size_request(900,30)
self.statusentry.connect("activate", self.getcommand)
self.statusbox.pack_start(self.statusentry, False, False, 5)
self.statusentry.show()
self.button = gtk.Button("command")
self.button.set_size_request(80,30)
self.button.connect('clicked', self.getcommand)
self.statusbox.pack_start(self.button, False, False, 3)
self.button.show()
self.box1 = gtk.HBox(False, 2)
self.box.pack_start(self.box1, False, False, 3)
self.box1.show()
self.genbox = gtk.VBox(False, 2)
self.genbox.set_size_request(680, 650)
self.box1.pack_start(self.genbox, False, False, 2)
self.genbox.show()
if self.api is None:
self.genlabel = gtk.Label("DeskWid 0.1 -- Some of your Twitter API keys might be incorrect")
else:
self.genlabel = gtk.Label("DeskWid 0.1")
self.genlabel.set_size_request(680,30)
self.genbox.pack_start(self.genlabel)
self.genlabel.show()
self.sw1 = gtk.ScrolledWindow()
self.sw1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.sw1.show()
self.genbox.pack_start(self.sw1, False, False, 2)
self.genview = gtk.TextView()
self.genview.set_size_request(680,610)
self.genview.set_editable(False)
self.genview.set_wrap_mode(gtk.WRAP_WORD)
self.genbuffer = self.genview.get_buffer()
self.sw1.add(self.genview)
self.genview.show()
self.notebox = gtk.EventBox()
self.notebox.set_size_request(300, 650)
self.notebox.connect('leave_notify_event',self.savenote)
self.box1.pack_start(self.notebox, False, False, 2)
self.notebox.show()
self.sw2 = gtk.ScrolledWindow()
self.sw2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.sw2.show()
self.notebox.add(self.sw2)
self.notebook = gtk.TextView()
self.notebook.set_size_request(300, 580)
self.notebook.set_wrap_mode(gtk.WRAP_WORD)
self.notebuffer = self.notebook.get_buffer()
if os.path.isfile(os.path.join(os.getcwd(),'stickynote.txt')):
infile = open('stickynote.txt','r')
if infile:
text = infile.read()
infile.close()
self.notebuffer.set_text(text)
self.sw2.add(self.notebook)
self.notebook.show()
self.window.show()
def close_application(self, widget=None, data=None):
self.savenote()
self.timeline_flag = False
gtk.main_quit()
def savenote(self, widget=None, data=None):
file = open('stickynote.txt','w')
startiter = self.notebuffer.get_start_iter()
enditer = self.notebuffer.get_end_iter()
text = self.notebuffer.get_text(startiter, enditer)
file.write(text)
file.close()
def getcommand(self, widget, data=None):
command = self.statusentry.get_text()
if command.startswith("\\t "):
status = "\\t ".join(command.split("\\t ")[1:])
if len(status) > 140:
text = "Should not be more than 140 characters"
gobject.idle_add(self.change_genlabel, text)
else:
gobject.idle_add(self.change_genlabel, "tweeting..")
self.set_status_thread(status)
elif command.startswith("\imdb "):
gobject.idle_add(self.change_genlabel, "Fetching movie details from IMDb")
self.fetch_movie_thread()
elif command.startswith("\\timeline"):
subcom = command.split("\\timeline ")[-1]
if subcom.isdigit():
self.timeline_interval = int(subcom)
if self.timeline_interval < 1:
self.timeline_interval = 1
if self.timeline_flag is False:
self.timeline_flag = True
self.get_timeline_thread()
elif "stop" in subcom:
if self.timeline_flag is True:
self.timeline_flag = False
gobject.idle_add(self.change_genlabel, 'Timeline stopped')
else:
subcom = 2
if self.timeline_flag is False:
self.timeline_flag = True
self.get_timeline_thread()
elif command.startswith("--proxy"):
print self.setproxy(command)
gobject.idle_add(self.change_genlabel, 'New Proxy set')
elif command.startswith("--consumer_key"):
deskwidutils.setconsumerkey(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Consumer Key set')
elif command.startswith("--consumer_secret"):
deskwidutils.setconsumersecret(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Consumer Secret set')
elif command.startswith("--access_token_key"):
deskwidutils.setaccesstokenkey(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Access Token set')
elif command.startswith("--access_token_secret"):
deskwidutils.setaccesstokensecret(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Access Token Secret set')
#elif command.startswith("quit") or command.startswith("exit"):
# self.close_application()
else:
gobject.idle_add(self.change_genlabel, "No such command")
return
self.statusentry.set_text("")
def get_timeline_thread(self):
self.timeline_thread = Thread(target=self.get_timeline).start()
def get_timeline(self):
since_id = None
while self.timeline_flag:
timeline=''
tweet_list=[]
tweet_str=''
try:
gobject.idle_add(self.change_genlabel, 'fetching timeline')
timeline = self.api.GetFriendsTimeline(since_id = since_id)
#timeline = self.api.friends_timeline(since_id = since_id)
if timeline:
for i in range(len(timeline)-1,-1,-1):
tweet = deskwidutils.gettweet(timeline[i])
tweet_list.append(tweet)
tweet_str = tweet_str + tweet + '\n'
gobject.idle_add(self.set_genview, tweet)
since_id = timeline[0].id
<<<<<<< HEAD:deskwid/deskwid.py
print since_id
gobject.idle_add(self.change_genlabel, 'timeline')
=======
gobject.idle_add(self.change_genlabel, 'timeline')
# print since_id
>>>>>>> 7e9a838c0063c5023012521c641c1a867f43352a:deskwid.py
except :
# print 'Got some error'
gobject.idle_add(self.change_genlabel, 'Unable to fetch timeline')
#gobject.idle_add(self.set_genview, tweet_str)
time.sleep(self.timeline_interval*60)
def set_status_thread(self, status):
Thread(target=self.set_status, args=(status,)).start()
def set_status(self, status):
try:
status_ob = self.api.PostUpdate(status)
#print "tweeted"
gobject.idle_add(self.change_genlabel, 'Tweeted')
self.statusentry.set_text('')
except:
gobject.idle_add(self.change_genlabel, 'Got some error')
#print "Error"
def fetch_movie_thread(self):
Thread(target=self.fetch_movie).start()
def fetch_movie(self):
query = self.statusentry.get_text().split("\imdb ")[-1]
print query
self.movie = imdb.Movie(query)
text = deskwidutils.get_movie_detail(self.movie)
gobject.idle_add(self.set_genview, text)
gobject.idle_add(self.change_genlabel, self.movie.title)
self.statusentry.set_text("")
return
def change_genlabel(self, text):
self.genlabel.set_text(text+" - DeskWid 0.1")
def set_genview(self, text):
startiter = self.genbuffer.get_start_iter()
enditer = self.genbuffer.get_end_iter()
pretext = self.genbuffer.get_text(startiter, enditer)
line = "\n"+180*"-"+"\n"
text = line.join([text, pretext])
self.genbuffer.set_text(text)
def setproxy(self, command):
return deskwidutils.setproxy(command)
def deskwid():
proxy = deskwidutils.getproxy()
consumer_key = deskwidutils.getconsumerkey()
consumer_secret = deskwidutils.getconsumersecret()
access_token_key = deskwidutils.getaccesstokenkey()
access_token_secret = deskwidutils.getaccesstokensecret()
api = twitter.Api(consumer_key, consumer_secret,access_token_key, access_token_secret, proxy=proxy)
DeskwidWindow(api)
gtk.main()
| |
from six.moves import cStringIO as StringIO
import netlib
from netlib import tcp
from netlib.http import user_agents
from pathod import language
from pathod.language import http2, base
import tutils
def parse_request(s):
return language.parse_pathoc(s, True).next()
def parse_response(s):
return language.parse_pathod(s, True).next()
def default_settings():
return language.Settings(
request_host="foo.com",
protocol=netlib.http.http2.HTTP2Protocol(tcp.TCPClient(('localhost', 1234)))
)
def test_make_error_response():
d = StringIO()
s = http2.make_error_response("foo", "bar")
language.serve(s, d, default_settings())
class TestRequest:
def test_cached_values(self):
req = parse_request("get:/")
req_id = id(req)
assert req_id == id(req.resolve(default_settings()))
assert req.values(default_settings()) == req.values(default_settings())
def test_nonascii(self):
tutils.raises("ascii", parse_request, "get:\xf0")
def test_err(self):
tutils.raises(language.ParseException, parse_request, 'GET')
def test_simple(self):
r = parse_request('GET:"/foo"')
assert r.method.string() == "GET"
assert r.path.string() == "/foo"
r = parse_request('GET:/foo')
assert r.path.string() == "/foo"
def test_multiple(self):
r = list(language.parse_pathoc("GET:/ PUT:/"))
assert r[0].method.string() == "GET"
assert r[1].method.string() == "PUT"
assert len(r) == 2
l = """
GET
"/foo"
PUT
"/foo
bar"
"""
r = list(language.parse_pathoc(l, True))
assert len(r) == 2
assert r[0].method.string() == "GET"
assert r[1].method.string() == "PUT"
l = """
get:"http://localhost:9999/p/200"
get:"http://localhost:9999/p/200"
"""
r = list(language.parse_pathoc(l, True))
assert len(r) == 2
assert r[0].method.string() == "GET"
assert r[1].method.string() == "GET"
def test_render_simple(self):
s = StringIO()
r = parse_request("GET:'/foo'")
assert language.serve(
r,
s,
default_settings(),
)
def test_raw_content_length(self):
r = parse_request('GET:/:r')
assert len(r.headers) == 0
r = parse_request('GET:/:r:b"foobar"')
assert len(r.headers) == 0
r = parse_request('GET:/')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-length", "0")
r = parse_request('GET:/:b"foobar"')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-length", "6")
r = parse_request('GET:/:b"foobar":h"content-length"="42"')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-length", "42")
r = parse_request('GET:/:r:b"foobar":h"content-length"="42"')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-length", "42")
def test_content_type(self):
r = parse_request('GET:/:r:c"foobar"')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-type", "foobar")
def test_user_agent(self):
r = parse_request('GET:/:r:ua')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("user-agent", user_agents.get_by_shortcut('a')[2])
def test_render_with_headers(self):
s = StringIO()
r = parse_request('GET:/foo:h"foo"="bar"')
assert language.serve(
r,
s,
default_settings(),
)
def test_nested_response(self):
l = "get:/p/:s'200'"
r = parse_request(l)
assert len(r.tokens) == 3
assert isinstance(r.tokens[2], http2.NestedResponse)
assert r.values(default_settings())
def test_render_with_body(self):
s = StringIO()
r = parse_request("GET:'/foo':bfoobar")
assert language.serve(
r,
s,
default_settings(),
)
def test_spec(self):
def rt(s):
s = parse_request(s).spec()
assert parse_request(s).spec() == s
rt("get:/foo")
class TestResponse:
def test_cached_values(self):
res = parse_response("200")
res_id = id(res)
assert res_id == id(res.resolve(default_settings()))
assert res.values(default_settings()) == res.values(default_settings())
def test_nonascii(self):
tutils.raises("ascii", parse_response, "200:\xf0")
def test_err(self):
tutils.raises(language.ParseException, parse_response, 'GET:/')
def test_raw_content_length(self):
r = parse_response('200:r')
assert len(r.headers) == 0
r = parse_response('200')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-length", "0")
def test_content_type(self):
r = parse_response('200:r:c"foobar"')
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("content-type", "foobar")
def test_simple(self):
r = parse_response('200:r:h"foo"="bar"')
assert r.status_code.string() == "200"
assert len(r.headers) == 1
assert r.headers[0].values(default_settings()) == ("foo", "bar")
assert r.body is None
r = parse_response('200:r:h"foo"="bar":bfoobar:h"bla"="fasel"')
assert r.status_code.string() == "200"
assert len(r.headers) == 2
assert r.headers[0].values(default_settings()) == ("foo", "bar")
assert r.headers[1].values(default_settings()) == ("bla", "fasel")
assert r.body.string() == "foobar"
def test_render_simple(self):
s = StringIO()
r = parse_response('200')
assert language.serve(
r,
s,
default_settings(),
)
def test_render_with_headers(self):
s = StringIO()
r = parse_response('200:h"foo"="bar"')
assert language.serve(
r,
s,
default_settings(),
)
def test_render_with_body(self):
s = StringIO()
r = parse_response('200:bfoobar')
assert language.serve(
r,
s,
default_settings(),
)
def test_spec(self):
def rt(s):
s = parse_response(s).spec()
assert parse_response(s).spec() == s
rt("200:bfoobar")
| |
import unittest
import kdTree
class KdTreeTest(unittest.TestCase):
    """Unit tests for the kdTree module: Point value semantics, distances,
    and KdTree insert/search/nearest/range behaviour in 2 and 3 dimensions."""

    # TEST Point

    def testPointCreate(self):
        """A Point stores its coordinates in order (X first, then Y)."""
        p = kdTree.Point([1, 2])
        self.assertEqual(1, p.coords[kdTree.X_COORD])
        self.assertEqual(2, p.coords[kdTree.Y_COORD])

    def testEquality(self):
        """== compares coordinates element-wise; != is its exact negation."""
        p = kdTree.Point([1, 2])
        pp = kdTree.Point([1, 2])
        p1 = kdTree.Point([1, 4])
        p2 = kdTree.Point([0, 4])
        self.assertTrue(p == p)
        self.assertTrue(p == pp)
        self.assertFalse(p == p1)
        self.assertFalse(p == p2)
        self.assertFalse(p1 == p2)
        self.assertFalse(p != p)
        self.assertFalse(p != pp)
        self.assertTrue(p != p1)
        self.assertTrue(p != p2)
        self.assertTrue(p1 != p2)

    def testPointDistance(self):
        """distance_to is Euclidean (3-4-5 triangle); squared variant skips the sqrt."""
        p1 = kdTree.Point([3, 4])
        p2 = kdTree.Point([0, 0])
        self.assertEqual(25, p1.squared_distance_to(p2))
        self.assertEqual(5, p1.distance_to(p2))

    # TEST KdTree

    def testIsEmpty(self):
        """A freshly constructed tree reports empty."""
        tree = kdTree.KdTree()
        self.assertTrue(tree.is_empty())

    def testInsert(self):
        """Inserting a point makes the tree non-empty and populates the root."""
        tree = kdTree.KdTree()
        self.assertTrue(tree.is_empty())
        p = kdTree.Point([1, 2])
        tree.insert(p)
        self.assertFalse(tree.is_empty())
        self.assertIsNotNone(tree._root)

    def testDimByLevel(self):
        """_dim_by_level cycles through the axes: level modulo _dim."""
        tree = kdTree.KdTree()  # default is 2 dimensions
        self.assertEqual(2, tree._dim)
        self.assertEqual(0, tree._dim_by_level(0))
        self.assertEqual(1, tree._dim_by_level(1))
        self.assertEqual(0, tree._dim_by_level(2))
        tree = kdTree.KdTree(5)  # 5-dimensional tree cycles 0..4
        self.assertEqual(5, tree._dim)
        self.assertEqual(0, tree._dim_by_level(0))
        self.assertEqual(1, tree._dim_by_level(1))
        self.assertEqual(2, tree._dim_by_level(2))
        self.assertEqual(0, tree._dim_by_level(5))
        self.assertEqual(1, tree._dim_by_level(6))
        self.assertEqual(4, tree._dim_by_level(9))

    def testLeveledDist(self):
        """leveled_distance is the 1-D separation along the level's axis only."""
        tree = kdTree.KdTree()
        p1 = kdTree.Point([3, 4])
        p2 = kdTree.Point([0, 0])
        node = kdTree.Node(p2)
        self.assertEqual(25, p1.squared_distance_to(p2))
        self.assertEqual(5, p1.distance_to(p2))
        self.assertEqual(3, tree.leveled_distance(node, p1, 0))  # 0 => X compare
        self.assertEqual(4, tree.leveled_distance(node, p1, 1))  # 1 => Y compare
        self.assertEqual(3, tree.leveled_distance(node, p1, 2))  # 0 => X compare

    def testSearch(self):
        """search returns the stored point (or None); contains mirrors it."""
        p1 = kdTree.Point([3, 4])
        p2 = kdTree.Point([0, 0])
        p3 = kdTree.Point([1, 2])
        tree = kdTree.KdTree()
        tree.insert(p1)
        tree.insert(p2)
        self.assertEqual(p1, tree.search(p1))
        self.assertTrue(tree.contains(p1))
        self.assertEqual(p2, tree.search(p2))
        self.assertTrue(tree.contains(p2))
        self.assertEqual(None, tree.search(p3))  # p3 was never inserted
        self.assertFalse(tree.contains(p3))

    def testSearch2(self):
        """search/contains also work in a 3-dimensional tree."""
        p1 = kdTree.Point([3, 1, 3])
        p2 = kdTree.Point([4, 4, 2])
        p3 = kdTree.Point([2, 3, 4])
        tree = kdTree.KdTree(dimensions=3)
        tree.insert(p1)
        tree.insert(p2)
        tree.insert(p3)
        self.assertEqual(p1, tree.search(p1))
        self.assertEqual(p2, tree.search(p2))
        self.assertEqual(p3, tree.search(p3))
        p = kdTree.Point([1, 2, 3])
        self.assertEqual(None, tree.search(p))
        self.assertTrue(tree.contains(p1))
        self.assertTrue(tree.contains(p2))
        self.assertTrue(tree.contains(p3))
        self.assertFalse(tree.contains(p))

    def testSearch3(self):
        """search returns the tree's own Point object, not the query object."""
        p = kdTree.Point([1, 2])
        pp = kdTree.Point([1, 2])
        tree = kdTree.KdTree()
        tree.insert(p)
        self.assertEqual(p, tree.search(pp))
        self.assertTrue(tree.search(pp) is p, "Returned point should be the node's point")
        self.assertFalse(tree.search(pp) is pp, "Returned point should be the node's point, not the given point")

    def testNearest(self):
        """nearest returns the closest stored point to the query."""
        p1 = kdTree.Point([3, 1])
        p2 = kdTree.Point([4, 4])
        p3 = kdTree.Point([2, 3])
        p4 = kdTree.Point([0.5, 0.5])
        tree = kdTree.KdTree()
        tree.insert(p1)
        tree.insert(p2)
        tree.insert(p3)
        tree.insert(p4)
        self.assertEqual(4, tree.size())
        p = kdTree.Point([1, 2])
        nn = tree.nearest(p)
        self.assertIsNotNone(nn)
        self.assertEqual(p3, nn)
        p = kdTree.Point([5, 5])
        nn = tree.nearest(p)
        self.assertIsNotNone(nn)
        self.assertEqual(p2, nn)

    def testNearest3D(self):
        """nearest accounts for the third axis in a 3-dimensional tree."""
        p1 = kdTree.Point([3, 1, 0])
        p2 = kdTree.Point([4, 4, 0])
        p3 = kdTree.Point([2, 3, 1])
        p4 = kdTree.Point([0.5, 0.5, 10])
        tree = kdTree.KdTree(dimensions=3)
        tree.insert(p1)
        tree.insert(p2)
        tree.insert(p3)
        tree.insert(p4)
        self.assertEqual(4, tree.size())
        p = kdTree.Point([2, 2, 1])
        nn = tree.nearest(p)
        self.assertIsNotNone(nn)
        self.assertEqual(p3, nn)
        p = kdTree.Point([2, 2, 8])  # far along Z: only p4 is close in that axis
        nn = tree.nearest(p)
        self.assertIsNotNone(nn)
        self.assertEqual(p4, nn)

    def testRange(self):
        """range returns every stored point inside the axis-aligned box."""
        p1 = kdTree.Point([3, 1])
        p2 = kdTree.Point([4, 4])
        p3 = kdTree.Point([2, 3])
        p4 = kdTree.Point([0.5, 0.5])
        tree = kdTree.KdTree()
        tree.insert(p1)
        tree.insert(p2)
        tree.insert(p3)
        tree.insert(p4)
        points = tree.range(kdTree.Point([0, 0]), kdTree.Point([1, 1]))
        self.assertEqual(1, len(points))
        self.assertTrue(p4 in points)
        points = tree.range(kdTree.Point([1, 2]), kdTree.Point([5, 5]))
        self.assertEqual(2, len(points))
        self.assertTrue(p2 in points)
        self.assertTrue(p3 in points)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit transl2tc' tool.
'''
import getopt
from grit.tool import interface
from grit.tool import rc2grd
from grit import grd_reader
from grit import util
from grit.extern import tclib
class TranslationToTc(interface.Tool):
  '''A tool for importing existing translations in RC format into the
Translation Console.

Usage:

grit -i GRD transl2tc [-l LIMITS] [RCOPTS] SOURCE_RC TRANSLATED_RC OUT_FILE

The tool needs a "source" RC file, i.e. in English, and an RC file that is a
translation of precisely the source RC file (not of an older or newer version).

The tool also requires you to provide a .grd file (input file) e.g. using the
-i global option or the GRIT_INPUT environment variable. The tool uses
information from your .grd file to correct placeholder names in the
translations and ensure that only translatable items and translations still
being used are output.

This tool will accept all the same RCOPTS as the 'grit rc2grd' tool. To get
a list of these options, run 'grit help rc2grd'.

Additionally, you can use the -l option (which must be the first option to the
tool) to specify a file containing a list of message IDs to which output should
be limited. This is only useful if you are limiting the output to your XMB
files using the 'grit xmb' tool's -l option. See 'grit help xmb' for how to
generate a file containing a list of the message IDs in an XMB file.

The tool will scan through both of the RC files as well as any HTML files they
refer to, and match together the source messages and translated messages. It
will output a file (OUTPUT_FILE) you can import directly into the TC using the
Bulk Translation Upload tool.
'''

  def ShortDescription(self):
    return 'Import existing translations in RC format into the TC'

  def Setup(self, globopt, args):
    '''Sets the instance up for use.

    Consumes an optional leading '-l LIMIT_FILE' pair from args, then hands
    the remaining arguments to the embedded Rc2Grd tool for option parsing.
    '''
    self.SetOptions(globopt)
    self.rc2grd = rc2grd.Rc2Grd()
    self.rc2grd.SetOptions(globopt)
    self.limits = None
    if len(args) and args[0] == '-l':
      # open() rather than the Python 2-only file() builtin.
      limit_file = open(args[1])
      self.limits = limit_file.read().split('\n')
      limit_file.close()
      args = args[2:]
    return self.rc2grd.ParseOptions(args)

  def Run(self, globopt, args):
    '''Runs the tool.  Expects exactly three positional arguments (source RC,
    translated RC, output path); returns 2 on a usage error.
    '''
    args = self.Setup(globopt, args)
    if len(args) != 3:
      self.Out('This tool takes exactly three arguments:\n'
               ' 1. The path to the original RC file\n'
               ' 2. The path to the translated RC file\n'
               ' 3. The output file path.\n')
      return 2
    grd = grd_reader.Parse(self.o.input, debug=self.o.extra_verbose)
    grd.RunGatherers(recursive = True)
    # open() rather than the Python 2-only file() builtin (here and below).
    source_rc = util.WrapInputStream(open(args[0], 'r'), self.rc2grd.input_encoding)
    transl_rc = util.WrapInputStream(open(args[1], 'r'), self.rc2grd.input_encoding)
    translations = self.ExtractTranslations(grd,
                                            source_rc.read(), args[0],
                                            transl_rc.read(), args[1])
    transl_rc.close()
    source_rc.close()
    output_file = util.WrapOutputStream(open(args[2], 'w'))
    self.WriteTranslations(output_file, translations.items())
    output_file.close()
    self.Out('Wrote output file %s' % args[2])

  def ExtractTranslations(self, current_grd, source_rc, source_path, transl_rc, transl_path):
    '''Extracts translations from the translated RC file, matching them with
    translations in the source RC file to calculate their ID, and correcting
    placeholders, limiting output to translateables, etc. using the supplied
    .grd file which is the current .grd file for your project.

    If this object's 'limits' attribute is not None but a list, the output of
    this function will be further limited to include only messages that have
    message IDs in the 'limits' list.

    Args:
      current_grd: grit.node.base.Node child, that has had RunGatherers(True) run on it
      source_rc: Complete text of source RC file
      source_path: Path to the source RC file
      transl_rc: Complete text of translated RC file
      transl_path: Path to the translated RC file

    Return:
      { id1 : text1, '12345678' : 'Hello USERNAME, howzit?' }
    '''
    source_grd = self.rc2grd.Process(source_rc, source_path)
    self.VerboseOut('Read %s into GRIT format, running gatherers.\n' % source_path)
    source_grd.RunGatherers(recursive=True, debug=self.o.extra_verbose)
    transl_grd = self.rc2grd.Process(transl_rc, transl_path)
    self.VerboseOut('Read %s into GRIT format, running gatherers.\n' % transl_path)
    transl_grd.RunGatherers(recursive=True, debug=self.o.extra_verbose)
    self.VerboseOut('Done running gatherers for %s.\n' % transl_path)

    # Proceed to create a map from ID to translation, getting the ID from the
    # source GRD and the translation from the translated GRD.
    id2transl = {}
    for source_node in source_grd:
      source_cliques = source_node.GetCliques()
      if not len(source_cliques):
        continue

      assert 'name' in source_node.attrs, 'All nodes with cliques should have an ID'
      node_id = source_node.attrs['name']
      self.ExtraVerboseOut('Processing node %s\n' % node_id)
      transl_node = transl_grd.GetNodeById(node_id)
      if transl_node:
        transl_cliques = transl_node.GetCliques()
        if not len(transl_cliques) == len(source_cliques):
          self.Out(
            'Warning: Translation for %s has wrong # of cliques, skipping.\n' %
            node_id)
          continue
      else:
        self.Out('Warning: No translation for %s, skipping.\n' % node_id)
        continue

      if source_node.name == 'message':
        # Fixup placeholders as well as possible based on information from
        # the current .grd file if they are 'TODO_XXXX' placeholders. We need
        # to fixup placeholders in the translated message so that it looks right
        # and we also need to fixup placeholders in the source message so that
        # its calculated ID will match the current message.
        current_node = current_grd.GetNodeById(node_id)
        if current_node:
          assert len(source_cliques) == 1 and len(current_node.GetCliques()) == 1
          source_msg = source_cliques[0].GetMessage()
          current_msg = current_node.GetCliques()[0].GetMessage()
          # Only do this for messages whose source version has not changed.
          if (source_msg.GetRealContent() != current_msg.GetRealContent()):
            self.VerboseOut('Info: Message %s has changed; skipping\n' % node_id)
          else:
            transl_msg = transl_cliques[0].GetMessage()
            transl_content = transl_msg.GetContent()
            current_content = current_msg.GetContent()
            source_content = source_msg.GetContent()

            ok_to_fixup = True
            if (len(transl_content) != len(current_content)):
              # message structure of translation is different, don't try fixup
              ok_to_fixup = False
            if ok_to_fixup:
              for ix in range(len(transl_content)):
                if isinstance(transl_content[ix], tclib.Placeholder):
                  if not isinstance(current_content[ix], tclib.Placeholder):
                    ok_to_fixup = False  # structure has changed
                    break
                  if (transl_content[ix].GetOriginal() !=
                      current_content[ix].GetOriginal()):
                    ok_to_fixup = False  # placeholders have likely been reordered
                    break
                else:  # translated part is not a placeholder but a string
                  if isinstance(current_content[ix], tclib.Placeholder):
                    ok_to_fixup = False  # placeholders have likely been reordered
                    break

            if not ok_to_fixup:
              self.VerboseOut(
                'Info: Structure of message %s has changed; skipping.\n' % node_id)
            else:
              def Fixup(content, ix):
                if (isinstance(content[ix], tclib.Placeholder) and
                    content[ix].GetPresentation().startswith('TODO_')):
                  assert isinstance(current_content[ix], tclib.Placeholder)
                  # Get the placeholder ID and example from the current message
                  content[ix] = current_content[ix]
              for ix in range(len(transl_content)):
                Fixup(transl_content, ix)
                Fixup(source_content, ix)

      # Only put each translation once into the map. Warn if translations
      # for the same message are different.
      for ix in range(len(transl_cliques)):
        source_msg = source_cliques[ix].GetMessage()
        source_msg.GenerateId()  # needed to refresh ID based on new placeholders
        message_id = source_msg.GetId()
        translated_content = transl_cliques[ix].GetMessage().GetPresentableContent()
        if message_id in id2transl:
          existing_translation = id2transl[message_id]
          if existing_translation != translated_content:
            original_text = source_cliques[ix].GetMessage().GetPresentableContent()
            self.Out('Warning: Two different translations for "%s":\n'
                     ' Translation 1: "%s"\n'
                     ' Translation 2: "%s"\n' %
                     (original_text, existing_translation, translated_content))
        else:
          id2transl[message_id] = translated_content

    # Remove translations for messages that do not occur in the current .grd
    # or have been marked as not translateable, or do not occur in the 'limits'
    # list (if it has been set).
    current_message_ids = current_grd.UberClique().AllMessageIds()
    # Iterate over a snapshot of the keys because we delete entries as we go
    # (deleting while iterating the live view raises in Python 3).
    for message_id in list(id2transl.keys()):
      if (message_id not in current_message_ids or
          not current_grd.UberClique().BestClique(message_id).IsTranslateable() or
          (self.limits and message_id not in self.limits)):
        del id2transl[message_id]

    return id2transl

  # static method
  def WriteTranslations(output_file, translations):
    '''Writes the provided list of translations to the provided output file
    in the format used by the TC's Bulk Translation Upload tool. The file
    must be UTF-8 encoded.

    Args:
      output_file: util.WrapOutputStream(open('bingo.out', 'w'))
      translations: [ [id1, text1], ['12345678', 'Hello USERNAME, howzit?'] ]

    Return:
      None
    '''
    for id, text in translations:
      # Escape markup characters as HTML entities.  (The previous code
      # replaced '<' with '<' and '>' with '>' -- no-ops left behind by
      # entity-mangling of this source file.)
      text = text.replace('<', '&lt;').replace('>', '&gt;')
      output_file.write(id)
      output_file.write(' ')
      output_file.write(text)
      output_file.write('\n')
  WriteTranslations = staticmethod(WriteTranslations)
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI-Layouts/qPreferences.ui'
#
# Created: Sat Feb 7 19:49:38 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_qPreferences(object):
    """Generated by pyside-uic from UI-Layouts/qPreferences.ui.

    The only hand-edit is in retranslateUi: the quote characters inside the
    filters help text are restored to the &quot; HTML entities that pyside-uic
    emits.  The raw double quotes left behind by entity-mangling were a
    Python syntax error.
    """

    def setupUi(self, qPreferences):
        # Builds the widget tree and layouts; generated code, do not re-order.
        qPreferences.setObjectName("qPreferences")
        qPreferences.resize(369, 317)
        qPreferences.setMinimumSize(QtCore.QSize(369, 317))
        qPreferences.setMaximumSize(QtCore.QSize(369, 317))
        self.gridLayout_13 = QtGui.QGridLayout(qPreferences)
        self.gridLayout_13.setSpacing(0)
        self.gridLayout_13.setObjectName("gridLayout_13")
        self.tabWidget = QtGui.QTabWidget(qPreferences)
        self.tabWidget.setObjectName("tabWidget")
        self.tab_4 = QtGui.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.gridLayout_16 = QtGui.QGridLayout(self.tab_4)
        self.gridLayout_16.setSpacing(0)
        self.gridLayout_16.setObjectName("gridLayout_16")
        self.gridLayout_5 = QtGui.QGridLayout()
        self.gridLayout_5.setSpacing(0)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.gridLayout_14 = QtGui.QGridLayout()
        self.gridLayout_14.setHorizontalSpacing(0)
        self.gridLayout_14.setVerticalSpacing(4)
        self.gridLayout_14.setObjectName("gridLayout_14")
        self.nameLabel = QtGui.QLabel(self.tab_4)
        self.nameLabel.setObjectName("nameLabel")
        self.gridLayout_14.addWidget(self.nameLabel, 0, 0, 1, 1)
        self.nameEdit = QtGui.QLineEdit(self.tab_4)
        self.nameEdit.setMinimumSize(QtCore.QSize(0, 32))
        self.nameEdit.setMaximumSize(QtCore.QSize(16777215, 32))
        self.nameEdit.setObjectName("nameEdit")
        self.gridLayout_14.addWidget(self.nameEdit, 1, 0, 1, 1)
        self.passwordLabel = QtGui.QLabel(self.tab_4)
        self.passwordLabel.setObjectName("passwordLabel")
        self.gridLayout_14.addWidget(self.passwordLabel, 2, 0, 1, 1)
        self.passwordEdit = QtGui.QLineEdit(self.tab_4)
        self.passwordEdit.setMinimumSize(QtCore.QSize(0, 32))
        self.passwordEdit.setMaximumSize(QtCore.QSize(16777215, 32))
        self.passwordEdit.setEchoMode(QtGui.QLineEdit.Password)
        self.passwordEdit.setObjectName("passwordEdit")
        self.gridLayout_14.addWidget(self.passwordEdit, 3, 0, 1, 1)
        self.gridLayout_15 = QtGui.QGridLayout()
        self.gridLayout_15.setHorizontalSpacing(4)
        self.gridLayout_15.setVerticalSpacing(0)
        self.gridLayout_15.setObjectName("gridLayout_15")
        self.loginButton = QtGui.QPushButton(self.tab_4)
        self.loginButton.setMinimumSize(QtCore.QSize(0, 32))
        self.loginButton.setObjectName("loginButton")
        self.gridLayout_15.addWidget(self.loginButton, 0, 0, 1, 1)
        self.accountButton = QtGui.QPushButton(self.tab_4)
        self.accountButton.setMinimumSize(QtCore.QSize(0, 32))
        self.accountButton.setObjectName("accountButton")
        self.gridLayout_15.addWidget(self.accountButton, 0, 1, 1, 1)
        self.gridLayout_14.addLayout(self.gridLayout_15, 4, 0, 1, 1)
        self.gridLayout_5.addLayout(self.gridLayout_14, 0, 0, 1, 1)
        self.gridLayout_4 = QtGui.QGridLayout()
        self.gridLayout_4.setSpacing(0)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setSpacing(0)
        self.gridLayout.setContentsMargins(4, -1, -1, -1)
        self.gridLayout.setObjectName("gridLayout")
        self.userLabel = QtGui.QLabel(self.tab_4)
        self.userLabel.setObjectName("userLabel")
        self.gridLayout.addWidget(self.userLabel, 0, 0, 1, 1)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 0, 1, 1, 1)
        self.logoutButton = QtGui.QToolButton(self.tab_4)
        self.logoutButton.setMinimumSize(QtCore.QSize(86, 32))
        self.logoutButton.setMaximumSize(QtCore.QSize(86, 32))
        self.logoutButton.setObjectName("logoutButton")
        self.gridLayout.addWidget(self.logoutButton, 0, 2, 1, 1)
        self.gridLayout_4.addLayout(self.gridLayout, 0, 0, 1, 1)
        self.gridLayout_2 = QtGui.QGridLayout()
        self.gridLayout_2.setSpacing(0)
        self.gridLayout_2.setContentsMargins(4, -1, -1, -1)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.qualityLabel = QtGui.QLabel(self.tab_4)
        self.qualityLabel.setObjectName("qualityLabel")
        self.gridLayout_2.addWidget(self.qualityLabel, 0, 0, 1, 1)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem1, 0, 1, 1, 1)
        self.qualityBox = QtGui.QComboBox(self.tab_4)
        self.qualityBox.setMinimumSize(QtCore.QSize(86, 32))
        self.qualityBox.setMaximumSize(QtCore.QSize(86, 32))
        self.qualityBox.setObjectName("qualityBox")
        self.gridLayout_2.addWidget(self.qualityBox, 0, 2, 1, 1)
        self.gridLayout_4.addLayout(self.gridLayout_2, 1, 0, 1, 1)
        self.gridLayout_3 = QtGui.QGridLayout()
        self.gridLayout_3.setSpacing(0)
        self.gridLayout_3.setContentsMargins(4, -1, -1, -1)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.notificationLabel = QtGui.QLabel(self.tab_4)
        self.notificationLabel.setObjectName("notificationLabel")
        self.gridLayout_3.addWidget(self.notificationLabel, 0, 0, 1, 1)
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_3.addItem(spacerItem2, 0, 1, 1, 1)
        self.notificationBox = QtGui.QComboBox(self.tab_4)
        self.notificationBox.setMinimumSize(QtCore.QSize(86, 32))
        self.notificationBox.setMaximumSize(QtCore.QSize(86, 32))
        self.notificationBox.setObjectName("notificationBox")
        self.gridLayout_3.addWidget(self.notificationBox, 0, 2, 1, 1)
        self.gridLayout_4.addLayout(self.gridLayout_3, 2, 0, 1, 1)
        self.gridLayout_5.addLayout(self.gridLayout_4, 1, 0, 1, 1)
        self.gridLayout_16.addLayout(self.gridLayout_5, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab_4, "")
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.gridLayout_12 = QtGui.QGridLayout(self.tab_2)
        self.gridLayout_12.setSpacing(0)
        self.gridLayout_12.setObjectName("gridLayout_12")
        self.gridLayout_11 = QtGui.QGridLayout()
        self.gridLayout_11.setSpacing(0)
        self.gridLayout_11.setObjectName("gridLayout_11")
        self.gridLayout_9 = QtGui.QGridLayout()
        self.gridLayout_9.setSpacing(0)
        self.gridLayout_9.setObjectName("gridLayout_9")
        self.gridLayout_7 = QtGui.QGridLayout()
        self.gridLayout_7.setSpacing(0)
        self.gridLayout_7.setObjectName("gridLayout_7")
        self.liveCheck = QtGui.QCheckBox(self.tab_2)
        self.liveCheck.setChecked(False)
        self.liveCheck.setObjectName("liveCheck")
        self.gridLayout_7.addWidget(self.liveCheck, 0, 0, 1, 1)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_7.addItem(spacerItem3, 0, 1, 1, 2)
        self.rateCheck = QtGui.QCheckBox(self.tab_2)
        self.rateCheck.setObjectName("rateCheck")
        self.gridLayout_7.addWidget(self.rateCheck, 1, 0, 1, 2)
        spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_7.addItem(spacerItem4, 1, 2, 1, 1)
        self.gridLayout_9.addLayout(self.gridLayout_7, 0, 0, 1, 1)
        self.gridLayout_8 = QtGui.QGridLayout()
        self.gridLayout_8.setSpacing(0)
        self.gridLayout_8.setObjectName("gridLayout_8")
        self.editCheck = QtGui.QCheckBox(self.tab_2)
        self.editCheck.setObjectName("editCheck")
        self.gridLayout_8.addWidget(self.editCheck, 0, 0, 1, 1)
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_8.addItem(spacerItem5, 0, 1, 1, 2)
        self.remixCheck = QtGui.QCheckBox(self.tab_2)
        self.remixCheck.setObjectName("remixCheck")
        self.gridLayout_8.addWidget(self.remixCheck, 1, 0, 1, 2)
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_8.addItem(spacerItem6, 1, 2, 1, 1)
        self.gridLayout_9.addLayout(self.gridLayout_8, 0, 1, 1, 1)
        self.gridLayout_11.addLayout(self.gridLayout_9, 0, 0, 1, 1)
        self.gridLayout_10 = QtGui.QGridLayout()
        self.gridLayout_10.setContentsMargins(4, -1, -1, -1)
        self.gridLayout_10.setHorizontalSpacing(0)
        self.gridLayout_10.setVerticalSpacing(4)
        self.gridLayout_10.setObjectName("gridLayout_10")
        self.label = QtGui.QLabel(self.tab_2)
        self.label.setWordWrap(True)
        self.label.setObjectName("label")
        self.gridLayout_10.addWidget(self.label, 0, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.tab_2)
        font = QtGui.QFont()
        font.setWeight(50)
        font.setBold(False)
        self.label_2.setFont(font)
        self.label_2.setWordWrap(True)
        self.label_2.setObjectName("label_2")
        self.gridLayout_10.addWidget(self.label_2, 1, 0, 1, 1)
        self.gridLayout_11.addLayout(self.gridLayout_10, 1, 0, 1, 1)
        self.gridLayout_12.addLayout(self.gridLayout_11, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab_2, "")
        self.tab_3 = QtGui.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.gridLayout_17 = QtGui.QGridLayout(self.tab_3)
        self.gridLayout_17.setObjectName("gridLayout_17")
        self.gridLayout_6 = QtGui.QGridLayout()
        self.gridLayout_6.setSpacing(0)
        self.gridLayout_6.setObjectName("gridLayout_6")
        self.aboutLabel = QtGui.QLabel(self.tab_3)
        self.aboutLabel.setWordWrap(True)
        self.aboutLabel.setOpenExternalLinks(True)
        self.aboutLabel.setObjectName("aboutLabel")
        self.gridLayout_6.addWidget(self.aboutLabel, 0, 0, 1, 1)
        spacerItem7 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout_6.addItem(spacerItem7, 1, 0, 1, 1)
        self.gridLayout_17.addLayout(self.gridLayout_6, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab_3, "")
        self.gridLayout_13.addWidget(self.tabWidget, 0, 0, 1, 1)

        self.retranslateUi(qPreferences)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(qPreferences)

    def retranslateUi(self, qPreferences):
        # Applies all user-visible strings; generated code.
        qPreferences.setWindowTitle(QtGui.QApplication.translate("qPreferences", "qAndora - Preferences", None, QtGui.QApplication.UnicodeUTF8))
        self.nameLabel.setText(QtGui.QApplication.translate("qPreferences", "Pandora Login:", None, QtGui.QApplication.UnicodeUTF8))
        self.passwordLabel.setText(QtGui.QApplication.translate("qPreferences", "Password:", None, QtGui.QApplication.UnicodeUTF8))
        self.loginButton.setText(QtGui.QApplication.translate("qPreferences", "Login", None, QtGui.QApplication.UnicodeUTF8))
        self.accountButton.setText(QtGui.QApplication.translate("qPreferences", "Create Account", None, QtGui.QApplication.UnicodeUTF8))
        self.userLabel.setText(QtGui.QApplication.translate("qPreferences", "Change User:", None, QtGui.QApplication.UnicodeUTF8))
        self.logoutButton.setText(QtGui.QApplication.translate("qPreferences", "Logout", None, QtGui.QApplication.UnicodeUTF8))
        self.qualityLabel.setText(QtGui.QApplication.translate("qPreferences", "Audio Quality:", None, QtGui.QApplication.UnicodeUTF8))
        self.notificationLabel.setText(QtGui.QApplication.translate("qPreferences", "Desktop Notifications:", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), QtGui.QApplication.translate("qPreferences", "General", None, QtGui.QApplication.UnicodeUTF8))
        self.liveCheck.setText(QtGui.QApplication.translate("qPreferences", "Filter live songs", None, QtGui.QApplication.UnicodeUTF8))
        self.rateCheck.setText(QtGui.QApplication.translate("qPreferences", "Ban Filtered Songs", None, QtGui.QApplication.UnicodeUTF8))
        self.editCheck.setText(QtGui.QApplication.translate("qPreferences", "Filter edit songs", None, QtGui.QApplication.UnicodeUTF8))
        self.remixCheck.setText(QtGui.QApplication.translate("qPreferences", "Filter remix songs", None, QtGui.QApplication.UnicodeUTF8))
        # Restored &quot; entities here; raw " characters inside this
        # double-quoted literal were a syntax error.
        self.label.setText(QtGui.QApplication.translate("qPreferences", "<html><head/><body><p>Remix are songs that have &quot;mix&quot; in () or []</p><p>Live are songs that have &quot;live&quot; in () or []</p><p>Edit are songs that contain &quot;edit&quot; in () or []</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("qPreferences", "<b>Please note:</b> <br>Filtered songs may count against your skip limit.", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtGui.QApplication.translate("qPreferences", "Filters", None, QtGui.QApplication.UnicodeUTF8))
        self.aboutLabel.setText(QtGui.QApplication.translate("qPreferences", "<html><head/><body><p>qAndora is a cross platform, open source, <a href=\"www.pandora.com\"><span style=\" text-decoration: underline; color:#0057ae;\">Pandora Internet Radio</span></a> client written in <a href=\"https://www.python.org/\"><span style=\" text-decoration: underline; color:#0057ae;\">Python</span></a> using <a href=\"http://qt-project.org/\"><span style=\" text-decoration: underline; color:#0057ae;\">Qt</span></a> by <a href=\"http://www.jeffhoogland.com/\"><span style=\" text-decoration: underline; color:#0057ae;\">Jeff Hoogland</span></a>.<br/><br/><a href=\"https://github.com/JeffHoogland/qAndora\"><span style=\" text-decoration: underline; color:#0057ae;\">qAndora source on GitHub</span></a></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), QtGui.QApplication.translate("qPreferences", "About", None, QtGui.QApplication.UnicodeUTF8))
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationsamlpolicy(base_resource) :
""" Configuration for AAA Saml policy resource. """
def __init__(self) :
	# Name of the SAML policy (the object identifier on the NetScaler).
	self._name = ""
	# Expression/named rule deciding whether the policy applies.
	self._rule = ""
	# SAML authentication action performed when the policy matches.
	self._reqaction = ""
	# Resource count filled in by count queries; the triple underscore is the
	# generated SDK's convention (yields a name-mangled attribute).
	self.___count = 0
@property
def name(self) :
	"""Name for the SAML policy.
	Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after SAML policy is created.
	The following requirement applies only to the NetScaler CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1.
	"""
	# Generated accessor; the try/except re-raise is the SDK's uniform pattern.
	try :
		return self._name
	except Exception as e:
		raise e
@name.setter
def name(self, name) :
	"""Name for the SAML policy.
	Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after SAML policy is created.
	The following requirement applies only to the NetScaler CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1
	"""
	# Generated setter: stores the value without client-side validation.
	try :
		self._name = name
	except Exception as e:
		raise e
@property
def rule(self) :
	"""Name of the NetScaler named rule, or a default syntax expression, that the policy uses to determine whether to attempt to authenticate the user with the SAML server.<br/>Minimum length = 1.
	"""
	# Generated accessor; the try/except re-raise is the SDK's uniform pattern.
	try :
		return self._rule
	except Exception as e:
		raise e
@rule.setter
def rule(self, rule) :
	"""Name of the NetScaler named rule, or a default syntax expression, that the policy uses to determine whether to attempt to authenticate the user with the SAML server.<br/>Minimum length = 1
	"""
	# Generated setter: stores the value without client-side validation.
	try :
		self._rule = rule
	except Exception as e:
		raise e
@property
def reqaction(self) :
	"""Name of the SAML authentication action to be performed if the policy matches.<br/>Minimum length = 1.
	"""
	# Generated accessor; the try/except re-raise is the SDK's uniform pattern.
	try :
		return self._reqaction
	except Exception as e:
		raise e
@reqaction.setter
def reqaction(self, reqaction) :
	"""Name of the SAML authentication action to be performed if the policy matches.<br/>Minimum length = 1
	"""
	# Generated setter: stores the value without client-side validation.
	try :
		self._reqaction = reqaction
	except Exception as e:
		raise e
def _get_nitro_response(self, service, response) :
	""" converts nitro response into object and returns the object array in case of get request.

	Raises nitro_exception for any non-zero errorcode whose severity is
	"ERROR" or absent; non-ERROR severities (e.g. warnings) fall through
	and the parsed resources are returned normally.
	"""
	try :
		result = service.payload_formatter.string_to_resource(authenticationsamlpolicy_response, response, self.__class__.__name__)
		if(result.errorcode != 0) :
			# Errorcode 444 additionally clears the client session
			# (presumably session expiry -- TODO confirm against NITRO docs).
			if (result.errorcode == 444) :
				service.clear_session(self)
			if result.severity :
				if (result.severity == "ERROR") :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			else :
				raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
		return result.authenticationsamlpolicy
	except Exception as e :
		raise e
def _get_object_name(self) :
	""" Returns the value of object identifier argument

	For this resource the identifier is the policy name; returns None when
	the name is empty/unset.
	"""
	try :
		if (self.name) :
			return str(self.name)
		return None
	except Exception as e :
		raise e
@classmethod
def add(cls, client, resource) :
	""" Use this API to add authenticationsamlpolicy.

	resource may be a single authenticationsamlpolicy object or a list of
	them; lists are sent as a single bulk request.
	"""
	try :
		if type(resource) is not list :
			# Single resource: copy only the writable fields onto a fresh object.
			addresource = authenticationsamlpolicy()
			addresource.name = resource.name
			addresource.rule = resource.rule
			addresource.reqaction = resource.reqaction
			return addresource.add_resource(client)
		else :
			if (resource and len(resource) > 0) :
				addresources = [ authenticationsamlpolicy() for _ in range(len(resource))]
				for i in range(len(resource)) :
					addresources[i].name = resource[i].name
					addresources[i].rule = resource[i].rule
					addresources[i].reqaction = resource[i].reqaction
			result = cls.add_bulk_request(client, addresources)
			return result
	except Exception as e :
		raise e
@classmethod
def delete(cls, client, resource) :
	""" Use this API to delete authenticationsamlpolicy.

	resource may be a policy name (string), a policy object, or a list of
	either; lists are sent as a single bulk request.
	"""
	try :
		if type(resource) is not list :
			deleteresource = authenticationsamlpolicy()
			# A bare string is treated as the policy name; otherwise copy
			# the name from the given object.
			if type(resource) != type(deleteresource):
				deleteresource.name = resource
			else :
				deleteresource.name = resource.name
			return deleteresource.delete_resource(client)
		else :
			# List of names vs. list of objects, distinguished by element type.
			if type(resource[0]) != cls :
				if (resource and len(resource) > 0) :
					deleteresources = [ authenticationsamlpolicy() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].name = resource[i]
			else :
				if (resource and len(resource) > 0) :
					deleteresources = [ authenticationsamlpolicy() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].name = resource[i].name
			result = cls.delete_bulk_request(client, deleteresources)
			return result
	except Exception as e :
		raise e
@classmethod
def update(cls, client, resource) :
    """ Use this API to update authenticationsamlpolicy.

    resource may be a single authenticationsamlpolicy object or a list of
    them; only name, rule and reqaction are sent in the update request.
    """
    try :
        if type(resource) is not list :
            # Single resource: copy the updatable properties and send one
            # update request.
            updateresource = authenticationsamlpolicy()
            updateresource.name = resource.name
            updateresource.rule = resource.rule
            updateresource.reqaction = resource.reqaction
            return updateresource.update_resource(client)
        else :
            if (resource and len(resource) > 0) :
                # Bulk update: mirror each input resource into a fresh object.
                updateresources = [ authenticationsamlpolicy() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    updateresources[i].name = resource[i].name
                    updateresources[i].rule = resource[i].rule
                    updateresources[i].reqaction = resource[i].reqaction
            # NOTE(review): if resource is an empty list, updateresources is
            # unbound here and the next line raises NameError.
            result = cls.update_bulk_request(client, updateresources)
            return result
    except Exception as e :
        raise e
@classmethod
def unset(cls, client, resource, args) :
    """ Use this API to unset the properties of authenticationsamlpolicy resource.
    Properties that need to be unset are specified in args array.

    resource may be a policy name (string), a configured
    authenticationsamlpolicy object, or a list of either.
    """
    try :
        if type(resource) is not list :
            unsetresource = authenticationsamlpolicy()
            # Accept either a bare name or a resource object.
            if type(resource) != type(unsetresource):
                unsetresource.name = resource
            else :
                unsetresource.name = resource.name
            return unsetresource.unset_resource(client, args)
        else :
            # Bulk unset: the first element decides whether the list holds
            # bare names or resource objects.
            if type(resource[0]) != cls :
                if (resource and len(resource) > 0) :
                    unsetresources = [ authenticationsamlpolicy() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].name = resource[i]
            else :
                if (resource and len(resource) > 0) :
                    unsetresources = [ authenticationsamlpolicy() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].name = resource[i].name
            # NOTE(review): an empty list raises IndexError on resource[0]
            # above; if either inner guard fails, unsetresources is unbound
            # here and the next line raises NameError.
            result = cls.unset_bulk_request(client, unsetresources, args)
            return result
    except Exception as e :
        raise e
@classmethod
def get(cls, client, name="", option_="") :
    """ Use this API to fetch all the authenticationsamlpolicy resources that are configured on netscaler.

    With no name, fetches every configured policy; with a single name,
    fetches that policy; with a list of names, fetches each one and
    returns the responses as a list.
    """
    try :
        if not name :
            obj = authenticationsamlpolicy()
            response = obj.get_resources(client, option_)
        else :
            if type(name) != cls :
                if type(name) is not list :
                    obj = authenticationsamlpolicy()
                    obj.name = name
                    response = obj.get_resource(client, option_)
                else :
                    if name and len(name) > 0 :
                        response = [authenticationsamlpolicy() for _ in range(len(name))]
                        obj = [authenticationsamlpolicy() for _ in range(len(name))]
                        for i in range(len(name)) :
                            # NOTE(review): re-assigning obj[i] here is
                            # redundant — the list above already filled it.
                            obj[i] = authenticationsamlpolicy()
                            obj[i].name = name[i]
                            response[i] = obj[i].get_resource(client, option_)
        # NOTE(review): when name is a cls instance (or an empty list),
        # response is never bound and this line raises NameError.
        return response
    except Exception as e :
        raise e
@classmethod
def get_filtered(cls, client, filter_) :
    """ Use this API to fetch filtered set of authenticationsamlpolicy resources.
    filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        filter_option = options()
        filter_option.filter = filter_
        return authenticationsamlpolicy().getfiltered(client, filter_option)
    except Exception as e :
        raise e
@classmethod
def count(cls, client) :
    """ Use this API to count the authenticationsamlpolicy resources configured on NetScaler.
    """
    try :
        count_option = options()
        count_option.count = True
        matches = authenticationsamlpolicy().get_resources(client, count_option)
        if not matches :
            return 0
        # The service reports the total in the synthetic '___count' attribute.
        return matches[0].__dict__['___count']
    except Exception as e :
        raise e
@classmethod
def count_filtered(cls, client, filter_) :
    """ Use this API to count filtered the set of authenticationsamlpolicy resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        count_option = options()
        count_option.count = True
        count_option.filter = filter_
        matches = authenticationsamlpolicy().getfiltered(client, count_option)
        if not matches :
            return 0
        # The service reports the total in the synthetic '___count' attribute.
        return matches[0].__dict__['___count']
    except Exception as e :
        raise e
class authenticationsamlpolicy_response(base_response) :
    """ Deserialization wrapper for authenticationsamlpolicy API responses. """
    def __init__(self, length=1) :
        # Standard nitro response envelope fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the payload list.  (The original also assigned an empty
        # list first, which was dead code — immediately overwritten here.)
        self.authenticationsamlpolicy = [authenticationsamlpolicy() for _ in range(length)]
| |
##
# Copyright (c) 2007-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from calendarserver.tap.util import (
MemoryLimitService, Stepper, verifyTLSCertificate
)
from twistedcaldav.util import computeProcessCount
from twistedcaldav.test.util import TestCase
from twisted.internet.task import Clock
from twisted.internet.defer import succeed, inlineCallbacks
from twisted.python.filepath import FilePath
from twistedcaldav.config import ConfigDict
class ProcessCountTestCase(TestCase):
    """Exercise computeProcessCount() across cpu/memory combinations."""

    def test_count(self):
        # Each row: (minimum, perCPU, perGB, cpuCount, memory-in-GB, expected)
        cases = (
            (0, 1, 1, 0, 0, 0),
            (1, 2, 2, 0, 0, 1),
            (1, 2, 1, 1, 1, 1),
            (1, 2, 2, 1, 1, 2),
            (1, 2, 2, 2, 1, 2),
            (1, 2, 2, 1, 2, 2),
            (1, 2, 2, 2, 2, 4),
            (4, 1, 2, 8, 2, 4),
            (6, 2, 2, 2, 2, 6),
            (1, 2, 1, 4, 99, 8),
            (2, 1, 2, 2, 2, 2),   # 2 cores, 2GB = 2
            (2, 1, 2, 2, 4, 2),   # 2 cores, 4GB = 2
            (2, 1, 2, 8, 6, 8),   # 8 cores, 6GB = 8
            (2, 1, 2, 8, 16, 8),  # 8 cores, 16GB = 8
        )
        oneGB = 1024 * 1024 * 1024
        for minimum, perCPU, perGB, cpuCount, memGB, expected in cases:
            self.assertEquals(
                expected,
                computeProcessCount(
                    minimum, perCPU, perGB,
                    cpuCount=cpuCount, memSize=memGB * oneGB
                )
            )
# Stub classes for MemoryLimitServiceTestCase
class StubProtocol(object):
    """Minimal stand-in for a process protocol; only carries a transport."""

    def __init__(self, transport):
        self.transport = transport
class StubProcess(object):
    """Minimal stand-in for a process object; only carries a pid."""

    def __init__(self, pid):
        self.pid = pid
class StubProcessMonitor(object):
    """Records stopProcess() calls instead of touching real processes."""

    def __init__(self, processes, protocols):
        self.processes = processes
        self.protocols = protocols
        self.history = []

    def stopProcess(self, name):
        # Remember the order in which stops were requested.
        self.history.append(name)
class MemoryLimitServiceTestCase(TestCase):

    def test_checkMemory(self):
        """
        Set up stub objects to verify MemoryLimitService.checkMemory( )
        only stops the processes whose memory usage exceeds the configured
        limit, and skips memcached
        """
        data = {
            # PID : (name, resident memory-in-bytes, virtual memory-in-bytes)
            101: ("process #1", 10, 1010),
            102: ("process #2", 30, 1030),
            103: ("process #3", 50, 1050),
            99: ("memcached-Default", 10, 1010),
        }

        processes = []
        protocols = {}
        # PORTABILITY FIX: dict.iteritems() exists only on Python 2;
        # items() behaves identically here (the dict is tiny) and works on
        # both Python 2 and 3.
        for pid, (name, _ignore_resident, _ignore_virtual) in data.items():
            protocols[name] = StubProtocol(StubProcess(pid))
            processes.append(name)
        processMonitor = StubProcessMonitor(processes, protocols)
        clock = Clock()
        service = MemoryLimitService(processMonitor, 10, 15, True, reactor=clock)

        # For testing, use a stub implementation of memory-usage lookup
        def testMemoryForPID(pid, residentOnly):
            return data[pid][1 if residentOnly else 2]
        service._memoryForPID = testMemoryForPID

        # After 5 seconds, nothing should have happened, since the interval is 10 seconds
        service.startService()
        clock.advance(5)
        self.assertEquals(processMonitor.history, [])

        # After 7 more seconds, processes 2 and 3 should have been stopped since their
        # memory usage exceeds 10 bytes
        clock.advance(7)
        self.assertEquals(processMonitor.history, ['process #2', 'process #3'])

        # Now switch to looking at virtual memory, in which case all 3 processes
        # should be stopped
        service._residentOnly = False
        processMonitor.history = []
        clock.advance(10)
        self.assertEquals(processMonitor.history, ['process #1', 'process #2', 'process #3'])
#
# Tests for Stepper
#
class Step(object):
    """A Stepper step that records every invocation.

    The record callback receives (value, failure), where value is the
    subclass's successValue or errorValue marker.  With shouldFail set,
    the success path raises ZeroDivisionError and the failure path
    propagates the incoming failure.
    """

    def __init__(self, recordCallback, shouldFail):
        self._record = recordCallback
        self._fail = shouldFail

    def stepWithResult(self, result):
        self._record(self.successValue, None)
        if self._fail:
            1 / 0  # deliberately raise ZeroDivisionError
        return succeed(result)

    def stepWithFailure(self, failure):
        self._record(self.errorValue, failure)
        if self._fail:
            return failure
class StepOne(Step):
    # Marker strings recorded by Step's callback for this step.
    successValue = "one success"
    errorValue = "one failure"
class StepTwo(Step):
    # Marker strings recorded by Step's callback for this step.
    successValue = "two success"
    errorValue = "two failure"
class StepThree(Step):
    # Marker strings recorded by Step's callback for this step.
    successValue = "three success"
    errorValue = "three failure"
class StepFour(Step):
    # Marker strings recorded by Step's callback for this step.
    successValue = "four success"
    errorValue = "four failure"
class StepperTestCase(TestCase):
    """Drive a Stepper through all-success, all-failure and mixed scenarios,
    recording the order in which each step's callback fires."""

    def setUp(self):
        self.history = []
        self.stepper = Stepper()

    def _record(self, value, failure):
        # Collect only the marker value; the failure object is ignored.
        self.history.append(value)

    @inlineCallbacks
    def test_allSuccess(self):
        # addStep() returns the Stepper, so calls can be chained.
        self.stepper.addStep(
            StepOne(self._record, False)
        ).addStep(
            StepTwo(self._record, False)
        ).addStep(
            StepThree(self._record, False)
        ).addStep(
            StepFour(self._record, False)
        )
        result = (yield self.stepper.start("abc"))
        self.assertEquals(result, "abc")  # original result passed through
        self.assertEquals(
            self.history,
            ['one success', 'two success', 'three success', 'four success'])

    def test_allFailure(self):
        # StepOne's success path raises; every later step sees the failure
        # path and re-raises, so the chain ends in ZeroDivisionError.
        self.stepper.addStep(StepOne(self._record, True))
        self.stepper.addStep(StepTwo(self._record, True))
        self.stepper.addStep(StepThree(self._record, True))
        self.stepper.addStep(StepFour(self._record, True))
        self.failUnlessFailure(self.stepper.start(), ZeroDivisionError)
        self.assertEquals(
            self.history,
            ['one success', 'two failure', 'three failure', 'four failure'])

    @inlineCallbacks
    def test_partialFailure(self):
        # StepOne succeeds then raises; StepTwo's failure handler absorbs
        # the error (returns None), so StepThree runs its success path and
        # raises again, and StepFour absorbs that failure.
        self.stepper.addStep(StepOne(self._record, True))
        self.stepper.addStep(StepTwo(self._record, False))
        self.stepper.addStep(StepThree(self._record, True))
        self.stepper.addStep(StepFour(self._record, False))
        result = (yield self.stepper.start("abc"))
        self.assertEquals(result, None)  # original result is gone
        self.assertEquals(
            self.history,
            ['one success', 'two failure', 'three success', 'four failure'])
class PreFlightChecksTestCase(TestCase):
    """
    Verify that missing, empty, or bogus TLS Certificates are detected
    """

    def test_missingCertificate(self):
        # Certificate path that does not exist on disk.
        success, _ignore_reason = verifyTLSCertificate(
            ConfigDict(
                {
                    "SSLCertificate": "missing",
                }
            )
        )
        self.assertFalse(success)

    def test_emptyCertificate(self):
        # Certificate file exists but is zero-length.
        certFilePath = FilePath(self.mktemp())
        certFilePath.setContent("")
        success, _ignore_reason = verifyTLSCertificate(
            ConfigDict(
                {
                    "SSLCertificate": certFilePath.path,
                }
            )
        )
        self.assertFalse(success)

    def test_bogusCertificate(self):
        # Cert/key files exist but do not contain valid PEM data.
        certFilePath = FilePath(self.mktemp())
        certFilePath.setContent("bogus")
        keyFilePath = FilePath(self.mktemp())
        keyFilePath.setContent("bogus")

        success, _ignore_reason = verifyTLSCertificate(
            ConfigDict(
                {
                    "SSLCertificate": certFilePath.path,
                    "SSLPrivateKey": keyFilePath.path,
                    "SSLAuthorityChain": "",
                    "SSLMethod": "SSLv3_METHOD",
                    "SSLCiphers": "ALL:!aNULL:!ADH:!eNULL:!LOW:!EXP:RC4+RSA:+HIGH:+MEDIUM",
                }
            )
        )
        self.assertFalse(success)
| |
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Asserts used in views.
These directly return a DataONE Exception to the client if a test condition is not true.
"""
import contextlib
import requests
import d1_common.date_time
import d1_common.types
import d1_common.types.exceptions
import d1_common.url
import django.conf
import d1_gmn.app
import d1_gmn.app.did
# def is_unused(did):
# """Assert that the ``did`` is currently unused and so is available to be
# assigned to a new object.
#
# To be unused, the DID:
# - Must not exist as a PID or SID.
# - Must not have been accepted for replication.
# - Must not be referenced as obsoletes or obsoleted_by in any object
# - Must not be referenced in any resource map
# """
# if d1_gmn.app.did._is_did(did):
# raise d1_common.types.exceptions.IdentifierNotUnique(
# 0, u'Identifier is already in use as {}. id="{}"'
# .format(d1_gmn.app.did.classify_identifier(did), did), identifier=did
# )
def is_valid_pid_for_create(did):
    """Assert that ``did`` can be used as a PID for creating a new object with
    MNStorage.create() or MNStorage.update()."""
    if d1_gmn.app.did.is_valid_pid_for_create(did):
        return
    raise d1_common.types.exceptions.IdentifierNotUnique(
        0,
        'Identifier is already in use as {}. did="{}"'.format(
            d1_gmn.app.did.classify_identifier(did), did
        ),
        identifier=did,
    )
def is_valid_pid_to_be_updated(did):
    """Assert that ``did`` is the PID of an object that can be updated (obsoleted) with
    MNStorage.update()"""
    if d1_gmn.app.did.is_valid_pid_to_be_updated(did):
        return
    msg = (
        "Object cannot be updated because the identifier for the object to be "
        'updated is {}. did="{}"'.format(d1_gmn.app.did.classify_identifier(did), did)
    )
    raise d1_common.types.exceptions.InvalidRequest(0, msg, identifier=did)
def is_did(did):
    """Assert that ``did`` is a known identifier of any type."""
    if d1_gmn.app.did.is_did(did):
        return
    raise d1_common.types.exceptions.NotFound(
        0, 'Unknown identifier. id="{}"'.format(did), identifier=did
    )
def is_existing_object(did):
    """Raise NotFound if object does not exist."""
    if d1_gmn.app.did.is_existing_object(did):
        return
    msg = (
        "Identifier is {}. Expected a Persistent ID (PID) for an existing "
        'object. id="{}"'.format(d1_gmn.app.did.classify_identifier(did), did)
    )
    raise d1_common.types.exceptions.NotFound(0, msg, identifier=did)
def is_sid(did):
    """Assert that ``did`` is a known Series ID (SID)."""
    if d1_gmn.app.did.is_sid(did):
        return
    msg = 'Identifier is {}. Expected a Series ID (SID). id="{}"'.format(
        d1_gmn.app.did.classify_identifier(did), did
    )
    raise d1_common.types.exceptions.InvalidRequest(0, msg, identifier=did)
def is_bool_param(param_name, bool_val):
    """Assert that ``bool_val`` is a valid boolean request parameter value.

    Raises InvalidRequest naming ``param_name`` otherwise.
    """
    # BUG FIX: this module imports only d1_gmn.app and d1_gmn.app.did at the
    # top level, so d1_gmn.app.views.util was not guaranteed to be loaded and
    # the attribute access below could raise AttributeError.  Import locally
    # (not at module scope) to avoid a potential circular import — the views
    # package uses these asserts.
    import d1_gmn.app.views.util

    if not d1_gmn.app.views.util.is_bool_param(bool_val):
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Invalid boolean value for parameter. parameter="{}" value="{}"'.format(
                param_name, bool_val
            ),
        )
def is_in_revision_chain(pid):
    """Assert that the object identified by ``pid`` is part of a revision chain."""
    if d1_gmn.app.did.is_in_revision_chain(pid):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0, 'Object is not in a revision chain. pid="{}"'.format(pid), identifier=pid
    )
def is_not_obsoleted(pid):
    """Assert that the object identified by ``pid`` has not been obsoleted."""
    if not d1_gmn.app.did.is_obsoleted(pid):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0, 'Object has already been obsoleted. pid="{}"'.format(pid), identifier=pid
    )
# ------------------------------------------------------------------------------
# Misc
# ------------------------------------------------------------------------------
def post_has_mime_parts(request, parts):
    """Validate that a MMP POST contains all required sections.
    :param request: Django Request
    :param parts: [(part_type, part_name), ...]
    :return: None or raises exception.
    Where information is stored in the request:
    part_type header: request.META['HTTP_<UPPER CASE NAME>']
    part_type file: request.FILES['<name>']
    part_type field: request.POST['<name>']
    """
    missing = []
    for part_type, part_name in parts:
        if part_type == "header":
            present = "HTTP_" + part_name.upper() in request.META
        elif part_type == "file":
            present = part_name in list(request.FILES.keys())
        elif part_type == "field":
            present = part_name in list(request.POST.keys())
        else:
            raise d1_common.types.exceptions.ServiceFailure(
                0, 'Invalid part_type. part_type="{}"'.format(part_type)
            )
        if not present:
            missing.append("{}: {}".format(part_type, part_name))
    if len(missing) > 0:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Missing part(s) in MIME Multipart document. missing="{}"'.format(
                ", ".join(missing)
            ),
        )
def date_is_utc(date_time):
    """Assert that ``date_time`` is specified in UTC."""
    if d1_common.date_time.is_utc(date_time):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0, 'Date-time must be specified in UTC. date_time="{}"'.format(date_time)
    )
def url_is_http_or_https(url):
    """Assert that ``url`` uses the HTTP or HTTPS scheme."""
    if d1_common.url.isHttpOrHttps(url):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0,
        'URL specified for remote storage must be HTTP or HTTPS. url="{}"'.format(
            url
        ),
    )
def url_is_retrievable(url):
    """Assert that ``url`` can be opened and yields at least one byte.

    Streams the response and reads a single byte, so arbitrarily large
    remote objects are not downloaded.  Any failure (connection error,
    non-2xx status, or an empty body) is reported as InvalidRequest.
    """
    try:
        with contextlib.closing(
            requests.get(
                url, stream=True, timeout=django.conf.settings.PROXY_MODE_STREAM_TIMEOUT
            )
        ) as r:
            r.raise_for_status()
            # Returning from inside the loop proves the body is non-empty;
            # an empty body skips the loop and raises below.
            for _ in r.iter_content(chunk_size=1):
                return True
            raise IOError("Object appears to be empty")
    except Exception as e:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            "Invalid URL specified for remote storage. The referenced object is not "
            'retrievable. url="{}", error="{}"'.format(url, str(e)),
        )
def is_not_replica(pid):
    """Assert that the object identified by ``pid`` is not a local replica."""
    if not d1_gmn.app.did.is_local_replica(pid):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0,
        "Object is a replica and cannot be updated on this Member Node. "
        "The operation must be performed on the authoritative Member Node. "
        'pid="{}"'.format(pid),
        identifier=pid,
    )
def is_not_archived(pid):
    """Assert that the object identified by ``pid`` has not been archived."""
    if not d1_gmn.app.did.is_archived(pid):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0,
        "The object has been archived and cannot be updated. "
        'pid="{}"'.format(pid),
        identifier=pid,
    )
| |
# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..utils import (
urlencode_postdata,
int_or_none,
unified_strdate,
)
class VierIE(InfoExtractor):
    """Extract a single video from vier.be or vijf.be.

    Handles regular video pages (with or without a numeric video id in the
    URL) as well as the bare v3/embed and public embed endpoints.  Account
    credentials (netrc machine 'vier') unlock metadata for restricted
    videos; without them extraction falls back to the metadata-less embed
    page.
    """
    IE_NAME = 'vier'
    IE_DESC = 'vier.be and vijf.be'
    _VALID_URL = r'''(?x)
        https?://
            (?:www\.)?(?P<site>vier|vijf)\.be/
            (?:
                (?:
                    [^/]+/videos|
                    video(?:/[^/]+)*
                )/
                (?P<display_id>[^/]+)(?:/(?P<id>\d+))?|
                (?:
                    video/v3/embed|
                    embed/video/public
                )/(?P<embed_id>\d+)
            )
    '''
    _NETRC_MACHINE = 'vier'
    _TESTS = [{
        'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129',
        'md5': 'e4ae2054a6b040ef1e289e20d111b46e',
        'info_dict': {
            'id': '16129',
            'display_id': 'het-wordt-warm-de-moestuin',
            'ext': 'mp4',
            'title': 'Het wordt warm in De Moestuin',
            'description': 'De vele uren werk eisen hun tol. Wim droomt van assistentie...',
            'upload_date': '20121025',
            'series': 'Plan B',
            'tags': ['De Moestuin', 'Moestuin', 'meisjes', 'Tomaat', 'Wim', 'Droom'],
        },
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos/zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas/2561614',
        'info_dict': {
            'id': '2561614',
            'display_id': 'zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas',
            'ext': 'mp4',
            'title': 'md5:84f45fe48b8c1fa296a7f6d208d080a7',
            'description': 'md5:0356d4981e58b8cbee19355cbd51a8fe',
            'upload_date': '20170228',
            'series': 'Temptation Island',
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'Jani gaat naar Tokio - Aflevering 4',
            'description': 'md5:aa8d611541db6ae9e863125704511f88',
            'upload_date': '20170501',
            'series': 'Jani gaat',
            'episode_number': 4,
            'tags': ['Jani Gaat', 'Volledige Aflevering'],
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires account credentials',
    }, {
        # Requires account credentials but bypassed extraction via v3/embed page
        # without metadata
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'jani-gaat-naar-tokio-aflevering-4',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Log in to extract metadata'],
    }, {
        # Without video id in URL
        'url': 'http://www.vier.be/planb/videos/dit-najaar-plan-b',
        'only_matching': True,
    }, {
        'url': 'http://www.vier.be/video/v3/embed/16129',
        'only_matching': True,
    }, {
        'url': 'https://www.vijf.be/embed/video/public/4093',
        'only_matching': True,
    }, {
        'url': 'https://www.vier.be/video/blockbusters/in-juli-en-augustus-summer-classics',
        'only_matching': True,
    }, {
        'url': 'https://www.vier.be/video/achter-de-rug/2017/achter-de-rug-seizoen-1-aflevering-6',
        'only_matching': True,
    }]

    def _real_initialize(self):
        # Login is attempted lazily, once, on the first extraction.
        self._logged_in = False

    def _login(self, site):
        username, password = self._get_login_info()
        if username is None or password is None:
            # No credentials configured; extraction proceeds anonymously.
            return

        login_page = self._download_webpage(
            'http://www.%s.be/user/login' % site,
            None, note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata({
                'form_id': 'user_login',
                'name': username,
                'pass': password,
            }),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        login_error = self._html_search_regex(
            r'(?s)<div class="messages error">\s*<div>\s*<h2.+?</h2>(.+?)<',
            login_page, 'login error', default=None)
        if login_error:
            self.report_warning('Unable to log in: %s' % login_error)
        else:
            self._logged_in = True

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        embed_id = mobj.group('embed_id')
        # For embed URLs the embed id doubles as display id and video id.
        display_id = mobj.group('display_id') or embed_id
        video_id = mobj.group('id') or embed_id
        site = mobj.group('site')

        if not self._logged_in:
            self._login(site)

        webpage = self._download_webpage(url, display_id)

        # A login form on the page means the video is account-restricted;
        # fall back to the metadata-less embed page.
        if r'id="user-login"' in webpage:
            self.report_warning(
                'Log in to extract metadata', video_id=display_id)
            webpage = self._download_webpage(
                'http://www.%s.be/video/v3/embed/%s' % (site, video_id),
                display_id)

        video_id = self._search_regex(
            [r'data-nid="(\d+)"', r'"nid"\s*:\s*"(\d+)"'],
            webpage, 'video id', default=video_id or display_id)

        # Prefer an explicit m3u8 playlist URL embedded in the page;
        # otherwise reconstruct it from the application/filename pair.
        playlist_url = self._search_regex(
            r'data-file=(["\'])(?P<url>(?:https?:)?//[^/]+/.+?\.m3u8.*?)\1',
            webpage, 'm3u8 url', default=None, group='url')

        if not playlist_url:
            application = self._search_regex(
                [r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'],
                webpage, 'application', default=site + '_vod')
            filename = self._search_regex(
                [r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'],
                webpage, 'filename')
            playlist_url = 'http://vod.streamcloud.be/%s/_definst_/mp4:%s.mp4/playlist.m3u8' % (application, filename)

        formats = self._extract_wowza_formats(
            playlist_url, display_id, skip_protocols=['dash'])
        self._sort_formats(formats)

        title = self._og_search_title(webpage, default=display_id)
        description = self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-type-text-with-summary\b[^>]*?\1[^>]*>.*?<p>(?P<value>.+?)</p>',
            webpage, 'description', default=None, group='value')
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        upload_date = unified_strdate(self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-name-post-date\b[^>]*?\1[^>]*>.*?(?P<value>\d{2}/\d{2}/\d{4})',
            webpage, 'upload date', default=None, group='value'))

        series = self._search_regex(
            r'data-program=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'series', default=None, group='value')
        episode_number = int_or_none(self._search_regex(
            r'(?i)aflevering (\d+)', title, 'episode number', default=None))
        tags = re.findall(r'<a\b[^>]+\bhref=["\']/tags/[^>]+>([^<]+)<', webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'series': series,
            'episode_number': episode_number,
            'tags': tags,
            'formats': formats,
        }
class VierVideosIE(InfoExtractor):
    """Extract all videos of a vier.be / vijf.be program page as a playlist.

    Without a ?page= query parameter every page is crawled until the
    'more' link disappears; with one, only that single page is extracted.
    """
    IE_NAME = 'vier:videos'
    _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?P<program>[^/]+)/videos(?:\?.*\bpage=(?P<page>\d+)|$)'
    _TESTS = [{
        'url': 'http://www.vier.be/demoestuin/videos',
        'info_dict': {
            'id': 'demoestuin',
        },
        'playlist_mincount': 153,
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos',
        'info_dict': {
            'id': 'temptationisland',
        },
        'playlist_mincount': 159,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=6',
        'info_dict': {
            'id': 'demoestuin-page6',
        },
        'playlist_mincount': 20,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=7',
        'info_dict': {
            'id': 'demoestuin-page7',
        },
        'playlist_mincount': 13,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        program = mobj.group('program')
        site = mobj.group('site')
        page_id = mobj.group('page')
        if page_id:
            # Single-page mode: playlist id carries the page number.
            page_id = int(page_id)
            start_page = page_id
            playlist_id = '%s-page%d' % (program, page_id)
        else:
            start_page = 0
            playlist_id = program

        entries = []
        for current_page_id in itertools.count(start_page):
            current_page = self._download_webpage(
                'http://www.%s.be/%s/videos?page=%d' % (site, program, current_page_id),
                program,
                'Downloading page %d' % (current_page_id + 1))
            page_entries = [
                self.url_result('http://www.' + site + '.be' + video_url, 'Vier')
                for video_url in re.findall(
                    r'<h[23]><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
            entries.extend(page_entries)
            # Stop after one page when a specific page was requested, or
            # when the '>Meer<' ("more") link is absent (last page reached).
            if page_id or '>Meer<' not in current_page:
                break

        return self.playlist_result(entries, playlist_id)
| |
import json
import os
import argparse
import copy
import shutil
import subprocess
# Absolute path of the directory containing this script; config.yaml is
# expected one level above it (see the __main__ block).
cur_dir = os.path.abspath(os.path.dirname(__file__))
print(f"cur_dir: {cur_dir}")
def insert_extract_code(file_path):
    """Return the lines of the pipeline script at *file_path*, rewritten so
    that running it dumps its train conf/dsl instead of actually training.

    The rewrite prepends an extract() helper, comments out lines that would
    contact a running FATE cluster (.fit(), .predict(), .deploy_component(),
    predict_pipeline usage), drops get_summary() calls, and injects an
    extract(pipeline, __file__) call before a 'return' line or the
    __main__ guard.
    """
    code_lines = []
    # Helper source that is prepended to every rewritten script.
    code = \
        """
import json
import os


def extract(my_pipeline, file_name, output_path='generated_conf_and_dsl'):
    out_name = file_name.split('/')[-1]
    out_name = out_name.replace('pipeline-', '').replace('.py', '').replace('-', '_')
    conf = my_pipeline.get_train_conf()
    dsl = my_pipeline.get_train_dsl()
    cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    conf_name = os.path.join(cur_dir, output_path, f"{out_name}_conf.json")
    dsl_name = os.path.join(cur_dir, output_path, f"{out_name}_dsl.json")
    json.dump(conf, open(conf_name, 'w'), indent=4)
    print('conf name is {}'.format(conf_name))
    json.dump(dsl, open(dsl_name, 'w'), indent=4)
    print('dsl name is {}'.format(dsl_name))
"""
    code_lines.append(code)
    # Any line containing one of these is commented out in the rewritten
    # script (it would contact a live cluster).
    screen_keywords = [".predict(", ".fit(", ".deploy_component(", "predict_pipeline ",
                       "predict_pipeline."]
    # True while still inside a screened multi-line call.
    continue_to_screen = False
    has_return = False
    with open(file_path, 'r') as f:
        lines = f.readlines()
        for l in lines:
            if ".predict(" in l or ".fit(" in l:
                code_lines.append(f"# {l}")
            elif 'if __name__ == "__main__":' in l:
                # Inject the extract() call before the main guard unless a
                # 'return' line already triggered the injection.
                if not has_return:
                    code_lines.append("    extract(pipeline, __file__)\n")
                code_lines.append(l)
            elif 'return' in l:
                # NOTE(review): matches any line containing 'return',
                # including comments/docstrings — TODO confirm intended.
                code_lines.append("    extract(pipeline, __file__)\n")
                code_lines.append(l)
                has_return = True
            elif "get_summary()" in l:
                continue
            elif continue_to_screen:
                code_lines.append(f"# {l}")
                if ")" in l:
                    continue_to_screen = False
            else:
                should_append = True
                for key_word in screen_keywords:
                    if key_word in l:
                        code_lines.append(f"# {l}")
                        should_append = False
                        if ")" not in l:
                            continue_to_screen = True
                if should_append:
                    code_lines.append(l)
    return code_lines
def extract(my_pipeline, file_name, output_path='generated_conf_and_dsl'):
    """Dump a pipeline's train conf and train DSL as JSON files.

    The output file stem is derived from the pipeline script's file name:
    'pipeline-foo-bar.py' -> 'foo_bar'.  Files are written under
    ./<output_path>/ (relative to the current working directory), which
    must already exist.
    """
    out_name = file_name.split('/')[-1]
    out_name = out_name.replace('pipeline-', '').replace('.py', '').replace('-', '_')
    conf = my_pipeline.get_train_conf()
    dsl = my_pipeline.get_train_dsl()
    conf_name = './{}/{}_conf.json'.format(output_path, out_name)
    dsl_name = './{}/{}_dsl.json'.format(output_path, out_name)
    # Use context managers so the handles are closed deterministically (the
    # original leaked open file objects via json.dump(obj, open(...))).
    with open(conf_name, 'w') as conf_f:
        json.dump(conf, conf_f, indent=4)
    print('conf name is {}'.format(conf_name))
    with open(dsl_name, 'w') as dsl_f:
        json.dump(dsl, dsl_f, indent=4)
    print('dsl name is {}'.format(dsl_name))
def get_testsuite_file(testsuite_file_path):
    """Load a testsuite JSON file and strip any existing task sections."""
    print(f"testsuite_file_path: {testsuite_file_path}")
    with open(testsuite_file_path, 'r', encoding='utf-8') as load_f:
        testsuite_json = json.load(load_f)
    # Drop task sections if present; they are regenerated by do_generated().
    for section in ("tasks", "pipeline_tasks"):
        testsuite_json.pop(section, None)
    return testsuite_json
def start_task(cmd):
    """Run *cmd* (an argv list), wait for completion, and return its
    combined stdout/stderr decoded as UTF-8."""
    print('Start task: {}'.format(cmd))
    proc = subprocess.Popen(cmd,
                            shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    # stderr is merged into stdout above, so the second value is None.
    raw_out, _ = proc.communicate()
    return raw_out.decode("utf-8")
def upload_data(file_path):
    """Upload the data referenced by a testsuite via the external fate_test
    CLI (data only, non-interactive).

    NOTE(review): the only call site visible in this file (in do_generated)
    is commented out.
    """
    cmd = ["fate_test", "suite", "-i", file_path, "--data-only", "--yes"]
    start_task(cmd)
def do_generated(file_path, fold_name, template_path, yaml_file):
    """Convert every pipeline-*.py script in *file_path* into conf/dsl JSON.

    Steps:
      1. Locate a testsuite template in the directory (unless one was given).
      2. Rewrite each pipeline script (insert_extract_code) into
         <file_path>/replaced_code/ so that running it dumps conf/dsl.
      3. Execute the rewritten scripts, which write JSON into
         <file_path>/generated_conf_and_dsl/.
      4. Emit <fold_name>_testsuite.json (conf/dsl tasks) and
         <fold_name>_pipeline_testsuite.json (pipeline tasks).
      5. Remove the temporary replaced_code directory.
    """
    if not os.path.isdir(file_path):
        return
    files = os.listdir(file_path)
    # cmd = 'python {}'
    if template_path is None:
        # Use the first non-generated testsuite file found as the template.
        for f in files:
            if "testsuite" in f and "generated_testsuite" not in f:
                template_path = os.path.join(file_path, f)
                break
    if template_path is None:
        # raise RuntimeError("Template cannot be found")
        return
    print(f"template_path: {template_path}")
    # upload_data(file_path)
    suite_json = get_testsuite_file(template_path)
    pipeline_suite = copy.deepcopy(suite_json)
    suite_json["tasks"] = {}
    pipeline_suite["pipeline_tasks"] = {}
    replaced_path = os.path.join(file_path, 'replaced_code')
    generated_path = os.path.join(file_path, 'generated_conf_and_dsl')

    if not os.path.exists(replaced_path):
        os.system('mkdir {}'.format(replaced_path))

    if not os.path.exists(generated_path):
        os.system('mkdir {}'.format(generated_path))

    for f in files:
        if not f.startswith("pipeline"):
            continue
        print(f)
        # 'pipeline-hetero-lr.py' -> task name 'hetero-lr'
        task_name = f.replace(".py", "")
        task_name = "-".join(task_name.split('-')[1:])
        pipeline_suite["pipeline_tasks"][task_name] = {
            "script": f
        }
        f_path = os.path.join(file_path, f)
        code_str = insert_extract_code(f_path)
        pipeline_file_path = os.path.join(replaced_path, f)
        open(pipeline_file_path, 'w').writelines(code_str)
        # print('replace done')
        # file_path = folder + f
        # os.system(cmd.format(folder + f))

    exe_files = os.listdir(replaced_path)
    #
    for f in exe_files:
        abs_file = os.path.join(replaced_path, f)
        print('executing {}'.format(abs_file))
        os.system(f"python {abs_file} -config {yaml_file}")

    # Pair up the generated *_dsl.json / *_conf.json files by task stem.
    conf_files = os.listdir(generated_path)
    f_dsl = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'dsl.json' in f}
    f_conf = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'conf.json' in f}
    for task_type, dsl_file in f_dsl.items():
        conf_file = f_conf[task_type]
        suite_json['tasks'][task_type] = {
            "conf": conf_file,
            "dsl": dsl_file
        }

    suite_path = os.path.join(generated_path, f"{fold_name}_testsuite.json")
    with open(suite_path, 'w', encoding='utf-8') as json_file:
        json.dump(suite_json, json_file, ensure_ascii=False, indent=4)

    suite_path = os.path.join(file_path, f"{fold_name}_pipeline_testsuite.json")
    with open(suite_path, 'w', encoding='utf-8') as json_file:
        json.dump(pipeline_suite, json_file, ensure_ascii=False, indent=4)

    shutil.rmtree(replaced_path)
    print("Generate testsuite and dsl&conf finished!")
    # os.system('rm -rf {}'.format(replaced_path))
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-i", "--include", type=str, help="path of pipeline files", required=True)
    arg_parser.add_argument("-t", "--template_path", type=str, help="test template", required=False,
                            default=None)
    args = arg_parser.parse_args()
    template_path = args.template_path
    input_path = os.path.abspath(args.include)

    # Breadth-first walk collecting input_path and every directory below it.
    input_list = [input_path]
    i = 0
    while i < len(input_list):
        dirs = os.listdir(input_list[i])
        for d in dirs:
            # BUG FIX: the original tested os.path.isdir(d) on the bare
            # directory-entry name, which is resolved against the *current
            # working directory* — so subdirectories were never collected
            # correctly (and the bare name, not the full path, was
            # appended).  Join with the parent directory first.
            full_path = os.path.join(input_list[i], d)
            if os.path.isdir(full_path):
                input_list.append(full_path)
        i += 1

    # config.yaml lives one level above this script's directory.
    yaml_file = os.path.join(os.path.dirname(cur_dir), "config.yaml")
    for file_path in input_list:
        module_name = os.path.basename(file_path)
        do_generated(file_path, module_name, template_path, yaml_file)
| |
from __future__ import absolute_import, unicode_literals
from copy import copy
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import NoReverseMatch
from django.db.models.fields import FieldDoesNotExist
from drf_toolbox.compat import models, django_pgfields_installed
from drf_toolbox.serializers.fields import api, postgres, related
from importlib import import_module
from rest_framework import serializers
from rest_framework.compat import smart_text
from rest_framework.settings import api_settings
import collections
import six
__all__ = ('BaseModelSerializer', 'ModelSerializer')

# Output keys for the hyperlink field added by ModelSerializer.get_default_fields:
# the plural key is used when the serializer is bound to the viewset rendering
# it, the singular key otherwise.
API_ENDPOINT_KEY_SINGULAR = 'api_endpoint'
API_ENDPOINT_KEY_PLURAL = 'api_endpoints'
class BaseModelSerializer(serializers.ModelSerializer):
    """A model serializer that is the starting point for any
    extentions consistently needed in the DRF Toolbox API.
    """
    def get_field(self, model_field):
        """Return the appropriate Django REST Framework serializer field
        for `model_field`, special-casing the django-pgfields field types
        when that package is installed.
        """
        # Without django-pgfields there are no special field types to
        # consider; defer entirely to the superclass.
        if not django_pgfields_installed:
            return super(BaseModelSerializer, self).get_field(model_field)

        # Dispatch each pgfields type to its dedicated builder.
        if isinstance(model_field, models.ArrayField):
            return self._build_array_field(model_field)
        if isinstance(model_field, models.CompositeField):
            return self._build_composite_field(model_field)
        if isinstance(model_field, models.JSONField):
            return self._build_json_field(model_field)
        if isinstance(model_field, models.UUIDField):
            return self._build_uuid_field(model_field)

        # Not a special field; run the superclass implementation.
        return super(BaseModelSerializer, self).get_field(model_field)

    def _shared_kwargs(self, model_field):
        """Keyword arguments common to every pgfields serializer field."""
        return {
            'help_text': model_field.help_text,
            'label': model_field.verbose_name,
            'read_only': not model_field.editable,
            'required': not model_field.blank,
        }

    def _build_array_field(self, model_field):
        """Serializer field for an ArrayField, wrapping its `of` subfield."""
        subfield = self.get_field(model_field.of)
        return postgres.ArrayField(subfield, default=[],
                                   **self._shared_kwargs(model_field))

    def _build_composite_field(self, model_field):
        """Serializer field for a CompositeField subclass."""
        # Composite fields may define their own serializers; honor that.
        if hasattr(model_field, 'get_drf_serializer_field'):
            return model_field.get_drf_serializer_field()
        # Otherwise build a generic composite serializer from the subfields.
        subfields = dict(
            (subfield_name, self.get_field(model_subfield))
            for subfield_name, model_subfield in model_field._meta.fields
        )
        return postgres.CompositeField(
            default=model_field.instance_class(),
            fields=subfields,
            instance_class=model_field.instance_class,
            **self._shared_kwargs(model_field)
        )

    def _build_json_field(self, model_field):
        """Serializer field for a JSONField."""
        return postgres.JSONField(default={},
                                  **self._shared_kwargs(model_field))

    def _build_uuid_field(self, model_field):
        """Serializer field for a UUIDField."""
        # Auto-added UUIDs are server-generated and never writable.
        if model_field._auto_add:
            return postgres.UUIDField(read_only=True, required=False)
        return postgres.UUIDField(default=model_field.default,
                                  **self._shared_kwargs(model_field))
class ModelSerializer(BaseModelSerializer):
    """A model serializer which prints both endpoints and
    IDs for each record.
    """
    _default_view_name = '%(model_name)s-detail'
    _options_class = serializers.HyperlinkedModelSerializerOptions

    class Meta:
        depth = 1

    def __init__(self, obj=None, seen_models=(), initial=None, **kwargs):
        """Initialize the serializer.

        :param seen_models: Models already serialized further up the nesting
            chain; relations back to these are dropped to avoid recursion.
        :param initial: Mapping of field name -> value to set on the
            instance in `save_object`.
        """
        self._seen_models = set(seen_models)
        self._initial = initial or {}
        self._rel_fields = {}
        super(ModelSerializer, self).__init__(obj, **kwargs)

    def get_default_fields(self):
        """Return the default fields for this serializer, as a
        dictionary.
        """
        # If we received the `fields` or `exclude` options as dictionaries,
        # parse them out into the format that DRF expects.
        #
        # We can then address our related fields lists by sending them
        # to the appropriate related fields.
        for option in ('fields', 'exclude'):
            opt_value = getattr(self.opts, option, ())
            if isinstance(opt_value, dict):
                setattr(self.opts, option, opt_value.get('self', ()))
                self._rel_fields[option] = opt_value

        # Perform the superclass behavior.
        fields = super(ModelSerializer, self).get_default_fields()

        # Expunge created, modified, and password if they are present.
        # These fields should only be sent if specifically requested.
        for field_name in ('created', 'modified', 'password'):
            if field_name not in self.opts.fields:
                fields.pop(field_name, None)

        # Expunge any fields that are related fields to models
        # that we have already seen.
        #
        # BUG FIX: iterate over a snapshot of the items. Popping from the
        # dict while iterating `fields.items()` directly raises
        # RuntimeError ("dictionary changed size during iteration") on
        # Python 3.
        for field_name, field in list(fields.items()):
            if (isinstance(field, related.RelatedField) and
                    field.queryset.model in self._seen_models):
                fields.pop(field_name, None)

        # Use an OrderedDict to cause our keys to be in mostly-alpha order.
        answer = collections.OrderedDict()

        # Exception: We always want the primary key to come first, as it is
        # the identifier for the object.
        pk_field_name = self.opts.model._meta.pk.name
        answer[pk_field_name] = fields[pk_field_name]

        # Add the `api_endpoints` field, which will give us the
        # hyperlink to the given item.
        #
        # Do it at this point, which will cause the API endpoint field
        # to be shown second.
        viewset = self.context.get('view', None)
        if hasattr(self.opts.model, 'get_absolute_url'):
            if viewset and self._viewset_uses_me(viewset):
                answer.setdefault(API_ENDPOINT_KEY_PLURAL,
                                  api.APIEndpointsField())
            else:
                answer.setdefault(API_ENDPOINT_KEY_SINGULAR,
                                  api.APIEndpointField())

        # Now add all other fields, in alphabetical order.
        for key in sorted(fields.keys()):
            # Sanity check: Don't try to add the primary key field twice.
            if key == pk_field_name:
                continue
            answer[key] = fields[key]

        # Sanity check: If the "api_endpoint" or "api_endpoints" field
        # is explicitly included or excluded, but we got the other, be
        # gracious and just change it under the hood.
        #
        # This gets around a slew of problems where it becomes extraordinarily
        # difficult to initialize serializers outside of the entire
        # request setup.
        for option in ('fields', 'exclude'):
            opt_value = getattr(self.opts, option, [])
            original_class = type(opt_value)
            if (API_ENDPOINT_KEY_SINGULAR in opt_value and
                    API_ENDPOINT_KEY_PLURAL in answer):
                opt_value = list(opt_value)
                index = opt_value.index(API_ENDPOINT_KEY_SINGULAR)
                opt_value[index] = API_ENDPOINT_KEY_PLURAL
            if (API_ENDPOINT_KEY_PLURAL in opt_value and
                    API_ENDPOINT_KEY_SINGULAR in answer):
                opt_value = list(opt_value)
                index = opt_value.index(API_ENDPOINT_KEY_PLURAL)
                opt_value[index] = API_ENDPOINT_KEY_SINGULAR
            setattr(self.opts, option, original_class(opt_value))

        # Done; return the final answer.
        return answer

    def get_related_field(self, model_field, related_model, to_many):
        """Returns a representation of the related field,
        to be shown in a nested fashion.
        """
        # Set default keyword arguments.
        kwargs = {
            'many': to_many,
            'queryset': related_model._default_manager,
        }

        # If we have set filter or exclude lists for this related field,
        # add them to the kwargs.
        #
        # This bit of code is expecting `self._rel_fields` to be a
        # dictionary that looks kind of like this:
        #   {'fields': {'child_model': ('id', 'api_endpoint', 'foo', 'bar')},
        #    'exclude': {'other_model': ('password',)}}
        #
        # Essentially what will happen is that it will check for this
        # related model's key within both the fields and exclude sub-dicts,
        # and if they are found, they are added to keyword arguments
        # used to init the RelatedField.
        for key, field_lists in self._rel_fields.items():
            if model_field.name in field_lists:
                kwargs[key] = field_lists[model_field.name]

        # If there is a model field (e.g. this is not a reverse relationship),
        # determine whether or not the field is required.
        if model_field:
            kwargs['required'] = not (model_field.null or model_field.blank)

        # Create a new set object that includes all seen models,
        # as well as the current model, to send to the related field.
        seen_models = self._seen_models.union({self.opts.model})

        # Instantiate and return our field.
        rel_field = related.RelatedField(seen_models=seen_models, **kwargs)
        rel_field.parent_serializer = self
        return rel_field

    def save_object(self, obj, **kwargs):
        """Save the provided model instance.

        If initial data was provided when this serializer was instantiated,
        set the appropriate fields on the model instance before saving.
        """
        for key, value in self._initial.items():
            setattr(obj, key, self._find_field(key).from_native(value))
        return super(ModelSerializer, self).save_object(obj, **kwargs)

    def _find_field(self, key):
        """Return the field with the given field name.

        If the field does not exist, raise KeyError.
        This method also returns a field that exists, but is no longer on
        the serializer (e.g. a default field that is excluded).
        """
        return self.fields.get(key, self.get_default_fields()[key])

    def _viewset_uses_me(self, viewset):
        """Given a viewset, return True if we believe that the viewset uses
        this serializer class, False otherwise.
        """
        # Get the serializer class that this viewset uses.
        sc = viewset.get_serializer_class()

        # If this serializer class is the same class as this instance,
        # then it's definitely a match.
        if sc == type(self):
            return True

        # Irritating case: If this class uses the default serializer, *and*
        # the viewset does also, then this is a match.
        if (type(self).__name__ == 'DefaultSerializer' and
                isinstance(self, ModelSerializer) and
                viewset.model == self.opts.model):
            return True

        # It's not a match.
        return False
| |
from ..errors import AutoDiscoverFailed, ErrorNonExistentMailbox
from ..fields import (
BooleanField,
BuildField,
Choice,
ChoiceField,
EmailAddressField,
EWSElementField,
IntegerField,
OnOffField,
ProtocolListField,
TextField,
)
from ..properties import EWSElement
from ..transport import BASIC, CBA, DEFAULT_ENCODING, GSSAPI, NOAUTH, NTLM, SSPI
from ..util import AUTODISCOVER_BASE_NS, AUTODISCOVER_REQUEST_NS
from ..util import AUTODISCOVER_RESPONSE_NS as RNS
from ..util import ParseError, add_xml_child, create_element, is_xml, to_xml, xml_to_str
from ..version import Version
class AutodiscoverBase(EWSElement):
    """Base class for POX autodiscover elements living in the response
    namespace (RNS)."""
    NAMESPACE = RNS
class User(AutodiscoverBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/user-pox"""

    ELEMENT_NAME = "User"

    display_name = TextField(field_uri="DisplayName", namespace=RNS)
    legacy_dn = TextField(field_uri="LegacyDN", namespace=RNS)
    deployment_id = TextField(field_uri="DeploymentId", namespace=RNS)  # GUID format
    # SMTP address the autodiscover service resolved for this user; consumed
    # by Response.autodiscover_smtp_address.
    autodiscover_smtp_address = EmailAddressField(field_uri="AutoDiscoverSMTPAddress", namespace=RNS)
class IntExtUrlBase(AutodiscoverBase):
    """Common base for elements that carry an internal and an external URL."""
    external_url = TextField(field_uri="ExternalUrl", namespace=RNS)
    internal_url = TextField(field_uri="InternalUrl", namespace=RNS)
class AddressBook(IntExtUrlBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/addressbook-pox"""

    ELEMENT_NAME = "AddressBook"
class MailStore(IntExtUrlBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/mailstore-pox"""

    ELEMENT_NAME = "MailStore"
class NetworkRequirements(AutodiscoverBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/networkrequirements-pox"""

    ELEMENT_NAME = "NetworkRequirements"

    # IPv4/IPv6 range boundaries; stored as text, not parsed into address types.
    ipv4_start = TextField(field_uri="IPv4Start", namespace=RNS)
    ipv4_end = TextField(field_uri="IPv4End", namespace=RNS)
    ipv6_start = TextField(field_uri="IPv6Start", namespace=RNS)
    ipv6_end = TextField(field_uri="IPv6End", namespace=RNS)
class SimpleProtocol(AutodiscoverBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/protocol-pox

    Used for the 'Internal' and 'External' elements that may contain a stripped-down version of the Protocol element.
    """

    ELEMENT_NAME = "Protocol"

    # Known protocol type identifiers; Response.protocol selects among these.
    WEB = "WEB"
    EXCH = "EXCH"
    EXPR = "EXPR"
    EXHTTP = "EXHTTP"
    TYPES = (WEB, EXCH, EXPR, EXHTTP)

    type = ChoiceField(field_uri="Type", choices={Choice(c) for c in TYPES}, namespace=RNS)
    as_url = TextField(field_uri="ASUrl", namespace=RNS)
class IntExtBase(AutodiscoverBase):
    """Common base for the 'Internal' and 'External' elements: an OWA URL
    plus a stripped-down protocol element."""
    # TODO: 'OWAUrl' also has an AuthenticationMethod enum-style XML attribute with values:
    # WindowsIntegrated, FBA, NTLM, Digest, Basic
    owa_url = TextField(field_uri="OWAUrl", namespace=RNS)
    protocol = EWSElementField(value_cls=SimpleProtocol)
class Internal(IntExtBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/internal-pox"""

    ELEMENT_NAME = "Internal"
class External(IntExtBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/external-pox"""

    ELEMENT_NAME = "External"
class Protocol(SimpleProtocol):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/protocol-pox"""

    # Attribute 'Type' is ignored here. Has a name conflict with the child element and does not seem useful.
    version = TextField(field_uri="Version", is_attribute=True, namespace=RNS)
    internal = EWSElementField(value_cls=Internal)
    external = EWSElementField(value_cls=External)
    ttl = IntegerField(field_uri="TTL", namespace=RNS, default=1)  # TTL for this autodiscover response, in hours
    server = TextField(field_uri="Server", namespace=RNS)
    server_dn = TextField(field_uri="ServerDN", namespace=RNS)
    server_version = BuildField(field_uri="ServerVersion", namespace=RNS)
    mdb_dn = TextField(field_uri="MdbDN", namespace=RNS)
    public_folder_server = TextField(field_uri="PublicFolderServer", namespace=RNS)
    # Port numbers are validated to the legal TCP range.
    port = IntegerField(field_uri="Port", namespace=RNS, min=1, max=65535)
    directory_port = IntegerField(field_uri="DirectoryPort", namespace=RNS, min=1, max=65535)
    referral_port = IntegerField(field_uri="ReferralPort", namespace=RNS, min=1, max=65535)
    # Service endpoint URLs.
    ews_url = TextField(field_uri="EwsUrl", namespace=RNS)
    emws_url = TextField(field_uri="EmwsUrl", namespace=RNS)
    sharing_url = TextField(field_uri="SharingUrl", namespace=RNS)
    # Exchange Control Panel (ECP) URLs for the various ECP features.
    ecp_url = TextField(field_uri="EcpUrl", namespace=RNS)
    ecp_url_um = TextField(field_uri="EcpUrl-um", namespace=RNS)
    ecp_url_aggr = TextField(field_uri="EcpUrl-aggr", namespace=RNS)
    ecp_url_mt = TextField(field_uri="EcpUrl-mt", namespace=RNS)
    ecp_url_ret = TextField(field_uri="EcpUrl-ret", namespace=RNS)
    ecp_url_sms = TextField(field_uri="EcpUrl-sms", namespace=RNS)
    ecp_url_publish = TextField(field_uri="EcpUrl-publish", namespace=RNS)
    ecp_url_photo = TextField(field_uri="EcpUrl-photo", namespace=RNS)
    ecp_url_tm = TextField(field_uri="EcpUrl-tm", namespace=RNS)
    ecp_url_tm_creating = TextField(field_uri="EcpUrl-tmCreating", namespace=RNS)
    ecp_url_tm_hiding = TextField(field_uri="EcpUrl-tmHiding", namespace=RNS)
    ecp_url_tm_editing = TextField(field_uri="EcpUrl-tmEditing", namespace=RNS)
    ecp_url_extinstall = TextField(field_uri="EcpUrl-extinstall", namespace=RNS)
    oof_url = TextField(field_uri="OOFUrl", namespace=RNS)
    oab_url = TextField(field_uri="OABUrl", namespace=RNS)
    um_url = TextField(field_uri="UMUrl", namespace=RNS)
    ews_partner_url = TextField(field_uri="EwsPartnerUrl", namespace=RNS)
    # Authentication-related settings.
    login_name = TextField(field_uri="LoginName", namespace=RNS)
    domain_required = OnOffField(field_uri="DomainRequired", namespace=RNS)
    domain_name = TextField(field_uri="DomainName", namespace=RNS)
    spa = OnOffField(field_uri="SPA", namespace=RNS, default=True)
    auth_package = ChoiceField(
        field_uri="AuthPackage",
        namespace=RNS,
        choices={Choice(c) for c in ("basic", "kerb", "kerbntlm", "ntlm", "certificate", "negotiate", "nego2")},
    )
    cert_principal_name = TextField(field_uri="CertPrincipalName", namespace=RNS)
    ssl = OnOffField(field_uri="SSL", namespace=RNS, default=True)
    auth_required = OnOffField(field_uri="AuthRequired", namespace=RNS, default=True)
    # NOTE(review): attribute name looks like a typo for `use_pop_auth`
    # (field_uri is 'UsePOPAuth'), but renaming would break the public API.
    use_pop_path = OnOffField(field_uri="UsePOPAuth", namespace=RNS)
    smtp_last = OnOffField(field_uri="SMTPLast", namespace=RNS, default=False)
    network_requirements = EWSElementField(value_cls=NetworkRequirements)
    address_book = EWSElementField(value_cls=AddressBook)
    mail_store = EWSElementField(value_cls=MailStore)

    @property
    def auth_type(self):
        """Translate the 'auth_package' value to one of our own 'auth_type'
        enum values, or None if the package is unset or unknown."""
        # Translates 'auth_package' value to our own 'auth_type' enum vals
        if not self.auth_required:
            return NOAUTH
        if not self.auth_package:
            return None
        return {
            # Missing in list are DIGEST and OAUTH2
            "basic": BASIC,
            "kerb": GSSAPI,
            "kerbntlm": NTLM,  # Means client can chose between NTLM and GSSAPI
            "ntlm": NTLM,
            "certificate": CBA,
            "negotiate": SSPI,  # Unsure about this one
            "nego2": GSSAPI,
            "anonymous": NOAUTH,  # Seen in some docs even though it's not mentioned in MSDN
        }.get(self.auth_package.lower())
class Error(EWSElement):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/error-pox"""

    ELEMENT_NAME = "Error"
    # Error elements live in the base autodiscover namespace, not RNS.
    NAMESPACE = AUTODISCOVER_BASE_NS

    id = TextField(field_uri="Id", namespace=AUTODISCOVER_BASE_NS, is_attribute=True)
    time = TextField(field_uri="Time", namespace=AUTODISCOVER_BASE_NS, is_attribute=True)
    code = TextField(field_uri="ErrorCode", namespace=AUTODISCOVER_BASE_NS)
    message = TextField(field_uri="Message", namespace=AUTODISCOVER_BASE_NS)
    debug_data = TextField(field_uri="DebugData", namespace=AUTODISCOVER_BASE_NS)
class Account(AutodiscoverBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/account-pox"""

    ELEMENT_NAME = "Account"

    # Possible 'Action' values; Response's properties branch on these.
    REDIRECT_URL = "redirectUrl"
    REDIRECT_ADDR = "redirectAddr"
    SETTINGS = "settings"
    ACTIONS = (REDIRECT_URL, REDIRECT_ADDR, SETTINGS)

    type = ChoiceField(field_uri="AccountType", namespace=RNS, choices={Choice("email")})
    action = ChoiceField(field_uri="Action", namespace=RNS, choices={Choice(p) for p in ACTIONS})
    microsoft_online = BooleanField(field_uri="MicrosoftOnline", namespace=RNS)
    redirect_url = TextField(field_uri="RedirectURL", namespace=RNS)
    redirect_address = EmailAddressField(field_uri="RedirectAddr", namespace=RNS)
    image = TextField(field_uri="Image", namespace=RNS)  # Path to image used for branding
    service_home = TextField(field_uri="ServiceHome", namespace=RNS)  # URL to website of ISP
    protocols = ProtocolListField()
    # 'SmtpAddress' is inside the 'PublicFolderInformation' element
    public_folder_smtp_address = TextField(field_uri="SmtpAddress", namespace=RNS)

    @classmethod
    def from_xml(cls, elem, account):
        """Build an Account from its XML element.

        Overridden because 'public_folder_smtp_address' is not a direct
        child of 'Account'; it is parsed from the nested
        'PublicFolderInformation' element instead.
        """
        kwargs = {}
        public_folder_information = elem.find(f"{{{cls.NAMESPACE}}}PublicFolderInformation")
        for f in cls.FIELDS:
            if f.name == "public_folder_smtp_address":
                # Skip entirely when the wrapper element is absent.
                if public_folder_information is None:
                    continue
                kwargs[f.name] = f.from_xml(elem=public_folder_information, account=account)
                continue
            kwargs[f.name] = f.from_xml(elem=elem, account=account)
        cls._clear(elem)
        return cls(**kwargs)
class Response(AutodiscoverBase):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/response-pox"""

    ELEMENT_NAME = "Response"

    user = EWSElementField(value_cls=User)
    account = EWSElementField(value_cls=Account)

    @property
    def redirect_address(self):
        """The redirect email address, or None unless the account action is
        'redirectAddr'."""
        try:
            if self.account.action != Account.REDIRECT_ADDR:
                return None
            return self.account.redirect_address
        except AttributeError:
            # self.account may be missing (None)
            return None

    @property
    def redirect_url(self):
        """The redirect URL, or None unless the account action is
        'redirectUrl'."""
        try:
            if self.account.action != Account.REDIRECT_URL:
                return None
            return self.account.redirect_url
        except AttributeError:
            # self.account may be missing (None)
            return None

    @property
    def autodiscover_smtp_address(self):
        """The resolved SMTP address, or None unless the account action is
        'settings'."""
        # AutoDiscoverSMTPAddress might not be present in the XML. In this case, use the original email address.
        try:
            if self.account.action != Account.SETTINGS:
                return None
            return self.user.autodiscover_smtp_address
        except AttributeError:
            # self.account or self.user may be missing (None)
            return None

    @property
    def version(self):
        """The server version matching the chosen protocol's EWS URL, or None."""
        # Get the server version. Not all protocol entries have a server version so we cheat a bit and also look at the
        # other ones that point to the same endpoint.
        ews_url = self.protocol.ews_url
        for protocol in self.account.protocols:
            if not protocol.ews_url or not protocol.server_version:
                continue
            if protocol.ews_url.lower() == ews_url.lower():
                return Version(build=protocol.server_version)
        return None

    @property
    def protocol(self):
        """Return the protocol containing an EWS URL.

        A response may contain a number of possible protocol types. EXPR is meant for EWS. See
        https://techcommunity.microsoft.com/t5/blogs/blogarticleprintpage/blog-id/Exchange/article-id/16

        We allow fallback to EXCH if EXPR is not available, to support installations where EXPR is not available.
        Additionally, some responses may contain an EXPR with no EWS URL. In that case, return EXCH, if available.

        :raises ValueError: if no protocol carries an EWS URL.
        """
        # Only consider protocols that actually carry an EWS URL.
        protocols = {p.type: p for p in self.account.protocols if p.ews_url}
        if Protocol.EXPR in protocols:
            return protocols[Protocol.EXPR]
        if Protocol.EXCH in protocols:
            return protocols[Protocol.EXCH]
        raise ValueError(
            f"No EWS URL found in any of the available protocols: {[str(p) for p in self.account.protocols]}"
        )
class ErrorResponse(EWSElement):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/response-pox

    Like 'Response', but with a different namespace.
    """

    ELEMENT_NAME = "Response"
    NAMESPACE = AUTODISCOVER_BASE_NS

    error = EWSElementField(value_cls=Error)
class Autodiscover(EWSElement):
    """Root element of a POX autodiscover request or response document."""

    ELEMENT_NAME = "Autodiscover"
    NAMESPACE = AUTODISCOVER_BASE_NS

    response = EWSElementField(value_cls=Response)
    error_response = EWSElementField(value_cls=ErrorResponse)

    @staticmethod
    def _clear(elem):
        # Parent implementation also clears the parent, but this element doesn't have one.
        elem.clear()

    @classmethod
    def from_bytes(cls, bytes_content):
        """Create an instance from response bytes. An Autodiscover request and response example is available at:
        https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/pox-autodiscover-response-for-exchange

        :param bytes_content: the raw HTTP response body
        :return: an Autodiscover instance
        :raises ParseError: if the content is not XML or has the wrong root element
        """
        if not is_xml(bytes_content):
            raise ParseError(f"Response is not XML: {bytes_content}", "<not from file>", -1, 0)
        root = to_xml(bytes_content).getroot()  # May raise ParseError
        # The document root must be the namespaced 'Autodiscover' tag.
        if root.tag != cls.response_tag():
            raise ParseError(f"Unknown root element in XML: {bytes_content}", "<not from file>", -1, 0)
        return cls.from_xml(elem=root, account=None)

    def raise_errors(self):
        """Find an error message in the response and raise the relevant exception.

        :raises ErrorNonExistentMailbox: when the error denotes an unknown address
        :raises AutoDiscoverFailed: for any other (or unparseable) error response
        """
        # Find an error message in the response and raise the relevant exception
        try:
            errorcode = self.error_response.error.code
            message = self.error_response.error.message
            if message in ("The e-mail address cannot be found.", "The email address can't be found."):
                raise ErrorNonExistentMailbox("The SMTP address has no mailbox associated with it")
            raise AutoDiscoverFailed(f"Unknown error {errorcode}: {message}")
        except AttributeError:
            # error_response or its error element is missing (None)
            raise AutoDiscoverFailed(f"Unknown autodiscover error response: {self.error_response}")

    @staticmethod
    def payload(email):
        """Build a full Autodiscover XML request body for `email`, as bytes."""
        # Builds a full Autodiscover XML request
        payload = create_element("Autodiscover", attrs=dict(xmlns=AUTODISCOVER_REQUEST_NS))
        request = create_element("Request")
        add_xml_child(request, "EMailAddress", email)
        add_xml_child(request, "AcceptableResponseSchema", RNS)
        payload.append(request)
        return xml_to_str(payload, encoding=DEFAULT_ENCODING, xml_declaration=True)
| |
# Standard python modules.
import logging
import time
import sys
# citest modules.
import citest.gcp_testing as gcp
import citest.json_predicate as jp
import citest.service_testing as st
from citest.json_contract import ObservationPredicateFactory
# Shared factory for the observation predicates used by the contract
# builders in the scenario methods below.
ov_factory = ObservationPredicateFactory()
# Spinnaker modules.
import spinnaker_testing as sk
from spinnaker_testing import frigga
from spinnaker_testing import gate
import citest.base
class GoogleServerGroupTestScenario(sk.SpinnakerTestScenario):
MINIMUM_PROJECT_QUOTA = {
'INSTANCE_TEMPLATES': 1,
'HEALTH_CHECKS': 1,
'FORWARDING_RULES': 1,
'IN_USE_ADDRESSES': 3,
'TARGET_POOLS': 1,
}
MINIMUM_REGION_QUOTA = {
'CPUS': 3,
'IN_USE_ADDRESSES': 3,
'INSTANCE_GROUP_MANAGERS': 2,
'INSTANCES': 3,
}
  @classmethod
  def initArgumentParser(cls, parser, defaults=None):
    """Initialize command line argument parser.

    Adds a --regional flag on top of the base scenario's arguments so the
    same tests can target regional instead of zonal server groups.
    """
    super(GoogleServerGroupTestScenario, cls).initArgumentParser(
        parser, defaults=defaults)
    parser.add_argument('--regional', default=False, action='store_true',
                        help='Test regional server groups rather than zonal.')
  @classmethod
  def new_agent(cls, bindings):
    """Implements the base class interface to create a new agent.

    This method is called by the base classes during setup/initialization.

    Args:
      bindings: The bindings dictionary with configuration information
        that this factory can draw from to initialize. If the factory would
        like additional custom bindings it could add them to initArgumentParser.

    Returns:
      A citest.service_testing.BaseAgent that can interact with Gate.
      This is the agent that test operations will be posted to.
    """
    return gate.new_agent(bindings)
  def __init__(self, bindings, agent=None):
    """Construct the scenario, precomputing names and MIG access parameters.

    Depending on the REGIONAL binding, the managed-instance-group (MIG)
    resource names, observer kwargs and extra operation-payload fields are
    set up for either regional or zonal server groups.
    """
    super(GoogleServerGroupTestScenario, self).__init__(bindings, agent)

    if bindings['REGIONAL']:
      app_decorator = 'r'
      self.__mig_title = 'Regional Instance Group'
      self.__mig_resource_name = 'regionInstanceGroups'
      self.__mig_resource_kwargs = {'region': bindings['TEST_GCE_REGION']}
      self.__mig_manager_name = 'regionInstanceGroupManagers'
      self.__mig_manager_kwargs = {'region': bindings['TEST_GCE_REGION']}
      self.__mig_payload_extra = {
          'regional': True, 'region': bindings['TEST_GCE_REGION']
      }
    else:
      app_decorator = 'z'
      self.__mig_title = 'Zonal Instance Group'
      self.__mig_resource_name = 'instanceGroups'
      self.__mig_resource_kwargs = {}  # all zones
      self.__mig_manager_name = 'instanceGroupManagers'
      self.__mig_manager_kwargs = {}  # all zones
      self.__mig_payload_extra = {
          'zone': bindings['TEST_GCE_ZONE']
      }

    logging.info('Running tests against %s', self.__mig_title)

    # Default the app name with an 'r'/'z' prefix so regional and zonal runs
    # do not collide.
    if not bindings['TEST_APP']:
      bindings['TEST_APP'] = app_decorator + 'svrgrptest' + bindings['TEST_ID']

    # Our application name and path to post events to.
    self.TEST_APP = bindings['TEST_APP']
    self.__path = 'applications/%s/tasks' % self.TEST_APP

    # Custom userdata.
    self.__custom_user_data_key = 'customUserData'
    self.__custom_user_data_value = 'testCustomUserData'

    # The spinnaker stack decorator for our resources.
    self.TEST_STACK = bindings['TEST_STACK']
    self.TEST_REGION = bindings['TEST_GCE_REGION']
    self.TEST_ZONE = bindings['TEST_GCE_ZONE']

    # Resource names used among tests.
    self.__cluster_name = frigga.Naming.cluster(
        app=self.TEST_APP, stack=self.TEST_STACK)
    self.__server_group_name = frigga.Naming.server_group(
        app=self.TEST_APP, stack=self.TEST_STACK, version='v000')
    self.__cloned_server_group_name = frigga.Naming.server_group(
        app=self.TEST_APP, stack=self.TEST_STACK, version='v001')
    self.__lb_name = frigga.Naming.cluster(
        app=self.TEST_APP, stack=self.TEST_STACK, detail='fe')
  def create_load_balancer(self):
    """Post an upsertLoadBalancer operation and verify the forwarding rule.

    Returns:
      st.OperationContract pairing the Gate operation with a contract that
      the named forwarding rule appears in GCE.
    """
    job = [{
        'cloudProvider': 'gce',
        'loadBalancerName': self.__lb_name,
        'ipProtocol': 'TCP',
        'portRange': '8080',
        'provider': 'gce',
        'stack': self.TEST_STACK,
        'detail': 'frontend',
        'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
        'region': self.TEST_REGION,
        'listeners': [{
            'protocol': 'TCP',
            'portRange': '8080',
            'healthCheck': False
        }],
        'name': self.__lb_name,
        'type': 'upsertLoadBalancer',
        'availabilityZones': {self.TEST_REGION: []},
        'user': 'integration-tests'
    }]

    builder = gcp.GcpContractBuilder(self.gcp_observer)
    (builder.new_clause_builder('Load Balancer Created', retryable_for_secs=30)
     .list_resource('forwardingRules')
     .EXPECT(ov_factory.value_list_path_contains(
         'name', jp.STR_SUBSTR(self.__lb_name))))

    payload = self.agent.make_json_payload_from_kwargs(
        job=job, description=self.__mig_title + ' Test - create load balancer',
        application=self.TEST_APP)

    return st.OperationContract(
        self.new_post_operation(
            title='create_load_balancer', data=payload, path=self.__path),
        contract=builder.build())
def create_server_group(self):
job = [{
'application': self.TEST_APP,
'stack': self.TEST_STACK,
'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'network': 'default',
'targetSize': 1,
'capacity': {
'min': 1,
'max': 1,
'desired': 1
},
'availabilityZones': {
self.TEST_REGION: [self.TEST_ZONE]
},
'loadBalancers': [self.__lb_name],
'instanceMetadata': {
'load-balancer-names': self.__lb_name
},
'userData': self.__custom_user_data_key + '=' + self.__custom_user_data_value,
'cloudProvider': 'gce',
'image': self.bindings['TEST_GCE_IMAGE_NAME'],
'instanceType': 'f1-micro',
'initialNumReplicas': 1,
'type': 'createServerGroup',
'account': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': 'integration-tests'
}]
job[0].update(self.__mig_payload_extra)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder(self.__mig_title + 'Created',
retryable_for_secs=150)
.list_resource(self.__mig_manager_name, **self.__mig_manager_kwargs)
.EXPECT(ov_factory.value_list_path_contains(
'name', jp.EQUIVALENT(self.__server_group_name))))
(builder.new_clause_builder('Instance template created',
retryable_for_secs=150)
.list_resource('instanceTemplates')
.EXPECT(ov_factory.value_list_path_contains(
'properties/metadata/items',
jp.LIST_MATCHES([
jp.DICT_MATCHES({
'key': jp.EQUIVALENT(self.__custom_user_data_key),
'value': jp.EQUIVALENT(self.__custom_user_data_value)})
])
)))
payload = self.agent.make_json_payload_from_kwargs(
job=job,
description=self.__mig_title + ' Test - create initial',
application=self.TEST_APP)
return st.OperationContract(
self.new_post_operation(
title='create_server_group', data=payload, path=self.__path),
contract=builder.build())
  def resize_server_group(self):
    """Post a resizeServerGroup operation growing the group to 2 instances.

    Returns:
      st.OperationContract verifying the MIG resource reports size == 2.
    """
    job = [{
        'targetSize': 2,
        'capacity': {
            'min': 2,
            'max': 2,
            'desired': 2
        },
        'replicaPoolName': self.__server_group_name,
        'numReplicas': 2,
        'region': self.TEST_REGION,
        'zone': self.TEST_ZONE,
        'asgName': self.__server_group_name,
        'serverGroupName': self.__server_group_name,
        'type': 'resizeServerGroup',
        'regions': [self.TEST_REGION],
        'zones': [self.TEST_ZONE],
        'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
        'cloudProvider': 'gce',
        'user': 'integration-tests'
    }]
    # Add the regional/zonal discriminator fields.
    job[0].update(self.__mig_payload_extra)

    builder = gcp.GcpContractBuilder(self.gcp_observer)
    (builder.new_clause_builder(
        self.__mig_title + ' Resized', retryable_for_secs=90)
     .inspect_resource(self.__mig_resource_name, self.__server_group_name,
                       **self.__mig_resource_kwargs)
     .EXPECT(ov_factory.value_list_path_contains('size', jp.NUM_EQ(2))))

    payload = self.agent.make_json_payload_from_kwargs(
        job=job, description=self.__mig_title + ' Test - resize to 2 instances',
        application=self.TEST_APP)

    return st.OperationContract(
        self.new_post_operation(
            title='resize_instances', data=payload, path=self.__path),
        contract=builder.build())
def clone_server_group(self):
job = [{
'application': self.TEST_APP,
'stack': self.TEST_STACK,
'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'loadBalancers': [self.__lb_name],
'targetSize': 1,
'capacity': {
'min': 1,
'max': 1,
'desired': 1
},
'zone': self.TEST_ZONE,
'network': 'default',
'instanceMetadata': {'load-balancer-names': self.__lb_name},
'availabilityZones': {self.TEST_REGION: [self.TEST_ZONE]},
'cloudProvider': 'gce',
'source': {
'account': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'region': self.TEST_REGION,
'zone': self.TEST_ZONE,
'serverGroupName': self.__server_group_name,
'asgName': self.__server_group_name
},
'instanceType': 'f1-micro',
'image': self.bindings['TEST_GCE_IMAGE_NAME'],
'initialNumReplicas': 1,
'loadBalancers': [self.__lb_name],
'type': 'cloneServerGroup',
'account': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': 'integration-tests'
}]
job[0].update(self.__mig_payload_extra)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder(self.__mig_title + ' Cloned',
retryable_for_secs=90)
.list_resource(self.__mig_manager_name, **self.__mig_manager_kwargs)
.EXPECT(ov_factory.value_list_path_contains(
'baseInstanceName', jp.STR_SUBSTR(self.__cloned_server_group_name))))
(builder.new_clause_builder('Instance template preserved',
retryable_for_secs=150)
.list_resource('instanceTemplates')
.EXPECT(ov_factory.value_list_path_contains(
'properties/metadata/items',
jp.LIST_MATCHES([
jp.DICT_MATCHES({
'key': jp.EQUIVALENT(self.__custom_user_data_key),
'value': jp.EQUIVALENT(self.__custom_user_data_value)})
])))
)
payload = self.agent.make_json_payload_from_kwargs(
job=job, description=self.__mig_title + ' Test - clone server group',
application=self.TEST_APP)
return st.OperationContract(
self.new_post_operation(
title='clone_server_group', data=payload, path=self.__path),
contract=builder.build())
def disable_server_group(self):
job = [{
'cloudProvider': 'gce',
'asgName': self.__server_group_name,
'serverGroupName': self.__server_group_name,
'region': self.TEST_REGION,
'zone': self.TEST_ZONE,
'type': 'disableServerGroup',
'regions': [self.TEST_REGION],
'zones': [self.TEST_ZONE],
'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': 'integration-tests'
}]
job[0].update(self.__mig_payload_extra)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder(self.__mig_title + ' Disabled',
retryable_for_secs=90)
.list_resource(self.__mig_manager_name, **self.__mig_manager_kwargs)
.EXPECT(ov_factory.value_list_path_contains(
'baseInstanceName', jp.STR_SUBSTR(self.__server_group_name)))
.AND(ov_factory.value_list_excludes(
jp.DICT_MATCHES({
'baseInstanceName': jp.STR_SUBSTR(self.__server_group_name),
'targetPools': jp.LIST_MATCHES([jp.STR_SUBSTR('https')])
}))))
payload = self.agent.make_json_payload_from_kwargs(
job=job, description=self.__mig_title + ' Test - disable server group',
application=self.TEST_APP)
return st.OperationContract(
self.new_post_operation(
title='disable_server_group', data=payload, path=self.__path),
contract=builder.build())
def enable_server_group(self):
job = [{
'cloudProvider': 'gce',
'asgName': self.__server_group_name,
'serverGroupName': self.__server_group_name,
'region': self.TEST_REGION,
'zone': self.TEST_ZONE,
'type': 'enableServerGroup',
'regions': [self.TEST_REGION],
'zones': [self.TEST_ZONE],
'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': 'integration-tests'
}]
job[0].update(self.__mig_payload_extra)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder(self.__mig_title + ' Enabled',
retryable_for_secs=90)
.list_resource(self.__mig_manager_name, **self.__mig_manager_kwargs)
.EXPECT(ov_factory.value_list_contains(
jp.DICT_MATCHES({
'baseInstanceName': jp.STR_SUBSTR(self.__server_group_name),
'targetPools': jp.LIST_MATCHES([jp.STR_SUBSTR( 'https')])
}))))
payload = self.agent.make_json_payload_from_kwargs(
job=job, description=self.__mig_title + ' Test - enable server group',
application=self.TEST_APP)
return st.OperationContract(
self.new_post_operation(
title='enable_server_group', data=payload, path=self.__path),
contract=builder.build())
def destroy_server_group(self, version):
serverGroupName = '%s-%s' % (self.__cluster_name, version)
job = [{
'cloudProvider': 'gce',
'asgName': serverGroupName,
'serverGroupName': serverGroupName,
'region': self.TEST_REGION,
'zone': self.TEST_ZONE,
'type': 'destroyServerGroup',
'regions': [self.TEST_REGION],
'zones': [self.TEST_ZONE],
'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': 'integration-tests'
}]
job[0].update(self.__mig_payload_extra)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder(self.__mig_title + ' Destroyed',
retryable_for_secs=90)
.list_resource(self.__mig_manager_name, **self.__mig_manager_kwargs)
.EXPECT(ov_factory.value_list_path_excludes(
'baseInstanceName', jp.STR_SUBSTR(serverGroupName))))
payload = self.agent.make_json_payload_from_kwargs(
job=job, description=self.__mig_title + ' Test - destroy server group',
application=self.TEST_APP)
return st.OperationContract(
self.new_post_operation(
title='destroy_server_group', data=payload, path=self.__path),
contract=builder.build())
def delete_load_balancer(self):
job = [{
"loadBalancerName": self.__lb_name,
"networkLoadBalancerName": self.__lb_name,
"region": self.TEST_REGION,
"type": "deleteLoadBalancer",
"regions": [self.TEST_REGION],
"credentials": self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
"cloudProvider": "gce",
"user": "integration-tests"
}]
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Load Balancer Created', retryable_for_secs=30)
.list_resource('forwardingRules')
.EXPECT(ov_factory.value_list_path_excludes(
'name', jp.STR_SUBSTR(self.__lb_name))))
payload = self.agent.make_json_payload_from_kwargs(
job=job, description=self.__mig_title + ' Test - delete load balancer',
application=self.TEST_APP)
return st.OperationContract(
self.new_post_operation(
title='delete_load_balancer', data=payload, path=self.__path),
contract=builder.build())
class GoogleServerGroupTest(st.AgentTestCase):
  """Test fixture driving the GoogleServerGroupTestScenario operations.

  Test methods are prefixed test_a.., test_b.., ... so they execute in
  alphabetical order, since later operations act on resources created by
  earlier ones.
  """

  @staticmethod
  def setUpClass():
    # Fail fast if the GCP project/region quota cannot accommodate the scenario.
    runner = citest.base.TestRunner.global_runner()
    scenario = runner.get_shared_data(GoogleServerGroupTestScenario)
    managed_region = scenario.bindings['TEST_GCE_REGION']
    title = 'Check Quota for {0}'.format(scenario.__class__.__name__)
    verify_results = gcp.verify_quota(
        title,
        scenario.gcp_observer,
        project_quota=GoogleServerGroupTestScenario.MINIMUM_PROJECT_QUOTA,
        regions=[(managed_region,
                  GoogleServerGroupTestScenario.MINIMUM_REGION_QUOTA)])
    if not verify_results:
      raise RuntimeError('Insufficient Quota: {0}'.format(verify_results))

  @property
  def scenario(self):
    # The scenario instance is shared across all tests in this runner.
    return citest.base.TestRunner.global_runner().get_shared_data(
        GoogleServerGroupTestScenario)

  def test_a_create_load_balancer(self):
    self.run_test_case(self.scenario.create_load_balancer())

  def test_b_create_server_group(self):
    self.run_test_case(self.scenario.create_server_group(),
                       poll_every_secs=3)

  def test_c_resize_server_group(self):
    self.run_test_case(self.scenario.resize_server_group())

  def test_d_clone_server_group(self):
    self.run_test_case(self.scenario.clone_server_group(),
                       poll_every_secs=3,
                       # TODO(ewiseblatt): 20160314
                       # There is a lock contention race condition
                       # in clouddriver that causes intermittent failure.
                       max_retries=5)

  def test_e_disable_server_group(self):
    self.run_test_case(self.scenario.disable_server_group())

  def test_f_enable_server_group(self):
    self.run_test_case(self.scenario.enable_server_group())

  def test_g_destroy_server_group_v000(self):
    self.run_test_case(self.scenario.destroy_server_group('v000'),
                       poll_every_secs=5)

  def test_h_destroy_server_group_v001(self):
    self.run_test_case(self.scenario.destroy_server_group('v001'),
                       poll_every_secs=5)

  def test_z_delete_load_balancer(self):
    self.run_test_case(self.scenario.delete_load_balancer())
def main():
  """Runs the citest TestRunner for the server group scenario."""
  # These are only used by our scenario.
  # We'll rebind them in the constructor so we can consider command-line args.
  defaults = {
      'TEST_STACK': '',
      'TEST_APP': '',
  }
  return citest.base.TestRunner.main(
      parser_inits=[GoogleServerGroupTestScenario.initArgumentParser],
      default_binding_overrides=defaults,
      test_case_list=[GoogleServerGroupTest])
if __name__ == '__main__':
  # Propagate the test runner's exit code to the shell.
  sys.exit(main())
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Manages all providers."""
import fnmatch
import functools
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from functools import wraps
from time import perf_counter
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
NamedTuple,
Optional,
Set,
Type,
TypeVar,
Union,
cast,
)
import jsonschema
from packaging import version as packaging_version
from airflow.exceptions import AirflowOptionalProviderFeatureException
from airflow.hooks.base import BaseHook
from airflow.utils import yaml
from airflow.utils.entry_points import entry_points_with_dist
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.module_loading import import_string
log = logging.getLogger(__name__)
if sys.version_info >= (3, 9):
from importlib.resources import files as resource_files
else:
from importlib_resources import files as resource_files
# Minimum provider versions this Airflow version is compatible with; providers
# older than the listed version trigger a warning during discovery.
MIN_PROVIDER_VERSIONS = {
    "apache-airflow-providers-celery": "2.1.0",
}
class LazyDictWithCache(MutableMapping):
    """
    Dictionary, which in case you set callable, executes the passed callable with `key` attribute
    at first use - and returns and caches the result.

    A falsy resolved result (e.g. ``None``) is returned but deliberately not
    cached, so the callable gets another chance to produce a value on the
    next access.
    """

    __slots__ = ['_resolved', '_raw_dict']

    def __init__(self, *args, **kw):
        # Keys whose callable has already been resolved and result cached.
        self._resolved = set()
        self._raw_dict = dict(*args, **kw)

    def __setitem__(self, key, value):
        self._raw_dict.__setitem__(key, value)

    def __getitem__(self, key):
        value = self._raw_dict.__getitem__(key)
        if key not in self._resolved and callable(value):
            # exchange callable with result of calling it -- but only once! allow resolver to return a
            # callable itself
            value = value()
            if value:
                # Mark the key resolved only when the result is actually cached.
                # Previously the key was marked resolved even when the falsy
                # result was NOT cached, so every later lookup returned the raw
                # callable object instead of a value.
                self._resolved.add(key)
                self._raw_dict.__setitem__(key, value)
        return value

    def __delitem__(self, key):
        self._raw_dict.__delitem__(key)
        # Also forget the resolution marker; ignore keys never resolved.
        self._resolved.discard(key)

    def __iter__(self):
        return iter(self._raw_dict)

    def __len__(self):
        return len(self._raw_dict)

    def __contains__(self, key):
        return key in self._raw_dict
def _create_provider_info_schema_validator():
    """Creates JSON schema validator from the provider_info.schema.json"""
    schema_path = resource_files("airflow").joinpath("provider_info.schema.json")
    with schema_path.open("rb") as schema_file:
        schema = json.load(schema_file)
    validator_cls = jsonschema.validators.validator_for(schema)
    return validator_cls(schema)
def _create_customized_form_field_behaviours_schema_validator():
    """Creates JSON schema validator from the customized_form_field_behaviours.schema.json"""
    schema_path = resource_files("airflow").joinpath("customized_form_field_behaviours.schema.json")
    with schema_path.open("rb") as schema_file:
        schema = json.load(schema_file)
    validator_cls = jsonschema.validators.validator_for(schema)
    return validator_cls(schema)
def _check_builtin_provider_prefix(provider_package: str, class_name: str) -> bool:
    """Checks that a built-in provider's class lives under the matching module path.

    Non-apache packages are accepted without any check.
    """
    if not provider_package.startswith("apache-airflow"):
        return True
    # "apache-airflow-providers-google" -> "airflow.providers.google"
    provider_path = provider_package[len("apache-") :].replace("-", ".")
    if class_name.startswith(provider_path):
        return True
    log.warning(
        "Coherence check failed when importing '%s' from '%s' package. It should start with '%s'",
        class_name,
        provider_package,
        provider_path,
    )
    return False
def _sanity_check(provider_package: str, class_name: str) -> Optional[Type[BaseHook]]:
    """
    Performs coherence check on provider classes.

    For apache-airflow providers - it checks if it starts with appropriate package. For all providers
    it tries to import the provider - checking that there are no exceptions during importing.
    It logs appropriate warning in case it detects any problems.

    :param provider_package: name of the provider package
    :param class_name: name of the class to import
    :return the class if the class is OK, None otherwise.
    """
    if not _check_builtin_provider_prefix(provider_package, class_name):
        return None
    try:
        return import_string(class_name)
    except AirflowOptionalProviderFeatureException as e:
        # Expected when only some classes of a provider are available;
        # report quietly at debug level.
        log.debug(
            "Optional feature disabled on exception when importing '%s' from '%s' package",
            class_name,
            provider_package,
            exc_info=e,
        )
    except Exception as e:
        # ImportError (only some providers installed) and any other import
        # failure were handled identically before, so both land here.
        log.warning(
            "Exception when importing '%s' from '%s' package",
            class_name,
            provider_package,
            exc_info=e,
        )
    return None
class ProviderInfo(NamedTuple):
    """Provider information"""

    # Provider package version string (e.g. "2.1.0").
    version: str
    # Raw provider-info dictionary, validated against provider_info.schema.json.
    provider_info: Dict
class HookClassProvider(NamedTuple):
    """Hook class and Provider it comes from"""

    # Fully-qualified class name of the hook.
    hook_class_name: str
    # Name of the provider package that declared the hook.
    package_name: str
class HookInfo(NamedTuple):
    """Hook information"""

    # Fully-qualified class name of the hook.
    hook_class_name: str
    # Attribute name on the hook that stores the connection id (taken from the
    # hook's conn_name_attr).
    connection_id_attribute_name: str
    # Provider package the hook comes from.
    package_name: str
    # Human-readable hook name (taken from the hook's hook_name attribute).
    hook_name: str
    # Connection type handled by the hook (taken from conn_type).
    connection_type: str
    # Whether the hook class defines a test_connection attribute.
    connection_testable: bool
class ConnectionFormWidgetInfo(NamedTuple):
    """Connection Form Widget information"""

    # Short (unqualified) name of the hook class that registered the widget.
    hook_class_name: str
    # Provider package the widget comes from.
    package_name: str
    # The form field/widget object itself.
    field: Any
# Type variable used to preserve the signature of decorated callables.
T = TypeVar("T", bound=Callable)

logger = logging.getLogger(__name__)
# We want to have better control over initialization of parameters and be able to debug and test it
# So we add our own decorator
def provider_info_cache(cache_name: str) -> Callable[[T], T]:
    """
    Decorator factory that create decorator that caches initialization of provider's parameters

    The decorated method runs at most once per manager instance; subsequent
    calls return immediately once `cache_name` is recorded in the manager's
    `_initialized_cache`.

    :param cache_name: Name of the cache
    """

    def provider_info_cache_decorator(func: T):
        @wraps(func)
        def wrapped_function(*args, **kwargs):
            # args[0] is the ProvidersManager instance (self).
            manager = args[0]
            if cache_name not in manager._initialized_cache:
                started_at = perf_counter()
                logger.debug("Initializing Providers Manager[%s]", cache_name)
                func(*args, **kwargs)
                manager._initialized_cache[cache_name] = True
                logger.debug(
                    "Initialization of Providers Manager[%s] took %.2f seconds",
                    cache_name,
                    perf_counter() - started_at,
                )

        return cast(T, wrapped_function)

    return provider_info_cache_decorator
class ProvidersManager(LoggingMixin):
    """
    Manages all provider packages. This is a Singleton class. The first time it is
    instantiated, it discovers all available providers in installed packages and
    local source folders (if airflow is run from sources).
    """

    # Singleton instance cache used by __new__.
    _instance = None
    # NOTE(review): kept as a string; appears to be a version marker for the
    # provider resources exposed by the manager - confirm usage elsewhere.
    resource_version = "0"
    def __new__(cls):
        # Singleton: create the instance on first call, reuse it afterwards.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    def __init__(self):
        """Initializes the manager."""
        super().__init__()
        # Tracks which provider_info_cache sections have been initialized.
        self._initialized_cache: Dict[str, bool] = {}
        # Keeps dict of providers keyed by module name
        self._provider_dict: Dict[str, ProviderInfo] = {}
        # Keeps dict of hooks keyed by connection type
        self._hooks_dict: Dict[str, HookInfo] = {}
        # Task decorators are resolved lazily on first access.
        self._taskflow_decorators: Dict[str, Callable] = LazyDictWithCache()
        # keeps mapping between connection_types and hook class, package they come from
        self._hook_provider_dict: Dict[str, HookClassProvider] = {}
        # Keeps dict of hooks keyed by connection type. They are lazy evaluated at access time
        self._hooks_lazy_dict: LazyDictWithCache[str, Union[HookInfo, Callable]] = LazyDictWithCache()
        # Keeps methods that should be used to add custom widgets tuple of keyed by name of the extra field
        self._connection_form_widgets: Dict[str, ConnectionFormWidgetInfo] = {}
        # Customizations for javascript fields are kept here
        self._field_behaviours: Dict[str, Dict] = {}
        self._extra_link_class_name_set: Set[str] = set()
        self._logging_class_name_set: Set[str] = set()
        self._secrets_backend_class_name_set: Set[str] = set()
        self._api_auth_backend_module_names: Set[str] = set()
        # JSON schema validators used when registering providers and hooks.
        self._provider_schema_validator = _create_provider_info_schema_validator()
        self._customized_form_fields_schema_validator = (
            _create_customized_form_field_behaviours_schema_validator()
        )
    @provider_info_cache("list")
    def initialize_providers_list(self):
        """Lazy initialization of providers list."""
        # Local source folders are loaded first. They should take precedence over the package ones for
        # Development purpose. In production provider.yaml files are not present in the 'airflow' directory
        # So there is no risk we are going to override package provider accidentally. This can only happen
        # in case of local development
        self._discover_all_airflow_builtin_providers_from_local_sources()
        self._discover_all_providers_from_packages()
        self._verify_all_providers_all_compatible()
        # Keep the provider dict sorted by package name for stable output.
        self._provider_dict = OrderedDict(sorted(self._provider_dict.items()))
def _verify_all_providers_all_compatible(self):
for provider_id, info in self._provider_dict.items():
min_version = MIN_PROVIDER_VERSIONS.get(provider_id)
if min_version:
if packaging_version.parse(min_version) > packaging_version.parse(info.version):
log.warning(
f"The package {provider_id} is not compatible with this version of Airflow. "
f"The package has version {info.version} but the minimum supported version "
f"of the package is {min_version}"
)
    @provider_info_cache("hooks")
    def initialize_providers_hooks(self):
        """Lazy initialization of providers hooks."""
        self.initialize_providers_list()
        self._discover_hooks()
        # Keep the connection-type mapping sorted for stable output.
        self._hook_provider_dict = OrderedDict(sorted(self._hook_provider_dict.items()))
    @provider_info_cache("taskflow_decorators")
    def initialize_providers_taskflow_decorator(self):
        """Lazy initialization of providers taskflow decorators."""
        self.initialize_providers_list()
        self._discover_taskflow_decorators()
    @provider_info_cache("extra_links")
    def initialize_providers_extra_links(self):
        """Lazy initialization of providers extra links."""
        self.initialize_providers_list()
        self._discover_extra_links()
    @provider_info_cache("logging")
    def initialize_providers_logging(self):
        """Lazy initialization of providers logging information."""
        self.initialize_providers_list()
        self._discover_logging()
    @provider_info_cache("secrets_backends")
    def initialize_providers_secrets_backends(self):
        """Lazy initialization of providers secrets_backends information."""
        self.initialize_providers_list()
        self._discover_secrets_backends()
    @provider_info_cache("auth_backends")
    def initialize_providers_auth_backends(self):
        """Lazy initialization of providers API auth_backends information."""
        self.initialize_providers_list()
        self._discover_auth_backends()
    def _discover_all_providers_from_packages(self) -> None:
        """
        Discovers all providers by scanning packages installed. The list of providers should be returned
        via the 'apache_airflow_provider' entrypoint as a dictionary conforming to the
        'airflow/provider_info.schema.json' schema. Note that the schema is different at runtime
        than provider.yaml.schema.json. The development version of provider schema is more strict and changes
        together with the code. The runtime version is more relaxed (allows for additional properties)
        and verifies only the subset of fields that are needed at runtime.
        """
        for entry_point, dist in entry_points_with_dist('apache_airflow_provider'):
            package_name = dist.metadata['name']
            # Skip packages registered earlier (local sources are loaded first).
            if self._provider_dict.get(package_name) is not None:
                continue
            log.debug("Loading %s from package %s", entry_point, package_name)
            version = dist.version
            # The entrypoint resolves to a callable that returns the provider-info dict.
            provider_info = entry_point.load()()
            self._provider_schema_validator.validate(provider_info)
            provider_info_package_name = provider_info['package-name']
            if package_name != provider_info_package_name:
                raise Exception(
                    f"The package '{package_name}' from setuptools and "
                    f"{provider_info_package_name} do not match. Please make sure they are aligned"
                )
            if package_name not in self._provider_dict:
                self._provider_dict[package_name] = ProviderInfo(version, provider_info)
            else:
                log.warning(
                    "The provider for package '%s' could not be registered from because providers for that "
                    "package name have already been registered",
                    package_name,
                )
    def _discover_all_airflow_builtin_providers_from_local_sources(self) -> None:
        """
        Finds all built-in airflow providers if airflow is run from the local sources.
        It finds `provider.yaml` files for all such providers and registers the providers using those.

        This 'provider.yaml' scanning takes precedence over scanning packages installed
        in case you have both sources and packages installed, the providers will be loaded from
        the "airflow" sources rather than from the packages.
        """
        try:
            import airflow.providers
        except ImportError:
            log.info("You have no providers installed.")
            return
        try:
            # __path__ may contain several roots; scan each of them.
            for path in airflow.providers.__path__:  # type: ignore[attr-defined]
                self._add_provider_info_from_local_source_files_on_path(path)
        except Exception as e:
            log.warning("Error when loading 'provider.yaml' files from airflow sources: %s", e)
    def _add_provider_info_from_local_source_files_on_path(self, path) -> None:
        """
        Finds all the provider.yaml files in the directory specified.

        :param path: path where to look for provider.yaml files
        """
        root_path = path
        for folder, subdirs, files in os.walk(path, topdown=True):
            for filename in fnmatch.filter(files, "provider.yaml"):
                # Derive the package name from the folder path relative to the
                # root, e.g. <root>/google -> apache-airflow-providers-google.
                package_name = "apache-airflow-providers" + folder[len(root_path) :].replace(os.sep, "-")
                self._add_provider_info_from_local_source_file(os.path.join(folder, filename), package_name)
                # Clearing the dirnames list prunes os.walk (topdown mode), so
                # we do not descend below a folder that has a provider.yaml.
                subdirs[:] = []
    def _add_provider_info_from_local_source_file(self, path, package_name) -> None:
        """
        Parses found provider.yaml file and adds found provider to the dictionary.

        :param path: full file path of the provider.yaml file
        :param package_name: name of the package
        """
        try:
            log.debug("Loading %s from %s", package_name, path)
            with open(path) as provider_yaml_file:
                provider_info = yaml.safe_load(provider_yaml_file)
            self._provider_schema_validator.validate(provider_info)
            # The first entry of 'versions' is used as the provider's version
            # (presumably newest first - confirm against provider.yaml layout).
            version = provider_info['versions'][0]
            if package_name not in self._provider_dict:
                self._provider_dict[package_name] = ProviderInfo(version, provider_info)
            else:
                log.warning(
                    "The providers for package '%s' could not be registered because providers for that "
                    "package name have already been registered",
                    package_name,
                )
        except Exception as e:
            log.warning("Error when loading '%s'", path, exc_info=e)
    def _discover_hooks_from_connection_types(
        self,
        hook_class_names_registered: Set[str],
        already_registered_warning_connection_types: Set[str],
        package_name: str,
        provider: ProviderInfo,
    ):
        """
        Discover hooks from the "connection-types" property. This is new, better method that replaces
        discovery from hook-class-names as it allows to lazy import individual Hook classes when they
        are accessed. The "connection-types" keeps information about both - connection type and class
        name so we can discover all connection-types without importing the classes.

        :param hook_class_names_registered: set of registered hook class names for this provider
        :param already_registered_warning_connection_types: set of connections for which warning should be
            printed in logs as they were already registered before
        :param package_name: name of the provider package
        :param provider: provider info (version + provider-info dict)
        :return: True if the provider declared "connection-types", False otherwise
        """
        provider_uses_connection_types = False
        connection_types = provider.provider_info.get("connection-types")
        if connection_types:
            for connection_type_dict in connection_types:
                connection_type = connection_type_dict['connection-type']
                hook_class_name = connection_type_dict['hook-class-name']
                hook_class_names_registered.add(hook_class_name)
                already_registered = self._hook_provider_dict.get(connection_type)
                if already_registered:
                    # A different provider already claimed this connection type;
                    # remember it so the caller can warn once per type.
                    if already_registered.package_name != package_name:
                        already_registered_warning_connection_types.add(connection_type)
                else:
                    self._hook_provider_dict[connection_type] = HookClassProvider(
                        hook_class_name=hook_class_name, package_name=package_name
                    )
                    # Defer importing hook to access time by setting import hook method as dict value
                    self._hooks_lazy_dict[connection_type] = functools.partial(
                        self._import_hook, connection_type
                    )
            provider_uses_connection_types = True
        return provider_uses_connection_types
    def _discover_hooks_from_hook_class_names(
        self,
        hook_class_names_registered: Set[str],
        already_registered_warning_connection_types: Set[str],
        package_name: str,
        provider: ProviderInfo,
        provider_uses_connection_types: bool,
    ):
        """
        Discovers hooks from 'hook-class-names' property. This property is deprecated but we should
        support it in Airflow 2. The hook-class-names array contained just Hook names without connection
        type, therefore we need to import all those classes immediately to know which connection types
        are supported. This makes it impossible to selectively only import those hooks that are used.

        :param hook_class_names_registered: set of hook class names already registered via
            connection-types discovery (those are skipped here)
        :param already_registered_warning_connection_types: list of connection hooks that we should warn
            about when finished discovery
        :param package_name: name of the provider package
        :param provider: class that keeps information about version and details of the provider
        :param provider_uses_connection_types: determines whether the provider uses "connection-types" new
            form of passing connection types
        :return:
        """
        hook_class_names = provider.provider_info.get("hook-class-names")
        if hook_class_names:
            for hook_class_name in hook_class_names:
                if hook_class_name in hook_class_names_registered:
                    # Silently ignore the hook class - it's already marked for lazy-import by
                    # connection-types discovery
                    continue
                # Eager import: the connection type is only known after importing.
                hook_info = self._import_hook(
                    connection_type=None, hook_class_name=hook_class_name, package_name=package_name
                )
                if not hook_info:
                    # Problem why importing class - we ignore it. Log is written at import time
                    continue
                already_registered = self._hook_provider_dict.get(hook_info.connection_type)
                if already_registered:
                    if already_registered.package_name != package_name:
                        already_registered_warning_connection_types.add(hook_info.connection_type)
                    else:
                        if already_registered.hook_class_name != hook_class_name:
                            log.warning(
                                "The hook connection type '%s' is registered twice in the"
                                " package '%s' with different class names: '%s' and '%s'. "
                                " Please fix it!",
                                hook_info.connection_type,
                                package_name,
                                already_registered.hook_class_name,
                                hook_class_name,
                            )
                else:
                    self._hook_provider_dict[hook_info.connection_type] = HookClassProvider(
                        hook_class_name=hook_class_name, package_name=package_name
                    )
                    self._hooks_lazy_dict[hook_info.connection_type] = hook_info
            if not provider_uses_connection_types:
                warnings.warn(
                    f"The provider {package_name} uses `hook-class-names` "
                    "property in provider-info and has no `connection-types` one. "
                    "The 'hook-class-names' property has been deprecated in favour "
                    "of 'connection-types' in Airflow 2.2. Use **both** in case you want to "
                    "have backwards compatibility with Airflow < 2.2",
                    DeprecationWarning,
                )
        # Emit one warning per connection type that was claimed by more than
        # one provider during this discovery pass.
        for already_registered_connection_type in already_registered_warning_connection_types:
            log.warning(
                "The connection_type '%s' has been already registered by provider '%s.'",
                already_registered_connection_type,
                self._hook_provider_dict[already_registered_connection_type].package_name,
            )
    def _discover_hooks(self) -> None:
        """Retrieves all connections defined in the providers via Hooks"""
        for package_name, provider in self._provider_dict.items():
            duplicated_connection_types: Set[str] = set()
            hook_class_names_registered: Set[str] = set()
            # Prefer the new "connection-types" declaration (lazy imports)...
            provider_uses_connection_types = self._discover_hooks_from_connection_types(
                hook_class_names_registered, duplicated_connection_types, package_name, provider
            )
            # ...then fall back to the deprecated "hook-class-names" (eager imports).
            self._discover_hooks_from_hook_class_names(
                hook_class_names_registered,
                duplicated_connection_types,
                package_name,
                provider,
                provider_uses_connection_types,
            )
        self._hook_provider_dict = OrderedDict(sorted(self._hook_provider_dict.items()))
    @provider_info_cache("import_all_hooks")
    def _import_info_from_all_hooks(self):
        """Force-import all hooks and initialize the connections/fields"""
        # Retrieve all hooks to make sure that all of them are imported
        _ = list(self._hooks_lazy_dict.values())
        # Importing the hooks fills the widget/behaviour dicts as a side
        # effect (via _import_hook); sort them for stable ordering.
        self._connection_form_widgets = OrderedDict(sorted(self._connection_form_widgets.items()))
        self._field_behaviours = OrderedDict(sorted(self._field_behaviours.items()))
    def _discover_taskflow_decorators(self) -> None:
        """Registers task decorators declared via the providers' "task-decorators" property."""
        for name, info in self._provider_dict.items():
            for taskflow_decorator in info.provider_info.get("task-decorators", []):
                self._add_taskflow_decorator(
                    taskflow_decorator["name"], taskflow_decorator["class-name"], name
                )
    def _add_taskflow_decorator(self, name, decorator_class_name: str, provider_package: str) -> None:
        """Registers a single taskflow decorator, imported lazily on first use.

        :param name: name under which the decorator is registered
        :param decorator_class_name: fully-qualified class name to import lazily
        :param provider_package: provider package declaring the decorator
        """
        if not _check_builtin_provider_prefix(provider_package, decorator_class_name):
            return

        if name in self._taskflow_decorators:
            try:
                existing = self._taskflow_decorators[name]
                other_name = f'{existing.__module__}.{existing.__name__}'
            except Exception:
                # If problem importing, then get the value from the functools.partial
                other_name = self._taskflow_decorators._raw_dict[name].args[0]  # type: ignore[attr-defined]
            log.warning(
                "The taskflow decorator '%s' has been already registered (by %s).",
                name,
                other_name,
            )
            return

        self._taskflow_decorators[name] = functools.partial(import_string, decorator_class_name)
@staticmethod
def _get_attr(obj: Any, attr_name: str):
"""Retrieves attributes of an object, or warns if not found"""
if not hasattr(obj, attr_name):
log.warning("The object '%s' is missing %s attribute and cannot be registered", obj, attr_name)
return None
return getattr(obj, attr_name)
def _import_hook(
self,
connection_type: Optional[str],
hook_class_name: Optional[str] = None,
package_name: Optional[str] = None,
) -> Optional[HookInfo]:
"""
Imports hook and retrieves hook information. Either connection_type (for lazy loading)
or hook_class_name must be set - but not both). Only needs package_name if hook_class_name is
passed (for lazy loading, package_name is retrieved from _connection_type_class_provider_dict
together with hook_class_name).
:param connection_type: type of the connection
:param hook_class_name: name of the hook class
:param package_name: provider package - only needed in case connection_type is missing
: return
"""
from wtforms import BooleanField, IntegerField, PasswordField, StringField
if connection_type is None and hook_class_name is None:
raise ValueError("Either connection_type or hook_class_name must be set")
if connection_type is not None and hook_class_name is not None:
raise ValueError(
f"Both connection_type ({connection_type} and "
f"hook_class_name {hook_class_name} are set. Only one should be set!"
)
if connection_type is not None:
class_provider = self._hook_provider_dict[connection_type]
package_name = class_provider.package_name
hook_class_name = class_provider.hook_class_name
else:
if not hook_class_name:
raise ValueError("Either connection_type or hook_class_name must be set")
if not package_name:
raise ValueError(
f"Provider package name is not set when hook_class_name ({hook_class_name}) is used"
)
allowed_field_classes = [IntegerField, PasswordField, StringField, BooleanField]
hook_class = _sanity_check(package_name, hook_class_name)
if hook_class is None:
return None
try:
module, class_name = hook_class_name.rsplit('.', maxsplit=1)
# Do not use attr here. We want to check only direct class fields not those
# inherited from parent hook. This way we add form fields only once for the whole
# hierarchy and we add it only from the parent hook that provides those!
if 'get_connection_form_widgets' in hook_class.__dict__:
widgets = hook_class.get_connection_form_widgets()
if widgets:
for widget in widgets.values():
if widget.field_class not in allowed_field_classes:
log.warning(
"The hook_class '%s' uses field of unsupported class '%s'. "
"Only '%s' field classes are supported",
hook_class_name,
widget.field_class,
allowed_field_classes,
)
return None
self._add_widgets(package_name, hook_class, widgets)
if 'get_ui_field_behaviour' in hook_class.__dict__:
field_behaviours = hook_class.get_ui_field_behaviour()
if field_behaviours:
self._add_customized_fields(package_name, hook_class, field_behaviours)
except Exception as e:
log.warning(
"Exception when importing '%s' from '%s' package: %s",
hook_class_name,
package_name,
e,
)
return None
hook_connection_type = self._get_attr(hook_class, 'conn_type')
if connection_type:
if hook_connection_type != connection_type:
log.warning(
"Inconsistency! The hook class '%s' declares connection type '%s'"
" but it is added by provider '%s' as connection_type '%s' in provider info. "
"This should be fixed!",
hook_class,
hook_connection_type,
package_name,
connection_type,
)
connection_type = hook_connection_type
connection_id_attribute_name: str = self._get_attr(hook_class, 'conn_name_attr')
hook_name: str = self._get_attr(hook_class, 'hook_name')
if not connection_type or not connection_id_attribute_name or not hook_name:
log.warning(
"The hook misses one of the key attributes: "
"conn_type: %s, conn_id_attribute_name: %s, hook_name: %s",
connection_type,
connection_id_attribute_name,
hook_name,
)
return None
return HookInfo(
hook_class_name=hook_class_name,
connection_id_attribute_name=connection_id_attribute_name,
package_name=package_name,
hook_name=hook_name,
connection_type=connection_type,
connection_testable=hasattr(hook_class, 'test_connection'),
)
def _add_widgets(self, package_name: str, hook_class: type, widgets: Dict[str, Any]):
    """Register connection-form widgets contributed by a hook class.

    Fields must be namespaced with the ``extra__`` prefix; fields that are
    not, or that were already registered by another provider (which happens
    with inherited hooks), are skipped with a warning.
    """
    class_name = hook_class.__name__
    for field_name, field in widgets.items():
        if not field_name.startswith("extra__"):
            log.warning(
                "The field %s from class %s does not start with 'extra__'. Ignoring it.",
                field_name,
                class_name,
            )
        elif field_name in self._connection_form_widgets:
            # In case of inherited hooks this might be happening several times
            log.warning(
                "The field %s from class %s has already been added by another provider. Ignoring it.",
                field_name,
                class_name,
            )
        else:
            self._connection_form_widgets[field_name] = ConnectionFormWidgetInfo(
                class_name, package_name, field
            )
def _add_customized_fields(self, package_name: str, hook_class: type, customized_fields: Dict):
    """Validate and register per-connection-type field behaviours from a hook class.

    Any failure (missing ``conn_type`` attribute, schema-validation error, ...)
    is caught and logged rather than propagated, so a broken provider cannot
    break manager initialization.
    """
    try:
        # AttributeError from a missing conn_type is handled by the except below.
        conn_type = hook_class.conn_type
        self._customized_form_fields_schema_validator.validate(customized_fields)
        if conn_type not in self._field_behaviours:
            self._field_behaviours[conn_type] = customized_fields
            return
        log.warning(
            "The connection_type %s from package %s and class %s has already been added "
            "by another provider. Ignoring it.",
            conn_type,
            package_name,
            hook_class.__name__,
        )
    except Exception as e:
        log.warning(
            "Error when loading customized fields from package '%s' hook class '%s': %s",
            package_name,
            hook_class.__name__,
            e,
        )
def _discover_extra_links(self) -> None:
    """Collect the extra-link class names declared by all providers."""
    for package, (_, provider) in self._provider_dict.items():
        # A missing or empty "extra-links" entry contributes nothing.
        for class_name in provider.get("extra-links") or ():
            if _sanity_check(package, class_name):
                self._extra_link_class_name_set.add(class_name)
def _discover_logging(self) -> None:
    """Collect the task-log handler class names declared by all providers."""
    for package, (_, provider) in self._provider_dict.items():
        # A missing or empty "logging" entry contributes nothing.
        for class_name in provider.get("logging") or ():
            if _sanity_check(package, class_name):
                self._logging_class_name_set.add(class_name)
def _discover_secrets_backends(self) -> None:
    """Collect the secrets backend class names declared by all providers."""
    for package, (_, provider) in self._provider_dict.items():
        # A missing or empty "secrets-backends" entry contributes nothing.
        for class_name in provider.get("secrets-backends") or ():
            if _sanity_check(package, class_name):
                self._secrets_backend_class_name_set.add(class_name)
def _discover_auth_backends(self) -> None:
    """Collect the API auth backend module names declared by all providers."""
    for package, (_, provider) in self._provider_dict.items():
        for module_name in provider.get("auth-backends") or ():
            # An auth backend module must expose an init_app callable.
            if _sanity_check(package, module_name + ".init_app"):
                self._api_auth_backend_module_names.add(module_name)
@property
def providers(self) -> Dict[str, ProviderInfo]:
    """Mapping of provider package name to its provider information."""
    # Discovery is lazy -- make sure it has happened before exposing the dict.
    self.initialize_providers_list()
    return self._provider_dict
@property
def hooks(self) -> MutableMapping[str, Optional[HookInfo]]:
    """
    Mapping from connection type to hook information.

    Values can be None when a discovered hook could not be imported.
    """
    self.initialize_providers_hooks()
    # The lazy dict defers the actual hook import until a value is accessed;
    # callers use it only to retrieve hook information.
    return self._hooks_lazy_dict
@property
def taskflow_decorators(self) -> Dict[str, Callable]:
    """Mapping of decorator name to the taskflow decorators from providers."""
    self.initialize_providers_taskflow_decorator()
    return self._taskflow_decorators
@property
def extra_links_class_names(self) -> List[str]:
    """Sorted list of the discovered extra-link class names."""
    self.initialize_providers_extra_links()
    return sorted(self._extra_link_class_name_set)
@property
def connection_form_widgets(self) -> Dict[str, ConnectionFormWidgetInfo]:
    """Extra widgets to render in connection forms, keyed by field name."""
    self.initialize_providers_hooks()
    self._import_info_from_all_hooks()
    return self._connection_form_widgets
@property
def field_behaviours(self) -> Dict[str, Dict]:
    """Customized form-field behaviours, keyed by connection type."""
    self.initialize_providers_hooks()
    self._import_info_from_all_hooks()
    return self._field_behaviours
@property
def logging_class_names(self) -> List[str]:
    """Sorted list of the discovered task-log handler class names."""
    self.initialize_providers_logging()
    return sorted(self._logging_class_name_set)
@property
def secrets_backend_class_names(self) -> List[str]:
    """Sorted list of the discovered secrets backend class names."""
    self.initialize_providers_secrets_backends()
    return sorted(self._secrets_backend_class_name_set)
@property
def auth_backend_module_names(self) -> List[str]:
    """Sorted list of the discovered API auth backend module names."""
    self.initialize_providers_auth_backends()
    return sorted(self._api_auth_backend_module_names)
| |
"""
"""
# Created on 2013.08.05
#
# Author: Giovanni Cannata
#
# Copyright 2013, 2014, 2015, 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from .exceptions import LDAPSSLNotSupportedError, LDAPSSLConfigurationError, LDAPStartTLSError, LDAPCertificateError, start_tls_exception_factory
from .. import SEQUENCE_TYPES
from ..utils.log import log, log_enabled, ERROR, BASIC, NETWORK
try:
# noinspection PyUnresolvedReferences
import ssl
except ImportError:
if log_enabled(ERROR):
log(ERROR, 'SSL not supported in this Python interpreter')
raise LDAPSSLNotSupportedError('SSL not supported in this Python interpreter')
try:
from ssl import match_hostname, CertificateError # backport for python2 missing ssl functionalities
except ImportError:
from ..utils.tls_backport import CertificateError
from ..utils.tls_backport import match_hostname
if log_enabled(BASIC):
log(BASIC, 'using tls_backport')
try: # try to use SSLContext
# noinspection PyUnresolvedReferences
from ssl import create_default_context, Purpose # defined in Python 3.4 and Python 2.7.9
use_ssl_context = True
except ImportError:
use_ssl_context = False
if log_enabled(BASIC):
log(BASIC, 'SSLContext unavailable')
from os import path
# noinspection PyProtectedMember
class Tls(object):
    """
    tls/ssl configuration for Server object
    Starting from python 2.7.9 and python 3.4 uses the SSLContext object
    that tries to read the CAs defined at system level
    ca_certs_path and ca_certs_data are valid only when using SSLContext
    local_private_key_password is valid only when using SSLContext
    """

    def __init__(self,
                 local_private_key_file=None,
                 local_certificate_file=None,
                 validate=ssl.CERT_NONE,
                 version=None,
                 ca_certs_file=None,
                 valid_names=None,
                 ca_certs_path=None,
                 ca_certs_data=None,
                 local_private_key_password=None,
                 ciphers=None):
        # Validate the certificate-verification mode first.
        # NOTE(review): a falsy value that is not one of the CERT_* constants
        # (e.g. validate=None) takes neither branch, leaving self.validate
        # unset -- later attribute access would raise AttributeError. Only
        # truthy invalid values raise LDAPSSLConfigurationError here.
        if validate in [ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED]:
            self.validate = validate
        elif validate:
            if log_enabled(ERROR):
                log(ERROR, 'invalid validate parameter <%s>', validate)
            raise LDAPSSLConfigurationError('invalid validate parameter')

        # CA certificates file: must exist on disk if given.
        if ca_certs_file and path.exists(ca_certs_file):
            self.ca_certs_file = ca_certs_file
        elif ca_certs_file:
            if log_enabled(ERROR):
                log(ERROR, 'invalid CA public key file <%s>', ca_certs_file)
            raise LDAPSSLConfigurationError('invalid CA public key file')
        else:
            self.ca_certs_file = None

        # CA certificates path: only usable with SSLContext, and must exist.
        if ca_certs_path and use_ssl_context and path.exists(ca_certs_path):
            self.ca_certs_path = ca_certs_path
        elif ca_certs_path and not use_ssl_context:
            if log_enabled(ERROR):
                log(ERROR, 'cannot use CA public keys path, SSLContext not available')
            raise LDAPSSLNotSupportedError('cannot use CA public keys path, SSLContext not available')
        elif ca_certs_path:
            if log_enabled(ERROR):
                log(ERROR, 'invalid CA public keys path <%s>', ca_certs_path)
            raise LDAPSSLConfigurationError('invalid CA public keys path')
        else:
            self.ca_certs_path = None

        # In-memory CA data: only usable with SSLContext.
        if ca_certs_data and use_ssl_context:
            self.ca_certs_data = ca_certs_data
        elif ca_certs_data:
            if log_enabled(ERROR):
                log(ERROR, 'cannot use CA data, SSLContext not available')
            raise LDAPSSLNotSupportedError('cannot use CA data, SSLContext not available')
        else:
            self.ca_certs_data = None

        # Private key password: only usable with SSLContext.
        if local_private_key_password and use_ssl_context:
            self.private_key_password = local_private_key_password
        elif local_private_key_password:
            if log_enabled(ERROR):
                log(ERROR, 'cannot use local private key password, SSLContext not available')
            raise LDAPSSLNotSupportedError('cannot use local private key password, SSLContext is not available')
        else:
            self.private_key_password = None

        self.version = version
        self.private_key_file = local_private_key_file
        self.certificate_file = local_certificate_file
        self.valid_names = valid_names
        self.ciphers = ciphers

        if log_enabled(BASIC):
            log(BASIC, 'instantiated Tls: <%r>' % self)

    def __str__(self):
        # Human-readable summary; secrets are reported only as present/absent.
        s = [
            'protocol: ' + str(self.version),
            'client private key: ' + ('present ' if self.private_key_file else 'not present'),
            'client certificate: ' + ('present ' if self.certificate_file else 'not present'),
            'private key password: ' + ('present ' if self.private_key_password else 'not present'),
            'CA certificates file: ' + ('present ' if self.ca_certs_file else 'not present'),
            'CA certificates path: ' + ('present ' if self.ca_certs_path else 'not present'),
            'CA certificates data: ' + ('present ' if self.ca_certs_data else 'not present'),
            'verify mode: ' + str(self.validate),
            'valid names: ' + str(self.valid_names),
            'ciphers: ' + str(self.ciphers)
        ]
        return ' - '.join(s)

    def __repr__(self):
        # Constructor-style repr. NOTE(review): private_key_password (likely
        # deliberate, to avoid leaking secrets) and valid_names are omitted,
        # so eval(repr(tls)) would not fully round-trip the configuration.
        r = '' if self.private_key_file is None else ', local_private_key_file={0.private_key_file!r}'.format(self)
        r += '' if self.certificate_file is None else ', local_certificate_file={0.certificate_file!r}'.format(self)
        r += '' if self.validate is None else ', validate={0.validate!r}'.format(self)
        r += '' if self.version is None else ', version={0.version!r}'.format(self)
        r += '' if self.ca_certs_file is None else ', ca_certs_file={0.ca_certs_file!r}'.format(self)
        r += '' if self.ca_certs_path is None else ', ca_certs_path={0.ca_certs_path!r}'.format(self)
        r += '' if self.ca_certs_data is None else ', ca_certs_data={0.ca_certs_data!r}'.format(self)
        r += '' if self.ciphers is None else ', ciphers={0.ciphers!r}'.format(self)
        # Strip the leading ', ' from the first appended fragment.
        r = 'Tls(' + r[2:] + ')'
        return r

    def wrap_socket(self, connection, do_handshake=False):
        """
        Adds TLS to the connection socket
        """
        if use_ssl_context:
            if self.version is None:  # uses the default ssl context for reasonable security
                ssl_context = create_default_context(purpose=Purpose.SERVER_AUTH,
                                                     cafile=self.ca_certs_file,
                                                     capath=self.ca_certs_path,
                                                     cadata=self.ca_certs_data)
            else:  # code from create_default_context in the Python standard library 3.5.1, creates a ssl context with the specified protocol version
                ssl_context = ssl.SSLContext(self.version)
                if self.ca_certs_file or self.ca_certs_path or self.ca_certs_data:
                    ssl_context.load_verify_locations(self.ca_certs_file, self.ca_certs_path, self.ca_certs_data)
                elif self.validate != ssl.CERT_NONE:
                    ssl_context.load_default_certs(Purpose.SERVER_AUTH)
            if self.certificate_file:
                ssl_context.load_cert_chain(self.certificate_file, keyfile=self.private_key_file, password=self.private_key_password)
            # Hostname checking is done manually via check_hostname() below,
            # so the context's built-in check is disabled.
            ssl_context.check_hostname = False
            ssl_context.verify_mode = self.validate
            if self.ciphers:
                try:
                    ssl_context.set_ciphers(self.ciphers)
                except ssl.SSLError:
                    pass  # unsupported cipher string: silently fall back to defaults

            wrapped_socket = ssl_context.wrap_socket(connection.socket, server_side=False, do_handshake_on_connect=do_handshake)
            if log_enabled(NETWORK):
                log(NETWORK, 'socket wrapped with SSL using SSLContext for <%s>', connection)
        else:
            # Legacy path for interpreters without SSLContext (pre 2.7.9/3.4).
            if self.version is None and hasattr(ssl, 'PROTOCOL_SSLv23'):
                self.version = ssl.PROTOCOL_SSLv23
            if self.ciphers:
                try:
                    wrapped_socket = ssl.wrap_socket(connection.socket,
                                                     keyfile=self.private_key_file,
                                                     certfile=self.certificate_file,
                                                     server_side=False,
                                                     cert_reqs=self.validate,
                                                     ssl_version=self.version,
                                                     ca_certs=self.ca_certs_file,
                                                     do_handshake_on_connect=do_handshake,
                                                     ciphers=self.ciphers)
                except ssl.SSLError:
                    raise
                except TypeError:  # in python2.6 no ciphers argument is present, failback to self.ciphers=None
                    self.ciphers = None

            if not self.ciphers:
                wrapped_socket = ssl.wrap_socket(connection.socket,
                                                 keyfile=self.private_key_file,
                                                 certfile=self.certificate_file,
                                                 server_side=False,
                                                 cert_reqs=self.validate,
                                                 ssl_version=self.version,
                                                 ca_certs=self.ca_certs_file,
                                                 do_handshake_on_connect=do_handshake)
            if log_enabled(NETWORK):
                log(NETWORK, 'socket wrapped with SSL for <%s>', connection)

        # Manual hostname verification (only meaningful once the handshake
        # has run and certificate verification was requested).
        if do_handshake and (self.validate == ssl.CERT_REQUIRED or self.validate == ssl.CERT_OPTIONAL):
            check_hostname(wrapped_socket, connection.server.host, self.valid_names)

        connection.socket = wrapped_socket
        return

    def start_tls(self, connection):
        # Issue the StartTLS extended operation (OID 1.3.6.1.4.1.1466.20037)
        # on an existing connection. Returns False when TLS cannot/need not
        # be started; raises LDAPStartTLSError on a failed sync StartTLS.
        if connection.server.ssl:  # ssl already established at server level
            return False

        if (connection.tls_started and not connection._executing_deferred) or connection.strategy._outstanding or connection.sasl_in_progress:
            # Per RFC 4513 (3.1.1)
            # NOTE(review): the log argument is 'self' (the Tls object), not
            # 'connection' -- possibly intended to be the connection.
            if log_enabled(ERROR):
                log(ERROR, "can't start tls because operations are in progress for <%s>", self)
            return False
        connection.starting_tls = True
        if log_enabled(BASIC):
            log(BASIC, 'starting tls for <%s>', connection)
        result = connection.extended('1.3.6.1.4.1.1466.20037')
        if not connection.strategy.sync:
            # async - _start_tls must be executed by the strategy
            response = connection.get_response(result)
            if response != (None, None):
                if log_enabled(BASIC):
                    log(BASIC, 'tls started for <%s>', connection)
                return True
            else:
                if log_enabled(BASIC):
                    log(BASIC, 'tls not started for <%s>', connection)
                return False
        else:
            if connection.result['description'] not in ['success']:
                # startTLS failed
                connection.last_error = 'startTLS failed - ' + str(connection.result['description'])
                if log_enabled(ERROR):
                    log(ERROR, '%s for <%s>', connection.last_error, connection)
                raise LDAPStartTLSError(connection.last_error)
            if log_enabled(BASIC):
                log(BASIC, 'tls started for <%s>', connection)
            return self._start_tls(connection)

    def _start_tls(self, connection):
        # Wrap the socket with TLS and perform the handshake. The exception
        # (if any) is deferred until starting_tls has been reset, so the
        # connection state stays consistent even on failure.
        exc = None
        try:
            self.wrap_socket(connection, do_handshake=True)
        except Exception as e:
            connection.last_error = 'wrap socket error: ' + str(e)
            exc = e

        connection.starting_tls = False

        if exc:
            if log_enabled(ERROR):
                log(ERROR, 'error <%s> wrapping socket for TLS in <%s>', connection.last_error, connection)
            raise start_tls_exception_factory(LDAPStartTLSError, exc)(connection.last_error)

        if connection.usage:
            connection._usage.wrapped_sockets += 1
        connection.tls_started = True
        return True
def check_hostname(sock, server_name, additional_names):
    """Verify that the peer certificate of *sock* matches an expected name.

    Candidate names are *server_name* plus *additional_names* (a single name
    or a sequence). An empty name is skipped; the literal '*' accepts any
    certificate. Raises LDAPCertificateError when no candidate matches.
    """
    peer_cert = sock.getpeercert()
    if log_enabled(NETWORK):
        log(NETWORK, 'certificate found for %s: %s', sock, peer_cert)

    candidates = [server_name]
    if additional_names:
        if isinstance(additional_names, SEQUENCE_TYPES):
            candidates.extend(additional_names)
        else:
            candidates.append(additional_names)

    for candidate in candidates:
        if not candidate:
            continue
        if candidate == '*':
            # Explicit wildcard: accept whatever certificate was presented.
            if log_enabled(NETWORK):
                log(NETWORK, 'certificate matches * wildcard')
            return  # valid
        try:
            # raise CertificateError if certificate doesn't match server name
            match_hostname(peer_cert, candidate)
        except CertificateError as e:
            if log_enabled(NETWORK):
                log(NETWORK, str(e))
            continue
        if log_enabled(NETWORK):
            log(NETWORK, 'certificate matches host name <%s>', candidate)
        return  # valid

    if log_enabled(ERROR):
        log(ERROR, "hostname doesn't match certificate")
    raise LDAPCertificateError("certificate %s doesn't match any name in %s " % (peer_cert, str(candidates)))
| |
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the client_replay.CommandSequence class."""
import io
import optparse
import os
import sys
import unittest
import client_replay
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
_PARENT_DIR = os.path.join(_THIS_DIR, os.pardir)
_TEST_DIR = os.path.join(_PARENT_DIR, "test")
# pylint: disable=g-import-not-at-top
sys.path.insert(1, _TEST_DIR)
import unittest_util
sys.path.remove(_TEST_DIR)
sys.path.insert(1, _PARENT_DIR)
import util
sys.path.insert(1, _PARENT_DIR)
# pylint: enable=g-import-not-at-top
# Session ids that appear in the log fixtures below.
_SESSION_ID = "b15232d5497ec0d8300a5a1ea56f33ce"
_SESSION_ID_ALT = "a81dc5521092a5ba132b9c0b6cf6e84f"

# COMMAND/RESPONSE pair with empty payloads.
_NO_PARAMS = ("[1531428669.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] "
              "COMMAND GetTitle {\n\n}\n"
              "[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] "
              "RESPONSE GetTitle\n")
# COMMAND/RESPONSE pair with JSON payloads.
_WITH_PARAMS = ('[1531428669.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] '
                'COMMAND GetTitle {\n"param1": 7\n}\n'
                '[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] '
                'RESPONSE GetTitle {\n"param2": 42\n}\n')
# Single log entries: one COMMAND, one RESPONSE.
_COMMAND_ONLY = ('[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] '
                 'COMMAND GetTitle {\n"param1": 7\n}\n')
_RESPONSE_ONLY = ('[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] '
                  'RESPONSE GetTitle {\n"param2": 42\n}\n')
# Response whose payload embeds a JavaScript snippet (brace-balanced).
_PAYLOAD_SCRIPT = ('[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce]'
                   ' RESPONSE GetTitle {\n"param2": "function(){func()}"\n}\n')
# Same entry using human-readable timestamps (Linux has microseconds,
# Windows only milliseconds).
_PAYLOAD_READABLE_TIME_LINUX = (
    '[08-12-2019 15:45:34.824002][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce]'
    ' RESPONSE GetTitle {\n"param2": "function(){func()}"\n}\n')
_PAYLOAD_READABLE_TIME_WINDOWS = (
    '[08-12-2019 15:45:34.824][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce]'
    ' RESPONSE GetTitle {\n"param2": "function(){func()}"\n}\n')
# Payload containing pathological unbalanced braces inside a quoted string.
_BAD_SCRIPT = ('[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce]'
               ' RESPONSE GetTitle {\n"param2": "))}\\})}/{)}({(})}"\n}\n')
# Interleaved entries from two sessions, followed by a trailing command.
_MULTI_SESSION = ('[1531428669.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] '
                  'COMMAND GetSessions {\n\n}\n'
                  '[1531428669.535][INFO]: [a81dc5521092a5ba132b9c0b6cf6e84f] '
                  'COMMAND GetSessions {\n\n}\n'
                  '[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce] '
                  'RESPONSE GetSessions {\n"param2": 42\n}\n'
                  '[1531428670.535][INFO]: [a81dc5521092a5ba132b9c0b6cf6e84f] '
                  'RESPONSE GetSessions {\n"param2": 42\n}\n' + _COMMAND_ONLY)
# Sample window handles and W3C element id dicts used by the id-substitution
# tests.
_WINDOW_IDS = ["CDwindow-00", "CDwindow-98", "other thing"]
_ELEMENT_ID = {"element-6066-11e4-a52e-4f735466cecf": "0.87-1"}
_ELEMENT_IDS = [{"element-6066-11e4-a52e-4f735466cecf": "0.87-1"},
                {"element-6066-11e4-a52e-4f735466cecf": "0.87-2"}]
class ChromeDriverClientReplayUnitTest(unittest.TestCase):
  """Unit tests for client_replay's log parsing and replay helpers."""

  def __init__(self, *args, **kwargs):
    super(ChromeDriverClientReplayUnitTest, self).__init__(*args, **kwargs)

  # --- CommandSequence / _Parser round-trips over log fixtures ---

  def testNextCommandEmptyParams(self):
    string_buffer = io.StringIO(_NO_PARAMS)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    command = command_sequence.NextCommand(None)
    response = command_sequence._last_response
    self.assertEqual(command.name, "GetTitle")
    # NextCommand injects the session id into the command payload.
    self.assertEqual(command.GetPayloadPrimitive(), {"sessionId": _SESSION_ID})
    self.assertEqual(command.session_id, _SESSION_ID)
    self.assertEqual(response.name, "GetTitle")
    self.assertEqual(response.GetPayloadPrimitive(), None)
    self.assertEqual(response.session_id, _SESSION_ID)

  def testNextCommandWithParams(self):
    string_buffer = io.StringIO(_WITH_PARAMS)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    command = command_sequence.NextCommand(None)
    response = command_sequence._last_response
    self.assertEqual(command.name, "GetTitle")
    self.assertEqual(command.GetPayloadPrimitive(), {"param1": 7,
                                                     "sessionId": _SESSION_ID})
    self.assertEqual(command.session_id, _SESSION_ID)
    self.assertEqual(response.name, "GetTitle")
    self.assertEqual(response.GetPayloadPrimitive(), {"param2": 42})
    self.assertEqual(response.session_id, _SESSION_ID)

  def testParserGetNext(self):
    # GetNext alone does not add the sessionId to the payload.
    string_buffer = io.StringIO(_WITH_PARAMS)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    command = command_sequence._parser.GetNext()
    self.assertEqual(command.name, "GetTitle")
    self.assertEqual(command.GetPayloadPrimitive(), {"param1": 7})
    self.assertEqual(command.session_id, _SESSION_ID)

  def testGetNextClientHeaderLine(self):
    string_buffer = io.StringIO(_PAYLOAD_SCRIPT)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertEqual(command_sequence._parser._GetNextClientHeaderLine(),
                     ("[1531428670.535][INFO]: [b15232d5497ec0d8300a5a1ea56f33ce]"
                      " RESPONSE GetTitle {\n"))

  def testGetNextClientHeaderLine_readableTimeLinux(self):
    # Readable timestamps get their inner space replaced by an underscore.
    string_buffer = io.StringIO(_PAYLOAD_READABLE_TIME_LINUX)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertEqual(command_sequence._parser._GetNextClientHeaderLine(),
                     ("[08-12-2019_15:45:34.824002][INFO]:"
                      " [b15232d5497ec0d8300a5a1ea56f33ce] RESPONSE GetTitle {\n"))

  def testGetNextClientHeaderLine_readableTimeWindows(self):
    string_buffer = io.StringIO(_PAYLOAD_READABLE_TIME_WINDOWS)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertEqual(command_sequence._parser._GetNextClientHeaderLine(),
                     ("[08-12-2019_15:45:34.824][INFO]:"
                      " [b15232d5497ec0d8300a5a1ea56f33ce] RESPONSE GetTitle {\n"))

  def testIngestLoggedResponse(self):
    string_buffer = io.StringIO(_RESPONSE_ONLY)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    response = command_sequence._parser.GetNext()
    self.assertEqual(response.name, "GetTitle")
    self.assertEqual(response.GetPayloadPrimitive(), {"param2": 42})
    self.assertEqual(response.session_id, _SESSION_ID)

  # --- Ingesting real (live) responses ---

  def testIngestRealResponseInitSession(self):
    real_resp = {'value': {
        'sessionId': 'b15232d5497ec0d8300a5a1ea56f33ce',
        'capabilities': {
            'browserVersion': '76.0.3809.100',
            'browserName': 'chrome',
        }
    }}
    command_sequence = client_replay.CommandSequence()
    command_sequence._staged_logged_session_id = _SESSION_ID_ALT
    command_sequence._IngestRealResponse(real_resp)
    # The logged session id gets mapped onto the real one and unstaged.
    self.assertEqual(
        command_sequence._id_map[_SESSION_ID_ALT], _SESSION_ID)
    self.assertEqual(command_sequence._staged_logged_session_id, None)

  def testIngestRealResponseNone(self):
    real_resp = {'value': None}
    command_sequence = client_replay.CommandSequence()
    command_sequence._IngestRealResponse(real_resp)
    self.assertEqual(command_sequence._last_response, None)

  def testIngestRealResponseInt(self):
    real_resp = {'value': 1}
    command_sequence = client_replay.CommandSequence()
    command_sequence._IngestRealResponse(real_resp)
    # last response is not changed by _IngestRealResponse, but we want to
    # verify that an int response content does not cause an error.
    self.assertEqual(command_sequence._last_response, None)

  # --- Payload extraction ---

  def testGetPayload_simple(self):
    string_buffer = io.StringIO(_RESPONSE_ONLY)
    header = string_buffer.readline()
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    payload_string = command_sequence._parser._GetPayloadString(header)
    self.assertEqual(payload_string, '{"param2": 42\n}\n')

  def testGetPayload_script(self):
    string_buffer = io.StringIO(_PAYLOAD_SCRIPT)
    header = string_buffer.readline()
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    payload_string = command_sequence._parser._GetPayloadString(header)
    self.assertEqual(payload_string, '{"param2": "function(){func()}"\n}\n')

  def testGetPayload_badscript(self):
    # Braces inside quoted strings must not confuse payload extraction.
    string_buffer = io.StringIO(_BAD_SCRIPT)
    header = string_buffer.readline()
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    payload_string = command_sequence._parser._GetPayloadString(header)
    self.assertEqual(payload_string, '{"param2": "))}\\})}/{)}({(})}"\n}\n')

  # --- Id substitution helpers ---

  def testSubstitutePayloadIds_element(self):
    id_map = {"0.78-1": "0.00-0", "0.78-2": "0.00-1"}
    substituted = {"ELEMENT": "0.78-1"}
    client_replay._ReplaceWindowAndElementIds(substituted, id_map)
    self.assertEqual(substituted, {"ELEMENT": "0.00-0"})

  def testSubstitutePayloadIds_elements(self):
    id_map = {"0.78-1": "0.00-0", "0.78-2": "0.00-1"}
    substituted = [{"ELEMENT": "0.78-1"}, {"ELEMENT": "0.78-2"}]
    client_replay._ReplaceWindowAndElementIds(substituted, id_map)
    self.assertEqual(substituted,
                     [{"ELEMENT": "0.00-0"}, {"ELEMENT": "0.00-1"}])

  def testSubstitutePayloadIds_windows(self):
    id_map = {"CDwindow-98": "CDwindow-00"}
    substituted = ["CDwindow-98"]
    client_replay._ReplaceWindowAndElementIds(substituted, id_map)
    self.assertEqual(substituted, ["CDwindow-00"])

  def testSubstitutePayloadIds_recursion(self):
    id_map = {"0.78-1": "0.00-0", "0.78-2": "0.00-1"}
    substituted = {"args": [{"1": "0.78-1", "2": "0.78-2"}]}
    client_replay._ReplaceWindowAndElementIds(substituted, id_map)
    self.assertEqual(substituted, {"args": [{"1": "0.00-0", "2": "0.00-1"}]})

  def testGetAnyElementids_window(self):
    ids = client_replay._GetAnyElementIds(_WINDOW_IDS)
    self.assertEqual(ids, ["CDwindow-00", "CDwindow-98"])

  def testGetAnyElementids_element(self):
    ids = client_replay._GetAnyElementIds(_ELEMENT_ID)
    self.assertEqual(ids, ["0.87-1"])

  def testGetAnyElementids_elements(self):
    ids = client_replay._GetAnyElementIds(_ELEMENT_IDS)
    self.assertEqual(ids, ["0.87-1", "0.87-2"])

  def testGetAnyElementids_string(self):
    ids = client_replay._GetAnyElementIds("true")
    self.assertEqual(ids, None)

  def testGetAnyElementids_invalid(self):
    ids = client_replay._GetAnyElementIds("[ gibberish ]")
    self.assertEqual(ids, None)

  # --- Brace counting (quote-aware) ---

  def testCountChar_positive(self):
    self.assertEqual(client_replay._CountChar("{;{;{)]", "{", "}"), 3)

  def testCountChar_onebrace(self):
    self.assertEqual(client_replay._CountChar("{", "{", "}"), 1)

  def testCountChar_nothing(self):
    self.assertEqual(client_replay._CountChar("", "{", "}"), 0)

  def testCountChar_negative(self):
    self.assertEqual(client_replay._CountChar("}){((}{(/)}=}", "{", "}"), -2)

  def testCountChar_quotes(self):
    # Characters inside double quotes must not be counted.
    self.assertEqual(
        client_replay._CountChar('[[][]"[[]]]]]"[[]', "[", "]"), 2)

  # --- URL / binary replacement ---

  def testReplaceUrl_simple(self):
    base_url = "https://base.url.test.com:0000"
    payload = {"url": "https://localhost:12345/"}
    client_replay._ReplaceUrl(payload, base_url)
    self.assertEqual(payload, {"url": "https://base.url.test.com:0000/"})

  def testReplaceUrl_nothing(self):
    payload = {"url": "https://localhost:12345/"}
    client_replay._ReplaceUrl(payload, None)
    self.assertEqual(payload, {"url": "https://localhost:12345/"})

  def testReplaceBinary(self):
    payload_dict = {
        "desiredCapabilities": {
            "goog:chromeOptions": {
                "binary": "/path/to/logged binary/with spaces/"
            },
            "other_things": ["some", "uninteresting", "strings"]
        }
    }
    payload_replaced = {
        "desiredCapabilities": {
            "goog:chromeOptions": {
                "binary": "replacement_binary"
            },
            "other_things": ["some", "uninteresting", "strings"]
        }
    }
    client_replay._ReplaceBinary(payload_dict, "replacement_binary")
    self.assertEqual(payload_replaced, payload_dict)

  def testReplaceBinary_none(self):
    # With no replacement binary the logged one is removed entirely.
    payload_dict = {
        "desiredCapabilities": {
            "goog:chromeOptions": {
                "binary": "/path/to/logged binary/with spaces/"
            },
            "other_things": ["some", "uninteresting", "strings"]
        }
    }
    payload_replaced = {
        "desiredCapabilities": {
            "goog:chromeOptions": {},
            "other_things": ["some", "uninteresting", "strings"]
        }
    }
    client_replay._ReplaceBinary(payload_dict, None)
    self.assertEqual(payload_replaced, payload_dict)

  def testReplaceBinary_nocapabilities(self):
    # chromeOptions is created when absent.
    payload_dict = {"desiredCapabilities": {}}
    payload_replaced = {
        "desiredCapabilities": {
            "goog:chromeOptions": {
                "binary": "replacement_binary"
            }
        }
    }
    client_replay._ReplaceBinary(payload_dict, "replacement_binary")
    self.assertEqual(payload_replaced, payload_dict)

  # --- Header field extraction and entry classification ---

  def testGetCommandName(self):
    self.assertEqual(client_replay._GetCommandName(_PAYLOAD_SCRIPT),
                     "GetTitle")

  def testGetSessionId(self):
    self.assertEqual(client_replay._GetSessionId(_PAYLOAD_SCRIPT),
                     _SESSION_ID)

  def testParseCommand_true(self):
    string_buffer = io.StringIO(_COMMAND_ONLY)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertTrue(command_sequence._parser.GetNext().IsCommand())

  def testParseCommand_false(self):
    string_buffer = io.StringIO(_RESPONSE_ONLY)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertFalse(command_sequence._parser.GetNext().IsCommand())

  def testParseResponse_true(self):
    string_buffer = io.StringIO(_RESPONSE_ONLY)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertTrue(command_sequence._parser.GetNext().IsResponse())

  def testParseResponse_false(self):
    string_buffer = io.StringIO(_COMMAND_ONLY)
    command_sequence = client_replay.CommandSequence()
    command_sequence._parser = client_replay._Parser(string_buffer)
    self.assertFalse(command_sequence._parser.GetNext().IsResponse())

  def testHandleGetSessions(self):
    # GetSessions entries from multiple sessions are merged into one
    # synthetic response; the following entry is saved for the next call.
    string_buffer = io.StringIO(_MULTI_SESSION)
    command_sequence = client_replay.CommandSequence(string_buffer)
    first_command = command_sequence._parser.GetNext()
    command = command_sequence._HandleGetSessions(
        first_command)
    responses = command_sequence._last_response
    self.assertEqual(command.name, "GetSessions")
    self.assertEqual(command.GetPayloadPrimitive(), {})
    self.assertEqual(command.session_id, _SESSION_ID)
    self.assertEqual(responses.name, "GetSessions")
    self.assertEqual(responses.GetPayloadPrimitive(), [
        {
            "capabilities": {"param2": 42},
            "id": _SESSION_ID
        }, {
            "capabilities": {"param2": 42},
            "id": _SESSION_ID_ALT
        }
    ])
    self.assertEqual(responses.session_id, "")
    self.assertEqual(command_sequence._parser._saved_log_entry.name, "GetTitle")
def main():
  """Parse command-line options, run the (filtered) suite, report results."""
  option_parser = optparse.OptionParser()
  option_parser.add_option(
      "", "--filter", type="string", default="*",
      help=('Filter for specifying what tests to run, "*" will run all. E.g., '
            "*testReplaceUrl_nothing"))
  option_parser.add_option(
      "", "--isolated-script-test-output",
      help="JSON output file used by swarming")
  # this option is ignored
  option_parser.add_option("--isolated-script-test-perf-output", type=str)
  opts, _ = option_parser.parse_args()

  module_suite = unittest.defaultTestLoader.loadTestsFromModule(
      sys.modules[__name__])
  filtered_suite = unittest_util.FilterTestSuite(module_suite, opts.filter)
  runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
  run_result = runner.run(filtered_suite)

  if opts.isolated_script_test_output:
    util.WriteResultToJSONFile([filtered_suite], [run_result],
                               opts.isolated_script_test_output)
  # Non-zero exit code when anything failed or errored.
  sys.exit(len(run_result.failures) + len(run_result.errors))
# Run the test suite only when invoked as a script (not when imported).
if __name__ == "__main__":
  main()
| |
# -*- test-case-name: twisted.test.test_log -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Logging and metrics infrastructure.
"""
from __future__ import division
import sys
import time
import warnings
from datetime import datetime
import logging
from zope.interface import Interface
from twisted.python import util, context, reflect
class ILogContext:
    """
    Actually, this interface is just a synonym for the dictionary interface,
    but it serves as a key for the default information in a log.

    It is used as the key under which default log metadata is stored in
    L{twisted.python.context} (see the C{context.setDefault} call in this
    module).

    I do not inherit from C{Interface} because the world is a cruel place.
    """
class ILogObserver(Interface):
    """
    An observer which can do something with log events.

    Given that most log observers are actually bound methods, it's okay to not
    explicitly declare provision of this interface.
    """
    # NOTE: this is a zope.interface method declaration, not an implementation
    # -- the signature deliberately omits 'self'.
    def __call__(eventDict):
        """
        Log an event.

        @type eventDict: C{dict} with C{str} keys.
        @param eventDict: A dictionary with arbitrary keys. However, these
            keys are often available:
              - C{message}: A C{tuple} of C{str} containing messages to be
                logged.
              - C{system}: A C{str} which indicates the "system" which is
                generating this event.
              - C{isError}: A C{bool} indicating whether this event represents
                an error.
              - C{failure}: A L{failure.Failure} instance
              - C{why}: Used as header of the traceback in case of errors.
              - C{format}: A string format used in place of C{message} to
                customize the event. The intent is for the observer to format
                a message by doing something like C{format % eventDict}.
        """
# Seed the default log context: events are not errors and come from an
# unknown ("-") system unless a caller overrides these keys.
context.setDefault(ILogContext,
                   {"isError": 0,
                    "system": "-"})
def callWithContext(ctx, func, *args, **kw):
    """
    Invoke C{func(*args, **kw)} with the current log context augmented by
    the keys in C{ctx}, and return its result.
    """
    merged = context.get(ILogContext).copy()
    merged.update(ctx)
    return context.call({ILogContext: merged}, func, *args, **kw)
def callWithLogger(logger, func, *args, **kw):
    """
    Utility method which wraps a function in a try:/except:, logs a failure if
    one occurs, and uses the system's logPrefix.
    """
    try:
        lp = logger.logPrefix()
    except KeyboardInterrupt:
        raise
    except:
        # A broken logPrefix() must not prevent the wrapped call; log the
        # failure and fall back to a placeholder prefix.
        lp = '(buggy logPrefix method)'
        err(system=lp)
    try:
        return callWithContext({"system": lp}, func, *args, **kw)
    except KeyboardInterrupt:
        raise
    except:
        # Deliberate catch-all: any failure of func is logged, not raised.
        err(system=lp)
# When true, failures logged through err() are also appended to _keptErrors
# so they can be inspected later (test-suite support).
_keepErrors = 0
_keptErrors = []
# Failure types that should not be kept even when _keepErrors is set.
_ignoreErrors = []
def err(_stuff=None, _why=None, **kw):
    """
    Write a failure to the log.
    The C{_stuff} and C{_why} parameters use an underscore prefix to lessen
    the chance of colliding with a keyword argument the application wishes
    to pass. It is intended that they be supplied with arguments passed
    positionally, not by keyword.
    @param _stuff: The failure to log. If C{_stuff} is C{None} a new
        L{Failure} will be created from the current exception state. If
        C{_stuff} is an C{Exception} instance it will be wrapped in a
        L{Failure}.
    @type _stuff: C{NoneType}, C{Exception}, or L{Failure}.
    @param _why: The source of this failure. This will be logged along with
        C{_stuff} and should describe the context in which the failure
        occurred.
    @type _why: C{str}
    """
    if _stuff is None:
        # Capture whatever exception is currently being handled.
        _stuff = failure.Failure()
    if isinstance(_stuff, failure.Failure):
        if _keepErrors:
            # Retain the failure for later inspection unless its type is
            # listed in _ignoreErrors (for/else: keep only when no ignore
            # pattern matched).
            if _ignoreErrors:
                keep = 0
                for err in _ignoreErrors:
                    r = _stuff.check(err)
                    if r:
                        keep = 0
                        break
                else:
                    keep = 1
                if keep:
                    _keptErrors.append(_stuff)
            else:
                _keptErrors.append(_stuff)
        msg(failure=_stuff, why=_why, isError=1, **kw)
    elif isinstance(_stuff, Exception):
        msg(failure=failure.Failure(_stuff), why=_why, isError=1, **kw)
    else:
        # Not an exception/failure at all: log its repr as the error text.
        msg(repr(_stuff), why=_why, isError=1, **kw)
deferr = err
class Logger:
    """
    Base class for objects that 'own' a log; use by subclassing.
    Subclasses customise their log output by overriding L{logPrefix}.
    """
    def logPrefix(self):
        """
        Return the prefix inserted in front of every logged line.
        May be invoked more times than there are output lines; the default
        implementation returns a plain dash.
        """
        return '-'
class LogPublisher:
    """
    Class for singleton log message publishing.
    """
    # threadable.synchronize(LogPublisher) (module bottom) wraps the methods
    # listed here in a lock, making msg() safe to call from any thread.
    synchronized = ['msg']
    def __init__(self):
        # Callables invoked with each event dict; see ILogObserver.
        self.observers = []
    def addObserver(self, other):
        """
        Add a new observer.
        @type other: Provider of L{ILogObserver}
        @param other: A callable object that will be called with each new log
            message (a dict).
        """
        assert callable(other)
        self.observers.append(other)
    def removeObserver(self, other):
        """
        Remove an observer.
        """
        self.observers.remove(other)
    def msg(self, *message, **kw):
        """
        Log a new message.
        For example::
        >>> log.msg('Hello, world.')
        In particular, you MUST avoid the forms::
        >>> log.msg(u'Hello, world.')
        >>> log.msg('Hello ', 'world.')
        These forms work (sometimes) by accident and will be disabled
        entirely in the future.
        """
        # Build the event from the ambient context, then the keyword
        # arguments, then the positional message tuple and a timestamp.
        actualEventDict = (context.get(ILogContext) or {}).copy()
        actualEventDict.update(kw)
        actualEventDict['message'] = message
        actualEventDict['time'] = time.time()
        # Iterate backwards so an observer removing itself during delivery
        # does not shift the indices of observers not yet visited.
        for i in xrange(len(self.observers) - 1, -1, -1):
            try:
                self.observers[i](actualEventDict)
            except KeyboardInterrupt:
                # Don't swallow keyboard interrupt!
                raise
            except UnicodeEncodeError:
                raise
            except:
                # The observer failed: temporarily replace it with a no-op
                # so logging the failure below cannot recurse into it.
                observer = self.observers[i]
                self.observers[i] = lambda event: None
                try:
                    self._err(failure.Failure(),
                              "Log observer %s failed." % (observer,))
                except:
                    # Sometimes err() will throw an exception,
                    # e.g. RuntimeError due to blowing the stack; if that
                    # happens, there's not much we can do...
                    pass
                self.observers[i] = observer
    def _err(self, failure, why):
        """
        Log a failure.
        Similar in functionality to the global {err} function, but the failure
        gets published only to observers attached to this publisher.
        @param failure: The failure to log.
        @type failure: L{Failure}.
        @param why: The source of this failure. This will be logged along with
            the C{failure} and should describe the context in which the
            failure occurred.
        @type why: C{str}
        """
        self.msg(failure=failure, why=why, isError=1)
    def showwarning(self, message, category, filename, lineno, file=None,
                    line=None):
        """
        Twisted-enabled wrapper around L{warnings.showwarning}.
        If C{file} is C{None}, the default behaviour is to emit the warning to
        the log system, otherwise the original L{warnings.showwarning} Python
        function is called.
        """
        if file is None:
            self.msg(warning=message, category=reflect.qual(category),
                     filename=filename, lineno=lineno,
                     format="%(filename)s:%(lineno)s: %(category)s: %(warning)s")
        else:
            # Python < 2.6 showwarning() has no ``line`` parameter.
            if sys.version_info < (2, 6):
                _oldshowwarning(message, category, filename, lineno, file)
            else:
                _oldshowwarning(message, category, filename, lineno, file, line)
# Module-level singleton publisher. The NameError guard keeps reload() from
# replacing it (which would orphan every registered observer); the public
# module-level functions are simply bound methods of the singleton.
try:
    theLogPublisher
except NameError:
    theLogPublisher = LogPublisher()
    addObserver = theLogPublisher.addObserver
    removeObserver = theLogPublisher.removeObserver
    msg = theLogPublisher.msg
    showwarning = theLogPublisher.showwarning
def _safeFormat(fmtString, fmtDict):
"""
Try to format the string C{fmtString} using C{fmtDict} arguments,
swallowing all errors to always return a string.
"""
# There's a way we could make this if not safer at least more
# informative: perhaps some sort of str/repr wrapper objects
# could be wrapped around the things inside of C{fmtDict}. That way
# if the event dict contains an object with a bad __repr__, we
# can only cry about that individual object instead of the
# entire event dict.
try:
text = fmtString % fmtDict
except KeyboardInterrupt:
raise
except:
try:
text = ('Invalid format string or unformattable object in log message: %r, %s' % (fmtString, fmtDict))
except:
try:
text = 'UNFORMATTABLE OBJECT WRITTEN TO LOG with fmt %r, MESSAGE LOST' % (fmtString,)
except:
text = 'PATHOLOGICAL ERROR IN BOTH FORMAT STRING AND MESSAGE DETAILS, MESSAGE LOST'
return text
def textFromEventDict(eventDict):
    """
    Extract the textual content of a log event dict, or C{None} when the
    event cannot be rendered.
    Recognised keys:
      - C{message}: tuple of strings; joined with spaces when non-empty
        (takes priority over everything else).
      - C{isError} plus C{failure}: rendered as (C{why} or
        'Unhandled Error') followed by the failure's traceback.
      - C{format}: %-style format string applied to the whole event dict.
    Any other keys participate only through C{format}.
    """
    parts = eventDict['message']
    if parts:
        return ' '.join(map(reflect.safe_str, parts))
    if eventDict['isError'] and 'failure' in eventDict:
        header = eventDict.get('why') or 'Unhandled Error'
        return header + '\n' + eventDict['failure'].getTraceback()
    if 'format' in eventDict:
        return _safeFormat(eventDict['format'], eventDict)
    # we don't know how to log this
    return None
class FileLogObserver:
    """
    Log observer that writes to a file-like object.
    @type timeFormat: C{str} or C{NoneType}
    @ivar timeFormat: If not C{None}, the format string passed to strftime().
    """
    timeFormat = None
    def __init__(self, f):
        # Bind the file's methods directly to skip attribute lookups on
        # every event.
        self.write = f.write
        self.flush = f.flush
    def getTimezoneOffset(self, when):
        """
        Return the current local timezone offset from UTC.
        @type when: C{int}
        @param when: POSIX (ie, UTC) timestamp for which to find the offset.
        @rtype: C{int}
        @return: The number of seconds offset from UTC. West is positive,
            east is negative.
        """
        # Difference between the UTC and local renderings of the same
        # timestamp; this accounts for DST at that instant.
        offset = datetime.utcfromtimestamp(when) - datetime.fromtimestamp(when)
        return offset.days * (60 * 60 * 24) + offset.seconds
    def formatTime(self, when):
        """
        Format the given UTC value as a string representing that time in the
        local timezone.
        By default it's formatted as a ISO8601-like string (ISO8601 date and
        ISO8601 time separated by a space). It can be customized using the
        C{timeFormat} attribute, which will be used as input for the underlying
        C{time.strftime} call.
        @type when: C{int}
        @param when: POSIX (ie, UTC) timestamp for which to find the offset.
        @rtype: C{str}
        """
        if self.timeFormat is not None:
            return time.strftime(self.timeFormat, time.localtime(when))
        # Shift into local time and render with a +HHMM/-HHMM suffix.
        tzOffset = -self.getTimezoneOffset(when)
        when = datetime.utcfromtimestamp(when + tzOffset)
        tzHour = abs(int(tzOffset / 60 / 60))
        tzMin = abs(int(tzOffset / 60 % 60))
        if tzOffset < 0:
            tzSign = '-'
        else:
            tzSign = '+'
        return '%d-%02d-%02d %02d:%02d:%02d%s%02d%02d' % (
            when.year, when.month, when.day,
            when.hour, when.minute, when.second,
            tzSign, tzHour, tzMin)
    def emit(self, eventDict):
        # Events with no extractable text (see textFromEventDict) are dropped.
        text = textFromEventDict(eventDict)
        if text is None:
            return
        timeStr = self.formatTime(eventDict['time'])
        # Indent continuation lines so multi-line messages stay readable.
        fmtDict = {'system': eventDict['system'], 'text': text.replace("\n", "\n\t")}
        msgStr = _safeFormat("[%(system)s] %(text)s\n", fmtDict)
        # untilConcludes retries the call if it is interrupted by a signal.
        util.untilConcludes(self.write, timeStr + " " + msgStr)
        util.untilConcludes(self.flush) # Hoorj!
    def start(self):
        """
        Start observing log events.
        """
        addObserver(self.emit)
    def stop(self):
        """
        Stop observing log events.
        """
        removeObserver(self.emit)
class PythonLoggingObserver(object):
    """
    Forward twisted log events to the Python standard library L{logging}
    module.
    WARNING: specific logging configurations (example: network) can lead to
    a blocking system. Nothing is done here to prevent that, so be sure to not
    use this: code within Twisted, such as twisted.web, assumes that logging
    does not block.
    """
    def __init__(self, loggerName="twisted"):
        """
        @param loggerName: identifier used for getting logger.
        @type loggerName: C{str}
        """
        self.logger = logging.getLogger(loggerName)
    def emit(self, eventDict):
        """
        Receive a twisted log entry, format it and bridge it to python.
        By default the logging level used is info; log.err produces error
        level, and you can customize the level by using the C{logLevel} key::
        >>> log.msg('debugging', logLevel=logging.DEBUG)
        """
        # An explicit logLevel wins; otherwise map isError to ERROR/INFO.
        if 'logLevel' in eventDict:
            level = eventDict['logLevel']
        else:
            level = logging.ERROR if eventDict['isError'] else logging.INFO
        text = textFromEventDict(eventDict)
        if text is not None:
            self.logger.log(level, text)
    def start(self):
        """
        Begin forwarding log events to Python logging.
        """
        addObserver(self.emit)
    def stop(self):
        """
        Stop forwarding log events.
        """
        removeObserver(self.emit)
class StdioOnnaStick:
    """
    Class that pretends to be stdout/err, and turns writes into log messages.
    @ivar isError: boolean indicating whether this is stderr, in which cases
        log messages will be logged as errors.
    @ivar encoding: unicode encoding used to encode any unicode strings
        written to this object.
    """
    # File-object protocol attributes, for code that pokes at
    # sys.stdout/sys.stderr directly.
    closed = 0
    softspace = 0
    mode = 'wb'
    name = '<stdio (log)>'
    def __init__(self, isError=0, encoding=None):
        self.isError = isError
        if encoding is None:
            encoding = sys.getdefaultencoding()
        self.encoding = encoding
        # Holds the current, not-yet-newline-terminated line fragment.
        self.buf = ''
    def close(self):
        pass
    def fileno(self):
        return -1
    def flush(self):
        pass
    def read(self):
        raise IOError("can't read from the log!")
    # Every input operation fails the same way: this is write-only.
    readline = read
    readlines = read
    seek = read
    tell = read
    def write(self, data):
        # Python 2: encode unicode to bytes before buffering.
        if isinstance(data, unicode):
            data = data.encode(self.encoding)
        # Emit one log message per completed line; keep the trailing
        # partial line buffered for the next write().
        d = (self.buf + data).split('\n')
        self.buf = d[-1]
        messages = d[0:-1]
        for message in messages:
            msg(message, printed=1, isError=self.isError)
    def writelines(self, lines):
        for line in lines:
            if isinstance(line, unicode):
                line = line.encode(self.encoding)
            msg(line, printed=1, isError=self.isError)
# Remember Python's original warnings.showwarning so it can be delegated to
# and restored later; the NameError guard makes this survive reload().
try:
    _oldshowwarning
except NameError:
    _oldshowwarning = None
def startLogging(file, *a, **kw):
    """
    Initialize logging to a specified file.
    @return: A L{FileLogObserver} if a new observer is added, None otherwise.
    """
    # Writing the log into a log-backed stdio proxy would feed the log
    # into itself; refuse to do that.
    if isinstance(file, StdioOnnaStick):
        return None
    observer = FileLogObserver(file)
    startLoggingWithObserver(observer.emit, *a, **kw)
    return observer
def startLoggingWithObserver(observer, setStdout=1):
    """
    Initialize logging to a specified observer. If setStdout is true
    (defaults to yes), also redirect sys.stdout and sys.stderr
    to the specified file.
    """
    global defaultObserver, _oldshowwarning
    if not _oldshowwarning:
        # First call: route Python warnings through the log system, keeping
        # the original hook so it can still be delegated to.
        _oldshowwarning = warnings.showwarning
        warnings.showwarning = showwarning
    if defaultObserver:
        # The stderr fallback observer is only needed until real logging is
        # configured; retire it now.
        defaultObserver.stop()
        defaultObserver = None
    addObserver(observer)
    msg("Log opened.")
    if setStdout:
        # logfile/logerr are StdioOnnaStick proxies (defined below) that
        # turn writes into log messages.
        sys.stdout = logfile
        sys.stderr = logerr
class NullFile:
    """
    A minimal file-like object that silently discards everything.
    """
    softspace = 0
    def read(self):
        pass
    def write(self, bytes):
        pass
    def flush(self):
        pass
    def close(self):
        pass
def discardLogs():
    """
    Redirect the stdout log proxy into a sink that drops everything.
    """
    global logfile
    logfile = NullFile()
# Prevent logfile from being erased on reload. This only works in cpython.
try:
    logfile
except NameError:
    # Proxies that convert writes to sys.stdout/sys.stderr into log events
    # (errors for logerr); installed by startLoggingWithObserver().
    logfile = StdioOnnaStick(0, getattr(sys.stdout, "encoding", None))
    logerr = StdioOnnaStick(1, getattr(sys.stderr, "encoding", None))
class DefaultObserver:
    """
    Fallback observer installed before startLogging() is first called.
    Ignores every non-error message and writes error messages to sys.stderr;
    it is removed when startLogging() runs for the first time.
    """
    stderr = sys.stderr
    def _emit(self, eventDict):
        if not eventDict["isError"]:
            return
        if 'failure' in eventDict:
            text = ((eventDict.get('why') or 'Unhandled Error')
                    + '\n' + eventDict['failure'].getTraceback())
        else:
            text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
        self.stderr.write(text)
        self.stderr.flush()
    def start(self):
        addObserver(self._emit)
    def stop(self):
        removeObserver(self._emit)
# Some more sibling imports, at the bottom and unqualified to avoid
# unresolvable circularity
import threadable, failure
# Wrap the methods named in LogPublisher.synchronized in a lock so msg()
# is safe across threads.
threadable.synchronize(LogPublisher)
# Install the stderr fallback observer exactly once; the NameError guard
# keeps reload() from installing a second one.
try:
    defaultObserver
except NameError:
    defaultObserver = DefaultObserver()
    defaultObserver.start()
| |
import numpy as np
def art(sinogram: np.ndarray, angles: np.ndarray, initial: np.ndarray = None,
        iterations: int = 1, count=None, max_count=None) -> np.ndarray:
    """Algebraic Reconstruction Technique
    The Algebraic Reconstruction Technique (ART) iteratively
    computes the inverse of the Radon transform in two dimensions.
    The reconstruction technique uses *rays* of the diameter of
    one pixel to iteratively solve the system of linear equations
    that describe the projection process. The binary weighting
    factors are
    - 1, if the center of a pixel is within the *ray*
    - 0, else
    Parameters
    ----------
    sinogram: ndarray, shape (A,N)
        Two-dimensional sinogram of line recordings.
    angles: ndarray, length A
        Angular positions of the `sinogram` in radians. The angles
        at which the sinogram slices were recorded do not have to be
        distributed equidistantly as in :func:`backproject`.
        The angles are internally converted to modulo PI.
    initial: ndarray, shape (N,N), optional
        The initial guess for the solution.
    iterations: int
        Number of iterations to perform.
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    Returns
    -------
    out: ndarray, shape (N,N)
        The reconstructed image.
    Notes
    -----
    For theoretical background, see
    Kak, A. C., & Slaney, M.. *Principles of Computerized
    Tomographic Imaging*, SIAM, (2001)
    Sec. 7.2:
    *"ART reconstructions usually suffer from salt and pepper noise,
    which is caused by the inconsistencies introduced in the set of
    equations by the approximations commonly used for*
    :math:`w_{ik}` *'s."*
    """
    # make sure `iterations` is an integer
    iterations = int(iterations)
    N = sinogram.shape[1]
    A = angles.shape[0]
    if max_count is not None:
        with max_count.get_lock():
            # One step per (angle, iteration) pair plus one setup step.
            max_count.value += A * iterations + 1
    # Meshgrid for weights
    center = N / 2.0
    x = np.arange(N) - center + .5
    X, Y = np.meshgrid(x, x)
    # initiate array
    if initial is None:
        f = np.zeros((N, N), dtype=np.dtype(float))
    else:
        # `1 *` forces a copy; transpose + flip matches the internal
        # orientation (undone again when returning).
        f = 1 * initial.transpose()[::-1]
    # Make sure all angles are in [0,PI)
    # NOTE(review): this normalisation writes back into the caller's
    # `angles` array in place — confirm that side effect is intended.
    for i in np.arange(A):
        if angles[i] > np.pi:
            offset = np.floor(angles[i] / np.pi)
            angles[i] -= offset * np.pi
        elif angles[i] < 0:
            offset = np.floor(np.abs((angles[i] / np.pi))) + 1
            angles[i] += offset * np.pi
        if angles[i] == np.pi:
            angles[i] = 0
    # These lambda functions return the two x- and y- values of two
    # points projected onto a line along the x- and y-axis, having the
    # angle angle_k in radians.
    # Sourcing this out to here makes things a little faster.
    def GetLambdaLines(angle_k):
        if angle_k == 0:
            # Divide by zero error for tan
            # We have horizontal lines (parallel to x)
            def line1(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k): return (
                x2 - .5, ypi1 * np.ones(y2.shape))
            def line2(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k): return (
                x2 + .5, ypi2 * np.ones(y2.shape))
        elif angle_k == np.pi / 2:
            # We have vertical lines (parallel to y)
            def line1(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k): return (
                xpi1 * np.ones(x2.shape), y2 + .5)
            def line2(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k): return (
                xpi2 * np.ones(x2.shape), y2 - .5)
        elif angle_k < np.pi / 2:
            # CASE 1
            # Compute any other positions on the lines from the given things
            def line1(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k):
                return ((y2 - ypi1) / np.tan(angle_k) + xpi1,
                        (x2 - xpi1) * np.tan(angle_k) + ypi1)
            # def line2(x2,y2):
            #    y = (x2-xpi2)/np.tan(angle_k) - ypi2
            #    x = (y2-ypi2)*np.tan(angle_k) - xpi2
            #    return x,y
            def line2(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k):
                return ((y2 - ypi2) / np.tan(angle_k) + xpi2,
                        (x2 - xpi2) * np.tan(angle_k) + ypi2)
        else:
            # CASE 2: Switch x-output - only for speed.
            # Not very obvious - possibly a hack.
            # Compute any other positions on the lines from the given things
            def line1(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k):
                return ((y2 - ypi2) / np.tan(angle_k) + xpi2,
                        (x2 - xpi1) * np.tan(angle_k) + ypi1)
            # def line2(x2,y2):
            #    y = (x2-xpi2)/np.tan(angle_k) - ypi2
            #    x = (y2-ypi2)*np.tan(angle_k) - xpi2
            #    return x,y
            def line2(x2, y2, xpi1, ypi1, xpi2, ypi2, angle_k):
                return ((y2 - ypi1) / np.tan(angle_k) + xpi1,
                        (x2 - xpi2) * np.tan(angle_k) + ypi2)
        return line1, line2
    # Sort angles?
    # This could increase the speed of convergence.
    # f[j] is consistent with Kak, Slaney
    f = f.flatten()
    if count is not None:
        with count.get_lock():
            count.value += 1
    for iteration in np.arange(iterations):  # @UnusedVariable
        #
        # i iterates the rays
        # j iterates the flattened outarr
        # k iterates the angles
        #
        for k in np.arange(A):
            # From now on we work in radians
            angle_k = angles[k]
            # p[i] is consistent with Kak, Slaney
            p = sinogram[k]
            # # We do not store the binary weights for each ray:
            # # For large images this thing could get big:
            # w = np.zeros((len(p),N*N), dtype=bool)
            # # w[i][j]
            line1, line2 = GetLambdaLines(angle_k)
            # CASES for all 2 quadrants. Angles are modulo PI
            # This case stuff is dependent on the angle. We enumerate
            # pr in negative mathematical angular direction
            #
            # This is the position on the projection, centered around 0:
            pr = np.arange(len(p)) - center + .5  # radial distance,
            #
            if angle_k <= np.pi / 2:
                # 0 to PI/2
                # case == 1
                # position of each p[i] in the centered *outarr*.
                # a position of line1 in X
                x_p1 = (pr - .5) * np.sin(angle_k)
                # a position of line1 in Y
                y_p1 = -(pr - .5) * np.cos(angle_k)
                # a position of line2 in X
                x_p2 = (pr + .5) * np.sin(angle_k)
                # a position of line2 in Y
                y_p2 = -(pr + .5) * np.cos(angle_k)
            else:
                # PI/2 to PI
                # case == 2
                # position of each p[i] in the centered *outarr*.
                # a position of line1 in X
                x_p1 = (pr + .5) * np.sin(angle_k)
                # a position of line1 in Y
                y_p1 = -(pr + .5) * np.cos(angle_k)
                # a position of line2 in X
                x_p2 = (pr - .5) * np.sin(angle_k)
                # a position of line2 in Y
                y_p2 = -(pr - .5) * np.cos(angle_k)
            for i in np.arange(len(p)):
                # If the angle is zero, then we are looking at the
                # projections onto the right side. The indices are
                # enumerated from bottom to top.
                w_i = np.zeros((N, N), dtype=bool)
                # # where does the ray cut w_i?
                # xpi1 = x_p1[i]
                # ypi1 = y_p1[i]
                # xpi2 = x_p2[i]
                # ypi2 = y_p2[i]
                # Check if a value is between the two lines (within ray)
                # For each value of X and Y, compute what the values of the
                # line would be.
                xl1, yl1 = line1(
                    X, Y, x_p1[i], y_p1[i], x_p2[i], y_p2[i], angle_k)
                xl2, yl2 = line2(
                    X, Y, x_p1[i], y_p1[i], x_p2[i], y_p2[i], angle_k)
                #
                AND = np.logical_and
                # Values lie between the two lines if the following is True:
                # Furthermore we restrict ourselves to a disk.
                w_i = AND(X**2 + Y**2 < center**2,
                          AND(AND(xl1 < X, X < xl2),
                              AND(yl1 > Y, Y > yl2))).flatten()
                #
                # i iterates the rays
                # j iterates the flattened outarr
                # k iterates the angles
                # In each iteration of the angles the image is changed
                # if np.sum(w_i) != 0:
                #    f += ( p[i] - np.sum(f*w_i) )/np.sum(w_i) * w_i
                # This is faster because we don't add the zeros.
                valid = np.where(w_i)
                f[valid] += (p[i] - np.sum(f[valid])) / np.sum(w_i)
            if count is not None:
                with count.get_lock():
                    count.value += 1
    # By slicing in-place [::-1] we get rid of the inversion of the
    # image along the y-axis.
    return f.reshape((N, N))[::-1].transpose()
| |
"""Helpers for managing a pairing with a HomeKit accessory or bridge."""
import asyncio
import datetime
import logging
from homeassistant.helpers.event import async_track_time_interval
from .const import DOMAIN, HOMEKIT_ACCESSORY_DISPATCH, ENTITY_MAP
# How often to poll accessory characteristics for fresh state.
DEFAULT_SCAN_INTERVAL = datetime.timedelta(seconds=60)
# Delay before retrying after a failed connection attempt.
RETRY_INTERVAL = 60 # seconds
_LOGGER = logging.getLogger(__name__)
def get_accessory_information(accessory):
    """Obtain the accessory information service of a HomeKit device."""
    # pylint: disable=import-error
    from homekit.model.services import ServicesTypes
    from homekit.model.characteristics import CharacteristicsTypes
    info = {}
    for service in accessory["services"]:
        service_type = service["type"].upper()
        if ServicesTypes.get_short(service_type) != "accessory-information":
            continue
        for characteristic in service["characteristics"]:
            short_name = CharacteristicsTypes.get_short(characteristic["type"])
            if "value" in characteristic:
                info[short_name] = characteristic["value"]
    return info
def get_bridge_information(accessories):
    """Return the accessory info for the bridge."""
    # By HAP convention aid 1 is the bridge itself; standalone accessories
    # have no aid 1, so fall back to the first entry.
    bridge = next(
        (accessory for accessory in accessories if accessory["aid"] == 1),
        accessories[0],
    )
    return get_accessory_information(bridge)
def get_accessory_name(accessory_info):
    """Return the name field of an accessory."""
    # Prefer the human-readable name, then model, then manufacturer.
    return next(
        (accessory_info[field]
         for field in ("name", "model", "manufacturer")
         if field in accessory_info),
        None,
    )
class HKDevice:
    """A connection to a paired HomeKit accessory or bridge.
    Owns the pairing object, the cached entity map, periodic polling, and
    the mapping of HomeKit services onto Home Assistant platforms/entities.
    """
    def __init__(self, hass, config_entry, pairing_data):
        """Initialise a generic HomeKit device."""
        from homekit.controller.ip_implementation import IpPairing
        self.hass = hass
        self.config_entry = config_entry
        # We copy pairing_data because homekit_python may mutate it, but we
        # don't want to mutate a dict owned by a config entry.
        self.pairing_data = pairing_data.copy()
        self.pairing = IpPairing(self.pairing_data)
        self.accessories = {}
        self.config_num = 0
        # A list of callbacks that turn HK service metadata into entities
        self.listeners = []
        # The platforms we have forwarded the config entry to so far. If a
        # new accessory is added to a bridge we may have to load additional
        # platforms. We don't want to load all platforms up front if it's
        # just a lightbulb. And we don't want to forward a config entry
        # twice (triggers a "Config entry already set up" error)
        self.platforms = set()
        # This just tracks aid/iid pairs so we know if a HK service has been
        # mapped to a HA entity.
        self.entities = []
        # There are multiple entities sharing a single connection - only
        # allow one entity to use pairing at once.
        self.pairing_lock = asyncio.Lock()
        self.available = True
        # Dispatcher signal fired whenever polled state changes.
        self.signal_state_updated = "_".join((DOMAIN, self.unique_id, "state_updated"))
        # Current values of all characteristics homekit_controller is tracking.
        # Key is a (accessory_id, characteristic_id) tuple.
        self.current_state = {}
        self.pollable_characteristics = []
        # If this is set polling is active and can be disabled by calling
        # this method.
        self._polling_interval_remover = None
    def add_pollable_characteristics(self, characteristics):
        """Add (aid, iid) pairs that we need to poll."""
        self.pollable_characteristics.extend(characteristics)
    def remove_pollable_characteristics(self, accessory_id):
        """Remove all pollable characteristics by accessory id."""
        self.pollable_characteristics = [
            char for char in self.pollable_characteristics if char[0] != accessory_id
        ]
    def async_set_unavailable(self):
        """Mark state of all entities on this connection as unavailable."""
        self.available = False
        self.hass.helpers.dispatcher.async_dispatcher_send(self.signal_state_updated)
    async def async_setup(self):
        """Prepare to use a paired HomeKit device in homeassistant."""
        cache = self.hass.data[ENTITY_MAP].get_map(self.unique_id)
        if not cache:
            # No cached entity map: fetch it from the device. Polling is
            # only started when the fetch succeeds.
            if await self.async_refresh_entity_map(self.config_num):
                self._polling_interval_remover = async_track_time_interval(
                    self.hass, self.async_update, DEFAULT_SCAN_INTERVAL
                )
                return True
            return False
        self.accessories = cache["accessories"]
        self.config_num = cache["config_num"]
        # Ensure the Pairing object has access to the latest version of the
        # entity map.
        self.pairing.pairing_data["accessories"] = self.accessories
        self.async_load_platforms()
        self.add_entities()
        await self.async_update()
        self._polling_interval_remover = async_track_time_interval(
            self.hass, self.async_update, DEFAULT_SCAN_INTERVAL
        )
        return True
    async def async_unload(self):
        """Stop interacting with device and prepare for removal from hass."""
        if self._polling_interval_remover:
            self._polling_interval_remover()
        # Unload every platform we previously forwarded the entry to.
        unloads = []
        for platform in self.platforms:
            unloads.append(
                self.hass.config_entries.async_forward_entry_unload(
                    self.config_entry, platform
                )
            )
        results = await asyncio.gather(*unloads)
        return False not in results
    async def async_refresh_entity_map(self, config_num):
        """Handle setup of a HomeKit accessory."""
        # pylint: disable=import-error
        from homekit.exceptions import AccessoryDisconnectedError
        try:
            async with self.pairing_lock:
                # Blocking homekit call: run in the executor.
                self.accessories = await self.hass.async_add_executor_job(
                    self.pairing.list_accessories_and_characteristics
                )
        except AccessoryDisconnectedError:
            # If we fail to refresh this data then we will naturally retry
            # later when Bonjour spots c# is still not up to date.
            return
        self.hass.data[ENTITY_MAP].async_create_or_update_map(
            self.unique_id, config_num, self.accessories
        )
        self.config_num = config_num
        # For BLE, the Pairing instance relies on the entity map to map
        # aid/iid to GATT characteristics. So push it to there as well.
        self.pairing.pairing_data["accessories"] = self.accessories
        self.async_load_platforms()
        # Register and add new entities that are available
        self.add_entities()
        await self.async_update()
        return True
    def add_listener(self, add_entities_cb):
        """Add a callback to run when discovering new entities."""
        self.listeners.append(add_entities_cb)
        # Give the new listener a chance to claim services immediately.
        self._add_new_entities([add_entities_cb])
    def add_entities(self):
        """Process the entity map and create HA entities."""
        self._add_new_entities(self.listeners)
    def _add_new_entities(self, callbacks):
        """Offer each unmapped HK service to the given listener callbacks."""
        from homekit.model.services import ServicesTypes
        for accessory in self.accessories:
            aid = accessory["aid"]
            for service in accessory["services"]:
                iid = service["iid"]
                stype = ServicesTypes.get_short(service["type"].upper())
                service["stype"] = stype
                if (aid, iid) in self.entities:
                    # Don't add the same entity again
                    continue
                # First listener returning truthy claims the service.
                for listener in callbacks:
                    if listener(aid, service):
                        self.entities.append((aid, iid))
                        break
    def async_load_platforms(self):
        """Load any platforms needed by this HomeKit device."""
        from homekit.model.services import ServicesTypes
        for accessory in self.accessories:
            for service in accessory["services"]:
                stype = ServicesTypes.get_short(service["type"].upper())
                if stype not in HOMEKIT_ACCESSORY_DISPATCH:
                    continue
                platform = HOMEKIT_ACCESSORY_DISPATCH[stype]
                if platform in self.platforms:
                    # Already forwarded; forwarding twice raises in HA.
                    continue
                self.hass.async_create_task(
                    self.hass.config_entries.async_forward_entry_setup(
                        self.config_entry, platform
                    )
                )
                self.platforms.add(platform)
    async def async_update(self, now=None):
        """Poll state of all entities attached to this bridge/accessory."""
        # pylint: disable=import-error
        from homekit.exceptions import (
            AccessoryDisconnectedError,
            AccessoryNotFoundError,
            EncryptionError,
        )
        if not self.pollable_characteristics:
            _LOGGER.debug("HomeKit connection not polling any characteristics.")
            return
        _LOGGER.debug("Starting HomeKit controller update")
        try:
            new_values_dict = await self.get_characteristics(
                self.pollable_characteristics
            )
        except AccessoryNotFoundError:
            # Not only did the connection fail, but also the accessory is not
            # visible on the network.
            self.async_set_unavailable()
            return
        except (AccessoryDisconnectedError, EncryptionError):
            # Temporary connection failure. Device is still available but our
            # connection was dropped.
            return
        self.available = True
        for (aid, cid), value in new_values_dict.items():
            accessory = self.current_state.setdefault(aid, {})
            accessory[cid] = value
        # Notify entities that fresh state is in current_state.
        self.hass.helpers.dispatcher.async_dispatcher_send(self.signal_state_updated)
        _LOGGER.debug("Finished HomeKit controller update")
    async def get_characteristics(self, *args, **kwargs):
        """Read latest state from homekit accessory."""
        # Serialise access: the pairing object is not safe for concurrent use.
        async with self.pairing_lock:
            chars = await self.hass.async_add_executor_job(
                self.pairing.get_characteristics, *args, **kwargs
            )
        return chars
    async def put_characteristics(self, characteristics):
        """Control a HomeKit device state from Home Assistant."""
        chars = []
        for row in characteristics:
            chars.append((row["aid"], row["iid"], row["value"]))
        async with self.pairing_lock:
            await self.hass.async_add_executor_job(
                self.pairing.put_characteristics, chars
            )
    @property
    def unique_id(self):
        """
        Return a unique id for this accessory or bridge.
        This id is random and will change if a device undergoes a hard reset.
        """
        return self.pairing_data["AccessoryPairingID"]
    @property
    def connection_info(self):
        """Return accessory information for the main accessory."""
        return get_bridge_information(self.accessories)
    @property
    def name(self):
        """Name of the bridge accessory."""
        return get_accessory_name(self.connection_info) or self.unique_id
| |
"""
Copyright (c) 2019 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, eshook@gmail.edu)
@contributors: <Contribute and add your name here!>
"""
# Load forest
from forest import *
# PyCUDA imports
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pycuda.curandom as curandom
from pycuda.compiler import SourceModule
from pycuda.characterize import sizeof
# Other imports
import matplotlib.pyplot as plt
from os import path
import sys
# Switch Engine to GPU
Config.engine = cuda_engine
print("Running Engine", Config.engine)

# .tif files to use as initial population and survival layer probabilities
initial_population_file = '/home/iaa/bures024/Forest/2000_init_pop.tif'
survival_probabilities_file = '/home/iaa/bures024/Forest/2000_surv_probs.tif'

# Make sure both input files exist before doing any GPU work.
# (Idiom fix: use boolean context instead of "== False".)
if not (path.isfile(initial_population_file) and path.isfile(survival_probabilities_file)):
    print('Error. File not found. Exiting program...')
    sys.exit()
# Load initial population and survival layer probabilities as numpy arrays
initial_population = plt.imread(initial_population_file).astype(np.float32)
survival_probabilities = plt.imread(survival_probabilities_file).astype(np.float32)
# Rescale 8-bit pixel values [0, 255] into [0, 1] probabilities
survival_probabilities = np.divide(survival_probabilities, 255)

# Make sure initial population and survival layer probabilities grids are square (n x n)
if (initial_population.shape[0] != initial_population.shape[1]) or (survival_probabilities.shape[0] != survival_probabilities.shape[1]):
    print('Invalid dimensions. Grid must be square (n x n dimensions). Exiting program...')
    sys.exit()

# Make sure initial population and survival layer probabilities grids are the same dimensions
if (initial_population.shape[0] != survival_probabilities.shape[0]) or (initial_population.shape[1] != survival_probabilities.shape[1]):
    print('Invalid entry. Initial population grid and survival probabilities grid must be same shape. Exiting program...')
    # BUG FIX: this previously read "sys,exit()" — a tuple expression of the
    # module and the builtin — which evaluated to a no-op and never exited.
    sys.exit()
# Constants
matrix_size = initial_population.shape[0]  # Size of square grid
block_dims = 32  # CUDA block dimensions - maximum dimensions = 32 x 32
grid_dims = (matrix_size + block_dims - 1) // block_dims  # CUDA grid dimensions (ceil division)
p_local = 0.50  # probability an agent spreads during local diffusion
p_non_local = 0.33  # probability an agent spreads during non-local diffusion
growth_rate = 0.25  # exponential growth rate of population layer
mu = 0.0  # location parameter of cauchy distribution
gamma = 1.0  # scale parameter of cauchy distribution
n_iters = 1  # number of iterations
# CUDA kernel source for the BMSB diffusion simulation; compiled below with
# no_extern_c because the source wraps itself in extern "C".
# Kernels:
#   init_generators         - seed one cuRAND XORWOW state per grid cell
#   local_diffuse           - each agent may move to one of its 8 neighbors
#   non_local_diffuse       - each agent may jump a Cauchy-distributed distance
#   survival_of_the_fittest - populated cells are zeroed against survival_probabilities
#   population_growth       - exponential growth of populated cells
# NOTE(review): survival_of_the_fittest zeroes a cell when
# num < survival_probabilities[cell]; together with the commented printf
# ("probability of death was ...") the array is effectively treated as a
# probability of death despite its name — confirm intent.
kernel_code = """
#include <curand_kernel.h>
#include <math.h>
extern "C" {
__device__ float get_random_number(curandState* global_state, int thread_id) {
curandState local_state = global_state[thread_id];
float num = curand_uniform(&local_state);
global_state[thread_id] = local_state;
return num;
}
__device__ float get_random_angle_in_radians(curandState* global_state, int thread_id) {
float radians = get_random_number(global_state, thread_id) * 2 * M_PI;
return radians;
}
__device__ float get_random_cauchy_distance(curandState* global_state, int thread_id, float mu, float gamma) {
float distance = fabsf(mu + gamma * tan(M_PI * (get_random_number(global_state,thread_id) - 0.5)));
return distance;
}
__device__ int get_x_coord(int x, float radians, float distance) {
int x_coord = (int) roundf(x + distance * sin(radians));
return x_coord;
}
__device__ int get_y_coord(int y, float radians, float distance) {
int y_coord = (int) roundf(y + distance * cos(radians));
return y_coord;
}
__global__ void init_generators(curandState* global_state, int seed, int grid_size) {
int x = threadIdx.x + blockIdx.x * blockDim.x; // column index of cell
int y = threadIdx.y + blockIdx.y * blockDim.y; // row index of cell
// make sure this cell is within bounds of grid
if (x < grid_size && y < grid_size) {
int thread_id = y * grid_size + x; // thread index
curandState local_state;
curand_init(seed, thread_id, 0, &local_state);
global_state[thread_id] = local_state;
}
}
__global__ void local_diffuse(float* grid_a, float* grid_b, curandState* global_state, int grid_size, float prob, int time) {
int x = threadIdx.x + blockIdx.x * blockDim.x; // column index of cell
int y = threadIdx.y + blockIdx.y * blockDim.y; // row index of cell
// make sure this cell is within bounds of grid
if (x < grid_size && y < grid_size) {
int thread_id = y * grid_size + x; // thread index
grid_b[thread_id] = grid_a[thread_id]; // copy current cell
int edge = (x == 0) || (x == grid_size - 1) || (y == 0) || (y == grid_size - 1);
// ignore cell if its an edge cell
if (!edge) {
// ignore cell if it is not already populated
if (grid_a[thread_id] > 0.0) {
int count = 0; // number of agents looked at so far
int n_iters = grid_a[thread_id]; // number of agents in this cell
float num; // random number between (0,1]
int neighbor;
// each agent has a chance to spread
while (count < n_iters) {
num = get_random_number(global_state, thread_id);
// this agent spreads to a neighbor
if (num < prob) {
// randomly select a neighbor
neighbor = (int) ceilf(get_random_number(global_state, thread_id) * 8.0);
atomicAdd(&grid_b[thread_id], (float)(-1.0));
switch(neighbor) {
case 1: // above
atomicAdd(&grid_b[thread_id - grid_size], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x, y - 1, time);
break;
case 2: // above and left
atomicAdd(&grid_b[thread_id - grid_size - 1], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x - 1, y - 1, time);
break;
case 3: // above and right
atomicAdd(&grid_b[thread_id - grid_size + 1], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x + 1, y - 1, time);
break;
case 4: // below
atomicAdd(&grid_b[thread_id + grid_size], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x, y + 1, time);
break;
case 5: // below and left
atomicAdd(&grid_b[thread_id + grid_size - 1], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x - 1, y + 1, time);
break;
case 6: // below and right
atomicAdd(&grid_b[thread_id + grid_size + 1], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x + 1, y + 1, time);
break;
case 7: // left
atomicAdd(&grid_b[thread_id - 1], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x - 1, y, time);
break;
case 8: // right
atomicAdd(&grid_b[thread_id + 1], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x + 1, y, time);
break;
default: // should never reach here
printf("Invalid number encountered\\n");
break;
}
}
count += 1;
}
}
}
}
}
__global__ void non_local_diffuse(float* grid_a, float* grid_b, curandState* global_state, int grid_size, float prob, float mu, float gamma, int time) {
int x = threadIdx.x + blockIdx.x * blockDim.x; // column index of cell
int y = threadIdx.y + blockIdx.y * blockDim.y; // row index of cell
// make sure this cell is within bounds of grid
if (x < grid_size && y < grid_size) {
int thread_id = y * grid_size + x; // thread index
grid_b[thread_id] = grid_a[thread_id]; // copy current cell
// ignore cell if it is not already populated
if (grid_a[thread_id] > 0.0) {
int count = 0; // number of agents looked at so far
int n_iters = grid_a[thread_id]; // number of agents in this cell
float num; // random number between (0,1]
float radians; // random angle between (0,2*PI)
float distance; // distance drawn from cauchy distribution
int x_coord; // row index of cell to spread to
int y_coord; // column index of cell to spread to
int spread_index; // thread index of cell to spread to
// each agent has a chance to spread
while (count < n_iters) {
num = get_random_number(global_state, thread_id);
// this agent spreads to a neighbor
if (num < prob) {
// randomly select a cell
radians = get_random_angle_in_radians(global_state, thread_id);
distance = get_random_cauchy_distance(global_state, thread_id, mu, gamma);
x_coord = get_x_coord(x, radians, distance);
y_coord = get_y_coord(y, radians, distance);
//printf("Radians = %f\\tDistance = %f\\tX = %d\\tY = %d\\tX_coord = %d\\tY_coord = %d\\n", radians, distance, x, y, x_coord, y_coord);
// make sure chosen cell is in the grid dimensions and is not the current cell
if (x_coord < grid_size && x_coord >= 0 && y_coord < grid_size && y_coord >= 0 && (x_coord != x || y_coord != y)) {
spread_index = y_coord * grid_size + x_coord;
atomicAdd(&grid_b[thread_id], (float)(-1.0));
atomicAdd(&grid_b[spread_index], (float)1.0);
//printf("Cell (%d,%d) spread to cell (%d,%d) at time %d\\n", x, y, x_coord, y_coord, time);
}
}
count += 1;
}
}
}
}
__global__ void survival_of_the_fittest(float* grid_a, float* grid_b, curandState* global_state, int grid_size, float* survival_probabilities, int time) {
int x = threadIdx.x + blockIdx.x * blockDim.x; // column index of cell
int y = threadIdx.y + blockIdx.y * blockDim.y; // row index of cell
// make sure this cell is within bounds of grid
if (x < grid_size && y < grid_size) {
int thread_id = y * grid_size + x; // thread index
grid_b[thread_id] = grid_a[thread_id]; // copy current cell
float num; // random number between (0,1]
// ignore cell if it is not already populated
if (grid_a[thread_id] > 0.0) {
num = get_random_number(global_state, thread_id);
// agents in this cell die
if (num < survival_probabilities[thread_id]) {
grid_b[thread_id] = 0.0;
//printf("Cell (%d,%d) died at time %d (probability of death was %f)\\n", x, y, time, survival_probabilities[thread_id]);
}
}
}
}
__global__ void population_growth(float* grid_a, float* grid_b, int grid_size, float growth_rate, int time) {
int x = threadIdx.x + blockIdx.x * blockDim.x; // column index of cell
int y = threadIdx.y + blockIdx.y * blockDim.y; // row index of cell
// make sure this cell is within bounds of grid
if (x < grid_size && y < grid_size) {
int thread_id = y * grid_size + x; // thread index
grid_b[thread_id] = grid_a[thread_id]; // copy current cell
//printf("Value at (%d,%d) is %f\\n", x, y, grid_b[thread_id]);
// ignore cell if population is 0
if (grid_a[thread_id] > 0.0) {
// growth formula: x(t) = x(t-1) * (1 + growth_rate)^time
int pop = grid_a[thread_id];
int add_pop = (int) truncf(pop * pow((1 + growth_rate), time));
grid_b[thread_id] += add_pop;
//printf("Cell (%d,%d)'s population grew by %d at time %d\\n", x, y, add_pop, time);
}
}
}
} // end extern "C"
"""
# Compile the kernel source; no_extern_c because the source already wraps
# itself in extern "C".
mod = SourceModule(kernel_code, no_extern_c = True)

# Get kernel functions
local = mod.get_function('local_diffuse')
non_local = mod.get_function('non_local_diffuse')
survival_layer = mod.get_function('survival_of_the_fittest')
population_layer = mod.get_function('population_growth')
init_generators = mod.get_function('init_generators')

# Initialize random number generator
generator = curandom.XORWOWRandomNumberGenerator()
# Size in bytes of one device-side RNG state, needed to allocate one
# state per grid cell.
data_type_size = sizeof(generator.state_type, "#include <curand_kernel.h>")
# Replace the generator's state buffer so there is one RNG state per cell.
# NOTE(review): the buffer is assigned to generator._state but read back via
# generator.state below — confirm pycuda exposes the same buffer through both.
generator._state = drv.mem_alloc((matrix_size * matrix_size) * data_type_size)
seed = 123456789  # fixed seed -> reproducible simulation runs
# Seed every cell's generator on the device.
init_generators(generator.state, np.int32(seed), np.int32(matrix_size),
        grid = (grid_dims, grid_dims), block = (block_dims, block_dims, 1))
# Run n_iters of the Brown Marmorated Stink Bug (BMSB) Diffusion Simulation.
# The ==/<=/>= operators appear to be the forest DSL's primitive-chaining
# syntax (bmsb_stop_condition ... bmsb_stop presumably delimit the iterated
# body) — confirm against the forest package docs before restructuring.
run_primitive(
    empty_grid.vars(matrix_size) ==
    initialize_grid.vars(matrix_size, initial_population, survival_probabilities, generator) ==
    bmsb_stop_condition.vars(n_iters) <=
    local_diffusion.vars(local, matrix_size, p_local, grid_dims, block_dims) ==
    non_local_diffusion.vars(non_local, matrix_size, p_non_local, mu, gamma, grid_dims, block_dims) ==
    survival_function.vars(survival_layer, matrix_size, grid_dims, block_dims) ==
    population_growth.vars(population_layer, matrix_size, growth_rate, grid_dims, block_dims) ==
    bmsb_stop >=
    AGStore.file("output.tif")
)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from airflow.models import BaseOperator, TaskInstance
from airflow.utils.trigger_rule import TriggerRule
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils.state import State
class TriggerRuleDepTest(unittest.TestCase):
    """Tests for TriggerRuleDep covering each trigger rule's pass/fail cases."""

    def _get_task_instance(self, trigger_rule=TriggerRule.ALL_SUCCESS,
                           state=None, upstream_task_ids=None):
        """Build a TaskInstance for a dummy task using the given trigger rule."""
        task = BaseOperator(task_id='test_task', trigger_rule=trigger_rule,
                            start_date=datetime(2015, 1, 1))
        if upstream_task_ids:
            task._upstream_task_ids.update(upstream_task_ids)
        return TaskInstance(task=task, state=state, execution_date=None)

    def _evaluate(self, ti, successes, skipped, failed, upstream_failed, done):
        """Run the trigger-rule evaluation and collect the dep statuses."""
        return tuple(TriggerRuleDep()._evaluate_trigger_rule(
            ti=ti,
            successes=successes,
            skipped=skipped,
            failed=failed,
            upstream_failed=upstream_failed,
            done=done,
            flag_upstream_failed=False,
            session="Fake Session"))

    def test_no_upstream_tasks(self):
        """
        If the TI has no upstream TIs then there is nothing to check and the dep is passed
        """
        ti = self._get_task_instance(TriggerRule.ALL_DONE, State.UP_FOR_RETRY)
        self.assertTrue(TriggerRuleDep().is_met(ti=ti))

    def test_dummy_tr(self):
        """
        The dummy trigger rule should always pass this dep
        """
        ti = self._get_task_instance(TriggerRule.DUMMY, State.UP_FOR_RETRY)
        self.assertTrue(TriggerRuleDep().is_met(ti=ti))

    def test_one_success_tr_success(self):
        """
        One-success trigger rule success
        """
        ti = self._get_task_instance(TriggerRule.ONE_SUCCESS, State.UP_FOR_RETRY)
        statuses = self._evaluate(ti, successes=1, skipped=2, failed=2,
                                  upstream_failed=2, done=2)
        self.assertEqual(len(statuses), 0)

    def test_one_success_tr_failure(self):
        """
        One-success trigger rule failure
        """
        ti = self._get_task_instance(TriggerRule.ONE_SUCCESS)
        statuses = self._evaluate(ti, successes=0, skipped=2, failed=2,
                                  upstream_failed=2, done=2)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)

    def test_one_failure_tr_failure(self):
        """
        One-failure trigger rule failure
        """
        ti = self._get_task_instance(TriggerRule.ONE_FAILED)
        statuses = self._evaluate(ti, successes=2, skipped=0, failed=0,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)

    def test_one_failure_tr_success(self):
        """
        One-failure trigger rule success
        """
        ti = self._get_task_instance(TriggerRule.ONE_FAILED)
        statuses = self._evaluate(ti, successes=0, skipped=2, failed=2,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 0)

        # An upstream_failed counts as a failure for ONE_FAILED as well.
        statuses = self._evaluate(ti, successes=0, skipped=2, failed=0,
                                  upstream_failed=2, done=2)
        self.assertEqual(len(statuses), 0)

    def test_all_success_tr_success(self):
        """
        All-success trigger rule success
        """
        ti = self._get_task_instance(TriggerRule.ALL_SUCCESS,
                                     upstream_task_ids=["FakeTaskID"])
        statuses = self._evaluate(ti, successes=1, skipped=0, failed=0,
                                  upstream_failed=0, done=1)
        self.assertEqual(len(statuses), 0)

    def test_all_success_tr_failure(self):
        """
        All-success trigger rule failure
        """
        ti = self._get_task_instance(TriggerRule.ALL_SUCCESS,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID"])
        statuses = self._evaluate(ti, successes=1, skipped=0, failed=1,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)

    def test_none_failed_tr_success(self):
        """
        All success including skip trigger rule success
        """
        ti = self._get_task_instance(TriggerRule.NONE_FAILED,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID"])
        statuses = self._evaluate(ti, successes=1, skipped=1, failed=0,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 0)

    def test_none_failed_tr_failure(self):
        """
        All success including skip trigger rule failure
        """
        ti = self._get_task_instance(TriggerRule.NONE_FAILED,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID",
                                                        "FailedFakeTaskID"])
        statuses = self._evaluate(ti, successes=1, skipped=1, failed=1,
                                  upstream_failed=0, done=3)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)

    def test_all_failed_tr_success(self):
        """
        All-failed trigger rule success
        """
        ti = self._get_task_instance(TriggerRule.ALL_FAILED,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID"])
        statuses = self._evaluate(ti, successes=0, skipped=0, failed=2,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 0)

    def test_all_failed_tr_failure(self):
        """
        All-failed trigger rule failure
        """
        ti = self._get_task_instance(TriggerRule.ALL_FAILED,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID"])
        statuses = self._evaluate(ti, successes=2, skipped=0, failed=0,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)

    def test_all_done_tr_success(self):
        """
        All-done trigger rule success
        """
        ti = self._get_task_instance(TriggerRule.ALL_DONE,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID"])
        statuses = self._evaluate(ti, successes=2, skipped=0, failed=0,
                                  upstream_failed=0, done=2)
        self.assertEqual(len(statuses), 0)

    def test_all_done_tr_failure(self):
        """
        All-done trigger rule failure
        """
        ti = self._get_task_instance(TriggerRule.ALL_DONE,
                                     upstream_task_ids=["FakeTaskID",
                                                        "OtherFakeTaskID"])
        statuses = self._evaluate(ti, successes=1, skipped=0, failed=0,
                                  upstream_failed=0, done=1)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)

    def test_unknown_tr(self):
        """
        Unknown trigger rules should cause this dep to fail
        """
        ti = self._get_task_instance()
        ti.task.trigger_rule = "Unknown Trigger Rule"
        statuses = self._evaluate(ti, successes=1, skipped=0, failed=0,
                                  upstream_failed=0, done=1)
        self.assertEqual(len(statuses), 1)
        self.assertFalse(statuses[0].passed)
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import copy
import functools
import inspect
from oslo_config import cfg
from oslo_log import log as logging
import pkg_resources
import six
from iotronic.openstack.common._i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
deprecated_opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
]
def list_opts():
    """Entry point for oslo_config-generator.

    Returns the deprecation options registered under the default (None)
    group; the options are deep-copied so callers cannot mutate the
    module-level list.
    """
    opts = copy.deepcopy(deprecated_opts)
    return [(None, opts)]
class deprecated(object):
    """A decorator to mark callables as deprecated.

    This decorator logs a deprecation message when the callable it decorates is
    used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an optional
    replacement.

    Examples:

    1. Specifying the required deprecated release

    >>> @deprecated(as_of=deprecated.ICEHOUSE)
    ... def a(): pass

    2. Specifying a replacement:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
    ... def b(): pass

    3. Specifying the release where the functionality may be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
    ... def c(): pass

    4. Specifying the deprecated functionality will not be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
    ... def d(): pass

    5. Specifying a replacement, deprecated functionality will not be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
    ... def e(): pass
    """

    # Single-letter release constants, advanced alphabetically by
    # _get_safe_to_remove_release().
    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
    # expected we maintain a gap between Bexar and Folsom in this list.
    BEXAR = 'B'
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'
    JUNO = 'J'
    KILO = 'K'
    LIBERTY = 'L'

    # Map release initial -> human-readable release name used in messages.
    _RELEASES = {
        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
        # expected we maintain a gap between Bexar and Folsom in this list.
        'B': 'Bexar',
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
        'J': 'Juno',
        'K': 'Kilo',
        'L': 'Liberty',
    }

    # Message templates, one per combination of (replacement given?,
    # removal planned?).
    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')

    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')

    _deprecated_msg_with_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')

    _deprecated_msg_with_no_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')

    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator

        :param as_of: the release deprecating the callable. Constants
            are define in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)
        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what

    def __call__(self, func_or_cls):
        # Default the reported name to the decorated callable's name.
        if not self.what:
            self.what = func_or_cls.__name__ + '()'
        msg, details = self._build_message()

        if inspect.isfunction(func_or_cls):
            # Wrap a plain function: log the deprecation on every call.
            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                report_deprecated_feature(LOG, msg, details)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            # Wrap a class: log the deprecation on every instantiation by
            # replacing __init__.
            orig_init = func_or_cls.__init__

            # TODO(tsufiev): change `functools` module to `six` as
            # soon as six 1.7.4 (with fix for passing `assigned`
            # argument to underlying `functools.wraps`) is released
            # and added to the oslo_incubator requrements
            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
            def new_init(self, *args, **kwargs):
                report_deprecated_feature(LOG, msg, details)
                orig_init(self, *args, **kwargs)
            func_or_cls.__init__ = new_init
            return func_or_cls
        else:
            raise TypeError('deprecated can be used only with functions or '
                            'classes')

    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        # when we get to the X release because once we get to the Y
        # release, what is Y+2?
        # Advance `remove_in` releases alphabetically from the deprecating
        # release; fall back to the raw letter for unknown releases.
        new_release = chr(ord(release) + self.remove_in)
        if new_release in self._RELEASES:
            return self._RELEASES[new_release]
        else:
            return new_release

    def _build_message(self):
        # Pick the message template matching the configured options and
        # build the substitution dict for it.
        details = dict(what=self.what,
                       as_of=self._RELEASES[self.as_of],
                       remove_in=self._get_safe_to_remove_release(self.as_of))

        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            if self.remove_in > 0:
                msg = self._deprecated_msg_with_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_alternative_no_removal
        else:
            if self.remove_in > 0:
                msg = self._deprecated_msg_no_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_no_alternative_no_removal
        return msg, details
def is_compatible(requested_version, current_version, same_major=True):
    """Determine whether `requested_version` is satisfied by
    `current_version`; in other words, `current_version` is >=
    `requested_version`.

    :param requested_version: version to check for compatibility
    :param current_version: version to check against
    :param same_major: if True, the major version must be identical between
        `requested_version` and `current_version`. This is used when a
        major-version difference indicates incompatibility between the two
        versions. Since this is the common-case in practice, the default is
        True.
    :returns: True if compatible, False if not
    """
    requested_parts = pkg_resources.parse_version(requested_version)
    current_parts = pkg_resources.parse_version(current_version)

    # NOTE(review): indexing the parsed version assumes the legacy
    # pkg_resources behaviour where parse_version() returned a tuple of
    # normalized components; on modern setuptools it returns a packaging
    # Version object which is not subscriptable — confirm the pinned
    # setuptools version still supports this before upgrading.
    if same_major and (requested_parts[0] != current_parts[0]):
        return False

    return current_parts >= requested_parts
# Track the messages we have sent already. See
# report_deprecated_feature().
_deprecated_messages_sent = {}


def report_deprecated_feature(logger, msg, *args, **kwargs):
    """Call this function when a deprecated feature is used.

    If the system is configured for fatal deprecations then the message
    is logged at the 'critical' level and :class:`DeprecatedConfig` will
    be raised.

    Otherwise, the message will be logged (once) at the 'warning' level.

    :raises: :class:`DeprecatedConfig` if the system is configured for
             fatal deprecations.
    """
    stdmsg = _("Deprecated: %s") % msg
    CONF.register_opts(deprecated_opts)
    if CONF.fatal_deprecations:
        logger.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)

    # Using a list because a tuple with dict can't be stored in a set.
    sent_args = _deprecated_messages_sent.setdefault(msg, list())

    if args in sent_args:
        # Already logged this message, so don't log it again.
        return

    sent_args.append(args)
    # logger.warn() is a deprecated alias of logger.warning() in the stdlib
    # logging module; call the canonical name.
    logger.warning(stdmsg, *args, **kwargs)
class DeprecatedConfig(Exception):
    """Raised when a deprecated feature is used and deprecations are fatal."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Pass the class itself (not Exception) to super() so the normal
        # MRO is followed; super(Exception, self) skipped a level.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
| |
"""Sorbet - frozen sequence storage engine
Sorbet is fast - on a "standard" laptop it achieves:
- 200k writes per second,
- 300k reads per second.
"""
# ------------------------------------------------------------------------------
def sorbet(path, kind='disk'):
    """Create a sorbet storage engine of the requested kind.

    :param path: path prefix for the engine's on-disk files
    :param kind: 'disk', 'mem' or 'mem_only'
    :raises Exception: for an unrecognised kind
    """
    if kind == 'disk':
        engine = sorbet_on_disk
    elif kind == 'mem':
        engine = sorbet_in_mem
    elif kind == 'mem_only':
        engine = sorbet_in_mem_only
    else:
        raise Exception(f"Invalid sorbet kind: '{kind}'")
    return engine(path)
# ------------------------------------------------------------------------------
from pickle import dump,load,HIGHEST_PROTOCOL
import os
# TODO parallel scan -> a plain (single-process) version makes no sense because filter() already covers it
# TODO multiproc scan -> (path,index[lo:hi]->dict)
class sorbet_on_disk:
    """Frozen sequence storage engine backed by two files on disk.

    ``<path>.data`` holds pickled values back to back; ``<path>.index``
    holds a pickled list of byte offsets, one per value, enabling random
    access by position.
    """

    def __init__(self, path, mp_pool=None):
        """Configure prefix of sorbet files.

        :param path: path prefix for the ``.data`` and ``.index`` files
        :param mp_pool: optional multiprocessing pool used by imap() (experimental)
        """
        self.path = path
        self.mp_pool = mp_pool
        self.protocol = HIGHEST_PROTOCOL

    def _dump_index(self):
        # Persist the offset index; "with" guarantees the handle is closed
        # (BUG FIX: the handle was previously left open until GC).
        with open(f"{self.path}.index", 'wb') as f:
            dump(self.index, f, protocol=self.protocol)

    def new(self):
        """Create new storage for appending data."""
        self.f = open(f"{self.path}.data", 'wb')
        self.index = []
        return self

    def save(self):
        """Save data on disk; removes ability to append new data."""
        self._dump_index()
        self.f.close()
        # Reopen read-only so __getitem__ can seek into the data file.
        self.f = open(f"{self.path}.data", 'rb')
        print(f"size of {self.path}.data - {os.path.getsize(f'{self.path}.data')/1_000_000:.01f} MB") # XXX
        return self

    def load(self):
        """Read data from disk."""
        self.f = open(f"{self.path}.data", 'rb')
        # BUG FIX: the index file handle was previously never closed.
        with open(f"{self.path}.index", 'rb') as f:
            self.index = load(f)
        return self

    def dump(self, data):
        """Pickle every value of ``data`` to disk and return self."""
        self.new()
        # Hoist attribute lookups out of the hot loop.
        f = self.f
        index = self.index
        p = self.protocol
        for val in data:
            index.append(f.tell())
            dump(val, f, protocol=p)
        f.close()
        self._dump_index()
        self.f = open(f"{self.path}.data", 'rb')
        print(f"size of {self.path}.data - {os.path.getsize(f'{self.path}.data')/1_000_000:.01f} MB") # XXX
        return self

    def append(self, val):
        """Append value (of any type) to the dataset."""
        self.index.append(self.f.tell())
        dump(val, self.f, protocol=self.protocol)

    def delete(self):
        """Close the data file and remove both files from disk."""
        self.f.close()
        os.remove(f"{self.path}.index")
        os.remove(f"{self.path}.data")

    def __getitem__(self, key):
        """Return value for given key or iterator for given slice."""
        if type(key) is slice:
            return self.__getslice__(key)
        pos = self.index[key]
        self.f.seek(pos)
        return load(self.f)

    def __getslice__(self, key):
        """Return iterator over given slice."""
        start, stop, step = key.indices(len(self))
        for i in range(start, stop, step):
            yield self[i]

    def __len__(self):
        """Return number of items."""
        return len(self.index)

    # experimental
    def imap(self, fun):
        """Apply ``fun`` over all items via the configured mp pool."""
        yield from self.mp_pool.imap(fun, self)
# ------------------------------------------------------------------------------
class sorbet_in_mem:
    """Frozen sequence storage engine that keeps all values in memory.

    The whole dataset is pickled as a single list into ``<path>.data``.
    """

    def __init__(self, path):
        """Configure prefix of sorbet files."""
        self.path = path
        self.protocol = HIGHEST_PROTOCOL

    def new(self):
        """Create new storage for appending data."""
        self.f = open(f"{self.path}.data", 'wb')
        self.data = []
        return self

    def save(self):
        """Save data on disk; removes ability to append new data."""
        dump(self.data, self.f, protocol=self.protocol)
        self.f.close()
        print(f"size of {self.path}.data - {os.path.getsize(f'{self.path}.data')/1_000_000:.01f} MB") # XXX
        return self

    def load(self):
        """Read data from disk."""
        # BUG FIX: the data file was previously opened twice (once into
        # self.f and once anonymously for load()), leaking a file handle;
        # read through the single handle instead.
        self.f = open(f"{self.path}.data", 'rb')
        self.data = load(self.f)
        return self

    def dump(self, data):
        """Save data on disk."""
        self.new()
        self.data = list(data)
        dump(self.data, self.f, protocol=self.protocol)
        self.f.close()
        print(f"size of {self.path}.data - {os.path.getsize(f'{self.path}.data')/1_000_000:.01f} MB") # XXX
        return self

    def append(self, val):
        """Append value (of any type) to the dataset."""
        self.data.append(val)

    def delete(self):
        """Drop the in-memory data and remove the data file."""
        del self.data
        os.remove(f"{self.path}.data")

    def __getitem__(self, key):
        """Return value for given key or iterator for given slice."""
        return self.data[key]

    def __len__(self):
        """Return number of items."""
        return len(self.data)
# ------------------------------------------------------------------------------
class sorbet_in_mem_only:
    """Frozen sequence storage engine held purely in memory (no files)."""

    def __init__(self, path):
        # ``path`` is accepted only for interface parity with the
        # disk-backed engines; it is never used.
        self.data = []

    def new(self):
        """No-op; the in-memory list is ready immediately."""
        return self

    def save(self):
        """No-op; nothing is persisted."""
        return self

    def load(self):
        """Loading is unsupported for a memory-only engine."""
        assert False

    def dump(self, data):
        """Materialise ``data`` into the in-memory list."""
        self.data = list(data)
        return self

    def append(self, val):
        """Append value (of any type) to the dataset."""
        self.data.append(val)

    def delete(self):
        """Drop the in-memory data."""
        del self.data

    def __getitem__(self, key):
        """Return value for given key (or list for a slice)."""
        return self.data[key]

    def __len__(self):
        """Return number of items."""
        return len(self.data)
# ---[ TEST ]-------------------------------------------------------------------
if __name__=="__main__":
    from time import time
    import sys

    # Derive the data file prefix from the script name (strip ".py").
    label = sys.argv[0][:-3]
    path = f'../data/{label}'
    N = 1_100_000  # number of benchmark records
    kind = 'disk'

    # Write benchmark: stream N dicts into a fresh store.
    if 1:
        data = ({'a':i,'aa':i*10} for i in range(N))
        t0=time()
        db = sorbet(path,kind).dump(data)
        print("write time:",f"{time()-t0:.02f}s",f'{N/(time()-t0):.0f} items/s')

    # Read benchmark: random-access every record once.
    if 1:
        db = sorbet(path,kind).load()
        t0=time()
        for i in range(N):
            db[i]
        print("read time:",f"{time()-t0:.02f}s",f'{N/(time()-t0):.0f} items/s')

    # NOTE: everything below this exit() is dead demo code kept for
    # manual experimentation.
    exit()

    print()
    print(list(db[:3]))
    print()
    for x in list(db[:3]):
        print(x)
    print()
    print(db[10])

    print('\nfilter')
    for x in filter(lambda x:x['a']%10000==0, db):
        print(x)

    print('\nfilter - slice')
    for x in filter(lambda x:x['a']%10000==0, db[50000:]):
        print(x)
| |
# Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from enum import Enum
from itertools import tee
import argparse
import sys
import collections
import operator
import re
def pairwise(iterable):
    """Yield consecutive overlapping pairs: (s0, s1), (s1, s2), ..."""
    first, second = tee(iterable)
    next(second, None)  # advance the second iterator by one element
    return zip(first, second)
class InstructionFlags(Enum):
    """Bitmask flags describing an opcode's control-flow behaviour.

    The values are combined and tested with bitwise AND (see
    OpCode.IsBranch / OpCode.IsJump), so Branch and Jump must remain
    distinct bits.
    """
    Default = 0
    Branch = 1  # Conditional branch, so both the target and the next
                # instruction are successors
    Jump = 2    # Unconditional branch, so only the target is a valid
                # successor
class OpCode:
    """Static description of an ISA opcode.

    Holds the mnemonic, the argument positions it writes and reads, the
    instruction class, control-flow flags and an optional cycle count.
    """

    def __init__(self, name, operandsWritten, operandsRead, instructionClass, flags=0, cycleCount=None):
        self._name = name
        self._writtenOperands = frozenset(operandsWritten)
        self._readOperands = frozenset(operandsRead)
        self._instructionClass = instructionClass
        self._cycleCount = cycleCount
        self._flags = flags

    @property
    def IsBranch(self):
        """Truthy when this opcode is a conditional branch."""
        return self._flags & InstructionFlags.Branch.value

    @property
    def IsJump(self):
        """Truthy when this opcode is an unconditional jump."""
        return self._flags & InstructionFlags.Jump.value

    @property
    def Name(self):
        """The opcode mnemonic."""
        return self._name

    @property
    def Class(self):
        """The instruction class (encoding family)."""
        return self._instructionClass

    @Class.setter
    def Class(self, value):
        self._instructionClass = value

    @property
    def Written(self):
        """Argument positions this opcode writes."""
        return self._writtenOperands

    @property
    def Read(self):
        """Argument positions this opcode reads."""
        return self._readOperands

    @property
    def ReadWritten(self):
        """Argument positions that are both read and written."""
        return self._readOperands & self._writtenOperands
class RegisterSet:
    """Pair of register collections: the registers read and those written."""

    def __init__(self, read, written):
        self._readRegs = read
        self._writtenRegs = written

    @property
    def Read(self):
        """Registers read."""
        return self._readRegs

    @property
    def Written(self):
        """Registers written."""
        return self._writtenRegs

    def __str__(self):
        return 'Written: {}, read: {}'.format(len(self._writtenRegs), len(self._readRegs))
def MergeRegisterSets(registerSets):
    """Union a sequence of RegisterSet objects into one RegisterSet."""
    allRead = frozenset()
    allWritten = frozenset()
    for registerSet in registerSets:
        allRead |= registerSet.Read
        allWritten |= registerSet.Written
    return RegisterSet(allRead, allWritten)
class Instruction:
    '''An instruction consists of an op-code, the arguments passed to it, and
    an optional label if the instruction is a jump target.'''
    def __init__ (self, binCode, opcode, args, label = None):
        import gcn
        # Unknown mnemonics fall back to a placeholder OpCode with empty
        # operand sets and class Unknown.
        self.__opcode = gcn.opcodes.get (opcode, OpCode (opcode, {}, {}, gcn.InstructionClass.Unknown, 0, None))
        self.__args = args
        self.__label = label
        self.__usedVGPRs = self.__ComputeUsedVGPRs()
        # Some instructions have 2 possible encodings: VOP2 or VOP3. For these instructions, check
        # the highest bit of binary code to find out the exact encoding type.
        if self.__opcode.Class == gcn.InstructionClass.VOP2 or self.__opcode.Class == gcn.InstructionClass.VOP3:
            hiCode = binCode.split(' ')[0]
            # NOTE(review): this writes through to the shared OpCode object
            # stored in gcn.opcodes, so the most recently decoded instruction
            # determines the class seen by every instruction sharing that
            # opcode — confirm this aliasing is intended.
            if (int(hiCode, 16) & 0x80000000) == 0:
                self.__opcode.Class = gcn.InstructionClass.VOP2
            else:
                self.__opcode.Class = gcn.InstructionClass.VOP3
    def SetLabel(self, label):
        # Attach a jump-target label after construction (used to propagate a
        # basic block's label onto its first instruction).
        assert label is not None
        self.__label = label
    @property
    def OpCode(self):
        # The OpCode descriptor for this instruction.
        return self.__opcode
    def __ComputeUsedVGPRs (self):
        # Classify each argument into read/written vector registers based on
        # the operand-position sets declared by the opcode.
        class VGPRSet:
            '''Helper class to add register ranges and filter out non-vector registers.'''
            def __init__ (self):
                self.__used = set ()
            def Add (self, reg):
                # Strip operand modifiers such as abs(...) or a leading minus.
                if reg.startswith ('abs('):
                    reg = reg [4:-1]
                elif reg.startswith ('-'):
                    reg = reg [1:]
                # Ignore anything that is not a vector register: scalar
                # operands, literals, 'vcc', and wait counts like vmcnt(0).
                if reg[0] != 'v' or reg == 'vcc' or reg.startswith ('vmcnt'):
                    return
                if reg[1] == '[':
                    # vector register range, e.g. v[4:7] — add each register
                    s, e = reg [2:-1].split (':')
                    s = int (s)
                    e = int (e)
                    for i in range(s, e+1):
                        self.__used.add (i)
                else:
                    self.__used.add (int (reg[1:]))
            def Get(self):
                return self.__used
            def __str__(self):
                return str(self.__used)
        read = VGPRSet ()
        written = VGPRSet ()
        for i, arg in enumerate (self.__args):
            # An argument position may be read-write, write-only, or (the
            # default) read-only according to the opcode's operand sets.
            if i in self.__opcode.ReadWritten:
                written.Add (arg)
                read.Add (arg)
            elif i in self.__opcode.Written:
                written.Add (arg)
            else:
                read.Add (arg)
        return RegisterSet (read.Get (), written.Get ())
    def __str__ (self):
        return self.ToString ()
    def ToString (self, includeLabel=True):
        # Render as assembly text, optionally prefixed with 'label_<x>:'.
        if self.__label and includeLabel:
            return 'label_{}: {} {}'.format (self.__label, self.__opcode.Name, ', '.join (self.__args))
        else:
            return '{} {}'.format (self.__opcode.Name, ', '.join (self.__args))
    @property
    def Label(self):
        return self.__label
    @property
    def UsedVGPRs (self):
        # RegisterSet of vector registers this instruction reads/writes.
        return self.__usedVGPRs
    def IsBranch(self):
        return self.__opcode.IsBranch
    def IsJump(self):
        return self.__opcode.IsJump
    def GetBranchTarget(self):
        # By convention the branch target label is the first argument.
        return self.__args [0]
    def GetJumpTarget(self):
        # By convention the jump target label is the first argument.
        return self.__args [0]
class BasicBlock:
    """A straight-line run of instructions plus its CFG edges.

    The synthetic labels 'entry' and 'exit' mark the graph's end points.
    """

    def __init__(self, label, instructions, order):
        self._instructions = instructions
        self._label = label
        self._order = order
        self._successors = []
        self._predecessors = []

    @property
    def Empty(self):
        """True when this block contains no instructions."""
        return not self._instructions

    @property
    def IsEntry(self):
        """True for the synthetic entry block."""
        return self._label == 'entry'

    @property
    def IsExit(self):
        """True for the synthetic exit block."""
        return self._label == 'exit'

    def GetUsedVGPRs(self):
        """Union of the VGPR usage of every instruction in this block."""
        return MergeRegisterSets([ins.UsedVGPRs for ins in self._instructions])

    @property
    def Label(self):
        return self._label

    def Link(self, otherBlock):
        """Record a directed CFG edge self -> otherBlock."""
        self._successors.append(otherBlock)
        otherBlock._predecessors.append(self)

    @property
    def Successors(self):
        return self._successors

    @property
    def Predecessors(self):
        return self._predecessors

    @property
    def Order(self):
        """Creation index; used later to restore source order."""
        return self._order

    @property
    def Instructions(self):
        return self._instructions

    def __str__(self):
        return '{} ({} instructions)'.format(self._label, len(self._instructions))
class IsaReader:
    """Abstract base for ISA readers; subclasses parse a stream into instructions."""

    def GetInstructions(self):
        """Return the parsed instructions; the base implementation has none."""
        return list()
class _BaseIsaReader(IsaReader):
    """Shared tokenizing/parsing helpers for the concrete ISA readers."""

    def GetLines(self, inputStream):
        """Tokenize the stream into per-line token lists.

        Comment-only lines (starting with ';') are dropped.  When a line
        carries a '//' comment containing the instruction's binary encoding,
        that hex text is prepended to the line's tokens.
        """
        lines = []
        for line in inputStream:
            line = line.strip ()
            if line.startswith(';'):
                continue
            comment = line.find ('//')
            commentStr = ""
            instItems = []
            if comment != -1:
                # Split line into 2 parts: instruction & comment.
                commentStr = line [comment+2 :]
                line = line [:comment]
            if line:
                # Parse the comment. It should contain the instruction binary code.
                binCodeStr = self.GetBinEncodingText(commentStr)
                # Add the binary code and other instruction parts into "instItems"
                if binCodeStr != "":
                    instItems.append(binCodeStr)
                instItems.extend(line.split())
                lines.append (instItems)
        return lines

    def ReadInstructions (self, lines):
        """Convert tokenized lines into Instruction objects.

        Metadata lines ('shader', 'asic(...)', 'type(...)', '@kernel') are
        skipped; a bare 'name:' token becomes the label attached to the next
        instruction.
        """
        nextLabel = None
        result = []
        for lineno, line in enumerate (lines):
            # shader <foo>, asic(VI), type(PS) and similar are ignored here
            if line[0].startswith ('asic') or line[0].startswith ('type') or line[0].startswith ('@kernel'):
                continue
            elif line[0] == 'shader':
                continue
            elif ' ' not in line[0] and line[0][-1] == ':':
                # A label line: remember it for the next real instruction.
                nextLabel = line[0][:-1]
                continue
            binCode = line [0]
            opCode = line [1]
            args = []
            # Join everything after the opcode and separate at the comma again
            for param in ' '.join (line [2:]).split (','):
                paramElements = param.split ()
                if paramElements:
                    args.append (paramElements[0])
            result.append (Instruction (binCode, opCode, args, nextLabel))
            nextLabel = None
        return result

    # Tries to find binary code text in the "comment" string:
    # 000000000130: D1190201 00000100
    #               `--- bin code ---'
    def GetBinEncodingText(self, comment):
        """Return the hex encoding following 'address:' in *comment*, or ''."""
        result = ""
        if comment == "":
            return result
        foundBinCode = re.search(r' *[0-9a-fA-F]+: *([0-9a-fA-F]{8})( *[0-9a-fA-F]{8})?', comment)
        if foundBinCode:
            result = comment[comment.find(':')+1 :].lstrip(' ')
        return result
class ShaderAnalyzerIsaReader(_BaseIsaReader):
    """Reads GPU ShaderAnalyzer dumps.

    The instruction body lives between a line starting with 'shader' and the
    matching 'end'; everything outside that range (arbitrary metadata) is
    ignored.
    """

    def GetInstructions(self, inputStream):
        tokenized = self.GetLines(inputStream)
        insideShader = False
        body = []
        for lineno, line in enumerate(tokenized):
            first = line[0]
            if first == 'shader' or first == 'end':
                if first == 'shader' and not insideShader:
                    insideShader = True
                    continue
                elif first == 'end' and insideShader:
                    break
                else:
                    raise RuntimeError("Mismatched shader/end in line {}".format(lineno))
            if insideShader:
                body.append(line)
        return self.ReadInstructions(body)
class HSAILIsaReader(_BaseIsaReader):
    """Reads HSAIL-style disassembly.

    The body starts after a 'Disassembly for ...' line and stops at 'end'.
    """

    def GetInstructions(self, inputStream):
        tokenized = self.GetLines(inputStream)
        insideShader = False
        body = []
        for lineno, line in enumerate(tokenized):
            isStart = line[0] == 'Disassembly' and line[1] == 'for'
            if isStart or line[0] == 'end':
                if isStart:
                    insideShader = True
                    continue
                elif line[0] == 'end' and insideShader:
                    break
                else:
                    raise RuntimeError("Mismatched Disassembly for/end in line {}".format(lineno))
            if insideShader:
                body.append(line)
        return self.ReadInstructions(body)
class ShaderDumpIsaReader(_BaseIsaReader):
    """Reads driver shader dumps.

    The disassembly section is introduced by a banner line and terminated by
    'end'.
    """

    def GetInstructions(self, inputStream):
        tokenized = self.GetLines(inputStream)
        insideShader = False
        body = []
        for line in tokenized:
            if ' '.join(line) == '; -------- Disassembly --------------------':
                insideShader = True
                continue
            elif line[0] == 'end' and insideShader:
                break
            if insideShader:
                body.append(line)
        return self.ReadInstructions(body)
class RawIsaReader(_BaseIsaReader):
    """Reads a bare instruction listing with no surrounding markers.

    Tokens are lower-cased and parsing stops after the first 's_endpgm'.
    """

    def GetInstructions(self, inputStream):
        tokenized = self.GetLines(inputStream)
        kept = []
        for line in tokenized:
            kept.append([token.lower() for token in line])
            if line[0].lower() == 's_endpgm':
                break
        return self.ReadInstructions(kept)
def GetIsaReader(inputStream, isaFormat):
    """Select the reader matching *isaFormat*.

    For 'auto', the first bytes of the stream are peeked (without consuming
    them) to guess the dump flavour.
    """
    if isaFormat == 'auto':
        head = inputStream.buffer.peek(64)
        if head.startswith(b'AMD Kernel Code for'):
            return HSAILIsaReader()
        if head.startswith(b';--------'):
            return ShaderDumpIsaReader()
        return ShaderAnalyzerIsaReader()
    readers = {
        'HSAIL': HSAILIsaReader,
        'ShaderAnalyzer': ShaderAnalyzerIsaReader,
        'ShaderDump': ShaderDumpIsaReader,
        'raw': RawIsaReader,
    }
    if isaFormat in readers:
        return readers[isaFormat]()
    raise RuntimeError("Unsupported input format: '{}'".format(isaFormat))
def LoadIsa (inputStream, isaFormat):
    """Parse *inputStream* into a list of linked BasicBlocks.

    The first block is labelled 'entry' and a synthetic empty 'exit' block is
    appended at the end; other blocks take their jump label or a generated
    'basic_block_N' name.  CFG edges (fall-through, branch, jump) are linked
    before returning.
    """
    reader = GetIsaReader (inputStream, isaFormat)
    instructions = reader.GetInstructions (inputStream)
    basicBlocks = []
    basicBlockMap = {}
    currentBlockLabel = 'entry'
    currentBlockInstructions = []
    startOnNextBlock = True
    # Build basic blocks
    for i, instruction in enumerate (instructions):
        label = instruction.Label
        if label or startOnNextBlock:
            # Close the current block: a new block begins at every label and
            # right after every branch/jump instruction.
            block = BasicBlock (currentBlockLabel, currentBlockInstructions, len(basicBlocks))
            basicBlocks.append (block)
            assert block.Label not in basicBlockMap
            basicBlockMap [block.Label] = block
            currentBlockInstructions = []
            if startOnNextBlock and label is None:
                # Fall-through block without an explicit label: invent one.
                currentBlockLabel = 'basic_block_{}'.format (len(basicBlocks))
                startOnNextBlock = False
            else:
                currentBlockLabel = label
                startOnNextBlock = False
        if instruction.IsBranch () or instruction.IsJump ():
            startOnNextBlock = True
        currentBlockInstructions.append (instruction)
    # Add last block if some instructions are left
    if currentBlockInstructions:
        block = BasicBlock (currentBlockLabel, currentBlockInstructions, len(basicBlocks))
        basicBlocks.append (block)
        basicBlockMap [block.Label] = block
    # Terminal block, we call it exit because there is no jump to its label
    block = BasicBlock ('exit', [], len(basicBlocks))
    basicBlocks.append (block)
    assert block.Label not in basicBlockMap
    basicBlockMap [block.Label] = block
    for pair in pairwise(basicBlocks):
        instructions = pair [0].Instructions
        if instructions:
            # Propagate the block label onto its first instruction so output
            # renders the label in front of it.
            if instructions[0].Label != pair[0].Label:
                instructions[0].SetLabel (pair[0].Label)
            if instructions[-1].IsBranch ():
                # Link branch target
                pair [0].Link (basicBlockMap [instructions[-1].GetBranchTarget ()])
            elif instructions[-1].IsJump ():
                # Link jump target and exit -- nothing else to link here
                pair [0].Link (basicBlockMap [instructions[-1].GetJumpTarget ()])
                continue
        # Link unless the last instruction was an jump (see above)
        pair [0].Link (pair [1])
    return basicBlocks
def DumpCFGDot(input, output, isaFormat, compact=None):
    """Write out the control-flow-graph, with each node being a basic block.

    :param input: input stream containing the ISA dump.
    :param output: text stream receiving the Graphviz dot output.
    :param isaFormat: input format name accepted by GetIsaReader.
    :param compact: when True, nodes show only an instruction count.  Defaults
        to the CLI '--compact' flag when invoked from the command line (the
        original implementation read the module-level 'args' directly, which
        raised NameError for any non-CLI caller).
    """
    basicBlocks = LoadIsa (input, isaFormat)
    if compact is None:
        # Backwards-compatible fallback to the CLI entry point's global.
        compact = bool(getattr(globals().get('args', None), 'compact', False))
    def FormatBlockContent(block, compact):
        if compact:
            return '{} instructions\\l'.format (len (block.Instructions))
        else:
            return ''.join([i.ToString (includeLabel=False) + '\\l' for i in block.Instructions])
    links = []
    output.write ('digraph {\nnode [shape=box]\n')
    for block in basicBlocks:
        shape = ''
        if block.IsEntry or block.IsExit:
            shape = 'style=rounded'
        output.write ('"n_{0}" [ label= "{0}\\n{1}" {2}]\n'.format (block.Label, FormatBlockContent (block, compact), shape))
        for succ in block.Successors:
            links.append ((block.Label, succ.Label))
    for link in links:
        output.write ('"n_{}":s -> "n_{}";\n'.format (link [0], link [1]))
    output.write ('}\n')
def DumpBasicBlockVGPRUsage(input, output, isaFormat):
    """Write out the VGPRs used per basic block, without considering
    inter-block dependencies."""
    for block in LoadIsa(input, isaFormat):
        usage = block.GetUsedVGPRs()
        output.write('Block: {0: >16} | Read: {1: >4} | Written: {2: >4}\n'.format(block.Label, len(usage.Read), len(usage.Written)))
class PICFGNode:
    """A node in the per-instruction CFG tree.  Each PICFG node carries at
    most one instruction; entry/exit blocks get an instruction-less node."""

    def __init__(self, instruction, block, inBlockOrder):
        self._instruction = instruction
        self._block = block
        self._successors = []
        self._predecessors = []
        self._liveIn = set()
        self._liveOut = set()
        self._order = inBlockOrder

    @property
    def Use(self):
        '''Return the registers used (read) in this node.'''
        return self._instruction.UsedVGPRs.Read if self._instruction else set()

    @property
    def Def(self):
        '''Return the registers defined (written) in this node.'''
        return self._instruction.UsedVGPRs.Written if self._instruction else set()

    @property
    def In(self):
        """Registers live on the incoming edges; refresh with UpdateIn()."""
        return self._liveIn

    @property
    def Out(self):
        """Registers live on the outgoing edges; refresh with UpdateOut()."""
        return self._liveOut

    def UpdateOut(self):
        """Recompute the live-out set as the union of the successors' live-in
        sets.

        :return: True if the set of registers has changed in this update."""
        previousSize = len(self._liveOut)
        merged = self._liveOut
        for successor in self._successors:
            merged = merged | successor.In
        self._liveOut = merged
        return previousSize != len(self._liveOut)

    def UpdateIn(self):
        """Recompute the live-in set as use ∪ (out − def).

        :return: True if the set of registers has changed in this update."""
        previousSize = len(self._liveIn)
        self._liveIn = self.Use | (self.Out - self.Def)
        return previousSize != len(self._liveIn)

    @property
    def Block(self):
        """Get a reference to the original enclosing block."""
        return self._block

    @property
    def Order(self):
        """Get an order number within the enclosing basic block."""
        return self._order

    @property
    def Successors(self):
        """Get the successor nodes."""
        return self._successors

    @property
    def Predecessors(self):
        """Get the predecessor nodes."""
        return self._predecessors

    @property
    def Instruction(self):
        """Get the contained instruction."""
        return self._instruction

    def Link(self, other):
        """Record a directed edge self -> other."""
        self._successors.append(other)
        other._predecessors.append(self)
def LowerBasicBlockCFGtoPICFG(basicBlock, visitedBasicBlocks=None):
    """Expand a basic-block CFG into a per-instruction CFG.

    :param basicBlock: block to lower (its successors are lowered
        recursively).
    :param visitedBasicBlocks: memo dict mapping block labels to their node
        lists; pass None (default) for a fresh traversal.
    :return: list of PICFGNode, one per instruction of *basicBlock* (or a
        single instruction-less node for an empty entry/exit block).

    Bug fix: the original used the mutable default ``visitedBasicBlocks={}``,
    which persisted between top-level calls and returned stale nodes from a
    previous graph on any subsequent invocation.
    """
    if visitedBasicBlocks is None:
        visitedBasicBlocks = {}
    if basicBlock.Label in visitedBasicBlocks:
        return visitedBasicBlocks [basicBlock.Label]
    blockNodes = [PICFGNode(instruction,basicBlock,i) for i,instruction in enumerate (basicBlock.Instructions)]
    if not blockNodes:
        blockNodes = [PICFGNode(None, basicBlock, 0)] # Create an empty node for the entry/exit basic block
    # Chain the instructions of this block linearly.
    for pair in pairwise(blockNodes):
        pair[0].Link (pair [1])
    # Register before recursing so cycles in the CFG terminate.
    visitedBasicBlocks [basicBlock.Label] = blockNodes
    for successor in basicBlock.Successors:
        nodes = LowerBasicBlockCFGtoPICFG(successor, visitedBasicBlocks)
        if nodes:
            blockNodes [-1].Link (nodes [0])
    return blockNodes
class GatherOrder(Enum):
    """Traversal direction selector for _GatherDepthFirst."""
    Successors = 0    # walk forward along successor edges
    Predecessors = 1  # walk backward along predecessor edges
def _GatherDepthFirst(node, order):
    """Collect all nodes reachable from *node* in depth-first order.

    Iterative on an explicit stack — the graphs can be too deep for
    recursion.  Nodes are deduplicated by identity.
    """
    pending = [node]
    seen = set()
    collected = []
    followSuccessors = order == GatherOrder.Successors
    while pending:
        current = pending.pop()
        key = id(current)
        if key in seen:
            continue
        seen.add(key)
        collected.append(current)
        neighbours = current.Successors if followSuccessors else current.Predecessors
        pending.extend(neighbours)
    return collected
def GatherSuccessorsDepthFirst (node):
    """Return every node reachable from *node* along successor edges."""
    return _GatherDepthFirst (node, GatherOrder.Successors)
def GatherPredecessorsDepthFirst(node):
    """Return every node reaching *node* along predecessor edges."""
    return _GatherDepthFirst(node, GatherOrder.Predecessors)
def DumpInstructionVGPRUsage(input, output, isaFormat, summaryOnly):
    '''Dump the live registers for every instruction.

    Runs an iterative liveness analysis over the per-instruction CFG until
    the in/out sets reach a fixed point, then prints one row per instruction
    (unless summaryOnly is truthy) followed by a summary line.
    '''
    basicBlocks = LoadIsa (input, isaFormat)
    entry = LowerBasicBlockCFGtoPICFG (basicBlocks [0])
    nodes = GatherSuccessorsDepthFirst (entry[0])
    entryNode = entry[0]
    # Find the exit node by looking for a node without successors
    exitNode = None
    for node in nodes:
        if not node.Successors:
            exitNode = node
            break
    # Walk backwards from the exit so the dataflow iteration converges fast.
    dfs = GatherPredecessorsDepthFirst (exitNode)
    while True:
        # One liveness sweep; repeat until no in/out set changed.
        updates = []
        for node in dfs:
            updates.append (node.UpdateOut ())
            updates.append (node.UpdateIn ())
        if any (updates):
            continue
        else:
            break
    # Enumerate the PICFG in original order
    enumeratedNodes = [{'sortKey':(node.Block.Order,node.Order),'node':node} for node in dfs]
    enumeratedNodes = sorted(enumeratedNodes, key=operator.itemgetter('sortKey'))
    instructionNodes = [node['node'] for node in enumeratedNodes]
    def _Max(s):
        # max() over a possibly empty set, defaulting to 0.
        if s:
            return max(s)
        else:
            return 0
    print('Legend:', file=output)
    print(' \':\' means that the register is kept alive, while it is not actively being used by the current instruction', file=output)
    print(' \'^\' means that the current instruction writes to the register', file=output)
    print(' \'v\' means that the current instruction reads from the register', file=output)
    print(' \'x\' means that the current instruction both reads from the register and writes to it', file=output)
    print(' \'Rn\': Number of live registers\n', file=output)
    # maxVGPR: largest number of simultaneously live registers anywhere;
    # highestVGPR: highest register index ever observed live.
    maxVGPR = 0
    highestVGPR = 0
    for node in instructionNodes:
        maxVGPR = max (len (set.union (node.In, node.Out)), maxVGPR)
        highestVGPR = max (highestVGPR, _Max (node.In), _Max (node.Out))
    print(' Line | Rn | {:{width}} | Instruction'.format('Reg State', width=highestVGPR+1), file=output)
    print('--------------------------------------------------------------------------------------------------------------------------', file=output)
    if not summaryOnly:
        for lineNumber, node in enumerate (instructionNodes):
            if node.Instruction is None:
                # entry/exit node
                continue
            liveVGPR = set.union (node.In, node.Out)
            readVGPR = node.Use
            writtenVGPR = node.Def
            # Render one column per register index, marking its state.
            vgprStr = ''
            for i in range (highestVGPR+1):
                isRead = i in readVGPR
                isWritten = i in writtenVGPR
                if isRead and isWritten:
                    vgprStr += 'x'
                elif isRead:
                    vgprStr += 'v'
                elif isWritten:
                    vgprStr += '^'
                elif i in liveVGPR:
                    vgprStr += ':'
                else:
                    vgprStr += ' '
            print ('{0: >5} | {1: >3} | {2: <9} | {3}'.format (lineNumber, len (liveVGPR), vgprStr, node.Instruction), file=output)
        print (file=output)
    # +1, if we only use VGPR 0 then the number of allocated ones is 1
    print ('Maximum # VGPR used {0: >3}, # VGPR allocated: {1: >3}'.format (maxVGPR, highestVGPR+1), file=output)
def DumpPICFGDot(input, output, isaFormat):
    '''Dump a per-instruction control flow graph as a Graphviz dot file.

    Each node is one instruction; instruction-less nodes (empty blocks) are
    rendered with their block label, or 'Entry'/'Exit' for the synthetic end
    blocks.
    '''
    basicBlocks = LoadIsa (input, isaFormat)
    entry = LowerBasicBlockCFGtoPICFG (basicBlocks [0])
    nodes = GatherSuccessorsDepthFirst (entry[0])
    links = []
    output.write ('digraph {\nnode [shape=box]\n')
    for node in nodes:
        shape = ''
        if node.Instruction:
            label = node.Instruction.ToString (includeLabel=False)
        else:
            # Bug fix: 'label' was previously left unassigned for an empty
            # block that is neither entry nor exit, raising NameError; fall
            # back to the block's own label.
            label = node.Block.Label
            if node.Block.IsEntry or node.Block.IsExit:
                shape = 'style=rounded'
            if node.Block.IsEntry:
                label = 'Entry'
            elif node.Block.IsExit:
                label = 'Exit'
        output.write ('"{}" [ label= "{}\\l" {}]\n'.format (id (node), label, shape))
        for succ in node.Successors:
            links.append ((node, succ))
    for link in links:
        output.write ('"{}":s -> "{}";\n'.format (id (link [0]), id (link [1])))
    output.write ('}\n')
def DumpOpcodeHistogram(input, output, isaFormat, args):
    """Write a histogram of opcode usage, sorted by frequency.

    args.group selects optional bucketing: 'operand-size' groups by the
    type suffix (i32, f64, ...), 'instruction-class' by encoding class.
    """
    import math
    counts = {}
    for block in LoadIsa(input, isaFormat):
        for instruction in block.Instructions:
            name = instruction.OpCode.Name
            if args.group == 'operand-size':
                underscore = name.rfind('_')
                if underscore == -1:
                    name = 'unknown'
                else:
                    suffix = name[underscore + 1:]
                    # check if starts with i, u, or f, otherwise it's something
                    # like _x2
                    if suffix[0] in {'i', 'u', 'f', 'b'} and suffix[1].isdigit():
                        name = suffix
                    else:
                        name = 'unknown'
            elif args.group == 'instruction-class':
                name = instruction.OpCode.Class.name
            counts[name] = counts.get(name, 0) + 1
    widestName = max(map(len, counts.keys())) + 2
    total = sum(counts.values())
    countWidth = int(math.ceil(math.log10(total)) + 1)
    rowFormat = '{{0:{0}}} {{1:>{1}}} ({{2:6.2f}} %)\n'.format(widestName, countWidth)
    for name, count in sorted(counts.items(), key=operator.itemgetter(1), reverse=True):
        output.write(rowFormat.format(name, count, count / total * 100))
__version__ = '1.2.4'
if __name__=='__main__':
    # Command-line entry point: one sub-command per analysis mode.
    parser = argparse.ArgumentParser(description='SHAE shader analyzer')
    parser.add_argument('--version', action='version', version='%(prog)s {}'.format (__version__))
    parser.add_argument ('-f', '--format', choices=['HSAIL', 'ShaderAnalyzer', 'ShaderDump', 'raw', 'auto'],
        help="The input format.", default='auto')
    subparsers = parser.add_subparsers (help='Subcommands', dest='command')
    # dump-bb-cfg: basic-block level control-flow graph (Graphviz dot).
    dump_dot = subparsers.add_parser ('dump-bb-cfg', help='Dump the basic-block CFG to a dot file.')
    dump_dot.add_argument ('input', type=argparse.FileType ('r', encoding='UTF-8'), default=sys.stdin)
    dump_dot.add_argument ('output', type=argparse.FileType ('w', encoding='UTF-8'), default=sys.stdout)
    dump_dot.add_argument ('-c', '--compact', action='store_true', help='Compact output', default=False)
    # dump-pi-cfg: per-instruction control-flow graph (Graphviz dot).
    dump_picfg_dot = subparsers.add_parser ('dump-pi-cfg', help='Dump the per-instruction CFG to a dot file.')
    dump_picfg_dot.add_argument ('input', type=argparse.FileType ('r', encoding='UTF-8'), default=sys.stdin)
    dump_picfg_dot.add_argument ('output', type=argparse.FileType ('w', encoding='UTF-8'), default=sys.stdout)
    # dump-bb-vgpr: per-block vector-register usage table.
    dump_bb_vgpr = subparsers.add_parser ('dump-bb-vgpr', help='Dump per-basic-block VGPR usage.')
    dump_bb_vgpr.add_argument ('input', type=argparse.FileType ('r', encoding='UTF-8'), default=sys.stdin)
    dump_bb_vgpr.add_argument ('output', type=argparse.FileType ('w', encoding='UTF-8'), default=sys.stdout)
    # analyse-liveness (alias analyze-liveness): per-instruction liveness.
    dump_vgpr = subparsers.add_parser ('analyse-liveness', help='Write per-instruction VGPR liveness and usage.',
        aliases=['analyze-liveness'])
    dump_vgpr.add_argument ('input', type=argparse.FileType ('r', encoding='UTF-8'), default=sys.stdin)
    dump_vgpr.add_argument ('output', type=argparse.FileType ('w', encoding='UTF-8'), default=sys.stdout)
    dump_vgpr.add_argument ('-s', '--summary', action='store_true', help='Write only the summary line', default=False)
    # opcode-histogram: opcode frequency table, optionally grouped.
    dump_opcode_histogram = subparsers.add_parser ('opcode-histogram', help='Write a histogram showing how often each opcode has been used.')
    dump_opcode_histogram.add_argument ('input', type=argparse.FileType ('r', encoding='UTF-8'), default=sys.stdin)
    dump_opcode_histogram.add_argument ('output', type=argparse.FileType ('w', encoding='UTF-8'), default=sys.stdout)
    dump_opcode_histogram.add_argument ('-g', '--group', choices=['operand-size', 'instruction-class'],
        help="Group instructions by type")
    args = parser.parse_args ()
    if args.command is None:
        # No sub-command given: show usage and exit with an error status.
        parser.print_help ()
        sys.exit (1)
    if args.command == 'dump-bb-cfg':
        DumpCFGDot (args.input, args.output, args.format)
    elif args.command == 'dump-pi-cfg':
        DumpPICFGDot (args.input, args.output, args.format)
    elif args.command == 'dump-bb-vgpr':
        DumpBasicBlockVGPRUsage (args.input, args.output, args.format)
    elif args.command == 'analyse-liveness' or args.command == 'analyze-liveness':
        DumpInstructionVGPRUsage (args.input, args.output, args.format, args.summary)
    elif args.command == 'opcode-histogram':
        DumpOpcodeHistogram (args.input, args.output, args.format, args)
| |
"""
ATD (1995) Colour Vision Model
==============================
Defines the *ATD (1995)* colour vision model objects:
- :class:`colour.CAM_Specification_ATD95`
- :func:`colour.XYZ_to_ATD95`
Notes
-----
- According to *CIE TC1-34* definition of a colour appearance model, the
*ATD (1995)* model cannot be considered as a colour appearance model.
It was developed with different aims and is described as a model of colour
vision.
References
----------
- :cite:`Fairchild2013v` : Fairchild, M. D. (2013). ATD Model. In Color
Appearance Models (3rd ed., pp. 5852-5991). Wiley. ISBN:B00DAYO8E2
- :cite:`Guth1995a` : Guth, S. L. (1995). Further applications of the ATD
model for color vision. In E. Walowit (Ed.), Proc. SPIE 2414,
Device-Independent Color Imaging II (Vol. 2414, pp. 12-26).
doi:10.1117/12.206546
"""
from __future__ import annotations
import numpy as np
from dataclasses import dataclass, field
from colour.algebra import spow, vector_dot
from colour.hints import (
ArrayLike,
FloatingOrArrayLike,
FloatingOrNDArray,
NDArray,
Optional,
)
from colour.utilities import (
MixinDataclassArithmetic,
as_float,
as_float_array,
from_range_degrees,
to_domain_100,
tsplit,
tstack,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"CAM_ReferenceSpecification_ATD95",
"CAM_Specification_ATD95",
"XYZ_to_ATD95",
"luminance_to_retinal_illuminance",
"XYZ_to_LMS_ATD95",
"opponent_colour_dimensions",
"final_response",
]
@dataclass
class CAM_ReferenceSpecification_ATD95(MixinDataclassArithmetic):
    """
    Define the *ATD (1995)* colour vision model reference specification.

    This specification has field names consistent with *Fairchild (2013)*
    reference.

    Parameters
    ----------
    H
        *Hue* angle :math:`H` in degrees.
    C
        Correlate of *saturation* :math:`C`. *Guth (1995)* incorrectly uses the
        terms saturation and chroma interchangeably. However, :math:`C` is here
        a measure of saturation rather than chroma since it is measured
        relative to the achromatic response for the stimulus rather than that
        of a similarly illuminated white.
    Br
        Correlate of *brightness* :math:`Br`.
    A_1
        First stage :math:`A_1` response.
    T_1
        First stage :math:`T_1` response.
    D_1
        First stage :math:`D_1` response.
    A_2
        Second stage :math:`A_2` response.
    T_2
        Second stage :math:`T_2` response.
    D_2
        Second stage :math:`D_2` response.

    References
    ----------
    :cite:`Fairchild2013v`, :cite:`Guth1995a`
    """

    H: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    C: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    Br: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    A_1: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    T_1: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    D_1: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    A_2: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    T_2: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    D_2: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
@dataclass
class CAM_Specification_ATD95(MixinDataclassArithmetic):
    """
    Define the *ATD (1995)* colour vision model specification.

    This specification has field names consistent with the remaining colour
    appearance models in :mod:`colour.appearance` but diverge from
    *Fairchild (2013)* reference.

    Parameters
    ----------
    h
        *Hue* angle :math:`H` in degrees.
    C
        Correlate of *saturation* :math:`C`. *Guth (1995)* incorrectly uses the
        terms saturation and chroma interchangeably. However, :math:`C` is here
        a measure of saturation rather than chroma since it is measured
        relative to the achromatic response for the stimulus rather than that
        of a similarly illuminated white.
    Q
        Correlate of *brightness* :math:`Br`.
    A_1
        First stage :math:`A_1` response.
    T_1
        First stage :math:`T_1` response.
    D_1
        First stage :math:`D_1` response.
    A_2
        Second stage :math:`A_2` response.
    T_2
        Second stage :math:`T_2` response.
    D_2
        Second stage :math:`D_2` response.

    Notes
    -----
    -   This specification is the one used in the current model implementation.

    References
    ----------
    :cite:`Fairchild2013v`, :cite:`Guth1995a`
    """

    h: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    C: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    Q: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    A_1: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    T_1: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    D_1: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    A_2: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    T_2: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    D_2: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
def XYZ_to_ATD95(
    XYZ: ArrayLike,
    XYZ_0: ArrayLike,
    Y_0: FloatingOrArrayLike,
    k_1: FloatingOrArrayLike,
    k_2: FloatingOrArrayLike,
    sigma: FloatingOrArrayLike = 300,
) -> CAM_Specification_ATD95:
    """
    Compute the *ATD (1995)* colour vision model correlates.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_0
        *CIE XYZ* tristimulus values of reference white.
    Y_0
        Absolute adapting field luminance in :math:`cd/m^2`.
    k_1
        Application specific weight :math:`k_1`.
    k_2
        Application specific weight :math:`k_2`.
    sigma
        Constant :math:`\\sigma` varied to predict different types of data.

    Returns
    -------
    :class:`colour.CAM_Specification_ATD95`
        *ATD (1995)* colour vision model specification.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ``    | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+
    | ``XYZ_0``  | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+

    +-------------------------------+-----------------------+---------------+
    | **Range**                     | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_ATD95.h`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    -   For unrelated colors, there is only self-adaptation and :math:`k_1` is
        set to 1.0 while :math:`k_2` is set to 0.0. For related colors such as
        typical colorimetric applications, :math:`k_1` is set to 0.0 and
        :math:`k_2` is set to a value between 15 and 50 *(Guth, 1995)*.

    References
    ----------
    :cite:`Fairchild2013v`, :cite:`Guth1995a`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_0 = np.array([95.05, 100.00, 108.88])
    >>> Y_0 = 318.31
    >>> k_1 = 0.0
    >>> k_2 = 50.0
    >>> XYZ_to_ATD95(XYZ, XYZ_0, Y_0, k_1, k_2) # doctest: +ELLIPSIS
    CAM_Specification_ATD95(h=1.9089869..., C=1.2064060..., Q=0.1814003..., \
A_1=0.1787931..., T_1=0.0286942..., D_1=0.0107584..., A_2=0.0192182..., \
T_2=0.0205377..., D_2=0.0107584...)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_0 = to_domain_100(XYZ_0)
    Y_0 = as_float_array(Y_0)
    k_1 = as_float_array(k_1)
    k_2 = as_float_array(k_2)
    sigma = as_float_array(sigma)

    # Convert both stimulus and reference white to retinal illuminance.
    XYZ = luminance_to_retinal_illuminance(XYZ, Y_0)
    XYZ_0 = luminance_to_retinal_illuminance(XYZ_0, Y_0)

    # Computing adaptation model.
    LMS = XYZ_to_LMS_ATD95(XYZ)
    XYZ_a = k_1[..., np.newaxis] * XYZ + k_2[..., np.newaxis] * XYZ_0
    LMS_a = XYZ_to_LMS_ATD95(XYZ_a)
    # Gain-controlled cone signals.
    LMS_g = LMS * (sigma[..., np.newaxis] / (sigma[..., np.newaxis] + LMS_a))

    # Computing opponent colour dimensions.
    A_1, T_1, D_1, A_2, T_2, D_2 = tsplit(opponent_colour_dimensions(LMS_g))

    # Computing the correlate of *brightness* :math:`Br`.
    Br = spow(A_1**2 + T_1**2 + D_1**2, 0.5)

    # Computing the correlate of *saturation* :math:`C`.
    # NOTE(review): divides by A_2 (and by D_2 below) with no zero guard —
    # presumably inputs keep these non-zero; confirm against reference data.
    C = spow(T_2**2 + D_2**2, 0.5) / A_2

    # Computing the *hue* :math:`H`. Note that the reference does not take the
    # modulus of the :math:`H`, thus :math:`H` can exceed 360 degrees.
    H = T_2 / D_2

    return CAM_Specification_ATD95(
        as_float(from_range_degrees(H)),
        C,
        Br,
        A_1,
        T_1,
        D_1,
        A_2,
        T_2,
        D_2,
    )
def luminance_to_retinal_illuminance(
    XYZ: ArrayLike, Y_c: FloatingOrArrayLike
) -> NDArray:
    """
    Convert from luminance in :math:`cd/m^2` to retinal illuminance in
    trolands.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values.
    Y_c
        Absolute adapting field luminance in :math:`cd/m^2`.

    Returns
    -------
    :class:`numpy.ndarray`
        Converted *CIE XYZ* tristimulus values in trolands.

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> Y_0 = 318.31
    >>> luminance_to_retinal_illuminance(XYZ, Y_0)  # doctest: +ELLIPSIS
    array([ 479.4445924...,  499.3174313...,  534.5631673...])
    """

    XYZ = as_float_array(XYZ)
    Y_c = as_float_array(Y_c)

    # Scale by the adapting luminance (broadcast over the last axis),
    # normalise, then apply the troland conversion 18 * x ^ 0.8.
    scaled = Y_c[..., np.newaxis] * XYZ / 100

    return as_float_array(18 * spow(scaled, 0.8))
def XYZ_to_LMS_ATD95(XYZ: ArrayLike) -> NDArray:
    """
    Convert from *CIE XYZ* tristimulus values to *LMS* cone responses.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values.

    Returns
    -------
    :class:`numpy.ndarray`
        *LMS* cone responses.

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_to_LMS_ATD95(XYZ)  # doctest: +ELLIPSIS
    array([ 6.2283272...,  7.4780666...,  3.8859772...])
    """

    # Linear transformation from XYZ to cone space, followed by
    # per-channel scaling.
    M = [
        [0.2435, 0.8524, -0.0516],
        [-0.3954, 1.1642, 0.0837],
        [0.0000, 0.0400, 0.6225],
    ]
    LMS = vector_dot(M, XYZ) * np.array([0.66, 1.0, 0.43])

    # Compressive non-linearity with additive noise terms.
    LMS_p = spow(LMS, 0.7) + np.array([0.024, 0.036, 0.31])

    return as_float_array(LMS_p)
def opponent_colour_dimensions(LMS_g: ArrayLike) -> NDArray:
    """
    Return opponent colour dimensions from given post adaptation cone signals.

    Parameters
    ----------
    LMS_g
        Post adaptation cone signals.

    Returns
    -------
    :class:`numpy.ndarray`
        Opponent colour dimensions.

    Examples
    --------
    >>> LMS_g = np.array([6.95457922, 7.08945043, 6.44069316])
    >>> opponent_colour_dimensions(LMS_g)  # doctest: +ELLIPSIS
    array([ 0.1787931...,  0.0286942...,  0.0107584...,  0.0192182...,  ...])
    """

    L_g, M_g, S_g = tsplit(LMS_g)

    # First-stage initial opponent signals.
    A_1i = 3.57 * L_g + 2.64 * M_g
    T_1i = 7.18 * L_g - 6.21 * M_g
    D_1i = -0.7 * L_g + 0.085 * M_g + S_g

    # Second-stage initial signals, derived from the first stage.
    A_2i = 0.09 * A_1i
    T_2i = 0.43 * T_1i + 0.76 * D_1i
    D_2i = D_1i

    # Apply the compressive final response to every signal.
    responses = [
        final_response(signal)
        for signal in (A_1i, T_1i, D_1i, A_2i, T_2i, D_2i)
    ]

    return tstack(responses)
def final_response(value: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Return the final response of given opponent colour dimension.

    Parameters
    ----------
    value
        Opponent colour dimension.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Final response of opponent colour dimension.

    Examples
    --------
    >>> final_response(43.54399695501678)  # doctest: +ELLIPSIS
    0.1787931...
    """

    v = as_float_array(value)

    # Naka-Rushton-style compression: v / (200 + |v|).
    return as_float(v / (200 + np.abs(v)))
| |
#===============================================================================
# Copyright 2009 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""This module defines writer and reader classes for a fast, immutable
on-disk key-value database format. The current format is identical
to D. J. Bernstein's CDB format (http://cr.yp.to/cdb.html).
"""
from array import array
from bisect import bisect_right
from cPickle import dumps, loads
from whoosh.system import _USHORT_SIZE, _INT_SIZE
from whoosh.util import utf8encode, utf8decode
from whoosh.util.collections2 import defaultdict
from whoosh.util.struct2 import Struct
def cdb_hash(key):
    # D. J. Bernstein's CDB hash: starting from 5381, for each byte c
    # compute h = ((h << 5) + h) ^ c, kept within 32 bits.  The mask is
    # applied before the XOR here, which is equivalent because ord(c) < 256
    # cannot set bits above the mask.
    h = 5381L
    for c in key:
        h = (h + (h << 5)) & 0xffffffffL ^ ord(c)
    return h
# Read/write convenience functions

_2ints = Struct("!II")
pack2ints = _2ints.pack


def writeints(f, value1, value2):
    """Write two big-endian unsigned 32-bit integers to file f."""
    f.write(pack2ints(value1, value2))


_unpack2ints = _2ints.unpack


def unpack2ints(s):
    """Unpack a pair of big-endian unsigned 32-bit integers from s."""
    return _unpack2ints(s)


# BUG FIX: this unpacker was originally bound to the name "_unpackint",
# which is rebound below to the *signed* "!i" unpacker; because the lookup
# happens at call time, readint() silently read values as signed integers.
# Binding the unsigned unpacker to its own name restores the intended
# unsigned semantics.
_unpackuint = Struct("!I").unpack


def readint(map, offset):
    """Read one big-endian unsigned 32-bit integer from map at offset."""
    return _unpackuint(map[offset:offset + 4])[0]


# Encoders and decoders for storing complex types in
# string -> string hash files.

_int_struct = Struct("!i")
packint = _int_struct.pack
_unpackint = _int_struct.unpack


def unpackint(s):
    """Unpack a big-endian signed 32-bit integer from s."""
    return _unpackint(s)[0]


_ushort_struct = Struct("!H")
packushort = _ushort_struct.pack
_unpackushort = _ushort_struct.unpack


def unpackushort(s):
    """Unpack a big-endian unsigned 16-bit integer from s."""
    return _unpackushort(s)[0]
def encode_termkey(term):
    # A term key is the field number as a big-endian unsigned short
    # followed by the term text encoded to UTF-8 (utf8encode returns a
    # (bytes, length) tuple; only the bytes are used).
    fieldnum, text = term
    return packushort(fieldnum) + utf8encode(text)[0]


def decode_termkey(key):
    # Inverse of encode_termkey(): returns a (fieldnum, text) tuple.
    return unpackushort(key[:_USHORT_SIZE]), utf8decode(key[_USHORT_SIZE:])[0]


# Vector keys are (docnum, fieldnum) pairs packed big-endian as
# (unsigned int, signed int).
_vkey_struct = Struct("!Ii")
_pack_vkey = _vkey_struct.pack


def encode_vectorkey(docandfield):
    return _pack_vkey(*docandfield)


decode_vectorkey = _vkey_struct.unpack

# Document numbers are stored as big-endian signed 32-bit integers.
encode_docnum = packint
decode_docnum = unpackint

# Term info records are packed big-endian as "!ILI"; judging by the
# cf_offset_df parameter name these are presumably
# (collection frequency, offset, doc frequency) -- TODO confirm with caller.
_terminfo_struct = Struct("!ILI")
_terminfo_pack = _terminfo_struct.pack


def encode_terminfo(cf_offset_df):
    return _terminfo_pack(*cf_offset_df)


decode_terminfo = _terminfo_struct.unpack


def enpickle(data):
    "Encodes a value as a string for storage in a table."
    return dumps(data, -1)


# Inverse of enpickle(); NOTE: unpickling is only safe on trusted data.
depickle = loads
# Table classes
class FileHashWriter(object):
    """Writes an immutable CDB-format hash file.

    Layout: a 2048-byte directory (256 buckets x two unsigned ints:
    table position and slot count) at the start of the file, then the
    (keylen, datalen, key, data) records, then the hash tables.
    """

    def __init__(self, dbfile):
        self.dbfile = dbfile
        # Reserve space for the 256-entry directory, filled in on close().
        dbfile.seek(2048)
        # Maps bucket number (hash & 255) -> list of (hash, record position).
        self.hashes = defaultdict(list)

    def add_all(self, items):
        """Add an iterable of (key, value) string pairs."""
        dbfile = self.dbfile
        hashes = self.hashes
        pos = dbfile.tell()
        write = dbfile.write

        for key, value in items:
            writeints(dbfile, len(key), len(value))
            write(key + value)

            h = cdb_hash(key)
            hashes[h & 255].append((h, pos))
            # Each record occupies the two length ints (8 bytes) plus the
            # key and value bytes.
            pos += len(key) + len(value) + 8

    def add(self, key, value):
        """Add a single (key, value) pair."""
        self.add_all(((key, value),))

    def add_key(self, key):
        """Add a key with an empty value.

        BUG FIX: this previously called writeints(dbfile) with no values,
        which unconditionally raised a TypeError; storing the key with an
        empty value records it in the hash tables like add() does.
        """
        self.add(key, "")

    def _write_hashes(self):
        # Build and write the 256 hash tables after the records; each
        # table has 2 * len(entries) slots filled by open addressing.
        dbfile = self.dbfile
        hashes = self.hashes
        directory = self.directory = []

        pos = dbfile.tell()
        for i in xrange(0, 256):
            entries = hashes[i]
            numslots = 2 * len(entries)
            directory.append((pos, numslots))

            null = (0, 0)
            hashtable = [null] * numslots
            for hashval, position in entries:
                # Slot index comes from the upper hash bits; probe linearly
                # until a free slot is found.
                n = (hashval >> 8) % numslots
                while hashtable[n] is not null:
                    n = (n + 1) % numslots
                hashtable[n] = (hashval, position)

            for hashval, position in hashtable:
                writeints(dbfile, hashval, position)
                pos += 8

        dbfile.flush()

    def _write_directory(self):
        # Patch the (position, numslots) directory at the head of the file.
        dbfile = self.dbfile
        directory = self.directory

        dbfile.seek(0)
        for position, numslots in directory:
            writeints(dbfile, position, numslots)
        assert dbfile.tell() == 2048
        dbfile.flush()

    def close(self):
        self._write_hashes()
        self._write_directory()
        self.dbfile.close()
class FileHashReader(object):
    """Reads the immutable CDB-format hash files written by FileHashWriter.

    Lookups use the 256-bucket directory at the start of the file;
    records are (keylen, datalen, key, data) runs starting at offset 2048.
    """

    def __init__(self, dbfile):
        self.dbfile = dbfile
        # Memory map (or buffer) of the file; all reads slice this.
        self.map = dbfile.map
        # The first directory entry holds the position of the first hash
        # table, which is also where the record data ends.
        self.end_of_data = dbfile.get_uint(0)
        self.is_closed = False

    def close(self):
        # Guard against double close so the underlying map/file is only
        # released once.
        if self.is_closed:
            raise Exception("Tried to close %r twice" % self)
        del self.map
        self.dbfile.close()
        self.is_closed = True

    def read(self, position, length):
        # Return `length` raw bytes starting at `position`.
        return self.map[position:position + length]

    def read2ints(self, position):
        # Read two big-endian unsigned 32-bit integers at `position`.
        return unpack2ints(self.map[position:position + _INT_SIZE * 2])

    def _ranges(self, pos=2048):
        # Yield (keypos, keylen, datapos, datalen) for every record,
        # starting at `pos` (default: just after the 2048-byte directory).
        read2ints = self.read2ints
        eod = self.end_of_data
        while pos < eod:
            keylen, datalen = read2ints(pos)
            keypos = pos + 8
            datapos = pos + 8 + keylen
            pos = datapos + datalen
            yield (keypos, keylen, datapos, datalen)

    def __iter__(self):
        return self.items()

    def items(self):
        # Yield (key, value) pairs in file order.
        read = self.read
        for keypos, keylen, datapos, datalen in self._ranges():
            yield (read(keypos, keylen), read(datapos, datalen))

    def keys(self):
        # Yield keys in file order.
        read = self.read
        for keypos, keylen, _, _ in self._ranges():
            yield read(keypos, keylen)

    def values(self):
        # Yield values in file order.
        read = self.read
        for _, _, datapos, datalen in self._ranges():
            yield read(datapos, datalen)

    def __getitem__(self, key):
        # Return the first value stored under `key`; KeyError if absent.
        for data in self.all(key):
            return data
        raise KeyError(key)

    def get(self, key, default=None):
        # Like __getitem__() but returns `default` instead of raising.
        for data in self.all(key):
            return data
        return default

    def _hashtable_info(self, keyhash):
        # Directory entry for this hash's bucket: (table position, numslots).
        # `keyhash << 3 & 2047` is equivalent to `(keyhash & 255) * 8`.
        return self.read2ints(keyhash << 3 & 2047)

    def _key_position(self, key):
        # Record position from the first probed slot for `key`.
        # NOTE(review): unlike _get_ranges(), this does not verify the
        # stored hash or probe past the first slot -- presumably only safe
        # when the key is known to be present; confirm against callers.
        keyhash = cdb_hash(key)
        hpos, hslots = self._hashtable_info(keyhash)
        if not hslots:
            raise KeyError(key)
        slotpos = hpos + (((keyhash >> 8) % hslots) << 3)
        u, pos = self.read2ints(slotpos)
        return pos

    def _get_ranges(self, key):
        # Yield (datapos, datalen) for every record stored under `key`,
        # linear-probing the bucket's hash table with wrap-around.
        read = self.read
        read2ints = self.read2ints
        keyhash = cdb_hash(key)
        hpos, hslots = self._hashtable_info(keyhash)
        if not hslots:
            return

        slotpos = hpos + (((keyhash >> 8) % hslots) << 3)
        for _ in xrange(0, hslots):
            u, pos = read2ints(slotpos)
            # An empty slot (zero position) terminates the probe sequence.
            if not pos:
                return

            slotpos += 8
            # If we reach the end of the hashtable, wrap around
            if slotpos == hpos + (hslots << 3):
                slotpos = hpos

            # Only compare the actual key bytes when both the hash and the
            # key length match.
            if u == keyhash:
                keylen, datalen = read2ints(pos)
                if keylen == len(key):
                    if key == read(pos + 8, keylen):
                        yield (pos + 8 + keylen, datalen)

    def all(self, key):
        # Yield every value stored under `key` (duplicate keys allowed).
        read = self.read
        for datapos, datalen in self._get_ranges(key):
            yield read(datapos, datalen)

    def __contains__(self, key):
        for _ in self._get_ranges(key):
            return True
        return False
class OrderedHashWriter(FileHashWriter):
    """Hash file writer that requires keys to be added in strictly
    increasing order and maintains a sampled key index (every
    `blocksize`-th key) so OrderedHashReader can seek near any key.
    """

    def __init__(self, dbfile, blocksize=100):
        FileHashWriter.__init__(self, dbfile)
        # One key out of every `blocksize` is recorded in self.index.
        self.blocksize = blocksize
        self.index = []
        # Keys added since the last indexed key; None until the first add.
        self.indexcount = None
        self.lastkey = None

    def add_all(self, items):
        # Re-implements FileHashWriter.add_all() so the ordering check and
        # index sampling run inside the same loop as the record writes.
        dbfile = self.dbfile
        hashes = self.hashes
        pos = dbfile.tell()
        write = dbfile.write
        ix = self.index
        ic = self.indexcount
        bs = self.blocksize
        lk = self.lastkey
        for key, value in items:
            # Keys must be strictly increasing.  (On the first call lk is
            # None, which compares less than any string under Python 2.)
            if key <= lk:
                raise ValueError("Keys must increase: %r .. %r" % (lk, key))
            lk = key
            # Sample the first key and then every bs-th key thereafter.
            if ic is None:
                ix.append(key)
                ic = 0
            else:
                ic += 1
                if ic == bs:
                    ix.append(key)
                    ic = 0
            writeints(dbfile, len(key), len(value))
            write(key + value)
            h = cdb_hash(key)
            hashes[h & 255].append((h, pos))
            pos += len(key) + len(value) + 8
        self.indexcount = ic
        self.lastkey = lk

    def close(self):
        # The pickled key index is written between the hash tables and the
        # directory; OrderedHashReader locates it after bucket 255's table.
        self._write_hashes()
        self.dbfile.write_pickle(self.index)
        self._write_directory()
        self.dbfile.close()
class OrderedHashReader(FileHashReader):
    """Reads ordered hash files written by OrderedHashWriter and supports
    iterating items/keys/values starting from a given key, using the
    sampled key index pickled after the hash tables.
    """

    def __init__(self, dbfile):
        FileHashReader.__init__(self, dbfile)
        # The pickled key index was written directly after the last hash
        # table: find the end of bucket 255's table and read from there.
        lastpos, lastnum = self.read2ints(255 * 8)
        dbfile.seek(lastpos + lastnum * 8)
        self.index = dbfile.read_pickle()

    def _closest_key(self, key):
        # Largest indexed key <= `key` (or the first indexed key when the
        # given key sorts before the whole index).
        index = self.index
        i = max(0, bisect_right(index, key) - 1)
        return index[i]

    def _ranges_from(self, key):
        # Yield record ranges for all keys >= `key`: start at the closest
        # indexed key and scan forward to the first matching record.
        read = self.read
        ckey = self._closest_key(key)
        pos = self._key_position(ckey)
        if ckey != key:
            for keypos, keylen, _, _ in self._ranges(pos=pos):
                k = read(keypos, keylen)
                if k >= key:
                    # Back up to the record header (8 bytes of lengths).
                    pos = keypos - 8
                    break

        return self._ranges(pos=pos)

    def items_from(self, key):
        """Yield (key, value) pairs for all keys >= the given key."""
        read = self.read
        for keypos, keylen, datapos, datalen in self._ranges_from(key):
            yield (read(keypos, keylen), read(datapos, datalen))

    def keys_from(self, key):
        """Yield all keys >= the given key."""
        read = self.read
        for keypos, keylen, _, _ in self._ranges_from(key):
            yield read(keypos, keylen)

    def values_from(self, key):
        """Yield values for all keys >= the given key.

        BUG FIX (consistency): this method was originally named "values",
        which shadowed FileHashReader.values() with an incompatible
        signature; the _from suffix matches items_from()/keys_from().
        """
        read = self.read
        for _, _, datapos, datalen in self._ranges_from(key):
            yield read(datapos, datalen)

    # Backward-compatible alias for callers using the historical name.
    # Note it still shadows FileHashReader.values(), as before.
    values = values_from
class FileTableWriter(OrderedHashWriter):
    """Ordered hash writer whose keys and values pass through encoding
    callables before being stored (keys via `keycoder`, values via
    `valuecoder`, defaulting to str and enpickle respectively).
    """

    def __init__(self, dbfile, keycoder=None, valuecoder=None):
        parent = super(FileTableWriter, self)
        parent.__init__(dbfile)

        self.keycoder = keycoder or str
        self.valuecoder = valuecoder or enpickle

        # Bind the undecorated superclass add() once.
        self._add = parent.add

    def add(self, key, data):
        """Encode `key` and `data` and store the resulting pair."""
        self._add(self.keycoder(key), self.valuecoder(data))
class FileTableReader(OrderedHashReader):
    """Ordered hash reader that encodes lookup keys with `keycoder` and
    decodes stored keys/values with `keydecoder`/`valuedecoder`.
    """

    def __init__(self, dbfile, keycoder=None, keydecoder=None,
                 valuedecoder=None):
        parent = super(FileTableReader, self)
        parent.__init__(dbfile)

        self.keycoder = keycoder or str
        self.keydecoder = keydecoder or int
        self.valuedecoder = valuedecoder or depickle

        # Bind the undecoded superclass accessors once.
        self._items = parent.items
        self._items_from = parent.items_from
        self._keys = parent.keys
        self._keys_from = parent.keys_from
        self._getitem = parent.__getitem__
        self._contains = parent.__contains__

    def __getitem__(self, key):
        encoded = self.keycoder(key)
        return self.valuedecoder(self._getitem(encoded))

    def __contains__(self, key):
        return self._contains(self.keycoder(key))

    def items(self):
        """Yield decoded (key, value) pairs in key order."""
        decode_key = self.keydecoder
        decode_value = self.valuedecoder
        for k, v in self._items():
            yield (decode_key(k), decode_value(v))

    def items_from(self, key):
        """Yield decoded (key, value) pairs for all keys >= the given key."""
        start = self.keycoder(key)
        decode_key = self.keydecoder
        decode_value = self.valuedecoder
        for k, v in self._items_from(start):
            yield (decode_key(k), decode_value(v))

    def keys(self):
        """Yield decoded keys in key order."""
        decode_key = self.keydecoder
        for k in self._keys():
            yield decode_key(k)

    def keys_from(self, key):
        """Yield decoded keys for all keys >= the given key."""
        decode_key = self.keydecoder
        for k in self._keys_from(self.keycoder(key)):
            yield decode_key(k)
class FileRecordWriter(object):
    """Appends fixed-size records to a file, packed with the given
    struct format string.
    """

    def __init__(self, dbfile, format):
        self.dbfile = dbfile
        self.format = format
        # Pre-compile the packer once for all append() calls.
        packer = Struct(format)
        self._pack = packer.pack

    def close(self):
        self.dbfile.close()

    def append(self, args):
        """Pack the sequence `args` as one record and write it."""
        packed = self._pack(*args)
        self.dbfile.write(packed)
class FileRecordReader(object):
    """Random-access reader for files of fixed-size records packed with
    the given struct format string.
    """

    def __init__(self, dbfile, format):
        self.dbfile = dbfile
        self.map = dbfile.map
        self.format = format
        # Pre-compile the unpacker and remember the fixed record size.
        record_struct = Struct(format)
        self._unpack = record_struct.unpack
        self.itemsize = record_struct.size

    def close(self):
        del self.map
        self.dbfile.close()

    def record(self, recordnum):
        """Return the tuple stored at the given record number."""
        size = self.itemsize
        start = recordnum * size
        return self._unpack(self.map[start:start + size])

    def at(self, recordnum, itemnum):
        """Return field `itemnum` of record `recordnum`."""
        return self.record(recordnum)[itemnum]
class FileListWriter(object):
    """Writes a sequence of variable-length values.  Each value's
    (position, length) pair is kept in an in-memory directory that is
    appended to the file on close(); the first unsigned int of the file
    is then patched to point at that directory.
    """

    def __init__(self, dbfile, valuecoder=str):
        self.dbfile = dbfile
        self.directory = array("I")
        # Placeholder for the directory offset, filled in by close().
        dbfile.write_uint(0)
        self.valuecoder = valuecoder

    def close(self):
        f = self.dbfile
        directory_pos = f.tell()
        f.write_array(self.directory)
        f.flush()
        # Go back and record where the directory starts.
        f.seek(0)
        f.write_uint(directory_pos)
        f.close()

    def append(self, value):
        """Encode `value`, record its position and length, and write it."""
        f = self.dbfile
        encoded = self.valuecoder(value)
        self.directory.append(f.tell())
        self.directory.append(len(encoded))
        f.write(encoded)
class FileListReader(object):
    """Random-access reader for value files written by FileListWriter."""

    def __init__(self, dbfile, length, valuedecoder=str):
        self.dbfile = dbfile
        self.length = length
        self.valuedecoder = valuedecoder
        # The first unsigned int points at the (position, length) table.
        self.offset = dbfile.get_uint(0)

    def close(self):
        self.dbfile.close()

    def __getitem__(self, num):
        dbfile = self.dbfile
        # Each directory entry is two unsigned ints: position and length.
        entry = self.offset + num * (_INT_SIZE * 2)
        position, length = unpack2ints(
            dbfile.map[entry:entry + _INT_SIZE * 2])
        raw = dbfile.map[position:position + length]
        return self.valuedecoder(raw)
# Utility functions
def dump_hash(hashreader):
dbfile = hashreader.dbfile
read = hashreader.read
read2ints = hashreader.read2ints
eod = hashreader.end_of_data
# Dump hashtables
for bucketnum in xrange(0, 255):
pos, numslots = read2ints(bucketnum * 8)
if numslots:
print "Bucket %d: %d slots" % (bucketnum, numslots)
dbfile.seek(pos)
for j in xrange(0, numslots):
print " %X : %d" % read2ints(pos)
pos += 8
# Dump keys and values
print "-----"
dbfile.seek(2048)
pos = 2048
while pos < eod:
keylen, datalen = read2ints(pos)
keypos = pos + 8
datapos = pos + 8 + keylen
key = read(keypos, keylen)
data = read(datapos, datalen)
print "%d +%d,%d:%r->%r" % (pos, keylen, datalen, key, data)
pos = datapos + datalen
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
from model import Platforms
from schema_util import CapitalizeFirstLetter
from schema_util import JsFunctionNameToClassName
import json
import os
import re
# TODO(miket/asargent) - parameterize this.
SOURCE_BASE_PATH = 'chrome/common/extensions/api'
def _RemoveDescriptions(node):
"""Returns a copy of |schema| with "description" fields removed.
"""
if isinstance(node, dict):
result = {}
for key, value in node.items():
# Some schemas actually have properties called "description", so only
# remove descriptions that have string values.
if key == 'description' and isinstance(value, basestring):
continue
result[key] = _RemoveDescriptions(value)
return result
if isinstance(node, list):
return [_RemoveDescriptions(v) for v in node]
return node
class CppBundleGenerator(object):
  """This class contains methods to generate code based on multiple schemas.
  """

  def __init__(self, root, model, api_defs, cpp_type_generator, cpp_namespace):
    # |root| is the source tree root, used to check that "implemented_in"
    # headers actually exist on disk.
    self._root = root;
    self._model = model
    self._api_defs = api_defs
    self._cpp_type_generator = cpp_type_generator
    self._cpp_namespace = cpp_namespace
    # One sub-generator per output file produced by the bundle.
    self.api_cc_generator = _APICCGenerator(self)
    self.api_h_generator = _APIHGenerator(self)
    self.schemas_cc_generator = _SchemasCCGenerator(self)
    self.schemas_h_generator = _SchemasHGenerator(self)

  def _GenerateHeader(self, file_base, body_code):
    """Generates a code.Code object for a header file

    Parameters:
      - |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
      - |body_code| - the code to put in between the multiple inclusion guards"""
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % SOURCE_BASE_PATH)
    ifndef_name = cpp_util.GenerateIfndefName(SOURCE_BASE_PATH, file_base)
    c.Append()
    c.Append('#ifndef %s' % ifndef_name)
    c.Append('#define %s' % ifndef_name)
    c.Append()
    c.Concat(body_code)
    c.Append()
    c.Append('#endif // %s' % ifndef_name)
    c.Append()
    return c

  def _GetPlatformIfdefs(self, model_object):
    """Generates the "defined" conditional for an #if check if |model_object|
    has platform restrictions. Returns None if there are no restrictions.
    """
    if model_object.platforms is None:
      return None
    ifdefs = []
    for platform in model_object.platforms:
      if platform == Platforms.CHROMEOS:
        ifdefs.append('defined(OS_CHROMEOS)')
      else:
        raise ValueError("Unsupported platform ifdef: %s" % platform.name)
    # NOTE(review): conditions are joined with C++'s alternative token
    # "and" rather than "&&"; valid C++, but unusual for preprocessor lines.
    return ' and '.join(ifdefs)

  def _GenerateRegisterFunctions(self, namespace_name, function):
    # Emits one RegisterFunction<> call, wrapped in platform #if/#endif
    # guards when the function is platform-restricted.
    c = code.Code()
    function_ifdefs = self._GetPlatformIfdefs(function)
    if function_ifdefs is not None:
      c.Append("#if %s" % function_ifdefs, indent_level=0)

    function_name = JsFunctionNameToClassName(namespace_name, function.name)
    c.Append("registry->RegisterFunction<%sFunction>();" % (
        function_name))

    if function_ifdefs is not None:
      c.Append("#endif // %s" % function_ifdefs, indent_level=0)
    return c

  def _GenerateFunctionRegistryRegisterAll(self):
    # Builds the body of GeneratedFunctionRegistry::RegisterAll(), covering
    # every compilable function of every namespace, including functions
    # declared on types.
    c = code.Code()
    c.Append('// static')
    c.Sblock('void GeneratedFunctionRegistry::RegisterAll('
        'ExtensionFunctionRegistry* registry) {')
    for namespace in self._model.namespaces.values():
      namespace_ifdefs = self._GetPlatformIfdefs(namespace)
      if namespace_ifdefs is not None:
        c.Append("#if %s" % namespace_ifdefs, indent_level=0)

      # NOTE(review): namespace_name appears unused below; functions are
      # registered under namespace.name instead.
      namespace_name = CapitalizeFirstLetter(namespace.name.replace(
          "experimental.", ""))
      for function in namespace.functions.values():
        if function.nocompile:
          continue
        c.Concat(self._GenerateRegisterFunctions(namespace.name, function))

      for type_ in namespace.types.values():
        for function in type_.functions.values():
          if function.nocompile:
            continue
          namespace_types_name = JsFunctionNameToClassName(
                namespace.name, type_.name)
          c.Concat(self._GenerateRegisterFunctions(namespace_types_name,
                                                   function))

      if namespace_ifdefs is not None:
        c.Append("#endif // %s" % namespace_ifdefs, indent_level=0)
    c.Eblock("}")
    return c
class _APIHGenerator(object):
  """Generates the header for API registration / declaration"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    header = code.Code()
    header.Append('#include <string>')
    header.Append()
    header.Append('#include "base/basictypes.h"')
    header.Append()
    header.Append("class ExtensionFunctionRegistry;")
    header.Append()
    header.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    header.Append()
    header.Append('class GeneratedFunctionRegistry {')
    header.Sblock(' public:')
    header.Append('static void RegisterAll('
                  'ExtensionFunctionRegistry* registry);')
    header.Eblock('};')
    header.Append()
    header.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    return self._bundle._GenerateHeader('generated_api', header)
class _APICCGenerator(object):
  """Generates a code.Code object for the generated API .cc file"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    cc = code.Code()
    cc.Append(cpp_util.CHROMIUM_LICENSE)
    cc.Append()
    cc.Append('#include "%s"' % (os.path.join(SOURCE_BASE_PATH,
                                              'generated_api.h')))
    cc.Append()
    for ns in self._bundle._model.namespaces.values():
      ns_name = ns.unix_name.replace("experimental_", "")
      implementation_header = ns.compiler_options.get(
          "implemented_in",
          "chrome/browser/extensions/api/%s/%s_api.h" % (ns_name, ns_name))
      header_path = os.path.join(self._bundle._root,
                                 os.path.normpath(implementation_header))
      if not os.path.exists(header_path):
        # An explicitly specified header must exist; a default one that is
        # missing just means the namespace has no implementation to include.
        if "implemented_in" in ns.compiler_options:
          raise ValueError('Header file for namespace "%s" specified in '
                           'compiler_options not found: %s' %
                           (ns.unix_name, implementation_header))
        continue
      ifdefs = self._bundle._GetPlatformIfdefs(ns)
      if ifdefs is not None:
        cc.Append("#if %s" % ifdefs, indent_level=0)

      cc.Append('#include "%s"' % implementation_header)

      if ifdefs is not None:
        cc.Append("#endif // %s" % ifdefs, indent_level=0)
    cc.Append()
    cc.Append('#include '
              '"chrome/browser/extensions/extension_function_registry.h"')
    cc.Append()
    cc.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    cc.Append()
    cc.Concat(self._bundle._GenerateFunctionRegistryRegisterAll())
    cc.Append()
    cc.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    cc.Append()
    return cc
class _SchemasHGenerator(object):
  """Generates a code.Code object for the generated schemas .h file"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    header = code.Code()
    header.Append('#include <map>')
    header.Append('#include <string>')
    header.Append()
    header.Append('#include "base/string_piece.h"')
    header.Append()
    header.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    header.Append()
    header.Append('class GeneratedSchemas {')
    header.Sblock(' public:')
    header.Append('// Puts all API schemas in |schemas|.')
    header.Append('static void Get('
                  'std::map<std::string, base::StringPiece>* schemas);')
    header.Eblock('};')
    header.Append()
    header.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    return self._bundle._GenerateHeader('generated_schemas', header)
class _SchemasCCGenerator(object):
  """Generates a code.Code object for the generated schemas .cc file"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    cc = code.Code()
    cc.Append(cpp_util.CHROMIUM_LICENSE)
    cc.Append()
    cc.Append('#include "%s"' % (os.path.join(SOURCE_BASE_PATH,
                                              'generated_schemas.h')))
    cc.Append()
    cc.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    cc.Append()
    cc.Append('// static')
    cc.Sblock('void GeneratedSchemas::Get('
              'std::map<std::string, base::StringPiece>* schemas) {')
    for api in self._bundle._api_defs:
      ns = self._bundle._model.namespaces[api.get('namespace')]
      # JSON parsing code expects lists of schemas, so dump a singleton list.
      json_content = json.dumps([_RemoveDescriptions(api)],
                                separators=(',', ':'))
      # Escape all double-quotes and backslashes. For this to output a valid
      # JSON C string, we need to escape \ and ".
      json_content = json_content.replace('\\', '\\\\').replace('"', '\\"')
      cc.Append('(*schemas)["%s"] = "%s";' % (ns.name, json_content))
    cc.Eblock('}')
    cc.Append()
    cc.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    cc.Append()
    return cc
| |
import unittest
import collections
from fontParts.base import FontPartsError
from fontTools.misc.py23 import basestring
testPNGData = """
89504e470d0a1a0a0000000d4948445200000080000000800806000000c33e61cb0000
02ee694343504943432050726f66696c65000078018554cf6b134114fe366ea9d02208
5a6b0eb27890224959ab6845d436fd11626b0cdb1fb64590643349d66e36ebee26b5a5
88e4e2d12ade45eda107ff801e7af0642f4a855a4528deab2862a1172df1cd6e4cb6a5
eac0ce7ef3de37ef7d6f76df000d72d234f58004e40dc752a211696c7c426afc88008e
a20941342555dbec4e2406418373f97be7d87a0f815b56c37bfb77b277ad9ad2b69a07
84fd40e0479ad92ab0ef17710a591202883cdfa129c77408dfe3d8f2ec8f394e7978c1
b50f2b3dc459227c40352dce7f4db853cd25d34083483894f571523e9cd78b94d71d07
696e66c6c810bd4f90a6bbcceeab62a19c4ef60e90bd9df47e4eb3de3ec221c20b19ad
3f46b88d9ef58cd53fe261e1a4e6c4863d1c1835f4f86015b71aa9f835c2145f104d27
a25471d92e0df198aefd56f24a82709038ca646180735a484fd74c6ef8ba87057d26d7
13afe2775651e1798f1367ded4ddef45da02af300e1d0c1a0c9a0d48501045046198b0
5040863c1a3134b2723f23ab061b937b3275246abb746244b1417b36dc3db751a4dd3c
fe2822719443b50892fc41fe2aafc94fe579f9cb5cb0d856f794ad9b9abaf2e03bc5e5
99b91a1ed7d3c8e3734d5e7c95d47693574796ac797abc9aec1a3fec579731e682358f
c473b0fbf12d5f95cc97298c14c5e355f3ea4b84755a3137df9f6c7f3b3de22ecf2eb5
d673ad898b37569b9767fd6a48fbeeaabc93e655f94f5ef5f1fc67cdc463e229312676
8ae7218997c52ef192d84bab0be2606dc7089d958629d26d91fa24d560609abcf52f5d
3f5b78bd467f0cf5519419ccd25489f77fc22a64349db90e6ffa8fdbc7fc17e4f78ae7
9f28022f6ce0c899ba6d5371ef10a165a56e73ae0217bfd17df0b66e6ba37e38fc0458
3cab16ad52359f20bc011c76877a1ee82998d39696cd3952872c9f93bae9ca6252cc50
db435252d725d7654b16b3995562e976d899d31d6e1ca13942f7c4a74a6593faaff111
b0fdb052f9f9ac52d97e4e1ad68197fa6fc0bcfd45c0788b8900000009704859730000
0b1300000b1301009a9c1800001ea3494441547801ed9d578c1dc59ac77b926d8c0360
9c6dec019383c0cb72612f4164010ff000175878602584001184b80fc003d24a2b2178
4144ed5e5602b1c2c6d2458056225d0c0f5c049820e20513cc1a6430c9018fd3cc9c99
fd7e55e73fa74ebb4fe83e3d33a7614aaa53d515beaafabe7f7d15baba4e4794a3e9e8
e888bacc24901cb6b00e850f0ff3586dc85b2b9c94f138d22785139644ab5e7a4728f9
a7aade9664786868c8aa92d080e4fc6d1f3a22948c35557e5cf94b196915251b18ef36
20940c074356e910cda1bf10ede9cea1969dd6bbba8c19a579f3e61d76fbedb7ff47d0
43048aaa62d41b2d9078c7343a552c9c3c23a4ca71a2572b7c244f9c1619ccb8322c4e
74460a505c9940a8714cd643c33ffffcf386b7df7efbef6bd6acf9fbb66ddbbeefecec
dcab9c76c05c0161a43dc415c18c30224365c98b355e744e3626f51f71c41167bcfbee
bb2fc0b0725c5db2301d13c8c33d670d8fd3129d303c0c7385957faccece17c613265b
2a958677eddad5b16eddba0dcf3cf3cce38f3efae8fd0684cdd6f61e4bb3db320b0434
ca37ac4cbb9d9d3c00d06d4c98624c183af4d043cf58bd7af5b3e61db446b7427b4c79
86d025f8d0b576380058d8b00180349db46bafbdf6eaf8e4934ffe71c30d375cb97efd
fa7fd89030c5e2775a1c20088130a6edc85258ab4380d300563074f04fe9eeee86694c
040b0500abaf13362e06e1a399b0089fb92d6e7f7fffd0d6ad5b070f38e0802356ae5c
f9bf575d75d5659f7ffef9bb163fb50c82b0f7877e4fb8cd7e41741623e1e26211788f
d949660b6fd00249165098e99c3c7972cfe6cd9b4b93264d5ab862c58abf2e5bb6ec04
13fe6e4060f17406a729cc159fccdb9e262b00688d1a2710d0f056354a5b7009e1cbe0
470b8480000826fcae9f7efaa9d4d3d333d740b0aab7b7f70f0108e8108500412b0010
8f040068e5414f74c7cd0d27a5a15f15020c80c03441d7d75f7f5d9a356bd65c9b14ae
3cf0c003ffb96820685560712dc0927064ec2cbadf26b72c711ab5c781c034c0bc871f
7ef8c93208facbc341db6b825601a04e812b30846185f40bb8aabc9e050601837826bd
bffefa6b575f5f5f69c992258060e5d2a54bffc934412140d02a002a83a5b855601741
874682972bc1872e0018181888b66fdf4e6f2f2d5cb870fe430f3db4d2c0b0bc082068
150021bf0aeb97e0e5d210848c91f0e5128e3f04017e5b1544d3a64deb3230a009163c
f0c0032b162f5e7cac8160c0e2591db4e57030010027662f68bc0853067f28ecd04f1c
bd1fd7560291ed0aba6c3366cce8b2bd8292cd0916dd7ffffd2b4d231c6313c610049a
3457ab1b153ac66ea5b5635c703b1787a031b8718bc043cb061100601830c1477befbd
f78826b0fd81c5f7dd77df8af9f3e71f5d0601ef0fb4692620b8b2c6eb275700c49955
b467044b9de5cacf3382c6ca2f97303401c6de154453a74e8da64c99e2408026b0edf1
2580c05e941d692018b47c6d05825c01305e28ceb35c848e41c09810c484c90a0c3ccb
bf63c70ef6061c0000c2f4e9d3dd7070f8e1872f35103c3977eedcb603c104009c98ab
7f0402b92110e417107049878b06401b0002acbd341a0181bd2905042be7cc9973443b
6982dc0110f698a2fb43e1aa2deaed02809e7177efdeedc060dbc4239a0010b03a6038
38f2c8237bdb0d04b903a0ba2f15f70981631034460048724903000607790bee378798
186a38084170f4d1471f5806c1e1eda0092600e04456ff476008854f8ef019bfadf9dd
3b02c0c0502010000081c0b444a99d40d0360008dfc0d517c7d8c54af0127658b2e204
02b480f568f7d690309e1b80e0a076d0046d0100bd6a0d19dcae7e095ca0d0332e4640
ae07025607654d70109b4536311cb7e1207700840c69d62f00349bbe1dd3d1e3a9978c
ea584b13b40b087207807a8018d1c88551a84e265021031be51bcb780953f5d33375c0
8f90e300509ce2c3e1808d22e60402c131c71c336e9a20770064110c9327cda0b3e41f
af3c714030f9939156d3336989d7c430dc27b0fd831220b01748633e1ce40e001a9ac6
d273a401d48bd2e46f97b4d41d01e362c27a290ee1cbb257801fd7de1fb8394119042b
66cf9e1d9f134094f1a532c650480e267700a4a913bd0446a1017899823fed1092a6bc
56d3523f99b85f82b7c3211c1089366dda14fdf2cb2fce6ed9b2c585eddcb9d3b58fde
3f73e6ccc804edecbefbee1bf116d1b6924b471d75d4b2071f7c70854d0c0f0bf60946
ed55725b1ce294061073dbdbf51a8e3a02020181decf56f0cb2fafb657c3bf4676749c
43220ed84a4b1ae6028cfff8ed055164ef07a259b3f63740cc88162c58e04e162d5fbe
7c19ab831b6fbcf1723b78fa99816b2fe311df1d840770423f456432e30e00188806c0
8a99995a3246993a3a3cdf257c5c7a7f67a71fe3bbbbfd30e0e3a9d4b0b5cd7f60421b
013bee0f3ffc605bc47b1b207a1c186c4218d96be3a8b7b7b76bc182f9a5934f3e79d9
238f3cf2e4d5575f7da97d96f6b995c1d757141e7e7bd93208c60c00c1496b53835e5a
b8c60fb3fe94adf7d3b37c7c3bfc8675f5f565d5a27a5343862d2acc32d0dbce4e40d0
65cfde020a86b6ee6e1a4e5b87590198da9fe9e6010082b3045f7ef965641f99988698
dab570e10203c12907fde52ffff53f575df56f97d8b0f2ad816ab2d1e133344000310a
2e73d37c194cee0000f99890715ec05ed0d6d6b2c049336c8c2a99ea1c3255391c6dd9
420fa16ded8300cd49bc2ba0b26c1d7675c6b577402640de067698cb3b01be8e9a64ed
c3f29189638903361a808f8ae9f97c4b333cdce380d2d3c31b44df6e3b4516ad5bf77f
5d6bd77e513af1c43f1c7ed75d77fdf7adb7fef9f25dbb766e31fef6184d8fa416854f
ad7207806f6abddfeaeff03c83aabfc449caeddb9c14d37c98c0d94c0e951777112080
aed4bb428d5e6f5f8ebb993de33cf2e9e90104ee8b2297d0e71f32a0f4b9c323dddd8a
67422c5a936d653095f943d75b6fbd3578f0c1cb4ebceeba6befbef7de7bafb7144cdc
2108084438b316c81d00494c26acbc3ab23a7b158f1aedec1c760c83698ca32c890607
2b132b97b8fcc392a9c2a030a6393f424bb3d7c024adabcb2f924261ab0733aeb37219
1c1c30b7dfb400cbbacad20e3fb3fd818141038c7f47e0e9f8f9c0c0c0761b02f6b13c
930d201ad6d5b149efe745364fe8deb8f187d2e2c58bff74d65967ad79f9e597ffb33c
1f1008320b1fce8d0a002a0c6302e41bd2df0fa33837873b688cf3e1a8fb81815d4ef5
6fd8f085eb59a148010ff93efae8a391f7ed617c337e04c0b2cbdec7ef413f9e9fba33
535fbb766df4e38f3fba091a6115a3decf10266dc0c40ebf9fe4d1b669d3a6ba3219db
bdc6505a2fe42953265b9e5d4683eb05e80c74123fa10478a6e9cd4e72342d6de7ae5d
bb2303c09f3ff8e083576d65f095f1a5dbea158e9761252bd56de0cb1d006216bdd5f7
581ae3fd3d3d9dd62bba1d28c4147afed6ad8ca3fdd1fefbcfb0064babc16034445764
1f5d441f7ef88e130c9a406534689b8b863ecb310070da692755d14fca0f6db66a5f7a
e90b1b87d745acd10171c5a0a12a4ff842ad077858f7b3bc3bf2c8435dd9e007ba00c6
fb876c9f60b36980e96e12e8dbe3014087080dfc30dbc16ea1d56bded9679ffdaff62d
e2bf5b99932c1f07103ca2c24c29fcb90340657ba6b034f20724085758c840040a183a
3afa6c1dbcd0f5769fce330d86b2b1326dda7427448609cf309554df05400804a1b2cc
a2a7d633309cf37cccd0599f23c8a4a10341568c7fa05e6acff4e933a2850b1739f0aa
beb8f203f879f3e633e3b7f6bbbb071c392f70269b5e43f20c00ad4ec6968ec8f608ce
5cb56ad5fd16d66719b443485e9053552b021b99dc0120e18505abd17215c733e97da3
fd0b21982d1a4a4f184c409d62142e3af55c00c0100270341ed74b4f5d484f797ec8ea
4f04403d1a6a03601060a9b3da8bcbfc00cb5040998a87aef84198fc46b383b61820e7
1b98f733adb6c3f8c4d53cba8c23b5f0292b770040346e24d07838cfc42559a5ad17a7
34f55ce5270dc3014cad67c2f4f2e3366b9496b2d03a8001132f973859c5e18656c227
0c3a00ca76183bcdcf3d0ce1f6b02b23cbcf9800204bc58a9e87deea570195771c1234
2041330800085a466908c32f10e0428f7c0600e416022093faa7ccdc01a01ea0063572
499f64954f713cc31059c53772c3f421ad5af994065779719b35ca437e8485152dd120
0dbd59360480d28a8e5cd20018e85918c247fd877300914fe5e60e8054a5ff46132334
3400026612cb7080215c46c247a0cc374213a6c32f2d409e32a0e8f1dcc5d011a60d69
34eb2f0c005a6d68b30cc9331dc24768f46a19dac1b3e242809046ed0c5dfc804000b0
64101451b9644f6d0a03005a0623649b6d69daf471ba69f32b3d42a6e7cb121e1ac5a3
29428090264c2b3f00a0f793de4c5ce8f167d234657205000d8937a6512d9427ee2a5f
182ee68a294a53cf559e904ebdf4a40b55368ccf5a1ec2c28a9eca859ee2702953262c
cb63a6029c182d3255328a404a375700345376d056973c7cc62f2b5a7a0e19a3b874ae
7fd9129697945fe5299ddca4b4f5c204246df156a7f51d852ddf247054a7f54fd063b8
0004799adc0110a259150db59f75a8114338f31fc5e3c78ae984e3d71c0910c88e1069
e0f15bcee48b6cf9c4264bfd0cc4938e77fc94a5fcf573556243a042cb76712d72cf8e
8ae07d19fe3c81da9cc43fab8dd312c425c757ca4febcb1d00aa000cc7e0c2448488eb
995271bbba3a46ce036cddcae9e0cac9209849bcddbfe4e8646bbc671ee5fffaab7f33
e76b96fccb7e3d3b860303fea6d0e454f54311a66d26daa64dc9de0594ac9797991164
830f7d7dec7efa6d60f2b058a0bdb85e73789776fbb03d354699e49e050465d5f38e1a
006a174a5d7d7de3bd19801057014f758f57fab097d52ec7c7280f4fa1bf56beea34d5
e5d7ca138657f2371e72103a16237fe599deae70b4844f97f7ef9800c0ab2e1ae3911c
36d89f056022e4efda41fdaaa7c34ce2fde9998a00096fd64820d0e4f0853f71543b37
1a8074d44779b395c73171cefbf558eff5fb0061a9d487b6110f7dce1a227092e2ca2a
0fe9651596879b3b0068ac9f39fbd9336fdf5073fec54ae54c0061fe2d58edf3003086
49cf8e1ddb1dcdac0d469d0e0cec8ebefffeab86742873d2a4c9362c6d3786ef29b846
75203f278376eedc166dd8f0a5b93b1201502a0d46f6170416d7ed844d3b393b386992
df21c4c58633ff4200000660c2ca826626af9c072895380f404ff40724008cbded7587
3d66cf9e59252001a0af8fa554f6ad598616981b3f6f90244ccae40d1de716b404549b
92d2c7c3484bbec993274573e6ccb439406527304cbb71e3ce689f7df6766063d8830f
582f70bf7cf47edff3cb6c0d49e4e2cf5d0320788c5c1a21e3c3fcd866292cd8ef897b
066f73efcfd114ca4b384b1fced8c31c9e6545b3914b7a04c22e1ae70de2dbaef1fca4
f5173ded950900d0130ddef7736f10758f9b9f7ffec500399bdb43ac4e2c4d7cc7a1be
98b80bcd902f2e510e3fb903205e27358470fcf167c2691ce1083f0e00c279151ae623
4f5a23facd00803a50a7560cf975a62009006a57bc2c09b995b2d3e41d7500c42b136f
60f88c5f56f9c2679826abf8466e983ea4552b5f3c4d98bf569e303c4c2f5ab849a651
7c521ec26ad1ab95be5ef89ebaa95eea89b8df1c0772d70069d119f682d02f4e2b8c67
f52edc664d9836a4552bbfd2e0b6525e48077fdc348aaf973e6c533c5ddae7dc0190b6
0269d2671508f99284d0a8ec56ca6b44bb5de27307405a46c77b829ec5a0f8b3c2b3b8
cdd06a264db3658b166edc847149f1f5d2c7e35a79ce1d00ad54a651deb1ee9159cad3
8aa6515bda253e77003483e6b0f1f19ea067a5893f2b3c8bdb0cad66d2a429bb163d85
cb6d44b3d9748de8c4e3730740bc803c9fe95d597a1879d21a188e065099cde6573edc
229889656011a4348a752c9406c832269327abc95a5e2b6566ad6bd67cbf0b0d502481
641564d67cb96a802c1315e589bb6a50389666ed9121ad909ec24357f55098cad47323
57e9f98a0713a7a7fc0a97abf05aaed2e1e6697205409e15132d18ca9b3cde0a720b97
18acf846aed267619cf2e2366b48cbc451006836df78a5cb1d0069192d64c300f94503
1786ea1b3aee09287f1ad534bf2444ad0444bb1601c5ab6ce5af953e1e2e0070091406
3aa2194fabb85af1617aa5911bc6b5e26ffb39000c4503d8458aeeb57096c6f23a964b
1ac357cdb5e8c060ca44dbc85f2b6d523879c9c7c512453085000002e4564dce056060
72b3969ecff0a11b3c39a042de5a06e101166ef9245fb3e5281d6700c8b7fffefbbbbc
d0cbd3402f4f9a6d0f00316fd1a245ee808518ddac0b00103a7fecf8d5575f39bf8603
d1964b3843cc77df7d17d93d3cce8f409b2d0b3a68194ef9008046874f54ee78bab903
4008cdcbd5699aa54b978e08240bc310ecabafbeea26680c2971c1227cd579f5ead54e
db84c7d99a2993fc0c1d5c45b3cf3efbb832a8bfe8b6eaaa0e00322f933b00f2aa98e8
c0347a151a8061201c9b9bed99089b5ef9c5175f448f3ffeb8d302fedcdf14375b6792
c9bd7eb8cf3df75cf4c61b6f8ccc399a2d4342e11a1bfb9f40277485a92d79b8f0234f
d336ab8078ef081b49ef44385cf3f6d24b2fb9439b617c337e687053185a60fdfaf591
ddc51bd9dd7b8e96fd7d4bb471e3462778aea363064f7dd20a90390acb3ffb53285725
f5fea4fad56b6fadf449e1ad86e50e80562b542fff89279e18bdf2ca2b2393c17a696b
c5a19abffdf6dbe8b1c71e73430a6a1e700002266f8004c11396c6206c96a9871c7248
d4dbdbeb2eb4cabbb7529fbc691602003017d56a7fc61cd93f70469f7cf289eba56985
0403190e50fffcc9733849230c938526f9000d43d519679cc1a37bce5b588e70ce3f6d
3f07507ba58ecf3ffffc4c3d5474701172287c8565153e00659571d8618745c71f7fbc
032b614530c5a8a5711286a2a6d100a79e7a6ab479f3e63d3e160124636d11b2f6272e
bef8e2913a8da6f0f3d42c8501000ca5e1f4dc4b2fbd343ae080035caf0318632df4b0
3cea04182fbcf0c2c8fe2ade81b428bd1f9ee60e0018325a16c60200c6efebafbfde2d
ddf8f42aed7a9d86e76128971dc3d34e3b2dbae8a28bdc1c80b0d16a3f7586769e2677
00e459b9245a808009214bb85b6eb9c52d0f197fc752134808f6572e6e39697febb2c7
9c22a9eeed18963b00eaa1bf1e03c4d47a691427101c7cf0c1d11d77dce13689e889a8
66e246cb5047e8f3ae80770ba8fd6bafbdd6f54a269069dad0282df171ab76d1cebccc
a82f032b0df59743549efd528986c05485b31697bf512361c4c2850ba33befbc337af6
d967a3bffded651b8377b8cd185471385637a29514af7ae89e0096794c44172f5e145d
7ef9e591fdc5db08e8d20e43d0260f6d8f8356f5a64e5ed6f9093cdece5103801ac198
8d8579b2fccb46b814dbb973979b49f3a749e44b63b8d10346fef18fff62d7bbef17bd
fefaeb6ea7afaf6f9bbd46f657b50a60213343bf2f8f1e57f6953dfe6e23fee4823f7d
1872fbfbc71fbf3c3ae59453dc86d1dab59f9585977e5c86e637df7c6375f480a78eb4
83f714b2fa4731e2c417802360a6e153adb4a306800a5351c9085545c12c7a273b6da8
4de2ed06274bd3d1c1664cba1d3864c5a6ddb66d25db283ac666e2c7b937796bd77e1a
7dfae947b6c5bbc12e69e8b3340c0fbed7516635232bcb47fdf30700f13b83fbdad2f3
40db863e3662878f092843405fdfa0096c9ad5379be1ff11ec4fc02cb33f3ae6e7e3f0
82fb14e0159667f85501a73de46a24955c897a62bed25c97c2ed1c066243b617160283
c108857b7230bb766db357a80b9c26c88a70699ddede7d0d0c87da5fac5c60d7b0fc68
57b57c63d7c36c70fead5b37bb2b6758bba389bc06e1ae9e6e37744c9b36c30e73ec67
f7f2cfb7b77a8bcd2eb29e3fd381a7bfdfff25cc94299cf649dfeb5d43cb3f7d7d9bec
85d36c5bc9ecedea0090b1a6041cafe01756e1d200218d3cfca30880b07af43084ce1e
bbae8ce3998b21f8c30818eb2f8240285901a012eddf55cc8b46e9b46b5ae6bb9b41a4
2906061892fc70c43d3dd4817b7ae8eda85c5caeb2c10054ae8c6388c2a85e84e76128
1f20fa3a54cf91280b9ea9cc3cca4ba2913b00922a0c924383303c2018f7643b8cf1ac
a1d3cda643ba7bfa01dea08de1c6490708988ce532a6eac9a6b407e977ef9684fd784b
dabc0d7ceaeee66634c677dfd37daff740f0f5f400805fe6cbbb0a8e5efe2d2b57d357
da378ccaa3da7a7aaa1b01d3b91871c70edec875994a64a7affaeedc5169b523eab592
e87be0c690aac8517011fef4e95d36a7a0edd57c492a0e908c86193500a4a92ccc67a6
3bb666f42656cdb423cd72b7197a59d3e40e80a421a051e5fcb8cb5fa94ae535ee118d
68b67b3c4b3d81be199e290d6e9e13c2b1ee768972d1fa3731f2371ac8194509753c9b
38ae0010034200286c3c993216657374ac1dda3aae0010a3430028ecb7eea20134048c
675b739f0364694cbb4c88b2d43d6b1ee60068803cc7f32c7519770d000360045a60bc
9991858159f3b40be8db4203a00a7f4fc20734eda0fea947ae00d0a4462e053463a40a
d3e66b8676bba641e3a5696f9ab469da9c2b00d2141c4f3b5a0d8c9793f539d45079d4
350f1a59db12e66b1b0084956ad62fa124315371d00ae3c3f0a472e269390780ba4e0a
a71717dd141a00a150e282a815572b3c9e5f4041c88080af7e38d0c23784d870d2da2c
cd7819edf05c3800a8477ef6d967d1ca952b1d0f399ec54719c4210c2c71efbdf75e74
eeb9e7dab980b35cba0f3ffc305ab56a953bd4e15fc3fa372cf4f0eddbb747279d7492
3be7c7092604ccb9bfe79f7f3e7afbedb7dd27e31c07e3bbc1dededee8f4d34f770742
290bb01415048503807acd962d5ba2b7de7acb3d9e77de79ce0d05b176ed5a773c8c2f
756538c5fbe69b6fba2f7ff9d894a51879246c048be1990f48efbefbee68ddba75ee1b
423ef8dc6fbffda2afbffeda7d44ba66cd9a087bd34d37b9f8b06c955704b7b0004048
1ccfc2e0c784bd1001f349383b6e32089c1e4cd8adb7de1a1d77dc71237fe9820089c7
a00deeb9e71ef711e9bc79f3a29b6fbed9a595909f78e289e8e9a79f761faa5287ebae
bb4e4514ce1df78da0ac1c4318a87c2cfeb8517c18a730f20010810821021676e7302f
bcf042f9c0664f74c9259738e1332c901f7bc51557b830f6f3f9dc9c9b47001f748b66
0a0b80d160b43667de79e71d37f347e5738f008638ac84cc1c00a1736105430126049b
0b28c04f618780acbc4548088e0b2166cd9ae50448185a60eedcb96ee2c75c81309eb9
2f00a3e1452059ba74a95b0df055d2860d1baad2b88782fcfc2e0180aabfefbefbdcb2
8e719fd93e2b053e30a147b3dcc3688e21d0843245fd6b7e411e8c4012a66b77ffef0e
000889f1fc0cbbc8811ece72908f340e3ae820272b84aac9201f9ed632e41150000326
0928b5f2b74b7861018020b18cc908346e100626de2b058073ce39c7dd39447ea975d2
73850c973c724d1c960d20268812ae5cd43e1f8860b8150ca332dd43417e0a3b096439
474f4580080aa3cd1d04c13e0182951a8fcb43a0c1c542078b39f6d8639d9fb9802678
4a2721bff6da6b2e0dc3c9f2e5cb5dbe38d85c609bff140e0062f292254b4626685ceb
c62e1dea1ba17ff0c1076e0d8f6aee2d6fee2007f2caaad7b314c4f22cda5c433367ce
1c07a8279f7cd26dfe0036e249fbe28b2fba0d258681134e38c1dd5a023044b3cd655e
55bdc20d0108819eca7efc65975de626736cf1de76db6d6e6dce260e1f88a21518e7e9
cdf45e0487ab099b7a7bc80dd16608e0ee017602f980934fd0d926269c0f58d96246db
409b8b2a8a6c0a0700984d4fa3c79d79e6996e678f5d3936633efdf453276876efaebc
f24a77954c281cc67234029a828da02423da7cfa0d009e7aeaa9e8fdf7df77f7132274
6e13e3b632f6072eb8e0024787ba487b24d16ce7b0ac07f0c9876508996c4c9b6e3d6a
8a5de478d6c71f7ffc88853103cb4adbb23667c4787a33f7f4b0264755b3be474328be
396ad5a9a08950b1ac06366ddae466fdcc29d820a21ce863c642f8569f61e3738769a0
9fec2ea23fd9f37a2b779bd561bb55a1df2c13983db7442db09e6955038cba90eb551e
c64b50081d2ba3703da775d104d040c8f47add23283a0c27a4190be1abcc3a6e6639b4
0a803a751a9b2884804150618f54782bb5108d386d84ce9ce2b7605a0540a872f087cf
63ca1f84325abd713469676492782d7ecb4d4dae5500a8402a30643d6508d5886bcf99
d592884eb8150ed870346c93d72ee32fbb5e99c6fb0ab58aaf15000875b8d892f594a1
b26a442f4f00a0c2e7967d000022c6df4ee33320f0b760b4a875b30280ca20605c7a3e
ffe5d26dffb4f1dd35d75cb3c29ed91ce7240603e504188c092d18f178c0561e43b6da
f9dec0c0ccbf647c160832936fa5979217e1022204cef11c2c0bec1966b9f189f00910
1813321a277ccbcbebc9dd66fbcc6e0bfcbcad225c40207d2a935503a81055100db0db
5453b7a92868f27e94718aca0100014dae054d98061c9030e123020604fdf47a33f097
b5bf046fde6ca6158128afb400e7a950fbf47a2c40c0924e69cd3b61527220ec640e04
969f8e85a5e30182cc93c25634001543b0b8540223bf7afec4f8eff9d2ea2f02962680
d7085ec287e7994dab3d53f971b1085c2a1f3f4669fcd3c46f160e2064598141bd5ee1
59e8e6221c093874437fa68a4d64da8303a1a0e3fe3d12371b2041359bbe5eba38adf8
73bdbc1371cd7100c18726fe1cc635e5ff7f6d102cb21055ee1c0000000049454e44ae
426082
""".strip().replace("\n", "")
# Prefix the 8-byte PNG file signature so the payload is recognized as PNG
# by format sniffers; the hex-digit text itself is ASCII, not decoded pixels.
testImageData = b"\x89PNG\r\n\x1a\n" + testPNGData.encode('utf-8')
class TestImage(unittest.TestCase):

    """Interface tests for the Image object.

    Concrete subclasses are expected to provide ``objectGenerator``, which
    returns a fresh ``(object, dispatcher)`` pair for the requested type
    ("image", "glyph", "layer", "font").
    """

    def getImage_generic(self):
        # Common fixture: an orphan image with data, an identity
        # transformation and a color assigned.
        image, _ = self.objectGenerator("image")
        image.data = testImageData
        image.transformation = (1, 0, 0, 1, 0, 0)
        image.color = (1, 0, 1, 1)
        return image

    # ----
    # repr
    # ----

    def test_reprContents(self):
        image = self.getImage_generic()
        value = image._reprContents()
        self.assertIsInstance(value, list)
        color = False
        glyph = False
        for i in value:
            self.assertIsInstance(i, basestring)
            if "color" in i:
                color = True
            if "in glyph" in i:
                glyph = True
        self.assertTrue(color)
        self.assertFalse(glyph)

    def test_reprContents_noColor(self):
        image, _ = self.objectGenerator("image")
        image.data = testImageData
        value = image._reprContents()
        self.assertIsInstance(value, list)
        color = False
        glyph = False
        for i in value:
            self.assertIsInstance(i, basestring)
            if "color" in i:
                color = True
            if "in glyph" in i:
                glyph = True
        self.assertFalse(color)
        self.assertFalse(glyph)

    def test_reprContents_glyph(self):
        glyph, _ = self.objectGenerator("glyph")
        image = glyph.image
        value = image._reprContents()
        self.assertIsInstance(value, list)
        color = False
        glyph = False
        for i in value:
            self.assertIsInstance(i, basestring)
            # Bug fix: the original tested `"color=" in value` (exact-element
            # membership in the *list*, always False here) and assigned
            # `color = i`; test each string for the substring and set a flag,
            # as the sibling repr tests do.
            if "color=" in i:
                color = True
            if "in glyph" in i:
                glyph = True
        self.assertFalse(color)
        self.assertTrue(glyph)

    def test_reprContents_glyph_color(self):
        glyph, _ = self.objectGenerator("glyph")
        image = glyph.image
        image.color = (1, 0, 1, 1)
        value = image._reprContents()
        self.assertIsInstance(value, list)
        color = False
        glyph = False
        for i in value:
            self.assertIsInstance(i, basestring)
            if "color=" in i:
                color = True
            if "in glyph" in i:
                glyph = True
        self.assertTrue(color)
        self.assertTrue(glyph)

    # ----
    # bool
    # ----

    def test_bool_data(self):
        image = self.getImage_generic()
        self.assertTrue(image)

    def test_bool_no_data(self):
        image, _ = self.objectGenerator("image")
        self.assertFalse(image)

    def test_bool_data_len_zero(self):
        image, _ = self.objectGenerator("image")
        try:
            image.data = "".encode('utf-8')
        except FontPartsError:
            # Some environments reject empty image data outright.
            raise unittest.SkipTest("Cannot set zero data")
        self.assertFalse(image)

    # -------
    # Parents
    # -------

    def test_get_parent_font(self):
        font, _ = self.objectGenerator("font")
        layer = font.newLayer("L")
        glyph = layer.newGlyph("X")
        image = glyph.image
        self.assertIsNotNone(image.font)
        self.assertEqual(
            image.font,
            font
        )

    def test_get_parent_noFont(self):
        layer, _ = self.objectGenerator("layer")
        glyph = layer.newGlyph("X")
        image = glyph.image
        self.assertIsNone(image.font)

    def test_get_parent_layer(self):
        layer, _ = self.objectGenerator("layer")
        glyph = layer.newGlyph("X")
        image = glyph.image
        self.assertIsNotNone(image.layer)
        self.assertEqual(
            image.layer,
            layer
        )

    def test_get_parent_noLayer(self):
        glyph, _ = self.objectGenerator("glyph")
        image = glyph.image
        self.assertIsNone(image.font)
        self.assertIsNone(image.layer)

    def test_get_parent_glyph(self):
        glyph, _ = self.objectGenerator("glyph")
        image = glyph.image
        self.assertIsNotNone(image.glyph)
        self.assertEqual(
            image.glyph,
            glyph
        )

    def test_get_parent_noGlyph(self):
        image, _ = self.objectGenerator("image")
        self.assertIsNone(image.font)
        self.assertIsNone(image.layer)
        self.assertIsNone(image.glyph)

    def test_set_parent_glyph(self):
        glyph, _ = self.objectGenerator("glyph")
        image = self.getImage_generic()
        image.glyph = glyph
        self.assertIsNotNone(image.glyph)
        self.assertEqual(
            image.glyph,
            glyph
        )

    def test_set_parent_glyph_none(self):
        image, _ = self.objectGenerator("image")
        image.glyph = None
        self.assertIsNone(image.glyph)

    def test_set_parent_glyph_exists(self):
        # Re-parenting an image that already belongs to a glyph must fail.
        glyph, _ = self.objectGenerator("glyph")
        otherGlyph, _ = self.objectGenerator("glyph")
        image = glyph.image
        with self.assertRaises(AssertionError):
            image.glyph = otherGlyph

    # ----
    # Data
    # ----

    def test_data_get(self):
        image = self.getImage_generic()
        # get
        self.assertEqual(
            image.data,
            testImageData
        )

    def test_data_set_valid(self):
        image = self.getImage_generic()
        image.data = testImageData
        self.assertEqual(
            image.data,
            testImageData
        )

    def test_data_get_direct(self):
        image = self.getImage_generic()
        # get via the base-API accessor
        self.assertEqual(
            image._get_base_data(),
            testImageData
        )

    def test_data_set_valid_direct(self):
        image = self.getImage_generic()
        image._set_base_data(testImageData)
        self.assertEqual(
            image.data,
            testImageData
        )

    def test_data_set_invalid(self):
        image = self.getImage_generic()
        with self.assertRaises(FontPartsError):
            image.data = 123

    def test_data_set_invalid_png(self):
        # The bare hex text lacks the PNG signature, so it must be rejected.
        image, _ = self.objectGenerator("image")
        with self.assertRaises(FontPartsError):
            image.data = testPNGData.encode('utf-8')

    # -----
    # Color
    # -----

    def test_get_color_no_parent(self):
        image = self.getImage_generic()
        self.assertEqual(
            image.color,
            (1, 0, 1, 1)
        )

    def test_get_color_parent(self):
        font, _ = self.objectGenerator("font")
        layer = font.layers[0]
        glyph = layer.newGlyph("A")
        image = glyph.image
        image.data = testImageData
        image.transformation = (1, 0, 0, 1, 0, 0)
        image.color = (1, 0, 1, 1)
        self.assertEqual(
            image.color,
            (1, 0, 1, 1)
        )

    def test_get_color_no_parent_none(self):
        image = self.getImage_generic()
        image.color = None
        self.assertEqual(
            image.color,
            None
        )

    def test_get_color_parent_none(self):
        font, _ = self.objectGenerator("font")
        layer = font.layers[0]
        glyph = layer.newGlyph("A")
        image = glyph.image
        image.data = testImageData
        image.transformation = (1, 0, 0, 1, 0, 0)
        self.assertEqual(
            image.color,
            None
        )

    def test_set_color(self):
        image = self.getImage_generic()
        image.color = (0, 1, 0, 0)
        self.assertEqual(
            image.color,
            (0, 1, 0, 0)
        )
        image.color = (0.5, 0.5, 0.5, 0.5)
        self.assertEqual(
            image.color,
            (0.5, 0.5, 0.5, 0.5)
        )

    def test_set_color_invalid(self):
        # Color components must be in 0..1.
        image = self.getImage_generic()
        with self.assertRaises(ValueError):
            image.color = (0, 4, 0, 0)

    # --------------
    # Transformation
    # --------------

    def test_get_transformation(self):
        image = self.getImage_generic()
        self.assertEqual(
            image.transformation,
            (1, 0, 0, 1, 0, 0)
        )

    # NOTE: "tranformation" below is a historical typo in the test names;
    # kept so externally-referenced test ids stay stable.
    def test_set_tranformation(self):
        image = self.getImage_generic()
        image.transformation = (0, 1, 1, 0, 1, 1)
        self.assertEqual(
            image.transformation,
            (0, 1, 1, 0, 1, 1)
        )
        image.transformation = (0.5, 0.5, 0.5, 0.5, 0.5, 0.5)
        self.assertEqual(
            image.transformation,
            (0.5, 0.5, 0.5, 0.5, 0.5, 0.5)
        )

    def test_set_tranformation_invalid(self):
        image = self.getImage_generic()
        with self.assertRaises(TypeError):
            image.transformation = (0, 1, "a", 0, 1, 1)

    def test_transformBy_valid_no_origin(self):
        image = self.getImage_generic()
        image.transformBy((2, 0, 0, 3, -3, 2))
        self.assertEqual(
            image.transformation,
            (2, 0, 0, 3, -3, 2)
        )

    def test_transformBy_valid_origin(self):
        image = self.getImage_generic()
        image.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
        self.assertEqual(
            image.transformation,
            (2, 0, 0, 2, -1, -2)
        )

    # ------
    # Offset
    # ------

    def test_get_offset(self):
        image = self.getImage_generic()
        self.assertEqual(
            image.offset,
            (0, 0)
        )

    def test_get_offset_set(self):
        image = self.getImage_generic()
        image.offset = (1, 4.5)
        self.assertEqual(
            image.offset,
            (1, 4.5)
        )

    def test_set_offset(self):
        image = self.getImage_generic()
        image.offset = (2.3, 5)
        self.assertEqual(
            image.offset,
            (2.3, 5)
        )

    def test_set_offset_invalid_none(self):
        image = self.getImage_generic()
        with self.assertRaises(TypeError):
            image.offset = None

    def test_set_offset_invalid_string(self):
        image = self.getImage_generic()
        with self.assertRaises(TypeError):
            image.offset = ("a", "b")

    # -----
    # Scale
    # -----

    def test_get_scale(self):
        image = self.getImage_generic()
        self.assertEqual(
            image.scale,
            (1, 1)
        )

    def test_get_scale_set(self):
        image = self.getImage_generic()
        image.scale = (2, 2.5)
        self.assertEqual(
            image.scale,
            (2, 2.5)
        )

    def test_set_scale(self):
        image = self.getImage_generic()
        image.scale = (2.3, 5)
        self.assertEqual(
            image.scale,
            (2.3, 5)
        )

    def test_set_scale_invalid_none(self):
        image = self.getImage_generic()
        with self.assertRaises(TypeError):
            image.scale = None

    def test_set_scale_invalid_string(self):
        image = self.getImage_generic()
        with self.assertRaises(TypeError):
            image.scale = ("a", "b")

    # -------------
    # Normalization
    # -------------

    def test_round(self):
        image = self.getImage_generic()
        image.offset = (1.1, 1.1)
        image.round()
        self.assertEqual(
            image.offset,
            (1, 1)
        )

    def test_round_half(self):
        # Halves are expected to round away from zero, not banker's-round.
        image = self.getImage_generic()
        image.offset = (1.5, 1.5)
        image.round()
        self.assertEqual(
            image.offset,
            (2, 2)
        )

    # ----
    # Hash
    # ----

    def test_hash_object_self(self):
        image_one = self.getImage_generic()
        self.assertEqual(
            hash(image_one),
            hash(image_one)
        )

    def test_hash_object_other(self):
        image_one = self.getImage_generic()
        image_two = self.getImage_generic()
        self.assertNotEqual(
            hash(image_one),
            hash(image_two)
        )

    def test_hash_object_self_variable_assignment(self):
        image_one = self.getImage_generic()
        a = image_one
        self.assertEqual(
            hash(image_one),
            hash(a)
        )

    def test_hash_object_other_variable_assignment(self):
        image_one = self.getImage_generic()
        image_two = self.getImage_generic()
        a = image_one
        self.assertNotEqual(
            hash(image_two),
            hash(a)
        )

    def test_is_hashable(self):
        image_one = self.getImage_generic()
        # NOTE(review): collections.Hashable was removed in Python 3.10;
        # collections.abc.Hashable is the modern spelling. Left as-is for
        # py2/py3 compatibility with the rest of this suite -- confirm.
        self.assertTrue(
            isinstance(image_one, collections.Hashable)
        )

    # --------
    # Equality
    # --------

    def test_object_equal_self(self):
        image_one = self.getImage_generic()
        self.assertEqual(
            image_one,
            image_one
        )

    def test_object_not_equal_other(self):
        image_one = self.getImage_generic()
        image_two = self.getImage_generic()
        self.assertNotEqual(
            image_one,
            image_two
        )

    def test_object_equal_self_variable_assignment(self):
        image_one = self.getImage_generic()
        a = image_one
        self.assertEqual(
            image_one,
            a
        )

    def test_object_not_equal_other_variable_assignment(self):
        image_one = self.getImage_generic()
        image_two = self.getImage_generic()
        a = image_one
        self.assertNotEqual(
            image_two,
            a
        )

    # ---------
    # Selection
    # ---------

    def test_selected_true(self):
        image = self.getImage_generic()
        try:
            # Environments that do not support selection raise
            # NotImplementedError; treat that as "not applicable".
            image.selected = False
        except NotImplementedError:
            return
        image.selected = True
        self.assertEqual(
            image.selected,
            True
        )

    def test_selected_false(self):
        image = self.getImage_generic()
        try:
            image.selected = False
        except NotImplementedError:
            return
        self.assertEqual(
            image.selected,
            False
        )
| |
#!/usr/bin/env python
# -*- coding: utf-8
# Dmitry Abramov
# Python v. 2.7.9
from __future__ import division, print_function
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import operator
from structures.Graph import GraphNode, connected_components
from structures.SuffixTree import SuffixTree
from preprocessing.tokenize_and_stem import tokenize_and_stem
from Scraper import Scraper, search_articles
#from config import api_urls, api_keys
# Global constants for the clustering algorithm.
ALPHA = 0.5  # similarity threshold: overlap fraction two base clusters must exceed
BETA = 0.5  # penalty constant returned by F() for phrases outside the 2-6 word range
K = 500  # max number of base clusters for merging
NUM_OF_FINAL_CLUSTERS = 10  # default number of final clusters returned
class SuffixTreeClustering:
    """
    Suffix tree clustering (STC) of text snippets.

    Typical use:
        STC = SuffixTreeClustering(snippets)
        clusters = STC.find_clusters()
    or build incrementally with add_strings() and then call find_clusters().
    """

    def __init__(self, snippets=None):
        """
        Args:
            snippets: optional list of strings, one news snippet each.
                May be omitted; call add_strings() later instead:
                    STC = SuffixTreeClustering()
                    STC.add_strings(snippets)
        """
        # Bug fix: the original used a mutable default argument
        # (snippets=[]); None is the sentinel now, same observable behavior.
        self.snippets = snippets if snippets is not None else []
        self.final_phrases = {}       # final cluster index -> list of phrases
        self.cluster_document = {}    # base cluster -> documents it covers
        self.phrases = {}             # base cluster -> phrase
        self.scores = {}              # base cluster -> score
        self.sorted_clusters = []     # base clusters sorted by score
        self.final_clusters = []      # final merged clusters
        self.top_final_clusters = []  # top n final clusters
        self.suffix_tree = SuffixTree()
        if self.snippets:
            self.add_strings(self.snippets)

    def add_strings(self, strings):
        """Tokenize, stem and append each non-None string to the suffix tree."""
        for string in strings:
            if string is not None:
                self.suffix_tree.append_string(tokenize_and_stem(string))
        self.suffix_tree.fix_input_string()

    def find_base_clusters(self, node=None):
        """
        Find base clusters via a recursive post-order walk of the suffix tree.

        Populates self.cluster_document and self.phrases.
        """
        if node is None:
            node = self.suffix_tree.root
        if node.edges:
            for edge in node.edges.keys():
                child = node.edges[edge]
                self.find_base_clusters(child)
                # If the child is a cluster, the parent is a cluster too:
                # propagate the covered documents upward.
                if (self.cluster_document.get(child.identifier) is not None
                        and child.parent != self.suffix_tree.root):
                    if self.phrases.get(child.parent.identifier) is None:
                        self.phrases[child.parent.identifier] = child.parent.phrase
                    if self.cluster_document.get(child.parent.identifier) is None:
                        self.cluster_document[child.parent.identifier] = \
                            self.cluster_document[child.identifier][:]
                    else:
                        self.cluster_document[child.parent.identifier] += \
                            self.cluster_document[child.identifier]
        else:
            # Leaf: register its document (bit_vector) under the parent node.
            if node.parent != self.suffix_tree.root:
                if self.phrases.get(node.parent.identifier) is None:
                    self.phrases[node.parent.identifier] = node.parent.phrase
                if self.cluster_document.get(node.parent.identifier) is None:
                    self.cluster_document[node.parent.identifier] = [node.bit_vector]
                else:
                    self.cluster_document[node.parent.identifier].append(node.bit_vector)

    def count_scores(self):
        """
        Count scores for base clusters.

        Formula: Score(S) = |B| * F(|P|), where |B| is the number of covered
        documents and |P| is the number of words in the cluster's phrase.
        """
        for cluster in self.phrases.keys():
            size = len(self.cluster_document[cluster])
            words = len(self.phrases[cluster].split(' '))
            self.scores[cluster] = size * F(words)

    def similarity(self, base_clusters):
        """
        Compute the binary similarity matrix for the given base clusters.

        Two clusters are similar (Sim == 1) when their document intersection
        exceeds the ALPHA fraction of each cluster's own documents.

        Args:
            base_clusters: top (<= K) base clusters sorted by score
        Return:
            square list-of-lists similarity matrix
        """
        n = len(base_clusters)
        Sim = [[0] * n for _ in range(n)]
        for i in range(n - 1):
            Sim[i][i] = 1  # the last diagonal entry stays 0; it is never read
            for j in range(i + 1, n):
                B1 = self.cluster_document[base_clusters[i]]
                B2 = self.cluster_document[base_clusters[j]]
                # intersection of two clusters (common covered documents)
                intersec = set(B1).intersection(B2)
                if len(intersec) / len(B1) > ALPHA and len(intersec) / len(B2) > ALPHA:
                    Sim[i][j] = 1
                    Sim[j][i] = 1
        return Sim

    def merge_clusters(self, Sim):
        """
        Merge base clusters: connected components of the similarity graph
        become the final clusters (appended to self.final_clusters).

        Args:
            Sim: similarity matrix between the sorted base clusters
        """
        node_names = {}  # base-cluster name -> GraphNode
        for i in range(len(Sim)):
            if self.sorted_clusters[i] not in node_names:
                node_names[self.sorted_clusters[i]] = GraphNode(self.sorted_clusters[i])
            # Only look at later clusters; earlier pairs were already linked.
            for j in range(i + 1, len(Sim)):
                if Sim[i][j] == 1:
                    if self.sorted_clusters[j] not in node_names:
                        node_names[self.sorted_clusters[j]] = GraphNode(self.sorted_clusters[j])
                    node_names[self.sorted_clusters[i]].add_link(
                        node_names[self.sorted_clusters[j]])
        for components in connected_components(node_names.values()):
            names = sorted(node.name for node in components)
            self.final_clusters.append(names)

    def find_clusters(self, number_of_clusters=NUM_OF_FINAL_CLUSTERS):
        """
        Find the final clusters.

        Args:
            number_of_clusters: max number of final clusters
                (default NUM_OF_FINAL_CLUSTERS = 10)
        Return:
            dict as produced by get_clusters(), or {} when there are fewer
            snippets than requested clusters.
        """
        if len(self.snippets) < number_of_clusters:
            print("Sorry, but number of snippets should be >= number of clusters")
            return {}
        # Reset per-run state so repeated calls do not accumulate stale data.
        # Bug fix: the original assigned `self.clusters_document` (a typo),
        # leaving `self.cluster_document` to grow across calls; phrases and
        # scores are reset alongside so the three dicts stay consistent.
        self.sorted_clusters = []
        self.cluster_document = {}
        self.phrases = {}
        self.scores = {}
        self.find_base_clusters()
        self.count_scores()  # computing scores of each base cluster
        self.final_clusters = []
        self.final_phrases = {}
        # Sort base clusters by score and keep the top K for merging.
        sorted_scores = sorted(self.scores.items(),
                               key=operator.itemgetter(1), reverse=True)
        n = min(K, len(sorted_scores))
        for i in range(n):
            self.sorted_clusters.append(sorted_scores[i][0])
        # Merge the selected clusters via the similarity matrix.
        Sim = self.similarity(self.sorted_clusters)
        self.merge_clusters(Sim)
        # Compute final scores (sum of member base-cluster scores) and
        # collect the phrases of each final cluster.
        final_scores = {}
        for final_cluster_index in range(len(self.final_clusters)):
            total = 0  # renamed from `sum`, which shadowed the builtin
            for base_cluster in self.final_clusters[final_cluster_index]:
                if type(base_cluster) is list:
                    for cluster in base_cluster:
                        total += self.scores[cluster]
                else:
                    total += self.scores[base_cluster]
                if final_cluster_index not in self.final_phrases:
                    self.final_phrases[final_cluster_index] = []
                if type(base_cluster) is list:
                    for cluster in base_cluster:
                        self.final_phrases[final_cluster_index].append(self.phrases[cluster])
                else:
                    self.final_phrases[final_cluster_index].append(self.phrases[base_cluster])
            final_scores[final_cluster_index] = total
        sorted_final_scores = sorted(final_scores.items(),
                                     key=operator.itemgetter(1), reverse=True)
        # Select the top final clusters (at most number_of_clusters).
        self.top_final_clusters = []
        self.top_final_phrases = {}
        n = min(number_of_clusters, len(self.final_clusters))
        self.n_goodclusters = 0
        for cluster in range(n):
            self.top_final_clusters.append(
                self.final_clusters[sorted_final_scores[cluster][0]])
            if sorted_final_scores[cluster][1] > 0:
                self.n_goodclusters += 1
            self.top_final_phrases[cluster + 1] = \
                self.final_phrases[sorted_final_scores[cluster][0]]
        return self.get_clusters()

    def get_common_phrases(self, num=2):
        """Return up to `num` representative (un-stemmed) tag words per cluster."""
        def restemming(word, num_snippets):
            # Map a stemmed word back to a surface form by scanning the
            # cluster's original snippets; '' when no match is found.
            for num_snippet in num_snippets:
                tokenized_snippet = tokenize_and_stem(self.snippets[num_snippet], stem=0)
                for sn in tokenized_snippet:
                    if sn.find(word) != -1:
                        return sn
            return ''

        clusters = self.get_clusters()  # hoisted: was recomputed per word
        phrases = {}
        for i in range(len(clusters)):
            for phrase in self.top_final_phrases[i + 1]:
                if i + 1 not in phrases:
                    phrases[i + 1] = []
                for word in phrase.split(' '):
                    restem = restemming(word, clusters[i + 1])
                    if restem != '' and len(phrases[i + 1]) < num:
                        phrases[i + 1].append(restem)
        return phrases

    def print_common_phrases(self, num=2):
        """Print the representative tag words of each top cluster."""
        result = self.get_common_phrases(num=num)
        for cluster, phrases in result.items():
            print("cluster #%i tags: " % cluster, end=' ')
            print(phrases)

    def get_number_of_good_clusters(self):
        """Return how many selected final clusters have a positive score."""
        return self.n_goodclusters

    def print_clusters(self):
        """Print the documents covered by each top cluster."""
        result = self.get_clusters()
        for cluster, snippets in result.items():
            print("cluster #%i contains documents: " % cluster, end=' ')
            print(snippets)

    def get_clusters(self):
        """Return {cluster number (1-based): list of covered document ids}."""
        result = {}
        for count, cluster in enumerate(self.top_final_clusters, start=1):
            documents = []
            for base_cluster in cluster:
                documents.append(set(self.cluster_document[base_cluster]))
            # Union of all member base clusters, de-duplicated.
            result[count] = list(frozenset().union(*documents))
        return result
def F(P):
    """
    Penalizing function used when scoring a base cluster.

    Score(S) = |B| * F(|P|), so this weights a cluster by its phrase length.

    Args:
        P: number of words in the phrase of the base cluster (|P|)
    Return:
        0 for a single-word phrase, P itself for 2..6 words,
        and the BETA penalty constant otherwise.
    """
    if P == 1:
        return 0
    if 2 <= P <= 6:
        return P
    return BETA
def main():
    """Demo entry point: fetch news snippets for a sample query, cluster them,
    and print the clusters with their tag phrases."""
    query = "putin"
    # NOTE(review): `api_urls` and `api_keys` are presumably supplied by
    # `from config import api_urls, api_keys`, which is commented out near
    # the top of this file; as written this line raises NameError -- confirm.
    snippets = search_articles(api_urls, api_keys, query)['snippets']
    if len(snippets) == 0:
        print("Sorry, no results for your query!")
        return
    # The constructor already adds the snippets to the suffix tree.
    STC = SuffixTreeClustering(snippets)
    STC.find_clusters()
    STC.print_clusters()
    STC.print_common_phrases(2)


if __name__ == "__main__":
    main()
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
import posixpath
import unittest
import mock
from pyfakefs import fake_filesystem_unittest
from py_utils import tempfile_ext
from telemetry.core import android_platform
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.backends import android_browser_backend_settings
from telemetry.internal.backends.chrome import android_browser_finder
from telemetry.internal.browser import browser_finder
from telemetry.internal.platform import android_platform_backend
from telemetry.internal.util import binary_manager
from telemetry.testing import options_for_unittests
def FakeFetchPath(dependency, os_name, arch, os_version=None):
    """Return a fake dependency path, mirroring binary_manager.FetchPath.

    The path has the shape dependency_dir/<dependency>/<os>_<version>_<arch>.apk.
    """
    apk_name = '%s_%s_%s.apk' % (os_name, os_version, arch)
    return os.path.join('dependency_dir', dependency, apk_name)
class AndroidBrowserFinderTest(fake_filesystem_unittest.TestCase):
    """Tests for android_browser_finder browser discovery.

    Runs against a fake filesystem (pyfakefs) and mocked devil/binary_manager
    calls, so no device or network is needed.
    """

    def setUp(self):
        self.finder_options = options_for_unittests.GetCopy()
        # Mock out what's needed for testing with exact APKs.
        self.setUpPyfakefs()
        # Patch FetchPath so reference-build lookups resolve inside the fake
        # filesystem instead of hitting cloud storage.
        self._fetch_path_patcher = mock.patch(
            'telemetry.internal.backends.chrome.android_browser_finder.binary_manager.FetchPath',  # pylint: disable=line-too-long
            FakeFetchPath)
        self._fetch_path_mock = self._fetch_path_patcher.start()
        # Patch APK package-name extraction; individual tests set the
        # return_value to simulate different APKs.
        self._get_package_name_patcher = mock.patch(
            'devil.android.apk_helper.GetPackageName')
        self._get_package_name_mock = self._get_package_name_patcher.start()
        self.fake_platform = mock.Mock(spec=android_platform.AndroidPlatform)
        self.fake_platform.CanLaunchApplication.return_value = True
        self.fake_platform._platform_backend = mock.Mock(
            spec=android_platform_backend.AndroidPlatformBackend)
        device = self.fake_platform._platform_backend.device
        device.build_description = 'some L device'
        device.build_version_sdk = 21
        self.fake_platform.GetOSVersionName.return_value = 'L23ds5'
        self.fake_platform.GetArchName.return_value = 'armeabi-v7a'
        # The android_browser_finder converts the os version name to 'k' or 'l'.
        self.expected_reference_build = FakeFetchPath(
            'chrome_stable', 'android', 'armeabi-v7a', 'l')

    def tearDown(self):
        # Stop patchers in reverse order of start(); tear down pyfakefs first.
        self.tearDownPyfakefs()
        self._get_package_name_patcher.stop()
        self._fetch_path_patcher.stop()

    def testNoPlatformReturnsEmptyList(self):
        fake_platform = None
        possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
            self.finder_options, fake_platform)
        self.assertEqual([], possible_browsers)

    def testCanLaunchAlwaysTrueReturnsAllExceptExactAndReference(self):
        # No browser_executable and no reference build on disk, so neither
        # 'exact' nor 'reference' should be offered.
        self.finder_options.browser_type = 'any'
        all_types = set(
            android_browser_finder.FindAllBrowserTypes())
        expected_types = all_types - {'exact', 'reference'}
        possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
            self.finder_options, self.fake_platform)
        self.assertEqual(
            expected_types,
            {b.browser_type for b in possible_browsers})

    def testCanLaunchAlwaysTrueReturnsAllExceptExact(self):
        # A reference build exists in the fake filesystem, so only 'exact'
        # (which needs --browser-executable) is excluded.
        self.finder_options.browser_type = 'any'
        self.fs.CreateFile(self.expected_reference_build)
        all_types = set(
            android_browser_finder.FindAllBrowserTypes())
        expected_types = all_types - {'exact'}
        possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
            self.finder_options, self.fake_platform)
        self.assertEqual(
            expected_types,
            {b.browser_type for b in possible_browsers})

    def testCanLaunchAlwaysTrueWithExactApkReturnsAll(self):
        # Both an exact APK (with a recognized package) and a reference
        # build are available, so every browser type should be offered.
        self.fs.CreateFile(
            '/foo/ContentShell.apk')
        self.fs.CreateFile(self.expected_reference_build)
        self.finder_options.browser_type = 'any'
        self.finder_options.browser_executable = '/foo/ContentShell.apk'
        self._get_package_name_mock.return_value = 'org.chromium.content_shell_apk'
        expected_types = set(
            android_browser_finder.FindAllBrowserTypes())
        possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
            self.finder_options, self.fake_platform)
        self.assertEqual(
            expected_types,
            {b.browser_type for b in possible_browsers})

    def testErrorWithUnknownExactApk(self):
        # APK exists but reports an unrecognized package name.
        self.fs.CreateFile(
            '/foo/ContentShell.apk')
        self.finder_options.browser_executable = '/foo/ContentShell.apk'
        self._get_package_name_mock.return_value = 'org.unknown.app'
        self.assertRaises(Exception,
                          android_browser_finder._FindAllPossibleBrowsers,
                          self.finder_options, self.fake_platform)

    def testErrorWithNonExistantExactApk(self):
        # browser_executable points at a path that was never created.
        self.finder_options.browser_executable = '/foo/ContentShell.apk'
        self._get_package_name_mock.return_value = 'org.chromium.content_shell_apk'
        self.assertRaises(Exception,
                          android_browser_finder._FindAllPossibleBrowsers,
                          self.finder_options, self.fake_platform)

    def testErrorWithUnrecognizedApkName(self):
        self.fs.CreateFile(
            '/foo/unknown.apk')
        self.finder_options.browser_executable = '/foo/unknown.apk'
        self._get_package_name_mock.return_value = 'org.foo.bar'
        with self.assertRaises(exceptions.UnknownPackageError):
            android_browser_finder._FindAllPossibleBrowsers(
                self.finder_options, self.fake_platform)

    def testCanLaunchExactWithUnrecognizedApkNameButKnownPackageName(self):
        # The file name is unknown but the package name is a known browser,
        # so the 'exact' type should still be available.
        self.fs.CreateFile(
            '/foo/MyFooBrowser.apk')
        self._get_package_name_mock.return_value = 'org.chromium.chrome'
        self.finder_options.browser_executable = '/foo/MyFooBrowser.apk'
        possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
            self.finder_options, self.fake_platform)
        self.assertIn('exact', [b.browser_type for b in possible_browsers])

    def testNoErrorWithMissingReferenceBuild(self):
        # A missing reference build should drop 'reference', not raise.
        possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
            self.finder_options, self.fake_platform)
        self.assertNotIn('reference', [b.browser_type for b in possible_browsers])

    def testNoErrorWithReferenceBuildCloudStorageError(self):
        # A cloud-storage failure while fetching the reference build should
        # also drop 'reference' rather than propagate.
        with mock.patch(
                'telemetry.internal.backends.chrome.android_browser_finder.binary_manager.FetchPath',  # pylint: disable=line-too-long
                side_effect=binary_manager.CloudStorageError):
            possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
                self.finder_options, self.fake_platform)
        self.assertNotIn('reference', [b.browser_type for b in possible_browsers])
def testNoErrorWithReferenceBuildNoPathFoundError(self):
self._fetch_path_mock.side_effect = binary_manager.NoPathFoundError
possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
self.finder_options, self.fake_platform)
self.assertNotIn('reference', [b.browser_type for b in possible_browsers])
def testWebViewBrowserReturned(self):
self.finder_options.browser_type = 'android-webview'
possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
self.finder_options, self.fake_platform)
self.assertEqual(possible_browsers[0].target_os, 'android_webview')
def testCanPossiblyHandlePath(self):
self.assertTrue(android_browser_finder._CanPossiblyHandlePath('foo.apk'))
self.assertTrue(android_browser_finder._CanPossiblyHandlePath('foo_bundle'))
self.assertFalse(android_browser_finder._CanPossiblyHandlePath('f.bundle'))
self.assertFalse(android_browser_finder._CanPossiblyHandlePath(''))
self.assertFalse(android_browser_finder._CanPossiblyHandlePath('fooaab'))
def testModulesPassedToInstallApplicationForBundle(self):
self.finder_options.modules_to_install = ['base']
self.fs.CreateFile('foo_bundle')
possible_browser = android_browser_finder.PossibleAndroidBrowser(
'android-chromium-bundle', self.finder_options, self.fake_platform,
android_browser_backend_settings.ANDROID_CHROMIUM_BUNDLE, 'foo_bundle')
with mock.patch.object(
self.fake_platform, 'InstallApplication') as m:
possible_browser.UpdateExecutableIfNeeded()
m.assert_called_with('foo_bundle', modules={'base'})
def testAndroid_Not_WebviewTagInTypExpectationsTags(self):
self.finder_options.modules_to_install = ['base']
self.fs.CreateFile('foo_bundle')
with mock.patch.object(self.fake_platform,
'GetTypExpectationsTags', return_value=['android']):
possible_browser = android_browser_finder.PossibleAndroidBrowser(
'android-chromium-bundle', self.finder_options, self.fake_platform,
android_browser_backend_settings.ANDROID_CHROMIUM_BUNDLE,
'foo_bundle')
self.assertIn('android-not-webview',
possible_browser.GetTypExpectationsTags())
self.assertIn('android',
possible_browser.GetTypExpectationsTags())
def testAndroidWebviewTagInTypExpectationsTags(self):
self.finder_options.modules_to_install = ['base']
self.fs.CreateFile('foo_bundle')
with mock.patch.object(self.fake_platform,
'GetTypExpectationsTags', return_value=['android']):
possible_browser = android_browser_finder.PossibleAndroidBrowser(
'android-webview-google', self.finder_options, self.fake_platform,
android_browser_backend_settings.ANDROID_WEBVIEW_GOOGLE,
'foo_bundle')
self.assertIn('android-webview',
possible_browser.GetTypExpectationsTags())
self.assertIn('android',
possible_browser.GetTypExpectationsTags())
def testAndroidWeblayerTagInTypExpectationsTags(self):
self.finder_options.modules_to_install = ['base']
self.fs.CreateFile('foo_bundle')
with mock.patch.object(self.fake_platform,
'GetTypExpectationsTags', return_value=['android']):
possible_browser = android_browser_finder.PossibleAndroidBrowser(
'android-weblayer', self.finder_options, self.fake_platform,
android_browser_backend_settings.ANDROID_WEBLAYER,
'foo_bundle')
self.assertIn('android-weblayer',
possible_browser.GetTypExpectationsTags())
self.assertIn('android',
possible_browser.GetTypExpectationsTags())
def _MockPossibleBrowser(modified_at):
  """Build a PossibleAndroidBrowser mock with the given modification time."""
  browser = mock.Mock(spec=android_browser_finder.PossibleAndroidBrowser)
  browser.last_modification_time = modified_at
  return browser
class SelectDefaultBrowserTest(unittest.TestCase):
  """Tests for android_browser_finder.SelectDefaultBrowser."""

  def testEmptyListGivesNone(self):
    self.assertIsNone(android_browser_finder.SelectDefaultBrowser([]))

  def testSinglePossibleReturnsSame(self):
    only_browser = _MockPossibleBrowser(modified_at=1)
    self.assertIs(
        only_browser,
        android_browser_finder.SelectDefaultBrowser([only_browser]))

  def testListGivesNewest(self):
    newest = _MockPossibleBrowser(modified_at=3)
    candidates = [
        _MockPossibleBrowser(modified_at=2),
        newest,
        _MockPossibleBrowser(modified_at=1),
    ]
    self.assertIs(
        newest, android_browser_finder.SelectDefaultBrowser(candidates))
class SetUpProfileBrowserTest(unittest.TestCase):
  """Device tests for pushing browser profiles to an Android device.

  Fix: the original assigned `device = browser_to_create._platform_backend
  .device` twice in testPushDefaultProfileDir and testPushDefaultProfileFiles;
  the duplicate assignments are removed.
  """

  @decorators.Enabled('android')
  def testPushEmptyProfile(self):
    finder_options = options_for_unittests.GetCopy()
    finder_options.browser_options.profile_dir = None
    browser_to_create = browser_finder.FindBrowser(finder_options)
    profile_dir = browser_to_create.profile_directory
    device = browser_to_create._platform_backend.device
    # Depending on Android version, the profile directory may have a 'lib'
    # folder. This folder must not be deleted when we push an empty profile.
    # Remember the existence of this folder so that we can check for accidental
    # deletion later.
    has_lib_dir = 'lib' in device.ListDirectory(profile_dir, as_root=True)
    try:
      # SetUpEnvironment will call RemoveProfile on the device, due to the fact
      # that there is no input profile directory in BrowserOptions.
      browser_to_create.SetUpEnvironment(finder_options.browser_options)
      # On some devices, "lib" is created after installing the browser,
      # and pushing / removing the profile should never modify it.
      profile_paths = device.ListDirectory(profile_dir, as_root=True)
      expected_paths = ['lib'] if has_lib_dir else []
      self.assertEqual(expected_paths, profile_paths)
    finally:
      browser_to_create.CleanUpEnvironment()

  @decorators.Enabled('android')
  def testPushDefaultProfileDir(self):
    # Add a few files and directories to a temp directory, and ensure they are
    # copied to the device.
    with tempfile_ext.NamedTemporaryDirectory() as tempdir:
      foo_path = os.path.join(tempdir, 'foo')
      with open(foo_path, 'w') as f:
        f.write('foo_data')
      bar_path = os.path.join(tempdir, 'path', 'to', 'bar')
      os.makedirs(os.path.dirname(bar_path))
      with open(bar_path, 'w') as f:
        f.write('bar_data')
      expected_profile_paths = ['foo', posixpath.join('path', 'to', 'bar')]
      finder_options = options_for_unittests.GetCopy()
      finder_options.browser_options.profile_dir = tempdir
      browser_to_create = browser_finder.FindBrowser(finder_options)
      # SetUpEnvironment will end up calling PushProfile
      try:
        browser_to_create.SetUpEnvironment(finder_options.browser_options)
        profile_dir = browser_to_create.profile_directory
        device = browser_to_create._platform_backend.device
        absolute_expected_profile_paths = [
            posixpath.join(profile_dir, path)
            for path in expected_profile_paths]
        self.assertTrue(device.PathExists(absolute_expected_profile_paths),
                        absolute_expected_profile_paths)
      finally:
        browser_to_create.CleanUpEnvironment()

  @decorators.Enabled('android')
  def testPushDefaultProfileFiles(self):
    # Add a few files and directories to a temp directory, and ensure they are
    # copied to the device.
    with tempfile_ext.NamedTemporaryDirectory() as tempdir:
      foo_path = os.path.join(tempdir, 'foo')
      with open(foo_path, 'w') as f:
        f.write('foo_data')
      bar_path = os.path.join(tempdir, 'path', 'to', 'bar')
      os.makedirs(os.path.dirname(bar_path))
      with open(bar_path, 'w') as f:
        f.write('bar_data')
      finder_options = options_for_unittests.GetCopy()
      finder_options.browser_options.profile_files_to_copy = [
          (foo_path, 'foo'),
          (bar_path, posixpath.join('path', 'to', 'bar'))]
      browser_to_create = browser_finder.FindBrowser(finder_options)
      # SetUpEnvironment will end up calling PushProfile
      try:
        browser_to_create.SetUpEnvironment(finder_options.browser_options)
        profile_dir = browser_to_create.profile_directory
        device = browser_to_create._platform_backend.device
        absolute_expected_profile_paths = [
            posixpath.join(profile_dir, path)
            for _, path
            in finder_options.browser_options.profile_files_to_copy]
        self.assertTrue(device.PathExists(absolute_expected_profile_paths),
                        absolute_expected_profile_paths)
      finally:
        browser_to_create.CleanUpEnvironment()
| |
# do not import dpga here, as that slows down test collection considerably,
# even if we do not run these tests.
import numba
import numpy as np
from . import rng # noqa: F401
def setup_module():
    """Import clifford.dpga once, before any test in this module runs."""
    # do this separately so that we get distinct timing information for it
    # (importing clifford.dpga is slow; keeping it out of test collection
    # and out of individual tests makes timings attributable).
    import clifford.dpga  # noqa: F401
class TestBasicDPGA:
    """Tests of the R(4, 4) double PGA implementation in clifford.dpga.

    Fixes relative to the original:
    - ``np.random.rand``/``np.random.randn`` calls replaced with the ``rng``
      fixture already used by these tests, so all sampling is deterministic.
    - loop variables renamed to avoid shadowing (notably the quadric
      coefficient ``i`` in ``test_quadric``) and ambiguous name ``l`` removed.
    """

    def test_non_orthogonal_metric(self):
        from clifford.dpga import wbasis
        # Pairwise inner products of the 8 basis vectors: each w_i is null,
        # and pairs (w_i, w_i*) have inner product 1/2 (antidiagonal metric).
        w_metric = np.array([
            [
                (a | b)[()]
                for a in wbasis
            ]
            for b in wbasis
        ])
        assert np.all(w_metric == np.array([
            [0, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0, 0],
        ]) / 2)

    def test_bivector_identities(self):
        """
        These come from section 2 of the paper:
        R(4, 4) As a Computational Framework for 3-Dimensional Computer Graphics
        by Ron Goldman and Stephen Mann
        """
        from clifford.dpga import wlist, wslist, wbasis
        for wi in wlist:
            for wj in wlist:
                assert wi^wj == -wj*wi
        for wis in wslist:
            for wjs in wslist:
                assert wis^wjs == -wjs*wis
        for w in wbasis:
            assert w**2 == 0
        for wi, wis in zip(wlist, wslist):
            assert wi*wis == 1 - wis*wi

    def test_up_down(self, rng):  # noqa: F811
        from clifford.dpga import up, down
        for _ in range(10 if numba.config.DISABLE_JIT else 1000):
            p = rng.standard_normal(3)
            dpga_pnt = up(p)
            # down() should be scale invariant; use the rng fixture (not
            # np.random) so the scale factor is deterministic.
            pnt_down = down(rng.random()*dpga_pnt)
            np.testing.assert_allclose(pnt_down, p)

    def test_translate(self, rng):  # noqa: F811
        from clifford.dpga import w0, w1, w2, w3, w0s
        from clifford.dpga import up
        for _ in range(10 if numba.config.DISABLE_JIT else 100):
            tvec = rng.standard_normal(3)
            wt = tvec[0]*w1 + tvec[1]*w2 + tvec[2]*w3
            biv = w0s*wt
            # biv squares to zero, so the exponential terminates at 1 - biv.
            Rt = 1 - biv
            exp_result = np.e**(-biv)
            assert Rt == exp_result
            assert Rt * w0 * ~Rt == w0 + wt
            for wi in [w1, w2, w3]:
                assert Rt * wi * ~Rt == wi
            assert (Rt*~Rt) == 1 + 0*w1
            pnt_vec = rng.standard_normal(3)
            pnt = up(pnt_vec)
            res = Rt*pnt*~Rt
            desired_result = up(pnt_vec+tvec)
            assert up(pnt_vec) + wt == desired_result
            assert res == desired_result

    def test_rotate(self, rng):  # noqa: F811
        from clifford.dpga import w0, w1, w2, w3, w1s, w2s, w3s
        from clifford.dpga import up, down
        for _ in range(10 if numba.config.DISABLE_JIT else 100):
            mvec = rng.standard_normal(3)
            nvec = rng.standard_normal(3)
            m = mvec[0] * w1 + mvec[1] * w2 + mvec[2] * w3
            n = nvec[0] * w1 + nvec[1] * w2 + nvec[2] * w3
            ms = mvec[0] * w1s + mvec[1] * w2s + mvec[2] * w3s
            ns = nvec[0] * w1s + nvec[1] * w2s + nvec[2] * w3s
            biv = 2*((ms^n) - (ns^m))
            Rt = np.e**(-biv)
            # Rotor should be unit
            np.testing.assert_allclose((Rt*~Rt).value, (1 + 0*w1).value, atol=1E-4)
            # The origin should be unaffected by rotation
            np.testing.assert_allclose((Rt*w0*~Rt).value, w0.value, atol=1E-4)
            # Vectors orthogonal to the rotation should be unaffected by rotation
            vorthog = np.cross(mvec, nvec)
            uporthog = up(vorthog)
            np.testing.assert_allclose((Rt*uporthog*~Rt).value, uporthog.value, atol=1E-4)
            # Points should maintain their distance from the origin
            pnt_vec = rng.standard_normal(3)
            norm_before = np.linalg.norm(pnt_vec)
            pnt = up(pnt_vec)
            norm_after = np.linalg.norm(down(Rt * pnt * ~Rt))
            np.testing.assert_allclose(norm_before, norm_after, atol=1E-6)

    def test_line(self, rng):  # noqa: F811
        from clifford.dpga import w0, w1, w2, w3, w0s
        from clifford.dpga import e12, e13, e23, e1b2b, e1b3b, e2b3b
        from clifford.dpga import up, down
        for _ in range(5 if numba.config.DISABLE_JIT else 100):
            p1vec = rng.standard_normal(3)
            p2vec = rng.standard_normal(3)
            p1 = up(p1vec)
            p2 = up(p2vec)
            line_direc = p2vec - p1vec
            # Plucker line is the outer product of two points or the outer product
            # of a point and a free vector
            line = p1 ^ p2
            free_direc = line_direc[0]*w1 + line_direc[1]*w2 + line_direc[2]*w3
            line_alt = p1 ^ free_direc
            assert line_alt == line
            # The line should be the outer product null space
            lamb = rng.standard_normal()
            assert up(lamb*p1vec + (1 - lamb)*p2vec) ^ line == 0
            # Lines can be transformed with rotors
            tvec = p1vec
            wt = tvec[0] * w1 + tvec[1] * w2 + tvec[2] * w3
            Raxis = 1 - w0s * wt
            assert (~Raxis*line*Raxis) ^ w0 == 0
            # Lines are invariant to screw transformations about their axis
            axis = p1vec - p2vec
            rotation_biv = axis[0]*(e23 - e2b3b) + axis[1]*(e1b3b - e13) + axis[2]*(e12 - e1b2b)
            Rr = np.e**(-rng.standard_normal()*rotation_biv)
            Rt = 1 - w0s * rng.standard_normal()*(axis[0] * w1 + axis[1] * w2 + axis[2] * w3)
            np.testing.assert_allclose((Raxis*Rr*Rt*(~Raxis*line*Raxis)*~Rt*~Rr*~Raxis).value,
                                       line.value, rtol=1E-4, atol=1E-4)
            # A bivector line is invariant under its own exponential
            Rline = np.e ** line
            assert Rline*line*~Rline == line
            # The exponential of a line is a rotation about the line
            line_origin = (~Raxis*line*Raxis)
            RLineOrigin = np.e ** line_origin
            random_pnt = up(rng.standard_normal(3))
            np.testing.assert_allclose(np.linalg.norm(down(RLineOrigin*random_pnt*~RLineOrigin)),
                                       np.linalg.norm(down(random_pnt)), rtol=1E-3, atol=1E-4)
            np.testing.assert_allclose(down(RLineOrigin * (random_pnt + free_direc) * ~RLineOrigin),
                                       down(RLineOrigin * random_pnt * ~RLineOrigin) + line_direc,
                                       rtol=1E-3, atol=1E-4)
            np.testing.assert_allclose((Rline*~Rline).value, (1 + 0*w1).value, rtol=1E-4, atol=1E-4)

    def test_quadric(self, rng):  # noqa: F811
        from clifford.dpga import w0, w1, w2, w3, w0s, w1s, w2s, w3s
        from clifford.dpga import e12, e13, e23, e1b2b, e1b3b, e2b3b
        from clifford.dpga import up, dual_point
        # Make a cone which passes through the origin
        # This is the construction from Transverse Approach paper
        quadric_coefs = [0.0, 1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        a, b, c, d, e, f, g, h, i, j = quadric_coefs
        quadric = (4 * a * (w0s ^ w0) + 4 * b * (w1s ^ w1) +
                   4 * c * (w2s ^ w2) + 4 * j * (w3s ^ w3) +
                   2 * d * ((w0s ^ w1) + (w1s ^ w0)) +
                   2 * e * ((w0s ^ w2) + (w2s ^ w0)) +
                   2 * f * ((w1s ^ w2) + (w2s ^ w1)) +
                   2 * g * ((w0s ^ w3) + (w3s ^ w0)) +
                   2 * h * ((w1s ^ w3) + (w3s ^ w1)) +
                   2 * i * ((w2s ^ w3) + (w3s ^ w2)))
        # The quadrics do not form an OPNS
        assert quadric ^ w0s != 0*w1
        # They form a `double IPNS'
        random_pnt = up(rng.standard_normal(3))
        doubledp = (random_pnt | quadric | dual_point(random_pnt))
        assert doubledp(0) == doubledp  # Not 0 but is a scalar
        assert (w0 | quadric | w0s) == 0 * w1  # The cone passes through the origin
        # Now let's do the construction from R(4,4) As a computational framework
        # Let's make a sphere
        sphere_quad = (w1s^w1) + (w2s^w2) + (w3s^w3) - (w0s^w0)
        # Let's try rotating, it should be invariant under rotation
        axis = rng.standard_normal(3)
        rotation_biv = axis[0] * (e23 - e2b3b) + axis[1] * (e1b3b - e13) + axis[2] * (e12 - e1b2b)
        Rr = np.e ** (-rotation_biv)
        np.testing.assert_allclose((Rr * sphere_quad * ~Rr).value, sphere_quad.value,
                                   rtol=1E-4, atol=1E-4)
        # Test points on the sphere surface (note: `_`, not `i`, which is a
        # quadric coefficient above)
        for _ in range(10):
            vec = rng.standard_normal(3)
            pnt = up(vec/np.linalg.norm(vec))
            pnts = dual_point(pnt)
            np.testing.assert_allclose((pnt | sphere_quad | pnts).value, 0,
                                       rtol=1E-4, atol=1E-6)
| |
import numpy as np
from six.moves import range
from ...core.utils import as_id_array
from ...utils.jaggedarray import JaggedArray
from .status import CORE_NODE, CLOSED_BOUNDARY
def _split_link_ends(link_ends):
"""
Examples
--------
>>> from landlab.grid.unstructured.links import _split_link_ends
>>> _split_link_ends(((0, 1, 2), (3, 4, 5)))
(array([0, 1, 2]), array([3, 4, 5]))
>>> _split_link_ends([(0, 3), (1, 4), (2, 5)])
(array([0, 1, 2]), array([3, 4, 5]))
>>> _split_link_ends((0, 3))
(array([0]), array([3]))
"""
links = np.array(list(link_ends), ndmin=2, dtype=np.int)
if len(links) != 2:
links = links.transpose()
if links.size == 0:
return (np.array([], dtype=np.int), np.array([], dtype=np.int))
else:
return links[0], links[1]
def link_is_active(status_at_link_ends):
    """link_is_active((status0, status1))
    Check if a link is active.
    Links are *inactive* if they connect two boundary nodes or touch a
    closed boundary. Otherwise, the link is *active*.
    Parameters
    ----------
    status0, status1 : sequence of array-like
        Status at link start and end
    Returns
    -------
    ndarray, boolean :
        Boolean array that indicates if a link is active.
    """
    start_status, end_status = _split_link_ends(status_at_link_ends)
    # Active means: a core node at one end, and no closed boundary at the
    # other.  (`!=` on ndarrays is equivalent to the original `~ (==)`.)
    active_from_start = ((start_status == CORE_NODE) &
                         (end_status != CLOSED_BOUNDARY))
    active_from_end = ((end_status == CORE_NODE) &
                       (start_status != CLOSED_BOUNDARY))
    return active_from_start | active_from_end
def find_active_links(node_status, node_at_link_ends):
    """find_active_links(node_status, (node0, node1))
    IDs of active links.
    Parameters
    ----------
    node_status : ndarray
        Status of nodes.
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    Returns
    -------
    ndarray :
        Links IDs of active links.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import find_active_links
    >>> links = [(0, 2), (1, 3), (0, 1), (1, 2), (0, 3)]
    >>> status = np.array([0, 0, 0, 0])
    >>> find_active_links(status, links)
    array([0, 1, 2, 3, 4])
    """
    node_start, node_end = _split_link_ends(node_at_link_ends)
    if len(node_start) != len(node_end):
        raise ValueError('Link arrays must be the same length')
    # Look up the status of the node at each end, then keep the link indices
    # where link_is_active says so.
    ends_status = (node_status[node_start], node_status[node_end])
    active_link_ids = np.where(link_is_active(ends_status))[0]
    return as_id_array(active_link_ids)
def in_link_count_per_node(node_at_link_ends, number_of_nodes=None):
    """in_link_count_per_node((node0, node1), number_of_nodes=None)
    Number of links entering nodes.
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    ndarray :
        Number of links entering nodes.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import in_link_count_per_node
    >>> link_ends = [(0, 3), (1, 4), (2, 5), (3, 6), (4, 7), (5, 8)]
    >>> in_link_count_per_node(zip(*link_ends))
    array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    """
    _, node_at_link_end = _split_link_ends(node_at_link_ends)
    # NOTE: unlike out_link_count_per_node, the equal-length validation is
    # intentionally not performed here (it was disabled in the original).
    counts = np.bincount(node_at_link_end, minlength=number_of_nodes)
    return as_id_array(counts)
def out_link_count_per_node(node_at_link_ends, number_of_nodes=None):
    """out_link_count_per_node((node0, node1), number_of_nodes=None)
    Number of links leaving nodes.
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    ndarray :
        Number of links leaving nodes.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import out_link_count_per_node
    >>> out_link_count_per_node(([0, 1, 2, 3, 4, 5], [3, 4, 5, 6, 7, 8]))
    array([1, 1, 1, 1, 1, 1])
    >>> out_link_count_per_node(([0, 1, 2, 3, 4, 5], [3, 4, 5, 6, 7, 8]),
    ...     number_of_nodes=9)
    array([1, 1, 1, 1, 1, 1, 0, 0, 0])
    """
    starts, ends = _split_link_ends(node_at_link_ends)
    if len(starts) != len(ends):
        raise ValueError('Link arrays must be the same length')
    # A link "leaves" the node it starts at; count occurrences of each start.
    return as_id_array(np.bincount(starts, minlength=number_of_nodes))
def link_count_per_node(node_at_link_ends, number_of_nodes=None):
    """link_count_per_node((node0, node1), number_of_nodes=None)
    Number of links per node.
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    ndarray :
        Number of links per nodes.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import link_count_per_node
    >>> link_count_per_node(([0, 1, 2, 3, 4, 5], [3, 4, 5, 6, 7, 8]))
    array([1, 1, 1, 2, 2, 2, 1, 1, 1])
    """
    in_count = in_link_count_per_node(node_at_link_ends)
    out_count = out_link_count_per_node(node_at_link_ends)
    # The two count arrays may be different lengths; zero-pad both to the
    # requested (or implied) number of nodes before summing.
    node_count = number_of_nodes or max(len(in_count), len(out_count))

    def _padded(counts):
        shortfall = node_count - len(counts)
        if shortfall > 0:
            return np.pad(counts, (0, shortfall), mode='constant')
        return counts

    return _padded(in_count) + _padded(out_count)
def _sort_links_by_node(node_at_link_ends, link_ids=None, sortby=0):
sorted_links = np.argsort(node_at_link_ends[sortby])
if link_ids is not None:
return np.array(link_ids, dtype=np.int)[sorted_links]
else:
return as_id_array(sorted_links)
def in_link_ids_at_node(node_at_link_ends, link_ids=None, number_of_nodes=None):
    """in_link_ids_at_node((node0, node1), number_of_nodes=None)
    Links entering nodes.
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    tuple :
        Tuple of link id array and offset into link id array.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import in_link_ids_at_node
    >>> (links, count) = in_link_ids_at_node(([0, 1, 2, 3, 4, 5],
    ...     [3, 4, 5, 6, 7, 8]))
    >>> links
    array([0, 1, 2, 3, 4, 5])
    >>> count
    array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    >>> (links, count) = in_link_ids_at_node(([0, 1, 2, 3, 4, 5],
    ...     [3, 4, 5, 6, 7, 8]),
    ...     link_ids=range(1, 7))
    >>> links
    array([1, 2, 3, 4, 5, 6])
    >>> count
    array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    """
    ends = _split_link_ends(node_at_link_ends)
    # Group by *end* node (sortby=1): a link enters the node it ends at.
    sorted_ids = _sort_links_by_node(ends, link_ids=link_ids, sortby=1)
    counts = in_link_count_per_node(ends, number_of_nodes=number_of_nodes)
    return sorted_ids, counts
def out_link_ids_at_node(node_at_link_ends, link_ids=None, number_of_nodes=None):
    """out_link_ids_at_node((node0, node1), number_of_nodes=None)
    Links leaving nodes.
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    tuple :
        Tuple of link id array and offset into link id array.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import out_link_ids_at_node
    >>> (links, count) = out_link_ids_at_node(
    ...     ([0, 1, 2, 3, 4, 5], [3, 4, 5, 6, 7, 8]), link_ids=range(-1, 5),
    ...     number_of_nodes=9)
    >>> links
    array([-1, 0, 1, 2, 3, 4])
    >>> count
    array([1, 1, 1, 1, 1, 1, 0, 0, 0])
    >>> (links, count) = out_link_ids_at_node(
    ...     ([0, 1, 2, 3, 4, 5], [3, 4, 5, 6, 7, 8]), number_of_nodes=9)
    >>> links
    array([0, 1, 2, 3, 4, 5])
    >>> count
    array([1, 1, 1, 1, 1, 1, 0, 0, 0])
    """
    ends = _split_link_ends(node_at_link_ends)
    # Group by *start* node (sortby=0): a link leaves the node it starts at.
    sorted_ids = _sort_links_by_node(ends, link_ids=link_ids, sortby=0)
    counts = out_link_count_per_node(ends, number_of_nodes=number_of_nodes)
    return sorted_ids, counts
def link_ids_at_node(node_at_link_ends, number_of_nodes=None):
    """link_ids_at_node((node0, node1), number_of_nodes=None)
    Links entering and leaving nodes.
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    tuple :
        Tuple of link id array and offset into link id array.
    Examples
    --------
    >>> from landlab.grid.unstructured.links import link_ids_at_node
    >>> (links, count) = link_ids_at_node(
    ...     ([0, 1, 2, 3, 4, 5], [3, 4, 5, 6, 7, 8]), number_of_nodes=9)
    >>> links
    array([0, 1, 2, 0, 3, 1, 4, 2, 5, 3, 4, 5])
    >>> count
    array([1, 1, 1, 2, 2, 2, 1, 1, 1])
    """
    links_per_node = link_count_per_node(node_at_link_ends,
                                         number_of_nodes=number_of_nodes)
    in_links = JaggedArray(
        *in_link_ids_at_node(node_at_link_ends,
                             number_of_nodes=number_of_nodes))
    out_links = JaggedArray(
        *out_link_ids_at_node(node_at_link_ends,
                              number_of_nodes=number_of_nodes))
    # Flatten per node: first the links entering the node, then those leaving.
    links = np.empty(in_links.size + out_links.size, dtype=int)
    offset = 0
    for node, link_count in enumerate(links_per_node):
        stop = offset + link_count
        links[offset:stop] = np.concatenate(
            (in_links.row(node), out_links.row(node)))
        offset = stop
    return links, links_per_node
class LinkGrid(object):
    """Create a grid of links that enter and leave nodes.
    __init__((node0, node1), number_of_nodes=None)
    Parameters
    ----------
    node0, node1 : sequence of array-like
        Node ID at link start and end.
    number_of_nodes : int, optional
        Number of nodes in the grid
    Returns
    -------
    LinkGrid :
        A newly-created grid
    Examples
    --------
    >>> from landlab.grid.unstructured.links import LinkGrid
    >>> lgrid = LinkGrid([(0, 1, 0, 2, 0), (2, 3, 1, 3, 3)], 4)
    >>> lgrid.number_of_links
    5
    >>> lgrid.number_of_nodes
    4
    >>> lgrid.number_of_in_links_at_node(0)
    0
    >>> lgrid.number_of_out_links_at_node(0)
    3
    >>> lgrid.out_link_at_node(0)
    array([0, 2, 4])
    >>> lgrid.nodes_at_link_id(1)
    array([1, 3])
    >>> lgrid = LinkGrid([(0, 1, 0, 2, 0), (2, 3, 1, 3, 3)], 4,
    ...     link_ids=range(1, 6))
    >>> lgrid.nodes_at_link
    array([[0, 2],
           [1, 3],
           [0, 1],
           [2, 3],
           [0, 3]])
    >>> lgrid.out_link_at_node(0)
    array([1, 3, 5])
    >>> lgrid.nodes_at_link_id(1)
    array([0, 2])
    """
    def __init__(self, link_ends, number_of_nodes, link_ids=None,
                 node_status=None):
        """Create a grid of links that enter and leave nodes.
        __init__((node0, node1), number_of_nodes=None)
        Parameters
        ----------
        node0, node1 : sequence of array-like
            Node ID at link start and end.
        number_of_nodes : int, optional
            Number of nodes in the grid
        Returns
        -------
        LinkGrid :
            A newly-created grid
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2, 0), (2, 3, 1, 3, 3)], 4)
        >>> lgrid.number_of_links
        5
        >>> lgrid.number_of_nodes
        4
        >>> lgrid.number_of_in_links_at_node(0)
        0
        >>> lgrid.number_of_out_links_at_node(0)
        3
        >>> lgrid.out_link_at_node(0)
        array([0, 2, 4])
        >>> lgrid.nodes_at_link_id(1)
        array([1, 3])
        >>> lgrid = LinkGrid([(0, 1, 0, 2, 0), (2, 3, 1, 3, 3)], 4,
        ...     link_ids=range(1, 6))
        >>> lgrid.nodes_at_link
        array([[0, 2],
               [1, 3],
               [0, 1],
               [2, 3],
               [0, 3]])
        >>> lgrid.out_link_at_node(0)
        array([1, 3, 5])
        >>> lgrid.nodes_at_link_id(1)
        array([0, 2])
        """
        link_ends = _split_link_ends(link_ends)
        # Jagged arrays of link IDs grouped per node: links ending at each
        # node and links starting at each node, respectively.
        self._in_link_at_node = JaggedArray(
            *in_link_ids_at_node(link_ends, link_ids=link_ids,
                                 number_of_nodes=number_of_nodes)
        )
        self._out_link_at_node = JaggedArray(
            *out_link_ids_at_node(link_ends, link_ids=link_ids,
                                  number_of_nodes=number_of_nodes)
        )
        self._link_ends = np.array(link_ends)
        if link_ids is not None:
            # Map each user-supplied link ID to its row index in _link_ends;
            # only defined when explicit IDs were given (see nodes_at_link_id).
            self._link_id_map = dict(zip(link_ids, range(len(link_ids))))
            self._link_ids = link_ids
        self._number_of_links = len(link_ends[0])
        self._number_of_nodes = number_of_nodes
        self._node_status = node_status
    @property
    def number_of_links(self):
        """Number of links in the grid.
        """
        return self._number_of_links
    @property
    def number_of_nodes(self):
        """Number of nodes in the grid.
        """
        return self._number_of_nodes
    def number_of_in_links_at_node(self, node):
        """Number of links entering a node.
        Parameters
        ----------
        node : int
            Node ID
        Returns
        -------
        int :
            Number of links entering the node.
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2), (2, 3, 1, 3)], 4)
        >>> [lgrid.number_of_in_links_at_node(node) for node in range(4)]
        [0, 1, 1, 2]
        """
        return self._in_link_at_node.length_of_row(node)
    def number_of_out_links_at_node(self, node):
        """Number of links leaving a node.
        Parameters
        ----------
        node : int
            Node ID
        Returns
        -------
        int :
            Number of links leaving the node.
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2), (2, 3, 1, 3)], 4)
        >>> [lgrid.number_of_out_links_at_node(node) for node in range(4)]
        [2, 1, 1, 0]
        """
        return self._out_link_at_node.length_of_row(node)
    def number_of_links_at_node(self, node):
        """Number of links entering and leaving a node.
        Parameters
        ----------
        node : int
            Node ID
        Returns
        -------
        int :
            Number of links entering and leaving the node.
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2), (2, 3, 1, 3)], 4)
        >>> [lgrid.number_of_links_at_node(node) for node in range(4)]
        [2, 2, 2, 2]
        """
        return (self.number_of_in_links_at_node(node) +
                self.number_of_out_links_at_node(node))
    @property
    def node_at_link_start(self):
        """Node ID at the start of each link."""
        return self._link_ends[0]
    @property
    def node_at_link_end(self):
        """Node ID at the end of each link."""
        return self._link_ends[1]
    @property
    def nodes_at_link(self):
        """(n_links, 2) array of node IDs at both ends of each link."""
        return self._link_ends.T
    @property
    def link_id(self):
        """IDs of all links, explicit if given at construction else 0..n-1."""
        try:
            return self._link_ids
        except AttributeError:
            # No explicit IDs were supplied; links are numbered sequentially.
            return np.arange(self.number_of_links)
    def nodes_at_link_id(self, link_id):
        """Pair of node IDs at the ends of the link with the given ID."""
        try:
            return self.nodes_at_link[self._link_id_map[link_id]]
        except AttributeError:
            # No explicit IDs were supplied; link_id is the row index itself.
            return self.nodes_at_link[link_id]
    def in_link_at_node(self, node):
        """Links entering a node.
        Parameters
        ----------
        node : int
            Node ID
        Returns
        -------
        ndarray :
            Links entering the node
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2), (2, 3, 1, 3)], 4)
        >>> len(lgrid.in_link_at_node(0)) == 0
        True
        >>> lgrid.in_link_at_node(3)
        array([1, 3])
        """
        return self._in_link_at_node.row(node)
    def out_link_at_node(self, node):
        """Links leaving a node.
        Parameters
        ----------
        node : int
            Node ID
        Returns
        -------
        ndarray :
            Links leaving the node
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2), (2, 3, 1, 3)], 4)
        >>> lgrid.out_link_at_node(0)
        array([0, 2])
        >>> len(lgrid.out_link_at_node(3)) == 0
        True
        """
        return self._out_link_at_node.row(node)
    def iter_nodes(self):
        """Iterate of the nodes of the grid.
        Returns
        -------
        ndarray :
            Links entering and leaving each node
        Examples
        --------
        >>> from landlab.grid.unstructured.links import LinkGrid
        >>> lgrid = LinkGrid([(0, 1, 0, 2), (2, 3, 1, 3)], 4)
        >>> for link in lgrid.iter_nodes(): link
        array([0, 2])
        array([2, 1])
        array([0, 3])
        array([1, 3])
        """
        for node in range(self.number_of_nodes):
            yield np.concatenate((
                self.in_link_at_node(node),
                self.out_link_at_node(node),
            ))
    @property
    def node_status_at_link_start(self):
        """Status of the node at the start of each link."""
        return self._node_status[self.node_at_link_start]
    @property
    def node_status_at_link_end(self):
        """Status of the node at the end of each link."""
        return self._node_status[self.node_at_link_end]
| |
# encoding=utf-8
# Generated by cpy
# 2014-06-17 09:58:41.398125
import os, sys
from sys import stdin, stdout
import socket
class SSDB_Response(object):
    """Result of an SSDB request: a status code plus payload or message.

    When code is 'ok' the second argument is stored as the payload (data);
    for any other code it is stored as the error message.
    """

    def __init__(self, code='', data_or_message=None):
        self.code = code
        self.data = None
        self.message = None
        if code == 'ok':
            self.data = data_or_message
        else:
            self.message = data_or_message

    def __repr__(self):
        # Same "<code> <message> <data>" text as the generated original.
        return '%s %s %s' % (self.code, self.message, self.data)

    def ok(self):
        """True when the request succeeded."""
        return self.code == 'ok'

    def not_found(self):
        """True when the server reported the key as missing."""
        return self.code == 'not_found'
class SSDB(object):
    """Blocking TCP client for an SSDB server.

    This class was auto-generated by the 'cpy' translator: the stray
    ``pass`` statements, the ``this`` receiver name and the
    while/break "switch" emulation in request() are generator artifacts
    and are deliberately left untouched.

    NOTE(review): the ``except Exception , e`` syntax and
    str-based buffers make this module Python-2-only.
    """
    pass
    def __init__(this, host, port):
        # Connect immediately; TCP_NODELAY avoids Nagle delays on the
        # small request/response round-trips this protocol performs.
        pass
        this.recv_buf = ''
        this._closed = False
        this.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        this.sock.connect(tuple([host, port]))
        this.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    def close(this):
        # Close the socket once; safe to call repeatedly.
        pass
        if not (this._closed):
            pass
            this.sock.close()
            this._closed = True
    def closed(this):
        # True once close() ran (also set when the peer disconnects in
        # net_read()).
        pass
        return this._closed
    def request(this, cmd, params=None):
        # Send one command and decode the reply into an SSDB_Response.
        # The reply shape depends on the command family, hence the large
        # generated "switch" on cmd below.
        pass
        if params==None:
            pass
            params = []
        params = ([cmd] + params)
        this.send(params)
        resp = this.recv()
        if resp==None:
            pass
            return SSDB_Response('error', 'Unknown error')
        if len(resp)==0:
            pass
            return SSDB_Response('disconnected', 'Connection closed')
        # {{{ switch: cmd
        _continue_1 = False
        while True:
            # Write-style commands: payload is an integer count (default 1).
            if False or ((cmd) == 'set') or ((cmd) == 'zset') or ((cmd) == 'hset') or ((cmd) == 'qpush') or ((cmd) == 'qpush_front') or ((cmd) == 'qpush_back') or ((cmd) == 'del') or ((cmd) == 'zdel') or ((cmd) == 'hdel') or ((cmd) == 'multi_set') or ((cmd) == 'multi_del') or ((cmd) == 'multi_hset') or ((cmd) == 'multi_hdel') or ((cmd) == 'multi_zset') or ((cmd) == 'multi_zdel'):
                pass
                if len(resp)>1:
                    pass
                    return SSDB_Response(resp[0], int(resp[1]))
                else:
                    pass
                    return SSDB_Response(resp[0], 1)
                break
            # Single-value reads: payload is the raw value string.
            if False or ((cmd) == 'substr') or ((cmd) == 'get') or ((cmd) == 'getset') or ((cmd) == 'hget') or ((cmd) == 'qfront') or ((cmd) == 'qback') or ((cmd) == 'qget') or ((cmd) == 'qpop') or ((cmd) == 'qpop_front') or ((cmd) == 'qpop_back'):
                pass
                if resp[0]=='ok':
                    pass
                    if len(resp)==2:
                        pass
                        return SSDB_Response('ok', resp[1])
                    else:
                        pass
                        return SSDB_Response('server_error', 'Invalid response')
                else:
                    pass
                    return SSDB_Response(resp[0])
                break
            # Numeric reads: payload is an int (float for 'zavg').
            if False or ((cmd) == 'getbit') or ((cmd) == 'setbit') or ((cmd) == 'countbit') or ((cmd) == 'strlen') or ((cmd) == 'ttl') or ((cmd) == 'expire') or ((cmd) == 'setnx') or ((cmd) == 'incr') or ((cmd) == 'decr') or ((cmd) == 'zincr') or ((cmd) == 'zdecr') or ((cmd) == 'hincr') or ((cmd) == 'hdecr') or ((cmd) == 'hsize') or ((cmd) == 'zsize') or ((cmd) == 'qsize') or ((cmd) == 'zget') or ((cmd) == 'zrank') or ((cmd) == 'zrrank') or ((cmd) == 'zsum') or ((cmd) == 'zcount') or ((cmd) == 'zavg') or ((cmd) == 'zremrangebyrank') or ((cmd) == 'zremrangebyscore') or ((cmd) == 'hclear') or ((cmd) == 'zclear') or ((cmd) == 'qclear') or ((cmd) == 'qpush') or ((cmd) == 'qpush_front') or ((cmd) == 'qpush_back'):
                pass
                if resp[0]=='ok':
                    pass
                    if len(resp)==2:
                        pass
                        try:
                            pass
                            if cmd=='zavg':
                                pass
                                val = float(resp[1])
                            else:
                                pass
                                val = int(resp[1])
                            return SSDB_Response('ok', val)
                        except Exception , e:
                            pass
                            return SSDB_Response('server_error', 'Invalid response')
                    else:
                        pass
                        return SSDB_Response('server_error', 'Invalid response')
                else:
                    pass
                    return SSDB_Response(resp[0])
                break
            # Key-list reads: payload is a flat list of keys.
            if False or ((cmd) == 'keys') or ((cmd) == 'zkeys') or ((cmd) == 'hkeys') or ((cmd) == 'list') or ((cmd) == 'hlist') or ((cmd) == 'zlist'):
                pass
                data = []
                if resp[0]=='ok':
                    pass
                    i = 1
                    while i<len(resp):
                        pass
                        data.append(resp[i])
                        pass
                        i += 1
                return SSDB_Response(resp[0], data)
                break
            # Key/value scans: payload is {'index': [keys], 'items': {k: v}}.
            if False or ((cmd) == 'scan') or ((cmd) == 'rscan') or ((cmd) == 'hgetall') or ((cmd) == 'hscan') or ((cmd) == 'hrscan'):
                pass
                if resp[0]=='ok':
                    pass
                    if len(resp) % 2==1:
                        pass
                        data = {'index': [],'items': {},}
                        i = 1
                        while i<len(resp):
                            pass
                            k = resp[i]
                            v = resp[(i + 1)]
                            data['index'].append(k)
                            data['items'][k] = v
                            pass
                            i += 2
                        return SSDB_Response('ok', data)
                    else:
                        pass
                        return SSDB_Response('server_error', 'Invalid response')
                else:
                    pass
                    return SSDB_Response(resp[0])
                break
            # zset scans: same shape as above but scores coerced to int
            # (-1 when the score is not parseable).
            if False or ((cmd) == 'zscan') or ((cmd) == 'zrscan') or ((cmd) == 'zrange') or ((cmd) == 'zrrange'):
                pass
                if resp[0]=='ok':
                    pass
                    if len(resp) % 2==1:
                        pass
                        data = {'index': [],'items': {},}
                        i = 1
                        while i<len(resp):
                            pass
                            k = resp[i]
                            v = resp[(i + 1)]
                            try:
                                pass
                                v = int(v)
                            except Exception , e:
                                pass
                                v = - (1)
                            data['index'].append(k)
                            data['items'][k] = v
                            pass
                            i += 2
                        return SSDB_Response('ok', data)
                    else:
                        pass
                        return SSDB_Response('server_error', 'Invalid response')
                else:
                    pass
                    return SSDB_Response(resp[0])
                break
            # Existence tests: payload is a bool.
            if False or ((cmd) == 'exists') or ((cmd) == 'hexists') or ((cmd) == 'zexists'):
                pass
                data = False
                if resp[0]=='ok':
                    pass
                    if len(resp)>=2:
                        pass
                        if resp[1]=='1':
                            pass
                            data = True
                return SSDB_Response(resp[0], data)
                break
            # Bulk existence tests: payload is {key: bool}.
            if False or ((cmd) == 'multi_exists') or ((cmd) == 'multi_hexists') or ((cmd) == 'multi_zexists'):
                pass
                data = {}
                if len(resp) % 2==1:
                    pass
                    i = 1
                    while i<len(resp):
                        pass
                        k = resp[i]
                        if resp[(i + 1)]=='1':
                            pass
                            v = True
                        else:
                            pass
                            v = False
                        data[k] = v
                        pass
                        i += 2
                return SSDB_Response('ok', data)
                break
            # Bulk string reads: payload is {key: value}.
            if False or ((cmd) == 'multi_get') or ((cmd) == 'multi_hget'):
                pass
                if resp[0]=='ok':
                    pass
                    if len(resp) % 2==1:
                        pass
                        data = {}
                        i = 1
                        while i<len(resp):
                            pass
                            k = resp[i]
                            v = resp[(i + 1)]
                            data[k] = v
                            pass
                            i += 2
                        return SSDB_Response('ok', data)
                    else:
                        pass
                        return SSDB_Response('server_error', 'Invalid response')
                else:
                    pass
                    return SSDB_Response(resp[0])
                break
            # Bulk numeric reads: payload is {key: int}.
            if False or ((cmd) == 'multi_hsize') or ((cmd) == 'multi_zsize') or ((cmd) == 'multi_zget'):
                pass
                if resp[0]=='ok':
                    pass
                    if len(resp) % 2==1:
                        pass
                        data = {}
                        i = 1
                        while i<len(resp):
                            pass
                            k = resp[i]
                            v = int(resp[(i + 1)])
                            data[k] = v
                            pass
                            i += 2
                        return SSDB_Response('ok', data)
                    else:
                        pass
                        return SSDB_Response('server_error', 'Invalid response')
                else:
                    pass
                    return SSDB_Response(resp[0])
                break
            ### default
            if len(resp)>1:
                pass
                data = []
                i = 1
                while i<len(resp):
                    pass
                    data.append(resp[i])
                    pass
                    i += 1
            else:
                pass
                data = ''
            return SSDB_Response(resp[0], data)
            break
            break
            if _continue_1:
                continue
        # }}} switch
        return SSDB_Response('error', 'Unknown error')
    def send(this, data):
        # Serialize to the SSDB wire format: "<len>\n<chunk>\n" for each
        # field, terminated by a blank line, then loop until the whole
        # buffer is written.  Returns the last send() count, or -1 on a
        # zero-byte send or socket error.  dict inputs send their keys.
        pass
        ps = []
        _cpy_r_0 = _cpy_l_1 = data
        if type(_cpy_r_0).__name__ == 'dict': _cpy_b_3=True; _cpy_l_1=_cpy_r_0.iterkeys()
        else: _cpy_b_3=False;
        for _cpy_k_2 in _cpy_l_1:
            if _cpy_b_3: p=_cpy_r_0[_cpy_k_2]
            else: p=_cpy_k_2
            pass
            p = str(p)
            ps.append(str(len(p)))
            ps.append(p)
        nl = '\n'
        s = (nl.join(ps) + '\n\n')
        try:
            pass
            while True:
                pass
                ret = this.sock.send(s)
                if ret==0:
                    pass
                    return - (1)
                s = s[ret : ]
                if len(s)==0:
                    pass
                    break
        except socket.error , e:
            pass
            return - (1)
        return ret
    def net_read(this):
        # Read one chunk from the socket into recv_buf.  Returns the
        # number of bytes read; 0 means the peer closed the connection
        # (the client is marked closed as a side effect).
        pass
        try:
            pass
            data = this.sock.recv(1024 * 8)
        except Exception , e:
            pass
            data = ''
        if data=='':
            pass
            this.close()
            return 0
        this.recv_buf += data
        return len(data)
    def recv(this):
        # Block until one full response is buffered, then return it as a
        # list of chunks.  Returns [] when the connection was closed.
        pass
        while True:
            pass
            ret = this.parse()
            if ret==None:
                pass
                if this.net_read()==0:
                    pass
                    return []
            else:
                pass
                return ret
    def parse(this):
        # Try to parse one complete response out of recv_buf.  Each chunk
        # is "<len>\n<data>\n"; a blank line terminates the response.
        # Returns the chunk list when complete, [] on a malformed length
        # line, or None when more data is needed (buffer untouched).
        pass
        ret = []
        spos = 0
        epos = 0
        while True:
            pass
            spos = epos
            epos = this.recv_buf.find('\n', spos)
            if epos==- (1):
                pass
                break
            epos += 1
            line = this.recv_buf[spos : epos]
            spos = epos
            if line.strip()=='':
                pass
                if len(ret)==0:
                    pass
                    continue
                else:
                    pass
                    this.recv_buf = this.recv_buf[spos : ]
                    return ret
            try:
                pass
                num = int(line)
            except Exception , e:
                pass
                return []
            epos = (spos + num)
            if epos>len(this.recv_buf):
                pass
                break
            data = this.recv_buf[spos : epos]
            ret.append(data)
            spos = epos
            epos = this.recv_buf.find('\n', spos)
            if epos==- (1):
                pass
                break
            epos += 1
        return None
| |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from auto_gen_objects import Object, Property, Choice
XML_TYPE = 'xml'
class XMLObject(Object):
    """Object subclass that understands the 'xml' layout section."""

    @staticmethod
    def convert(obj):
        """Re-brand obj (and its properties/choices) with the XML* classes
        when it declares an 'xml' layout."""
        if XML_TYPE not in obj.layouts:
            return
        obj.__class__ = XMLObject
        for prop in obj.properties:
            prop.__class__ = XMLProperty
        for choice in obj.choices:
            choice.__class__ = XMLChoice
            for prop in choice.properties:
                prop.__class__ = XMLProperty

    def getName(self):
        """XML-layout name when present, else the generic object name."""
        try:
            return self.layouts[XML_TYPE]['name']
        except KeyError:
            pass
        return Object.getName(self)

    def getNodeType(self):
        """Schema node kind for this object; objects default to elements."""
        try:
            return self.layouts[XML_TYPE]['nodeType']
        except KeyError:
            pass
        return 'xs:element'

    def getXMLAttributes(self):
        """Non-inferred properties rendered as XML attributes."""
        return [prop for prop in self.properties
                if prop.hasSpec() and not prop.isInferred()
                and prop.getNodeType() == 'xs:attribute']

    def getXMLElements(self):
        """Non-inferred properties rendered as XML elements."""
        return [prop for prop in self.properties
                if prop.hasSpec() and not prop.isInferred()
                and prop.getNodeType() == 'xs:element']

    def getXMLChoices(self):
        """Forward (non-inverse) choices with at least one specified property."""
        return [choice for choice in self.choices
                if not choice.isInverse()
                and any([prop.hasSpec() for prop in choice.properties])]

    def isXMLChoice(self):
        return False

    def getXMLInferredProperties(self):
        """Properties whose value is inferred rather than serialized."""
        return [prop for prop in self.properties if prop.isInferred()]

    def getConstructorPairs(self):
        """(name, name) pairs for every specified python field."""
        return [(field.getRegularName(), field.getRegularName())
                for field in self.getPythonFields() if field.hasSpec()]
class XMLProperty(Property):
    """Property subclass that understands the 'xml' section of the spec.

    Each accessor prefers the value from ``self.specs[XML_TYPE]`` and
    falls back to a default (or the base-class value) when missing.
    """

    def hasSpec(self):
        """True when this property carries an XML spec."""
        # was self.specs.has_key(XML_TYPE): dict.has_key is Python-2-only;
        # the 'in' operator is equivalent on both 2 and 3.
        return XML_TYPE in self.specs

    def getName(self):
        """XML-spec name when present, else the generic property name."""
        try:
            return self.specs[XML_TYPE]['name']
        except KeyError:
            pass
        return Property.getName(self)

    def getNodeType(self):
        """Schema node kind; properties default to attributes."""
        try:
            return self.specs[XML_TYPE]['nodeType']
        except KeyError:
            pass
        return 'xs:attribute'

    def getAttributeType(self):
        """Schema value type; defaults to xs:string."""
        try:
            return self.specs[XML_TYPE]['type']
        except KeyError:
            pass
        return 'xs:string'

    def getAttributeUse(self):
        """The optional 'use' qualifier (e.g. 'required'), or None."""
        try:
            return self.specs[XML_TYPE]['use']
        except KeyError:
            pass
        return None

    def getAttributeUseText(self):
        """Render the ' use=...' schema fragment ('' when unset)."""
        if self.getAttributeUse() is not None:
            # BUG FIX: the original used '&' (bitwise and) instead of '%'
            # (string interpolation), which raises TypeError on str.
            return ' use=%s' % self.getAttributeUse()
        return ''

    def getChoice(self):
        """Name of the choice this property belongs to, or None."""
        try:
            return self.specs[XML_TYPE]['choice']
        except KeyError:
            pass
        return None

    def isInferred(self):
        """True when the spec marks this property as inferred."""
        try:
            return self.specs[XML_TYPE]['inferred'] == 'true'
        except KeyError:
            pass
        return False

    def getXMLPropertyName(self):
        """Element name: the referenced object's name for references."""
        if self.isReference():
            refObj = self.getReferencedObject()
            return refObj.getName()
        return self.getName()

    def getMinOccurs(self):
        """Generated elements are always optional."""
        return '0'

    def getMaxOccurs(self):
        """'1' for single-valued references, else 'unbounded'."""
        if self.isReference() and self.getMapping() != 'one-to-many':
            return '1'
        return 'unbounded'
class XMLChoice(Choice):
    """Choice subclass that understands the 'xml' section of the spec."""

    def hasSpec(self):
        # A choice is considered specified when its first property is.
        return self.properties[0].hasSpec()

    def getXMLProperties(self):
        """All specified properties of this choice."""
        return [prop for prop in self.properties if prop.hasSpec()]

    def getXMLAttributes(self):
        """Non-inferred properties rendered as XML attributes."""
        return [prop for prop in self.properties
                if prop.hasSpec() and not prop.isInferred()
                and prop.getNodeType() == 'xs:attribute']

    def getXMLElements(self):
        """Non-inferred properties rendered as XML elements."""
        return [prop for prop in self.properties
                if prop.hasSpec() and not prop.isInferred()
                and prop.getNodeType() == 'xs:element']

    def getXMLInferredProperties(self):
        """Properties whose value is inferred rather than serialized."""
        return [prop for prop in self.properties if prop.isInferred()]

    def isXMLChoice(self):
        return True
def convert(objects):
    """Convert and return the subset of *objects* declaring an XML layout."""
    xml_objects = []
    for candidate in objects:
        if XML_TYPE not in candidate.layouts:
            continue
        XMLObject.convert(candidate)
        xml_objects.append(candidate)
    return xml_objects
def convert_schema_order(objects, root_type):
    """Return objects in schema order: breadth-first from the root type.

    Starting at the object named *root_type*, repeatedly collect every
    referenced object (via element properties, directly or through
    choices) not yet seen, one level at a time.

    Raises ValueError when *root_type* is not among *objects*.
    """
    root = None
    for candidate in objects:
        if candidate.getName() == root_type:
            root = candidate
            break
    if root is None:
        raise ValueError("Cannot find root %s" % root_type)
    ordered = [root]
    frontier = [root]
    while frontier:
        discovered = []
        for obj in frontier:
            # Direct element properties first, then each choice's elements,
            # preserving the original visiting order.
            prop_groups = [obj.getXMLElements()]
            for choice in obj.getXMLChoices():
                prop_groups.append(choice.getXMLElements())
            for group in prop_groups:
                for prop in group:
                    if not prop.isReference():
                        continue
                    target = prop.getReferencedObject()
                    if target not in ordered and target not in discovered:
                        discovered.append(target)
        ordered.extend(discovered)
        frontier = discovered
    return ordered
| |
import cuisine
from fabric.api import task, roles, run, sudo, cd, local
from fabric.contrib.files import uncomment
from utilities import notify
from conf import MACHINE, KEY
import templates
@task
@roles('web')
def new():
    """Create the GCE instance for this deployment.

    NOTE(review): uses the legacy gcutil CLI -- confirm it is still
    available; gcloud replaced it.
    """
    # just for convenience.
    #local('deactivate')
    local('gcutil addinstance ' + KEY + ' '
        '--project=open-municipalities '
        '--persistent_boot_disk '
        '--zone=europe-west1-b '
        '--external_ip_address=192.158.30.219 ' + MACHINE['LOCATION'] + ' '
        '--machine_type=g1-small '
        '--ssh_user=' + KEY + ' '
        '--image=projects/debian-cloud/global/images/debian-7-wheezy-v20130617')
@task
@roles('web')
def delete():
    """Tear down the GCE instance (legacy gcutil CLI)."""
    local('gcutil deleteinstance ' + KEY + ' '
        '--project=open-municipalities')
# this was also done
# gcutil addfirewall http-web --allowed=tcp:80 --project=open-municipalities
# gcutil addfirewall https-web --allowed=:443 --project=open-municipalities
# gcutil deleteinstance omuni --project=open-municipalities
@task
@roles('web')
def bootstrap():
    """Run the full first-time server configuration sequence.

    Order matters: base OS settings first, then packages, then
    profile/symlinks, finishing with a reboot.
    """
    notify('Configuring the server.')
    tz_conf()
    locale_conf()
    apt_update()
    apt_upgrade()
    hosts_conf()
    dir_conf()
    package_conf()
    nonmanaged_package_conf()
    python_package_conf()
    node_module_conf()
    profile_conf()
    #firewall_conf()
    link_conf()
    reboot()
def apt_update():
    """Refresh the apt package index."""
    sudo('apt-get update')
def apt_upgrade():
    """Upgrade installed packages.

    NOTE(review): runs without -y, so apt may prompt and hang a
    non-interactive session -- confirm whether that is intended.
    """
    sudo('apt-get upgrade')
def tz_conf():
    """Set the machine timezone to UTC."""
    notify('Configuring timezone defaults.')
    sudo('echo "Etc/UTC" > /etc/timezone')
    sudo('dpkg-reconfigure -f noninteractive tzdata')
def locale_conf():
    """Generate and install en_US.UTF-8 as the default locale."""
    notify('Configuring locale defaults.')
    sudo('locale-gen --purge en_US.UTF-8')
    sudo('echo -e "LANG=\'en_US.UTF-8\'\nLANGUAGE=\'en_US:en\'\n" > /etc/default/locale')
    sudo('dpkg-reconfigure -f noninteractive locales')
def hosts_conf():
    """Set the hostname and append our host entries to /etc/hosts.

    NOTE(review): file_append is not idempotent -- repeated runs will
    duplicate the appended entries (see the disabled ensure-line sketch
    below).
    """
    notify('Writing hostname and hosts files.')
    cuisine.mode_sudo()
    run('echo "{NAME}" > /etc/hostname'.format(NAME=MACHINE['KEY']))
    run('hostname -F /etc/hostname')
    hosts = cuisine.text_template(templates.hosts, MACHINE)
    cuisine.file_append('/etc/hosts', hosts)
    # Want to do an ensure here, the current method is not good for repeated
    # runs.
    #print 'GOING TO GO IN LINES'
    #for l in hosts.splitlines():
    #    print 'LINE:'
    #    print l
    #    text_ensure_line(f, l)
    #file_write('/etc/hosts', f)
def dir_conf():
    """Create the working directory tree (workspace, envs, projects,
    ssl, logs) with the configured mode and ownership."""
    notify('Creating the working directory structure.')
    cuisine.mode_sudo()
    cuisine.dir_ensure(MACHINE['DIR_WORKSPACE'])
    for dir_key in ('DIR_ENVIRONMENTS', 'DIR_PROJECTS', 'DIR_SSL',
                    'DIR_LOGS'):
        cuisine.dir_ensure(MACHINE[dir_key], recursive=True,
                           mode=MACHINE['DIR_MODE'], owner=KEY,
                           group=MACHINE['OWNER_GROUP'])
def package_conf(databases=None):
    """Install the system packages the stack needs.

    Parameters
    ----------
    databases : iterable or None
        Database names to provision ('postgres', 'redis').  Defaults to
        ``MACHINE['DATABASES']``, now resolved at call time -- the
        original default argument froze the value at import time.
    """
    if databases is None:
        databases = MACHINE['DATABASES']
    notify('Installing all required system packages.')
    #package_ensure('ufw')
    # Build tooling, image/compression libs, python, nginx and VCS clients.
    for pkg in ('supervisor', 'python-dev', 'python-setuptools',
                'python-software-properties', 'g++', 'make',
                'build-essential', 'checkinstall', 'libxml2-dev',
                'libjpeg8-dev', 'libpng-dev', 'zlib1g-dev',
                'libfreetype6-dev', 'liblcms1-dev', 'python', 'python-pip',
                'nginx', 'git-core', 'mercurial'):
        cuisine.package_ensure(pkg)
    if databases:
        if 'postgres' in databases:
            cuisine.package_ensure('postgresql')
            cuisine.package_ensure('postgresql-contrib')
            cuisine.package_ensure('postgresql-server-dev-all')
            # Not working, do manually
            #postgres_conf()
        if 'redis' in databases:
            cuisine.package_ensure('redis-server')
def nonmanaged_package_conf():
    """Build and install packages that are not in the apt repos (node.js
    from source via checkinstall)."""
    notify('Installing additional non-managed packages.')
    # NODE.JS
    #
    #
    # IMPORTANT: REQUIRES YOU TO RESET THE VERSION NUMBER WITHOUT A "v"
    # during install.
    #
    #
    notify('Installing node.js')
    cuisine.mode_sudo()
    cuisine.dir_ensure(MACHINE['DIR_USER_HOME'])
    with cd(MACHINE['DIR_USER_HOME']):
        sudo('wget -N http://nodejs.org/dist/node-latest.tar.gz')
        sudo('tar xzvf node-latest.tar.gz')
        # NOTE(review): 'node-v*' relies on the remote shell expanding the
        # glob for every command run inside this cd() -- confirm this works
        # with the deployed fabric version.
        with cd('node-v*'):
            sudo('./configure')
            sudo('checkinstall')
            sudo('sudo dpkg -i node_*')
def python_package_conf():
    """Install system-wide Python tooling (virtualenv + wrapper)."""
    notify('Installing required system python packages.')
    cuisine.mode_sudo()
    cuisine.python_package_ensure('virtualenv')
    cuisine.python_package_ensure('virtualenvwrapper')
def node_module_conf():
    """Install the global node modules we need (volo)."""
    notify('Installing required system node modules.')
    cuisine.mode_sudo()
    sudo('npm install -g volo')
def postgres_conf():
    """PostgreSQL bootstrap recipe.

    NOTE(review): known-broken and not called automatically (package_conf
    says "Not working, do manually").  ``with sudo('su - postgres')``
    looks wrong: fabric's sudo() returns a result object, not a context
    manager, and the run('psql') / run('CREATE EXTENSION ...') lines
    would execute as separate shell commands rather than inside a psql
    session.  Treat this as a record of the manual steps to perform.
    """
    uncomment('/etc/postgresql/9.1/main/postgresql.conf', 'listen_addresses',
              use_sudo=True, char='#', backup='.bak')
    sudo('passwd postgres')
    with sudo('su - postgres'):
        run('psql')
        run('CREATE EXTENSION adminpack;')
        #
        # get out of postgresq shell here
        #
        run('createuser ' + KEY)
        run('exit')
    run('createdb ' + KEY)
@task
@roles('web')
def profile_conf():
    """Append our profile template to the owner's shell profile and source it.

    NOTE(review): file_append duplicates the block on repeated runs.
    """
    notify('Configuring .profile settings.')
    profile = cuisine.text_template(templates.profile, MACHINE)
    cuisine.file_append(MACHINE['OWNER_PROFILE'], profile)
    run('source ' + MACHINE['OWNER_PROFILE'])
def firewall_conf():
    """Deny-by-default ufw policy allowing HTTP/HTTPS/submission (587).

    Currently disabled in bootstrap() -- firewalling is done via gcutil
    addfirewall instead (see the comments above bootstrap).
    """
    sudo('ufw default deny')
    sudo('ufw allow 80')
    sudo('ufw allow 443')
    sudo('ufw allow 587')
    sudo('ufw enable')
def link_conf():
    """Symlink the multiarch image/compression libraries into /usr/lib
    so that builds (e.g. PIL) can find them."""
    notify('Configuring necessary symlinks for our libraries.')
    cuisine.mode_sudo()
    for lib in ('libjpeg', 'libpng', 'libz', 'libfreetype', 'liblcms'):
        cuisine.file_link('/usr/lib/x86_64-linux-gnu/%s.so' % lib,
                          '/usr/lib/%s.so' % lib, symbolic=True)
def reboot():
    """Reboot the remote machine (the fabric connection will drop)."""
    sudo('reboot')
| |
"""Common functionality for multiprocess system basees built with the
python 'multiprocess' module. Intended as a base class, not for
direct usage."""
import logging
from thespian.actors import *
from thespian.system.systemBase import systemBase
from thespian.system.utilis import thesplog, checkActorCapabilities, partition
from thespian.system.transport import *
from thespian.system.logdirector import *
from thespian.system.utilis import setProcName, StatsManager
from thespian.system.addressManager import ActorLocalAddress, CannotPickleAddress
from thespian.system.messages.multiproc import *
from thespian.system.sourceLoader import loadModuleFromHashSource
from functools import partial
import multiprocessing
import signal
from datetime import timedelta
MAX_ADMIN_STARTUP_DELAY = timedelta(seconds=5)


def _signals_named(names):
    """Return the signal objects for the named signals defined on this OS.

    Uses getattr instead of the original eval('signal.%s' % name): same
    result and same skip-if-missing behavior, without eval.
    """
    sigs = []
    for sname in names:
        sig = getattr(signal, sname, None)
        if sig is not None:
            sigs.append(sig)
    return sigs


# Signals that cannot (or should not) be trapped by actor processes.
uncatchable_signals = _signals_named(['SIGCONT', 'SIGPIPE', 'SIGKILL', 'SIGSTOP'])
# Signals indicating the process should exit.
exit_signals = _signals_named(['SIGTERM', 'SIGKILL', 'SIGQUIT', 'SIGABRT'])
# Signals indicating a child process has exited.
child_exit_signals = _signals_named(['SIGCHLD'])

set_signal_handler = signal.signal
#set_signal_handler = lambda *args: None
def detach_child(childref):
    """Remove childref from multiprocessing's internal children list so the
    current process neither joins nor terminates it on exit.

    Reaches into multiprocessing internals; the two hasattr branches cover
    the different attribute layouts across Python versions.
    """
    if hasattr(multiprocessing.process, '_children'):
        # Python 3.4
        multiprocessing.process._children.remove(childref)
    if hasattr(multiprocessing.process, '_current_process'):
        if hasattr(multiprocessing.process._current_process, '_children'):
            # Python 2.6
            multiprocessing.process._current_process._children.remove(childref)
def get_multiproc_context(capabilities):
    """Pick a multiprocessing context honoring the 'Process Startup Method'
    capability, falling back to 'fork' then 'spawn'.

    Returns None when this Python has no multiprocessing contexts at all
    (pre-3.4).
    """
    preferred = capabilities.get('Process Startup Method', 'fork')
    if not hasattr(multiprocessing, 'get_context'):
        return None
    for method in (preferred, 'fork', 'spawn'):
        if hasattr(multiprocessing, 'get_all_start_methods'):
            if method in multiprocessing.get_all_start_methods():
                return multiprocessing.get_context(method)
        else:
            try:
                return multiprocessing.get_context(method)
            except ValueError:
                pass  # invalid concurrency method for this system
    return None
class multiprocessCommon(systemBase):
    """Common base for ActorSystem bases that run each Actor in its own
    process via 'multiprocessing'.  Concrete subclasses supply
    ``transportType``."""

    def __init__(self, system, logDefs = None):
        import sys, time
        # Advertise interpreter/Thespian details as capabilities so actor
        # placement can match against them.
        system.capabilities['Python Version'] = tuple(sys.version_info)
        system.capabilities['Thespian Generation'] = ThespianGeneration
        system.capabilities['Thespian Version'] = str(int(time.time()*1000))
        self.mpcontext = get_multiproc_context(system.capabilities)
        self.transport = self.transportType(ExternalInterfaceTransportInit(),
                                            system.capabilities, logDefs,
                                            self.mpcontext)
        super(multiprocessCommon, self).__init__(system, logDefs)

    def _startAdmin(self, adminAddr, addrOfStarter, capabilities, logDefs):
        """Spawn the admin process and wait (bounded) for its
        EndpointConnected handshake; raises InvalidActorAddress on failure."""
        mp = self.mpcontext if self.mpcontext else multiprocessing
        endpointPrep = self.transport.prepEndpoint(adminAddr, capabilities)
        # Clear the daemonic flag so the spawned admin may create child
        # processes of its own (presumably blocked for daemonic processes
        # -- internals trick, see also detach_child).
        multiprocessing.process._current_process._daemonic = False
        admin = mp.Process(target=startAdmin,
                           args=(MultiProcAdmin,
                                 addrOfStarter,
                                 endpointPrep,
                                 self.transport.__class__,
                                 adminAddr,
                                 capabilities,
                                 logDefs,
                                 self.mpcontext),
                           name='ThespianAdmin')
        admin.start()
        # admin must be explicitly shutdown and is not automatically
        # stopped when this current process exits.
        detach_child(admin)
        self.transport.connectEndpoint(endpointPrep)
        response = self.transport.run(None, MAX_ADMIN_STARTUP_DELAY)
        if not isinstance(response, ReceiveEnvelope) or \
           not isinstance(response.message, EndpointConnected):
            raise InvalidActorAddress(adminAddr,
                                      'not a valid ActorSystem admin')
def closeUnusedFiles(transport):
    """Close every fd in [3, 255) except the transport's protected fds and
    the fds backing stdin/stderr/stdout."""
    import os, sys
    keep = transport.protectedFileNumList()
    for stream in [sys.stdin, sys.stderr, sys.stdout]:
        try:
            keep.append(stream.fileno())
        except AttributeError:
            pass  # stream object without a real fd
    for fdnum in range(3, 255):
        if fdnum in keep:
            continue
        try:
            os.close(fdnum)
        except OSError:
            pass  # fd was not open
def closeFileNums(fd_list):
    """Close each file descriptor in *fd_list*.

    OSError (already-closed / never-open descriptors) is deliberately
    suppressed: the goal is only that none of them remain open.
    The parameter was renamed from ``list``, which shadowed the builtin.
    """
    import os
    for fdnum in fd_list:
        try:
            os.close(fdnum)
        except OSError:
            pass
from thespian.system.systemAdmin import ThespianAdmin
def signal_admin_sts(admin):
    """Build a signal handler that dumps the admin's thesplog status."""
    def _dump_status(signum, frame):
        admin.thesplogStatus()
    return _dump_status
def startAdmin(adminClass, addrOfStarter, endpointPrep, transportClass,
               adminAddr, capabilities, logDefs, concurrency_context):
    """Child-process entry point for the ActorSystem admin.

    Daemonizes (where the OS allows), builds the transport and admin,
    installs signal handling, starts the logger, and runs the admin's
    main loop.  Always arranges for an EndpointConnected reply to the
    starter, even on failure (see comment below).
    """
    # Unix Daemonization; skipped if not available
    import os,sys
    if hasattr(os, 'setsid'):
        os.setsid()
    try:
        import resource
        resource.setrlimit(resource.RLIMIT_CORE, (0,0))  # No core dumps
    except Exception: pass
    if hasattr(os, 'fork'):
        # Classic double-fork step: the intermediate parent exits so the
        # admin is reparented and detached from the starter.
        if os.fork(): sys.exit(0)
    # Slight trickiness here.  There may *already* be an admin bound
    # to this start address.  However, the external process attempting
    # to start is going to wait for the EndpointConnected message
    # before continuing, so ensure that message is *not* sent until
    # the local admin Transport has had time to bind and listen to the
    # local address, but also ensure that the message is *always* sent
    # even if the local admin could not start (caller will use
    # _verifyAdminRunning to ensure things are OK.
    transport = transportClass(endpointPrep)
    try:
        admin = adminClass(transport, adminAddr, capabilities, logDefs,
                           concurrency_context)
    except Exception:
        transport.scheduleTransmit(None,
                                   TransmitIntent(addrOfStarter, EndpointConnected(0)))
        raise
    # Send of EndpointConnected is deferred until the logger is setup.  See MultiProcReplicator.h_LoggerConnected below.
    admin.addrOfStarter = addrOfStarter
    setProcName(adminClass.__name__.rpartition('.')[-1],
                admin.transport.myAddress)
    # Admin does not do normal signal handling, but does want to know if children exit
    for each in range(1, signal.NSIG):
        # n.b. normally Python intercepts SIGINT to turn it into a
        # KeyboardInterrupt exception.  However, these Actors should
        # be detached from the keyboard, so revert to normal SIGINT
        # behavior.
        if each not in uncatchable_signals:
            if each in child_exit_signals:
                set_signal_handler(each, admin.signalChildDied)
    if hasattr(signal, 'SIGUSR1'):
        # SIGUSR1 dumps thesplog status for live debugging.
        set_signal_handler(signal.SIGUSR1, signal_admin_sts(admin))
    _startLogger(transportClass, transport, admin, capabilities, logDefs,
                 concurrency_context)
    #closeUnusedFiles(transport)
    # Admin should never enter TX-only flow control state because this
    # could deadlock or other non-progress conditions, especially if
    # using admin routing.
    transport.enableRXPauseFlowControl(False)
    admin.run()
def _startLogger(transportClass, transport, admin, capabilities, logDefs,
                 concurrency_context):
    """Start the logging child process for this admin and record its
    ChildInfo on ``admin.asLogProc``.

    If a 'Convention Address.IPv4' capability is set and differs from the
    local admin address, log records are additionally forwarded to that
    aggregator.
    """
    # Generate the "placeholder" loggerAddr directly instead of going
    # through the AddressManager because the logger is not managed as
    # a normal child.
    loggerAddr = ActorAddress(ActorLocalAddress(transport.myAddress, -1, None))
    admin.asLogger = None
    logAggregator = capabilities.get('Convention Address.IPv4', None)
    if logAggregator:
        try:
            logAggregator = transportClass.getAddressFromString(logAggregator)
        except Exception as ex:
            # Non-fatal: logging proceeds without aggregation.
            thesplog('Unable to adapt log aggregator address "%s" to a transport address: %s',
                     logAggregator, ex, level=logging.WARNING)
    admin.asLogProc = startASLogger(loggerAddr,
                                    logDefs,
                                    transport,
                                    capabilities,
                                    logAggregator
                                    if logAggregator != admin.transport.myAddress
                                    else None,
                                    concurrency_context)
class ChildInfo(object):
    """Bookkeeping record for a spawned child process (actor or logger).

    ``childRealAddr`` may be attached later, once the child's endpoint
    connects; __str__ prefers it over the placeholder address.
    """

    def __init__(self, childAddr, childClass, childProc, childNum):
        self.childAddr = childAddr
        self.childClass = childClass
        self.childProc = childProc
        self.childNum = childNum

    def __str__(self):
        shown_addr = getattr(self, 'childRealAddr', self.childAddr)
        return "Child #%s: %s @ %s (proc %s)" % (str(self.childNum),
                                                 str(self.childClass),
                                                 str(shown_addr),
                                                 str(self.childProc))
def startASLogger(loggerAddr, logDefs, transport, capabilities,
                  aggregatorAddress=None,
                  concurrency_context = None):
    """Spawn the logger child process and return its ChildInfo.

    The logger runs daemonic (it should die with the owner), using the
    supplied multiprocessing context when given.
    """
    endpointPrep = transport.prepEndpoint(loggerAddr, capabilities)
    # Internals trick (see _startAdmin): allow this process to spawn
    # children even if it was marked daemonic.
    multiprocessing.process._current_process._daemonic = False
    NewProc = concurrency_context.Process if concurrency_context else multiprocessing.Process
    logProc = NewProc(target=startupASLogger,
                      args = (transport.myAddress, endpointPrep,
                              logDefs,
                              transport.__class__, aggregatorAddress))
    logProc.daemon = True
    logProc.start()
    transport.connectEndpoint(endpointPrep)
    # When the caller that owns the transport starts their run(), it
    # will receive the LoggerConnected from the child to complete the
    # handshake and the sender will be the actual address of the
    # logger.
    return ChildInfo(loggerAddr, 'logger', logProc, endpointPrep.addrInst)
class MultiProcReplicator(object):
    def init_replicator(self, transport, concurrency_context):
        """Record the multiprocessing context used to spawn child actors.

        ``transport`` is unused here; it is part of the replicator
        interface signature.
        """
        self.mpcontext = concurrency_context
    def _startChildActor(self, childAddr, childClass, parentAddr, notifyAddr,
                         childRequirements=None,
                         sourceHash=None, sourceToLoad=None):
        """Create a new actor of type `childClass'.

           The `childAddr' is the local address of this child in the
           creator's address-space.

           The `parentAddr' is the parent of this actor in the
           heirarchy and will be another Actor or the local Admin.

           The `notifyAddr' is the Actor or Admin which should be
           notified on successful creation of this child Actor
           (normally this will be the parentAddr, but if the local
           Admin has been enlisted to create this Actor on behalf of
           another (possibly remote) Actor, the local Admin should be
           notified of the successful creation to complete it's
           administration and the Admin will forward the completion to
           the original requestor.).

           The optional `childRequirements' are a list of requirements
           dictated by the creating Actor.

           Raises NoCompatibleSystemForActor when capabilities do not
           match; InvalidActorSourceHash/ImportError propagate unchanged.
        """
        if parentAddr is None:
            raise ActorSystemFailure('parentAddr cannot be None!')
        if self.asLogger is None:
            raise ActorSystemFailure('logger ADDR cannot be None!')
        try:
            if not checkActorCapabilities(childClass, self.capabilities, childRequirements,
                                          partial(loadModuleFromHashSource,
                                                  sourceHash,
                                                  { sourceHash: sourceToLoad })
                                          if sourceHash # and sourceToLoad
                                          else None):
                raise NoCompatibleSystemForActor(childClass,
                                                 "no system has compatible capabilities")
        except (InvalidActorSourceHash, ImportError):
            # Allow these exceptions to propagate outward since they
            # have special, public meaning
            raise
        except Exception:
            # Most exceptions should be converted to
            # NoCompatibleSystemForActor so that calling code
            # recognizes this issue and defers the create request to
            # the Admin.
            raise NoCompatibleSystemForActor(childClass,
                                             "no system has compatible capabilities")
        # KWQ: when child starts it will have this parent address and it will initialize its transport and notify the parent, whereupon the parent will see the incoming message from the child with the id# indicated in the addressmanager localaddress and update the localaddress.  All this should happen in the transport though, not here.
        endpointPrep = self.transport.prepEndpoint(childAddr, self.capabilities)
        # Internals trick: clear the daemonic flag so this process may
        # spawn the child (see also _startAdmin / startASLogger).
        multiprocessing.process._current_process._daemonic = False
        # Ensure fileNumsToClose is a list, not an iterator because it
        # is an argument passed to the child.
        fileNumsToClose = list(self.transport.childResetFileNumList())
        mp = self.mpcontext if self.mpcontext else multiprocessing
        child = mp.Process(target=startChild,  #KWQ: instantiates module specified by sourceHash to create actor
                           args=(childClass,
                                 endpointPrep,
                                 self.transport.__class__,
                                 sourceHash or self._sourceHash,
                                 sourceToLoad,
                                 parentAddr,
                                 self._adminAddr,
                                 notifyAddr,
                                 self.asLogger,
                                 childRequirements,
                                 self.capabilities,
                                 fileNumsToClose,
                                 self.mpcontext),
                           name='Actor_%s__%s'%(getattr(childClass, '__name__', childClass), str(childAddr)))
        child.start()
        # Also note that while non-daemonic children cause the current
        # process to automatically join() those children on exit,
        # daemonic children are sent a terminate() operation (usually
        # indicated by a SIGTERM under unix or TERMINATE indicator
        # under windows.  To avoid this, use another dirty trick and
        # remove all children from the _current_process._children list
        # so that they are not automatically stopped when this process
        # stops.
        detach_child(child)
        if not hasattr(self, '_child_procs'): self._child_procs = []
        self._child_procs.append(ChildInfo(childAddr, childClass, child, endpointPrep.addrInst))
        self.transport.connectEndpoint(endpointPrep)
@staticmethod
def _checkChildLiveness(childInfo):
if not childInfo.childProc.is_alive():
# Don't join forever; that might hang and it's ok to leave
# zombies as long as we continue to make progress.
childInfo.childProc.join(0.5)
return False
return True
def _childExited(self, childAddr):
children = getattr(self, '_child_procs', [])
self._child_procs = list(filter(self._checkChildLiveness, children))
# The following is obsolete with active signal handling which
# will re-examine child liveness on SIGCHLD.
#
# if len(children) == len(self._child_procs):
# # Sometimes the child doesn't indicate as not alive immediately.
# import time
# time.sleep(0.1)
# self._child_procs = list(filter(self._checkChildLiveness, children))
    def signalChildDied(self, _signum, _frame):
        # SIGCHLD handler: just nudge the transport out of its wait so the
        # main loop can reap children (see childDied).
        self.transport.interrupt_wait(check_children=True)
    def childDied(self):
        """Handle child-process death: restart a dead logger if needed,
        reap dead children, and synthesize a ChildActorExited for each.
        """
        logproc = getattr(self, 'asLogProc', None)
        if logproc and not self._checkChildLiveness(logproc):
            # Logger has died; need to start another
            # (unless this admin is itself shutting down).
            if not hasattr(self, '_exiting'):
                _startLogger(self.transport.__class__, self.transport, self, self.capabilities, self.logdefs,
                             self.mpcontext)
        # Signal handler for SIGCHLD; figure out which child and synthesize a ChildActorExited to handle it
        self._child_procs, dead = partition(self._checkChildLiveness, getattr(self, '_child_procs', []))
        for each in dead:
            # Prefer the endpoint-reported address when one was recorded.
            addr = getattr(each, 'childRealAddr', each.childAddr)
            try:
                self.transport.scheduleTransmit(None, TransmitIntent(self.transport.myAddress,
                                                                     ChildActorExited(addr)))
            except CannotPickleAddress:
                thesplog('child %s is dead but cannot translate address to properly handle it',
                         addr, level=logging.ERROR)
        return True  # keep going
def h_EndpointConnected(self, envelope):
for C in getattr(self, '_child_procs', []):
if envelope.message.childInstance == C.childNum:
C.childRealAddr = envelope.sender
break
else:
thesplog('Unknown child process endpoint connected: %s', envelope, level=logging.WARNING)
self._pendingActorReady(envelope.message.childInstance, envelope.sender)
return True
    def h_LoggerConnected(self, envelope):
        """Logger process is up: redirect global logging to it, confirm
        Admin startup to the starter, and activate this admin."""
        self.asLogger = envelope.sender
        # Dirty trick here to completely re-initialize logging in this
        # process... something the standard Python logging interface does
        # not allow via the API.
        self.oldLoggerRoot = logging.root  # saved for _reset_logging()
        logging.root = ThespianLogForwarder(self.asLogger, self.transport)
        logging.Logger.root = logging.root
        logging.Logger.manager = logging.Manager(logging.Logger.root)
        logging.getLogger('Thespian.Admin') \
            .info('ActorSystem Administrator startup @ %s', self.myAddress)
        # Now that logging is started, Admin startup can be confirmed
        self.transport.scheduleTransmit(None,
                                        TransmitIntent(self.addrOfStarter, EndpointConnected(0)))
        self._activate()
        return True
def h_LogRecord(self, envelope):
self._send_intent(TransmitIntent(self.asLogger, envelope.message))
return True
def _handleReplicatorMessages(self, envelope):
# This method handles any messages related to multi-process
# management operations. This also ensures that any messages
# received before initialization is completed are held to be
# handled after initialization finishes.
if isinstance(envelope.message, EndpointConnected):
return True, self.h_EndpointConnected(envelope)
if self.asLogger is None and not isinstance(envelope.message, LoggerConnected):
self._pre_init_msgs.append(envelope)
return True, True
if isinstance(envelope.message, logging.LogRecord):
return True, self.h_LogRecord(envelope)
if isinstance(envelope.message, ChildMayHaveDied):
return True, self.childDied()
return False, True
def _reset_logging(self):
if hasattr(self, 'oldLoggerRoot'):
logging.root = self.oldLoggerRoot
logging.Logger.root = self.oldLoggerRoot
logging.Logger.manager = logging.Manager(logging.Logger.root)
delattr(self, 'oldLoggerRoot')
    def _cleanupAdmin(self, finish_cleanup):
        """Begin admin shutdown.

        Restores the global logging state and asks the logger actor to
        exit; finish_cleanup is invoked once that request completes (or
        immediately when no logger was ever attached).
        """
        self.post_cleanup = finish_cleanup
        if getattr(self, 'asLogger', None):
            self._reset_logging()
            #self.transport.run(TransmitOnly, maximumDuration=timedelta(milliseconds=250))
            #import time
            #time.sleep(0.05)  # allow children to exit and log their exit
            # Continue to the finish stage whether or not the exit
            # request could be delivered.
            self.transport.scheduleTransmit(
                None,
                TransmitIntent(self.asLogger,
                               LoggerExitRequest(),
                               onSuccess=self._cleanupAdminFinish,
                               onError=self._cleanupAdminFinish))
            return
        self._cleanupAdminFinish(None, None)
def _cleanupAdminFinish(self, _intent, _status):
#self.transport.run(TransmitOnly)
if getattr(self, 'asLogProc', None):
if self._checkChildLiveness(self.asLogProc):
import time
time.sleep(0.02) # wait a little to allow logger to exit
self._checkChildLiveness(self.asLogProc) # cleanup defunct proc
self.post_cleanup()
from thespian.system.actorManager import ActorManager
def signal_detector(name, addr, am):
    """Build a generic signal handler for actor *am* (named *name* @ *addr*).

    SIGUSR1 dumps the actor's thesplog status; every other caught signal
    is simply logged.  No explicit exit is performed: merely catching the
    signal enables the atexit handlers if the signal is going to cause a
    process exit.
    """
    def signal_detected(signum, frame):
        sigusr1 = getattr(signal, 'SIGUSR1', 'no-sigusr1-avail')
        if signum == sigusr1:
            am.thesplogStatus()
        else:
            thesplog('Actor %s @ %s got signal: %s', name, addr, signum,
                     level=logging.WARNING)
    return signal_detected
def shutdown_signal_detector(name, addr, am):
    """Build a handler for shutdown signals: log the signal, then break
    the actor's transport out of its wait with a shutdown indication."""
    def shutdown_signal_detected(signum, frame):
        thesplog('Actor %s @ %s got shutdown signal: %s', name, addr, signum,
                 level=logging.WARNING)
        am.transport.interrupt_wait(signal_shutdown=True)
    return shutdown_signal_detected
def startChild(childClass, endpoint, transportClass,
               sourceHash, sourceToLoad,
               parentAddr, adminAddr, notifyAddr, loggerAddr,
               childRequirements, currentSystemCapabilities,
               fileNumsToClose, concurrency_context):
    """Entry point executed inside a newly started child process.

    Sets up the transport, redirects logging to the logger actor,
    installs signal handlers, creates the ActorManager for childClass,
    and runs it until termination.
    """
    # Parent-held descriptors must not leak into this child.
    closeFileNums(fileNumsToClose)
    # Dirty trick here to workaround multiprocessing trying to impose
    # an unnecessary restriction. A process should be set daemonic
    # before start() if the parent shouldn't track it (an specifically
    # automatically join() the subprocess on exit). For Actors, the
    # parent exists independently of the child and the ActorSystem
    # manages them, so daemonic processes are desired. However,
    # multiprocessing imposes a restriction that daemonic processes
    # cannot create more processes. The following reaches deep into
    # the implementation of the multiprocessing module to override
    # that restriction. This process was already started as daemonic,
    # and it's detached from its parent. The following simply clears
    # that flag locally so that other processes can be created from
    # this one.
    multiprocessing.process._current_process._daemonic = False
    transport = transportClass(endpoint)
    #closeUnusedFiles(transport)
    # Dirty trick here to completely re-initialize logging in this
    # process... something the standard Python logging interface does
    # not allow via the API. We also do not want to run
    # logging.shutdown() because (a) that does not do enough to reset,
    # and (b) it shuts down handlers, but we want to leave the parent's
    # handlers alone.
    logging.root = ThespianLogForwarder(loggerAddr, transport)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
    logger = logging.getLogger('Thespian.ActorManager')
    am = MultiProcManager(childClass, transport,
                          sourceHash, sourceToLoad,
                          parentAddr, adminAddr,
                          childRequirements, currentSystemCapabilities,
                          concurrency_context)
    am.asLogger = loggerAddr
    # Tell the parent which endpoint instance this child now services.
    am.transport.scheduleTransmit(None,
                                  TransmitIntent(notifyAddr,
                                                 EndpointConnected(endpoint.addrInst)))
    # Name the OS process after the actor class for easier ps/debugging.
    setProcName(getattr(childClass, '__name__',
                        str(childClass)).rpartition('.')[-1],
                am.transport.myAddress)
    sighandler = signal_detector(getattr(childClass, '__name__', str(childClass)),
                                 am.transport.myAddress, am)
    sigexithandler = shutdown_signal_detector(getattr(childClass, '__name__', str(childClass)),
                                              am.transport.myAddress,
                                              am)
    for each in range(1, signal.NSIG):
        # n.b. normally Python intercepts SIGINT to turn it into a
        # KeyboardInterrupt exception. However, these Actors should
        # be detached from the keyboard, so revert to normal SIGINT
        # behavior.
        if each not in uncatchable_signals:
            if each in child_exit_signals:
                set_signal_handler(each, am.signalChildDied)
                continue
            try:
                set_signal_handler(each,
                                   sigexithandler if each in exit_signals
                                   else sighandler)
            except (RuntimeError,ValueError,EnvironmentError) as ex:
                # OK, this signal can't be caught for this
                # environment. We did our best.
                pass
    am.run()
# Concrete multiprocess Admin: replication mixin supplies child-process
# management; ThespianAdmin supplies the admin protocol behavior.
class MultiProcAdmin(MultiProcReplicator, ThespianAdmin): pass
# Concrete multiprocess ActorManager used in each child actor process.
class MultiProcManager(MultiProcReplicator, ActorManager): pass
| |
import re
import logging
from functools import partial
from py4jdbc.utils import CachedAttr
from py4jdbc.resultset import ResultSet, ColumnResultSet
from py4jdbc.gateway_process import GatewayProcess
from py4jdbc.exceptions import dbapi2 as dbapi2_exc
from py4jdbc.exceptions.py4j import reraise_jvm_exception
# DB-API 2.0 module attribute: this driver uses '?' positional markers.
paramstyle = 'qmark'
# DB-API 2.0 Module Interface connect constructor
def connect(jdbc_url, user=None, password=None, gateway=None, autocommit=None):
    """Open a Connection to *jdbc_url* (DB-API 2.0 module-level connect).

    BUG FIX: ``autocommit`` was previously dropped — the call always
    passed the literal ``autocommit=None`` instead of forwarding the
    caller's value.
    """
    return Connection(jdbc_url, user, password, gateway, autocommit=autocommit)
class _ExceptionMixin:
    '''Exposes dbapi2 exceptions classes as class-level attributes.

    DB-API 2.0 recommends making the exception classes reachable from the
    Connection (and, here, the Cursor) so callers can catch e.g.
    ``conn.ProgrammingError`` without importing the exceptions module.
    '''
    Error = dbapi2_exc.Error
    Warning = dbapi2_exc.Warning
    InterfaceError = dbapi2_exc.InterfaceError
    DatabaseError = dbapi2_exc.DatabaseError
    InternalError = dbapi2_exc.InternalError
    OperationalError = dbapi2_exc.OperationalError
    ProgrammingError = dbapi2_exc.ProgrammingError
    IntegrityError = dbapi2_exc.IntegrityError
    DataError = dbapi2_exc.DataError
    NotSupportedError = dbapi2_exc.NotSupportedError
class _JdbcPropertyAlias:
def __init__(self, property_name):
self.name = property_name
def __get__(self, inst, Cls):
return inst.get_property(self.name)
def __set__(self, inst, value):
inst.set_property(self.name, value)
# DB-API 2.0 Connection Object
class Connection(_ExceptionMixin):
    """DB-API 2.0 Connection backed by a py4j gateway into a JVM JDBC driver.

    The underlying JDBC connection is established lazily on first use.
    When no gateway (or GatewayProcess) is supplied, a private
    GatewayProcess is created on demand; used as a context manager, the
    connection starts that process on entry and shuts it down on exit.
    """

    # JDBC 'AutoCommit' surfaced as a plain attribute; the descriptor
    # delegates to get_property/set_property below.
    autocommit = _JdbcPropertyAlias('AutoCommit')

    def __init__(self, jdbc_url, user=None, password=None, gateway=None, autocommit=None):
        self._gateway_arg = gateway
        self._jdbc_url = jdbc_url
        self._user = user
        self._password = password
        self._closed = False
        self._logger = logging.getLogger('py4jdbc')
        # True when __enter__ had to start the gateway itself (then
        # __exit__ is responsible for shutting it down again).
        self._autostarted_gateway = False
        # Set autocommit.  Note this touches the JDBC connection and so
        # forces it to be established immediately.
        if autocommit is not None:
            self.autocommit = autocommit

    def __enter__(self):
        if not self._gateway.is_running:
            self._gateway.run()
            self._autostarted_gateway = True
        return self

    def __exit__(self, *args):
        if not self._closed:
            self.close()
        # If this connection started a gateway server, shut it down now.
        if self._gateway.is_running and self._autostarted_gateway:
            msg = "Shutting down GatewayProcess auto-started by connection %r"
            self._logger.info(msg, self)
            self._gateway.shutdown()
        else:
            msg = ("GatewayProcess wasn't auto-started by connection. "
                   "Leaving it up.")
            self._logger.info(msg)

    # -----------------------------------------------------------------------
    # Py4j accessors.
    # -----------------------------------------------------------------------
    @property
    def _entry_point(self):
        # JVM-side entry point object exposed by the gateway.
        return self._gateway.entry_point
    _ep = _entry_point

    @CachedAttr
    def _gateway_process(self):
        # Private gateway process, created lazily and only when no
        # gateway was handed to __init__.
        proc = GatewayProcess(require_ctx_manager=False)
        return proc

    @CachedAttr
    def _gateway(self):
        gw = self._gateway_arg
        if gw is not None:
            # If a gateway process was passed in, get the actual gateway.
            if isinstance(gw, GatewayProcess):
                gw = self._gateway_arg.gateway
            self._gateway = gw
            return gw
        return self._gateway_process.gateway

    # -----------------------------------------------------------------------
    # Java connection accessors.
    # -----------------------------------------------------------------------
    def get_property(self, name: str):
        """Read connection property *name*.

        Prefers a dedicated JDBC getter (e.g. ``getAutoCommit``) when the
        java connection has one; otherwise falls back to the generic
        ``getProperty(name)``.
        """
        method_name = 'get' + name
        if method_name in dir(self._jconn):
            method = getattr(self._jconn, method_name)
        else:
            method = partial(self._jconn.getProperty, name)
        with reraise_jvm_exception(self._gateway):
            return method()

    def set_property(self, name: str, value: str):
        """Set connection property *name*, preferring a dedicated setter."""
        method_name = 'set' + name
        if method_name in dir(self._jconn):
            method = getattr(self._jconn, method_name)
        else:
            method = partial(self._jconn.setProperty, name)
        with reraise_jvm_exception(self._gateway):
            return method(value)

    @CachedAttr
    def _py4jdbc_connection(self):
        # Establish the JVM-side py4jdbc connection (cached).
        # BUG FIX: the previous log call passed two arguments for a single
        # '%r' placeholder, causing the logging module to report a
        # formatting error on every connect.
        self._logger.debug('Connecting to %s as %r', self._jdbc_url, self._user)
        return self._ep.getConnection(self._jdbc_url, self._user, self._password)

    @property
    def _conn(self):
        return self._py4jdbc_connection

    @CachedAttr
    def _jdbc_connection(self):
        # Raw java.sql.Connection behind the py4jdbc wrapper (cached).
        return self._py4jdbc_connection.getJdbcConnection()

    @property
    def _jconn(self):
        return self._jdbc_connection

    def close(self):
        """Close the JDBC connection (DB-API); Error if already closed."""
        if self._closed:
            raise self.Error("Connection is already closed.")
        with reraise_jvm_exception(self._gateway):
            self._jconn.close()
        self._closed = True

    def commit(self):
        """Commit the current transaction (DB-API)."""
        with reraise_jvm_exception(self._gateway):
            self._jconn.commit()

    def rollback(self):
        """Roll back the current transaction (DB-API)."""
        with reraise_jvm_exception(self._gateway):
            self._jconn.rollback()

    def cursor(self):
        """Return a new Cursor bound to this connection (DB-API)."""
        return Cursor(self)

    # ------------------------------------------------------------------------
    # Introspection
    # ------------------------------------------------------------------------
    @CachedAttr
    def _metadata(self):
        # java.sql.DatabaseMetaData for the connection (cached).
        return self._jdbc_connection.getMetaData()

    def get_tables(self, catalog=None, schema=None, table='%'):
        """List tables matching the (catalog, schema, table) pattern."""
        rs = self._metadata.getTables(catalog, schema, table, None)
        rs = self._ep.mkPyResultSet(rs)
        return ResultSet(rs, gateway=self._gateway, case_insensitive=True)

    def get_columns(self, catalog=None, schema=None, table='%', column=None):
        """List columns matching the given pattern."""
        rs = self._metadata.getColumns(catalog, schema, table, column)
        rs = self._ep.mkPyResultSet(rs)
        return ColumnResultSet(rs, gateway=self._gateway)

    # NOTE: the *column* parameter below is accepted for signature symmetry
    # with get_columns but is not used by the underlying JDBC calls.
    def get_primary_keys(self, catalog=None, schema=None, table='%', column=None):
        """List primary keys for the matching table(s)."""
        rs = self._metadata.getPrimaryKeys(catalog, schema, table)
        rs = self._ep.mkPyResultSet(rs)
        return ColumnResultSet(rs, gateway=self._gateway)

    def get_imported_keys(self, catalog=None, schema=None, table='%', column=None):
        """List foreign keys imported by the matching table(s)."""
        rs = self._metadata.getImportedKeys(catalog, schema, table)
        rs = self._ep.mkPyResultSet(rs)
        return ColumnResultSet(rs, gateway=self._gateway)

    def get_exported_keys(self, catalog=None, schema=None, table='%', column=None):
        """List foreign keys exported by the matching table(s)."""
        rs = self._metadata.getExportedKeys(catalog, schema, table)
        rs = self._ep.mkPyResultSet(rs)
        return ColumnResultSet(rs, gateway=self._gateway)

    def get_privileges(self, catalog=None, schema=None, table='%', column=None):
        """List access privileges for the matching table(s)."""
        rs = self._metadata.getTablePrivileges(catalog, schema, table)
        rs = self._ep.mkPyResultSet(rs)
        return ColumnResultSet(rs, gateway=self._gateway)
# DB-API 2.0 Cursor Object
class Cursor(_ExceptionMixin):
    """DB-API 2.0 Cursor over a py4jdbc Connection."""

    def __init__(self, connection):
        self.connection = connection
        self._reset()
        self._logger = logging.getLogger('py4jdbc')

    def _reset(self):
        # Clear per-statement state.
        self._closed = False
        self._rs = None            # most recent ResultSet, or None
        self._description = None   # cached DB-API description of _rs

    # ------------------------------------------------------------------------
    # Shortcuts for reaching java objects.
    # ------------------------------------------------------------------------
    @property
    def _gateway(self):
        return self.connection._gateway

    @property
    def _entrypoint(self):
        return self.connection._entry_point
    _ep = _entrypoint

    @property
    def _jdbc_connection(self):
        return self.connection._jdbc_connection
    _jconn = _jdbc_connection

    # ------------------------------------------------------------------------
    # Actual dbapi 2.0 methods.
    # ------------------------------------------------------------------------
    @property
    def description(self):
        """DB-API description of the current result set (lazily computed)."""
        if self._description is None:
            self._description = self._rs.description()
        return self._description

    def close(self):
        """Close this cursor and its open result set, if any."""
        # NOTE(review): the message text says "Connection" but refers to
        # this cursor; kept unchanged in case callers match on it.
        if self._closed:
            raise self.Error("Connection is already closed.")
        self._logger.info('Closing cursor')
        self._close_last()
        self._closed = True

    def _close_last(self):
        # Close the previous result set, if one is open.
        if self._rs is not None:
            with reraise_jvm_exception(self._gateway):
                self._rs.close()

    def execute(self, operation, parameters=None):
        """Execute a statement.

        Returns a ResultSet for row-producing statements, None otherwise.
        """
        if self._closed:
            raise self.Error("Connection is closed.")
        with reraise_jvm_exception(self._gateway):
            if parameters is None:
                rs = self.connection._conn.execute(operation)
            else:
                rs = self.connection._conn.execute(operation, parameters)
        # BUG FIX: invalidate the cached description — previously a second
        # execute on the same cursor kept the first statement's description.
        self._description = None
        if rs is None:
            # BUG FIX: was `del self._rs`, which removed the attribute
            # entirely and made any later access (fetch*, _close_last)
            # raise AttributeError instead of behaving as "no result set".
            self._rs = None
            return
        self._rs = ResultSet(rs, gateway=self._gateway)
        return self._rs

    def executemany(self, operation, parameter_seq):
        """Execute a statement once per parameter set in *parameter_seq*."""
        if self._closed:
            raise self.Error("Connection is closed.")
        with reraise_jvm_exception(self._gateway):
            rs = self.connection._conn.executeMany(operation, parameter_seq)
        self._description = None  # see execute()
        if rs is None:
            self._rs = None  # see execute(): never delete the attribute
            return
        self._rs = ResultSet(rs, gateway=self._gateway)
        return self._rs

    def fetchone(self):
        """Fetch the next row of the current result set."""
        if self._closed:
            raise self.Error("Connection is closed.")
        return self._rs.fetchone()

    def fetchmany(self, size=None):
        """Fetch up to *size* rows of the current result set."""
        if self._closed:
            raise self.Error("Connection is closed.")
        return self._rs.fetchmany(size)

    def fetchall(self):
        """Fetch all remaining rows of the current result set."""
        if self._closed:
            raise self.Error("Connection is closed.")
        return self._rs.fetchall()
| |
"""Functions for signing notebooks"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
from contextlib import contextmanager
import hashlib
from hmac import HMAC
import io
import os
from IPython.utils.io import atomic_writing
from IPython.utils.py3compat import string_types, unicode_type, cast_bytes
from IPython.utils.traitlets import Instance, Bytes, Enum, Any, Unicode, Bool
from IPython.config import LoggingConfigurable, MultipleInstanceError
from IPython.core.application import BaseIPythonApplication, base_flags
from . import read, write, NO_CONVERT
# Pick the set of hash algorithm names usable for signing.
try:
    # Python 3
    algorithms = hashlib.algorithms_guaranteed
except AttributeError:
    # Python 2: algorithms_guaranteed does not exist yet
    algorithms = hashlib.algorithms
def yield_everything(obj):
    """Yield every item in a container as bytes

    Walks dicts (keys in sorted order), lists/tuples, and scalars,
    producing a deterministic byte stream so that any JSONable object
    can be fed to an HMAC digester without serializing the whole thing.
    """
    if isinstance(obj, dict):
        for key in sorted(obj):
            yield cast_bytes(key)
            for chunk in yield_everything(obj[key]):
                yield chunk
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            for chunk in yield_everything(item):
                yield chunk
    elif isinstance(obj, unicode_type):
        yield obj.encode('utf8')
    else:
        yield unicode_type(obj).encode('utf8')
def yield_code_cells(nb):
    """Iterator that yields all code cells in a notebook

    nbformat version independent: v4 keeps a flat cell list, v3 nests
    cells inside worksheets.  Versions below 3 yield nothing.
    """
    if nb.nbformat >= 4:
        candidates = nb['cells']
    elif nb.nbformat == 3:
        candidates = (cell for ws in nb['worksheets'] for cell in ws['cells'])
    else:
        candidates = ()
    for cell in candidates:
        if cell['cell_type'] == 'code':
            yield cell
@contextmanager
def signature_removed(nb):
    """Context manager for operating on a notebook with its signature removed

    The stored signature (if any) is popped from the notebook metadata on
    entry and restored on exit, so that computing a new signature never
    includes the previous one.
    """
    saved = nb['metadata'].pop('signature', None)
    try:
        yield
    finally:
        if saved is not None:
            nb['metadata']['signature'] = saved
class NotebookNotary(LoggingConfigurable):
    """A class for computing and verifying notebook signatures."""

    # Profile directory used to locate the secret-key file.
    profile_dir = Instance("IPython.core.profiledir.ProfileDir")
    def _profile_dir_default(self):
        # Traitlets dynamic default: reuse the running IPython app's
        # profile dir when available; otherwise build a throwaway app
        # (deliberately NOT registered as the global instance).
        from IPython.core.application import BaseIPythonApplication
        app = None
        try:
            if BaseIPythonApplication.initialized():
                app = BaseIPythonApplication.instance()
        except MultipleInstanceError:
            pass
        if app is None:
            # create an app, without the global instance
            app = BaseIPythonApplication()
            app.initialize(argv=[])
        return app.profile_dir

    algorithm = Enum(algorithms, default_value='sha256', config=True,
        help="""The hashing algorithm used to sign notebooks."""
    )
    def _algorithm_changed(self, name, old, new):
        # Keep the digest constructor in sync with the configured algorithm.
        self.digestmod = getattr(hashlib, self.algorithm)

    # Hash constructor matching `algorithm` (e.g. hashlib.sha256).
    digestmod = Any()
    def _digestmod_default(self):
        return getattr(hashlib, self.algorithm)

    secret_file = Unicode(config=True,
        help="""The file where the secret key is stored."""
    )
    def _secret_file_default(self):
        if self.profile_dir is None:
            return ''
        return os.path.join(self.profile_dir.security_dir, 'notebook_secret')

    secret = Bytes(config=True,
        help="""The secret key with which notebooks are signed."""
    )
    def _secret_default(self):
        # note : this assumes an Application is running
        if os.path.exists(self.secret_file):
            with io.open(self.secret_file, 'rb') as f:
                return f.read()
        else:
            # First run: generate and persist a fresh 1024-byte key.
            # NOTE(review): base64.encodestring is a deprecated alias
            # (removed in Python 3.9); encodebytes is the modern name.
            secret = base64.encodestring(os.urandom(1024))
            self._write_secret_file(secret)
            return secret

    def _write_secret_file(self, secret):
        """write my secret to my secret_file"""
        self.log.info("Writing notebook-signing key to %s", self.secret_file)
        with io.open(self.secret_file, 'wb') as f:
            f.write(secret)
        try:
            # Restrict the key file to the owner; best-effort on
            # platforms where chmod is unsupported.
            os.chmod(self.secret_file, 0o600)
        except OSError:
            self.log.warn(
                "Could not set permissions on %s",
                self.secret_file
            )
        return secret

    def compute_signature(self, nb):
        """Compute a notebook's signature

        by hashing the entire contents of the notebook via HMAC digest.
        """
        hmac = HMAC(self.secret, digestmod=self.digestmod)
        # don't include the previous hash in the content to hash
        with signature_removed(nb):
            # sign the whole thing
            for b in yield_everything(nb):
                hmac.update(b)
        return hmac.hexdigest()

    def check_signature(self, nb):
        """Check a notebook's stored signature

        If a signature is stored in the notebook's metadata,
        a new signature is computed and compared with the stored value.

        Returns True if the signature is found and matches, False otherwise.

        The following conditions must all be met for a notebook to be trusted:
        - a signature is stored in the form 'scheme:hexdigest'
        - the stored scheme matches the requested scheme
        - the requested scheme is available from hashlib
        - the computed hash from notebook_signature matches the stored hash
        """
        if nb.nbformat < 3:
            return False
        stored_signature = nb['metadata'].get('signature', None)
        if not stored_signature \
            or not isinstance(stored_signature, string_types) \
            or ':' not in stored_signature:
            return False
        stored_algo, sig = stored_signature.split(':', 1)
        if self.algorithm != stored_algo:
            return False
        my_signature = self.compute_signature(nb)
        return my_signature == sig

    def sign(self, nb):
        """Sign a notebook, indicating that its output is trusted

        stores 'algo:hmac-hexdigest' in notebook.metadata.signature

        e.g. 'sha256:deadbeef123...'
        """
        if nb.nbformat < 3:
            return
        signature = self.compute_signature(nb)
        nb['metadata']['signature'] = "%s:%s" % (self.algorithm, signature)

    def mark_cells(self, nb, trusted):
        """Mark cells as trusted if the notebook's signature can be verified

        Sets ``cell.metadata.trusted = True | False`` on all code cells,
        depending on whether the stored signature can be verified.

        This function is the inverse of check_cells
        """
        if nb.nbformat < 3:
            return
        for cell in yield_code_cells(nb):
            cell['metadata']['trusted'] = trusted

    def _check_cell(self, cell, nbformat_version):
        """Do we trust an individual cell?

        Return True if:

        - cell is explicitly trusted
        - cell has no potentially unsafe rich output

        If a cell has no output, or only simple print statements,
        it will always be trusted.
        """
        # explicitly trusted
        # (note: pop() also removes the flag from the cell metadata)
        if cell['metadata'].pop("trusted", False):
            return True

        # explicitly safe output
        if nbformat_version >= 4:
            safe = {'text/plain', 'image/png', 'image/jpeg'}
            unsafe_output_types = ['execute_result', 'display_data']
            safe_keys = {"output_type", "execution_count", "metadata"}
        else: # v3
            safe = {'text', 'png', 'jpeg'}
            unsafe_output_types = ['pyout', 'display_data']
            safe_keys = {"output_type", "prompt_number", "metadata"}

        for output in cell['outputs']:
            output_type = output['output_type']
            if output_type in unsafe_output_types:
                # if there are any data keys not in the safe whitelist
                output_keys = set(output)
                if output_keys.difference(safe_keys):
                    return False

        return True

    def check_cells(self, nb):
        """Return whether all code cells are trusted

        If there are no code cells, return True.

        This function is the inverse of mark_cells.
        """
        if nb.nbformat < 3:
            return False
        trusted = True
        for cell in yield_code_cells(nb):
            # only distrust a cell if it actually has some output to distrust
            if not self._check_cell(cell, nb.nbformat):
                trusted = False
        return trusted
trust_flags = {
'reset': (
{'TrustNotebookApp': {'reset': True}},
"""Generate a new key for notebook signature.
All previously signed notebooks will become untrusted.
"""
),
}
trust_flags.update(base_flags)
trust_flags.pop('init')
class TrustNotebookApp(BaseIPythonApplication):
    # The `ipython trust` command-line application: signs notebooks so
    # their rich output is trusted in the current profile.

    description = """Sign one or more IPython notebooks with your key,
    to trust their dynamic (HTML, Javascript) output.
    Trusting a notebook only applies to the current IPython profile.
    To trust a notebook for use with a profile other than default,
    add `--profile [profile name]`.
    Otherwise, you will have to re-execute the notebook to see output.
    """

    examples = """
    ipython trust mynotebook.ipynb and_this_one.ipynb
    ipython trust --profile myprofile mynotebook.ipynb
    """

    flags = trust_flags

    reset = Bool(False, config=True,
        help="""If True, generate a new key for notebook signature.
        After reset, all previously signed notebooks will become untrusted.
        """
    )

    notary = Instance(NotebookNotary)
    def _notary_default(self):
        # Traitlets dynamic default: a notary bound to this app's profile.
        return NotebookNotary(parent=self, profile_dir=self.profile_dir)

    def sign_notebook(self, notebook_path):
        # Sign a single notebook file in place (no-op if already signed).
        if not os.path.exists(notebook_path):
            self.log.error("Notebook missing: %s" % notebook_path)
            self.exit(1)
        with io.open(notebook_path, encoding='utf8') as f:
            nb = read(f, NO_CONVERT)
        if self.notary.check_signature(nb):
            print("Notebook already signed: %s" % notebook_path)
        else:
            print("Signing notebook: %s" % notebook_path)
            self.notary.sign(nb)
            # atomic_writing prevents corrupting the notebook if the
            # write fails part-way through.
            with atomic_writing(notebook_path) as f:
                write(nb, f, NO_CONVERT)

    def generate_new_key(self):
        """Generate a new notebook signature key"""
        print("Generating new notebook key: %s" % self.notary.secret_file)
        self.notary._write_secret_file(os.urandom(1024))

    def start(self):
        # Entry point: --reset regenerates the key; otherwise each
        # notebook named on the command line is signed.
        if self.reset:
            self.generate_new_key()
            return
        if not self.extra_args:
            self.log.critical("Specify at least one notebook to sign.")
            self.exit(1)
        for notebook_path in self.extra_args:
            self.sign_notebook(notebook_path)
| |
# 20170614 Add manual trade
import ael
import acm
import HTI_Util
import HTI_Email_Util
import HTI_FeedTrade_EDD_Util
import os
import sys, traceback
import datetime
import sqlite3
import csv
import decimal
import account_journal
import win32com.client
# AEL dialog parameter definitions.  Each row appears to follow the
# ael_variables convention [name, label, type, choices, default,
# mandatory, multi-select, tooltip, callback, enabled] — TODO confirm the
# exact positional meaning against the AEL documentation.
# BUG FIX: the tooltips of 'ss_bb' and 'consolid_trd' were swapped.
ael_variables = [['startdate', 'Start Date', 'string', [str(ael.date_today())], str(ael.date_today()), 1, 0, 'Start Date', None, 1], \
            ['enddate', 'End Date', 'string', [str(ael.date_today())], str(ael.date_today()), 1, 0, 'End Date', None, 1], \
            ['pfs', 'Portfolio(s)', 'string', HTI_Util.getAllPortfolios(), 'EDD Warrant Trading,EDD CBBC Trading,EDD Options,EDD Hedging,EDD Market Making 1,EDD Market Making 2,EDD Warrant,EDD Flow Strategy 1,EDD Flow Strategy 2,EDD HFT 1,EDD HFT 2,EDD HFT 3,EDD HFT 4,EDD OMM,EDD OTC OMM', 1, 1, 'Portfolio(s)', None, 1], \
            ['acq', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
            ['prd', 'Product Type(s)', 'string', HTI_Util.getAllInstypes(), 'Stock,Option,Future/Forward,Warrant', 1, 1, 'Product Type(s)', None, 1], \
            ['created_by', 'Created By', 'string', None, '', 0, 0, 'Created By', None, 1], \
            ['by_trader', 'Trader', 'string', None, '', 0, 0, 'Trader', None, 1], \
            ['tfs', 'Trade Filter', 'string', None, 'TF_EDD_ACCOUNT_JOURNAL', 0, 0, 'Trade Filter', None, 1], \
            ['gen_add_info', 'Generate additional info?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Generate additional info?', None, 1], \
            ['gen_value_day', 'Generate Value Day?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Generate Value Day?', None, 1], \
            ['gen_manual_trd', 'Generate Manual Trade?', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Generate Manual Trade?', None, 1], \
            ['ss_bb', 'Short Sell or Buy Back Only?', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Short Sell or Buy Back Only?', None, 1], \
            ['consolid_trd', 'Consolidate trades?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Consolidate trades?', None, 1], \
            ['fileNameTrd', 'Trade File name', 'string', None, 'D:\\Temp\\Trade_Records\\Trade_Record_YYYYMMDD.csv', 1, 0, 'File Name', None, 1], \
            ['fileNameCon', 'Consolidated File name', 'string', None, 'D:\\Temp\\Trade_Records\\Consolid_Trade_Record_YYYYMMDD.csv', 1, 0, 'Consolidated File Name', None, 1] ]
def adapt_decimal(d):
    """sqlite3 adapter: store Decimal values in their string form."""
    return '{0}'.format(d)
def convert_decimal(s):
    """sqlite3 converter: rebuild a Decimal from its stored text form."""
    value = decimal.Decimal(s)
    return value
def db_cur(source = ":memory:"):
# Register the adapter
sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
#conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr=None, index_arr=None):
    """Create *tbl_name* (if absent) from a comma-separated *header* and
    optionally bulk-insert rows.

    If the table does not exist it is created — a leading ``id`` column
    becomes the PRIMARY KEY — and one single-column index per entry in
    *index_arr* is added.  Rows in *arr* (if any) are then inserted.

    BUG FIX (hygiene): the mutable default arguments ([]) are replaced
    with None sentinels so call sites can never share state.

    NOTE(review): table/column names are interpolated directly into the
    SQL text; only trusted identifiers may be passed in.
    """
    arr = [] if arr is None else arr
    index_arr = [] if index_arr is None else index_arr
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    tbl_exists = cur.fetchone()
    if tbl_exists[0] == 0:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        for index in index_arr:
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    if arr != []:
        cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + question_marks(header) + ")", arr)
    return
def question_marks(st):
    """Return a '?,?,...,?' placeholder string with one '?' per
    comma-separated field in *st* (for parameterized INSERTs)."""
    return ','.join(['?'] * len(st.split(',')))
def num(s):
    # Best-effort numeric coercion (Python 2 only: uses `basestring`).
    # Strings have '#' separators stripped; empty or NaN-like text
    # coerces to 0.  Otherwise returns int when the value parses as an
    # int, else float.
    # NOTE(review): non-string input (int/float) falls straight through
    # to the int()/float() attempts.
    if isinstance(s, basestring):
        s = str(s).replace("#","")
        if s == "" or str(float(s)) == "nan":
            return 0
    try:
        return int(s)
    except ValueError:
        return float(str(s))
def dec(s):
    # Best-effort Decimal coercion (Python 2 only: uses `basestring`).
    # Strings have '#' stripped; empty/'None'/NaN-like text coerces to 0.
    # NOTE(review): the bare `except:` deliberately maps ANY conversion
    # failure to 0 — best-effort feed parsing.
    if isinstance(s, basestring):
        s = str(s).replace("#","")
        if s == "" or s == "None" or str(float(s)) == "nan":
            return 0
    try:
        return decimal.Decimal(str(s))
    except:
        return 0
    # NOTE(review): unreachable — the try block always returns.
    return s
def csv_to_arr(csv_file):
    # Read a CSV file and return (header, rows): the first record
    # re-joined as a comma string, plus the remaining records as lists.
    # NOTE(review): 'rU' (universal newlines) is Python-2 era; the mode
    # was removed in Python 3.11.
    arr = []
    with open(csv_file, 'rU') as f:
        reader = csv.reader(f)
        arr = list(reader)
    header = ','.join(arr[0])
    arr = arr[1:]
    return header, arr
def tsv_to_arr(tsv_file):
    # Read a tab-separated file and return all records after the header
    # row (the header itself is discarded).
    # NOTE(review): 'rU' mode is Python-2 era (removed in Python 3.11).
    arr = []
    with open(tsv_file, 'rU') as f:
        reader = csv.reader(f, dialect="excel-tab")
        arr = list(reader)
    arr = arr[1:]
    return arr
def sortArray(x, y):
    """Lexicographically compare two sequences (cmp-style comparator).

    Returns 1 if x sorts after y, -1 if before, 0 when the compared
    elements are all equal.

    BUG FIX: the previous version returned -1 as soon as a pair of
    elements was merely EQUAL (its `else` branch fired for both '<' and
    '=='), so equal prefixes were never skipped, the loop increment was
    unreachable, and identical arrays never compared as 0.  Pairing with
    zip() also avoids the old IndexError when y was shorter than x.
    """
    for a, b in zip(x, y):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0
def arrs_to_xlsx(filename, header=[], arr=[]):
    # Write parallel lists of headers/row-arrays to an .xlsx workbook via
    # COM Excel automation: one worksheet per entry in *header*, with the
    # matching rows taken from *arr*.
    # NOTE(review): the mutable default arguments are shared across calls;
    # they are only read here, but must never be mutated.
    i = 1
    xl = win32com.client.Dispatch('Excel.Application')
    wb = xl.Workbooks.Add()
    for x in range(0, len(header)):
        ws = wb.Worksheets(x+1)  # COM worksheets/cells are 1-based
        for i, cell in enumerate(header[x].split(',')):
            ws.Cells(1,i+1).Value = cell
        for i, row in enumerate(arr[x]):
            for j, cell in enumerate(row):
                ws.Cells(i+2,j+1).Value = str(cell)
    # Suppress Excel's overwrite-confirmation dialog around SaveAs.
    xl.DisplayAlerts = False
    wb.SaveAs(filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def export_to_file(file_name, header, data_arr):
    """Write *data_arr* rows to *file_name* as fully-quoted CSV.

    The comma-separated *header* becomes the first record and every cell
    of every data row is stringified.

    BUG FIX: the file is now opened via a context manager so the handle
    is closed even when a write raises (previously it leaked on error).
    Binary 'wb' mode is retained — correct for Python 2's csv module.
    """
    with open(file_name, 'wb') as csv_file:
        wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
        wr.writerow(header.split(','))
        for data_row in data_arr:
            wr.writerow([str(ele) for ele in data_row])
    return
def header_to_col(header):
    """Map each column name in the comma-separated *header* to its
    zero-based position."""
    return {name: pos for pos, name in enumerate(header.split(','))}
def check_banking_day(posdate):
    """Roll *posdate* forward to the next Hong Kong banking day.

    Returns the date unchanged when it already is a banking day.
    """
    hk_cal = acm.FCalendar.Select("name='Hong Kong'")[0]
    while hk_cal.IsNonBankingDay(hk_cal, hk_cal, posdate):
        posdate = posdate.add_days(1)
    return posdate
def ael_main(dict):
    # Entry point invoked by the AEL runtime with the dialog values from
    # ael_variables.  For each banking day in [startdate, enddate] (or
    # once for the whole range in consolidated mode) it builds a trade
    # SQL query, stores it in a TextObject, and hands it to
    # account_journal.trd_records for export.
    # NOTE(review): dates are parsed as DD/MM/YYYY ('/' split, [2][1][0]),
    # while the dialog default str(ael.date_today()) may not match that
    # format — confirm against the AEL date string form.
    # NOTE(review): header_trd is defined but unused in this body.
    header_trd = 'trade_id,entity,portfolio,counterparty,instrument_type,call_put,currency,position,reporting_date,instrument,underlying,price,quantity,premium,fee,ss_bb'
    conn, cur = db_cur()
    ss_bb = dict['ss_bb']
    created_by = dict['created_by']
    consolid_trd = dict['consolid_trd']
    manual_trd = dict['gen_manual_trd']
    if consolid_trd == "Y":
        # Consolidated runs write to the consolidated file name instead.
        dict['fileNameTrd'] = dict['fileNameCon']
    startDateArr = dict['startdate'].split('/')
    posdate = ael.date_from_ymd(int(startDateArr[2]), int(startDateArr[1]), int(startDateArr[0]))
    endDateArr = dict['enddate'].split('/')
    endDate = ael.date_from_ymd(int(endDateArr[2]), int(endDateArr[1]), int(endDateArr[0]))
    posdate = check_banking_day(posdate)
    endDate = check_banking_day(endDate)
    while posdate <= endDate:
        # T+1 banking day bounds the per-day trade-time window.
        posdatetp1 = posdate.add_days(1)
        posdatetp1 = check_banking_day(posdatetp1)
        # Acquirers
        acq_array_list = dict['acq']
        acq_list = ''
        for acq in acq_array_list:
            if acq_list == '':
                acq_list = "'" + acq + "'"
            else:
                acq_list = acq_list + ",'" + acq + "'"
        # Product Types
        prod_type_list = dict['prd']
        ptype_list = ''
        for ptype in prod_type_list:
            if ptype_list == '':
                ptype_list = "'" + ptype + "'"
            else:
                ptype_list = ptype_list + ",'" + ptype+ "'"
        # Portfolios: expand each selected portfolio into its child
        # portfolios (leaf-level) when it has any, else use it directly.
        portfolios = dict['pfs']
        portfolioList2 = []
        pf_list = ''
        portfolioList2.extend(portfolios)
        for port in portfolioList2:
            prfid = port
            pfarr = []
            pPf = ael.Portfolio[prfid]
            HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
            if len(pfarr) > 0:
                for pf in pfarr:
                    if len(pf_list) != 0:
                        pf_list = pf_list + ','
                    pf_list = pf_list + "'" + pf + "'"
            else:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + prfid + "'"
        # Base query; @-placeholders are substituted below.
        # NOTE(review): values are interpolated into the SQL text — safe
        # only because they come from the controlled AEL dialog.
        strSql = """
            select t.trdnbr
            from instrument i, trade t, party acq, portfolio pf, user u
            where i.insaddr = t.insaddr
            and u.usrnbr = t.creat_usrnbr
            and t.status not in ('Void', 'Simulated')
            and t.acquirer_ptynbr = acq.ptynbr
            and t.prfnbr = pf.prfnbr
            and acq.ptyid in (@acquirer_list)
            and (i.exp_day >= '@dt' or i.exp_day = '0000-01-01')
            and t.time < '@d_tp1'
            and i.instype in (@ptype_list)
            and pf.prfid in (@portfolio_list)
        """
        if ss_bb == "Y":
            strSql = strSql + """ and (add_info(t, 'Short Sell') = 'Yes' or add_info(t, 'Buy Back') = 'Yes') """
        if manual_trd == "Y":
            strSql = strSql + """ and ( (add_info(t, 'Manual Trade') = 'Cross' ) or (add_info(t, 'Manual Trade') = 'Give Up' ) or (add_info(t, 'Manual Trade') = 'Take Up' ) ) """
        if dict['by_trader'] != "":
            strSql = strSql + """ and (add_info(t, 'horizon User Id') = '%s' ) """ % dict['by_trader']
        if created_by != "":
            strSql = strSql + """ and u.userid = '%s' """ % (created_by)
        strSql = strSql.replace('@acquirer_list', acq_list)
        strSql = strSql.replace('@portfolio_list', pf_list)
        # Consolidated mode covers the whole range in a single pass.
        strSql = strSql.replace('@d_tp1', (endDate.add_days(1).to_string('%Y-%m-%d') if consolid_trd == "Y" else posdatetp1.to_string('%Y-%m-%d') ) )
        strSql = strSql.replace('@dt', posdate.to_string('%Y-%m-%d'))
        strSql = strSql.replace('@ptype_list', ptype_list)
        #print strSql
        # Persist the query text into the shared TextObject for audit/reuse.
        trade_filter = dict['tfs']
        tobject = ael.TextObject.read('type="SQL Query" and name="%s"' % ("tf_edd_account_jorunal_qry"))
        tobject_c = tobject.clone()
        tobject_c.set_text(strSql)
        tobject_c.commit()
        ael.poll()
        print "Exporting " + posdate.to_string('%Y-%m-%d')
        account_journal.trd_records(cur, strSql, posdate, dict)
        # Consolidated mode exits after one pass; otherwise advance a day.
        posdate = endDate.add_days(1) if consolid_trd == "Y" else posdatetp1
    print "Finished"
| |
# -*- coding: utf-8 -*-
"""
twython.api
~~~~~~~~~~~
This module contains functionality for access to core Twitter API calls,
Twitter Authentication, and miscellaneous methods that are useful when
dealing with the Twitter API
"""
import warnings
import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth1, OAuth2
from . import __version__
from .advisory import TwythonDeprecationWarning
from .compat import json, urlencode, parse_qsl, quote_plus, str, is_py2
from .endpoints import EndpointsMixin
from .exceptions import TwythonError, TwythonAuthError, TwythonRateLimitError
from .helpers import _transparent_params
# Always surface TwythonDeprecationWarning: DeprecationWarning subclasses
# are hidden by default on Python 2.7+, so force them to be shown.
warnings.simplefilter('always', TwythonDeprecationWarning)  # For Python 2.7 >
class Twython(EndpointsMixin, object):
    """Wrapper for the Twitter REST API (v1.1 by default).

    Handles OAuth 1 (user) and OAuth 2 (application-only) authentication,
    provides thin ``get``/``post`` helpers, and a ``cursor`` generator for
    paginated endpoints. Actual endpoint methods come from EndpointsMixin.
    """

    def __init__(self, app_key=None, app_secret=None, oauth_token=None,
                 oauth_token_secret=None, access_token=None,
                 token_type='bearer', oauth_version=1, api_version='1.1',
                 client_args=None, auth_endpoint='authenticate'):
        """Instantiates an instance of Twython. Takes optional parameters for
        authentication and such (see below).

        :param app_key: (optional) Your applications key
        :param app_secret: (optional) Your applications secret key
        :param oauth_token: (optional) When using **OAuth 1**, combined with
                            oauth_token_secret to make authenticated calls
        :param oauth_token_secret: (optional) When using **OAuth 1** combined
                                   with oauth_token to make authenticated calls
        :param access_token: (optional) When using **OAuth 2**, provide a
                             valid access token if you have one
        :param token_type: (optional) When using **OAuth 2**, provide your
                           token type. Default: bearer
        :param oauth_version: (optional) Choose which OAuth version to use.
                              Default: 1
        :param api_version: (optional) Choose which Twitter API version to
                            use. Default: 1.1
        :param client_args: (optional) Accepts some requests Session
                            parameters and some requests Request parameters.
                            See
                            http://docs.python-requests.org/en/latest/api/#sessionapi
                            and requests section below it for details.
                            [ex. headers, proxies, verify(SSL verification)]
        :param auth_endpoint: (optional) Lets you select which authentication
                              endpoint will use your application.
                              This will allow the application to have DM
                              access if the endpoint is 'authorize'.
                              Default: authenticate.
        """
        # API urls, OAuth urls and API version; needed for hitting that there
        # API.
        self.api_version = api_version
        self.api_url = 'https://api.twitter.com/%s'

        self.app_key = app_key
        self.app_secret = app_secret
        self.oauth_token = oauth_token
        self.oauth_token_secret = oauth_token_secret
        self.access_token = access_token

        # OAuth 1
        self.request_token_url = self.api_url % 'oauth/request_token'
        self.access_token_url = self.api_url % 'oauth/access_token'
        self.authenticate_url = self.api_url % ('oauth/%s' % auth_endpoint)

        if self.access_token:  # If they pass an access token, force OAuth 2
            oauth_version = 2

        self.oauth_version = oauth_version

        # OAuth 2
        if oauth_version == 2:
            self.request_token_url = self.api_url % 'oauth2/token'

        self.client_args = client_args or {}
        default_headers = {'User-Agent': 'Twython v' + __version__}
        if 'headers' not in self.client_args:
            # If they didn't set any headers, set our defaults for them
            self.client_args['headers'] = default_headers
        elif 'User-Agent' not in self.client_args['headers']:
            # If they set headers, but didn't include User-Agent.. set
            # it for them
            self.client_args['headers'].update(default_headers)

        # Generate OAuth authentication object for the request
        # If no keys/tokens are passed to __init__, auth=None allows for
        # unauthenticated requests, although I think all v1.1 requests
        # need auth
        auth = None
        if oauth_version == 1:
            # User Authentication is through OAuth 1
            if self.app_key is not None and self.app_secret is not None and \
               self.oauth_token is None and self.oauth_token_secret is None:
                auth = OAuth1(self.app_key, self.app_secret)

            if self.app_key is not None and self.app_secret is not None and \
               self.oauth_token is not None and self.oauth_token_secret is \
               not None:
                auth = OAuth1(self.app_key, self.app_secret,
                              self.oauth_token, self.oauth_token_secret)

        elif oauth_version == 2 and self.access_token:
            # Application Authentication is through OAuth 2
            token = {'token_type': token_type,
                     'access_token': self.access_token}
            auth = OAuth2(self.app_key, token=token)

        self.client = requests.Session()
        self.client.auth = auth

        # Make a copy of the client args and iterate over them
        # Pop out all the acceptable args at this point because they will
        # Never be used again.
        client_args_copy = self.client_args.copy()
        for k, v in client_args_copy.items():
            if k in ('cert', 'hooks', 'max_redirects', 'proxies'):
                setattr(self.client, k, v)
                self.client_args.pop(k)  # Pop, pop!

        # Headers are always present, so we unconditionally pop them and merge
        # them into the session headers.
        self.client.headers.update(self.client_args.pop('headers'))

        self._last_call = None

    def __repr__(self):
        return '<Twython: %s>' % (self.app_key)

    def _request(self, url, method='GET', params=None, api_call=None):
        """Internal request method"""
        method = method.lower()
        params = params or {}

        func = getattr(self.client, method)
        params, files = _transparent_params(params)

        requests_args = {}
        for k, v in self.client_args.items():
            # Maybe this should be set as a class variable and only done once?
            if k in ('timeout', 'allow_redirects', 'stream', 'verify'):
                requests_args[k] = v

        if method == 'get':
            requests_args['params'] = params
        else:
            requests_args.update({
                'data': params,
                'files': files,
            })
        try:
            response = func(url, **requests_args)
        except requests.RequestException as e:
            raise TwythonError(str(e))

        # create stash for last function intel
        self._last_call = {
            'api_call': api_call,
            'api_error': None,
            'cookies': response.cookies,
            'headers': response.headers,
            'status_code': response.status_code,
            'url': response.url,
            'content': response.text,
        }

        # greater than 304 (not modified) is an error
        if response.status_code > 304:
            error_message = self._get_error_message(response)
            self._last_call['api_error'] = error_message

            ExceptionType = TwythonError
            if response.status_code == 429:
                # Twitter API 1.1, always return 429 when
                # rate limit is exceeded
                ExceptionType = TwythonRateLimitError
            elif response.status_code == 401 or 'Bad Authentication data' \
                    in error_message:
                # Twitter API 1.1, returns a 401 Unauthorized or
                # a 400 "Bad Authentication data" for invalid/expired
                # app keys/user tokens
                ExceptionType = TwythonAuthError

            raise ExceptionType(
                error_message,
                error_code=response.status_code,
                retry_after=response.headers.get('X-Rate-Limit-Reset'))

        try:
            content = response.json()
        except ValueError:
            raise TwythonError('Response was not valid JSON. \
Unable to decode.')

        return content

    def _get_error_message(self, response):
        """Parse and return the first error message"""
        error_message = 'An error occurred processing your request.'
        try:
            content = response.json()
            # {"errors":[{"code":34,"message":"Sorry,
            # that page does not exist"}]}
            error_message = content['errors'][0]['message']
        except ValueError:
            # bad json data from Twitter for an error
            pass
        except (KeyError, IndexError):
            # missing data so fallback to default message
            pass

        return error_message

    def request(self, endpoint, method='GET', params=None, version='1.1'):
        """Return dict of response received from Twitter's API

        :param endpoint: (required) Full url or Twitter API endpoint
                         (e.g. search/tweets)
        :type endpoint: string
        :param method: (optional) Method of accessing data, either
                       GET or POST. (default GET)
        :type method: string
        :param params: (optional) Dict of parameters (if any) accepted
                       the by Twitter API endpoint you are trying to
                       access (default None)
        :type params: dict or None
        :param version: (optional) Twitter API version to access
                        (default 1.1)
        :type version: string

        :rtype: dict
        """
        if endpoint.startswith('http://'):
            raise TwythonError('api.twitter.com is restricted to SSL/TLS traffic.')

        # In case they want to pass a full Twitter URL
        # i.e. https://api.twitter.com/1.1/search/tweets.json
        if endpoint.startswith('https://'):
            url = endpoint
        else:
            url = '%s/%s.json' % (self.api_url % version, endpoint)

        content = self._request(url, method=method, params=params,
                                api_call=url)

        return content

    def get(self, endpoint, params=None, version='1.1'):
        """Shortcut for GET requests via :class:`request`"""
        return self.request(endpoint, params=params, version=version)

    def post(self, endpoint, params=None, version='1.1'):
        """Shortcut for POST requests via :class:`request`"""
        return self.request(endpoint, 'POST', params=params, version=version)

    def get_lastfunction_header(self, header, default_return_value=None):
        """Returns a specific header from the last API call
        This will return None if the header is not present

        :param header: (required) The name of the header you want to get
                       the value of

        Most useful for the following header information:
            x-rate-limit-limit,
            x-rate-limit-remaining,
            x-rate-limit-class,
            x-rate-limit-reset
        """
        if self._last_call is None:
            raise TwythonError('This function must be called after an API call. \
It delivers header information.')

        return self._last_call['headers'].get(header, default_return_value)

    def get_authentication_tokens(self, callback_url=None, force_login=False,
                                  screen_name=''):
        """Returns a dict including an authorization URL, ``auth_url``, to
        direct a user to

        :param callback_url: (optional) Url the user is returned to after
                             they authorize your app (web clients only)
        :param force_login: (optional) Forces the user to enter their
                            credentials to ensure the correct users
                            account is authorized.
        :param screen_name: (optional) If forced_login is set OR user is
                            not currently logged in, Prefills the username
                            input box of the OAuth login screen with the
                            given value

        :rtype: dict
        """
        if self.oauth_version != 1:
            raise TwythonError('This method can only be called when your \
OAuth version is 1.0.')

        request_args = {}
        if callback_url:
            request_args['oauth_callback'] = callback_url
        response = self.client.get(self.request_token_url, params=request_args)

        if response.status_code == 401:
            raise TwythonAuthError(response.content,
                                   error_code=response.status_code)
        elif response.status_code != 200:
            raise TwythonError(response.content,
                               error_code=response.status_code)

        request_tokens = dict(parse_qsl(response.content.decode('utf-8')))
        if not request_tokens:
            raise TwythonError('Unable to decode request tokens.')

        oauth_callback_confirmed = request_tokens.get('oauth_callback_confirmed') \
            == 'true'

        auth_url_params = {
            'oauth_token': request_tokens['oauth_token'],
        }

        if force_login:
            auth_url_params.update({
                'force_login': force_login,
                'screen_name': screen_name
            })

        # Use old-style callback argument if server didn't accept new-style
        # BUG FIX: this previously read ``self.callback_url``, an attribute
        # that is never set on this class, so hitting this fallback branch
        # raised AttributeError. Use the local argument instead.
        if callback_url and not oauth_callback_confirmed:
            auth_url_params['oauth_callback'] = callback_url

        request_tokens['auth_url'] = self.authenticate_url + \
            '?' + urlencode(auth_url_params)

        return request_tokens

    def get_authorized_tokens(self, oauth_verifier):
        """Returns a dict of authorized tokens after they go through the
        :class:`get_authentication_tokens` phase.

        :param oauth_verifier: (required) The oauth_verifier (or a.k.a PIN
        for non web apps) retrieved from the callback url querystring

        :rtype: dict
        """
        if self.oauth_version != 1:
            raise TwythonError('This method can only be called when your \
OAuth version is 1.0.')

        response = self.client.get(self.access_token_url,
                                   params={'oauth_verifier': oauth_verifier},
                                   headers={'Content-Type': 'application/\
json'})

        if response.status_code == 401:
            try:
                try:
                    # try to get json
                    content = response.json()
                except AttributeError:  # pragma: no cover
                    # if unicode detected
                    content = json.loads(response.content)
            except ValueError:
                content = {}

            raise TwythonError(content.get('error', 'Invalid / expired To \
ken'), error_code=response.status_code)

        authorized_tokens = dict(parse_qsl(response.content.decode('utf-8')))
        if not authorized_tokens:
            raise TwythonError('Unable to decode authorized tokens.')

        return authorized_tokens  # pragma: no cover

    def obtain_access_token(self):
        """Returns an OAuth 2 access token to make OAuth 2 authenticated
        read-only calls.

        :rtype: string
        """
        if self.oauth_version != 2:
            raise TwythonError('This method can only be called when your \
OAuth version is 2.0.')

        data = {'grant_type': 'client_credentials'}
        basic_auth = HTTPBasicAuth(self.app_key, self.app_secret)
        try:
            response = self.client.post(self.request_token_url,
                                        data=data, auth=basic_auth)
            content = response.content.decode('utf-8')
            try:
                # NOTE(review): ``content`` is a str at this point, so this
                # always raises AttributeError and falls through to
                # json.loads below; kept for backward compatibility.
                content = content.json()
            except AttributeError:
                content = json.loads(content)
            access_token = content['access_token']
        except (KeyError, ValueError, requests.exceptions.RequestException):
            raise TwythonAuthError('Unable to obtain OAuth 2 access token.')
        else:
            return access_token

    @staticmethod
    def construct_api_url(api_url, **params):
        """Construct a Twitter API url, encoded, with parameters

        :param api_url: URL of the Twitter API endpoint you are attempting
                        to construct
        :param \*\*params: Parameters that are accepted by Twitter for the
                           endpoint you're requesting
        :rtype: string

        Usage::

          >>> from twython import Twython
          >>> twitter = Twython()
          >>> api_url = 'https://api.twitter.com/1.1/search/tweets.json'
          >>> constructed_url = twitter.construct_api_url(api_url, q='python',
          result_type='popular')
          >>> print constructed_url
          https://api.twitter.com/1.1/search/tweets.json?q=python&result_type=popular
        """
        querystring = []
        params, _ = _transparent_params(params or {})
        params = requests.utils.to_key_val_list(params)
        for (k, v) in params:
            querystring.append(
                '%s=%s' % (Twython.encode(k), quote_plus(Twython.encode(v)))
            )
        return '%s?%s' % (api_url, '&'.join(querystring))

    def search_gen(self, search_query, **params):  # pragma: no cover
        warnings.warn(
            'This method is deprecated. You should use Twython.cursor instead. \
[eg. Twython.cursor(Twython.search, q=\'your_query\')]',
            TwythonDeprecationWarning,
            stacklevel=2
        )
        return self.cursor(self.search, q=search_query, **params)

    def cursor(self, function, return_pages=False, **params):
        """Returns a generator for results that match a specified query.

        :param function: Instance of a Twython function
        (Twython.get_home_timeline, Twython.search)
        :param \*\*params: Extra parameters to send with your request
        (usually parameters accepted by the Twitter API endpoint)
        :rtype: generator

        Usage::

          >>> from twython import Twython
          >>> twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN,
          OAUTH_TOKEN_SECRET)

          >>> results = twitter.cursor(twitter.search, q='python')
          >>> for result in results:
          >>>   print result
        """
        if not hasattr(function, 'iter_mode'):
            raise TwythonError('Unable to create generator for Twython \
method "%s"' % function.__name__)

        while True:
            content = function(**params)

            if not content:
                # BUG FIX: was ``raise StopIteration`` — inside a generator
                # that is a RuntimeError on Python 3.7+ (PEP 479); a bare
                # return terminates the generator identically.
                return

            if hasattr(function, 'iter_key'):
                results = content.get(function.iter_key)
            else:
                results = content

            if return_pages:
                yield results
            else:
                for result in results:
                    yield result

            if function.iter_mode == 'cursor' and \
                    content['next_cursor_str'] == '0':
                # BUG FIX: was ``raise StopIteration`` (see PEP 479 above)
                return

            try:
                if function.iter_mode == 'id':
                    if 'max_id' not in params:
                        # Add 1 to the id because since_id and
                        # max_id are inclusive
                        if hasattr(function, 'iter_metadata'):
                            since_id = content[function.iter_metadata].get('since_id_str')
                        else:
                            since_id = content[0]['id_str']
                        params['since_id'] = (int(since_id) - 1)
                elif function.iter_mode == 'cursor':
                    params['cursor'] = content['next_cursor_str']
            except (TypeError, ValueError):  # pragma: no cover
                raise TwythonError('Unable to generate next page of search \
results, `page` is not a number.')

    @staticmethod
    def unicode2utf8(text):
        try:
            if is_py2 and isinstance(text, str):
                text = text.encode('utf-8')
        except:
            pass
        return text

    @staticmethod
    def encode(text):
        if is_py2 and isinstance(text, (str)):
            return Twython.unicode2utf8(text)
        return str(text)

    @staticmethod
    def html_for_tweet(tweet, use_display_url=True, use_expanded_url=False):
        """Return HTML for a tweet (urls, mentions, hashtags replaced with
        links)

        :param tweet: Tweet object from received from Twitter API
        :param use_display_url: Use display URL to represent link
                                (ex. google.com, github.com). Default: True
        :param use_expanded_url: Use expanded URL to represent link
                                 (e.g. http://google.com). Default False

        If use_expanded_url is True, it overrides use_display_url.
        If use_display_url and use_expanded_url is False, short url will
        be used (t.co/xxxxx)
        """
        if 'retweeted_status' in tweet:
            tweet = tweet['retweeted_status']

        # BUG FIX: grab the raw text up-front so tweets without an
        # 'entities' key return their text instead of raising
        # UnboundLocalError on the final ``return text``.
        text = tweet.get('text', '')

        if 'entities' in tweet:
            text = tweet['text']
            entities = tweet['entities']

            # Mentions
            for entity in entities['user_mentions']:
                start, end = entity['indices'][0], entity['indices'][1]

                mention_html = '<a href="https://twitter.com/%(screen_name)s" class="twython-mention">@%(screen_name)s</a>'
                text = text.replace(tweet['text'][start:end],
                                    mention_html % {'screen_name': entity['screen_name']})

            # Hashtags
            for entity in entities['hashtags']:
                start, end = entity['indices'][0], entity['indices'][1]

                hashtag_html = '<a href="https://twitter.com/search?q=%%23%(hashtag)s" class="twython-hashtag">#%(hashtag)s</a>'
                text = text.replace(tweet['text'][start:end], hashtag_html % {'hashtag': entity['text']})

            # Urls
            for entity in entities['urls']:
                start, end = entity['indices'][0], entity['indices'][1]
                if use_display_url and entity.get('display_url') \
                        and not use_expanded_url:
                    shown_url = entity['display_url']
                elif use_expanded_url and entity.get('expanded_url'):
                    shown_url = entity['expanded_url']
                else:
                    shown_url = entity['url']

                url_html = '<a href="%s" class="twython-url">%s</a>'
                text = text.replace(tweet['text'][start:end],
                                    url_html % (entity['url'], shown_url))

        return text
| |
"""
Created on Fri Jan 24 2020.
@author: mtageld
"""
import copy
import os
from itertools import combinations
import numpy as np
from imageio import imwrite
from histomicstk.annotations_and_masks.annotation_and_mask_utils import (
_get_coords_from_element, _get_idxs_for_all_rois,
_simple_add_element_to_roi, get_bboxes_from_slide_annotations,
get_idxs_for_annots_overlapping_roi_by_bbox, get_image_from_htk_response,
get_scale_factor_and_appendStr, parse_slide_annotations_into_tables,
scale_slide_annotations)
from histomicstk.annotations_and_masks.annotations_to_masks_handler import (
_get_roi_bounds_by_run_mode, _visualize_annotations_on_rgb)
# %%===========================================================================
def _sanity_checks(
MPP, MAG, mode, bounds, idx_for_roi, get_rgb, get_visualization):
# MPP precedes MAG
if all([j is not None for j in (MPP, MAG)]):
MAG = None
# some sanity checks
for mf in (MPP, MAG):
if mf is not None:
assert mf > 0, "MPP or MAG must be positive."
if mode in ['wsi', 'min_bounding_box']:
bounds = None
idx_for_roi = None
if idx_for_roi is not None:
mode = 'polygonal_bounds'
elif bounds is not None:
mode = 'manual_bounds'
assert mode in [
'wsi', 'min_bounding_box', 'manual_bounds', 'polygonal_bounds'], \
"mode %s not recognized" % mode
if get_visualization:
assert get_rgb, "cannot get visualization without rgb."
return MPP, MAG, mode, bounds, idx_for_roi, get_rgb, get_visualization
def _keep_relevant_elements_for_roi(
        element_infos, sf, mode='manual_bounds',
        idx_for_roi=None, roiinfo=None):
    """Restrict annotation elements to those that may overlap the ROI.

    Parameters
    ----------
    element_infos : pandas DataFrame
        bounding box info per annotation element (already scaled by sf)
    sf : float
        scale factor between target and base magnification
    mode : str
        run mode (see annotations_to_contours_no_mask)
    idx_for_roi : int or None
        row index of the ROI element ('polygonal_bounds' mode only)
    roiinfo : dict or None
        ROI bounds ('XMIN'/'XMAX'/'YMIN'/'YMAX') at base magnification

    Returns
    -------
    (pandas DataFrame, dict)
        relevant element infos, and updated ROI info at base magnification

    """
    # local import keeps module-level imports untouched
    import pandas as pd

    # This stores information about the ROI like bounds, slide_name, etc
    # Allows passing many parameters and good forward/backward compatibility
    if roiinfo is None:
        roiinfo = dict()

    if mode != "polygonal_bounds":
        # add the ROI itself as a row in the bounding boxes dataframe.
        # BUG FIX: DataFrame.append() was removed in pandas 2.0; use
        # pd.concat() instead (behavior here is identical).
        roi_row = pd.DataFrame([{
            'xmin': int(roiinfo['XMIN'] * sf),
            'xmax': int(roiinfo['XMAX'] * sf),
            'ymin': int(roiinfo['YMIN'] * sf),
            'ymax': int(roiinfo['YMAX'] * sf),
        }])
        element_infos = pd.concat(
            [element_infos, roi_row], ignore_index=True)
        idx_for_roi = element_infos.shape[0] - 1

    # isolate annotations that potentially overlap roi
    overlaps = get_idxs_for_annots_overlapping_roi_by_bbox(
        element_infos, idx_for_roi=idx_for_roi)
    if mode == "polygonal_bounds":
        # the ROI element itself is also kept
        overlaps = overlaps + [idx_for_roi, ]
    elinfos_roi = element_infos.loc[overlaps, :]

    # update roiinfo -- remember, annotation elements can be
    # really large and extend beyond the bounds asked by the user.
    # since we're not parsing the polygons into mask form here, and
    # therefore we're not 'cropping' the polygons to the requested bounds,
    # we extend the requested bounds themselves to accommodate the
    # overflowing annotations.
    roiinfo['XMIN'] = int(np.min(elinfos_roi.xmin))
    roiinfo['YMIN'] = int(np.min(elinfos_roi.ymin))
    roiinfo['XMAX'] = int(np.max(elinfos_roi.xmax))
    roiinfo['YMAX'] = int(np.max(elinfos_roi.ymax))
    roiinfo['BBOX_WIDTH'] = roiinfo['XMAX'] - roiinfo['XMIN']
    roiinfo['BBOX_HEIGHT'] = roiinfo['YMAX'] - roiinfo['YMIN']

    # scale back coords to base magnification
    roiinfo = {k: int(v / sf) for k, v in roiinfo.items()}

    return elinfos_roi, roiinfo
def _trim_slide_annotations_to_roi(annotations, elinfos_roi):
# unique relevent annotation document indices & slice
unique_annidxs = np.int32(np.unique(elinfos_roi.loc[:, "annidx"]))
annotations_slice = np.array(annotations)[unique_annidxs].tolist()
# anno is index relative to unique_annidxs, while
# annidx is index relative to original slide annotations
for anno, annidx in enumerate(unique_annidxs):
# indices of relevant elements in this annotation doc
eleidxs = np.int32(elinfos_roi.loc[
elinfos_roi.loc[:, 'annidx'] == annidx, 'elementidx'])
# slice relevant elements
elements_original = annotations_slice[anno]['annotation']['elements']
annotations_slice[anno]['annotation']['elements'] = np.array(
elements_original)[eleidxs].tolist()
return annotations_slice
# %%===========================================================================
def annotations_to_contours_no_mask(
        gc, slide_id, MPP=5.0, MAG=None, mode='min_bounding_box',
        bounds=None, idx_for_roi=None,
        slide_annotations=None, element_infos=None,
        linewidth=0.2, get_rgb=True, get_visualization=True, text=True):
    """Process annotations to get RGB and contours without intermediate masks.

    Parameters
    ----------
    gc : object
        girder client object to make requests, for example:
        gc = girder_client.GirderClient(apiUrl = APIURL)
        gc.authenticate(interactive=True)
    slide_id : str
        girder id for item (slide)
    MPP : float or None
        Microns-per-pixel -- best use this as it's more well-defined than
        magnification which is more scanner or manufacturer specific.
        MPP of 0.25 often roughly translates to 40x
    MAG : float or None
        If you prefer to use whatever magnification is reported in slide.
        If neither MPP or MAG is provided, everything is retrieved without
        scaling at base (scan) magnification.
    mode : str
        This specifies which part of the slide to get the mask from. Allowed
        modes include the following
        - wsi: get scaled up or down version of mask of whole slide
        - min_bounding_box: get minimum box for all annotations in slide
        - manual_bounds: use given ROI bounds provided by the 'bounds' param
        - polygonal_bounds: use the idx_for_roi param to get coordinates
    bounds : dict or None
        if not None, has keys 'XMIN', 'XMAX', 'YMIN', 'YMAX' for slide
        region coordinates (AT BASE MAGNIFICATION) to get labeled image
        (mask) for. Use this with the 'manual_bounds' run mode.
    idx_for_roi : int
        index of ROI within the element_infos dataframe.
        Use this with the 'polygonal_bounds' run mode.
    slide_annotations : list or None
        Give this parameter to avoid re-getting slide annotations. If you do
        provide the annotations, though, make sure you have used
        scale_slide_annotations() to scale them up or down by sf BEFOREHAND.
    element_infos : pandas DataFrame.
        The columns annidx and elementidx
        encode the dict index of annotation document and element,
        respectively, in the original slide_annotations list of dictionaries.
        This can be obtained by get_bboxes_from_slide_annotations() method.
        Make sure you have used scale_slide_annotations().
    linewidth : float
        visualization line width
    get_rgb: bool
        get rgb image?
    get_visualization : bool
        get overlayed annotation bounds over RGB for visualization
    text : bool
        add text labels to visualization?

    Returns
    --------
    dict
        Results dict containing one or more of the following keys
        - bounds: dict of bounds at scan magnification
        - rgb: (mxnx3 np array) corresponding rgb image
        - contours: dict
        - visualization: (mxnx3 np array) visualization overlay

    """
    # normalize/validate request parameters (MPP wins over MAG; presence of
    # idx_for_roi or bounds may override the requested mode)
    MPP, MAG, mode, bounds, idx_for_roi, get_rgb, get_visualization = \
        _sanity_checks(
            MPP, MAG, mode, bounds, idx_for_roi,
            get_rgb, get_visualization)

    # calculate the scale factor between target and base magnification;
    # appendStr is the girder query suffix encoding the requested resolution
    sf, appendStr = get_scale_factor_and_appendStr(
        gc=gc, slide_id=slide_id, MPP=MPP, MAG=MAG)

    if slide_annotations is not None:
        # caller-supplied annotations MUST already be scaled by sf
        assert element_infos is not None, "must also provide element_infos"
    else:
        # get annotations for slide
        slide_annotations = gc.get('/annotation/item/' + slide_id)

        # scale up/down annotations by a factor
        slide_annotations = scale_slide_annotations(slide_annotations, sf=sf)

        # get bounding box information for all annotations -> scaled by sf
        element_infos = get_bboxes_from_slide_annotations(slide_annotations)

    # Determine get region based on run mode, keeping in mind that it
    # must be at BASE MAGNIFICATION coordinates before it is passed
    # on to get_mask_from_slide()
    # if mode != 'polygonal_bound':
    bounds = _get_roi_bounds_by_run_mode(
        gc=gc, slide_id=slide_id, mode=mode, bounds=bounds,
        element_infos=element_infos, idx_for_roi=idx_for_roi, sf=sf)

    # only keep relevant elements and get uncropped bounds
    elinfos_roi, uncropped_bounds = _keep_relevant_elements_for_roi(
        element_infos, sf=sf, mode=mode, idx_for_roi=idx_for_roi,
        roiinfo=copy.deepcopy(bounds))

    # find relevant portion from slide annotations to use
    # (with overflowing beyond edge)
    annotations_slice = _trim_slide_annotations_to_roi(
        copy.deepcopy(slide_annotations), elinfos_roi=elinfos_roi)

    # get roi polygon vertices (scaled coordinates)
    rescaled_bounds = {k: int(v * sf) for k, v in bounds.items()}
    if mode == 'polygonal_bounds':
        # the ROI outline itself serves as the cropping polygon
        roi_coords = _get_coords_from_element(copy.deepcopy(
            slide_annotations[int(element_infos.loc[idx_for_roi, 'annidx'])]
            ['annotation']['elements']
            [int(element_infos.loc[idx_for_roi, 'elementidx'])]))
        cropping_bounds = None
    else:
        roi_coords = None
        cropping_bounds = rescaled_bounds

    # tabularize to use contours
    _, contours_df = parse_slide_annotations_into_tables(
        annotations_slice, cropping_bounds=cropping_bounds,
        cropping_polygon_vertices=roi_coords,
        use_shapely=mode in ('manual_bounds', 'polygonal_bounds'),
    )
    contours_list = contours_df.to_dict(orient='records')

    # Final bounds (relative to slide at base magnification)
    bounds = {k: int(v / sf) for k, v in rescaled_bounds.items()}
    result = dict()

    # get RGB region image from the girder tile server
    if get_rgb:
        getStr = \
            "/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d&encoding=PNG" \
            % (slide_id,
               bounds['XMIN'], bounds['XMAX'],
               bounds['YMIN'], bounds['YMAX'])
        getStr += appendStr
        resp = gc.get(getStr, jsonResp=False)
        rgb = get_image_from_htk_response(resp)
        result['rgb'] = rgb

    # Assign to results
    result.update({
        'contours': contours_list,
        'bounds': bounds,
    })

    # get visualization of annotations on RGB
    # (note: 'rgb' is only bound when get_rgb is True; _sanity_checks
    # guarantees get_rgb whenever get_visualization is requested)
    if get_visualization:
        result['visualization'] = _visualize_annotations_on_rgb(
            rgb=rgb, contours_list=contours_list, linewidth=linewidth,
            text=text)

    return result
# %%===========================================================================
def combs_with_unique_products(low, high, k):
    """Yield k-combinations of range(low, high) with pairwise-unique products.

    Combinations are produced in lexicographic order; a combination whose
    element product was already seen is skipped.
    """
    seen_products = set()
    for candidate in combinations(range(low, high), k):
        product = np.prod(candidate)
        if product in seen_products:
            continue
        seen_products.add(product)
        yield candidate
def contours_to_labeled_object_mask(
        contours, gtcodes, mode='object', verbose=False, monitorprefix=''):
    """Process contours to get an object segmentation labeled mask.

    Parameters
    ----------
    contours : DataFrame
        contours corresponding to annotation elements from the slide.
        All coordinates are relative to the mask that you want to output.
        The following columns are expected.
        - group: str, annotation group (ground truth label).
        - ymin: int, minimum y coordinate
        - ymax: int, maximum y coordinate
        - xmin: int, minimum x coordinate
        - xmax: int, maximum x coordinate
        - coords_x: str, vertex x coordinates comma-separated values
        - coords_y: str, vertex y coordinates comma-separated values
    gtcodes : DataFrame
        the ground truth codes and information dataframe.
        This is a dataframe that is indexed by the annotation group name
        and has the following columns.
        - group: str, group name of annotation, eg. mostly_tumor.
        - GT_code: int, desired ground truth code (in the mask).
          Pixels of this value belong to corresponding group (class).
        - color: str, rgb format. eg. rgb(255,0,0).
    mode : str
        run mode for getting masks. Must be in
        - object: get 3-channel mask where first channel encodes label
          (tumor, stroma, etc) while product of second and third
          channel encodes the object ID (i.e. individual contours)
          This is useful for object localization and segmentation tasks.
        - semantic: get a 1-channel mask corresponding to the first channel
          of the object mode.
    verbose : bool
        print to screen?
    monitorprefix : str
        prefix to add to printed statements

    Returns
    -------
    np.array
        If mode is "object", this returns an (m, n, 3) np array of dtype
        uint8 that can be saved as a png.
        First channel: encodes label (can be used for semantic segmentation)
        Second & third channels: multiplication of second and third channel
        gives the object id (255 choose 2 = 32,385 max unique objects).
        This allows us to save into a convenient 3-channel png object labels
        and segmentation masks, which is more compact than traditional
        mask-rcnn save formats like having one channel per object and a
        separate csv file for object labels. This is also more convenient
        than simply saving things into pickled np array objects, and allows
        compatibility with data loaders that expect an image or mask.
        If mode is "semantic" only the labels (corresponding to first
        channel of the object mode) is output.
        ** IMPORTANT NOTE ** When you read this mask and decide to
        reconstruct the object codes, convert it to float32 so that the
        product doesn't saturate at 255.

    """
    def _process_gtcodes(gtcodesdf):
        # make sure ROIs are overlayed first
        # & assigned background class if relevant
        roi_groups = list(
            gtcodesdf.loc[gtcodesdf.loc[:, 'is_roi'] == 1, "group"])
        # overlay order below the current minimum -> ROIs are drawn first
        roi_order = np.min(gtcodesdf.loc[:, 'overlay_order']) - 1
        bck_classes = gtcodesdf.loc[
            gtcodesdf.loc[:, 'is_background_class'] == 1, :]
        for roi_group in roi_groups:
            gtcodesdf.loc[roi_group, 'overlay_order'] = roi_order
            # ROI pixels take the background class GT code, if one exists
            if bck_classes.shape[0] > 0:
                gtcodesdf.loc[
                    roi_group, 'GT_code'] = bck_classes.iloc[0, :]['GT_code']
        return gtcodesdf

    if mode not in ['semantic', 'object']:
        raise Exception("Unknown run mode:", mode)

    # make sure roi is overlayed first + other processing
    gtcodes = _process_gtcodes(gtcodes)

    # unique combinations of number to be multiplied (second & third channel)
    # to be able to reconstruct the object ID when image is re-read
    object_code_comb = combs_with_unique_products(1, 256, 2)

    # Add annotations in overlay order
    overlay_orders = sorted(set(gtcodes.loc[:, 'overlay_order']))
    N_elements = contours.shape[0]

    # Make sure we don't run out of object encoding values.
    # NOTE(review): 17437 appears to be the count of unique products
    # yielded by combs_with_unique_products(1, 256, 2) -- fewer than
    # "255 choose 2" because duplicate products are skipped; confirm.
    if N_elements > 17437:  # max unique products
        raise Exception("Too many objects!!")

    # Add roiinfo & init roi (overall bounding box of all contours)
    roiinfo = {
        'XMIN': int(np.min(contours.xmin)),
        'YMIN': int(np.min(contours.ymin)),
        'XMAX': int(np.max(contours.xmax)),
        'YMAX': int(np.max(contours.ymax)),
    }
    roiinfo['BBOX_WIDTH'] = roiinfo['XMAX'] - roiinfo['XMIN']
    roiinfo['BBOX_HEIGHT'] = roiinfo['YMAX'] - roiinfo['YMIN']

    # init channels
    labels_channel = np.zeros(
        (roiinfo['BBOX_HEIGHT'], roiinfo['BBOX_WIDTH']), dtype=np.uint8)
    if mode == 'object':
        objects_channel1 = labels_channel.copy()
        objects_channel2 = labels_channel.copy()

    elNo = 0
    for overlay_level in overlay_orders:
        # get indices of relevant groups
        relevant_groups = list(gtcodes.loc[
            gtcodes.loc[:, 'overlay_order'] == overlay_level, 'group'])
        relIdxs = []
        for group_name in relevant_groups:
            relIdxs.extend(list(contours.loc[
                contours.group == group_name, :].index))

        # get relevant infos and sort from largest to smallest (by bbox area)
        # so that the smaller elements are layered last. This helps partially
        # address issues described in:
        # https://github.com/DigitalSlideArchive/HistomicsTK/issues/675
        elinfos_relevant = contours.loc[relIdxs, :].copy()
        elinfos_relevant.sort_values(
            'bbox_area', axis=0, ascending=False, inplace=True)

        # Go through elements and add to ROI mask
        for elId, elinfo in elinfos_relevant.iterrows():
            elNo += 1
            elcountStr = "%s: Overlay level %d: Element %d of %d: %s" % (
                monitorprefix, overlay_level, elNo, N_elements,
                elinfo['group'])
            if verbose:
                print(elcountStr)

            # Add element to labels channel
            labels_channel, element = _simple_add_element_to_roi(
                elinfo=elinfo, ROI=labels_channel, roiinfo=roiinfo,
                GT_code=gtcodes.loc[elinfo['group'], 'GT_code'],
                verbose=verbose, monitorPrefix=elcountStr)

            # element is None when the helper could not rasterize it; such
            # elements get no object code so codes stay consecutive
            if (element is not None) and (mode == 'object'):
                object_code = next(object_code_comb)

                # Add element to object (instance) channel 1
                objects_channel1, _ = _simple_add_element_to_roi(
                    elinfo=elinfo, ROI=objects_channel1, roiinfo=roiinfo,
                    GT_code=object_code[0], element=element,
                    verbose=verbose, monitorPrefix=elcountStr)

                # Add element to object (instance) channel 2
                objects_channel2, _ = _simple_add_element_to_roi(
                    elinfo=elinfo, ROI=objects_channel2, roiinfo=roiinfo,
                    GT_code=object_code[1], element=element,
                    verbose=verbose, monitorPrefix=elcountStr)

    # Now concat to get final product
    # If the mode is object segmentation, we get an np array where
    # - First channel: encodes label (can be used for semantic segmentation)
    # - Second & third channels: multiplication of second and third channel
    #   gives the object id (255 choose 2 = 32,385 max unique objects)
    # This enables us to later save these masks in convenient compact
    # .png format
    if mode == 'semantic':
        return labels_channel
    else:
        return np.uint8(np.concatenate((
            labels_channel[..., None],
            objects_channel1[..., None],
            objects_channel2[..., None],
        ), -1))
# %%===========================================================================
def get_all_rois_from_slide_v2(
        gc, slide_id, GTCodes_dict, save_directories,
        annotations_to_contours_kwargs=None,
        mode='object', get_mask=True,
        slide_name=None, verbose=True, monitorprefix="",
        callback=None, callback_kwargs=None):
    """Get all ROIs for a slide without an intermediate mask form.

    This mainly relies on contours_to_labeled_object_mask(), which should
    be referred to for extra documentation.

    This can be run in either the "object" mode, whereby the saved masks
    are a three-channel png where first channel encodes class label (i.e.
    same as semantic segmentation) and the product of the values in the
    second and third channel encodes the object ID. Otherwise, the user
    may decide to run in the "semantic" mode and the resultant mask would
    consist of only one channel (semantic segmentation with no object
    differentiation).

    The difference between this and version 1, found at
    histomicstk.annotations_and_masks.annotations_to_masks_handler.
    get_all_rois_from_slide()
    is that this (version 2) gets the contours first, including cropping
    to wanted ROI boundaries and other processing using shapely, and THEN
    parses these into masks. This enables us to differentiate various objects
    to use the data for object localization or classification or segmentation
    tasks. If you would like to get semantic segmentation masks, i.e. you do
    not really care about individual objects, you can use either version 1
    or this method. They re-use much of the same code-base, but some edge
    cases maybe better handled by version 1. For example, since
    this version uses shapely first to crop, some objects may be incorrectly
    parsed by shapely. Version 1, using PIL.ImageDraw may not have these
    problems.

    Bottom line is: if you need semantic segmentation masks, it is probably
    safer to use version 1, whereas if you need object segmentation masks,
    this method should be used.

    Parameters
    ----------
    gc : object
        girder client object to make requests, for example:
        gc = girder_client.GirderClient(apiUrl = APIURL)
        gc.authenticate(interactive=True)

    slide_id : str
        girder id for item (slide)

    GTCodes_dict : dict
        the ground truth codes and information dict.
        This is a dict that is indexed by the annotation group name and
        each entry is in turn a dict with the following keys:

        - group: group name of annotation (string), eg. mostly_tumor
        - overlay_order: int, how early to place the annotation in the
          mask. Larger values means this annotation group is overlayed
          last and overwrites whatever overlaps it.
        - GT_code: int, desired ground truth code (in the mask)
          Pixels of this value belong to corresponding group (class)
        - is_roi: Flag for whether this group encodes an ROI
        - is_background_class: Flag, whether this group is the default
          fill value inside the ROI. For example, you may decide that
          any pixel inside the ROI is considered stroma.

    save_directories : dict
        paths to directories to save data. Each entry is a string, and the
        following keys are allowed

        - ROI: path to save masks (labeled images)
        - rgb: path to save rgb images
        - contours: path to save annotation contours
        - visualization: path to save rgb visualization overlays

    mode : str
        run mode for getting masks. Must be in

        - object: get 3-channel mask where first channel encodes label
          (tumor, stroma, etc) while product of second and third
          channel encodes the object ID (i.e. individual contours)
          This is useful for object localization and segmentation tasks.
        - semantic: get a 1-channel mask corresponding to the first channel
          of the object mode.

    get_mask : bool
        While the main purpose of this method IS to get object segmentation
        masks, it is conceivable that some users might just want to get
        the RGB and contours. Default is True.

    annotations_to_contours_kwargs : dict
        kwargs to pass to annotations_to_contours_no_mask()
        default values are assigned if specific parameters are not given.

    slide_name : str or None
        If not given, its inferred using a server request using girder client.

    verbose : bool
        Print progress to screen?

    monitorprefix : str
        text to prepend to printed statements

    callback : function
        a callback function to run on the roi dictionary output. This is
        internal, but if you really want to use this, make sure the callback
        can accept the following keys and that you do NOT assign them yourself
        gc, slide_id, slide_name, MPP, MAG, verbose, monitorprefix
        Also, this callback MUST *ONLY* return the roi dictionary, whether
        or not it is modified inside it. If it is modified inside the callback
        then the modified version is the one that will be saved to disk.

    callback_kwargs : dict
        kwargs to pass to callback, not including the mandatory kwargs
        that will be passed internally (mentioned earlier here).

    Returns
    --------
    list of dicts
        each entry contains the following keys
        mask - path to saved mask
        rgb - path to saved rgb image
        contours - path to saved annotation contours
        visualization - path to saved rgb visualization overlay

    """
    from pandas import DataFrame

    default_keyvalues = {
        'MPP': None, 'MAG': None,
        'linewidth': 0.2,
        'get_rgb': True, 'get_visualization': True,
    }

    # Work on copies so we never mutate the caller's dicts. Previously,
    # kvp aliased annotations_to_contours_kwargs (so assigning defaults
    # leaked back to the caller), and callback_kwargs.update() raised
    # AttributeError when a callback was given with callback_kwargs=None.
    kvp = dict(annotations_to_contours_kwargs or {})
    callback_kwargs = dict(callback_kwargs or {})

    # assign defaults if nothing given
    for k, v in default_keyvalues.items():
        kvp.setdefault(k, v)

    # convert to df and sanity check
    gtcodes_df = DataFrame.from_dict(GTCodes_dict, orient='index')
    if any(gtcodes_df.loc[:, 'GT_code'] <= 0):
        raise Exception("All GT_code must be > 0")

    # if not given, assign name of first file associated with girder item
    if slide_name is None:
        resp = gc.get('/item/%s/files' % slide_id)
        slide_name = resp[0]['name']
        slide_name = slide_name[:slide_name.rfind('.')]

    # get annotations for slide
    slide_annotations = gc.get('/annotation/item/' + slide_id)

    # scale up/down annotations by a factor
    sf, _ = get_scale_factor_and_appendStr(
        gc=gc, slide_id=slide_id, MPP=kvp['MPP'], MAG=kvp['MAG'])
    slide_annotations = scale_slide_annotations(slide_annotations, sf=sf)

    # get bounding box information for all annotations
    element_infos = get_bboxes_from_slide_annotations(slide_annotations)

    # get idx of all 'special' roi annotations
    idxs_for_all_rois = _get_idxs_for_all_rois(
        GTCodes=gtcodes_df, element_infos=element_infos)

    savenames = []

    for roino, idx_for_roi in enumerate(idxs_for_all_rois):

        roicountStr = "%s: roi %d of %d" % (
            monitorprefix, roino + 1, len(idxs_for_all_rois))

        # get specified area
        roi_out = annotations_to_contours_no_mask(
            gc=gc, slide_id=slide_id,
            mode='polygonal_bounds', idx_for_roi=idx_for_roi,
            slide_annotations=slide_annotations,
            element_infos=element_infos, **kvp)

        # get corresponding mask (semantic or object)
        if get_mask:
            roi_out['mask'] = contours_to_labeled_object_mask(
                contours=DataFrame(roi_out['contours']),
                gtcodes=gtcodes_df,
                mode=mode, verbose=verbose, monitorprefix=roicountStr)

        # now run callback on roi_out
        if callback is not None:
            # these are 'compulsory' kwargs for the callback
            # since it will not have access to these otherwise
            callback_kwargs.update({
                'gc': gc,
                'slide_id': slide_id,
                'slide_name': slide_name,
                'MPP': kvp['MPP'],
                'MAG': kvp['MAG'],
                'verbose': verbose,
                'monitorprefix': roicountStr,
            })
            callback(roi_out, **callback_kwargs)

        # now save roi (rgb, vis, mask)
        this_roi_savenames = dict()
        ROINAMESTR = "%s_left-%d_top-%d_bottom-%d_right-%d" % (
            slide_name,
            roi_out['bounds']['XMIN'], roi_out['bounds']['YMIN'],
            roi_out['bounds']['YMAX'], roi_out['bounds']['XMAX'])
        for imtype in ['mask', 'rgb', 'visualization']:
            if imtype in roi_out.keys():
                savename = os.path.join(
                    save_directories[imtype], ROINAMESTR + ".png")
                if verbose:
                    print("%s: Saving %s" % (roicountStr, savename))
                imwrite(im=roi_out[imtype], uri=savename)
                this_roi_savenames[imtype] = savename

        # save contours
        savename = os.path.join(
            save_directories['contours'], ROINAMESTR + ".csv")
        if verbose:
            print("%s: Saving %s\n" % (roicountStr, savename))
        contours_df = DataFrame(roi_out['contours'])
        contours_df.to_csv(savename)
        this_roi_savenames['contours'] = savename

        savenames.append(this_roi_savenames)

    return savenames
# %%===========================================================================
| |
#!/usr/bin/env python
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Wikimedia Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import shutil
import time
import git
import zuul.lib.cloner
from tests.base import ZuulTestCase
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-32s '
'%(levelname)-8s %(message)s')
class TestCloner(ZuulTestCase):
    """Tests for zuul-cloner: preparing a multi-project workspace whose
    per-project checkouts match what Zuul prepared for each build."""

    log = logging.getLogger("zuul.test.cloner")
    workspace_root = None

    def setUp(self):
        """Configure the scheduler with the cloner layout fixture."""
        super(TestCloner, self).setUp()
        self.workspace_root = os.path.join(self.test_root, 'workspace')

        self.config.set('zuul', 'layout_config',
                        'tests/fixtures/layout-cloner.yaml')
        self.sched.reconfigure(self.config)
        self.registerJobs()

    def getWorkspaceRepos(self, projects):
        """Return a dict mapping project name -> git.Repo in the workspace."""
        repos = {}
        for project in projects:
            repos[project] = git.Repo(
                os.path.join(self.workspace_root, project))
        return repos

    def getUpstreamRepos(self, projects):
        """Return a dict mapping project name -> git.Repo in upstream root."""
        repos = {}
        for project in projects:
            repos[project] = git.Repo(
                os.path.join(self.upstream_root, project))
        return repos

    def test_cache_dir(self):
        """Cloning from a cache dir must still point origin at upstream."""
        projects = ['org/project1', 'org/project2']
        cache_root = os.path.join(self.test_root, "cache")
        for project in projects:
            upstream_repo_path = os.path.join(self.upstream_root, project)
            cache_repo_path = os.path.join(cache_root, project)
            git.Repo.clone_from(upstream_repo_path, cache_repo_path)

        self.worker.hold_jobs_in_build = True
        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
        A.addApproval('CRVW', 2)
        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
        self.waitUntilSettled()

        self.assertEqual(1, len(self.builds), "One build is running")

        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
        B.setMerged()

        upstream = self.getUpstreamRepos(projects)
        states = [{
            'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
            'org/project2': str(upstream['org/project2'].commit('master')),
        }]

        for number, build in enumerate(self.builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters['ZUUL_BRANCH'],
                zuul_ref=build.parameters['ZUUL_REF'],
                zuul_url=self.git_root,
                cache_dir=cache_root,
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct' % (project, number))

        work = self.getWorkspaceRepos(projects)
        # project1 is the repo we cloned from the cache; its origin must
        # nevertheless be the upstream URL, not the cache path.
        upstream_repo_path = os.path.join(self.upstream_root, 'org/project1')
        self.assertEqual(
            work['org/project1'].remotes.origin.url,
            upstream_repo_path,
            'workspace repo origin should be upstream, not cache'
        )

        self.worker.hold_jobs_in_build = False
        self.worker.release()
        self.waitUntilSettled()

    def test_one_branch(self):
        """Two dependent changes on master check out the expected commits."""
        self.worker.hold_jobs_in_build = True

        projects = ['org/project1', 'org/project2']
        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
        A.addApproval('CRVW', 2)
        B.addApproval('CRVW', 2)
        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))

        self.waitUntilSettled()

        self.assertEqual(2, len(self.builds), "Two builds are running")

        upstream = self.getUpstreamRepos(projects)
        states = [
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
             },
        ]

        for number, build in enumerate(self.builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters['ZUUL_BRANCH'],
                zuul_ref=build.parameters['ZUUL_REF'],
                zuul_url=self.git_root,
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct' % (project, number))

            shutil.rmtree(self.workspace_root)

        self.worker.hold_jobs_in_build = False
        self.worker.release()
        self.waitUntilSettled()

    def test_multi_branch(self):
        """Changes across branches fall back to each project's own branch."""
        self.worker.hold_jobs_in_build = True
        projects = ['org/project1', 'org/project2',
                    'org/project3', 'org/project4']

        self.create_branch('org/project2', 'stable/havana')
        self.create_branch('org/project4', 'stable/havana')
        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
        B = self.fake_gerrit.addFakeChange('org/project2', 'stable/havana',
                                           'B')
        C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
        A.addApproval('CRVW', 2)
        B.addApproval('CRVW', 2)
        C.addApproval('CRVW', 2)
        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))

        self.waitUntilSettled()

        self.assertEqual(3, len(self.builds), "Three builds are running")

        upstream = self.getUpstreamRepos(projects)
        states = [
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit('master')),
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].
                                 commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].
                                 commit('stable/havana')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit('master')),
             'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
             'org/project4': str(upstream['org/project4'].
                                 commit('master')),
             },
        ]

        for number, build in enumerate(self.builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters['ZUUL_BRANCH'],
                zuul_ref=build.parameters['ZUUL_REF'],
                zuul_url=self.git_root,
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct' % (project, number))
            shutil.rmtree(self.workspace_root)

        self.worker.hold_jobs_in_build = False
        self.worker.release()
        self.waitUntilSettled()

    def test_upgrade(self):
        """Simulate an upgrade job: clone both the old and the new side."""
        self.worker.hold_jobs_in_build = True
        projects = ['org/project1', 'org/project2', 'org/project3',
                    'org/project4', 'org/project5', 'org/project6']

        self.create_branch('org/project2', 'stable/havana')
        self.create_branch('org/project3', 'stable/havana')
        self.create_branch('org/project4', 'stable/havana')
        self.create_branch('org/project5', 'stable/havana')
        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
        C = self.fake_gerrit.addFakeChange('org/project3', 'stable/havana',
                                           'C')
        D = self.fake_gerrit.addFakeChange('org/project3', 'master', 'D')
        E = self.fake_gerrit.addFakeChange('org/project4', 'stable/havana',
                                           'E')
        A.addApproval('CRVW', 2)
        B.addApproval('CRVW', 2)
        C.addApproval('CRVW', 2)
        D.addApproval('CRVW', 2)
        E.addApproval('CRVW', 2)
        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(E.addApproval('APRV', 1))

        self.waitUntilSettled()

        self.assertEqual(5, len(self.builds), "Five builds are running")

        # Check the old side of the upgrade first
        upstream = self.getUpstreamRepos(projects)
        states = [
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit(
                                 'stable/havana')),
             'org/project3': str(upstream['org/project3'].commit(
                                 'stable/havana')),
             'org/project4': str(upstream['org/project4'].commit(
                                 'stable/havana')),
             'org/project5': str(upstream['org/project5'].commit(
                                 'stable/havana')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit(
                                 'stable/havana')),
             'org/project3': str(upstream['org/project3'].commit(
                                 'stable/havana')),
             'org/project4': str(upstream['org/project4'].commit(
                                 'stable/havana')),
             'org/project5': str(upstream['org/project5'].commit(
                                 'stable/havana')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit(
                                 'stable/havana')),
             'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
             'org/project4': str(upstream['org/project4'].commit(
                                 'stable/havana')),
             'org/project5': str(upstream['org/project5'].commit(
                                 'stable/havana')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit(
                                 'stable/havana')),
             'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
             'org/project4': str(upstream['org/project4'].commit(
                                 'stable/havana')),
             'org/project5': str(upstream['org/project5'].commit(
                                 'stable/havana')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit(
                                 'stable/havana')),
             'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
             'org/project4': self.builds[4].parameters['ZUUL_COMMIT'],
             'org/project5': str(upstream['org/project5'].commit(
                                 'stable/havana')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
        ]

        for number, build in enumerate(self.builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters['ZUUL_BRANCH'],
                zuul_ref=build.parameters['ZUUL_REF'],
                zuul_url=self.git_root,
                branch='stable/havana',  # Old branch for upgrade
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct on old side of upgrade' %
                                 (project, number))
            shutil.rmtree(self.workspace_root)

        # Check the new side of the upgrade
        states = [
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit('master')),
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project3': self.builds[3].parameters['ZUUL_COMMIT'],
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project3': self.builds[3].parameters['ZUUL_COMMIT'],
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
        ]

        for number, build in enumerate(self.builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters['ZUUL_BRANCH'],
                zuul_ref=build.parameters['ZUUL_REF'],
                zuul_url=self.git_root,
                branch='master',  # New branch for upgrade
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                # Note: this loop checks the NEW side; the message used to
                # say "old side" (copy-paste error), which made failures
                # here indistinguishable from failures in the loop above.
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct on new side of upgrade' %
                                 (project, number))
            shutil.rmtree(self.workspace_root)

        self.worker.hold_jobs_in_build = False
        self.worker.release()
        self.waitUntilSettled()

    def test_project_override(self):
        """project_branches overrides must win over the fallback branch."""
        self.worker.hold_jobs_in_build = True
        projects = ['org/project1', 'org/project2', 'org/project3',
                    'org/project4', 'org/project5', 'org/project6']

        self.create_branch('org/project3', 'stable/havana')
        self.create_branch('org/project4', 'stable/havana')
        self.create_branch('org/project6', 'stable/havana')
        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
        C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
        D = self.fake_gerrit.addFakeChange('org/project3', 'stable/havana',
                                           'D')
        A.addApproval('CRVW', 2)
        B.addApproval('CRVW', 2)
        C.addApproval('CRVW', 2)
        D.addApproval('CRVW', 2)
        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))

        self.waitUntilSettled()

        self.assertEqual(4, len(self.builds), "Four builds are running")

        upstream = self.getUpstreamRepos(projects)
        states = [
            {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit('master')),
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project2': str(upstream['org/project2'].commit('master')),
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[2].parameters['ZUUL_COMMIT'],
             'org/project3': str(upstream['org/project3'].commit('master')),
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit('master')),
             },
            {'org/project1': self.builds[1].parameters['ZUUL_COMMIT'],
             'org/project2': self.builds[2].parameters['ZUUL_COMMIT'],
             'org/project3': self.builds[3].parameters['ZUUL_COMMIT'],
             'org/project4': str(upstream['org/project4'].commit('master')),
             'org/project5': str(upstream['org/project5'].commit('master')),
             'org/project6': str(upstream['org/project6'].commit(
                                 'stable/havana')),
             },
        ]

        for number, build in enumerate(self.builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters['ZUUL_BRANCH'],
                zuul_ref=build.parameters['ZUUL_REF'],
                zuul_url=self.git_root,
                project_branches={'org/project4': 'master'},
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct' % (project, number))
            shutil.rmtree(self.workspace_root)

        self.worker.hold_jobs_in_build = False
        self.worker.release()
        self.waitUntilSettled()

    def test_periodic(self):
        """Timer-triggered jobs (no ZUUL_BRANCH/REF) clone the given branch."""
        self.worker.hold_jobs_in_build = True
        self.create_branch('org/project', 'stable/havana')
        self.config.set('zuul', 'layout_config',
                        'tests/fixtures/layout-timer.yaml')
        self.sched.reconfigure(self.config)
        self.registerJobs()

        # The pipeline triggers every second, so we should have seen
        # several by now.
        time.sleep(5)
        self.waitUntilSettled()

        builds = self.builds[:]

        self.worker.hold_jobs_in_build = False
        # Stop queuing timer triggered jobs so that the assertions
        # below don't race against more jobs being queued.
        self.config.set('zuul', 'layout_config',
                        'tests/fixtures/layout-no-timer.yaml')
        self.sched.reconfigure(self.config)
        self.registerJobs()
        self.worker.release()
        self.waitUntilSettled()

        projects = ['org/project']

        self.assertEqual(2, len(builds), "Two builds are running")

        upstream = self.getUpstreamRepos(projects)
        states = [
            {'org/project':
                str(upstream['org/project'].commit('stable/havana')),
             },
            {'org/project':
                str(upstream['org/project'].commit('stable/havana')),
             },
        ]

        for number, build in enumerate(builds):
            self.log.debug("Build parameters: %s", build.parameters)
            cloner = zuul.lib.cloner.Cloner(
                git_base_url=self.upstream_root,
                projects=projects,
                workspace=self.workspace_root,
                zuul_branch=build.parameters.get('ZUUL_BRANCH', None),
                zuul_ref=build.parameters.get('ZUUL_REF', None),
                zuul_url=self.git_root,
                branch='stable/havana',
            )
            cloner.execute()
            work = self.getWorkspaceRepos(projects)
            state = states[number]

            for project in projects:
                self.assertEqual(state[project],
                                 str(work[project].commit('HEAD')),
                                 'Project %s commit for build %s should '
                                 'be correct' % (project, number))

            shutil.rmtree(self.workspace_root)

        self.worker.hold_jobs_in_build = False
        self.worker.release()
        self.waitUntilSettled()

    def test_post_checkout(self):
        """A post job checks out the exact zuul_newrev commit."""
        project = "org/project"
        path = os.path.join(self.upstream_root, project)
        repo = git.Repo(path)
        repo.head.reference = repo.heads['master']
        commits = []
        for i in range(0, 3):
            commits.append(self.create_commit(project))
        newRev = commits[1]

        cloner = zuul.lib.cloner.Cloner(
            git_base_url=self.upstream_root,
            projects=[project],
            workspace=self.workspace_root,
            zuul_branch=None,
            zuul_ref='master',
            zuul_url=self.git_root,
            zuul_project=project,
            zuul_newrev=newRev,
        )
        cloner.execute()
        repos = self.getWorkspaceRepos([project])
        cloned_sha = repos[project].rev_parse('HEAD').hexsha
        self.assertEqual(newRev, cloned_sha)

    def test_post_and_master_checkout(self):
        """zuul_newrev applies only to zuul_project; others get master."""
        project = "org/project1"
        master_project = "org/project2"
        path = os.path.join(self.upstream_root, project)
        repo = git.Repo(path)
        repo.head.reference = repo.heads['master']
        commits = []
        for i in range(0, 3):
            commits.append(self.create_commit(project))
        newRev = commits[1]

        cloner = zuul.lib.cloner.Cloner(
            git_base_url=self.upstream_root,
            projects=[project, master_project],
            workspace=self.workspace_root,
            zuul_branch=None,
            zuul_ref='master',
            zuul_url=self.git_root,
            zuul_project=project,
            zuul_newrev=newRev
        )
        cloner.execute()
        repos = self.getWorkspaceRepos([project, master_project])
        cloned_sha = repos[project].rev_parse('HEAD').hexsha
        self.assertEqual(newRev, cloned_sha)
        self.assertEqual(
            repos[master_project].rev_parse('HEAD').hexsha,
            repos[master_project].rev_parse('master').hexsha)

    def test_override_zuul_refs(self):
        """getOverrideRefs builds override/fallback refs from the branch.

        The second cloner uses a zuul_branch ('7d2b') that is also a
        substring of the change id, guarding against naive string
        replacement in the ref.
        """
        cloner = zuul.lib.cloner.Cloner(
            git_base_url=self.upstream_root,
            projects=['project1'],
            workspace=self.workspace_root,
            zuul_branch='7.2',
            zuul_ref='refs/zuul/7.2/Z86a9b9068320423db7bb882627187d2b',
            zuul_url=self.git_root,
            branch='abcd'
        )
        (override_zuul_ref,
         fallback_zuul_ref) = cloner.getOverrideRefs('abcd', 'master')
        self.assertEqual(override_zuul_ref,
                         'refs/zuul/abcd/Z86a9b9068320423db7bb882627187d2b')
        self.assertEqual(fallback_zuul_ref,
                         'refs/zuul/master/Z86a9b9068320423db7bb882627187d2b')

        cloner2 = zuul.lib.cloner.Cloner(
            git_base_url=self.upstream_root,
            projects=['project1'],
            workspace=self.workspace_root,
            zuul_branch='7d2b',
            zuul_ref='refs/zuul/7d2b/Z86a9b9068320423db7bb882627187d2b',
            zuul_url=self.git_root,
            branch='abcd'
        )
        (override_zuul_ref,
         fallback_zuul_ref) = cloner2.getOverrideRefs('abcd', 'master')
        self.assertEqual(override_zuul_ref,
                         'refs/zuul/abcd/Z86a9b9068320423db7bb882627187d2b')
        self.assertEqual(fallback_zuul_ref,
                         'refs/zuul/master/Z86a9b9068320423db7bb882627187d2b')

    def test_remote_head(self):
        """Simulate the state when the zuul-cloner would be aborted after
        Repo.reset() before checking out sensible reference."""
        project = 'org/project'

        # create additional branch in the upstream repo
        upstream = git.Repo(
            os.path.join(self.upstream_root, project)
        )
        extra_branch = upstream.create_head('extra_branch')

        # fetch the upstream to the workspace
        cloner = zuul.lib.cloner.Cloner(
            git_base_url=self.upstream_root,
            projects=[project],
            workspace=self.workspace_root,
            zuul_branch='master',
            zuul_ref='refs/heads/master',
            zuul_url=self.git_root,
        )
        cloner.execute()

        # simulate zuul-cloner falling in the middle and keeping HEAD pointing
        # to refs/remotes/origin/HEAD
        repo = git.Repo(
            os.path.join(self.workspace_root, project)
        )
        repo.head.reference = repo.remotes.origin.refs['HEAD']

        # remove the additional upstream branch
        upstream.delete_head(extra_branch)

        # fetch once again - this triggers the Repo.prune()
        cloner.execute()
| |
"""Config flow for the Huawei LTE platform."""
from collections import OrderedDict
import logging
from typing import Any, Dict, Optional
from urllib.parse import urlparse
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
from huawei_lte_api.Connection import Connection
from huawei_lte_api.exceptions import (
LoginErrorPasswordWrongException,
LoginErrorUsernamePasswordOverrunException,
LoginErrorUsernamePasswordWrongException,
LoginErrorUsernameWrongException,
ResponseErrorException,
)
from requests.exceptions import Timeout
from url_normalize import url_normalize
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.core import callback
from .const import CONNECTION_TIMEOUT, DEFAULT_DEVICE_NAME, DEFAULT_NOTIFY_SERVICE_NAME
# see https://github.com/PyCQA/pylint/issues/3202 about the DOMAIN's pylint issue
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
class ConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Huawei LTE config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(
    config_entry: config_entries.ConfigEntry,
) -> "OptionsFlowHandler":
    """Return the options flow handler for the given config entry."""
    handler = OptionsFlowHandler(config_entry)
    return handler
async def _async_show_user_form(
self,
user_input: Optional[Dict[str, Any]] = None,
errors: Optional[Dict[str, str]] = None,
) -> Dict[str, Any]:
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
OrderedDict(
(
(
vol.Required(
CONF_URL,
default=user_input.get(
CONF_URL,
# https://github.com/PyCQA/pylint/issues/3167
self.context.get( # pylint: disable=no-member
CONF_URL, ""
),
),
),
str,
),
(
vol.Optional(
CONF_USERNAME, default=user_input.get(CONF_USERNAME, "")
),
str,
),
(
vol.Optional(
CONF_PASSWORD, default=user_input.get(CONF_PASSWORD, "")
),
str,
),
)
)
),
errors=errors or {},
)
async def async_step_import(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle import initiated config flow."""
return await self.async_step_user(user_input)
def _already_configured(self, user_input: Dict[str, Any]) -> bool:
"""See if we already have a router matching user input configured."""
existing_urls = {
url_normalize(entry.data[CONF_URL], default_scheme="http")
for entry in self._async_current_entries()
}
return user_input[CONF_URL] in existing_urls
async def async_step_user(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle user initiated config flow."""
if user_input is None:
return await self._async_show_user_form()
errors = {}
# Normalize URL
user_input[CONF_URL] = url_normalize(
user_input[CONF_URL], default_scheme="http"
)
if "://" not in user_input[CONF_URL]:
errors[CONF_URL] = "invalid_url"
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
if self._already_configured(user_input):
return self.async_abort(reason="already_configured")
conn: Optional[Connection] = None
def logout() -> None:
if isinstance(conn, AuthorizedConnection):
try:
conn.user.logout()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not logout", exc_info=True)
def try_connect(user_input: Dict[str, Any]) -> Connection:
"""Try connecting with given credentials."""
username = user_input.get(CONF_USERNAME)
password = user_input.get(CONF_PASSWORD)
conn: Connection
if username or password:
conn = AuthorizedConnection(
user_input[CONF_URL],
username=username,
password=password,
timeout=CONNECTION_TIMEOUT,
)
else:
try:
conn = AuthorizedConnection(
user_input[CONF_URL],
username="",
password="",
timeout=CONNECTION_TIMEOUT,
)
user_input[CONF_USERNAME] = ""
user_input[CONF_PASSWORD] = ""
except ResponseErrorException:
_LOGGER.debug(
"Could not login with empty credentials, proceeding unauthenticated",
exc_info=True,
)
conn = Connection(user_input[CONF_URL], timeout=CONNECTION_TIMEOUT)
del user_input[CONF_USERNAME]
del user_input[CONF_PASSWORD]
return conn
def get_router_title(conn: Connection) -> str:
"""Get title for router."""
title = None
client = Client(conn)
try:
info = client.device.basic_information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.basic_information", exc_info=True)
else:
title = info.get("devicename")
if not title:
try:
info = client.device.information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.information", exc_info=True)
else:
title = info.get("DeviceName")
return title or DEFAULT_DEVICE_NAME
assert self.hass is not None
try:
conn = await self.hass.async_add_executor_job(try_connect, user_input)
except LoginErrorUsernameWrongException:
errors[CONF_USERNAME] = "incorrect_username"
except LoginErrorPasswordWrongException:
errors[CONF_PASSWORD] = "incorrect_password"
except LoginErrorUsernamePasswordWrongException:
errors[CONF_USERNAME] = "invalid_auth"
except LoginErrorUsernamePasswordOverrunException:
errors["base"] = "login_attempts_exceeded"
except ResponseErrorException:
_LOGGER.warning("Response error", exc_info=True)
errors["base"] = "response_error"
except Timeout:
_LOGGER.warning("Connection timeout", exc_info=True)
errors[CONF_URL] = "connection_timeout"
except Exception: # pylint: disable=broad-except
_LOGGER.warning("Unknown error connecting to device", exc_info=True)
errors[CONF_URL] = "unknown"
if errors:
await self.hass.async_add_executor_job(logout)
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
# pylint: disable=no-member
title = self.context.get("title_placeholders", {}).get(
CONF_NAME
) or await self.hass.async_add_executor_job(get_router_title, conn)
await self.hass.async_add_executor_job(logout)
return self.async_create_entry(title=title, data=user_input)
async def async_step_ssdp( # type: ignore # mypy says signature incompatible with supertype, but it's the same?
self, discovery_info: Dict[str, Any]
) -> Dict[str, Any]:
"""Handle SSDP initiated config flow."""
await self.async_set_unique_id(discovery_info[ssdp.ATTR_UPNP_UDN])
self._abort_if_unique_id_configured()
# Attempt to distinguish from other non-LTE Huawei router devices, at least
# some ones we are interested in have "Mobile Wi-Fi" friendlyName.
if "mobile" not in discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "").lower():
return self.async_abort(reason="not_huawei_lte")
# https://github.com/PyCQA/pylint/issues/3167
url = self.context[CONF_URL] = url_normalize( # pylint: disable=no-member
discovery_info.get(
ssdp.ATTR_UPNP_PRESENTATION_URL,
f"http://{urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname}/",
)
)
if any(
url == flow["context"].get(CONF_URL) for flow in self._async_in_progress()
):
return self.async_abort(reason="already_in_progress")
user_input = {CONF_URL: url}
if self._already_configured(user_input):
return self.async_abort(reason="already_configured")
# pylint: disable=no-member
self.context["title_placeholders"] = {
CONF_NAME: discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME)
}
return await self._async_show_user_form(user_input)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Huawei LTE options flow."""

    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Handle options flow."""
        # Recipients are persisted as a list, but edited as a comma
        # separated string in the UI.
        if user_input is not None:
            # Merge on top of the existing options so markers such as
            # *_from_yaml survive the update.
            merged = {**self.config_entry.options, **user_input}
            recipients = merged[CONF_RECIPIENT]
            if not isinstance(recipients, list):
                merged[CONF_RECIPIENT] = [
                    part.strip() for part in recipients.split(",")
                ]
            return self.async_create_entry(title="", data=merged)

        current = self.config_entry.options
        schema = vol.Schema(
            {
                vol.Optional(
                    CONF_NAME,
                    default=current.get(CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME),
                ): str,
                vol.Optional(
                    CONF_RECIPIENT,
                    default=", ".join(current.get(CONF_RECIPIENT, [])),
                ): str,
            }
        )
        return self.async_show_form(step_id="init", data_schema=schema)
| |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
from generate import generate
# Generation bounds: FastCreate<> specializations cover up to MAX_ARGS generic
# parameters; Invoke helpers / instruction classes cover up to MAX_HELPERS args.
MAX_ARGS = 3
MAX_HELPERS = 10
# CLR primitive TypeCodes that get dedicated strongly-typed instruction cases.
TYPE_CODE_TYPES = ['Int16', 'Int32', 'Int64', 'Boolean', 'Char', 'Byte', 'Decimal', 'DateTime', 'Double', 'Single', 'UInt16', 'UInt32', 'UInt64', 'String', 'SByte']
def get_args(i):
    """Return the argument names ['arg0', ..., 'arg<i-1>']."""
    return ['arg%d' % n for n in range(i)]
def get_arr_args(i):
    """Return the array-indexing expressions ['args[0]', ..., 'args[<i-1>]']."""
    return ['args[%d]' % n for n in range(i)]
def get_object_args(i):
    """Return C# parameter declarations ['object arg0', ..., 'object arg<i-1>']."""
    return ['object arg%d' % n for n in range(i)]
def get_type_names(i):
    """Return the generic type parameter names ['T0', ..., 'T<i-1>'].

    The original special-cased i == 1, but the general comprehension already
    yields ['T0'] for range(1), so the extra branch was dead weight and has
    been removed; behavior is unchanged for every i.
    """
    return ['T' + str(x) for x in range(i)]
def get_func_type_names(i):
    """Type names for a Func signature: i-1 argument types plus 'TRet'."""
    names = get_type_names(i - 1)
    names.append('TRet')
    return names
def get_cast_args(i):
    """C# expressions unboxing each object argN to TN (default(TN) on null)."""
    pairs = zip(get_args(i), get_type_names(i))
    return ['%s != null ? (%s)%s : default(%s)' % (arg, typ, arg, typ)
            for arg, typ in pairs]
def get_type_params(i):
    """Generic parameter list '<T0, ...>' for arity i, or '' when i == 0."""
    if i == 0:
        return ''
    return '<%s>' % ', '.join(get_type_names(i))
def get_func_type_params(i):
    """Generic parameter list for a Func signature, or '' when i == 0."""
    if i == 0:
        return ''
    return '<%s>' % ', '.join(get_func_type_names(i))
def gen_invoke_instance(cw):
    """Emit C# InvokeInstance(object, params object[]): dispatches on
    args.Length to the strongly-typed Invoke overloads, passing the instance
    as the first argument (hence only MAX_HELPERS-1 cases)."""
    cw.enter_block('public virtual object InvokeInstance(object instance, params object[] args)')
    cw.enter_block('switch(args.Length)')
    for i in range(MAX_HELPERS-1):
        cw.write('case %d: return Invoke(%s);' % (i, ', '.join(['instance'] + get_arr_args(i))))
    cw.write('default: throw new InvalidOperationException();')
    cw.exit_block()  # switch
    cw.exit_block()  # function
    cw.write('')
def gen_invoke(cw):
    """Emit C# Invoke(params object[]): dispatches on args.Length to the
    strongly-typed Invoke overloads for 0..MAX_HELPERS-1 arguments."""
    cw.enter_block('public virtual object Invoke(params object[] args)')
    cw.enter_block('switch(args.Length)')
    for i in range(MAX_HELPERS):
        cw.write('case %d: return Invoke(%s);' % (i, ', '.join(get_arr_args(i))))
    cw.write('default: throw new InvalidOperationException();')
    cw.exit_block()  # switch
    cw.exit_block()  # function
    cw.write('')
def gen_invoke_base_methods(cw):
    """Emit the virtual Invoke(object, ...) overloads; a subclass overrides
    the one matching its arity, every other overload throws."""
    for i in range(MAX_HELPERS):
        cw.write('public virtual object Invoke(%s) { throw new InvalidOperationException(); }' % (', '.join(get_object_args(i)), ))
    cw.write('')
def gen_fast_creation(cw):
    """Emit the FastCreate<...> family: reflection-free construction of the
    strongly-typed CallInstruction for methods whose whole signature uses
    primitive TypeCodes, falling back to SlowCreate otherwise.

    Note: the typos in the emitted C# doc comment ("siganture", trailing
    spaces) are part of the historical generated output and left untouched.
    """
    cw.write('/// <summary>')
    cw.write('/// Fast creation works if we have a known primitive types for the entire')
    cw.write('/// method siganture. If we have any non-primitive types then FastCreate')
    cw.write('/// falls back to SlowCreate which works for all types.')
    cw.write('/// ')
    cw.write('/// Fast creation is fast because it avoids using reflection (MakeGenericType')
    cw.write('/// and Activator.CreateInstance) to create the types. It does this through')
    cw.write('/// calling a series of generic methods picking up each strong type of the')
    cw.write('/// signature along the way. When it runs out of types it news up the ')
    cw.write('/// appropriate CallInstruction with the strong-types that have been built up.')
    cw.write('/// ')
    cw.write('/// One relaxation is that for return types which are non-primitive types')
    cw.write('/// we can fallback to object due to relaxed delegates.')
    cw.write('/// </summary>')
    for i in range(MAX_ARGS):
        cw.enter_block('private static CallInstruction FastCreate%s(MethodInfo target, ParameterInfo[] pi)' % get_type_params(i))
        cw.write('Type t = TryGetParameterOrReturnType(target, pi, %d);' % (i, ))
        cw.enter_block('if (t == null)')
        typeArgs = ', '.join(get_type_names(i))
        if i == 0:
            cw.write('return new ActionCallInstruction(target);')
        else:
            cw.enter_block('if (target.ReturnType == typeof(void))')
            cw.write('return new ActionCallInstruction<%s>(target);' % (typeArgs, ))
            cw.exit_block()
            cw.write('return new FuncCallInstruction<%s>(target);' % (typeArgs, ))
        cw.exit_block()
        cw.write('')
        cw.write('if (t.IsEnum) return SlowCreate(target, pi);')
        cw.enter_block('switch (t.GetTypeCode())')
        cw.enter_block('case TypeCode.Object:')
        if i == MAX_ARGS-1:
            # Deepest recursion level: no further FastCreate to chain to.
            cw.write('Debug.Assert(pi.Length == %d);' % (MAX_ARGS-1))
            cw.write('if (t.IsValueType) goto default;')
            cw.write('')
            cw.write('return new FuncCallInstruction<%s>(target);' % (', '.join(get_type_names(i) + ['Object']), ) )
        else:
            cw.enter_block('if (t != typeof(object) && (IndexIsNotReturnType(%d, target, pi) || t.IsValueType))' % (i, ))
            cw.write("// if we're on the return type relaxed delegates makes it ok to use object")
            cw.write("goto default;")
            cw.exit_block()  # if
            cw.write('return FastCreate<%s>(target, pi);' % (', '.join(get_type_names(i) + ['Object']), ) )
        cw.exit_block()  # case
        for typeName in TYPE_CODE_TYPES:
            if i == MAX_ARGS-1:
                cw.write('case TypeCode.%s: return new FuncCallInstruction<%s>(target);' % (typeName, ', '.join(get_type_names(i) + [typeName])))
            else:
                cw.write('case TypeCode.%s: return FastCreate<%s>(target, pi);' % (typeName, ', '.join(get_type_names(i) + [typeName])))
        cw.write('default: return SlowCreate(target, pi);')
        cw.exit_block()  # switch
        cw.exit_block()  # method
        cw.write('')
def get_get_helper_type(cw):
    """Emit GetHelperType: maps a MethodInfo plus closed type array to the
    matching ActionCallInstruction<> (void return) or FuncCallInstruction<>
    generic instantiation."""
    cw.enter_block('private static Type GetHelperType(MethodInfo info, Type[] arrTypes)')
    cw.write('Type t;')
    cw.enter_block('if (info.ReturnType == typeof(void))')
    cw.enter_block('switch (arrTypes.Length)')
    for i in range(MAX_HELPERS):
        if i == 0:
            cw.write('case %d: t = typeof(ActionCallInstruction); break;' % (i, ))
        else:
            # ','*(i-1) builds the open-generic arity marker, e.g. "<,,>".
            cw.write('case %d: t = typeof(ActionCallInstruction<%s>).MakeGenericType(arrTypes); break;' % (i, ','*(i-1)))
    cw.write('default: throw new InvalidOperationException();')
    cw.exit_block()  # switch
    cw.else_block()
    cw.enter_block('switch (arrTypes.Length)')
    for i in range(1, MAX_HELPERS+1):
        cw.write('case %d: t = typeof(FuncCallInstruction<%s>).MakeGenericType(arrTypes); break;' % (i, ','*(i-1)) )
    cw.write('default: throw new InvalidOperationException();')
    cw.exit_block()  # switch
    cw.exit_block()  # else/if
    cw.write('return t;')
    cw.exit_block()  # method
def get_explicit_caching(cw):
    """Emit CacheFunc/CacheAction helpers that pre-populate the instruction
    cache with a strongly-typed delegate, keyed by its MethodInfo."""
    # s offsets the generic parameter count: Func<T0..Tn-1, TRet> carries one
    # more type parameter than an Action of the same arity.
    for delegate, type_params_maker, s in [('Func', get_func_type_params, 1), ('Action', get_type_params, 0)]:
        for i in range(MAX_HELPERS):
            type_params = type_params_maker(s + i)
            cw.enter_block('public static MethodInfo Cache%s%s(%s%s method)' % (delegate, type_params, delegate, type_params))
            cw.write('var info = method.GetMethodInfo();')
            cw.enter_block('lock (_cache)')
            cw.write('_cache[info] = new %sCallInstruction%s(method);' % (delegate, type_params))
            cw.exit_block()
            cw.write('return info;')
            cw.exit_block()
            cw.write('')
def gen_call_instruction(cw):
    """Emit the generated portion of the partial CallInstruction class."""
    cw.enter_block('public partial class CallInstruction')
    cw.write('private const int MaxHelpers = ' + str(MAX_HELPERS) + ';')
    cw.write('private const int MaxArgs = ' + str(MAX_ARGS) + ';')
    cw.write('')
    gen_invoke_instance(cw)
    gen_invoke(cw)
    gen_invoke_base_methods(cw)
    gen_fast_creation(cw)
    get_get_helper_type(cw)
    get_explicit_caching(cw)
    cw.exit_block()
    cw.write('')
def gen_action_call_instruction(cw, i):
    """Emit ActionCallInstruction<T0..Ti-1>: wraps a void-returning delegate
    of arity i; the object-typed Invoke returns null."""
    type_params = get_type_params(i)
    cw.enter_block('internal sealed class ActionCallInstruction%s : CallInstruction' % type_params)
    cw.write('private readonly Action%s _target;' % type_params)
    # properties
    cw.write('public override MethodInfo Info { get { return _target.GetMethodInfo(); } }')
    cw.write('public override int ArgumentCount { get { return %d; } }' % (i))
    cw.write('')
    # ctor(delegate)
    cw.enter_block('public ActionCallInstruction(Action%s target)' % type_params)
    cw.write('_target = target;')
    cw.exit_block()
    cw.write('')
    # ctor(info)
    cw.enter_block('public ActionCallInstruction(MethodInfo target)')
    cw.write('_target = (Action%s)target.CreateDelegate(typeof(Action%s));' % (type_params, type_params))
    cw.exit_block()
    cw.write('')
    # invoke
    cw.enter_block('public override object Invoke(%s)' % (', '.join(get_object_args(i)), ))
    cw.write('_target(%s);' % (', '.join(get_cast_args(i)), ))
    cw.write('return null;')
    cw.exit_block()
    cw.write('')
    # run
    gen_interpreted_run(cw, i, False)
    cw.exit_block()
    cw.write('')
def gen_func_call_instruction(cw, i):
    """Emit FuncCallInstruction<T0..Ti-2, TRet>: wraps a value-returning
    delegate. Here i counts type parameters *including* TRet, so the call
    arity is i - 1."""
    type_params = get_func_type_params(i)
    cw.enter_block('internal sealed class FuncCallInstruction%s : CallInstruction' % type_params)
    cw.write('private readonly Func%s _target;' % type_params)
    # properties
    cw.write('public override MethodInfo Info { get { return _target.GetMethodInfo(); } }')
    cw.write('public override int ArgumentCount { get { return %d; } }' % (i - 1))
    cw.write('')
    # ctor(delegate)
    cw.enter_block('public FuncCallInstruction(Func%s target)' % type_params)
    cw.write('_target = target;')
    cw.exit_block()
    cw.write('')
    # ctor(info)
    cw.enter_block('public FuncCallInstruction(MethodInfo target)')
    cw.write('_target = (Func%s)target.CreateDelegate(typeof(Func%s));' % (type_params, type_params))
    cw.exit_block()
    cw.write('')
    # invoke
    cw.enter_block('public override object Invoke(%s)' % (', '.join(get_object_args(i-1)), ))
    cw.write('return _target(%s);' % (', '.join(get_cast_args(i-1)), ))
    cw.exit_block()
    cw.write('')
    # run
    gen_interpreted_run(cw, i - 1, True)
    cw.exit_block()
    cw.write('')
def gen_interpreted_run(cw, n, isFunc):
    """Emit Run(InterpretedFrame): reads n typed arguments from the top of
    the interpreter stack, calls _target, and for Func stores the result in
    the deepest argument slot (net stack delta n-1 instead of n)."""
    cw.enter_block('public override int Run(InterpretedFrame frame)')
    args = ''
    for i in range(0, n):
        if i > 0: args += ', '
        args += '(T%d)frame.Data[frame.StackIndex - %d]' % (i, n - i)
    if isFunc:
        call = 'frame.Data[frame.StackIndex - %d] = _target(%s);' % (n, args)
        si = n - 1
    else:
        call = '_target(%s);' % args
        si = n
    cw.write(call)
    cw.write('frame.StackIndex -= %d;' % si)
    cw.write('return 1;')
    cw.exit_block()
def gen_action_call_instructions(cw):
    """Emit ActionCallInstruction classes for arities 0..MAX_HELPERS-1."""
    for i in range(MAX_HELPERS):
        gen_action_call_instruction(cw, i)
def gen_func_call_instructions(cw):
    """Emit FuncCallInstruction classes; i counts type params incl. TRet."""
    for i in range(1, MAX_HELPERS+1):
        gen_func_call_instruction(cw, i)
def gen_slow_caller(cw):
    """Emit the reflection-based MethodInfoCallInstruction overloads that
    forward to InvokeWorker for arities 0..MAX_ARGS-1."""
    cw.enter_block('internal sealed partial class MethodInfoCallInstruction : CallInstruction')
    for i in range(MAX_ARGS):
        cw.enter_block('public override object Invoke(%s)' % (', '.join(get_object_args(i)), ))
        cw.write('return InvokeWorker(%s);' % (', '.join(get_args(i)), ))
        cw.exit_block()
    cw.exit_block()
def gen_all(cw):
    """Emit every generated section of the reflected-caller source file."""
    gen_call_instruction(cw)
    gen_action_call_instructions(cw)
    gen_func_call_instructions(cw)
    gen_slow_caller(cw)
def main():
    """Drive generate(): splice gen_all's output into the source region
    marked "Reflected Caller"."""
    return generate(
        ("Reflected Caller", gen_all),
    )

if __name__ == "__main__":
    main()
| |
# Copyright (c) 2020 Rocky Bernstein
from uncompyle6.scanners.tok import Token
# Recognized (lhs, rhs) grammar reductions for the "ifelsestmt" family.
# Names prefixed with "\e_" denote epsilon-expanded nonterminals.
IFELSE_STMT_RULES = frozenset(
    [
        ("ifelsestmt",
         ("testexpr", "c_stmts_opt", "jump_forward_else", "else_suite", "_come_froms")),
        ("ifelsestmt",
         ("testexpr", "c_stmts_opt", "jump_forward_else", "else_suite", "\\e__come_froms")),
        ("ifelsestmtl",
         ("testexpr", "c_stmts_opt", "jump_forward_else", "else_suitec")),
        ("ifelsestmtc",
         ("testexpr", "c_stmts_opt", "jump_forward_else", "else_suitec", "\\e__come_froms")),
        ("ifelsestmtc",
         ("testexpr", "c_stmts_opt", "jump_absolute_else", "else_suitec")),
        ("ifelsestmt",
         ("testexpr", "c_stmts_opt", "jf_cfs", "else_suite", "\\e_opt_come_from_except")),
        ("ifelsestmt",
         ("testexpr", "c_stmts_opt", "JUMP_FORWARD", "else_suite", "come_froms")),
        ("ifelsestmt",
         ("testexpr", "c_stmts", "come_froms", "else_suite", "come_froms")),
        ("ifelsestmt",
         ("testexpr", "c_stmts_opt", "jf_cfs", "else_suite", "opt_come_from_except")),
        ("ifelsestmt",
         ("testexpr", "c_stmts_opt", "jf_cf_pop", "else_suite")),
        ("ifelsestmt",
         ("testexpr", "stmts", "jf_cfs", "else_suite_opt", "opt_come_from_except")),
        ("ifelsestmt",
         ("testexpr", "stmts", "jf_cfs", "\\e_else_suite_opt", "\\e_opt_come_from_except")),
        ("ifelsestmt",
         ("testexpr", "stmts", "jf_cfs", "\\e_else_suite_opt", "opt_come_from_except")),
    ]
)
def ifelsestmt(self, lhs, n, rule, ast, tokens, first, last):
    """Reduction check for the "ifelsestmt" family of grammar rules.

    Returns True when the proposed reduction is *invalid* (the parser must
    reject it), False when it is plausible. `ast` is the candidate parse
    subtree and tokens[first:last] the instruction span it would cover.
    """
    if (last + 1) < n and tokens[last + 1] == "COME_FROM_LOOP" and lhs != "ifelsestmtc":
        # ifelsestmt jumped outside of loop. No good.
        return True
    # print("XXX", first, last)
    # for t in range(first, last):
    #     print(tokens[t])
    # print("=" * 30)
    if rule not in IFELSE_STMT_RULES:
        # print("XXX", rule)
        return False
    # Avoid if/else where the "then" is a "raise_stmt1" for an
    # assert statement. Parse this as an "assert" instead.
    stmts = ast[1]
    if stmts in ("c_stmts",) and len(stmts) == 1:
        raise_stmt1 = stmts[0]
        if (
            raise_stmt1 == "raise_stmt1" and
            raise_stmt1[0] in ("LOAD_ASSERT",)
        ):
            return True
    # Make sure all of the "come froms" offset at the
    # end of the "if" come from somewhere inside the "if".
    # Since the come_froms are ordered so that lowest
    # offset COME_FROM is last, it is sufficient to test
    # just the last one.
    if len(ast) == 5:
        end_come_froms = ast[-1]
        if end_come_froms.kind != "else_suite" and self.version >= 3.0:
            if end_come_froms == "opt_come_from_except" and len(end_come_froms) > 0:
                end_come_froms = end_come_froms[0]
            if not isinstance(end_come_froms, Token):
                if len(end_come_froms):
                    return tokens[first].offset > end_come_froms[-1].attr
            elif tokens[first].offset > end_come_froms.attr:
                return True
        # FIXME: There is weirdness in the grammar we need to work around.
        # we need to clean up the grammar.
        if self.version < 3.0:
            last_token = ast[-1]
        else:
            last_token = tokens[last]
        if last_token == "COME_FROM" and tokens[first].offset > last_token.attr:
            if self.version < 3.0 and self.insts[self.offset2inst_index[last_token.attr]].opname != "SETUP_LOOP":
                return True
    testexpr = ast[0]
    # Check that the condition portion of the "if"
    # jumps to the "else" part.
    if testexpr[0] in ("testtrue", "testfalse"):
        if_condition = testexpr[0]
        else_suite = ast[3]
        assert else_suite.kind.startswith("else_suite")
        if len(if_condition) > 1 and if_condition[1].kind.startswith("jmp_"):
            if last == n:
                last -= 1
            jmp = if_condition[1]
            if self.version > 2.6:
                jmp_target = jmp[0].attr
            else:
                # 2.6 and earlier store the target in the string pattr.
                jmp_target = int(jmp[0].pattr)
            # Below we check that jmp_target is jumping to a feasible
            # location. It should be to the transition after the "then"
            # block and to the beginning of the "else" block.
            # However the "if/else" is inside a loop the false test can be
            # back to the loop.
            # FIXME: the below logic for jf_cfs could probably be
            # simplified.
            jump_else_end = ast[2]
            if jump_else_end == "jf_cf_pop":
                jump_else_end = jump_else_end[0]
            if jump_else_end == "JUMP_FORWARD":
                endif_target = int(jump_else_end.pattr)
                last_offset = tokens[last].off2int()
                if endif_target != last_offset:
                    return True
            last_offset = tokens[last].off2int(prefer_last=False)
            if jmp_target == last_offset:
                # jmp_target should be jumping to the end of the if/then/else
                # but is it jumping to the beginning of the "else"
                return True
            if (
                jump_else_end in ("jf_cfs", "jump_forward_else")
                and jump_else_end[0] == "JUMP_FORWARD"
            ):
                # If the "else" jump jumps before the end of the the "if .. else end", then this
                # is not this kind of "ifelsestmt".
                jump_else_forward = jump_else_end[0]
                jump_else_forward_target = jump_else_forward.attr
                if jump_else_forward_target < last_offset:
                    return True
                pass
            if (
                jump_else_end in ("jb_elsec", "jb_elsel", "jf_cfs", "jb_cfs")
                and jump_else_end[-1] == "COME_FROM"
            ):
                if jump_else_end[-1].off2int() != jmp_target:
                    return True
            if tokens[first].off2int() > jmp_target:
                return True
            return (jmp_target > last_offset) and tokens[last] != "JUMP_FORWARD"
    return False
| |
#!/usr/bin/env python2.7
from __future__ import absolute_import, division, print_function
# stdlib
import random
import string
import sys  # used by TabWidget at runtime; was only imported under __main__
from os.path import join
# third party / project
from six.moves import range
import utool
from ibeis.control import SQLDatabaseControl
from guitool.api_item_model import APIItemModel
from guitool.__PYQT__ import QtGui
# NOTE(review): this module references QtWidgets throughout but previously
# only imported QtGui; confirm guitool.__PYQT__ exposes QtWidgets (Qt5).
from guitool.__PYQT__ import QtWidgets
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_GUI_LAZY] ')
def create_databse():
    """Build a throwaway on-disk SQLite test database.

    Creates an 'imagesets' table with 1k random rows and a 'data' table with
    10k random rows, and returns (col_name_list, col_type_list, col_edit_list,
    col_nice_list, db) describing the 'data' table for the Qt item models.
    """
    # NOTE(review): function name keeps the original typo ("databse") because
    # DummyWidget.__init__ calls it by this name.
    def _randstr(size=6, chars=string.ascii_uppercase + string.digits):
        # Random fixed-length uppercase/digit string for test cell values.
        return ''.join(random.choice(chars) for _ in range(size))
    sqldb_fname = 'data_test_qt.sqlite3'
    sqldb_dpath = utool.util_cplat.get_app_resource_dir('ibeis', 'testfiles')
    utool.ensuredir(sqldb_dpath)
    # Start from a clean slate: drop any database left by a previous run.
    utool.util_path.remove_file(join(sqldb_dpath, sqldb_fname), dryrun=False)
    db = SQLDatabaseControl.SQLDatabaseController(sqldb_dpath=sqldb_dpath,
                                                  sqldb_fname=sqldb_fname)
    imagesets = [
        ('imageset_id', 'INTEGER PRIMARY KEY'),
        ('imageset_name', 'TEXT'),
    ]
    db.add_table('imagesets', imagesets)
    rows = 1 * (10 ** 3)
    feats_iter = ( (_randstr(), ) for i in range(rows) )
    print('[TEST] insert imagesets')
    tt = utool.tic()
    db.executemany(operation='''
        INSERT
        INTO imagesets
        (
            imageset_name
        )
        VALUES (?)
        ''', params_iter=feats_iter)
    print(' * execute insert time=%r sec' % utool.toc(tt))
    ##############################################
    headers = [
        ('data_id', 'INTEGER PRIMARY KEY'),
        ('imageset_id', 'INT'),
        ('data_float', 'FLOAT'),
        ('data_int', 'INT'),
        ('data_text', 'TEXT'),
        ('data_text2', 'TEXT'),
    ]
    db.add_table('data', headers)
    col_name_list = [ column[0] for column in headers ]
    # All columns are surfaced to Qt as strings; only the primary key is
    # non-editable.
    col_type_list = [ str ] * len(col_name_list)
    col_edit_list = [ False, True, True, True, True, True ]
    col_nice_list = [
        'ID',
        'ImageSet ID',
        'TEST Float',
        'TEST Int',
        'TEST String 1',
        'TEST String 2',
    ]
    rows = 1 * (10 ** 4)
    feats_iter = ((random.randint(0, 1000), random.uniform(0.0, 1.0), random.randint(0, 100), _randstr(), _randstr())
                  for i in range(rows) )
    print('[TEST] insert data')
    tt = utool.tic()
    db.executemany(operation='''
        INSERT
        INTO data
        (
            imageset_id,
            data_float,
            data_int,
            data_text,
            data_text2
        )
        VALUES (?,?,?,?,?)
        ''', params_iter=feats_iter)
    print(' * execute insert time=%r sec' % utool.toc(tt))
    return col_name_list, col_type_list, col_edit_list, col_nice_list, db
class ImageModelSQL(APIItemModel):
    """Qt item model backed by the 'data' SQL table, filtered by imageset."""
    def __init__(model, col_name_list, col_type_list, col_edit_list, col_nice_list, db, parent=None, *args):
        model.db = db
        # "-1" is the sentinel meaning "no imageset filter" (all rows).
        model.imageset_id = '-1'
        #row_index_callback=model._row_index_callback
        headers = dict(col_name_list=col_name_list,
                       col_type_list=col_type_list,
                       col_nice_list=col_nice_list,
                       col_edit_list=col_edit_list,
                       #col_getter_list=model._getter,
                       #col_setter_list=model._setter
                       )
        super(ImageModelSQL, model).__init__(headers, parent)
    def _change_imageset(model, imageset_id):
        """Switch the active imageset filter and refresh the row index."""
        model.imageset_id = imageset_id
        model._update_rows()
    def _row_index_callback(model, col_sort_name):
        """Return data_ids for the current imageset sorted by col_sort_name."""
        # NOTE(review): col_sort_name is concatenated into the SQL; it is
        # expected to come from the model's own column names, not user text.
        query = 'SELECT data_id FROM data WHERE (? IS "-1" OR imageset_id=?) ORDER BY ' + col_sort_name + ' ASC'
        model.db.execute(query, [model.imageset_id, model.imageset_id])
        return [result for result in model.db.result_iter()]
    def _setter(model, column_name, row_id, value):
        """Write one cell back to the database; empty strings are ignored."""
        if value != '':
            query = 'UPDATE data SET ' + column_name + '=? WHERE data_id=?'
            model.db.execute(query, [value, row_id])
        return True
    def _getter(model, column_name, row_id):
        """Read one cell from the database, stringified for display."""
        query = 'SELECT ' + column_name + ' FROM data WHERE data_id=?'
        model.db.execute(query, [row_id])
        result_list = list(model.db.result())
        return str(result_list[0])
class ImageSetModelSQL(APIItemModel):
    """Qt item model over the 'imagesets' table (name + image count)."""
    def __init__(model, col_name_list, col_type_list, col_edit_list, db, parent=None, *args):
        model.db = db
        super(ImageSetModelSQL, model).__init__(col_name_list=col_name_list,
                                                col_type_list=col_type_list,
                                                col_getter_list=model._getter,
                                                col_edit_list=col_edit_list,
                                                col_setter_list=model._setter,
                                                row_index_callback=model._row_index_callback,
                                                parent=parent)
    def _get_imageset_id_name(model, qtindex):
        """Return (imageset_id, imageset_name) for a QModelIndex."""
        row, col = model._row_col(qtindex)
        imageset_id = model._get_row_id(row)
        imageset_name = model._get_cell(row, 0)
        return imageset_id, imageset_name
    def _row_index_callback(model, col_sort_name):
        """Return imageset_ids sorted by col_sort_name."""
        # 'num_images' is a computed pseudo-column, so sort by id instead.
        if col_sort_name == 'num_images':
            col_sort_name = 'imageset_id'
        model.db.execute('SELECT imageset_id FROM imagesets ORDER BY ' + col_sort_name + ' ASC', [])
        return [result for result in model.db.result_iter()]
    def _setter(model, column_name, row_id, value):
        """Write one cell back to the database; empty strings are ignored."""
        if value != '':
            query = 'UPDATE imagesets SET ' + column_name + '=? WHERE imageset_id=?'
            model.db.execute(query, [value, row_id])
            # model.parent()._update_imageset_tab_name(row_id, value)
        return True
    def _getter(model, column_name, row_id):
        """Read one cell; 'num_images' is a placeholder that always reads 0."""
        if column_name == 'num_images':
            return 0
        query = 'SELECT ' + column_name + ' FROM imagesets WHERE imageset_id=?'
        model.db.execute(query, [row_id])
        result_list = list(model.db.result())
        return str(result_list[0])
class ImageView(QtWidgets.QTableView):
    """Sortable table view over ImageModelSQL; rows select as whole units.

    NOTE(review): this module references QtWidgets while only QtGui is
    imported at the top of the file — confirm QtWidgets is in scope or
    instantiation raises NameError.
    """
    def __init__(view, parent=None):
        QtWidgets.QTableView.__init__(view, parent)
        view.setSortingEnabled(True)
        vh = view.verticalHeader()
        vh.setVisible(False)
        view.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        view.resizeColumnsToContents()
    def _change_imageset(view, imageset_id):
        """Forward imageset switches to the attached model."""
        view.model()._change_imageset(imageset_id)
class ImageSetView(QtWidgets.QTableView):
    """Table view listing imagesets; double-click opens an imageset tab."""
    def __init__(view, parent=None):
        QtWidgets.QTableView.__init__(view, parent)
        view.setSortingEnabled(True)
        vh = view.verticalHeader()
        vh.setVisible(False)
        #hh = view.horizontalHeader()
        #hh.setVisible(False)
    def mouseDoubleClickEvent(view, event):
        """Open (or focus) a tab for the double-clicked imageset."""
        index = view.selectedIndexes()[0]
        imageset_id, imageset_name = view.model()._get_imageset_id_name(index)
        view.parent()._add_imageset_tab(imageset_id, imageset_name)
class TabWidget(QtWidgets.QTabWidget):
    """Tab bar for open imagesets; tab index i maps to imageset_id_list[i]."""
    def __init__(widget, parent=None):
        QtWidgets.QTabWidget.__init__(widget, parent)
        widget.setTabsClosable(True)
        # NOTE(review): uses module-level `sys`; at time of writing sys is
        # only imported inside the __main__ guard — confirm it is available
        # here or this raises NameError when imported as a module.
        if sys.platform.startswith('darwin'):
            tab_height = 21
        else:
            tab_height = 30
        widget.setMaximumSize(9999, tab_height)
        widget._tb = widget.tabBar()
        widget._tb.setMovable(True)
        widget.setStyleSheet('border: none;')
        widget._tb.setStyleSheet('border: none;')
        widget.tabCloseRequested.connect(widget._close_tab)
        widget.currentChanged.connect(widget._on_change)
        widget.imageset_id_list = []
        # "-1" is the permanent "whole database" tab; it cannot be closed.
        widget._add_imageset_tab('-1', 'Database')
    def _on_change(widget, index):
        """Propagate tab switches to the parent widget's imageset filter."""
        if 0 <= index and index < len(widget.imageset_id_list):
            widget.parent()._change_imageset(widget.imageset_id_list[index])
    def _close_tab(widget, index):
        # The database sentinel tab ("-1") is never removed.
        if widget.imageset_id_list[index] != '-1':
            widget.imageset_id_list.pop(index)
            widget.removeTab(index)
    def _add_imageset_tab(widget, imageset_id, imageset_name):
        """Open a new tab for imageset_id, or focus it if already open."""
        if imageset_id not in widget.imageset_id_list:
            tab_name = str(imageset_id) + ' - ' + str(imageset_name)
            widget.addTab(QtWidgets.QWidget(), tab_name)
            widget.imageset_id_list.append(imageset_id)
            index = len(widget.imageset_id_list) - 1
        else:
            index = widget.imageset_id_list.index(imageset_id)
        widget.setCurrentIndex(index)
        widget._on_change(index)
    def _update_imageset_tab_name(widget, imageset_id, imageset_name):
        """Rename the tab belonging to imageset_id (if one is open)."""
        for index, _id in enumerate(widget.imageset_id_list):
            if imageset_id == _id:
                widget.setTabText(index, imageset_name)
class DummyWidget(QtWidgets.QWidget):
    ''' Test Main Window '''
    def __init__(widget, parent=None):
        QtWidgets.QWidget.__init__(widget, parent)
        widget.vlayout = QtWidgets.QVBoxLayout(widget)
        # Build the test database plus the image (data) model/view pair.
        col_name_list, col_type_list, col_edit_list, col_nice_list, db = create_databse()
        widget._image_model = ImageModelSQL(col_name_list, col_type_list, col_edit_list, col_nice_list, db, parent=widget)
        widget._image_view = ImageView(parent=widget)
        widget._image_view.setModel(widget._image_model)
        # The imageset model uses its own (shorter) column description.
        col_name_list = ['imageset_name', 'num_images']
        col_type_list = [str, int]
        col_edit_list = [True, False]
        #splitter = QtWidgets.QSplitter(centralwidget)
        #splitter.setOrientation(QtCore.Qt.Vertical)
        widget._imageset_model = ImageSetModelSQL(col_name_list, col_type_list, col_edit_list, db, parent=widget)
        widget._imageset_view = ImageSetView(parent=widget)
        widget._imageset_view.setModel(widget._imageset_model)
        widget._tab_widget = TabWidget(parent=widget)
        widget.vlayout.addWidget(widget._tab_widget)
        widget.vlayout.addWidget(widget._image_view)
        widget.vlayout.addWidget(widget._imageset_view)
    def _change_imageset(widget, imageset_id):
        """Filter the image view down to one imageset (or '-1' for all)."""
        widget._image_view._change_imageset(imageset_id)
    def _add_imageset_tab(widget, imageset_id, imageset_name):
        """Open (or focus) the tab for an imageset."""
        widget._tab_widget._add_imageset_tab(imageset_id, imageset_name)
    def _update_imageset_tab_name(widget, imageset_id, imageset_name):
        """Rename the open tab for an imageset."""
        widget._tab_widget._update_imageset_tab_name(imageset_id, imageset_name)
if __name__ == '__main__':
    import sys
    import signal
    import guitool
    def _on_ctrl_c(signal, frame):
        print('Caught ctrl+c')
        sys.exit(0)
    # Install a SIGINT handler so Ctrl+C exits the Qt event loop cleanly.
    signal.signal(signal.SIGINT, _on_ctrl_c)
    app = QtWidgets.QApplication(sys.argv)
    widget = DummyWidget()
    widget.show()
    # Keep the Python interpreter periodically scheduled while Qt runs.
    widget.timer = guitool.ping_python_interpreter()
    widget.raise_()
    sys.exit(app.exec_())
    # NOTE(review): unreachable — sys.exit() above raises SystemExit before
    # this line runs, so the default SIGINT handler is never restored.
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # reset ctrl+c behavior
| |
#!/usr/bin/env python
#
# Copyright 2008 FriendFeed
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Methods to interact with the FriendFeed API
Detailed documentation is available at http://friendfeed.com/api/.
Many parts of the FriendFeed API require authentication. To support
authentication, FriendFeed gives users a "remote key" that they give to
third party applications to access FriendFeed. The user's nickname and that
remote key are passed as arguments to the constructor of the FriendFeed class,
and the credentials are automatically passed to all called methods. For
example:
session = friendfeed.FriendFeed(nickname, remote_key)
entry = session.publish_message("Testing the FriendFeed API")
Users can get their remote key from http://friendfeed.com/remotekey. You
should direct users who don't know their remote key to that page.
For guidelines on user interface and terminology, check out
http://friendfeed.com/api/guidelines.
"""
import base64
import datetime
import time
import urllib
import urllib2
# We require a JSON parsing library. These seem to be the most popular.
# Each branch normalizes to a callable that takes a UTF-8 byte string and
# returns a parsed structure whose strings are unicode.
try:
    import cjson
    parse_json = lambda s: cjson.decode(s.decode("utf-8"), True)
except ImportError:
    try:
        import simplejson
        parse_json = lambda s: simplejson.loads(s.decode("utf-8"))
    except ImportError:
        # Fall back to the standard-library json module.
        # BUGFIX: the stdlib module has no json.read() -- that was the API
        # of the old "python-json" package -- so use json.loads(), and run
        # _unicodify over the result for parity with the other parsers.
        import json
        parse_json = lambda s: _unicodify(json.loads(s.decode("utf-8")))
class FriendFeed(object):
    """Session object for the FriendFeed HTTP API.

    All requests go to http://friendfeed.com. When a nickname/remote-key
    pair is supplied, it is sent as an HTTP Basic Authorization header on
    every request.
    """

    def __init__(self, auth_nickname=None, auth_key=None):
        """Creates a new FriendFeed session for the given user.

        The credentials are optional for some operations, but required for
        private feeds and all operations that write data, like publish_link.
        """
        self.auth_nickname = auth_nickname
        self.auth_key = auth_key

    def fetch_public_feed(self, **kwargs):
        """Returns the public feed with everyone's public entries.

        Authentication is not required.
        """
        return self._fetch_feed("/api/feed/public", **kwargs)

    def fetch_user_feed(self, nickname, **kwargs):
        """Returns the entries shared by the user with the given nickname.

        Authentication is required if the user's feed is not public.
        """
        return self._fetch_feed(
            "/api/feed/user/" + urllib.quote_plus(nickname), **kwargs)

    def fetch_user_comments_feed(self, nickname, **kwargs):
        """Returns the entries the given user has commented on."""
        return self._fetch_feed(
            "/api/feed/user/" + urllib.quote_plus(nickname) + "/comments",
            **kwargs)

    def fetch_user_likes_feed(self, nickname, **kwargs):
        """Returns the entries the given user has "liked"."""
        return self._fetch_feed(
            "/api/feed/user/" + urllib.quote_plus(nickname) + "/likes",
            **kwargs)

    def fetch_user_discussion_feed(self, nickname, **kwargs):
        """Returns the entries the given user has commented on or "liked"."""
        return self._fetch_feed(
            "/api/feed/user/" + urllib.quote_plus(nickname) + "/discussion",
            **kwargs)

    def fetch_multi_user_feed(self, nicknames, **kwargs):
        """Returns a merged feed with all of the given users' entries.

        Authentication is required if any one of the users' feeds is not
        public.
        """
        return self._fetch_feed("/api/feed/user", nickname=",".join(nicknames),
                                **kwargs)

    def fetch_home_feed(self, **kwargs):
        """Returns the entries the authenticated user sees on their home page.

        Authentication is always required.
        """
        return self._fetch_feed("/api/feed/home", **kwargs)

    def search(self, q, **kwargs):
        """Searches over entries in FriendFeed.

        If the request is authenticated, the default scope is over all of the
        entries in the authenticated user's Friends Feed. If the request is
        not authenticated, the default scope is over all public entries.

        The query syntax is the same syntax as
        http://friendfeed.com/advancedsearch
        """
        kwargs["q"] = q
        return self._fetch_feed("/api/feed/search", **kwargs)

    def publish_message(self, message, **kwargs):
        """Publishes the given message to the authenticated user's feed.

        See publish_link for additional options.
        """
        return self.publish_link(title=message, link=None, **kwargs)

    def publish_link(self, title, link, comment=None, image_urls=[],
                     images=[], via=None, audio_urls=[], audio=[],
                     room=None):
        """Publishes the given link/title to the authenticated user's feed.

        Authentication is always required.

        image_urls is a list of URLs that will be downloaded and included as
        thumbnails beneath the link. The thumbnails will all link to the
        destination link. If you would prefer that the images link somewhere
        else, you can specify images[] instead, which should be a list of
        dicts of the form {"url": ..., "link": ...}. The thumbnail with the
        given url will link to the specified link.

        audio_urls is a list of MP3 URLs that will show up as a play
        button beneath the link. You can optionally supply audio[]
        instead, which should be a list of dicts of the form
        {"url": ..., "title": ...}. The given title will appear when the
        audio file is played.

        We return the parsed/published entry as returned from the server, which
        includes the final thumbnail URLs as well as the ID for the new entry.

        Example:
            session = friendfeed.FriendFeed(nickname, remote_key)
            entry = session.publish_link(
                title="Testing the FriendFeed API",
                link="http://friendfeed.com/",
                image_urls=[
                    "http://friendfeed.com/static/images/jim-superman.jpg",
                    "http://friendfeed.com/static/images/logo.png",
                ],
            )
            print "Posted images at http://friendfeed.com/e/%s" % entry["id"]
        """
        post_args = {"title": title}
        if link:
            post_args["link"] = link
        if comment:
            post_args["comment"] = comment
        if via:
            post_args["via"] = via
        # NOTE: the mutable default lists are never mutated -- we copy
        # before appending -- so sharing the defaults across calls is safe.
        images = images[:]
        for image_url in image_urls:
            images.append({"url": image_url})
        for i, image in enumerate(images):
            post_args["image%d_url" % i] = image["url"]
            if image.get("link"):
                post_args["image%d_link" % i] = image["link"]
        audio = audio[:]
        for audio_url in audio_urls:
            audio.append({"url": audio_url})
        for i, clip in enumerate(audio):
            post_args["audio%d_url" % i] = clip["url"]
            if clip.get("title"):
                post_args["audio%d_title" % i] = clip["title"]
        if room:
            post_args["room"] = room
        feed = self._fetch_feed("/api/share", post_args=post_args)
        return feed["entries"][0]

    def add_comment(self, entry_id, body, via=None):
        """Adds the given comment to the entry with the given ID.

        We return the ID of the new comment, which can be used to edit or
        delete the comment.
        """
        args = {
            "entry": entry_id,
            "body": body
        }
        if via:
            args["via"] = via
        result = self._fetch("/api/comment", args)
        return result["id"]

    def edit_comment(self, entry_id, comment_id, body):
        """Updates the comment with the given ID."""
        self._fetch("/api/comment", {
            "entry": entry_id,
            "comment": comment_id,
            "body": body
        })

    def delete_comment(self, entry_id, comment_id):
        """Deletes the comment with the given ID."""
        self._fetch("/api/comment/delete", {
            "entry": entry_id,
            "comment": comment_id,
        })

    def undelete_comment(self, entry_id, comment_id):
        """Un-deletes the comment with the given ID."""
        self._fetch("/api/comment/delete", {
            "entry": entry_id,
            "comment": comment_id,
            "undelete": 1,
        })

    def add_like(self, entry_id):
        """'Likes' the entry with the given ID."""
        self._fetch("/api/like", {
            "entry": entry_id,
        })

    def delete_like(self, entry_id):
        """Deletes the 'Like' for the entry with the given ID (if any)."""
        self._fetch("/api/like/delete", {
            "entry": entry_id,
        })

    def _fetch_feed(self, uri, post_args=None, **kwargs):
        """Publishes to the given URI and parses the returned JSON feed."""
        result = self._fetch(uri, post_args, **kwargs)
        # Parse all RFC 3339 date strings in the result into datetimes.
        # (Two unused locals that previously duplicated _parse_date's
        # format string were removed here.)
        for entry in result.get("entries", []):
            entry["updated"] = self._parse_date(entry["updated"])
            entry["published"] = self._parse_date(entry["published"])
            for comment in entry.get("comments", []):
                comment["date"] = self._parse_date(comment["date"])
            for like in entry.get("likes", []):
                like["date"] = self._parse_date(like["date"])
        return result

    def _fetch(self, uri, post_args, **url_args):
        """Issues the HTTP request and returns the parsed JSON response.

        A non-None post_args dict makes the request a POST; credentials,
        when present, are sent as an HTTP Basic Authorization header.
        """
        url_args["format"] = "json"
        args = urllib.urlencode(url_args)
        url = "http://friendfeed.com" + uri + "?" + args
        if post_args is not None:
            request = urllib2.Request(url, urllib.urlencode(post_args))
        else:
            request = urllib2.Request(url)
        if self.auth_nickname and self.auth_key:
            pair = "%s:%s" % (self.auth_nickname, self.auth_key)
            token = base64.b64encode(pair)
            request.add_header("Authorization", "Basic %s" % token)
        stream = urllib2.urlopen(request)
        # Close the stream even if read() raises (previously it leaked).
        try:
            data = stream.read()
        finally:
            stream.close()
        return parse_json(data)

    def _parse_date(self, date_str):
        """Parses an RFC 3339 UTC timestamp into a naive datetime."""
        rfc3339_date = "%Y-%m-%dT%H:%M:%SZ"
        return datetime.datetime(*time.strptime(date_str, rfc3339_date)[:6])
def _unicodify(json):
"""Makes all strings in the given JSON-like structure unicode."""
if isinstance(json, str):
return json.decode("utf-8")
elif isinstance(json, dict):
for name in json:
json[name] = _unicodify(json[name])
elif isinstance(json, list):
for part in json:
_unicodify(part)
return json
def _example():
    """Demonstrates the API against the live FriendFeed service.

    NOTE(review): Python 2 only (print statements) and performs real
    network I/O; writes only run when credentials are filled in below.
    """
    # Fill in a nickname and a valid remote key below for authenticated
    # actions like posting an entry and reading a protected feed
    # session = FriendFeed(auth_nickname=nickname, auth_key=remote_key)
    session = FriendFeed()
    feed = session.fetch_public_feed()
    # Other read-only examples:
    # feed = session.fetch_user_feed("bret")
    # feed = session.fetch_user_feed("paul", service="twitter")
    # feed = session.fetch_user_discussion_feed("bret")
    # feed = session.fetch_multi_user_feed(["bret", "paul", "jim"])
    # feed = session.search("who:bret friendfeed")
    for entry in feed["entries"]:
        print entry["published"].strftime("%m/%d/%Y"), entry["title"]
    if session.auth_nickname and session.auth_key:
        # The feed that the authenticated user would see on their home page
        feed = session.fetch_home_feed()
        # Post a message on this user's feed
        entry = session.publish_message("Testing the FriendFeed API")
        print "Posted new message at http://friendfeed.com/e/%s" % entry["id"]
        # Post a link on this user's feed
        entry = session.publish_link(title="Testing the FriendFeed API",
                                     link="http://friendfeed.com/")
        print "Posted new link at http://friendfeed.com/e/%s" % entry["id"]
        # Post a link with two thumbnails on this user's feed
        entry = session.publish_link(
            title="Testing the FriendFeed API",
            link="http://friendfeed.com/",
            image_urls=[
                "http://friendfeed.com/static/images/jim-superman.jpg",
                "http://friendfeed.com/static/images/logo.png",
            ],
        )
        print "Posted images at http://friendfeed.com/e/%s" % entry["id"]
if __name__ == "__main__":
    # Exercise the client against the live service when run as a script.
    _example()
| |
# Sway
from __future__ import print_function, division
import sys
# Skip .pyc generation; this module is re-run frequently during experiments.
sys.dont_write_bytecode = True
from model import *
from space import *
from grid import *
# Pre-sized factories (ndecs, nobjs) so experiment tables can refer to
# model configurations by name.
def BASIC_5_2(): return BASIC(5, 2)
def BASIC_10_2(): return BASIC(10, 2)
def BASIC_20_2(): return BASIC(20, 2)
def BASIC_40_2(): return BASIC(40, 2)
class BASIC(Model):
    """Toy model: every objective is the plain sum of the decisions,
    each decision ranging over [0, 1]."""

    def __init__(i, ndecs=5, nobjs=2):
        # Record the requested problem size before the base class calls
        # about() to build the decision/objective structures.
        i.ndecs = ndecs
        i.nobjs = nobjs
        Model.__init__(i)

    def about(i):
        def total(candidate):
            # All objectives share the same maker: sum of the decisions.
            return sum(candidate.decs)

        i.decs = [An(pos, lo=0, hi=1) for pos in range(i.ndecs)]
        i.objs = [Less("f%s" % idx, maker=total) for idx in range(i.nobjs)]
# Pre-sized BIASED factories, mirroring the BASIC_* naming scheme.
def BIASED_5_2(): return BIASED(5, 2)
def BIASED_10_2(): return BIASED(10, 2)
def BIASED_20_2(): return BIASED(20, 2)
def BIASED_40_2(): return BIASED(40, 2)
class BIASED(Model):
    """Like BASIC, but decision number x ranges over [x**3, x**4], so
    later decisions dominate the objective sums."""

    def __init__(i, ndecs=5, nobjs=2):
        i.ndecs = ndecs
        i.nobjs = nobjs
        Model.__init__(i)

    def about(i):
        def total(candidate):
            # Every objective is the same sum-of-decisions measure.
            return sum(candidate.decs)

        i.decs = [An(pos, lo=pos ** 3, hi=pos ** 4) for pos in range(i.ndecs)]
        i.objs = [Less("f%s" % idx, maker=total) for idx in range(i.nobjs)]
class Kursawe(Model):
    """Kursawe's two-objective benchmark: n decisions in [-5, 5]."""
    n = 3
    a = 2
    b = 3

    def about(i):
        def f1(can):
            # Couples each decision with its successor. The loop index is
            # renamed 'k' so it no longer shadows the 'i' self parameter.
            return sum(-10 * ee ** (-0.2 * sqrt(z ** 2 + can.decs[k + 1] ** 2))
                       for k, z in enumerate(can.decs[:-1]))

        def f2(can):
            a, b = Kursawe.a, Kursawe.b
            return sum(abs(z) ** a + 5 * math.sin(z) ** b
                       for z in can.decs)

        i.decs = [An(pos, lo=-5, hi=5) for pos in range(Kursawe.n)]
        i.objs = [Less("f1", maker=f1),
                  Less("f2", maker=f2)]
class ZDT1(Model):
    """ZDT1 benchmark: n decisions in [0, 1], two objectives."""
    n = 30

    def about(i):
        def f1(can):
            # First objective is simply the first decision.
            return can.decs[0]

        def f2(can):
            # g aggregates the remaining decisions; the redundant inner
            # generator (which shadowed the argument name) was simplified.
            g = 1 + 9 * sum(can.decs[1:]) / (ZDT1.n - 1)
            return g * abs(1 - sqrt(can.decs[0] / g))

        i.decs = [An(pos, lo=0, hi=1) for pos in range(ZDT1.n)]
        i.objs = [Less("f1", maker=f1),
                  Less("f2", maker=f2)]
# Factories named (ndecs, nobjs): DTLZ7(nobjs) uses nobjs-1 decisions.
def DTLZ7_2_3(): return DTLZ7()
def DTLZ7_4_5(): return DTLZ7(5)
def DTLZ7_6_7(): return DTLZ7(7)
class DTLZ7(Model):
    """DTLZ7 many-objective benchmark: nobjs objectives, nobjs-1 decisions
    in [0, 1].

    NOTE(review): the author marked this '#XXX complete dtlz7' -- g() here
    sums every decision; confirm against the published DTLZ7 definition
    before relying on the Pareto front shape.
    """
    def __init__(i, nobjs=3):
        # Decision count is derived from the objective count.
        i.ndecs, i.nobjs = nobjs - 1, nobjs
        Model.__init__(i)

    def g(i, x):
        return 1 + 9 / i.nobjs * sum(x.decs)

    def h(i, x, g):
        # Accumulates the sinusoidal penalty over the first nobjs-1
        # objectives of candidate x.
        _h = i.nobjs
        for j in range(i.nobjs - 1):
            _f = i.f("a", j, x)
            _h -= _f / (1 + g) * (
                1 + math.sin(3 * pi * _f))
        return _h

    def f(i, s, j, x):
        # Objective j of candidate x. 's' is an unused tag argument
        # (callers pass "a" or "x") -- kept for signature compatibility.
        if j < (i.nobjs - 1):
            return x.decs[j]
        else:
            _g = i.g(x)
            return (1 + _g) * i.h(x, _g)

    def about(i):
        def obj(j):
            # Bind j per objective (avoids the late-binding closure trap).
            return lambda x: i.f("x", j, x)
        def dec(x):
            return An(x, lo=0, hi=1)
        i.decs = [dec(x) for x in range(i.ndecs)]
        i.objs = [Less("f%s" % j,
                       maker=obj(j))
                  for j in range(i.nobjs)]
#XXX complete dtlz7
class Fonseca(Model):
    """Fonseca-Fleming two-objective benchmark over n decisions in [-4, 4]."""
    n = 3

    def about(i):
        def f1(can):
            # Distance of each decision from +1/sqrt(n), exponentiated.
            z = sum((d - 1 / sqrt(Fonseca.n)) ** 2 for d in can.decs)
            return 1 - ee ** (-1 * z)

        def f2(can):
            # Mirror image: distance from -1/sqrt(n).
            z = sum((d + 1 / sqrt(Fonseca.n)) ** 2 for d in can.decs)
            return 1 - ee ** (-1 * z)

        i.decs = [An(pos, lo=-4, hi=4) for pos in range(Fonseca.n)]
        i.objs = [Less("f1", maker=f1),
                  Less("f2", maker=f2)]
class Viennet4(Model):
    """Viennet4: three objectives over two decisions in [-4, 4], with
    three linear feasibility constraints."""
    n = 2

    def ok(i, can):
        # A candidate is valid only when all three constraints hold.
        x1, x2 = can.decs
        return (-1 * x2 - 4 * x1 + 4 >= 0 and
                x1 + 1 >= 0 and
                x2 - x1 + 2 >= 0)

    def about(i):
        def f1(can):
            x1, x2 = can.decs
            return (x1 - 2) ** 2 / 2 + (x2 + 1) ** 2 / 13 + 3

        def f2(can):
            x1, x2 = can.decs
            return (x1 + x2 - 3) ** 2 / 175 + (2 * x2 - x1) ** 2 / 17 - 13

        def f3(can):
            x1, x2 = can.decs
            return (3 * x1 - 2 * x2 + 4) ** 2 / 8 + (x1 - x2 + 1) ** 2 / 27 + 15

        i.decs = [An(pos, lo=-4, hi=4) for pos in range(Viennet4.n)]
        i.objs = [Less("f1", maker=f1),
                  Less("f2", maker=f2),
                  Less("f3", maker=f3)]
########### test cases
def _models(repeats=32, items=32):
    """Smoke test: distance queries over logged decision/objective spaces.

    NOTE(review): uses xrange, so this path is Python 2 only; 'all'
    shadows the builtin.
    """
    def worker(m):
        # Evaluate one random candidate and log it in both spaces.
        x = m.eval(m.decide())
        logDecs + x
        logObjs + x
        return x
    for f in [Fonseca, Viennet4, ZDT1, DTLZ7_2_3, DTLZ7_4_5]:
        reset()
        m = f()
        logDecs = Space(value=decisions)
        logObjs = Space(value=objectives)
        print("")
        for _ in xrange(repeats):
            all = [worker(m) for _ in xrange(items)]
            one = all[0]
            # Sanity: the nearest item must be closer than the furthest,
            # in both the decision space and the objective space.
            far, d1 = logDecs.furthest(one, all)
            near, d2 = logDecs.closest(one, all)
            assert d2 < d1
            far, d3 = logObjs.furthest(one, all)
            near, d4 = logObjs.closest(one, all)
            assert d4 < d3
        print("")
def _Viennet4():
    """Regression test: one Viennet4 evaluation matches known values (3 d.p.).

    NOTE(review): the literal decs/objs below can only be stable if
    reset()/decide() seed the RNG deterministically -- confirm.
    """
    reset()
    m = Viennet4()
    x = m.eval(m.decide())
    assert {'objs': [5.101, -12.897, 17.829],
            'decs': [-0.037, -0.404]} \
        == {'decs': r3s(x.decs), 'objs': r3s(x.objs)}
def _rahul(items=512):
    """Grid-occupancy demo across the BASIC/BIASED model family.

    Each model appears twice so run-to-run variation shows side by side.
    NOTE(review): the bare BASIC/BIASED entries are the classes themselves
    (default sizes), unlike the *_N_2 factory functions -- both are
    callable with no args, so this works, but it is easy to misread.
    """
    models = [BASIC, BASIC,
              BASIC_10_2, BASIC_10_2,
              BASIC_20_2, BASIC_20_2,
              BASIC_40_2, BASIC_40_2,
              BIASED, BIASED,
              BIASED_10_2, BIASED_10_2,
              BIASED_20_2, BIASED_20_2,
              BIASED_40_2, BIASED_40_2
              ]  # ZDT1,Fonseca,Kursawe]
    for f in models:
        reset()
        GRID(bins=16)
        print(f.__name__)
        rows = _gridding(f(), items)
        print("\n")
        # Render one character/number per grid cell.
        printm([[_show(cell, items) for cell in row]
                for row in rows])
def _gridding(m, items):
    """Sample 'items' candidates from model m into spaces and a
    decision-space grid; returns the grid's cell matrix.

    NOTE(review): Python 2 only (xrange); 'all' shadows the builtin.
    """
    spaceDecs = Space(value=decisions)
    spaceObjs = Space(value=objectives)
    gridDecs = Grid(spaceDecs)
    all = [_worker(m, spaceDecs, gridDecs, spaceObjs)
           for _ in xrange(items)]
    # Frontier-plotting experiments, kept disabled for reference:
    #xs1,ys1= _frontier(m,all,spaceObjs)
    #xs2,ys2= _frontier(m,all,spaceObjs,how='cdom')
    ##print("Bdoms",len(xs1))
    #print("Cdoms",len(xs2))
    #textplot(
    #  (data(xs2), data(ys2), {'legend':'cdom'}),
    #  (data(xs1), data(ys1), {'legend':'bdom'}),
    ### cmds="set key bottom left")
    return gridDecs.grid.cells
def _worker(m, spaceDecs, gridDecs, spaceObjs):
    """Generate, log, and evaluate one candidate; return it evaluated."""
    x = m.decide()
    # '+' presumably records the candidate into the space/grid -- see the
    # Space and Grid classes in space.py/grid.py; confirm semantics there.
    spaceDecs + x
    gridDecs + x
    x = m.eval(x)
    spaceObjs + x
    return x
def _show(cell,items):
n = len(cell)
p = int(100*n /items)
if p >= 1: return p
if n > 0 : return "."
return " "
def _frontier(m, all, spaceObjs, how="bdom"):
    """Collect (f1, f2) coordinates of the tournament survivors, sorted
    by their objective vectors."""
    survivors = tournament(m, all, spaceObjs, how=how)
    ordered = sorted(survivors, key=lambda z: z.objs)
    xs = [one.objs[0] for one in ordered]
    ys = [one.objs[1] for one in ordered]
    return xs, ys
# NOTE(review): '_tournament' is not defined anywhere in this module
# (only _models, _Viennet4, _rahul, _gridding, _worker, _show, _frontier
# exist), so importing this file raises NameError here. Confirm the
# intended symbol -- perhaps _rahul or _frontier's tournament helper.
main(__name__, _models, _Viennet4,
     _tournament)
| |
import re
from datetime import date, timedelta, datetime

from django.conf import settings
from django.http import Http404, HttpResponsePermanentRedirect
from django.shortcuts import render
from django.urls import reverse

from chicago.models import ChicagoBill, ChicagoEvent
# NOTE: the wildcard import must stay before the imports below so their
# names take precedence over anything councilmatic_core.views exports.
from councilmatic_core.views import *

from haystack.query import SearchQuerySet

from django.db.models import DateTimeField
from django.db.models.functions import Cast
class ChicagoIndexView(IndexView):
    """Chicago home page: activity at, and since, the last council meeting."""
    template_name = 'chicago/index.html'
    bill_model = ChicagoBill
    event_model = ChicagoEvent

    def last_meeting(self):
        # Most recent past full City Council meeting (not a committee).
        return ChicagoEvent.most_recent_past_city_council_meeting()

    def date_cutoff(self):
        # Bills acted on exactly on this date count as "at the last meeting".
        return self.last_meeting().start_time.date()

    def council_bills(self):
        # Bills with an action on the cutoff date, originating from the
        # City Council organization itself.
        return ChicagoBill.objects\
            .filter(actions__date=self.date_cutoff(), from_organization__name=settings.OCD_CITY_COUNCIL_NAME)\
            .prefetch_related('actions')

    def topic_hierarchy(self):
        """Attach per-topic bill counts to the configured topic tree.

        Counts cover only the last council meeting's bills; a parent's
        count is the sum of its children's, grandchildren get leaf counts.

        NOTE(review): this mutates settings.TOPIC_HIERARCHY in place, so
        counts persist on the settings object between requests -- confirm
        that is intended.
        """
        # getting topic counts for meeting bills
        topic_hierarchy = settings.TOPIC_HIERARCHY
        topic_tag_counts = {}
        for b in self.council_bills():
            for topic in b.topics:
                try:
                    topic_tag_counts[topic] += 1
                except KeyError:
                    topic_tag_counts[topic] = 1
        # put together data blob for topic hierarchy
        for parent_blob in topic_hierarchy:
            parent_blob['count'] = 0
            for child_blob in parent_blob['children']:
                child_name = child_blob['name']
                child_blob['count'] = topic_tag_counts[child_name] if child_name in topic_tag_counts else 0
                parent_blob['count'] += child_blob['count']
                for gchild_blob in child_blob['children']:
                    gchild_name = gchild_blob['name']
                    gchild_blob['count'] = topic_tag_counts[gchild_name] if gchild_name in topic_tag_counts else 0
        return topic_hierarchy

    def get_context_data(self, **kwargs):
        # NOTE(review): super(IndexView, self) skips IndexView's own
        # get_context_data and calls its parent's instead -- confirm this
        # bypass is intentional.
        context = super(IndexView, self).get_context_data(**kwargs)
        # Find activity at last council meeting
        council_bills = self.council_bills()
        context['council_bills'] = council_bills
        context['nonroutine_council_bills'] = [bill for bill in council_bills if 'Non-Routine' in bill.topics]
        # Find recent activity (since last council meeting)
        recent_bills = ChicagoBill.objects.filter(actions__date__gt=self.date_cutoff())
        context['recent_bills'] = recent_bills
        context['nonroutine_recent_bills'] = [bill for bill in recent_bills if 'Non-Routine' in bill.topics]
        seo = {}
        seo.update(settings.SITE_META)
        seo['image'] = '/static/images/city_hall.jpg'
        context['seo'] = seo
        # Callables are passed through uncalled; templates invoke them lazily.
        context['last_council_meeting'] = self.event_model.most_recent_past_city_council_meeting
        context['next_council_meeting'] = self.event_model.next_city_council_meeting
        context['upcoming_committee_meetings'] = self.event_model.upcoming_committee_meetings
        context['topic_hierarchy'] = self.topic_hierarchy
        return context
class ChicagoAboutView(AboutView):
    # Reuses the councilmatic AboutView logic with Chicago's template.
    template_name = 'chicago/about.html'
def substitute_ordinance_redirect(request, substitute_ordinance_slug):
    """Permanently redirect a substitute-ordinance slug to the detail page
    of the underlying bill (the slug minus its leading character)."""
    target = reverse('bill_detail',
                     kwargs={'slug': substitute_ordinance_slug[1:]})
    return HttpResponsePermanentRedirect(target)
class ChicagoBillDetailView(BillDetailView):
    """Bill detail page with permanent redirects for legacy slug formats.

    NOTE(review): this method uses 're', which is not imported at the top
    of this file -- it presumably arrives via the
    'from councilmatic_core.views import *' wildcard; confirm or add an
    explicit 'import re'.
    """
    template_name = 'chicago/legislation.html'
    model = ChicagoBill

    def dispatch(self, request, *args, **kwargs):
        # Fast path: the slug matches a current bill.
        slug = self.kwargs['slug']
        try:
            bill = self.model.objects.get(slug=slug)
            return super().dispatch(request, *args, **kwargs)
        except ChicagoBill.DoesNotExist:
            bill = None
        if not bill:
            '''
            Chicago Councilmatic requires redirects for several old bill slugs:
            (1) original Councilmatic slug, which used the Legistar GUID
            (2) a mangled form: an added space, e.g., o-2018-2302 (old slug) vs. o2018-2302
            '''
            try:
                # Legacy case (1): the slug is a Legistar GUID embedded in
                # one of the bill's source URLs.
                pattern = '?ID=%s&GUID' % slug
                bill = ChicagoBill.objects.get(sources__url__contains=pattern)
                return HttpResponsePermanentRedirect(reverse('bill_detail', args=[bill.slug]))
            except ChicagoBill.DoesNotExist:
                try:
                    # Legacy case (2): strip the stray hyphen, e.g.
                    # 'o-2018-2302' -> 'o2018-2302', then retry.
                    added_space = r'^([A-Za-z]+)-([-\d]+)$'
                    match_added_space = re.match(added_space, slug)
                    if match_added_space:
                        prefix = match_added_space.group(1)
                        remainder = match_added_space.group(2)
                        repaired_slug = '{prefix}{remainder}'.format(prefix=prefix, remainder=remainder)
                        bill = self.model.objects.get(slug=repaired_slug)
                        return HttpResponsePermanentRedirect(reverse('bill_detail', args=[bill.slug]))
                except ChicagoBill.DoesNotExist:
                    raise Http404
            # No legacy form matched either.
            raise Http404

    def get_object(self, queryset=None):
        """
        Returns a bill based on slug. If no bill found,
        looks for bills based on legistar id (so that
        urls from old Chicago councilmatic don't break)
        """
        if queryset is None:
            queryset = self.get_queryset()
        slug = self.kwargs.get(self.slug_url_kwarg)
        if slug is None:
            raise AttributeError("Generic detail view %s must be called with "
                                 "either an object pk or a slug."
                                 % self.__class__.__name__)
        # Try looking up by slug
        if slug is not None:
            slug_field = self.get_slug_field()
            queryset = queryset.filter(**{slug_field: slug})
        try:
            # Get the single item from the filtered queryset
            obj = queryset.get()
        except queryset.model.DoesNotExist:
            raise Http404("No bill found matching the query")
        return obj

    def get_context_data(self, **kwargs):
        context = super(ChicagoBillDetailView, self).get_context_data(**kwargs)
        # Claims (and one specific ordinance) are hidden from search
        # engines via a nofollow flag in the SEO blob.
        bill_classification, = context['object'].classification
        bill_identifier = context['object'].identifier
        if bill_classification in {'claim'} or bill_identifier == 'Or 2013-382':
            context['seo']['nofollow'] = True
        return context
class ChicagoCouncilMembersView(CouncilMembersView):
    """Ward/alderman listing with Chicago-specific SEO metadata."""

    def get_seo_blob(self):
        # Copy the site-wide defaults, then override the page-specific bits.
        seo = dict(settings.SITE_META)
        seo['site_desc'] = "Look up your local Alderman, and see what they're doing in your ward & your city"
        seo['image'] = '/static/images/chicago_map.jpg'
        seo['title'] = 'Wards & Aldermen - Chicago Councilmatic'
        return seo
class ChicagoCouncilmaticFacetedSearchView(CouncilmaticFacetedSearchView):
    """Bill search wiring Chicago's facet set and sort options into haystack."""

    def build_form(self, form_kwargs=None):
        # NOTE(review): 'form' is built but never used -- the method
        # constructs its own form at the bottom. Confirm whether this
        # super() call (which deliberately skips the immediate parent's
        # build_form) is needed only for side effects.
        form = super(CouncilmaticFacetedSearchView, self).build_form(form_kwargs=form_kwargs)
        # For faceted search functionality.
        if form_kwargs is None:
            form_kwargs = {}
        form_kwargs['selected_facets'] = self.request.GET.getlist("selected_facets")
        # For remaining search functionality.
        data = None
        kwargs = {
            'load_all': self.load_all,
        }
        # Base queryset: every facet Chicago exposes, with highlighting.
        sqs = SearchQuerySet().facet('bill_type')\
                              .facet('sponsorships', sort='index')\
                              .facet('controlling_body')\
                              .facet('inferred_status')\
                              .facet('topics')\
                              .facet('legislative_session')\
                              .highlight()
        if form_kwargs:
            kwargs.update(form_kwargs)
        dataDict = {}
        if len(self.request.GET):
            data = self.request.GET
            dataDict = dict(data)
        if self.searchqueryset is not None:
            kwargs['searchqueryset'] = sqs
            # Apply the requested ordering. Query-string values arrive as
            # lists (hence the ['asc'] comparisons); the default when no
            # sort and no free-text query is newest action first.
            if dataDict.get('sort_by'):
                for el in dataDict['sort_by']:
                    if el == 'date':
                        if dataDict.get('order_by') == ['asc']:
                            kwargs['searchqueryset'] = sqs.order_by('last_action_date')
                        else:
                            kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
                    if el == 'title':
                        if dataDict.get('order_by') == ['desc']:
                            kwargs['searchqueryset'] = sqs.order_by('-sort_name')
                        else:
                            kwargs['searchqueryset'] = sqs.order_by('sort_name')
                    if el == 'relevance':
                        kwargs['searchqueryset'] = sqs
            elif dataDict.get('q'):
                # Free-text query with no explicit sort: keep relevance order.
                kwargs['searchqueryset'] = sqs
            else:
                kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
        return self.form_class(data, **kwargs)
class ChicagoPersonDetailView(PersonDetailView):
    """Alderman detail page, augmented with tenure and contact info."""
    template_name = 'chicago/person.html'

    def get_context_data(self, **kwargs):
        context = super(ChicagoPersonDetailView, self).get_context_data(**kwargs)
        person = context['person']
        if person.latest_council_membership:
            start = person.latest_council_membership.start_date_dt
            context['tenure_start'] = start.strftime("%B %d, %Y")
        context['chair_positions'] = person.chair_role_memberships
        if person.slug in settings.CONTACT_INFO:
            # Bind the contact record once instead of re-indexing settings.
            contact = settings.CONTACT_INFO[person.slug]
            context['phone'] = contact['phone']
            context['address'] = contact['address']
            context['twitter_handle'] = contact['twitter']['handle']
            context['twitter_url'] = contact['twitter']['url']
        return context
class EventDetailView(DetailView):
    # Bare detail view for a single ChicagoEvent; template selection and
    # context come from Django's DetailView defaults.
    model = ChicagoEvent
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
      (Explicitly provided instead of being inferred for performance
      reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs

  Raises:
    An exception on error.
  """
  device_name = ctx.device_name
  # pylint: disable=protected-access
  try:
    ctx.ensure_initialized()
    tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
                                        inputs, attrs, num_outputs)
  except core._NotOkStatusException as e:
    # Surface the op's custom name (when given) in the error message.
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    # Re-raise as the mapped public exception type, suppressing the
    # internal status exception as context.
    six.raise_from(core._status_to_exception(e.code, message), None)
  except TypeError as e:
    # A TypeError here may really be a Keras symbolic tensor leaking into
    # eager execution; detect and report that case specially.
    keras_symbolic_tensors = [
        x for x in inputs if ops._is_keras_symbolic_tensor(x)
    ]
    if keras_symbolic_tensors:
      raise core._SymbolicException(
          "Inputs to eager execution function cannot be Keras symbolic "
          "tensors, but found {}".format(keras_symbolic_tensors))
    raise e
  # pylint: enable=protected-access
  return tensors
def execute_with_cancellation(op_name,
                              num_outputs,
                              inputs,
                              attrs,
                              ctx,
                              cancellation_manager,
                              name=None):
  """Execute a TensorFlow operation, subject to cooperative cancellation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch. (Explicitly
      provided instead of being inferred for performance reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    cancellation_manager: a `CancellationManager` object that can be used to
      cancel the operation.
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs

  Raises:
    An exception on error.
  """
  device_name = ctx.device_name
  # pylint: disable=protected-access
  try:
    ctx.ensure_initialized()
    tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(ctx._handle, device_name,
                                                  op_name, inputs, attrs,
                                                  cancellation_manager._impl,
                                                  num_outputs)
  except core._NotOkStatusException as e:
    # Attach the op's custom name (when given) to the error message.
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    six.raise_from(core._status_to_exception(e.code, message), None)
  except TypeError as e:
    # A TypeError may indicate a Keras symbolic tensor leaked into eager
    # execution; report that case with a dedicated exception.
    keras_symbolic_tensors = [
        x for x in inputs if ops._is_keras_symbolic_tensor(x)
    ]
    if keras_symbolic_tensors:
      raise core._SymbolicException(
          "Inputs to eager execution function cannot be Keras symbolic "
          "tensors, but found {}".format(keras_symbolic_tensors))
    raise e
  # pylint: enable=protected-access
  return tensors
def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Monkey-patch to execute to enable execution callbacks."""
  outputs = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
  # Notify every registered op callback after the op has run.
  for cb in ctx.op_callbacks:
    cb(op_name, tuple(inputs), attrs, outputs, name)
  return outputs
execute = quick_execute
def must_record_gradient():
  """Import backprop if you want gradients recorded."""
  # Stub: gradient recording stays disabled until the backprop module
  # swaps in the real implementation.
  return False
def record_gradient(unused_op_name, unused_inputs, unused_attrs,
                    unused_results):
  """Import backprop if you want gradients recorded."""
  # Stub: intentionally a no-op until the backprop module replaces it.
  pass
def make_float(v, arg_name):
  """Coerce `v` to float, rejecting non-real types with a TypeError."""
  if isinstance(v, compat.real_types):
    return float(v)
  raise TypeError("Expected float for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_int(v, arg_name):
  """Coerce `v` to int; string values are rejected outright."""
  if isinstance(v, six.string_types):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  try:
    result = int(v)
  except (ValueError, TypeError):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return result
def make_str(v, arg_name):
  """Validate that `v` is text/bytes and return it UTF-8 encoded."""
  if isinstance(v, compat.bytes_or_text_types):
    return compat.as_bytes(v)  # Convert unicode strings to bytes.
  raise TypeError("Expected string for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_bool(v, arg_name):
  """Return `v` unchanged if it is a genuine bool, else raise TypeError."""
  if isinstance(v, bool):
    return v
  raise TypeError("Expected bool for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_type(v, arg_name):
  """Convert `v` to a DataType enum value (base dtype only)."""
  try:
    base = dtypes.as_dtype(v).base_dtype
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return base.as_datatype_enum
def make_shape(v, arg_name):
  """Convert `v` into a shape list.

  Args:
    v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    arg_name: String, for error messages.

  Returns:
    None if the rank is unknown, otherwise a list of ints (or Nones in the
    positions where the dimension is unknown).
  """
  try:
    shape = tensor_shape.as_shape(v)
  except TypeError as e:
    raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
  except ValueError as e:
    raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
                                                                    e))
  if shape.ndims is None:
    return None
  return shape.as_list()
def make_tensor(v, arg_name):
  """Ensure `v` is a TensorProto, parsing text-format protos when needed."""
  if isinstance(v, tensor_pb2.TensorProto):
    return v
  if isinstance(v, six.string_types):
    parsed = tensor_pb2.TensorProto()
    text_format.Merge(v, parsed)
    return parsed
  raise TypeError(
      "Don't know how to convert %s to a TensorProto for argument '%s'." %
      (repr(v), arg_name))
def args_to_matching_eager(l, ctx, allowed_dtypes, default_dtype=None):
  """Convert sequence `l` to eager same-type Tensors.

  Args:
    l: Sequence of values to convert.
    ctx: The eager context.
    allowed_dtypes: Dtypes the op accepts; used to sanity-check inference.
    default_dtype: Fallback preferred dtype when none can be inferred.

  Returns:
    A (datatype_enum, tensors) pair.
  """
  if (not l) and (default_dtype is not None):
    return default_dtype, []  # List is empty; assume default dtype.
  EagerTensor = ops.EagerTensor  # pylint: disable=invalid-name
  for x in l:
    if not isinstance(x, EagerTensor):
      break
  else:  # note: intentional for-else
    # Fast path: every input is already an EagerTensor; trust the first
    # one's dtype without any conversion.
    return l[0]._datatype_enum(), l  # pylint: disable=protected-access
  # Is some input already a Tensor with a dtype?
  dtype = None
  for t in l:
    if isinstance(t, EagerTensor):
      dtype = t.dtype
      break
  if dtype is None:
    # Infer a dtype based on the first value, and use that dtype for the
    # remaining values.
    ret = []
    for t in l:
      tensor = None
      # First see if we can get a valid dtype with the default conversion
      # and see if it matches an allowed dtypes. Some ops like ConcatV2 may
      # not list allowed dtypes, in which case we should skip this.
      if dtype is None and allowed_dtypes:
        tensor = ops.convert_to_tensor(t, ctx=ctx)
        # If we did not match an allowed dtype, try again with the default
        # dtype. This could be because we have an empty tensor and thus we
        # picked the wrong type.
        if tensor.dtype not in allowed_dtypes:
          tensor = None
      if tensor is None:
        tensor = ops.convert_to_tensor(
            t, dtype, preferred_dtype=default_dtype, ctx=ctx)
      ret.append(tensor)
      if dtype is None:
        # Lock in the first converted dtype for the remaining elements.
        dtype = tensor.dtype
  else:
    ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
  # TODO(slebedev): consider removing this as it leaks a Keras concept.
  # pylint: disable=protected-access
  keras_symbolic_tensors = [x for x in ret if
                            ops._is_keras_symbolic_tensor(x)]
  if keras_symbolic_tensors:
    raise core._SymbolicException(
        "Using symbolic output of a Keras layer during eager execution "
        "{}".format(keras_symbolic_tensors))
  # pylint: enable=protected-access
  return dtype.as_datatype_enum, ret
def convert_to_mixed_eager_tensors(values, ctx):
  """Convert each value to an eager tensor; return (dtype enums, tensors)."""
  tensors = [ops.convert_to_tensor(value, ctx=ctx) for value in values]
  # pylint: disable=protected-access
  enums = [tensor._datatype_enum() for tensor in tensors]
  return enums, tensors
def args_to_mixed_eager_tensors(lists, ctx):
  """Converts a list of same-length lists of values to eager tensors.

  Args:
    lists: at least two lists of values; all lists must have equal length.
    ctx: eager context used for the conversions.

  Returns:
    A `(types, lists_ret)` pair: for each column index i, all converted
    tensors `lists_ret[j][i]` share the dtype recorded in `types[i]`.

  Raises:
    ValueError: if the lists do not all have the same length.
  """
  assert len(lists) > 1
  # Generate an error if len(lists[i]) is not the same for all i.
  lists_ret = []
  for l in lists[1:]:
    if len(l) != len(lists[0]):
      raise ValueError(
          "Expected list arguments to be the same length: %d != %d (%r vs. %r)."
          % (len(lists[0]), len(l), lists[0], l))
    lists_ret.append([])
  # Convert the first element of each list first, then the second element, etc.
  types = []
  for i in range(len(lists[0])):
    dtype = None
    # If any list has a Tensor, use that dtype
    for l in lists:
      if isinstance(l[i], ops.EagerTensor):
        dtype = l[i].dtype
        break
    if dtype is None:
      # Convert the first one and use its dtype.
      lists_ret[0].append(ops.convert_to_tensor(lists[0][i], ctx=ctx))
      dtype = lists_ret[0][i].dtype
      for j in range(1, len(lists)):
        lists_ret[j].append(
            ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
    else:
      # Convert everything to the found dtype.
      for j in range(len(lists)):
        lists_ret[j].append(
            ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
    types.append(dtype.as_datatype_enum)
  return types, lists_ret
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import collections
import copy
import functools
import sys
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient.v1 import client as v1_client
from keystoneauth1 import exceptions as keystone_exception
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
import six
from nova import availability_zones as az
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
# Configuration options consumed from the [cinder] section of nova.conf.
cinder_opts = [
    cfg.StrOpt('catalog_info',
            default='volumev2:cinderv2:publicURL',
            help='Info to match when looking for cinder in the service '
                 'catalog. Format is: separated values of the form: '
                 '<service_type>:<service_name>:<endpoint_type>'),
    cfg.StrOpt('endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node'),
    cfg.IntOpt('http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls'),
    cfg.BoolOpt('cross_az_attach',
                default=True,
                help='Allow attach between instance and volume in different '
                     'availability zones. If False, volumes attached to an '
                     'instance must be in the same availability zone in '
                     'Cinder as the instance availability zone in Nova. '
                     'This also means care should be taken when booting an '
                     'instance from a volume where source is not "volume" '
                     'because Nova will attempt to create a volume using '
                     'the same availability zone as what is assigned to the '
                     'instance. If that AZ is not in Cinder (or '
                     'allow_availability_zone_fallback=False in cinder.conf), '
                     'the volume create request will fail and the instance '
                     'will fail the build request.'),
]
CONF = cfg.CONF
CINDER_OPT_GROUP = 'cinder'
# cinder_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(cinder_opts, group=CINDER_OPT_GROUP)
# Map new keystoneauth session option names to the legacy [cinder] names so
# old configuration files keep working.
deprecated = {'timeout': [cfg.DeprecatedOpt('http_timeout',
                                            group=CINDER_OPT_GROUP)],
              'cafile': [cfg.DeprecatedOpt('ca_certificates_file',
                                           group=CINDER_OPT_GROUP)],
              'insecure': [cfg.DeprecatedOpt('api_insecure',
                                             group=CINDER_OPT_GROUP)]}
ks_loading.register_session_conf_options(CONF,
                                         CINDER_OPT_GROUP,
                                         deprecated_opts=deprecated)
LOG = logging.getLogger(__name__)
# Module-level cache of the keystone session (built lazily in cinderclient()).
_SESSION = None
# Ensures the "Cinder V1 is deprecated" warning is only logged once.
_V1_ERROR_RAISED = False
def reset_globals():
    """Testing method to reset globals.
    """
    # Drop the cached keystone session so the next cinderclient() call
    # rebuilds it from (possibly changed) configuration.
    global _SESSION
    _SESSION = None
def cinderclient(context):
    """Build a cinderclient for the request context.

    The keystone session is created once and cached in the module-level
    _SESSION; the endpoint is resolved either from the configured
    endpoint_template or from the service catalog. Logs a one-time warning
    when the resolved endpoint is the deprecated V1 API.
    """
    global _SESSION
    global _V1_ERROR_RAISED
    if not _SESSION:
        _SESSION = ks_loading.load_session_from_conf_options(CONF,
                                                             CINDER_OPT_GROUP)
    url = None
    endpoint_override = None
    auth = context.get_auth_plugin()
    # catalog_info format: <service_type>:<service_name>:<endpoint_type>
    service_type, service_name, interface = CONF.cinder.catalog_info.split(':')
    service_parameters = {'service_type': service_type,
                          'service_name': service_name,
                          'interface': interface,
                          'region_name': CONF.cinder.os_region_name}
    if CONF.cinder.endpoint_template:
        # Template substitution uses the context dict (e.g. %(project_id)s).
        url = CONF.cinder.endpoint_template % context.to_dict()
        endpoint_override = url
    else:
        url = _SESSION.get_endpoint(auth, **service_parameters)
    # TODO(jamielennox): This should be using proper version discovery from
    # the cinder service rather than just inspecting the URL for certain string
    # values.
    version = cinder_client.get_volume_api_from_url(url)
    if version == '1' and not _V1_ERROR_RAISED:
        msg = _LW('Cinder V1 API is deprecated as of the Juno '
                  'release, and Nova is still configured to use it. '
                  'Enable the V2 API in Cinder and set '
                  'cinder.catalog_info in nova.conf to use it.')
        LOG.warn(msg)
        _V1_ERROR_RAISED = True
    return cinder_client.Client(version,
                                session=_SESSION,
                                auth=auth,
                                endpoint_override=endpoint_override,
                                connect_retries=CONF.cinder.http_retries,
                                **service_parameters)
def _untranslate_volume_summary_view(context, vol):
    """Map a cinderclient volume object onto nova's volume summary dict."""
    d = {
        'id': vol.id,
        'status': vol.status,
        'size': vol.size,
        'availability_zone': vol.availability_zone,
        'created_at': vol.created_at,
        # TODO(jdg): The calling code expects attach_time and mountpoint
        # to be set. When the calling code is more defensive this can be
        # removed.
        'attach_time': "",
        'mountpoint': "",
        'multiattach': getattr(vol, 'multiattach', False),
    }
    if vol.attachments:
        attachments = collections.OrderedDict()
        for attachment in vol.attachments:
            attachments[attachment['server_id']] = {
                'attachment_id': attachment.get('attachment_id'),
                'mountpoint': attachment.get('device'),
            }
        d['attachments'] = attachments
        d['attach_status'] = 'attached'
    else:
        d['attach_status'] = 'detached'
    # NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
    # and use 'description' instead of 'display_description' for volume.
    if hasattr(vol, 'display_name'):
        d['display_name'] = vol.display_name
        d['display_description'] = vol.display_description
    else:
        d['display_name'] = vol.name
        d['display_description'] = vol.description
    # TODO(jdg): Information may be lost in this translation
    d['volume_type_id'] = vol.volume_type
    d['snapshot_id'] = vol.snapshot_id
    d['bootable'] = strutils.bool_from_string(vol.bootable)
    d['volume_metadata'] = dict(vol.metadata.items())
    if hasattr(vol, 'volume_image_metadata'):
        d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
    return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
# 'description' instead of 'display_description' for snapshot.
if hasattr(snapshot, 'display_name'):
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
else:
d['display_name'] = snapshot.name
d['display_description'] = snapshot.description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def translate_cinder_exception(method):
    """Transforms a cinder exception but keeps its traceback intact.

    Connection failures become CinderConnectionFailed, bad requests become
    InvalidInput, and forbidden errors become nova's Forbidden, all re-raised
    with the original traceback.
    """
    @functools.wraps(method)
    def wrapper(self, ctx, *args, **kwargs):
        try:
            res = method(self, ctx, *args, **kwargs)
        except (cinder_exception.ConnectionError,
                keystone_exception.ConnectionError):
            _, exc_value, exc_trace = sys.exc_info()
            exc_value = exception.CinderConnectionFailed(
                reason=six.text_type(exc_value))
            # BUG FIX: six.reraise takes (type, value, traceback); passing the
            # instance as the first argument only worked under Python 2.
            six.reraise(type(exc_value), exc_value, exc_trace)
        except (keystone_exception.BadRequest,
                cinder_exception.BadRequest):
            _, exc_value, exc_trace = sys.exc_info()
            exc_value = exception.InvalidInput(
                reason=six.text_type(exc_value))
            six.reraise(type(exc_value), exc_value, exc_trace)
        except (keystone_exception.Forbidden,
                cinder_exception.Forbidden):
            _, exc_value, exc_trace = sys.exc_info()
            exc_value = exception.Forbidden(message=six.text_type(exc_value))
            six.reraise(type(exc_value), exc_value, exc_trace)
        return res
    return wrapper
def translate_volume_exception(method):
    """Transforms the exception for the volume but keeps its traceback intact.

    NotFound errors become VolumeNotFound for the given volume_id; everything
    else is re-raised unchanged (then further translated by
    translate_cinder_exception).
    """
    # functools.wraps preserves the decorated method's name/docstring; without
    # it the outer translate_cinder_exception would expose 'wrapper' instead.
    @functools.wraps(method)
    def wrapper(self, ctx, volume_id, *args, **kwargs):
        try:
            res = method(self, ctx, volume_id, *args, **kwargs)
        except (cinder_exception.ClientException,
                keystone_exception.ClientException):
            _, exc_value, exc_trace = sys.exc_info()
            if isinstance(exc_value, (keystone_exception.NotFound,
                                      cinder_exception.NotFound)):
                exc_value = exception.VolumeNotFound(volume_id=volume_id)
            # BUG FIX: six.reraise takes (type, value, traceback); passing the
            # instance as the first argument only worked under Python 2.
            six.reraise(type(exc_value), exc_value, exc_trace)
        return res
    return translate_cinder_exception(wrapper)
def translate_snapshot_exception(method):
    """Transforms the exception for the snapshot but keeps its traceback
    intact.

    NotFound errors become SnapshotNotFound for the given snapshot_id;
    everything else is re-raised unchanged (then further translated by
    translate_cinder_exception).
    """
    # functools.wraps preserves the decorated method's name/docstring; without
    # it the outer translate_cinder_exception would expose 'wrapper' instead.
    @functools.wraps(method)
    def wrapper(self, ctx, snapshot_id, *args, **kwargs):
        try:
            res = method(self, ctx, snapshot_id, *args, **kwargs)
        except (cinder_exception.ClientException,
                keystone_exception.ClientException):
            _, exc_value, exc_trace = sys.exc_info()
            if isinstance(exc_value, (keystone_exception.NotFound,
                                      cinder_exception.NotFound)):
                exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
            # BUG FIX: six.reraise takes (type, value, traceback); passing the
            # instance as the first argument only worked under Python 2.
            six.reraise(type(exc_value), exc_value, exc_trace)
        return res
    return translate_cinder_exception(wrapper)
class API(object):
    """API for interacting with the volume manager."""
    @translate_volume_exception
    def get(self, context, volume_id):
        """Return the volume as nova's summary dict."""
        item = cinderclient(context).volumes.get(volume_id)
        return _untranslate_volume_summary_view(context, item)
    @translate_cinder_exception
    def get_all(self, context, search_opts=None):
        """Return all volumes matching search_opts as summary dicts."""
        search_opts = search_opts or {}
        items = cinderclient(context).volumes.list(detailed=True,
                                                   search_opts=search_opts)
        rval = []
        for item in items:
            rval.append(_untranslate_volume_summary_view(context, item))
        return rval
    def check_attached(self, context, volume):
        """Raise InvalidVolume unless the volume status is 'in-use'."""
        if volume['status'] != "in-use":
            msg = _("volume '%(vol)s' status must be 'in-use'. Currently in "
                    "'%(status)s' status") % {"vol": volume['id'],
                                              "status": volume['status']}
            raise exception.InvalidVolume(reason=msg)
    def check_attach(self, context, volume, instance=None):
        """Raise InvalidVolume if the volume cannot be attached.

        Checks status, attach_status, and (when cross_az_attach is disabled)
        that the instance and volume share an availability zone.
        """
        # TODO(vish): abstract status checking?
        if volume['status'] != "available":
            msg = _("volume '%(vol)s' status must be 'available'. Currently "
                    "in '%(status)s'") % {'vol': volume['id'],
                                          'status': volume['status']}
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == "attached":
            msg = _("volume %s already attached") % volume['id']
            raise exception.InvalidVolume(reason=msg)
        if instance and not CONF.cinder.cross_az_attach:
            instance_az = az.get_instance_availability_zone(context, instance)
            if instance_az != volume['availability_zone']:
                msg = _("Instance %(instance)s and volume %(vol)s are not in "
                        "the same availability_zone. Instance is in "
                        "%(ins_zone)s. Volume is in %(vol_zone)s") % {
                            "instance": instance['id'],
                            "vol": volume['id'],
                            'ins_zone': instance_az,
                            'vol_zone': volume['availability_zone']}
                raise exception.InvalidVolume(reason=msg)
    def check_detach(self, context, volume, instance=None):
        """Raise if the volume cannot be detached (from this instance)."""
        # TODO(vish): abstract status checking?
        if volume['status'] == "available":
            msg = _("volume %s already detached") % volume['id']
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == 'detached':
            msg = _("Volume must be attached in order to detach.")
            raise exception.InvalidVolume(reason=msg)
        # NOTE(ildikov):Preparation for multiattach support, when a volume
        # can be attached to multiple hosts and/or instances,
        # so just check the attachment specific to this instance
        if instance is not None and instance.uuid not in volume['attachments']:
            # TODO(ildikov): change it to a better exception, when enable
            # multi-attach.
            raise exception.VolumeUnattached(volume_id=volume['id'])
    @translate_volume_exception
    def reserve_volume(self, context, volume_id):
        """Mark the volume as being attached in cinder."""
        cinderclient(context).volumes.reserve(volume_id)
    @translate_volume_exception
    def unreserve_volume(self, context, volume_id):
        """Undo a reserve_volume() call."""
        cinderclient(context).volumes.unreserve(volume_id)
    @translate_volume_exception
    def begin_detaching(self, context, volume_id):
        """Mark the volume as detaching in cinder."""
        cinderclient(context).volumes.begin_detaching(volume_id)
    @translate_volume_exception
    def roll_detaching(self, context, volume_id):
        """Roll back a begin_detaching() call."""
        cinderclient(context).volumes.roll_detaching(volume_id)
    @translate_volume_exception
    def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
        """Record the volume attachment in cinder."""
        cinderclient(context).volumes.attach(volume_id, instance_uuid,
                                             mountpoint, mode=mode)
    @translate_volume_exception
    def detach(self, context, volume_id, instance_uuid=None,
               attachment_id=None):
        """Detach the volume.

        For multiattach volumes, attempts to look up the attachment_id for
        instance_uuid when one was not supplied; without an attachment_id
        cinder most probably cannot perform the detach (a warning is logged).
        """
        if attachment_id is None:
            volume = self.get(context, volume_id)
            if volume['multiattach']:
                attachments = volume.get('attachments', {})
                if instance_uuid:
                    attachment_id = attachments.get(instance_uuid, {}).\
                        get('attachment_id')
                    if not attachment_id:
                        LOG.warning(_LW("attachment_id couldn't be retrieved "
                                        "for volume %(volume_id)s with "
                                        "instance_uuid %(instance_id)s. The "
                                        "volume has the 'multiattach' flag "
                                        "enabled, without the attachment_id "
                                        "Cinder most probably cannot perform "
                                        "the detach."),
                                    {'volume_id': volume_id,
                                     'instance_id': instance_uuid})
                else:
                    LOG.warning(_LW("attachment_id couldn't be retrieved for "
                                    "volume %(volume_id)s. The volume has the "
                                    "'multiattach' flag enabled, without the "
                                    "attachment_id Cinder most probably "
                                    "cannot perform the detach."),
                                {'volume_id': volume_id})
        cinderclient(context).volumes.detach(volume_id, attachment_id)
    @translate_volume_exception
    def initialize_connection(self, context, volume_id, connector):
        """Initialize the cinder connection; on failure try to clean up.

        Returns the connection_info dict with the connector added under the
        'connector' key.
        """
        try:
            connection_info = cinderclient(
                context).volumes.initialize_connection(volume_id, connector)
            connection_info['connector'] = connector
            return connection_info
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Initialize connection failed for volume '
                              '%(vol)s on host %(host)s. Error: %(msg)s '
                              'Code: %(code)s. Attempting to terminate '
                              'connection.'),
                          {'vol': volume_id,
                           'host': connector.get('host'),
                           'msg': six.text_type(ex),
                           'code': ex.code})
                try:
                    self.terminate_connection(context, volume_id, connector)
                except Exception as exc:
                    # Best-effort cleanup: log and let the original
                    # exception propagate via save_and_reraise_exception.
                    LOG.error(_LE('Connection between volume %(vol)s and host '
                                  '%(host)s might have succeeded, but attempt '
                                  'to terminate connection has failed. '
                                  'Validate the connection and determine if '
                                  'manual cleanup is needed. Error: %(msg)s '
                                  'Code: %(code)s.'),
                              {'vol': volume_id,
                               'host': connector.get('host'),
                               'msg': six.text_type(exc),
                               'code': (
                                   exc.code if hasattr(exc, 'code') else None)})
    @translate_volume_exception
    def terminate_connection(self, context, volume_id, connector):
        """Terminate the cinder connection for the given connector."""
        return cinderclient(context).volumes.terminate_connection(volume_id,
                                                                  connector)
    @translate_cinder_exception
    def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
                                  error=False):
        """Complete (or error out) a volume migration in cinder."""
        return cinderclient(context).volumes.migrate_volume_completion(
            old_volume_id, new_volume_id, error)
    @translate_cinder_exception
    def create(self, context, size, name, description, snapshot=None,
               image_id=None, volume_type=None, metadata=None,
               availability_zone=None):
        """Create a volume and return it as nova's summary dict.

        Raises OverQuota when cinder reports the volume quota is exceeded.
        """
        client = cinderclient(context)
        if snapshot is not None:
            snapshot_id = snapshot['id']
        else:
            snapshot_id = None
        kwargs = dict(snapshot_id=snapshot_id,
                      volume_type=volume_type,
                      user_id=context.user_id,
                      project_id=context.project_id,
                      availability_zone=availability_zone,
                      metadata=metadata,
                      imageRef=image_id)
        # v1 and v2 clients name the display fields differently.
        if isinstance(client, v1_client.Client):
            kwargs['display_name'] = name
            kwargs['display_description'] = description
        else:
            kwargs['name'] = name
            kwargs['description'] = description
        try:
            item = client.volumes.create(size, **kwargs)
            return _untranslate_volume_summary_view(context, item)
        except cinder_exception.OverLimit:
            raise exception.OverQuota(overs='volumes')
    @translate_volume_exception
    def delete(self, context, volume_id):
        """Delete the volume in cinder."""
        cinderclient(context).volumes.delete(volume_id)
    @translate_volume_exception
    def update(self, context, volume_id, fields):
        """Not implemented."""
        raise NotImplementedError()
    @translate_snapshot_exception
    def get_snapshot(self, context, snapshot_id):
        """Return the snapshot as nova's summary dict."""
        item = cinderclient(context).volume_snapshots.get(snapshot_id)
        return _untranslate_snapshot_summary_view(context, item)
    @translate_cinder_exception
    def get_all_snapshots(self, context):
        """Return all snapshots as summary dicts."""
        items = cinderclient(context).volume_snapshots.list(detailed=True)
        rvals = []
        for item in items:
            rvals.append(_untranslate_snapshot_summary_view(context, item))
        return rvals
    @translate_volume_exception
    def create_snapshot(self, context, volume_id, name, description):
        """Create a snapshot of the volume (fails if volume is attached)."""
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             False,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)
    @translate_volume_exception
    def create_snapshot_force(self, context, volume_id, name, description):
        """Create a snapshot of the volume, forcing even if attached."""
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             True,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)
    @translate_snapshot_exception
    def delete_snapshot(self, context, snapshot_id):
        """Delete the snapshot in cinder."""
        cinderclient(context).volume_snapshots.delete(snapshot_id)
    @translate_cinder_exception
    def get_volume_encryption_metadata(self, context, volume_id):
        """Return the volume's encryption metadata from cinder."""
        return cinderclient(context).volumes.get_encryption_metadata(volume_id)
    @translate_snapshot_exception
    def update_snapshot_status(self, context, snapshot_id, status):
        """Update the snapshot status, reporting nova's progress as 90%."""
        vs = cinderclient(context).volume_snapshots
        # '90%' here is used to tell Cinder that Nova is done
        # with its portion of the 'creating' state. This can
        # be removed when we are able to split the Cinder states
        # into 'creating' and a separate state of
        # 'creating_in_nova'. (Same for 'deleting' state.)
        vs.update_snapshot_status(
            snapshot_id,
            {'status': status,
             'progress': '90%'}
        )
| |
import pygame
import render
import theme
import callback
import resource
import focus
import kvc
class View(object):
    """A rectangular portion of the window.
    Views may have zero or more child views contained within it.
    Signals
    on_mouse_down(view, button, point)
    on_mouse_up(view, button, point)
    on_mouse_motion(view, point)
    on_mouse_drag(view, point, delta)
    on_key_down(view, key, code)
    on_key_up(view, key)
    on_parented(view)
    on_orphaned(view) (from parent view)
    on_focused(view)
    on_blurred(view)
    on_selected(view)
    on_enabled(view)
    on_disabled(view)
    on_state_changed(view)
    All mouse points passed to event methods and to slots are in local
    view coordinates. Use `to_parent` and `to_window` to convert.
    """
    def __init__(self, frame=None):
        self.frame = frame
        self.parent = None
        self.children = []  # back->front
        self._state = 'normal'
        self._enabled = True
        self.hidden = False
        self.draggable = False
        self.shadow_image = None
        # Signals fired for the corresponding events; see class docstring.
        self.on_focused = callback.Signal()
        self.on_blurred = callback.Signal()
        self.on_selected = callback.Signal()
        self.on_enabled = callback.Signal()
        self.on_disabled = callback.Signal()
        self.on_state_changed = callback.Signal()
        self.on_mouse_up = callback.Signal()
        self.on_mouse_down = callback.Signal()
        self.on_mouse_motion = callback.Signal()
        self.on_mouse_drag = callback.Signal()
        self.on_key_down = callback.Signal()
        self.on_key_up = callback.Signal()
        self.on_parented = callback.Signal()
        self.on_orphaned = callback.Signal()
    def layout(self):
        """Call to have the view layout itself.
        Subclasses should invoke this after laying out child
        views and/or updating its own frame.
        """
        # NOTE(review): `self.shadowed` is presumably injected by the theme
        # via stylize(); layout() before styling would raise AttributeError.
        if self.shadowed:
            shadow_size = theme.current.shadow_size
            shadowed_frame_size = (self.frame.w + shadow_size,
                                   self.frame.h + shadow_size)
            self.surface = pygame.Surface(
                shadowed_frame_size, pygame.SRCALPHA, 32)
            shadow_image = resource.get_image('shadow')
            self.shadow_image = resource.scale_image(shadow_image,
                                                     shadowed_frame_size)
        else:
            self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
            self.shadow_image = None
    def size_to_fit(self):
        """Grow the frame to the union of all child frames, then relayout."""
        rect = self.frame
        for child in self.children:
            rect = rect.union(child.frame)
        self.frame = rect
        self.layout()
    def update(self, dt):
        """Advance all children by dt seconds."""
        for child in self.children:
            child.update(dt)
    def to_parent(self, point):
        """Convert a local point into the parent's coordinate space."""
        return (point[0] + self.frame.topleft[0],
                point[1] + self.frame.topleft[1])
    def from_parent(self, point):
        """Convert a point in the parent's coordinate space to local."""
        return (point[0] - self.frame.topleft[0],
                point[1] - self.frame.topleft[1])
    def from_window(self, point):
        """Convert a window point to local coordinates (root first)."""
        curr = self
        ancestors = [curr]
        while curr.parent:
            ancestors.append(curr.parent)
            curr = curr.parent
        for a in reversed(ancestors):
            point = a.from_parent(point)
        return point
    def to_window(self, point):
        """Convert a local point to window coordinates."""
        curr = self
        while curr:
            point = curr.to_parent(point)
            curr = curr.parent
        return point
    def mouse_up(self, button, point):
        self.on_mouse_up(self, button, point)
    def mouse_down(self, button, point):
        self.on_mouse_down(self, button, point)
    def mouse_motion(self, point):
        self.on_mouse_motion(self, point)
    # only called on drag event if .draggable is True
    def mouse_drag(self, point, delta):
        self.on_mouse_drag(self, point, delta)
        self.frame.topleft = (self.frame.topleft[0] + delta[0],
                              self.frame.topleft[1] + delta[1])
        if self.parent:
            self.parent._child_dragged(self)
    def key_down(self, key, code):
        self.on_key_down(self, key, code)
    def key_up(self, key):
        self.on_key_up(self, key)
    @property
    def state(self):
        """The state of the view.
        Potential values are 'normal', 'focused', 'selected', 'disabled'.
        """
        return self._state
    @state.setter
    def state(self, new_state):
        if self._state != new_state:
            self._state = new_state
            self.stylize()
            self.on_state_changed()
    def focus(self):
        """Request keyboard focus for this view."""
        focus.set(self)
    def has_focus(self):
        return focus.view == self
    def focused(self):
        """Callback: the view gained focus."""
        self.state = 'focused'
        self.on_focused()
    def blurred(self):
        """Callback: the view lost focus."""
        self.state = 'normal'
        self.on_blurred()
    def selected(self):
        """Callback: the view was selected."""
        self.state = 'selected'
        self.on_selected()
    @property
    def enabled(self):
        return self._enabled
    @enabled.setter
    def enabled(self, yesno):
        if self._enabled != yesno:
            self._enabled = yesno
            if yesno:
                self._did_enable()
            else:
                self.disabled()
    def _did_enable(self):
        """Callback: the view became enabled.
        BUG FIX: this method used to be named `enabled`, which redefined
        the class attribute and silently destroyed the `enabled` property
        declared above (so `view.enabled` returned a bound method and the
        setter was never installed). Renamed to restore the property.
        """
        self.state = 'normal'
        self.on_enabled()
    def disabled(self):
        """Callback: the view became disabled."""
        self.state = 'disabled'
        self.on_disabled()
    def stylize(self):
        """Apply theme style attributes to this instance and its children.
        This also causes a relayout to occur so that any changes in padding
        or other stylistic attributes may be handled.
        """
        # do children first in case parent needs to override their style
        for child in self.children:
            child.stylize()
        style = theme.current.get_dict(self)
        # BUG FIX: was a bare try/except around getattr, which swallowed
        # every error; the getattr default expresses the intent directly.
        preserve_child = getattr(theme.current, 'preserve_child', False)
        # BUG FIX: iteritems() is Python 2 only; items() works on 2 and 3.
        for key, val in style.items():
            kvc.set_value_for_keypath(self, key, val, preserve_child)
        self.layout()
    def draw(self):
        """Do not call directly.
        Renders this view's background and composites each visible child's
        surface (plus optional shadow and border) onto self.surface.
        Returns False when hidden, True otherwise.
        """
        if self.hidden:
            return False
        if self.background_color is not None:
            render.fillrect(self.surface, self.background_color,
                            rect=pygame.Rect((0, 0), self.frame.size))
        for child in self.children:
            if not child.hidden:
                child.draw()
                topleft = child.frame.topleft
                if child.shadowed:
                    shadow_size = theme.current.shadow_size
                    # shadow is centered around the child's frame
                    shadow_topleft = (topleft[0] - shadow_size // 2,
                                      topleft[1] - shadow_size // 2)
                    self.surface.blit(child.shadow_image, shadow_topleft)
                self.surface.blit(child.surface, topleft)
                if child.border_color and child.border_widths is not None:
                    if (type(child.border_widths) is int and
                            child.border_widths > 0):
                        # uniform border
                        pygame.draw.rect(self.surface, child.border_color,
                                         child.frame, child.border_widths)
                    else:
                        # per-side borders drawn as individual lines
                        tw, lw, bw, rw = child.get_border_widths()
                        tl = (child.frame.left, child.frame.top)
                        tr = (child.frame.right - 1, child.frame.top)
                        bl = (child.frame.left, child.frame.bottom - 1)
                        br = (child.frame.right - 1, child.frame.bottom - 1)
                        if tw > 0:
                            pygame.draw.line(self.surface, child.border_color,
                                             tl, tr, tw)
                        if lw > 0:
                            pygame.draw.line(self.surface, child.border_color,
                                             tl, bl, lw)
                        if bw > 0:
                            pygame.draw.line(self.surface, child.border_color,
                                             bl, br, bw)
                        if rw > 0:
                            pygame.draw.line(self.surface, child.border_color,
                                             tr, br, rw)
        return True
    def get_border_widths(self):
        """Return border width for each side top, left, bottom, right."""
        if type(self.border_widths) is int:  # uniform size
            return [self.border_widths] * 4
        return self.border_widths
    def hit(self, pt):
        """Find the view (self, child, or None) under the point `pt`."""
        if self.hidden or not self._enabled:
            return None
        if not self.frame.collidepoint(pt):
            return None
        local_pt = (pt[0] - self.frame.topleft[0],
                    pt[1] - self.frame.topleft[1])
        for child in reversed(self.children):  # front to back
            hit_view = child.hit(local_pt)
            if hit_view is not None:
                return hit_view
        return self
    def center(self):
        """Center this view within its parent's frame."""
        if self.parent is not None:
            self.frame.center = (self.parent.frame.w // 2,
                                 self.parent.frame.h // 2)
    def add_child(self, child):
        """Append child (front-most); re-adding moves it to the front."""
        assert child is not None
        self.rm_child(child)
        self.children.append(child)
        child.parent = self
        child.parented()
        # deferred import to avoid a circular dependency with scene
        import scene
        if scene.current is not None:
            child.stylize()
    def rm_child(self, child):
        """Remove child if present, notifying it via orphaned()."""
        for index, ch in enumerate(self.children):
            if ch == child:
                ch.orphaned()
                del self.children[index]
                break
    def rm(self):
        """Remove this view from its parent, if any."""
        if self.parent:
            self.parent.rm_child(self)
    def parented(self):
        self.on_parented()
    def orphaned(self):
        self.on_orphaned()
    def iter_ancestors(self):
        """Yield parent, grandparent, ... up to the root."""
        curr = self
        while curr.parent:
            yield curr.parent
            curr = curr.parent
    def iter_children(self):
        for child in self.children:
            yield child
    def bring_to_front(self):
        """Swap this view with the front-most sibling (drawn last)."""
        if self.parent is not None:
            ch = self.parent.children
            index = ch.index(self)
            ch[-1], ch[index] = ch[index], ch[-1]
    def move_to_back(self):
        """Swap this view with the back-most sibling (drawn first)."""
        if self.parent is not None:
            ch = self.parent.children
            index = ch.index(self)
            ch[0], ch[index] = ch[index], ch[0]
| |
#!/usr/bin/env python3
'''Take a prefix and model name run predictions, and generate evaluations for crystal, bestonly,
and all test sets (take max affinity; if pose score is available also consider
max pose score).
Generates graphs and overall CV results. Takes the prefix and (for now) assumes trial 0.
Will evaluate 100k model and best model prior to 100k, 50k and 25k
'''
import numpy as np
import os, sys
#os.environ["GLOG_minloglevel"] = "10"
sys.path.append("/home/dkoes/git/gninascripts/")
sys.path.append("/net/pulsar/home/koes/dkoes/git/gninascripts/")
import train, predict
import matplotlib, caffe
import matplotlib.pyplot as plt
import glob, re, sklearn, collections, argparse, sys
import sklearn.metrics
import scipy.stats
def evaluate_fold(testfile, caffemodel, modelname, datadir='../..', hasrmsd=False):
    '''Evaluate the passed model and the specified test set.
    Returns tuple:
    (correct, prediction, receptor, ligand, label (optional), posescore (optional))
    label and posescore are only provided is trained on pose data
    '''
    if not os.path.exists(modelname):
        print(modelname, "does not exist")
        # BUG FIX: previously only printed and fell through, crashing later
        # with a confusing error; abort cleanly instead.
        sys.exit(1)
    caffe.set_mode_gpu()
    # per-process prototxt so concurrent runs do not clobber each other
    test_model = 'predict.%d.prototxt' % os.getpid()
    train.write_model_file(test_model, modelname, testfile, testfile, datadir)
    test_net = caffe.Net(test_model, caffemodel, caffe.TEST)
    lines = open(testfile).readlines()
    res = None
    i = 0  # index in batch
    correct = 0
    prediction = 0
    receptor = ''
    ligand = ''
    label = 0
    posescore = -1  # stays -1 for affinity-only models (no 'output' blob)
    ret = []
    for line in lines:
        # check if we need a new batch of results
        if not res or i >= batch_size:
            res = test_net.forward()
            if 'output' in res:
                batch_size = res['output'].shape[0]
            else:
                batch_size = res['affout'].shape[0]
            i = 0
        if 'labelout' in res:
            label = float(res['labelout'][i])
        if 'output' in res:
            posescore = float(res['output'][i][1])
        if 'affout' in res:
            correct = float(res['affout'][i])
        if 'predaff' in res:
            prediction = float(res['predaff'][i])
            if not np.isfinite(prediction).all():
                os.remove(test_model)
                return []  # gracefully handle nan?
        # extract ligand/receptor for input file
        tokens = line.split()
        rmsd = -1
        for t in range(len(tokens)):
            if tokens[t].lower() == 'none':
                # Flag that none as the receptor file, for ligand-only models
                ligand = tokens[t+1]
                # we assume that ligand is rec/<ligname>
                # set if correct, bail if not.
                m = re.search(r'(\S+)/(\S+)gninatypes', ligand)
                # Check that the match is not none, and that ligand ends in gninatypes
                if m is not None:
                    receptor = m.group(1)
                else:
                    print('Error: none receptor detected and ligand is improperly formatted.')
                    print('Ligand must be formatted: <rec>/<ligfile>.gninatypes')
                    print('Bailing.')
                    sys.exit(1)
                break
            elif tokens[t].endswith('gninatypes'):
                receptor = tokens[t]
                ligand = tokens[t+1]
                break
        if hasrmsd:
            rmsd = float(tokens[2])
        # (correct, prediction, receptor, ligand, label (optional), posescore (optional))
        if posescore < 0:
            ret.append((correct, prediction, receptor, ligand))
        elif hasrmsd:
            ret.append((correct, prediction, receptor, ligand, label, posescore, rmsd))
        else:
            ret.append((correct, prediction, receptor, ligand, label, posescore))
        i += 1  # batch index
    os.remove(test_model)
    return ret
def reduce_results(results, index):
    '''Return results with only one tuple for every receptor value,
    taking the one with the max value at index in the tuple (predicted affinity or pose score)
    '''
    best_per_receptor = {}
    for entry in results:
        receptor = entry[2]  # receptor name is the third tuple element
        current = best_per_receptor.get(receptor)
        # strict comparison keeps the earliest entry on ties
        if current is None or current[index] < entry[index]:
            best_per_receptor[receptor] = entry
    return list(best_per_receptor.values())
def analyze_results(results, outname, uniquify=None):
    '''Compute error metrics from resuls. RMSE, Pearson, Spearman.
    If uniquify is set, AUC and top-1 percentage are also computed,
    uniquify can be None, 'affinity', or 'pose' and is set with
    the all training set to select the pose used for scoring.
    Returns tuple:
    (RMSE, Pearson, Spearman, AUCpose, AUCaffinity, top-1)
    Writes (correct,prediction) pairs to outname.predictions
    '''
    # NOTE(review): assumes results is non-empty; an empty list would raise
    # IndexError/ValueError below.
    # calc auc before reduction (AUCs use every pose, not just the best)
    if uniquify and len(results[0]) > 5:
        labels = np.array([r[4] for r in results])
        posescores = np.array([r[5] for r in results])
        predictions = np.array([r[1] for r in results])
        aucpose = sklearn.metrics.roc_auc_score(labels, posescores)
        aucaff = sklearn.metrics.roc_auc_score(labels, predictions)
        if uniquify == 'affinity':
            results = reduce_results(results, 1)  # max predicted affinity
        elif uniquify == 'pose':
            results = reduce_results(results, 5)  # max pose score
    predictions = np.array([r[1] for r in results])
    # abs() because decoys may carry negated affinities
    correctaff = np.array([abs(r[0]) for r in results])
    # (correct, prediction, receptor, ligand, label (optional), posescore (optional))
    rmse = np.sqrt(sklearn.metrics.mean_squared_error(correctaff, predictions))
    R = scipy.stats.pearsonr(correctaff, predictions)[0]
    S = scipy.stats.spearmanr(correctaff, predictions)[0]
    # BUG FIX: the output file was never closed; use a context manager so the
    # predictions are flushed even if a later computation fails.
    with open('%s.predictions' % outname, 'w') as out:
        for (c, p) in zip(correctaff, predictions):
            out.write('%f %f\n' % (c, p))
        out.write('#RMSD %f\n' % rmse)
        out.write('#R %f\n' % R)
    if uniquify and len(results[0]) > 5:
        labels = np.array([r[4] for r in results])
        # fraction of receptors whose selected pose is a true positive
        top = np.count_nonzero(labels > 0) / float(len(labels))
        return (rmse, R, S, aucpose, aucaff, top)
    else:
        return (rmse, R, S)
if __name__ == '__main__':
    # Usage: <caffemodel prefix> <modelname> <outname> <testprefix>...
    if len(sys.argv) <= 4:
        print("Need caffemodel prefix, modelname, output name and test prefixes (which should include _<slicenum>_ at end)")
        sys.exit(1)
    name = sys.argv[1]
    modelname = sys.argv[2]
    out = open(sys.argv[3], 'w')
    allresults = []
    # for each test dataset
    for testprefix in sys.argv[4:]:
        m = re.search(r'([^/ ]*)_(\d+)_$', testprefix)
        print(m, testprefix)
        if not m:
            print(testprefix, "does not end in slicenum")
            # BUG FIX: previously fell through to m.group(2) and raised
            # AttributeError; exit with a clean error instead.
            sys.exit(1)
        slicenum = int(m.group(2))
        testname = m.group(1)
        # find the relevant models for each fold
        testresults = {'best25': [], 'best50': [], 'best100': [], 'last': [], 'best250': []}
        for fold in [0, 1, 2]:
            best25 = 0
            best50 = 0
            best100 = 0
            best250 = 0
            lastm = 0
            # identify best iteration models at each cut point for this fold
            for model in glob.glob('%s.%d_iter_*.caffemodel' % (name, fold)):
                m = re.search(r'_iter_(\d+).caffemodel', model)
                inum = int(m.group(1))
                if inum < 25000 and inum > best25:
                    best25 = inum
                if inum < 50000 and inum > best50:
                    best50 = inum
                if inum < 100000 and inum > best100:
                    best100 = inum
                if inum < 250000 and inum > best250:
                    best250 = inum
                if inum > lastm:
                    lastm = inum
            # evalute this fold
            testfile = '../types/%stest%d.types' % (testprefix, fold)
            # todo, avoid redundant repetitions
            if best25 > 0: testresults['best25'] += evaluate_fold(testfile, '%s.%d_iter_%d.caffemodel' % (name, fold, best25), modelname)
            if best50 > 0: testresults['best50'] += evaluate_fold(testfile, '%s.%d_iter_%d.caffemodel' % (name, fold, best50), modelname)
            if best100 > 0: testresults['best100'] += evaluate_fold(testfile, '%s.%d_iter_%d.caffemodel' % (name, fold, best100), modelname)
            if best250 > 0: testresults['best250'] += evaluate_fold(testfile, '%s.%d_iter_%d.caffemodel' % (name, fold, best250), modelname)
            if lastm > 0: testresults['last'] += evaluate_fold(testfile, '%s.%d_iter_%d.caffemodel' % (name, fold, lastm), modelname)
        for n in list(testresults.keys()):
            if len(testresults[n]) == 0:
                continue
            # NOTE(review): only 6-tuple (pose-trained) results are analyzed;
            # affinity-only 4-tuples are silently skipped — confirm intended.
            if len(testresults[n][0]) == 6:
                allresults.append(('%s_pose' % testname, n) + analyze_results(testresults[n], ('%s_pose_' % testname) + name + '_' + n, 'pose'))
                allresults.append(('%s_affinity' % testname, n) + analyze_results(testresults[n], ('%s_affinity_' % testname) + name + '_' + n, 'affinity'))
    for a in allresults:
        out.write(' '.join(map(str, a)) + '\n')
| |
########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import wagon
from cloudify_rest_client.constants import VISIBILITY_EXCEPT_PRIVATE
from .. import utils
from ..table import print_data, print_single
from ..cli import helptexts, cfy
from ..utils import (prettify_client_error,
get_visibility,
validate_visibility)
# Columns shown for `cfy plugins list` / `cfy plugins get`.
PLUGIN_COLUMNS = ['id', 'package_name', 'package_version', 'distribution',
                  'supported_platform', 'distribution_release', 'uploaded_at',
                  'visibility', 'tenant_name', 'created_by', 'yaml_url_path']
# Extra columns appended only when --get-data is requested.
GET_DATA_COLUMNS = ['file_server_path']
# REST-response fields stripped before display (see _transform_plugin_response).
EXCLUDED_COLUMNS = ['archive_name', 'distribution_version', 'excluded_wheels',
                    'package_source', 'supported_py_versions', 'wheels']
@cfy.group(name='plugins')
@cfy.options.common_options
def plugins():
    """Handle plugins on the manager
    """
@plugins.command(name='validate',
                 short_help='Validate a plugin')
@cfy.argument('plugin-path')
@cfy.options.common_options
@cfy.pass_logger
def validate(plugin_path, logger):
    """Validate a plugin

    This will try to validate the plugin's archive is not corrupted.
    A valid plugin is a wagon (http://github.com/cloudify-cosmo/wagon)
    in the tar.gz format.

    `PLUGIN_PATH` is the path to wagon archive to validate.
    """
    logger.info('Validating plugin {0}...'.format(plugin_path))
    # wagon.validate raises if the archive is corrupted or not a wagon.
    wagon.validate(plugin_path)
    logger.info('Plugin validated successfully')
@plugins.command(name='delete',
                 short_help='Delete a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.force(help=helptexts.FORCE_DELETE_PLUGIN)
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(plugin_id, force, logger, client, tenant_name):
    """Delete a plugin from the manager

    `PLUGIN_ID` is the id of the plugin to delete.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Deleting plugin {0}...'.format(plugin_id))
    # `force` is forwarded to the manager to allow deletion in more cases
    # (see helptexts.FORCE_DELETE_PLUGIN for the exact semantics).
    client.plugins.delete(plugin_id=plugin_id, force=force)
    logger.info('Plugin deleted')
@plugins.command(name='upload',
                 short_help='Upload a plugin [manager only]')
@cfy.argument('plugin-path')
@cfy.options.plugin_yaml_path()
@cfy.options.private_resource
@cfy.options.visibility()
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_context
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def upload(ctx,
           plugin_path,
           yaml_path,
           private_resource,
           visibility,
           logger,
           client,
           tenant_name):
    """Upload a plugin to the manager

    `PLUGIN_PATH` is the path to wagon archive to upload.
    """
    # The wagon and its YAML descriptor are fetched to local (temp) files,
    # zipped together and uploaded to the manager as a single archive.
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Creating plugin zip archive..')
    wagon_path = utils.get_local_path(plugin_path, create_temp=True)
    yaml_path = utils.get_local_path(yaml_path, create_temp=True)
    zip_path = utils.zip_files([wagon_path, yaml_path])
    progress_handler = utils.generate_progress_handler(zip_path, '')
    visibility = get_visibility(private_resource, visibility, logger)
    logger.info('Uploading plugin archive (wagon + yaml)..')
    try:
        plugin = client.plugins.upload(zip_path,
                                       visibility,
                                       progress_handler)
        logger.info("Plugin uploaded. Plugin's id is {0}".format(plugin.id))
    finally:
        # Always remove the temporary files, even when the upload fails.
        os.remove(wagon_path)
        os.remove(yaml_path)
        os.remove(zip_path)
@plugins.command(name='bundle-upload',
                 short_help='Upload a bundle of plugins [manager only]')
@cfy.options.plugins_bundle_path
@cfy.pass_client()
@cfy.pass_logger
def upload_caravan(client, logger, path):
    """Upload a bundle (caravan) of plugins to the manager.

    When no path is given, the official cloudify-plugins-bundle from the
    public Cloudify repository is used.
    """
    if not path:
        logger.info("Starting upload of plugins bundle, "
                    "this may take few minutes to complete.")
        path = 'http://repository.cloudifysource.org/' \
               'cloudify/wagons/cloudify-plugins-bundle.tgz'
    progress = utils.generate_progress_handler(path, '')
    # One upload installs every plugin contained in the bundle.
    plugins_ = client.plugins.upload(path, progress_callback=progress)
    logger.info("Bundle uploaded, {0} Plugins installed."
                .format(len(plugins_)))
    if len(plugins_) > 0:
        logger.info("The plugins' ids are:\n{0}\n".
                    format('\n'.join([p.id for p in plugins_])))
@plugins.command(name='download',
                 short_help='Download a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.output_path
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_logger
@cfy.pass_client()
def download(plugin_id, output_path, logger, client, tenant_name):
    """Download a plugin from the manager

    `PLUGIN_ID` is the id of the plugin to download.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Downloading plugin {0}...'.format(plugin_id))
    # Label the progress bar with the destination path when one was given.
    label = output_path or plugin_id
    handler = utils.generate_progress_handler(label, '')
    saved_to = client.plugins.download(plugin_id, output_path, handler)
    logger.info('Plugin downloaded as {0}'.format(saved_to))
@plugins.command(name='get',
                 short_help='Retrieve plugin information [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(plugin_id, logger, client, tenant_name, get_data):
    """Retrieve information for a specific plugin

    `PLUGIN_ID` is the id of the plugin to get information on.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Retrieving plugin {0}...'.format(plugin_id))
    plugin = client.plugins.get(plugin_id, _get_data=get_data)
    # Strip server-side fields that are not meant for CLI display.
    _transform_plugin_response(plugin)
    columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
    print_single(columns, plugin, 'Plugin:')
@plugins.command(name='list',
                 short_help='List plugins [manager only]')
@cfy.options.sort_by('uploaded_at')
@cfy.options.descending
@cfy.options.tenant_name_for_list(
    required=False, resource_name_for_help='plugin')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list(sort_by,  # shadows the builtin, but the CLI verb must be `list`
         descending,
         tenant_name,
         all_tenants,
         search,
         pagination_offset,
         pagination_size,
         logger,
         client,
         get_data):
    """List all plugins on the manager
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Listing all plugins...')
    plugins_list = client.plugins.list(sort=sort_by,
                                       is_descending=descending,
                                       _all_tenants=all_tenants,
                                       _search=search,
                                       _get_data=get_data,
                                       _offset=pagination_offset,
                                       _size=pagination_size)
    for plugin in plugins_list:
        # Strip server-side fields that are not meant for CLI display.
        _transform_plugin_response(plugin)
    columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
    print_data(columns, plugins_list, 'Plugins:')
    total = plugins_list.metadata.pagination.total
    logger.info('Showing {0} of {1} plugins'.format(len(plugins_list),
                                                    total))
def _transform_plugin_response(plugin):
    """Remove any columns that shouldn't be displayed in the CLI
    """
    for unwanted in EXCLUDED_COLUMNS:
        # Missing keys are ignored on purpose.
        plugin.pop(unwanted, None)
@plugins.command(name='set-global',
                 short_help="Set the plugin's visibility to global")
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_global(plugin_id, logger, client):
    """Set the plugin's visibility to global

    `PLUGIN_ID` is the id of the plugin to set global

    Deprecated in favor of the `set-visibility` command (see the log
    message emitted below).
    """
    # Pretty-print the common REST errors instead of raw tracebacks.
    status_codes = [400, 403, 404]
    with prettify_client_error(status_codes, logger):
        client.plugins.set_global(plugin_id)
        logger.info('Plugin `{0}` was set to global'.format(plugin_id))
        logger.info("This command will be deprecated soon, please use the "
                    "'set-visibility' command instead")
@plugins.command(name='set-visibility',
                 short_help="Set the plugin's visibility")
@cfy.argument('plugin-id')
@cfy.options.visibility(required=True, valid_values=VISIBILITY_EXCEPT_PRIVATE)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_visibility(plugin_id, visibility, logger, client):
    """Set the plugin's visibility

    `PLUGIN_ID` is the id of the plugin to update
    """
    # Re-validate even though the option already restricts values.
    validate_visibility(visibility, valid_values=VISIBILITY_EXCEPT_PRIVATE)
    # Pretty-print the common REST errors instead of raw tracebacks.
    status_codes = [400, 403, 404]
    with prettify_client_error(status_codes, logger):
        client.plugins.set_visibility(plugin_id, visibility)
        logger.info('Plugin `{0}` was set to {1}'.format(plugin_id,
                                                         visibility))
| |
from itertools import product
import collections
import math
import numpy as np
import pytest
# public 3d array funcs
from rio_color.colorspace import convert_arr, saturate_rgb
# public scalar func
from rio_color.colorspace import convert
# enums required to define src and dst for convert and convert_arr
from rio_color.colorspace import ColorSpace as cs
from colormath.color_objects import LuvColor, sRGBColor, XYZColor, LCHabColor, LabColor
from colormath.color_conversions import convert_color
# Map our ColorSpace enum members to the equivalent colormath color classes;
# used to cross-check conversions against the colormath reference library.
to_colormath = {
    cs.rgb: sRGBColor,
    cs.xyz: XYZColor,
    cs.lab: LabColor,
    cs.lch: LCHabColor,
    cs.luv: LuvColor,
}
# Fixture pairs of (rgb, expected_lch). RGB channels are in [0, 1];
# the LCH hue component is expressed in radians.
tests = (
    # (rgb, expected_lch)
    ((0, 0, 0), (0, 0, 0)),
    ((1.0, 0, 0), (53.2, 104.6, 0.7)),
    ((0.392156, 0.776470, 0.164705), (71.7, 83.5, 2.3)),
    ((0.0392, 0.1960, 0.3529), (20.3517, 27.8757, -1.4612)),
    ((0.0456, 0.1929, 0.3941), (20.8945, 34.9429, -1.3244)),
    ((1.0, 1.0, 1.0), (100, 0, 2.8)),
)
# Default absolute tolerance used by some comparisons below.
test_tol = 1
@pytest.mark.parametrize("pair", tests)
def test_fixtures(pair):
    """Cross-check our LCH fixture values against colormath."""
    rgb, lch = pair
    reference = convert_color(sRGBColor(*rgb), LCHabColor).get_value_tuple()
    assert _near(lch[0:2], reference[0:2], 0.2)
    if lch[0] < 99.999999:
        # If L == 100, the hue is indeterminate
        # Otherwise, normalize to [0, 2*pi] and compare
        two_pi = math.pi * 2
        hue = lch[2] % two_pi
        ref_hue = math.radians(reference[2]) % two_pi
        assert _near([hue], [ref_hue], 0.2)
def _near(a, b, tol):
if not isinstance(tol, collections.Iterable):
tol = [tol] * len(a)
for x, y, t in zip(a, b, tol):
if abs(x - y) > t:
return False
return True
def _make_array(x, y, z, dtype="float64"):
""" make a 3, 1, 1 array
"""
return np.array([[[x]], [[y]], [[z]]]).astype(dtype)
@pytest.mark.parametrize("pair", tests)
def test_rgb2lch(pair):
    """Scalar rgb->lch conversion matches the fixtures."""
    rgb, expected_lch = pair
    result = convert(*rgb, src=cs.rgb, dst=cs.lch)
    assert result[0] >= 0
    assert _near(result, expected_lch, (1.0, 1.0, 0.25))
@pytest.mark.parametrize("pair", tests)
def test_roundtrip(pair):
    """rgb -> lch -> rgb stays within bounds and near the input."""
    rgb, _ = pair
    lch = convert(*rgb, src=cs.rgb, dst=cs.lch)
    back = convert(*lch, src=cs.lch, dst=cs.rgb)
    for channel in back:
        assert -0.0001 < channel < 1.0001
    assert _near(back, rgb, 0.1)
@pytest.mark.parametrize("pair", tests)
def test_lch2rgb(pair):
    """Scalar lch->rgb conversion recovers the fixture RGB values."""
    expected_rgb, lch = pair
    result = convert(*lch, src=cs.lch, dst=cs.rgb)
    assert _near(result, expected_rgb, (1.0, 1.0, 0.1))
@pytest.mark.parametrize("pair", tests)
def test_arr_rgb(pair):
    """Array rgb->lch conversion matches the scalar fixtures."""
    rgb_arr = _make_array(*pair[0])
    lch_arr = _make_array(*pair[1])
    assert np.allclose(convert_arr(rgb_arr, cs.rgb, cs.lch), lch_arr, atol=0.2)
@pytest.mark.parametrize("pair", tests)
def test_arr_lch(pair):
    """Array lch->rgb conversion matches the scalar fixtures."""
    rgb_arr = _make_array(*pair[0])
    lch_arr = _make_array(*pair[1])
    assert np.allclose(convert_arr(lch_arr, cs.lch, cs.rgb), rgb_arr, atol=0.2)
@pytest.mark.parametrize("pair", tests)
def test_saturation_1(pair):
    """A saturation factor of 1.0 leaves the image (nearly) unchanged."""
    arr = _make_array(*pair[0])
    assert np.allclose(saturate_rgb(arr, 1.0), arr, atol=0.2)
def test_saturation_bw():
    """Zero saturation collapses all channels to (near) equal gray values."""
    gray = saturate_rgb(_make_array(0.392156, 0.776470, 0.164705), 0.0)
    red, green, blue = gray[0, 0, 0], gray[1, 0, 0], gray[2, 0, 0]
    assert _near((red,), (green,), tol=0.1)
    assert _near((green,), (blue,), tol=0.1)
def test_saturation():
    """Saturation factors above 1 move colors toward known saturated values."""
    cases = (
        (1.1, (0.392156, 0.776470, 0.164705), (0.3425, 0.78372, 0.0)),
        (1.25, (0.0392, 0.1960, 0.3529), (0.0456, 0.1929, 0.3941)),
    )
    for factor, in_rgb, expected in cases:
        result = saturate_rgb(_make_array(*in_rgb), factor)
        assert np.allclose(result, _make_array(*expected), atol=0.2)
def test_bad_array_bands():
    """Arrays without exactly three bands are rejected with a clear error."""
    two_band = np.random.random((2, 3, 3))
    with pytest.raises(ValueError) as exc:
        convert_arr(two_band, cs.rgb, cs.lch)
    assert "3 bands" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        saturate_rgb(two_band, 1.1)
    assert "3 bands" in str(exc.value)
def test_bad_array_dims():
    """Non-3-D arrays are rejected with a dimension error."""
    flat = np.random.random((3, 3))
    with pytest.raises(ValueError) as exc:
        convert_arr(flat, cs.rgb, cs.lch)
    assert "wrong number of dimensions" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        saturate_rgb(flat, 1.1)
    assert "wrong number of dimensions" in str(exc.value)
def test_bad_array_type():
    """Non-float arrays are rejected with a dtype error."""
    ints = np.random.random((3, 3, 3)).astype("uint8")
    with pytest.raises(ValueError) as exc:
        convert_arr(ints, cs.rgb, cs.lch)
    assert "dtype mismatch" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        saturate_rgb(ints, 1.1)
    assert "dtype mismatch" in str(exc.value)
def test_array_bad_colorspace():
    """Invalid colorspace arguments to convert_arr raise ValueError."""
    # NOTE(review): this array is 2-D, so the ValueError may come from the
    # dimension check rather than colorspace validation — confirm intent.
    arr = np.random.random((3, 3))
    with pytest.raises(ValueError):
        convert_arr(arr, src="FOO", dst="RGB")
    with pytest.raises(ValueError):
        convert_arr(arr, src=999, dst=999)
def test_bad_colorspace_string():
    """String colorspaces raise ValueError (enums are required)."""
    with pytest.raises(ValueError):
        convert(0.1, 0.1, 0.1, src="FOO", dst="RGB")
def test_bad_colorspace_invalid_int():
    """Integers that match no ColorSpace enum value raise ValueError"""
    with pytest.raises(ValueError):
        convert(0.1, 0.1, 0.1, src=999, dst=999)
def test_bad_colorspace_invalid_enum():
    """Invalid colorspace enum names raise AttributeError"""
    # cs.foo / cs.bar fail at attribute lookup, before convert() runs.
    with pytest.raises(AttributeError):
        convert(0.1, 0.1, 0.1, src=cs.foo, dst=cs.bar)
def assert_color_roundtrip(color, src, dst, tolerance):
    """Asserts roundtrip of color correction within a given tolerance

    Helper function for tests below.
    """
    intermediate = convert(*color, src=src, dst=dst)
    rio_roundtrip = convert(*intermediate, src=dst, dst=src)
    if _near(rio_roundtrip, color, tol=tolerance):
        return True
    # Did not roundtrip properly; accept it if colormath's own roundtrip
    # lands in the same place.
    src_cm = to_colormath[src]
    dst_cm = to_colormath[dst]
    cm_there = convert_color(src_cm(*color), dst_cm, illuminant="d65")
    cm_back = convert_color(cm_there, src_cm, illuminant="d65")
    cm_roundtrip = cm_back.get_value_tuple()
    assert _near(rio_roundtrip, cm_roundtrip, tol=tolerance)
# Seven sample values per channel -> 343 RGB triples covering the full range.
rgb_vals = [0.0, 0.01, 0.3, 0.5, 0.7, 0.99, 1.0]
rgb_colors = xyz_colors = list(product(rgb_vals, repeat=3))
# In parameterizing destination colorspaces we use a list comprehension,
# omitting the source colorspace.
# Test roundtrip from RGB to everything else
@pytest.mark.parametrize("color", rgb_colors)
@pytest.mark.parametrize("dst", [v for v in cs if v not in (cs.rgb,)])
@pytest.mark.parametrize("tolerance", [0.1])
def test_rgb_convert_roundtrip(color, dst, tolerance):
    """rgb -> dst -> rgb roundtrips within tolerance for every dst."""
    assert_color_roundtrip(color, cs.rgb, dst, tolerance)
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
import mock
import webapp2
import webtest
# Importing mock_oauth2_decorator before file_bug mocks out
# OAuth2Decorator usage in that file.
# pylint: disable=unused-import
from dashboard import mock_oauth2_decorator
# pylint: enable=unused-import
from dashboard import file_bug
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import sheriff
class FileBugTest(testing_common.TestCase):
  """Tests for the /file_bug handler, which files issue-tracker bugs."""

  def setUp(self):
    """Sets up a test app and a sheriff-eligible logged-in user."""
    super(FileBugTest, self).setUp()
    app = webapp2.WSGIApplication([('/file_bug', file_bug.FileBugHandler)])
    self.testapp = webtest.TestApp(app)
    testing_common.SetSheriffDomains(['chromium.org', 'google.com'])
    testing_common.SetInternalDomain('google.com')
    self.SetCurrentUser('foo@chromium.org')

    # When requests are made to the issue tracker service (using the mock
    # HTTP object in mock_oauth2_decorator), some data is expected,
    # but not necessarily read.
    mock_oauth2_decorator.HTTP_MOCK.data = '{"id": 123}'

  def tearDown(self):
    """Resets shared mock state so tests do not leak into each other."""
    super(FileBugTest, self).tearDown()
    mock_oauth2_decorator.MockOAuth2Decorator.past_bodies = []
    self.UnsetCurrentUser()

  def _AddSampleAlerts(self):
    """Adds sample data and returns a pair of anomaly keys."""
    # Add sample sheriff, masters, bots, and tests.
    sheriff_key = sheriff.Sheriff(id='Sheriff').put()
    testing_common.AddTests(['ChromiumPerf'], ['linux'], {
        'scrolling': {
            'first_paint': {},
            'mean_frame_time': {},
        }
    })
    test_key1 = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
    test_key2 = utils.TestKey('ChromiumPerf/linux/scrolling/mean_frame_time')
    anomaly_key1 = self._AddAnomaly(111995, 112005, test_key1, sheriff_key)
    anomaly_key2 = self._AddAnomaly(112000, 112010, test_key2, sheriff_key)
    return (anomaly_key1, anomaly_key2)

  def _AddAnomaly(self, start_rev, end_rev, test_key, sheriff_key):
    """Puts one sample Anomaly entity and returns its key."""
    return anomaly.Anomaly(
        start_revision=start_rev, end_revision=end_rev, test=test_key,
        median_before_anomaly=100, median_after_anomaly=200,
        sheriff=sheriff_key).put()

  def testGet_WithNoKeys_ShowsError(self):
    # When a request is made and no keys parameter is given,
    # an error message is shown in the reply.
    response = self.testapp.get(
        '/file_bug?summary=s&description=d&finish=true')
    self.assertIn('<div class="error">', response.body)
    self.assertIn('No alerts specified', response.body)

  def testGet_WithNoFinish_ShowsForm(self):
    # When a GET request is sent with keys specified but the finish parameter
    # is not given, the response should contain a form for the sheriff to fill
    # in bug details (summary, description, etc).
    alert_keys = self._AddSampleAlerts()
    response = self.testapp.get(
        '/file_bug?summary=s&description=d&keys=%s' % alert_keys[0].urlsafe())
    self.assertEqual(1, len(response.html('form')))

  def testInternalBugLabel(self):
    # If any of the alerts are marked as internal-only, which should happen
    # when the corresponding test is internal-only, then the create bug dialog
    # should suggest adding a Restrict-View-Google label.
    self.SetCurrentUser('foo@google.com')
    alert_keys = self._AddSampleAlerts()
    anomaly_entity = alert_keys[0].get()
    anomaly_entity.internal_only = True
    anomaly_entity.put()
    response = self.testapp.get(
        '/file_bug?summary=s&description=d&keys=%s' % alert_keys[0].urlsafe())
    self.assertIn('Restrict-View-Google', response.body)

  @mock.patch(
      'google.appengine.api.app_identity.get_default_version_hostname',
      mock.MagicMock(return_value='chromeperf.appspot.com'))
  @mock.patch.object(
      file_bug.auto_bisect, 'StartNewBisectForBug',
      mock.MagicMock(return_value={'issue_id': 123, 'issue_url': 'foo.com'}))
  def _PostSampleBug(self):
    """Posts a bug for both sample alerts and returns the response."""
    alert_keys = self._AddSampleAlerts()
    response = self.testapp.post(
        '/file_bug',
        [
            ('keys', '%s,%s' % (alert_keys[0].urlsafe(),
                                alert_keys[1].urlsafe())),
            ('summary', 's'),
            ('description', 'd\n'),
            ('finish', 'true'),
            ('label', 'one'),
            ('label', 'two'),
        ])
    return response

  @mock.patch.object(
      file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
      mock.MagicMock(return_value=[]))
  @mock.patch.object(
      file_bug.auto_bisect, 'StartNewBisectForBug',
      mock.MagicMock(return_value={'issue_id': 123, 'issue_url': 'foo.com'}))
  def testGet_WithFinish_CreatesBug(self):
    # When a POST request is sent with keys specified and with the finish
    # parameter given, an issue will be created using the issue tracker
    # API, and the anomalies will be updated, and a response page will
    # be sent which indicates success.
    mock_oauth2_decorator.HTTP_MOCK.data = '{"id": 277761}'
    response = self._PostSampleBug()

    # The response page should have a bug number.
    self.assertIn('277761', response.body)

    # The anomaly entities should be updated.
    for anomaly_entity in anomaly.Anomaly.query().fetch():
      if anomaly_entity.end_revision in [112005, 112010]:
        self.assertEqual(277761, anomaly_entity.bug_id)
      else:
        self.assertIsNone(anomaly_entity.bug_id)

    # Two HTTP requests are made when filing a bug; only test 2nd request.
    comment = json.loads(mock_oauth2_decorator.HTTP_MOCK.body)['content']
    self.assertIn(
        'https://chromeperf.appspot.com/group_report?bug_id=277761', comment)
    self.assertIn('https://chromeperf.appspot.com/group_report?keys=', comment)
    self.assertIn(
        '\n\n\nBot(s) for this bug\'s original alert(s):\n\nlinux', comment)

  @mock.patch.object(
      file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
      mock.MagicMock(return_value=[
          {
              'versions': [
                  {'branch_base_position': '112000', 'current_version': '2.0'},
                  {'branch_base_position': '111990', 'current_version': '1.0'}
              ]
          }
      ]))
  @mock.patch.object(
      file_bug.auto_bisect, 'StartNewBisectForBug',
      mock.MagicMock(return_value={'issue_id': 123, 'issue_url': 'foo.com'}))
  def testGet_WithFinish_LabelsBugWithMilestone(self):
    # Here, we expect the bug to have the following start revisions:
    # [111995, 112005] and the milestones are M-1 for rev 111990 and
    # M-2 for 11200. Hence the expected behavior is to label the bug
    # M-2 since 111995 (lowest possible revision introducing regression)
    # is less than 112000 (revision for M-2).
    self._PostSampleBug()
    self.assertIn(u'M-2', json.loads(
        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels'])

  @unittest.skip('Flaky; see #1555.')
  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, json.dumps([
              {
                  'versions': [
                      {'branch_base_position': '111999',
                       'current_version': '3.0.1234.32'},
                      {'branch_base_position': '112000',
                       'current_version': '2.0'},
                      {'branch_base_position': '111990',
                       'current_version': '1.0'}
                  ]
              }
          ]))))
  def testGet_WithFinish_LabelsBugWithLowestMilestonePossible(self):
    # Here, we expect the bug to have the following start revisions:
    # [111995, 112005] and the milestones are M-1 for rev 111990, M-2
    # for 112000 and M-3 for 111999. Hence the expected behavior is to
    # label the bug M-2 since 111995 is less than 112000 (M-2) and 111999
    # (M-3) AND M-2 is lower than M-3.
    self._PostSampleBug()
    self.assertIn(u'M-2', json.loads(
        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels'])

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, '[]')))
  def testGet_WithFinish_SucceedsWithNoVersions(self):
    # Here, we test that we don't label the bug with an unexpected value when
    # there is no version information from omahaproxy (for whatever reason)
    self._PostSampleBug()
    labels = json.loads(
        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels']
    self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, json.dumps([
              {
                  'versions': [
                      {'branch_base_position': '0', 'current_version': '1.0'}
                  ]
              }
          ]))))
  def testGet_WithFinish_SucceedsWithRevisionOutOfRange(self):
    # Here, we test that we label the bug with the highest milestone when the
    # revision introducing regression is beyond all milestones in the list.
    self._PostSampleBug()
    self.assertIn(u'M-1', json.loads(
        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels'])

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, json.dumps([
              {
                  'versions': [
                      {'branch_base_position': 'N/A', 'current_version': 'N/A'}
                  ]
              }
          ]))))
  @mock.patch('logging.warn')
  def testGet_WithFinish_SucceedsWithNAAndLogsWarning(self, mock_warn):
    # Unparseable branch positions should be skipped with one logged warning.
    self._PostSampleBug()
    labels = json.loads(
        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels']
    self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
    self.assertEqual(1, mock_warn.call_count)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| |
"""Module for the xls diffing and CLI.
IndexedVenn
This is a named tuple that is meant to carry information about two
sequences (called "a" and "b"). It has the following ordered members:
- common_a: The index and item of all items in both a and b with the
condition that the item appears exactly once in both. It preserves
the order of what is in a.
- common_a_dup: The index and item of all items in both a and b with
the condition that the item appears more than once in either a or b. It
preserves the order of what is in a.
- a_not_b: The index and item of all items in a that are not in b. It
preserves the order of what is in a.
- a_to_b: A map of indices from items in common_a to common_b.
- common_b: Same as common_a but a and b reversed.
- common_b_dup: Same as common_a_dup but a and b reversed.
- b_not_a: Same as a_not_b but a and b reversed.
- b_to_a: Same as a_to_b but a and b reversed.
CellDiff
This is a named tuple with information about cell differences between
workbook "a" and "b".
- cell_a: The cell in a
- cell_b: The cell in b
- row_a: The row number in a
- col_a: The column number in a
- row_b: The row number in b
- col_b: The col number in b
- row_key: The key for the row, e.g. name and type
- col_key: The key for the column, e.g. column header
"""
import argparse
from collections import defaultdict, Counter, namedtuple
import difflib
import os.path
from xlsxwriter.utility import xl_rowcol_to_cell
import pmix.workbook
# See the module docstring for the meaning of each field.
CellDiff = namedtuple('CellDiff', ['cell_a', 'cell_b', 'row_a', 'col_a',
                                   'row_b', 'col_b', 'row_key', 'col_key'])
IndexedVenn = namedtuple('IndexedVenn', ['common_a', 'common_a_dup', 'a_not_b',
                                         'a_to_b', 'common_b', 'common_b_dup',
                                         'b_not_a', 'b_to_a'])


def indexed_venn(seq1, seq2):
    """Get the intersection and two set differences for two lists.

    This could be visualized as a Venn Diagram.

    First what is common between the two lists is determined. Then various
    lists are created.

    The items in the input sequences must be hashable, since they are counted
    and used as dictionary keys.

    Args:
        seq1 (list): The first sequence
        seq2 (list): The second sequence

    Returns:
        IndexedVenn named tuple.
    """
    common = set(seq1) & set(seq2)
    # Combined multiplicity; > 2 means the item is duplicated somewhere.
    counted = Counter(seq1) + Counter(seq2)

    def build_indexed_a_b(seq_a, seq_b):
        """Build up indexed components for IndexedVenn."""
        # Precompute first-occurrence indices once instead of calling
        # seq_b.index() inside the loop (O(n^2) -> O(n)).
        first_in_b = {}
        for j, item in enumerate(seq_b):
            first_in_b.setdefault(item, j)
        common_a = []
        common_a_dup = []
        a_not_b = []
        a_to_b = {}
        for i, item in enumerate(seq_a):
            if item not in common:
                a_not_b.append((i, item))
            elif counted[item] > 2:
                common_a_dup.append((i, item))
            else:
                common_a.append((i, item))
                a_to_b[i] = first_in_b[item]
        return common_a, common_a_dup, a_not_b, a_to_b

    seq_1_info = build_indexed_a_b(seq1, seq2)
    seq_2_info = build_indexed_a_b(seq2, seq1)
    return IndexedVenn(*seq_1_info, *seq_2_info)
class XlsDiff:
    """A class to represent a difference between two Excel workbooks.

    Class attributes:
        sheet_diff_key (dict): A dictionary of sheet names to lists of column
            headers. The column headers are what are combined to make unique
            row identifiers for those rows.

    Instance attributes:
        base (Workbook): The base workbook
        new (Workbook): The new workbook
        simple (bool): True if this should be a simple diff
        sheet_venn (IndexedVenn): The Venn diagram of sheetnames
        col_venn (dict): Keys are sheetnames. Values are Venn diagrams of
            columns in a given sheet
        row_venn (dict): Keys are sheetnames. Values are Venn diagrams of
            rows in a given sheet
        cell_diff (dict): Keys are sheetnames. Values are individual cell
            differences between cells sharing the same row, col, and sheet.
    """

    # Default key columns per XLSForm sheet; the combined values of these
    # columns identify a row.
    sheet_diff_key = {
        'survey': ['type', 'name'],
        'choices': ['list_name', 'name'],
        'external_choices': ['list_name', 'name']
    }
def __init__(self, base, new, simple=True, **kwargs):
"""Initialize a diff between worksheets.
Args:
base (Workbook): The base workbook
new (Workbook): The new workbook
simple (bool): True if this should be a simple diff
**kwargs: Anything in kwargs updates the sheet_diff_key map
"""
self.base = base
self.new = new
self.simple = simple
self.sheet_diff_key.update(kwargs)
self.sheet_venn = self.sheet_comparison(base, new)
common_sheets = (s[1] for s in self.sheet_venn.common_a)
self.col_venn = {}
self.row_venn = {}
self.cell_diff = defaultdict(list)
for sheet in common_sheets:
base_sheet = base[sheet]
new_sheet = new[sheet]
col_venn = self.column_comparison(base_sheet, new_sheet, simple)
self.col_venn[sheet] = col_venn
key = None if simple else self.sheet_diff_key.get(sheet)
row_venn = self.row_comparison(base_sheet, new_sheet, key)
self.row_venn[sheet] = row_venn
self._find_cell_diffs(base_sheet, new_sheet)
@classmethod
def from_file(cls, base, new, simple, **kwargs):
"""Initialize XlsDiff from files.
Creates XlsForm objects and returns their diff.
Args:
base (str): A path to an xlsform
new (str): A path to an xlsform
simple (bool): True if this should be a simple diff
**kwargs: Anything in kwargs updates the sheet_diff_key map
"""
base_xlsform = pmix.workbook.Workbook(base, stripstr=False)
new_xlsform = pmix.workbook.Workbook(new, stripstr=False)
xls_diff = cls(base_xlsform, new_xlsform, simple, **kwargs)
return xls_diff
def swap(self):
"""Swap base and the new.
Returns:
A new XlsDiff object with the two workbooks swapped.
"""
return XlsDiff(self.new, self.base, self.simple, **self.sheet_diff_key)
def copy(self):
"""Get a diff of copies of the original workbooks.
Returns:
A new XlsDiff object with copies of the two original workbooks.
"""
base_copy = self.base.copy()
new_copy = self.new.copy()
return XlsDiff(base_copy, new_copy, self.simple, **self.sheet_diff_key)
@staticmethod
def sheet_comparison(base, new):
"""Get the full Venn diagram of sheet names for two workbooks.
Args:
base (Workbook): The base workbook
new (Workbook): The new workbook
"""
base_names = base.sheetnames()
new_names = new.sheetnames()
return indexed_venn(base_names, new_names)
    @staticmethod
    def column_comparison(base, new, ind=True):
        """Compare two sheet columns.

        A simple comparison should compare on column indices.

        Args:
            base (Worksheet): The base worksheet
            new (Worksheet): The new worksheet
            ind (bool): If true, compare on column indices, else use the
                column headers
        """
        if ind:
            base_seq = list(range(base.ncol()))
            new_seq = list(range(new.ncol()))
        else:
            base_seq = base.column_headers()
            new_seq = new.column_headers()
        return indexed_venn(base_seq, new_seq)
@staticmethod
def row_comparison(base, new, key=None):
    """Compare rows in two sheets by key.

    Args:
        base (Worksheet): The base worksheet
        new (Worksheet): The new worksheet
        key (seq, int, or str): The column(s) to use for determining unique
            rows. If None, rows are matched purely by position.

    Returns:
        The indexed Venn diagram of the two row sequences.
    """
    if key is None:
        # No key: a row is identified only by its index.
        base_seq = list(range(len(base)))
        new_seq = list(range(len(new)))
    else:
        # Identify each row by the tuple of its string values in the
        # key column(s); zip(*iters) turns per-column streams into
        # per-row tuples.
        base_cols = base.column_key(key)
        base_iters = tuple(base.column_str(c) for c in base_cols)
        base_seq = list(zip(*base_iters))
        new_cols = new.column_key(key)
        new_iters = tuple(new.column_str(c) for c in new_cols)
        new_seq = list(zip(*new_iters))
    return indexed_venn(base_seq, new_seq)
# pylint: disable=too-many-locals
def _find_cell_diffs(self, base_sheet, new_sheet):
    """Find cell differences between two sheets.

    Pre-condition: Column and row comparisons should have been performed
    first. These should be saved in the instance variables row_venn and
    col_venn.

    Each difference found is appended as a CellDiff record to
    self.cell_diff under the sheet's name.

    Args:
        base_sheet (Worksheet): The base worksheet
        new_sheet (Worksheet): The new worksheet
    """
    sheet_name = base_sheet.name
    # Venn entries appear to be (index, key) pairs — [0] is used as the
    # row/column index and [1] is stored on the CellDiff record.
    common_rows_base = self.row_venn[sheet_name].common_a
    common_cols_base = self.col_venn[sheet_name].common_a
    # Maps from a base row/col index to the matching index in the new sheet.
    rows_base_to_new = self.row_venn[sheet_name].a_to_b
    cols_base_to_new = self.col_venn[sheet_name].a_to_b
    for row in common_rows_base:
        for col in common_cols_base:
            base_cell = base_sheet[row[0]][col[0]]
            new_row = rows_base_to_new[row[0]]
            new_col = cols_base_to_new[col[0]]
            new_cell = new_sheet[new_row][new_col]
            if base_cell != new_cell:
                record = CellDiff(base_cell, new_cell, row[0], col[0],
                                  new_row, new_col, row[1], col[1])
                self.cell_diff[sheet_name].append(record)
def write_diff(self, path):
    """Highlight the differences on a copy of the new workbook and write out.

    The highlighting is as follows:

    - Orange shows columns and rows that are added
    - Red shows columns and rows that are duplicated based on key
    - Green shows columns and rows that are out of order
    - Yellow shows cells that are different from common rows and cols

    Highlighting is applied to a copy (via ``self.copy()``) so the
    workbooks held by this diff remain unmodified.

    Args:
        path (str): The path to the output file
    """
    diff = self.copy()
    diff.highlight_all()
    diff.new.write_out(path)
def highlight_all(self):
    """Apply every category of diff highlighting to the new workbook."""
    for highlighter in (self._highlight_cols_new,
                        self._highlight_rows_new,
                        self._highlight_cell_diffs_new):
        highlighter()
def _highlight_cols_new(self):
    """Highlight duplicate and new columns in the new workbook.

    - Orange: columns present in new but not in base
    - Red: columns duplicated in new according to the comparison key
    """
    for sheet, venn in self.col_venn.items():
        for col in venn.b_not_a:
            # col[0] is the column index (col appears to be an
            # (index, key) pair — see _find_cell_diffs).
            for cell in self.new[sheet].column(col[0]):
                cell.set_highlight('HL_ORANGE')
        for col in venn.common_b_dup:
            for cell in self.new[sheet].column(col[0]):
                cell.set_highlight('HL_RED')
def _highlight_rows_new(self):
"""Highlight duplicate, mis-ordered, and new rows."""
for sheet, venn in self.row_venn.items():
for row in venn.b_not_a:
for cell in self.new[sheet][row[0]]:
cell.set_highlight('HL_ORANGE')
for row in venn.common_b_dup:
for cell in self.new[sheet][row[0]]:
cell.set_highlight('HL_RED')
mapping = sorted((k, v) for (k, v) in venn.a_to_b.items())
old = 0
for ind in (a_to_b[1] for a_to_b in mapping):
if ind < old:
for cell in self.new[sheet][ind]:
cell.set_highlight('HL_GREEN')
old = ind
def _highlight_cell_diffs_new(self):
"""Highlight cell differences."""
for _, diffs in self.cell_diff.items():
for cell_diff in diffs:
cell_diff.cell_b.set_highlight()
def report_overview(self):
    """Report an overview of the differences based on indexed Venns.

    Prints a banner with both file names, a one-line sheet summary, and
    then a per-sheet overview for every sheet the workbooks share.
    """
    inner = '{:^20}'.format('Overview of diff')
    outer = '{:*^60}'.format(inner)
    print(outer)
    print('Base: {}'.format(self.base.file))
    print('New: {}'.format(self.new.file))
    print('*'*60)
    msg = 'Sheets: {} in common, {} in base not new, {} in new not base'
    # Duplicated common sheets are folded into the "in common" count here.
    common = len(self.sheet_venn.common_b)
    common_dup = len(self.sheet_venn.common_b_dup)
    a_not_b = len(self.sheet_venn.a_not_b)
    b_not_a = len(self.sheet_venn.b_not_a)
    msg = msg.format(common + common_dup, a_not_b, b_not_a)
    print(msg)
    # common_b entries appear to be (index, name) pairs; [1] is the name.
    for sheet in (i[1] for i in self.sheet_venn.common_b):
        print(' ---')
        self.report_sheet_overview(sheet)
def report_sheet_overview(self, sheetname):
    """Report an overview for the differences in a single sheet.

    Args:
        sheetname (str): The name of the sheet to report on. Should be a
            common sheet name between the base and the new workbook.
    """
    def venn_counts(venn):
        # The four counts shared by the column and row summary lines.
        return (len(venn.common_b), len(venn.common_b_dup),
                len(venn.a_not_b), len(venn.b_not_a))

    msg = ('Sheet "{}" columns: {} in common, {} duplicated, {} in base '
           'not new, {} in new not base')
    print(msg.format(sheetname, *venn_counts(self.col_venn[sheetname])))
    msg = ('Sheet "{}" rows: {} in common, {} duplicated, {} in base '
           'not new, {} in new not base')
    print(msg.format(sheetname, *venn_counts(self.row_venn[sheetname])))
    msg = 'Sheet "{}" count of cell differences: {}'
    print(msg.format(sheetname, len(self.cell_diff[sheetname])))
# pylint: disable=too-many-locals
def report_cell_diffs(self):
    """Report cell differences on STDOUT.

    For each common sheet, prints every differing cell pair as its Excel
    address (e.g. ``B7``) followed by a line-by-line difflib comparison
    of the two cell values.
    """
    inner = '{:^20}'.format('Diff report')
    outer = '{:*^60}'.format(inner)
    print(outer)
    print('Base: {}'.format(self.base.file))
    print('New: {}'.format(self.new.file))
    print('*'*60)
    differ = difflib.Differ()
    for sheet, cell_list in self.cell_diff.items():
        inner = ' Total diffs on "{}": {} '.format(sheet, len(cell_list))
        outer = '{:-^60}'.format(inner)
        print(outer)
        for record in cell_list:
            # Convert 0-based (row, col) indices to Excel-style names.
            base_xl_name = xl_rowcol_to_cell(record.row_a, record.col_a)
            new_xl_name = xl_rowcol_to_cell(record.row_b, record.col_b)
            msg = f'>>> Base[{base_xl_name}] != New[{new_xl_name}]'
            print(msg)
            cell1_lines = str(record.cell_a).splitlines()
            cell2_lines = str(record.cell_b).splitlines()
            diff = differ.compare(cell1_lines, cell2_lines)
            print('\n'.join(diff))
    inner = '{:^20}'.format('End diff report')
    outer = '{:*^60}'.format(inner)
    print(outer)
def xlsdiff_cli():
    """Run the command line interface for this module.

    Parses two positional workbook paths plus flags, prints an overview,
    and either reports cell diffs to STDOUT or writes a highlighted
    Excel file.
    """
    prog_desc = 'Create a diff of two Excel workbooks'
    parser = argparse.ArgumentParser(description=prog_desc)
    file_help = ('Path to source XLSForm. Two must be supplied. The first is '
                 'treated as the base, the second is treated as the new file.')
    parser.add_argument('xlsxfile', help=file_help, nargs=2)
    reverse_help = 'Reverse the base file and the new file for processing.'
    parser.add_argument('-r', '--reverse', action='store_true',
                        help=reverse_help)
    simple_help = 'Do a simple diff instead of the default ODK diff.'
    parser.add_argument('-s', '--simple', action='store_true',
                        help=simple_help)
    out_help = ('Path to write Excel output. If flag is given with no '
                'argument then default out path is used. If flag is omitted, '
                'then write text output to STDOUT.')
    # const=0 is a sentinel meaning "-e given without a path": args.excel is
    # None (flag omitted), a str (explicit path), or 0 (use default path).
    parser.add_argument('-e', '--excel', help=out_help, nargs='?', const=0)
    args = parser.parse_args()
    file1, file2 = args.xlsxfile
    if args.reverse:
        file1, file2 = file2, file1
    diff = XlsDiff.from_file(file1, file2, args.simple)
    diff.report_overview()
    if args.excel is None:
        diff.report_cell_diffs()
    elif isinstance(args.excel, str):
        diff.write_diff(args.excel)
    else:
        # Default output path: "<new file>-diff<ext>" next to the input.
        # (A single-argument os.path.join here was a no-op; removed.)
        filename, extension = os.path.splitext(file2)
        outpath = filename + '-diff' + extension
        diff.write_diff(outpath)
# Allow running this module directly as a script.
if __name__ == '__main__':
    xlsdiff_cli()
| |
from __future__ import absolute_import, print_function
import logging
import six
from datetime import timedelta
from enum import IntEnum
from bitfield import BitField
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.functional import cached_property
from sentry import roles
from sentry.app import locks
from sentry.constants import RESERVED_ORGANIZATION_SLUGS, RESERVED_PROJECT_SLUGS
from sentry.db.models import BaseManager, BoundedPositiveIntegerField, Model, sane_repr
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
from sentry.utils.retries import TimedRetryPolicy
class OrganizationStatus(IntEnum):
    """Lifecycle states for an organization."""

    ACTIVE = 0
    PENDING_DELETION = 1
    DELETION_IN_PROGRESS = 2

    # alias for ACTIVE
    VISIBLE = 0

    def __str__(self):
        return self.name

    @property
    def label(self):
        # _labels is attached after the class body (see below) because its
        # keys are the members themselves, which don't exist mid-definition.
        return OrganizationStatus._labels[self]

    @classmethod
    def as_choices(cls):
        """Return (value, label) pairs for use as Django field choices."""
        result = []
        for name, member in six.iteritems(cls.__members__):
            # skip aliases (e.g. VISIBLE): __members__ lists them under the
            # alias name, but member.name is the canonical member's name
            if name != member.name:
                continue
            # realistically Enum shouldn't even create these, but alas
            if name.startswith("_"):
                continue
            result.append((member.value, member.label))
        return tuple(result)
# Attached after the class body: the dict's keys are the enum members
# themselves, which do not exist until the class statement has finished.
OrganizationStatus._labels = {
    OrganizationStatus.ACTIVE: "active",
    OrganizationStatus.PENDING_DELETION: "pending deletion",
    OrganizationStatus.DELETION_IN_PROGRESS: "deletion in progress",
}
class OrganizationManager(BaseManager):
    # def get_by_natural_key(self, slug):
    #     return self.get(slug=slug)

    def get_for_user(self, user, scope=None, only_visible=True):
        """
        Returns a set of all organizations a user has access to.

        If ``scope`` is given, only organizations where the user's
        membership carries that scope are returned; ``only_visible``
        restricts results to ACTIVE organizations.
        """
        from sentry.models import OrganizationMember

        if not user.is_authenticated():
            return []

        if settings.SENTRY_PUBLIC and scope is None:
            # Public installs expose every org; membership is irrelevant.
            if only_visible:
                qs = self.filter(status=OrganizationStatus.ACTIVE)
            else:
                qs = self.filter()
            return list(qs)

        membership_qs = OrganizationMember.objects.filter(
            user=user
        ).select_related("organization")
        if only_visible:
            membership_qs = membership_qs.filter(
                organization__status=OrganizationStatus.ACTIVE
            )

        memberships = list(membership_qs)
        if scope is None:
            return [m.organization for m in memberships]
        return [m.organization for m in memberships if scope in m.get_scopes()]
class Organization(Model):
    """
    An organization represents a group of individuals which maintain ownership of projects.
    """

    __core__ = True

    name = models.CharField(max_length=64)
    slug = models.SlugField(unique=True)
    status = BoundedPositiveIntegerField(
        choices=OrganizationStatus.as_choices(),
        # south will generate a default value of `'<OrganizationStatus.ACTIVE: 0>'`
        # if `.value` is omitted
        default=OrganizationStatus.ACTIVE.value,
    )
    date_added = models.DateTimeField(default=timezone.now)
    members = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        through="sentry.OrganizationMember",
        related_name="org_memberships",
        through_fields=("organization", "user"),
    )
    default_role = models.CharField(
        choices=roles.get_choices(), max_length=32, default=roles.get_default().id
    )

    flags = BitField(
        flags=(
            (
                "allow_joinleave",
                "Allow members to join and leave teams without requiring approval.",
            ),
            (
                "enhanced_privacy",
                "Enable enhanced privacy controls to limit personally identifiable information (PII) as well as source code in things like notifications.",
            ),
            (
                "disable_shared_issues",
                "Disable sharing of limited details on issues to anonymous users.",
            ),
            (
                "early_adopter",
                "Enable early adopter status, gaining access to features prior to public release.",
            ),
            ("require_2fa", "Require and enforce two-factor authentication for all members."),
            (
                "disable_new_visibility_features",
                "Temporarily opt out of new visibility features and ui",
            ),
        ),
        # default=1 sets only the first bit (allow_joinleave).
        default=1,
    )

    objects = OrganizationManager(cache_fields=("pk", "slug"))

    class Meta:
        app_label = "sentry"
        db_table = "sentry_organization"

    __repr__ = sane_repr("owner_id", "name", "slug")

    @classmethod
    def get_default(cls):
        """
        Return the organization used in single organization mode.
        """
        if settings.SENTRY_ORGANIZATION is not None:
            return cls.objects.get(id=settings.SENTRY_ORGANIZATION)
        return cls.objects.filter(status=OrganizationStatus.ACTIVE)[0]

    def __unicode__(self):
        return u"%s (%s)" % (self.name, self.slug)

    def save(self, *args, **kwargs):
        """Save, generating a unique slug from the name on first save."""
        if not self.slug:
            # Lock so concurrent first saves don't race on slug uniqueness.
            lock = locks.get("slug:organization", duration=5)
            with TimedRetryPolicy(10)(lock.acquire):
                slugify_instance(self, self.name, reserved=RESERVED_ORGANIZATION_SLUGS)
            super(Organization, self).save(*args, **kwargs)
        else:
            super(Organization, self).save(*args, **kwargs)

    def delete(self):
        if self.is_default:
            # (typo fix: message previously read "delete the the default")
            raise Exception("You cannot delete the default organization.")
        return super(Organization, self).delete()

    @cached_property
    def is_default(self):
        # Only meaningful in single-organization installs.
        if not settings.SENTRY_SINGLE_ORGANIZATION:
            return False
        return self == type(self).get_default()

    def has_access(self, user, access=None):
        """Return True if user is a member, optionally at `access` level or better."""
        queryset = self.member_set.filter(user=user)
        if access is not None:
            queryset = queryset.filter(type__lte=access)
        return queryset.exists()

    def get_audit_log_data(self):
        """Return a serializable snapshot of this org for audit log entries."""
        return {
            "id": self.id,
            "slug": self.slug,
            "name": self.name,
            "status": int(self.status),
            "flags": int(self.flags),
            "default_role": self.default_role,
        }

    def get_owners(self):
        """Return the queryset of active users holding the top-level role."""
        from sentry.models import User

        return User.objects.filter(
            sentry_orgmember_set__role=roles.get_top_dog().id,
            sentry_orgmember_set__organization=self,
            is_active=True,
        )

    def get_default_owner(self):
        # Cached per instance; raises IndexError if the org has no owner.
        if not hasattr(self, "_default_owner"):
            self._default_owner = self.get_owners()[0]
        return self._default_owner

    def has_single_owner(self):
        """Return True if exactly one active user holds the owner role."""
        from sentry.models import OrganizationMember

        # [:2] keeps the query cheap: we only care whether it's 1 or >1.
        count = OrganizationMember.objects.filter(
            organization=self, role=roles.get_top_dog().id, user__isnull=False, user__is_active=True
        )[:2].count()
        return count == 1

    def merge_to(from_org, to_org):
        """Merge `from_org` into `to_org`: members, teams, projects,
        releases and related models.

        Note: deliberately defined without `self`; the receiver acts as
        `from_org`.
        """
        from sentry.models import (
            ApiKey,
            AuditLogEntry,
            AuthProvider,
            Commit,
            OrganizationAvatar,
            OrganizationIntegration,
            OrganizationMember,
            OrganizationMemberTeam,
            Project,
            Release,
            ReleaseCommit,
            ReleaseEnvironment,
            ReleaseFile,
            ReleaseHeadCommit,
            Repository,
            Team,
            Environment,
        )

        # Bug fix: previously this was only bound inside the member loop, so
        # merging an organization with no members raised NameError when the
        # team/project/release loops tried to log.
        logger = logging.getLogger("sentry.merge")

        for from_member in OrganizationMember.objects.filter(
            organization=from_org, user__isnull=False
        ):
            try:
                to_member = OrganizationMember.objects.get(
                    organization=to_org, user=from_member.user
                )
            except OrganizationMember.DoesNotExist:
                # No membership in the target org yet: move this one over.
                from_member.update(organization=to_org)
                to_member = from_member
            else:
                # Already a member of the target org: carry over active teams.
                qs = OrganizationMemberTeam.objects.filter(
                    organizationmember=from_member, is_active=True
                ).select_related()
                for omt in qs:
                    OrganizationMemberTeam.objects.create_or_update(
                        organizationmember=to_member, team=omt.team, defaults={"is_active": True}
                    )
            logger.info(
                "user.migrate",
                extra={
                    "instance_id": from_member.id,
                    "new_member_id": to_member.id,
                    "from_organization_id": from_org.id,
                    "to_organization_id": to_org.id,
                },
            )

        for from_team in Team.objects.filter(organization=from_org):
            try:
                with transaction.atomic():
                    from_team.update(organization=to_org)
            except IntegrityError:
                # Slug collision in the target org: re-slugify, then move.
                slugify_instance(from_team, from_team.name, organization=to_org)
                from_team.update(organization=to_org, slug=from_team.slug)
            logger.info(
                "team.migrate",
                extra={
                    "instance_id": from_team.id,
                    "new_slug": from_team.slug,
                    "from_organization_id": from_org.id,
                    "to_organization_id": to_org.id,
                },
            )

        for from_project in Project.objects.filter(organization=from_org):
            try:
                with transaction.atomic():
                    from_project.update(organization=to_org)
            except IntegrityError:
                slugify_instance(
                    from_project,
                    from_project.name,
                    organization=to_org,
                    reserved=RESERVED_PROJECT_SLUGS,
                )
                from_project.update(organization=to_org, slug=from_project.slug)
            logger.info(
                "project.migrate",
                extra={
                    "instance_id": from_project.id,
                    "new_slug": from_project.slug,
                    "from_organization_id": from_org.id,
                    "to_organization_id": to_org.id,
                },
            )

        # TODO(jess): update this when adding unique constraint
        # on version, organization for releases
        for from_release in Release.objects.filter(organization=from_org):
            try:
                to_release = Release.objects.get(version=from_release.version, organization=to_org)
            except Release.DoesNotExist:
                Release.objects.filter(id=from_release.id).update(organization=to_org)
            else:
                Release.merge(to_release, [from_release])
            logger.info(
                "release.migrate",
                extra={
                    "instance_id": from_release.id,
                    "from_organization_id": from_org.id,
                    "to_organization_id": to_org.id,
                },
            )

        def do_update(queryset, params):
            # Bulk-update first; on conflict fall back to row-by-row,
            # logging and skipping rows that still conflict.
            model_name = queryset.model.__name__.lower()
            try:
                with transaction.atomic():
                    queryset.update(**params)
            except IntegrityError:
                for instance in queryset:
                    try:
                        with transaction.atomic():
                            instance.update(**params)
                    except IntegrityError:
                        logger.info(
                            "{}.migrate-skipped".format(model_name),
                            extra={
                                "from_organization_id": from_org.id,
                                "to_organization_id": to_org.id,
                            },
                        )
                    else:
                        logger.info(
                            "{}.migrate".format(model_name),
                            extra={
                                "instance_id": instance.id,
                                "from_organization_id": from_org.id,
                                "to_organization_id": to_org.id,
                            },
                        )
            else:
                logger.info(
                    "{}.migrate".format(model_name),
                    extra={"from_organization_id": from_org.id, "to_organization_id": to_org.id},
                )

        # Models related via a FK named `organization` ...
        INST_MODEL_LIST = (
            AuthProvider,
            ApiKey,
            AuditLogEntry,
            OrganizationAvatar,
            OrganizationIntegration,
            ReleaseEnvironment,
            ReleaseFile,
        )
        # ... and models that store a raw `organization_id` attribute.
        ATTR_MODEL_LIST = (Commit, ReleaseCommit, ReleaseHeadCommit, Repository, Environment)

        for model in INST_MODEL_LIST:
            queryset = model.objects.filter(organization=from_org)
            do_update(queryset, {"organization": to_org})

        for model in ATTR_MODEL_LIST:
            queryset = model.objects.filter(organization_id=from_org.id)
            do_update(queryset, {"organization_id": to_org.id})

    # TODO: Make these a mixin
    def update_option(self, *args, **kwargs):
        from sentry.models import OrganizationOption

        return OrganizationOption.objects.set_value(self, *args, **kwargs)

    def get_option(self, *args, **kwargs):
        from sentry.models import OrganizationOption

        return OrganizationOption.objects.get_value(self, *args, **kwargs)

    def delete_option(self, *args, **kwargs):
        from sentry.models import OrganizationOption

        return OrganizationOption.objects.unset_value(self, *args, **kwargs)

    def send_delete_confirmation(self, audit_log_entry, countdown):
        """Email all owners that the organization is queued for deletion."""
        from sentry import options
        from sentry.utils.email import MessageBuilder

        owners = self.get_owners()
        context = {
            "organization": self,
            "audit_log_entry": audit_log_entry,
            # When the deletion task is expected to run.
            "eta": timezone.now() + timedelta(seconds=countdown),
            "url": absolute_uri(reverse("sentry-restore-organization", args=[self.slug])),
        }

        MessageBuilder(
            subject="%sOrganization Queued for Deletion" % (options.get("mail.subject-prefix"),),
            template="sentry/emails/org_delete_confirm.txt",
            html_template="sentry/emails/org_delete_confirm.html",
            type="org.confirm_delete",
            context=context,
        ).send_async([o.email for o in owners])

    def flag_has_changed(self, flag_name):
        "Returns ``True`` if ``flag`` has changed since initialization."
        return getattr(self.old_value("flags"), flag_name, None) != getattr(self.flags, flag_name)

    def handle_2fa_required(self, request):
        """Queue async removal of members who don't satisfy the 2FA requirement."""
        from sentry.models import ApiKey
        from sentry.tasks.auth import remove_2fa_non_compliant_members

        actor_id = request.user.id if request.user and request.user.is_authenticated() else None
        api_key_id = (
            request.auth.id
            if hasattr(request, "auth") and isinstance(request.auth, ApiKey)
            else None
        )
        ip_address = request.META["REMOTE_ADDR"]

        remove_2fa_non_compliant_members.delay(
            self.id, actor_id=actor_id, actor_key_id=api_key_id, ip_address=ip_address
        )

    def get_url_viewname(self):
        return "sentry-organization-issue-list"

    def get_url(self):
        return reverse(self.get_url_viewname(), args=[self.slug])
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import os
from core import perf_benchmark
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_test
from telemetry import story
from telemetry.value import scalar
from metrics import power
class _DromaeoMeasurement(page_test.PageTest):
  """Runs a Dromaeo suite in the page and records its reported scores.

  Also collects power metrics while the benchmark runs.
  """

  def __init__(self):
    super(_DromaeoMeasurement, self).__init__()
    self._power_metric = None

  def CustomizeBrowserOptions(self, options):
    power.PowerMetric.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, platform):
    self._power_metric = power.PowerMetric(platform)

  def DidNavigateToPage(self, page, tab):
    self._power_metric.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    # Wait until the page's "Run" button exists and is ready.
    tab.WaitForJavaScriptExpression(
        'window.document.getElementById("pause") &&' +
        'window.document.getElementById("pause").value == "Run"',
        120)

    # Start spying on POST request that will report benchmark results, and
    # intercept result data.
    tab.ExecuteJavaScript('(function() {' +
                          '  var real_jquery_ajax_ = window.jQuery;' +
                          '  window.results_ = "";' +
                          '  window.jQuery.ajax = function(request) {' +
                          '    if (request.url == "store.php") {' +
                          '      window.results_ =' +
                          '          decodeURIComponent(request.data);' +
                          '      window.results_ = window.results_.substring(' +
                          '          window.results_.indexOf("=") + 1, ' +
                          '          window.results_.lastIndexOf("&"));' +
                          '      real_jquery_ajax_(request);' +
                          '    }' +
                          '  };' +
                          '})();')
    # Starts benchmark.
    tab.ExecuteJavaScript('window.document.getElementById("pause").click();')

    tab.WaitForJavaScriptExpression('!!window.results_', 600)

    self._power_metric.Stop(page, tab)
    self._power_metric.AddResults(tab, results)

    # NOTE(review): eval() of text produced by the (recorded) benchmark page;
    # json.loads would be safer against untrusted pages. Flagged only —
    # behavior left unchanged.
    score = eval(tab.EvaluateJavaScript('window.results_ || "[]"'))

    def Escape(k):
      # Normalize a result name into a metric-safe identifier.
      chars = [' ', '.', '-', '/', '(', ')', '*']
      for c in chars:
        k = k.replace(c, '_')
      return k

    def AggregateData(container, key, value):
      # Accumulate log-values so the final aggregate is a geometric mean.
      if key not in container:
        container[key] = {'count': 0, 'sum': 0}
      container[key]['count'] += 1
      container[key]['sum'] += math.log(value)

    # The part of the URL after '?' names the suite (e.g. 'dom-attr').
    suffix = page.url[page.url.index('?') + 1 :]

    def AddResult(name, value):
      # Only the aggregate matching the page's query param is "important".
      important = False
      if name == suffix:
        important = True
      results.AddValue(scalar.ScalarValue(
          results.current_page, Escape(name), 'runs/s', value, important))

    aggregated = {}
    for data in score:
      AddResult('%s/%s' % (data['collection'], data['name']),
                data['mean'])
      top_name = data['collection'].split('-', 1)[0]
      AggregateData(aggregated, top_name, data['mean'])
      collection_name = data['collection']
      AggregateData(aggregated, collection_name, data['mean'])

    # Python 2 (telemetry): dict.iteritems. Geometric mean of each bucket.
    for key, value in aggregated.iteritems():
      AddResult(key, math.exp(value['sum'] / value['count']))
class _DromaeoBenchmark(perf_benchmark.PerfBenchmark):
  """A base class for Dromaeo benchmarks."""
  test = _DromaeoMeasurement

  @classmethod
  def Name(cls):
    return 'dromaeo'

  def CreateStorySet(self, options):
    """Makes a PageSet for Dromaeo benchmarks."""
    # Subclasses are expected to define class members called query_param and
    # tag.
    if not hasattr(self, 'query_param') or not hasattr(self, 'tag'):
      raise NotImplementedError('query_param or tag not in Dromaeo benchmark.')
    # Each suite has its own WPR archive, selected by the subclass's tag.
    archive_data_file = '../page_sets/data/dromaeo.%s.json' % self.tag
    ps = story.StorySet(
        archive_data_file=archive_data_file,
        base_dir=os.path.dirname(os.path.abspath(__file__)),
        cloud_storage_bucket=story.PUBLIC_BUCKET)
    # The query param selects which Dromaeo suite the page runs.
    url = 'http://dromaeo.com?%s' % self.query_param
    ps.AddStory(page_module.Page(
        url, ps, ps.base_dir, make_javascript_deterministic=False))
    return ps
class DromaeoDomCoreAttr(_DromaeoBenchmark):
  """Dromaeo DOMCore attr JavaScript benchmark.

  Tests setting and getting DOM node attributes.
  """
  tag = 'domcoreattr'
  query_param = 'dom-attr'

  @classmethod
  def Name(cls):
    return 'dromaeo.domcoreattr'
@benchmark.Disabled('xp')  # crbug.com/501625
class DromaeoDomCoreModify(_DromaeoBenchmark):
  """Dromaeo DOMCore modify JavaScript benchmark.

  Tests creating and injecting DOM nodes.
  """
  tag = 'domcoremodify'
  query_param = 'dom-modify'

  @classmethod
  def Name(cls):
    return 'dromaeo.domcoremodify'
class DromaeoDomCoreQuery(_DromaeoBenchmark):
  """Dromaeo DOMCore query JavaScript benchmark.

  Tests querying DOM elements in a document.
  """
  tag = 'domcorequery'
  query_param = 'dom-query'

  @classmethod
  def Name(cls):
    return 'dromaeo.domcorequery'
class DromaeoDomCoreTraverse(_DromaeoBenchmark):
  """Dromaeo DOMCore traverse JavaScript benchmark.

  Tests traversing a DOM structure.
  """
  tag = 'domcoretraverse'
  query_param = 'dom-traverse'

  @classmethod
  def Name(cls):
    return 'dromaeo.domcoretraverse'
class DromaeoJslibAttrJquery(_DromaeoBenchmark):
  """Dromaeo JSLib attr jquery JavaScript benchmark.

  Tests setting and getting DOM node attributes using the jQuery JavaScript
  Library.
  """
  tag = 'jslibattrjquery'
  query_param = 'jslib-attr-jquery'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibattrjquery'
class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
  """Dromaeo JSLib attr prototype JavaScript benchmark.

  Tests setting and getting DOM node attributes using the Prototype
  JavaScript Library.
  """
  tag = 'jslibattrprototype'
  query_param = 'jslib-attr-prototype'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibattrprototype'
class DromaeoJslibEventJquery(_DromaeoBenchmark):
  """Dromaeo JSLib event jquery JavaScript benchmark.

  Tests binding, removing, and triggering DOM events using the jQuery
  JavaScript Library.
  """
  tag = 'jslibeventjquery'
  query_param = 'jslib-event-jquery'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibeventjquery'
class DromaeoJslibEventPrototype(_DromaeoBenchmark):
  """Dromaeo JSLib event prototype JavaScript benchmark.

  Tests binding, removing, and triggering DOM events using the Prototype
  JavaScript Library.
  """
  tag = 'jslibeventprototype'
  query_param = 'jslib-event-prototype'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibeventprototype'
# xp: crbug.com/389731
# win7: http://crbug.com/479796
# linux: http://crbug.com/513853
@benchmark.Disabled('xp', 'win7', 'linux')
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
  """Dromaeo JSLib modify jquery JavaScript benchmark.

  Tests creating and injecting DOM nodes into a document using the jQuery
  JavaScript Library.
  """
  tag = 'jslibmodifyjquery'
  query_param = 'jslib-modify-jquery'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibmodifyjquery'
class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
  """Dromaeo JSLib modify prototype JavaScript benchmark.

  Tests creating and injecting DOM nodes into a document using the Prototype
  JavaScript Library.
  """
  tag = 'jslibmodifyprototype'
  query_param = 'jslib-modify-prototype'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibmodifyprototype'
class DromaeoJslibStyleJquery(_DromaeoBenchmark):
  """Dromaeo JSLib style jquery JavaScript benchmark.

  Tests getting and setting CSS information on DOM elements using the jQuery
  JavaScript Library.
  """
  tag = 'jslibstylejquery'
  query_param = 'jslib-style-jquery'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibstylejquery'
class DromaeoJslibStylePrototype(_DromaeoBenchmark):
  """Dromaeo JSLib style prototype JavaScript benchmark.

  Tests getting and setting CSS information on DOM elements using the
  Prototype JavaScript Library.
  """
  tag = 'jslibstyleprototype'
  query_param = 'jslib-style-prototype'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibstyleprototype'
class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
  """Dromaeo JSLib traverse jquery JavaScript benchmark.

  Tests traversing a DOM structure using the jQuery JavaScript Library.
  """
  tag = 'jslibtraversejquery'
  query_param = 'jslib-traverse-jquery'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibtraversejquery'
class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
  """Dromaeo JSLib traverse prototype JavaScript benchmark.

  Tests traversing a DOM structure using the Prototype JavaScript Library.
  """
  tag = 'jslibtraverseprototype'
  query_param = 'jslib-traverse-prototype'

  @classmethod
  def Name(cls):
    return 'dromaeo.jslibtraverseprototype'
class DromaeoCSSQueryJquery(_DromaeoBenchmark):
  """Dromaeo CSS Query jquery JavaScript benchmark.

  Tests CSS selector queries using the jQuery JavaScript Library.
  """
  tag = 'cssqueryjquery'
  query_param = 'cssquery-jquery'

  @classmethod
  def Name(cls):
    return 'dromaeo.cssqueryjquery'
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=no-member, too-many-locals, too-many-branches, no-self-use, broad-except, lost-exception, too-many-nested-blocks, too-few-public-methods, invalid-name, missing-docstring
"""
This file tests that the notebooks requiring a single GPU run without
warning or exception.
"""
import glob
import re
import os
import unittest
from straight_dope_test_utils import _test_notebook
from straight_dope_test_utils import _download_straight_dope_notebooks
# Notebooks deliberately excluded from this single-GPU suite — e.g. they are
# prose-only or are covered by the multi-GPU/distributed test suite instead.
NOTEBOOKS_WHITELIST = [
    'chapter01_crashcourse/preface',
    'chapter01_crashcourse/introduction',
    'chapter01_crashcourse/chapter-one-problem-set',
    'chapter02_supervised-learning/environment',
    # Multi-GPU / distributed notebooks are exercised elsewhere.
    'chapter07_distributed-learning/multiple-gpus-scratch',
    'chapter07_distributed-learning/multiple-gpus-gluon',
    'chapter07_distributed-learning/training-with-multiple-machines'
]
class StraightDopeSingleGpuTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Download the straight dope notebooks once before any test runs."""
    # First parameter renamed from the misleading `self` to the
    # conventional `cls`: setUpClass receives the class object.
    assert _download_straight_dope_notebooks()
def test_completeness(self):
    """
    Make sure that every tutorial that isn't in the whitelist is considered for testing by this
    file. Exceptions should be added to the whitelist.

    N.B. If the test is commented out, then that will be viewed as an intentional disabling of the
    test.
    """
    # Open up this test file.
    with open(__file__, 'r') as f:
        # NOTE(review): readlines() keeps each '\n', so this join doubles
        # newlines; harmless for the regex search below.
        notebook_test_text = '\n'.join(f.readlines())

    notebooks_path = os.path.join(os.path.dirname(__file__), 'straight_dope_book')
    notebooks = glob.glob(os.path.join(notebooks_path, '**', '*.ipynb'))

    # Compile a list of notebooks that are tested
    tested_notebooks = set(re.findall(r"assert _test_notebook\('(.*)'\)", notebook_test_text))

    # Ensure each notebook in the straight dope book directory is on the whitelist or is tested.
    for notebook in notebooks:
        # e.g. ".../chapter01_crashcourse/ndarray.ipynb" ->
        #      "chapter01_crashcourse/ndarray"
        friendly_name = '/'.join(notebook.split('/')[-2:]).split('.')[0]
        if friendly_name not in tested_notebooks and friendly_name not in NOTEBOOKS_WHITELIST:
            assert False, friendly_name + " has not been added to the nightly/tests/straight_" + \
                "dope/test_notebooks_single_gpu.py test_suite. Consider also adding " + \
                "it to nightly/tests/straight_dope/test_notebooks_multi_gpu.py as " + \
                "well if the notebooks makes use of multiple GPUs."
def test_ndarray(self):
    """Run the ndarray notebook; fail on any warning or exception."""
    assert _test_notebook('chapter01_crashcourse/ndarray')
def test_linear_algebra(self):
    """Run the linear-algebra notebook; fail on any warning or exception."""
    assert _test_notebook('chapter01_crashcourse/linear-algebra')
def test_probability(self):
    """Run the probability notebook; fail on any warning or exception."""
    assert _test_notebook('chapter01_crashcourse/probability')
# TODO(vishaalk): Notebook contains the word 'Warning'. Needs to be updated to a synonym.
#def test_autograd(self):
# assert _test_notebook('chapter01_crashcourse/autograd')
# Chapter 2
def test_linear_regression_scratch(self):
    """Run the linear-regression-scratch notebook."""
    assert _test_notebook('chapter02_supervised-learning/linear-regression-scratch')
def test_linear_regression_gluon(self):
    """Run the linear-regression-gluon notebook."""
    assert _test_notebook('chapter02_supervised-learning/linear-regression-gluon')
# TODO(vishaalk): There is a relative file path needs to be fixed so that the
# python code can be run from another directory.
#def test_logistic_regression_gluon(self):
# assert _test_notebook('chapter02_supervised-learning/logistic-regression-gluon')
def test_softmax_regression_scratch(self):
    """Run the softmax-regression-scratch notebook."""
    assert _test_notebook('chapter02_supervised-learning/softmax-regression-scratch')
def test_softmax_regression_gluon(self):
    """Run the softmax-regression-gluon notebook."""
    assert _test_notebook('chapter02_supervised-learning/softmax-regression-gluon')
def test_regularization_scratch(self):
    """Run the regularization-scratch notebook."""
    assert _test_notebook('chapter02_supervised-learning/regularization-scratch')
# TODO(vishaalk): Notebook does not appear to be JSON: '{\n "cells": [\n {\n "cell_type": "m....
#def test_regularization_gluon(self):
# assert _test_notebook('chapter02_supervised-learning/regularization-gluon')
def test_perceptron(self):
    """Run the perceptron notebook."""
    assert _test_notebook('chapter02_supervised-learning/perceptron')
# Chapter 3
def test_mlp_scratch(self):
    """Run the mlp-scratch notebook."""
    assert _test_notebook('chapter03_deep-neural-networks/mlp-scratch')
def test_mlp_gluon(self):
    """Run the mlp-gluon notebook."""
    assert _test_notebook('chapter03_deep-neural-networks/mlp-gluon')
def test_mlp_dropout_scratch(self):
    """Run the mlp-dropout-scratch notebook."""
    assert _test_notebook('chapter03_deep-neural-networks/mlp-dropout-scratch')
def test_mlp_dropout_gluon(self):
assert _test_notebook('chapter03_deep-neural-networks/mlp-dropout-gluon')
def test_plumbing(self):
assert _test_notebook('chapter03_deep-neural-networks/plumbing')
def test_custom_layer(self):
assert _test_notebook('chapter03_deep-neural-networks/custom-layer')
#def test_kaggle_gluon_kfold(self):
# assert _test_notebook('chapter03_deep-neural-networks/kaggle-gluon-kfold')
# TODO(vishaalk): Load params and Save params are deprecated warning.
#def test_serialization(self):
# assert _test_notebook('chapter03_deep-neural-networks/serialization')
# Chapter 4
def test_cnn_scratch(self):
    """chapter04: cnn-scratch notebook executes successfully."""
    assert _test_notebook('chapter04_convolutional-neural-networks/cnn-scratch')

def test_cnn_gluon(self):
    """chapter04: cnn-gluon notebook executes successfully."""
    assert _test_notebook('chapter04_convolutional-neural-networks/cnn-gluon')

# TODO(vishaalk): Load params and Save params are deprecated warning.
#def test_deep_cnns_alexnet(self):
#    assert _test_notebook('chapter04_convolutional-neural-networks/deep-cnns-alexnet')

def test_very_deep_nets_vgg(self):
    """chapter04: very-deep-nets-vgg notebook executes successfully."""
    assert _test_notebook('chapter04_convolutional-neural-networks/very-deep-nets-vgg')

def test_cnn_batch_norm_scratch(self):
    """chapter04: cnn-batch-norm-scratch notebook executes successfully."""
    assert _test_notebook('chapter04_convolutional-neural-networks/cnn-batch-norm-scratch')

def test_cnn_batch_norm_gluon(self):
    """chapter04: cnn-batch-norm-gluon notebook executes successfully."""
    assert _test_notebook('chapter04_convolutional-neural-networks/cnn-batch-norm-gluon')
# Chapter 5
# TODO(vishaalk): There is a relative file path needs to be fixed so that the
# python code can be run from another directory.
#def test_simple_rnn(self):
# assert _test_notebook('chapter05_recurrent-neural-networks/simple-rnn')
# TODO(vishaalk): There is a relative file path needs to be fixed so that the
# python code can be run from another directory.
#def test_lstm_scratch(self):
# assert _test_notebook('chapter05_recurrent-neural-networks/lstm-scratch')
# TODO(vishaalk): There is a relative file path needs to be fixed so that the
# python code can be run from another directory.
#def test_gru_scratch(self):
# assert _test_notebook('chapter05_recurrent-neural-networks/gru-scratch')
#def test_rnns_gluon(self):
# assert _test_notebook('chapter05_recurrent-neural-networks/rnns-gluon')
# Chapter 6
def test_optimization_intro(self):
    """chapter06: optimization-intro notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/optimization-intro')

# TODO(vishaalk): RuntimeWarning: Overflow encountered in reduce.
#def test_gd_sgd_scratch(self):
#    assert _test_notebook('chapter06_optimization/gd-sgd-scratch')

# TODO(vishaalk): RuntimeWarning: Overflow encountered in reduce.
#def test_gd_sgd_gluon(self):
#    assert _test_notebook('chapter06_optimization/gd-sgd-gluon')

def test_momentum_scratch(self):
    """chapter06: momentum-scratch notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/momentum-scratch')

def test_momentum_gluon(self):
    """chapter06: momentum-gluon notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/momentum-gluon')

def test_adagrad_scratch(self):
    """chapter06: adagrad-scratch notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/adagrad-scratch')

def test_adagrad_gluon(self):
    """chapter06: adagrad-gluon notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/adagrad-gluon')

def test_rmsprop_scratch(self):
    """chapter06: rmsprop-scratch notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/rmsprop-scratch')

def test_rmsprop_gluon(self):
    """chapter06: rmsprop-gluon notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/rmsprop-gluon')

def test_adadelta_scratch(self):
    """chapter06: adadelta-scratch notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/adadelta-scratch')

def test_adadelta_gluon(self):
    """chapter06: adadelta-gluon notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/adadelta-gluon')

def test_adam_scratch(self):
    """chapter06: adam-scratch notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/adam-scratch')

def test_adam_gluon(self):
    """chapter06: adam-gluon notebook executes successfully."""
    assert _test_notebook('chapter06_optimization/adam-gluon')
# Chapter 7
def test_hybridize(self):
    """chapter07: hybridize notebook executes successfully."""
    assert _test_notebook('chapter07_distributed-learning/hybridize')
# Chapter 8
# TODO(vishaalk): Load params and Save params are deprecated warning.
#def test_object_detection(self):
# assert _test_notebook('chapter08_computer-vision/object-detection')
# TODO(vishaalk): Module skimage needs to be added to docker image.
#def test_fine_tuning(self):
# assert _test_notebook('chapter08_computer-vision/fine-tuning')
# TODO(vishaalk): Test disabled; document the failure reason before re-enabling.
#def test_visual_question_answer(self):
# assert _test_notebook('chapter08_computer-vision/visual-question-answer')
# Chapter 9
def test_tree_lstm(self):
    """chapter09: tree-lstm notebook executes successfully."""
    assert _test_notebook('chapter09_natural-language-processing/tree-lstm')
# Chapter 11
# TODO(vishaalk): Deferred initialization failed because shape cannot be inferred.
#def test_intro_recommender_systems(self):
# assert _test_notebook('chapter11_recommender-systems/intro-recommender-systems')
# Chapter 12
def test_lds_scratch(self):
    """chapter12: lds-scratch notebook executes successfully."""
    assert _test_notebook('chapter12_time-series/lds-scratch')
# TODO(vishaalk): File doesn't appear to be valid JSON.
#def test_issm_scratch(self):
# assert _test_notebook('chapter12_time-series/issm-scratch')
# TODO(vishaalk): Error: sequential1_batchnorm0_running_mean' has not been initialized
# def test_intro_forecasting_gluon(self):
# assert _test_notebook('chapter12_time-series/intro-forecasting-gluon')
#def test_intro_forecasting_2_gluon(self):
# assert _test_notebook('chapter12_time-series/intro-forecasting-2-gluon')
# Chapter 13
# TODO(vishaalk): Load params and Save params are deprecated warning.
#def test_vae_gluon(self):
# assert _test_notebook('chapter13_unsupervised-learning/vae-gluon')
# Chapter 14
def test_igan_intro(self):
    """chapter14: gan-intro notebook executes successfully."""
    # NOTE(review): method name says 'igan' but the notebook is 'gan-intro';
    # presumably a typo in the method name — confirm before renaming.
    assert _test_notebook('chapter14_generative-adversarial-networks/gan-intro')

def test_dcgan(self):
    """chapter14: dcgan notebook executes successfully."""
    assert _test_notebook('chapter14_generative-adversarial-networks/dcgan')

def test_generative_adversarial_networks(self):
    """chapter14: conditional (GAN) notebook executes successfully."""
    assert _test_notebook('chapter14_generative-adversarial-networks/conditional')
# Chapter 16
# TODO(vishaalk): Checked failed oshape.Size() != dshape.Size()
#def test_tensor_basics(self):
# assert _test_notebook('chapter16_tensor_methods/tensor_basics')
# TODO(vishaalk): Notebook does not appear to be valid JSON.
#def test_pixel2pixel(self):
# assert _test_notebook('chapter14_generative-adversarial-networks/pixel2pixel')
# Chapter 17
# TODO(vishaalk): Requires OpenAI Gym. Also uses deprecated load_params.
#def test_dqn(self):
# assert _test_notebook('chapter17_deep-reinforcement-learning/DQN')
#def test_ddqn(self):
# assert _test_notebook('chapter17_deep-reinforcement-learning/DDQN')
# Chapter 18
#def test_bayes_by_backprop(self):
# assert _test_notebook('chapter18_variational-methods-and-uncertainty/bayes-by-backprop')
#def test_bayes_by_backprop_gluon(self):
# assert _test_notebook('chapter18_variational-methods-and-uncertainty/bayes-by-backprop-gluon')
# TODO(vishaalk): AttributeError: 'list' object has no attribute 'keys'
#def test_bayes_by_backprop_rnn(self):
# assert _test_notebook('chapter18_variational-methods-and-uncertainty/bayes-by-backprop-rnn')
# Chapter 19
# TODO(vishaalk): Requires deepchem
#def test_graph_neural_networks(self):
# assert _test_notebook('chapter19_graph-neural-networks/Graph-Neural-Networks')
# Cheatsheets
# TODO(vishaalk): There is a relative file path needs to be fixed so that the
# python code can be run from another directory.
#def test_kaggle_gluon_kfold(self):
# assert _test_notebook('cheatsheets/kaggle-gluon-kfold')
| |
from __future__ import absolute_import, unicode_literals
# unicode_literals ensures that any render / __str__ methods returning HTML via calls to mark_safe / format_html
# return a SafeText, not SafeBytes; necessary so that it doesn't get re-encoded when the template engine
# calls force_text, which would cause it to lose its 'safe' flag
import collections
from importlib import import_module
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import python_2_unicode_compatible, force_text
from django.template.loader import render_to_string
from django import forms
__all__ = ['BaseBlock', 'Block', 'BoundBlock', 'DeclarativeSubBlocksMetaclass', 'BlockWidget', 'BlockField']
# =========================================
# Top-level superclasses and helper objects
# =========================================
class BaseBlock(type):
    """Metaclass for Block.

    Folds the class's declared inner ``Meta`` class together with any
    ``_meta_class`` inherited from base classes into a single ``_meta_class``
    attribute, from which per-instance meta objects are created.
    """

    def __new__(mcs, name, bases, attrs):
        declared_meta = attrs.pop('Meta', None)
        new_cls = super(BaseBlock, mcs).__new__(mcs, name, bases, attrs)
        inherited_meta = getattr(new_cls, '_meta_class', None)
        # Declared Meta takes precedence over the inherited one; either (or
        # both) may be absent.
        meta_bases = tuple(
            candidate for candidate in (declared_meta, inherited_meta)
            if candidate
        ) or ()
        new_cls._meta_class = type(str(name + 'Meta'), meta_bases + (object, ), {})
        return new_cls
class Block(six.with_metaclass(BaseBlock, object)):
    """Abstract base class for all block types.

    A Block knows how to render an editing form for a value
    (``render_form`` / ``value_from_datadict``), validate and convert values
    (``clean`` / ``to_python`` / ``get_prep_value``) and render the value on
    the front end (``render`` / ``render_basic``). Subclasses override the
    hooks they need; the defaults here are no-ops or NotImplementedError.
    """
    name = ''
    creation_counter = 0

    TEMPLATE_VAR = 'value'

    class Meta:
        label = None
        icon = "placeholder"
        classname = None

    # Setting a 'dependencies' list serves as a shortcut for the common case
    # where a complex block type (such as struct, list or stream) relies on
    # one or more inner block objects, and needs to ensure that the responses
    # from the 'media' and 'html_declarations' include the relevant
    # declarations for those inner blocks, as well as its own. Specifying
    # these inner block objects in a 'dependencies' list means that the base
    # 'media' and 'html_declarations' methods will return those declarations;
    # the outer block type can then add its own declarations to the list by
    # overriding those methods and using super().
    dependencies = []

    def __new__(cls, *args, **kwargs):
        # adapted from django.utils.deconstruct.deconstructible; capture the
        # arguments so that we can return them in the 'deconstruct' method
        obj = super(Block, cls).__new__(cls)
        obj._constructor_args = (args, kwargs)
        return obj

    def all_blocks(self):
        """
        Return a list consisting of self and all block objects that are
        direct or indirect dependencies of this block
        """
        result = [self]
        for dep in self.dependencies:
            result.extend(dep.all_blocks())
        return result

    def all_media(self):
        """Return the combined form media of this block and all dependencies."""
        media = forms.Media()
        for block in self.all_blocks():
            media += block.media
        return media

    def all_html_declarations(self):
        """Return the concatenated non-empty html_declarations of this block
        and all its dependencies, marked safe for template output."""
        declarations = filter(bool, [block.html_declarations() for block in self.all_blocks()])
        return mark_safe('\n'.join(declarations))

    def __init__(self, **kwargs):
        # kwargs override the Meta defaults on this instance's meta object.
        self.meta = self._meta_class()
        for attr, value in kwargs.items():
            setattr(self.meta, attr, value)

        # Increase the creation counter, and save our local copy.
        self.creation_counter = Block.creation_counter
        Block.creation_counter += 1
        self.definition_prefix = 'blockdef-%d' % self.creation_counter

        self.label = self.meta.label or ''

    def set_name(self, name):
        self.name = name
        if not self.meta.label:
            # Derive a human-readable label from the name if none was given.
            self.label = capfirst(force_text(name).replace('_', ' '))

    @property
    def media(self):
        return forms.Media()

    def html_declarations(self):
        """
        Return an HTML fragment to be rendered on the form page once per block definition -
        as opposed to once per occurrence of the block. For example, the block definition
            ListBlock(label="Shopping list", CharBlock(label="Product"))
        needs to output a <script type="text/template"></script> block containing the HTML for
        a 'product' text input, so that these can be dynamically added to the list. This
        template block must only occur once in the page, even if there are multiple 'shopping list'
        blocks on the page.

        Any element IDs used in this HTML fragment must begin with definition_prefix.
        (More precisely, they must either be definition_prefix itself, or begin with definition_prefix
        followed by a '-' character)
        """
        return ''

    def js_initializer(self):
        """
        Returns a Javascript expression string, or None if this block does not require any
        Javascript behaviour. This expression evaluates to an initializer function, a function that
        takes the ID prefix and applies JS behaviour to the block instance with that value and prefix.

        The parent block of this block (or the top-level page code) must ensure that this
        expression is not evaluated more than once. (The resulting initializer function can and will be
        called as many times as there are instances of this block, though.)
        """
        return None

    def render_form(self, value, prefix='', errors=None):
        """
        Render the HTML for this block with 'value' as its content.
        """
        raise NotImplementedError('%s.render_form' % self.__class__)

    def value_from_datadict(self, data, files, prefix):
        """Extract this block's value from the submitted form data."""
        raise NotImplementedError('%s.value_from_datadict' % self.__class__)

    def bind(self, value, prefix=None, errors=None):
        """
        Return a BoundBlock which represents the association of this block definition with a value
        and a prefix (and optionally, a ValidationError to be rendered).
        BoundBlock primarily exists as a convenience to allow rendering within templates:
        bound_block.render() rather than blockdef.render(value, prefix) which can't be called from
        within a template.
        """
        return BoundBlock(self, value, prefix=prefix, errors=errors)

    def get_default(self):
        """
        Return this block's default value (conventionally found in self.meta.default),
        converted to the value type expected by this block. This caters for the case
        where that value type is not something that can be expressed statically at
        model definition time (e.g. something like StructValue which incorporates a
        pointer back to the block definition object).
        """
        return self.meta.default

    def prototype_block(self):
        """
        Return a BoundBlock that can be used as a basis for new empty block instances to be added on the fly
        (new list items, for example). This will have a prefix of '__PREFIX__' (to be dynamically replaced with
        a real prefix when it's inserted into the page) and a value equal to the block's default value.
        """
        return self.bind(self.get_default(), '__PREFIX__')

    def clean(self, value):
        """
        Validate value and return a cleaned version of it, or throw a ValidationError if validation fails.
        The thrown ValidationError instance will subsequently be passed to render() to display the
        error message; the ValidationError must therefore include all detail necessary to perform that
        rendering, such as identifying the specific child block(s) with errors, in the case of nested
        blocks. (It is suggested that you use the 'params' attribute for this; using error_list /
        error_dict is unreliable because Django tends to hack around with these when nested.)
        """
        return value

    def to_python(self, value):
        """
        Convert 'value' from a simple (JSON-serialisable) value to a (possibly complex) Python value to be
        used in the rest of the block API and within front-end templates. In simple cases this might be
        the value itself; alternatively, it might be a 'smart' version of the value which behaves mostly
        like the original value but provides a native HTML rendering when inserted into a template; or it
        might be something totally different (e.g. an image chooser will use the image ID as the clean
        value, and turn this back into an actual image object here).
        """
        return value

    def get_prep_value(self, value):
        """
        The reverse of to_python; convert the python value into JSON-serialisable form.
        """
        return value

    def get_context(self, value):
        """Build the template context used by render()."""
        return {
            'self': value,
            self.TEMPLATE_VAR: value,
        }

    def render(self, value):
        """
        Return a text rendering of 'value', suitable for display on templates. By default, this will
        use a template if a 'template' property is specified on the block, and fall back on render_basic
        otherwise.
        """
        template = getattr(self.meta, 'template', None)
        if template:
            return render_to_string(template, self.get_context(value))
        else:
            return self.render_basic(value)

    def render_basic(self, value):
        """
        Return a text rendering of 'value', suitable for display on templates. render() will fall back on
        this if the block does not define a 'template' property.
        """
        return force_text(value)

    def get_searchable_content(self, value):
        """
        Returns a list of strings containing text content within this block to be used in a search engine.
        """
        return []

    def check(self, **kwargs):
        """
        Hook for the Django system checks framework -
        returns a list of django.core.checks.Error objects indicating validity errors in the block
        """
        return []

    def _check_name(self, **kwargs):
        """
        Helper method called by container blocks as part of the system checks framework,
        to validate that this block's name is a valid identifier.
        (Not called universally, because not all blocks need names)
        """
        errors = []
        if not self.name:
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                hint="Block name cannot be empty",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))

        if ' ' in self.name:
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                hint="Block names cannot contain spaces",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))

        if '-' in self.name:
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                # hint was previously passed positionally; made an explicit
                # keyword for consistency with the checks above
                hint="Block names cannot contain dashes",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))

        if self.name and self.name[0].isdigit():
            errors.append(checks.Error(
                "Block name %r is invalid" % self.name,
                hint="Block names cannot begin with a digit",
                obj=kwargs.get('field', self),
                id='wagtailcore.E001',
            ))

        return errors

    def id_for_label(self, prefix):
        """
        Return the ID to be used as the 'for' attribute of <label> elements that refer to this block,
        when the given field prefix is in use. Return None if no 'for' attribute should be used.
        """
        return None

    def deconstruct(self):
        # adapted from django.utils.deconstruct.deconstructible
        module_name = self.__module__
        name = self.__class__.__name__

        # Make sure it's actually there and not an inner class
        module = import_module(module_name)
        if not hasattr(module, name):
            raise ValueError(
                "Could not find object %s in %s.\n"
                "Please note that you cannot serialize things like inner "
                "classes. Please move the object into the main module "
                "body to use migrations.\n"
                % (name, module_name))

        # if the module defines a DECONSTRUCT_ALIASES dictionary, see if the class has an entry in there;
        # if so, use that instead of the real path
        try:
            path = module.DECONSTRUCT_ALIASES[self.__class__]
        except (AttributeError, KeyError):
            path = '%s.%s' % (module_name, name)

        return (
            path,
            self._constructor_args[0],
            self._constructor_args[1],
        )

    def __eq__(self, other):
        """
        The deep_deconstruct method in django.db.migrations.autodetector.MigrationAutodetector does not
        recurse into arbitrary lists and dicts. As a result, when it is passed a field such as:
            StreamField([
                ('heading', CharBlock()),
            ])
        the CharBlock object will be left in its constructed form. This causes problems when
        MigrationAutodetector compares two separate instances of the StreamField from different project
        states: since the CharBlocks are different objects, it will report a change where there isn't one.

        To prevent this, we implement the equality operator on Block instances such that the two CharBlocks
        are reported as equal. Since block objects are intended to be immutable with the exception of
        set_name(), it is sufficient to compare the 'name' property and the constructor args/kwargs of the
        two block objects. The 'deconstruct' method provides a convenient way to access the latter.
        """
        if not isinstance(other, Block):
            # if the other object isn't a block at all, it clearly isn't equal.
            return False

        # Note that we do not require the two blocks to be of the exact same class. This is because
        # we may wish the following blocks to be considered equal:
        #
        #     class FooBlock(StructBlock):
        #         first_name = CharBlock()
        #         surname = CharBlock()
        #
        #     class BarBlock(StructBlock):
        #         first_name = CharBlock()
        #         surname = CharBlock()
        #
        #     FooBlock() == BarBlock() == StructBlock([('first_name', CharBlock()), ('surname', CharBlock())])
        #
        # For this to work, StructBlock will need to ensure that 'deconstruct' returns the same signature
        # in all of these cases, including reporting StructBlock as the path:
        #
        #     FooBlock().deconstruct() == (
        #         'wagtail.wagtailcore.blocks.StructBlock',
        #         [('first_name', CharBlock()), ('surname', CharBlock())],
        #         {}
        #     )
        #
        # This has the bonus side effect that the StructBlock field definition gets frozen into
        # the migration, rather than leaving the migration vulnerable to future changes to FooBlock / BarBlock
        # in models.py.
        return (self.name == other.name) and (self.deconstruct() == other.deconstruct())

    def __ne__(self, other):
        return not self.__eq__(other)

    # Making block instances hashable in a way that's consistent with __eq__ is non-trivial, because
    # self.deconstruct() is liable to contain unhashable data (e.g. lists and dicts). So let's set
    # Block to be explicitly unhashable - Python 3 will do this automatically when defining __eq__,
    # but Python 2 won't, and we'd like the behaviour to be consistent on both.
    __hash__ = None
@python_2_unicode_compatible
class BoundBlock(object):
    """The association of a block definition with a value, a form prefix and
    (optionally) validation errors.

    Exists primarily as a convenience for templates: ``bound_block.render()``
    can be called without arguments, unlike ``blockdef.render(value)``.
    """

    def __init__(self, block, value, prefix=None, errors=None):
        # block: the Block definition; value: the content for this occurrence;
        # prefix: form field name prefix; errors: ValidationError to display.
        self.block = block
        self.value = value
        self.prefix = prefix
        self.errors = errors

    def render_form(self):
        """Render the editing form for this block occurrence."""
        return self.block.render_form(self.value, self.prefix, errors=self.errors)

    def render(self):
        """Render the front-end (template) representation of the value."""
        return self.block.render(self.value)

    def id_for_label(self):
        """Return the HTML ID for <label for="..."> elements, or None."""
        return self.block.id_for_label(self.prefix)

    def __str__(self):
        """Render the value according to the block's native rendering"""
        return self.block.render(self.value)
class DeclarativeSubBlocksMetaclass(BaseBlock):
    """
    Metaclass that collects sub-blocks declared on the base classes.
    (cheerfully stolen from https://github.com/django/django/blob/master/django/forms/forms.py)
    """
    def __new__(mcs, name, bases, attrs):
        # Pull out every attribute that is a Block instance; these become the
        # class's `declared_blocks`, ordered by creation_counter.
        declared = []
        for attr_name in list(attrs):
            candidate = attrs[attr_name]
            if isinstance(candidate, Block):
                candidate.set_name(attr_name)
                declared.append((attr_name, attrs.pop(attr_name)))
        declared.sort(key=lambda pair: pair[1].creation_counter)
        attrs['declared_blocks'] = collections.OrderedDict(declared)

        new_class = super(DeclarativeSubBlocksMetaclass, mcs).__new__(
            mcs, name, bases, attrs)

        # Walk the MRO from the root downwards so that nearer classes
        # override, accumulating the combined `base_blocks`.
        base_blocks = collections.OrderedDict()
        for base in reversed(new_class.__mro__):
            if hasattr(base, 'declared_blocks'):
                base_blocks.update(base.declared_blocks)

            # Field shadowing: an attribute explicitly set to None hides an
            # inherited sub-block of the same name.
            for attr_name, value in base.__dict__.items():
                if value is None and attr_name in base_blocks:
                    base_blocks.pop(attr_name)
        new_class.base_blocks = base_blocks

        return new_class
# ========================
# django.forms integration
# ========================
class BlockWidget(forms.Widget):
    """Wraps a block object as a widget so that it can be incorporated into a Django form"""

    def __init__(self, block_def, attrs=None):
        super(BlockWidget, self).__init__(attrs=attrs)
        self.block_def = block_def

    def render_with_errors(self, name, value, attrs=None, errors=None):
        """Render the block's form HTML, appending a one-shot JS initializer
        snippet when the block defines one."""
        bound_block = self.block_def.bind(value, prefix=name, errors=errors)
        initializer = self.block_def.js_initializer()
        js_snippet = ''
        if initializer:
            js_snippet = """
                <script>
                $(function() {
                var initializer = %s;
                initializer('%s');
                })
                </script>
            """ % (initializer, name)
        return mark_safe(bound_block.render_form() + js_snippet)

    def render(self, name, value, attrs=None):
        """Standard Django widget entry point; renders without errors."""
        return self.render_with_errors(name, value, attrs=attrs, errors=None)

    @property
    def media(self):
        return self.block_def.all_media()

    def value_from_datadict(self, data, files, name):
        return self.block_def.value_from_datadict(data, files, name)
class BlockField(forms.Field):
    """Wraps a block object as a form field so that it can be incorporated into a Django form"""

    def __init__(self, block=None, **kwargs):
        # A block definition is mandatory; fail loudly at configuration time.
        if block is None:
            raise ImproperlyConfigured("BlockField was not passed a 'block' object")
        self.block = block

        # Default the widget to a BlockWidget over the same block, unless the
        # caller supplied one explicitly.
        if 'widget' not in kwargs:
            kwargs['widget'] = BlockWidget(block)

        super(BlockField, self).__init__(**kwargs)

    def clean(self, value):
        """Delegate validation/cleaning to the wrapped block."""
        return self.block.clean(value)
# Map block classes to the dotted paths that 'deconstruct' should report, so
# that migrations reference the public import location rather than this module.
DECONSTRUCT_ALIASES = {
    Block: 'wagtail.wagtailcore.blocks.Block',
}
| |
import json
import types
from bitstring import CreationError
from .constants import CONDITIONAL
from .errors import BadConditionalCaseError
from .utils import indent_text
class BreadStruct(object):
    """In-memory representation of a parsed binary structure.

    Holds an ordered list of fields (leaf fields, nested structs and
    conditional fields) that all read from/write to one shared bitstring
    (``_data_bits``). Field access is exposed as attribute access via
    __getattr__/__setattr__.
    """

    def __init__(self):
        self._data_bits = None
        self._fields = {}
        self._conditional_fields = []
        self._field_list = []
        self._name = None

        # __offsets__ retained for backwards compatibility
        class Offsets(object):
            pass
        self.__offsets__ = Offsets()

    def __eq__(self, other):
        # Two structs are equal iff their raw bit representations match.
        if not hasattr(other, '_data_bits'):
            return False
        return self._data_bits == other._data_bits

    def __ne__(self, other):
        return not self.__eq__(other)

    # NOTE(review): __eq__ is defined without __hash__, so on Python 2
    # instances stay hashable by identity; kept as-is for compatibility.

    def __len__(self):
        return self._compute_length()

    def _get_min_length(self):
        """Length in bits, assuming each conditional takes its shortest arm."""
        total_length = 0

        for field in self._field_list:
            if isinstance(field, BreadConditional):
                total_length += field._get_min_length()
            else:
                total_length += field._length

        return total_length

    def _field_strings(self):
        """One display string per visible field (underscored names hidden)."""
        field_strings = []

        for field in self._field_list:
            if isinstance(field, BreadStruct):
                field_strings.append(
                    field._name + ': ' + indent_text(str(field)).lstrip())
            elif isinstance(field, BreadConditional):
                field_strings.append(str(field))
            else:
                if field._name[0] != '_':
                    field_strings.append(field._name + ': ' + str(field))

        return field_strings

    def __str__(self):
        field_strings = self._field_strings()
        return '{\n' + '\n'.join(map(indent_text, field_strings)) + '\n}'

    def _set_data(self, data_bits):
        self._data_bits = data_bits

        for field in self._field_list:
            field._set_data(data_bits)

    @property
    def _offset(self):
        # A struct's offset is the offset where its first field starts
        return self._field_list[0]._offset

    @_offset.setter
    def _offset(self, value):
        offset = value

        # All fields offsets are relative to the starting offset for the struct
        for field in self._field_list:
            field._offset = offset
            offset += field._length

        for name, field in list(self._fields.items()):
            setattr(self.__offsets__, name, field._offset)

    def _compute_length(self):
        # Total length in bits of all fields.
        return sum([x._length for x in self._field_list])

    def get(self):
        return self

    def set(self, value):
        raise ValueError("Can't set a non-leaf struct to a value")

    def __getattr__(self, attr):
        if attr in ('_LENGTH', '_length'):
            return self._compute_length()

        if attr in self._fields:
            return self._fields[attr].get()

        # The attribute may belong to the active arm of a conditional field.
        for conditional_field in self._conditional_fields:
            try:
                return getattr(conditional_field, attr)
            except AttributeError:
                pass  # pragma: no cover

        raise AttributeError("No known field '%s'" % (attr))

    def __setattr__(self, attr, value):
        try:
            if attr[0] == '_':
                # Internal attributes are stored directly on the object.
                super(BreadStruct, self).__setattr__(attr, value)
            elif attr in self._fields:
                field = self._fields[attr]
                field.set(value)
            else:
                for conditional_field in self._conditional_fields:
                    try:
                        return setattr(conditional_field, attr, value)
                    except AttributeError:
                        pass
                raise AttributeError("No known field '%s'" % (attr))
        except CreationError as e:
            # bitstring rejected the value; surface it as a ValueError
            raise ValueError('Error while setting %s: %s' % (field._name, e))

    def _add_field(self, field, name):
        if name is not None:
            self._fields[name] = field

        field._name = name
        self._field_list.append(field)

        if isinstance(field, BreadConditional):
            self._conditional_fields.append(field)

    def as_native(self):
        """Return the struct as a dict of native Python values, recursing into
        nested structs and the active arm of each conditional field."""
        native_struct = {}

        for field in self._field_list:
            if isinstance(field, BreadConditional):
                # Conditionals contribute their active arm's fields directly.
                native_struct.update(field.as_native())
            elif field._name[0] != '_':
                native_struct[field._name] = field.as_native()
            # (A duplicated, unreachable
            # 'elif isinstance(field, BreadConditional)' branch was removed
            # here; the first branch already handles conditionals.)

        return native_struct

    def as_json(self):
        """Return the struct serialised as a JSON object string."""
        return json.dumps(self.as_native())
class BreadConditional(object):
    """A field whose layout depends on the value of another field in the
    parent struct.

    Holds one candidate struct per predicate value; attribute access is
    forwarded to the struct selected by the parent's current value.
    """

    @staticmethod
    def from_spec(spec, parent):
        """Build a BreadConditional from a (CONDITIONAL, name, cases) spec."""
        predicate_field_name, conditions = spec[1:]

        field = BreadConditional(predicate_field_name, parent)

        for predicate_value, condition in list(conditions.items()):
            condition_struct = build_struct(condition)
            field._add_condition(predicate_value, condition_struct)

        return field

    def __init__(self, conditional_field_name, parent_struct):
        self._name = None
        self._conditions = {}
        self._parent_struct = parent_struct
        self._conditional_field_name = conditional_field_name

    def _get_min_length(self):
        """Length in bits of the shortest possible arm."""
        return min(map(lambda x: x._length, self._conditions.values()))

    def _set_data(self, data_bits):
        for struct in list(self._conditions.values()):
            struct._set_data(data_bits)

    def _add_condition(self, predicate_value, struct):
        self._conditions[predicate_value] = struct

    def _get_condition(self):
        """Return the parent's current predicate value, validating that a
        case exists for it."""
        switch_value = getattr(
            self._parent_struct, self._conditional_field_name)

        if switch_value not in self._conditions:
            raise BadConditionalCaseError(str(switch_value))

        return switch_value

    def __getattr__(self, attr):
        if attr == '_length':
            return self._conditions[self._get_condition()]._length

        if attr in ('_name', '_conditions', '_parent_struct'):
            # __getattr__ is only reached once normal lookup has failed, so
            # these internals are genuinely absent; raise directly. (The
            # original called the nonexistent super().__getattr__, which
            # raised a misleading AttributeError about 'super'.)
            raise AttributeError(attr)

        # Forward anything else to the currently-selected arm.
        return getattr(self._conditions[self._get_condition()], attr)

    def __setattr__(self, attr, value):
        # Underscore attributes live on this object; everything else is
        # forwarded to the currently-selected condition struct.
        if attr[0] == '_':
            super(BreadConditional, self).__setattr__(attr, value)
        else:
            self._conditions[self._get_condition()].__setattr__(attr, value)

    def as_native(self):
        return self._conditions[self._get_condition()].as_native()

    def __str__(self):
        return '\n'.join(
            self._conditions[self._get_condition()]._field_strings())

    @property
    def _offset(self):
        # All arms share the same starting offset; report an arbitrary one.
        return self._conditions[list(self._conditions.keys())[0]]._offset

    @_offset.setter
    def _offset(self, off):
        for condition_struct in list(self._conditions.values()):
            condition_struct._offset = off
def build_struct(spec, type_name=None):
    """Construct a BreadStruct from a declarative *spec* list, optionally
    giving the resulting object the type name *type_name*."""
    # Give different structs the appearance of having different type names
    class NewBreadStruct(BreadStruct):
        pass

    if type_name is not None:
        NewBreadStruct.__name__ = type_name

    struct = NewBreadStruct()
    global_options = {}
    unnamed_fields = 0

    for spec_line in spec:
        if type(spec_line) == dict:
            # A dictionary in the spec indicates global options for parsing
            global_options = spec_line
        elif isinstance(spec_line, types.FunctionType) or len(spec_line) == 1:
            # Nameless field: either a bare factory function or a 1-tuple
            # containing one. Evaluate it and register under a fake name.
            if isinstance(spec_line, types.FunctionType):
                factory = spec_line
            else:
                factory = spec_line[0]

            struct._add_field(
                factory(struct, **global_options),
                '_unnamed_%d' % (unnamed_fields))
            unnamed_fields += 1
        elif spec_line[0] == CONDITIONAL:
            # Conditional field switched on another field's value.
            predicate_field_name = spec_line[1]
            conditional_field = BreadConditional.from_spec(spec_line, struct)
            struct._add_field(
                conditional_field,
                '_conditional_on_%s_%d' % (predicate_field_name,
                                           unnamed_fields))
            unnamed_fields += 1
        else:
            field_name = spec_line[0]
            field = spec_line[1]

            # Options for this field, if any, override the global options
            options = global_options
            if len(spec_line) == 3:
                options = dict(global_options)
                options.update(spec_line[2])

            if type(field) == list:
                # A nested list describes a sub-struct.
                struct._add_field(build_struct(field), field_name)
            else:
                struct._add_field(field(struct, **options), field_name)

    return struct
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import functools
from trac.core import Component
from trac.util.concurrency import ThreadLocal, threading
__all__ = ['CacheManager', 'cached']
_id_to_key = {}
def key_to_id(s):
"""Return a hash of the given property key."""
# This is almost the same algorithm as Python's string hash,
# except we only keep a 31-bit result.
result = ord(s[0]) << 7 if s else 0
for c in s:
result = ((1000003 * result) & 0x7fffffff) ^ ord(c)
result ^= len(s)
_id_to_key[result] = s
return result
class CachedPropertyBase(property):
    """Base class for cached property descriptors.

    :since 1.0.2: inherits from `property`.
    """
    def __init__(self, retriever):
        self.retriever = retriever
        # Copy __name__, __doc__ etc. from the retriever so the descriptor is
        # introspectable like the function it wraps.
        functools.update_wrapper(self, retriever)

    def make_key(self, cls):
        """Return the cache key '<module>.<class>.<attr>' for this property,
        attributed to the class in *cls*'s MRO that actually defines it."""
        attr = self.retriever.__name__
        defining_cls = cls
        for base in cls.mro():
            if base.__dict__.get(attr) is self:
                defining_cls = base
                break
        return '%s.%s.%s' % (defining_cls.__module__, defining_cls.__name__,
                             attr)
class CachedSingletonProperty(CachedPropertyBase):
    """Cached property descriptor for singleton-like owner classes.

    Because at most one instance of the owner exists per
    `~trac.env.Environment`, there is only one cache to monitor in the
    database, so a key built purely from static information (module,
    class and method names) suffices and the resulting numeric id can
    be memoized directly on the descriptor itself.
    """
    def __get__(self, instance, owner):
        if instance is None:
            return self
        try:
            num_id = self.id
        except AttributeError:
            # First access: compute and memoize the numeric id.
            num_id = self.id = key_to_id(self.make_key(owner))
        return CacheManager(instance.env).get(num_id, self.retriever,
                                              instance)

    def __delete__(self, instance):
        try:
            num_id = self.id
        except AttributeError:
            num_id = self.id = key_to_id(self.make_key(instance.__class__))
        CacheManager(instance.env).invalidate(num_id)
class CachedProperty(CachedPropertyBase):
    """Cached property descriptor for owner classes that may have many
    instances per `~trac.env.Environment`.

    The static key (module, class, method) is augmented with a string
    unique to each owner instance, stored in the attribute named by
    ``key_attr``.  On first use that attribute is overwritten with the
    numeric id, so it must not be used for anything else.
    """
    def __init__(self, retriever, key_attr):
        super(CachedProperty, self).__init__(retriever)
        self.key_attr = key_attr

    def __get__(self, instance, owner):
        if instance is None:
            return self
        num_id = getattr(instance, self.key_attr)
        if isinstance(num_id, str):
            # Still the raw key string: hash it and cache the id back
            # into the key attribute.
            num_id = key_to_id('%s:%s' % (self.make_key(owner), num_id))
            setattr(instance, self.key_attr, num_id)
        return CacheManager(instance.env).get(num_id, self.retriever,
                                              instance)

    def __delete__(self, instance):
        num_id = getattr(instance, self.key_attr)
        if isinstance(num_id, str):
            num_id = key_to_id('%s:%s' %
                               (self.make_key(instance.__class__), num_id))
            setattr(instance, self.key_attr, num_id)
        CacheManager(instance.env).invalidate(num_id)
def cached(fn_or_attr=None):
    """Method decorator creating a cached attribute from a data
    retrieval method.

    Accessing the cached attribute returns the cached value; the
    retrieval method is called transparently by the `CacheManager` on
    first use after program start or after the cache has been
    invalidated.  The cache is invalidated by ``del``-eting the
    attribute.  Cache validity is tracked via the `cache` table in the
    database, and invalidation is performed within a (possibly nested)
    transaction block.

    Used bare (``@cached``) on a method of a class whose instances
    behave as singletons within the scope of a given
    `~trac.env.Environment` (typically `~trac.core.Component` classes),
    the database key is built from the names of the containing module,
    class and retriever method.

    Used with an argument (``@cached('_metadata_id')``) on a method of
    a non-"singleton" class, the named attribute must contain a string
    unique to the instance; it is appended to the static key.  The key
    attribute is overwritten with a hash of the key on first access, so
    it should not be used for any other purpose.

    In either case, the object the decorator is used on must expose the
    application `~trac.env.Environment` through an ``env`` attribute.

    .. versionchanged:: 1.0
       The data retrieval method is no longer called with a ``db``
       connection argument; use the normal
       `~trac.env.Environment.db_query` or
       `~trac.env.Environment.db_transaction` instead.
    """
    if hasattr(fn_or_attr, '__call__'):
        # Bare form: @cached applied directly to the retriever method.
        return CachedSingletonProperty(fn_or_attr)
    # Parametrized form: @cached('key_attr_name').
    return lambda fn: CachedProperty(fn, fn_or_attr)
class CacheManager(Component):
    """Cache manager.

    Holds a process-wide cache of retrieved values, kept consistent
    across processes through the `cache` database table: every cache id
    has a generation number that is bumped on invalidation.
    """
    required = True
    def __init__(self):
        # id -> (data, generation); shared by all threads of this process.
        self._cache = {}
        # Per-thread snapshot: `meta` is {id: generation} read from the
        # database, `cache` is a copy of self._cache taken at the same time.
        self._local = ThreadLocal(meta=None, cache=None)
        # Guards self._cache; RLock because get() may be re-entered.
        self._lock = threading.RLock()
    # Public interface
    def reset_metadata(self):
        """Reset per-request cache metadata."""
        self._local.meta = self._local.cache = None
    def get(self, id, retriever, instance):
        """Get cached or fresh data for the given id.

        Checks, in order: the thread-local snapshot, the process-wide
        cache, and finally the database generation; only calls
        ``retriever(instance)`` when every cached copy is stale or
        missing.
        """
        # Get cache metadata
        local_meta = self._local.meta
        local_cache = self._local.cache
        if local_meta is None:
            # First cache usage in this request, retrieve cache metadata
            # from the database and make a thread-local copy of the cache
            meta = self.env.db_query("SELECT id, generation FROM cache")
            self._local.meta = local_meta = dict(meta)
            self._local.cache = local_cache = self._cache.copy()
        # -1 marks "no row in the cache table yet" (see invalidate()).
        db_generation = local_meta.get(id, -1)
        # Try the thread-local copy first
        try:
            data, generation = local_cache[id]
            if generation == db_generation:
                return data
        except KeyError:
            pass
        with self.env.db_query as db:
            with self._lock:
                # Get data from the process cache
                try:
                    data, generation = local_cache[id] = self._cache[id]
                    if generation == db_generation:
                        return data
                except KeyError:
                    generation = None # Force retrieval from the database
                # Check if the process cache has the newest version, as it may
                # have been updated after the metadata retrieval
                for db_generation, in db(
                        "SELECT generation FROM cache WHERE id=%s", (id,)):
                    break
                else:
                    db_generation = -1
                if db_generation == generation:
                    return data
                # Retrieve data from the database
                data = retriever(instance)
                local_cache[id] = self._cache[id] = data, db_generation
                local_meta[id] = db_generation
                return data
    def invalidate(self, id):
        """Invalidate cached data for the given id."""
        with self.env.db_transaction as db:
            with self._lock:
                # Invalidate in other processes
                # The row corresponding to the cache may not exist in the table
                # yet.
                # - If the row exists, the UPDATE increments the generation,
                # the SELECT returns a row and we're done.
                # - If the row doesn't exist, the UPDATE does nothing, but
                # starts a transaction. The SELECT then returns nothing,
                # and we can safely INSERT a new row.
                db("UPDATE cache SET generation=generation+1 WHERE id=%s",
                   (id,))
                if not db("SELECT generation FROM cache WHERE id=%s", (id,)):
                    db("INSERT INTO cache VALUES (%s, %s, %s)",
                       (id, 0, _id_to_key.get(id, '<unknown>')))
                # Invalidate in this process
                self._cache.pop(id, None)
                # Invalidate in this thread
                try:
                    # TypeError: the thread-local snapshot is still None
                    # (no cache usage in this request yet).
                    del self._local.cache[id]
                except (KeyError, TypeError):
                    pass
| |
import datetime
from urlparse import urlparse
from utils import log as logging
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.http import condition
from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse, Http404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
# from django.db import IntegrityError
from apps.rss_feeds.models import Feed, merge_feeds
from apps.rss_feeds.models import MFetchHistory
from apps.rss_feeds.models import MFeedIcon
from apps.push.models import PushSubscription
from apps.analyzer.models import get_classifiers_for_user
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import MStory
from utils.user_functions import ajax_login_required
from utils import json_functions as json, feedfinder2 as feedfinder
from utils.feed_functions import relative_timeuntil, relative_timesince
from utils.user_functions import get_user
from utils.view_functions import get_argument_or_404
from utils.view_functions import required_params
from utils.view_functions import is_true
from vendor.timezones.utilities import localtime_for_timezone
from utils.ratelimit import ratelimit
IGNORE_AUTOCOMPLETE = [
"facebook.com/feeds/notifications.php",
"inbox",
"secret",
"password",
"latitude",
]
@ajax_login_required
@json.json_view
def search_feed(request):
    """Look up a feed matching the given URL/address for the current user."""
    address = request.REQUEST.get('address')
    offset = int(request.REQUEST.get('offset', 0))
    if not address:
        return dict(code=-1, message="Please provide a URL/address.")
    logging.user(request.user, "~FBFinding feed (search_feed): %s" % address)
    # Prefer the proxied client address when behind a load balancer.
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META['REMOTE_ADDR']
    logging.user(request.user, "~FBIP: %s" % ip)
    aggressive = request.user.is_authenticated()
    feed = Feed.get_feed_from_url(address, create=False, aggressive=aggressive, offset=offset)
    if not feed:
        return dict(code=-1, message="No feed found matching that XML or website address.")
    return feed.canonical()
@json.json_view
def load_single_feed(request, feed_id):
    """Return the full canonical payload for one feed, including the
    requesting user's classifiers for it."""
    user = get_user(request)
    feed = get_object_or_404(Feed, pk=feed_id)
    payload = feed.canonical(full=True)
    payload['classifiers'] = get_classifiers_for_user(user, feed_id=feed.pk)
    return payload
def feed_favicon_etag(request, feed_id):
    """ETag helper for `load_feed_favicon`: the icon's color, or None
    when no icon exists for the feed."""
    try:
        return MFeedIcon.objects.get(feed_id=feed_id).color
    except MFeedIcon.DoesNotExist:
        return None
@condition(etag_func=feed_favicon_etag)
def load_feed_favicon(request, feed_id):
    """Serve a feed's favicon PNG, falling back to a generic world icon."""
    feed_icon = None
    try:
        feed_icon = MFeedIcon.objects.get(feed_id=feed_id)
    except MFeedIcon.DoesNotExist:
        pass
    if not feed_icon or not feed_icon.data:
        return HttpResponseRedirect(settings.MEDIA_URL + 'img/icons/circular/world.png')
    # Icon data is stored base64-encoded (Python 2 str codec).
    return HttpResponse(feed_icon.data.decode('base64'), content_type='image/png')
@json.json_view
def feed_autocomplete(request):
    """Search feeds matching a term or URL for the add-feed autocomplete.

    Returns either jQuery-autocomplete-shaped dicts (default ``format``)
    or full canonical feeds, sorted by subscriber count descending.
    """
    query = request.GET.get('term') or request.GET.get('query')
    version = int(request.GET.get('v', 1))
    format = request.GET.get('format', 'autocomplete')
    # user = get_user(request)
    # if True or not user.profile.is_premium:
    # return dict(code=-1, message="Overloaded, no autocomplete results.", feeds=[], term=query)
    if not query:
        return dict(code=-1, message="Specify a search 'term'.", feeds=[], term=query)
    if '.' in query:
        # Looks like a URL: search on hostname plus non-empty path parts.
        try:
            parts = urlparse(query)
            if not parts.hostname and not query.startswith('http'):
                parts = urlparse('http://%s' % query)
            if parts.hostname:
                query = [parts.hostname]
                query.extend([p for p in parts.path.split('/') if p])
                query = ' '.join(query)
        # NOTE(review): bare except deliberately falls back to searching
        # on the raw query string.
        except:
            logging.user(request, "~FGAdd search, could not parse url in ~FR%s" % query)
    # Progressively drop trailing terms (up to 5 tries) until a search
    # returns matches.  `query` is non-empty here, so the loop body runs
    # at least once and `feed_ids` is always bound.
    query_params = query.split(' ')
    tries_left = 5
    while len(query_params) and tries_left:
        tries_left -= 1
        feed_ids = Feed.autocomplete(' '.join(query_params))
        if feed_ids:
            break
        else:
            query_params = query_params[:-1]
    # De-duplicate, drop missing/branched feeds and blacklisted addresses.
    feeds = list(set([Feed.get_by_id(feed_id) for feed_id in feed_ids]))
    feeds = [feed for feed in feeds if feed and not feed.branch_from_feed]
    feeds = [feed for feed in feeds if all([x not in feed.feed_address for x in IGNORE_AUTOCOMPLETE])]
    if format == 'autocomplete':
        feeds = [{
            'id': feed.pk,
            'value': feed.feed_address,
            'label': feed.feed_title,
            'tagline': feed.data and feed.data.feed_tagline,
            'num_subscribers': feed.num_subscribers,
        } for feed in feeds]
    else:
        feeds = [feed.canonical(full=True) for feed in feeds]
    feeds = sorted(feeds, key=lambda f: -1 * f['num_subscribers'])
    # Attach favicon color/data for feeds that have an icon stored.
    feed_ids = [f['id'] for f in feeds]
    feed_icons = dict((icon.feed_id, icon) for icon in MFeedIcon.objects.filter(feed_id__in=feed_ids))
    for feed in feeds:
        if feed['id'] in feed_icons:
            feed_icon = feed_icons[feed['id']]
            if feed_icon.data:
                feed['favicon_color'] = feed_icon.color
                feed['favicon'] = feed_icon.data
    logging.user(request, "~FGAdd Search: ~SB%s ~SN(%s matches)" % (query, len(feeds),))
    # v2+ clients get a wrapper dict; v1 clients expect the bare list.
    if version > 1:
        return {
            'feeds': feeds,
            'term': query,
        }
    else:
        return feeds
@ratelimit(minutes=1, requests=30)
@json.json_view
def load_feed_statistics(request, feed_id):
    """Collect and return statistics for one feed: update schedule,
    story history breakouts, subscriber counts, classifier counts and
    fetch/push histories."""
    user = get_user(request)
    timezone = user.profile.timezone
    stats = dict()
    feed = get_object_or_404(Feed, pk=feed_id)
    # Refresh the denormalized statistics before reporting them.
    feed.update_all_statistics()
    feed.set_next_scheduled_update(verbose=True, skip_scheduling=True)
    feed.save_feed_story_history_statistics()
    feed.save_classifier_counts()
    # Dates of last and next update
    stats['active'] = feed.active
    stats['last_update'] = relative_timesince(feed.last_update)
    stats['next_update'] = relative_timeuntil(feed.next_scheduled_update)
    stats['push'] = feed.is_push
    if feed.is_push:
        try:
            stats['push_expires'] = localtime_for_timezone(feed.push.lease_expires,
                                                           timezone).strftime("%Y-%m-%d %H:%M:%S")
        except PushSubscription.DoesNotExist:
            # The push flag is stale: no subscription actually exists.
            stats['push_expires'] = 'Missing push'
            feed.is_push = False
            feed.save()
    # Minutes between updates
    update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False)
    stats['update_interval_minutes'] = update_interval_minutes
    # Simulate one extra active premium subscriber to compute the premium
    # update interval, then restore the real counts.
    original_active_premium_subscribers = feed.active_premium_subscribers
    original_premium_subscribers = feed.premium_subscribers
    feed.active_premium_subscribers = max(feed.active_premium_subscribers+1, 1)
    feed.premium_subscribers += 1
    premium_update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False,
                                                                    premium_speed=True)
    feed.active_premium_subscribers = original_active_premium_subscribers
    feed.premium_subscribers = original_premium_subscribers
    stats['premium_update_interval_minutes'] = premium_update_interval_minutes
    stats['errors_since_good'] = feed.errors_since_good
    # Stories per month - average and month-by-month breakout
    average_stories_per_month, story_count_history = feed.average_stories_per_month, feed.data.story_count_history
    stats['average_stories_per_month'] = average_stories_per_month
    story_count_history = story_count_history and json.decode(story_count_history)
    if story_count_history and isinstance(story_count_history, dict):
        stats['story_count_history'] = story_count_history['months']
        stats['story_days_history'] = story_count_history['days']
        stats['story_hours_history'] = story_count_history['hours']
        # Rotate hours to match user's timezone offset.  Wrapped with
        # % 24 so keys stay in 0-23 (previously offsets produced keys
        # like "-5" or "27").  Guarded by this branch because the hours
        # breakout only exists for dict-shaped history; running it
        # unconditionally raised KeyError for legacy list-shaped data.
        localoffset = timezone.utcoffset(datetime.datetime.utcnow())
        hours_offset = int(localoffset.total_seconds() / 3600)
        rotated_hours = {}
        for hour, value in stats['story_hours_history'].items():
            rotated_hours[str((int(hour) + hours_offset) % 24)] = value
        stats['story_hours_history'] = rotated_hours
    else:
        stats['story_count_history'] = story_count_history
    # Subscribers
    stats['subscriber_count'] = feed.num_subscribers
    stats['num_subscribers'] = feed.num_subscribers
    stats['stories_last_month'] = feed.stories_last_month
    stats['last_load_time'] = feed.last_load_time
    stats['premium_subscribers'] = feed.premium_subscribers
    stats['active_subscribers'] = feed.active_subscribers
    stats['active_premium_subscribers'] = feed.active_premium_subscribers
    # Classifier counts
    stats['classifier_counts'] = json.decode(feed.data.feed_classifier_counts)
    # Fetch histories
    fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
    stats['feed_fetch_history'] = fetch_history['feed_fetch_history']
    stats['page_fetch_history'] = fetch_history['page_fetch_history']
    stats['feed_push_history'] = fetch_history['push_history']
    logging.user(request, "~FBStatistics: ~SB%s" % (feed))
    return stats
@json.json_view
def load_feed_settings(request, feed_id):
    """Return fetch/push histories and duplicate addresses for a feed."""
    feed = get_object_or_404(Feed, pk=feed_id)
    user = get_user(request)
    timezone = user.profile.timezone
    fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
    return {
        'feed_fetch_history': fetch_history['feed_fetch_history'],
        'page_fetch_history': fetch_history['page_fetch_history'],
        'feed_push_history': fetch_history['push_history'],
        'duplicate_addresses': feed.duplicate_addresses.all(),
    }
@ratelimit(minutes=10, requests=10)
@json.json_view
def exception_retry(request):
    """Clear a feed's exception flags and force an immediate re-fetch.

    ``reset_fetch`` (POST) decides whether the feed is treated as
    never-fetched (full refresh) or merely refreshed.  Returns the
    updated canonical feed for the requesting user's subscription.
    """
    user = get_user(request)
    feed_id = get_argument_or_404(request, 'feed_id')
    reset_fetch = json.decode(request.POST['reset_fetch'])
    feed = Feed.get_by_id(feed_id)
    original_feed = feed
    if not feed:
        raise Http404
    feed.schedule_feed_fetch_immediately()
    # Clear all exception/inactive flags, tracking whether anything
    # actually changed so we only hit the database when needed.
    changed = False
    if feed.has_page_exception:
        changed = True
        feed.has_page_exception = False
    if feed.has_feed_exception:
        changed = True
        feed.has_feed_exception = False
    if not feed.active:
        changed = True
        feed.active = True
    if changed:
        feed.save(update_fields=['has_page_exception', 'has_feed_exception', 'active'])
    original_fetched_once = feed.fetched_once
    if reset_fetch:
        logging.user(request, "~FRRefreshing exception feed: ~SB%s" % (feed))
        feed.fetched_once = False
    else:
        logging.user(request, "~FRForcing refreshing feed: ~SB%s" % (feed))
        feed.fetched_once = True
    if feed.fetched_once != original_fetched_once:
        feed.save(update_fields=['fetched_once'])
    # Synchronous update; the feed may get merged into a duplicate, so
    # re-load it by pk afterwards.
    feed = feed.update(force=True, compute_scores=False, verbose=True)
    feed = Feed.get_by_id(feed.pk)
    try:
        usersub = UserSubscription.objects.get(user=user, feed=feed)
    except UserSubscription.DoesNotExist:
        # The update may have replaced the feed; move the user's
        # subscription from the original feed to the new one.
        usersubs = UserSubscription.objects.filter(user=user, feed=original_feed)
        if usersubs:
            usersub = usersubs[0]
            usersub.switch_feed(feed, original_feed)
        else:
            return {'code': -1}
    usersub.calculate_feed_scores(silent=False)
    # Keyed under both the (possibly new) feed pk and the requested id
    # so the client can find the payload either way.
    feeds = {feed.pk: usersub and usersub.canonical(full=True), feed_id: usersub.canonical(full=True)}
    return {'code': 1, 'feeds': feeds}
@ajax_login_required
@json.json_view
def exception_change_feed_address(request):
    """Repair or branch a feed by changing its feed address.

    If the feed is in an exception state, the address is fixed in place
    and the feed re-fetched.  Otherwise a new ("branched") feed is
    located or created for the new address and the user's subscription
    is switched over to it.
    """
    feed_id = request.POST['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    original_feed = feed
    feed_address = request.POST['feed_address']
    timezone = request.user.profile.timezone
    code = -1
    if (feed.has_page_exception or feed.has_feed_exception):
        # Fix broken feed
        logging.user(request, "~FRFixing feed exception by address: ~SB%s~SN to ~SB%s" % (feed.feed_address, feed_address))
        feed.has_feed_exception = False
        feed.active = True
        feed.fetched_once = False
        feed.feed_address = feed_address
        # NOTE(review): schedule_feed_fetch_immediately() appears to
        # return a duplicate feed when the address collides with an
        # existing one -- project-specific behavior; confirm in Feed.
        duplicate_feed = feed.schedule_feed_fetch_immediately()
        code = 1
        if duplicate_feed:
            new_feed = Feed.objects.get(pk=duplicate_feed.pk)
            feed = new_feed
            new_feed.schedule_feed_fetch_immediately()
            new_feed.has_feed_exception = False
            new_feed.active = True
            # NOTE(review): relies on Feed.save() returning the saved
            # (possibly deduplicated) feed, a NewsBlur override; stock
            # Django save() returns None -- confirm.
            new_feed = new_feed.save()
            if new_feed.pk != feed.pk:
                merge_feeds(new_feed.pk, feed.pk)
    else:
        # Branch good feed
        logging.user(request, "~FRBranching feed by address: ~SB%s~SN to ~SB%s" % (feed.feed_address, feed_address))
        try:
            feed = Feed.objects.get(hash_address_and_link=Feed.generate_hash_address_and_link(feed_address, feed.feed_link))
        except Feed.DoesNotExist:
            feed = Feed.objects.create(feed_address=feed_address, feed_link=feed.feed_link)
        code = 1
        if feed.pk != original_feed.pk:
            try:
                feed.branch_from_feed = original_feed.branch_from_feed or original_feed
            except Feed.DoesNotExist:
                feed.branch_from_feed = original_feed
        feed.feed_address_locked = True
        # NOTE(review): see save()-returns-feed note above.
        feed = feed.save()
    feed = feed.update()
    feed = Feed.get_by_id(feed.pk)
    try:
        usersub = UserSubscription.objects.get(user=request.user, feed=feed)
    except UserSubscription.DoesNotExist:
        # Subscription may still point at the original feed; switch it.
        usersubs = UserSubscription.objects.filter(user=request.user, feed=original_feed)
        if usersubs:
            usersub = usersubs[0]
            usersub.switch_feed(feed, original_feed)
        else:
            fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
            return {
                'code': -1,
                'feed_fetch_history': fetch_history['feed_fetch_history'],
                'page_fetch_history': fetch_history['page_fetch_history'],
                'push_history': fetch_history['push_history'],
            }
    usersub.calculate_feed_scores(silent=False)
    feed.update_all_statistics()
    classifiers = get_classifiers_for_user(usersub.user, feed_id=usersub.feed_id)
    feeds = {
        original_feed.pk: usersub and usersub.canonical(full=True, classifiers=classifiers),
    }
    if feed and feed.has_feed_exception:
        code = -1
    fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
    return {
        'code': code,
        'feeds': feeds,
        'new_feed_id': usersub.feed_id,
        'feed_fetch_history': fetch_history['feed_fetch_history'],
        'page_fetch_history': fetch_history['page_fetch_history'],
        'push_history': fetch_history['push_history'],
    }
@ajax_login_required
@json.json_view
def exception_change_feed_link(request):
    """Repair or branch a feed by changing its site link.

    For feeds in an exception state, feedfinder discovers a working
    feed address for the new link in place.  Otherwise a branched feed
    is located or created for the new link and the user's subscription
    is switched over to it.
    """
    feed_id = request.POST['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    original_feed = feed
    feed_link = request.POST['feed_link']
    timezone = request.user.profile.timezone
    code = -1
    if (feed.has_page_exception or feed.has_feed_exception):
        # Fix broken feed
        logging.user(request, "~FRFixing feed exception by link: ~SB%s~SN to ~SB%s" % (feed.feed_link, feed_link))
        found_feed_urls = feedfinder.find_feeds(feed_link)
        if len(found_feed_urls):
            code = 1
            feed.has_page_exception = False
            feed.active = True
            feed.fetched_once = False
            feed.feed_link = feed_link
            feed.feed_address = found_feed_urls[0]
            # NOTE(review): schedule_feed_fetch_immediately() appears to
            # return a duplicate feed on address collision -- confirm in
            # the Feed model.
            duplicate_feed = feed.schedule_feed_fetch_immediately()
            if duplicate_feed:
                new_feed = Feed.objects.get(pk=duplicate_feed.pk)
                feed = new_feed
                new_feed.schedule_feed_fetch_immediately()
                new_feed.has_page_exception = False
                new_feed.active = True
                new_feed.save()
    else:
        # Branch good feed
        logging.user(request, "~FRBranching feed by link: ~SB%s~SN to ~SB%s" % (feed.feed_link, feed_link))
        try:
            feed = Feed.objects.get(hash_address_and_link=Feed.generate_hash_address_and_link(feed.feed_address, feed_link))
        except Feed.DoesNotExist:
            feed = Feed.objects.create(feed_address=feed.feed_address, feed_link=feed_link)
        code = 1
        if feed.pk != original_feed.pk:
            try:
                feed.branch_from_feed = original_feed.branch_from_feed or original_feed
            except Feed.DoesNotExist:
                feed.branch_from_feed = original_feed
        feed.feed_link_locked = True
        feed.save()
    feed = feed.update()
    feed = Feed.get_by_id(feed.pk)
    try:
        usersub = UserSubscription.objects.get(user=request.user, feed=feed)
    except UserSubscription.DoesNotExist:
        # Subscription may still point at the original feed; switch it.
        usersubs = UserSubscription.objects.filter(user=request.user, feed=original_feed)
        if usersubs:
            usersub = usersubs[0]
            usersub.switch_feed(feed, original_feed)
        else:
            fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
            return {
                'code': -1,
                'feed_fetch_history': fetch_history['feed_fetch_history'],
                'page_fetch_history': fetch_history['page_fetch_history'],
                'push_history': fetch_history['push_history'],
            }
    usersub.calculate_feed_scores(silent=False)
    feed.update_all_statistics()
    classifiers = get_classifiers_for_user(usersub.user, feed_id=usersub.feed_id)
    if feed and feed.has_feed_exception:
        code = -1
    feeds = {
        original_feed.pk: usersub.canonical(full=True, classifiers=classifiers),
    }
    fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
    return {
        'code': code,
        'feeds': feeds,
        'new_feed_id': usersub.feed_id,
        'feed_fetch_history': fetch_history['feed_fetch_history'],
        'page_fetch_history': fetch_history['page_fetch_history'],
        'push_history': fetch_history['push_history'],
    }
@login_required
def status(request):
    """Staff-only dashboard of feeds updated in the last N minutes."""
    if not request.user.is_staff:
        logging.user(request, "~SKNON-STAFF VIEWING RSS FEEDS STATUS!")
        # Previously `assert False` sat before this return: it raised a
        # 500 in normal runs and was stripped entirely under `python -O`,
        # making the 403 unreachable.  Return the 403 directly instead.
        return HttpResponseForbidden()
    minutes = int(request.GET.get('minutes', 1))
    now = datetime.datetime.now()
    hour_ago = now - datetime.timedelta(minutes=minutes)
    feeds = Feed.objects.filter(last_update__gte=hour_ago).order_by('-last_update')
    return render_to_response('rss_feeds/status.xhtml', {
        'feeds': feeds
    }, context_instance=RequestContext(request))
@required_params('story_id', feed_id=int)
@json.json_view
def original_text(request):
    """Fetch (or re-fetch) the extracted original text of a story."""
    story_id = request.REQUEST.get('story_id')
    feed_id = request.REQUEST.get('feed_id')
    story_hash = request.REQUEST.get('story_hash', None)
    force = request.REQUEST.get('force', False)
    debug = request.REQUEST.get('debug', False)
    # Prefer the story hash when supplied; fall back to id + feed id.
    if story_hash:
        lookup = dict(story_hash=story_hash)
    else:
        lookup = dict(story_id=story_id, story_feed_id=feed_id)
    story, _ = MStory.find_story(**lookup)
    if not story:
        logging.user(request, "~FYFetching ~FGoriginal~FY story text: ~FRstory not found")
        return {'code': -1, 'message': 'Story not found.', 'original_text': None, 'failed': True}
    original_text = story.fetch_original_text(force=force, request=request, debug=debug)
    # Suspiciously short extractions are flagged as failures.
    failed = not original_text or len(original_text) < 100
    return {
        'feed_id': feed_id,
        'story_id': story_id,
        'original_text': original_text,
        'failed': failed,
    }
@required_params('story_hash')
def original_story(request):
    """Serve the fetched original HTML page for a story."""
    story_hash = request.REQUEST.get('story_hash')
    force = request.REQUEST.get('force', False)
    debug = request.REQUEST.get('debug', False)
    story, _ = MStory.find_story(story_hash=story_hash)
    if not story:
        logging.user(request, "~FYFetching ~FGoriginal~FY story page: ~FRstory not found")
        # This view is not wrapped in @json.json_view, so returning a bare
        # dict (as the old code did) is not a valid Django response;
        # serialize the error payload explicitly instead.
        return HttpResponse(json.encode({'code': -1, 'message': 'Story not found.',
                                         'original_page': None, 'failed': True}),
                            content_type='application/json')
    original_page = story.fetch_original_page(force=force, request=request, debug=debug)
    return HttpResponse(original_page or "")
@required_params('story_hash')
@json.json_view
def story_changes(request):
    """Return a story formatted with (optionally) its tracked changes."""
    story_hash = request.REQUEST.get('story_hash', None)
    show_changes = is_true(request.REQUEST.get('show_changes', True))
    story, _ = MStory.find_story(story_hash=story_hash)
    if not story:
        logging.user(request, "~FYFetching ~FGoriginal~FY story page: ~FRstory not found")
        return {'code': -1, 'message': 'Story not found.', 'original_page': None, 'failed': True}
    return {'story': Feed.format_story(story, show_changes=show_changes)}
| |
# The MIT License (MIT)
#
# Copyright (c) 2014 Zulko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# File from the MoviePy project - released under licence MIT
# See https://github.com/Zulko/moviepy
"""
This module implements all the functions to read a video or a picture
using ffmpeg. It is quite ugly, as there are many pitfalls to avoid
"""
from __future__ import division
import subprocess as sp
import re
import warnings
import logging
logging.captureWarnings(True)
import numpy as np
from . ffmpeg_config import get_setting # ffmpeg, ffmpeg.exe, etc...
from . ffmpeg_tools import cvsecs
import os
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
class FFMPEG_VideoReader:
    """Sequential frame reader backed by an ffmpeg subprocess.

    ffmpeg decodes ``filename`` to raw RGB(A) bytes on its stdout, and
    frames are read off the pipe one at a time as numpy uint8 arrays of
    shape (h, w, depth).
    """
    def __init__(self, filename, print_infos=False, bufsize = None,
                 pix_fmt="rgb24", check_duration=True):
        self.filename = filename
        # Probe the file once for fps, size, duration and frame count.
        infos = ffmpeg_parse_infos(filename, print_infos, check_duration)
        self.fps = infos['video_fps']
        self.size = infos['video_size']
        self.duration = infos['video_duration']
        self.ffmpeg_duration = infos['duration']
        self.nframes = infos['video_nframes']
        self.infos = infos
        self.pix_fmt = pix_fmt
        # Bytes per pixel: 4 when an alpha channel is requested.
        if pix_fmt == 'rgba':
            self.depth = 4
        else:
            self.depth = 3
        if bufsize is None:
            w, h = self.size
            # Buffer one whole frame, plus a little slack.
            bufsize = self.depth * w * h + 100
        self.bufsize= bufsize
        self.initialize()
        self.pos = 1
        self.lastread = self.read_frame()
    def initialize(self, starttime=0):
        """Opens the file, creates the pipe. """
        self.close() # if any
        if starttime != 0 :
            # Fast-seek to ~1s before the target with the input-side -ss,
            # then decode precisely over the rest with the output-side -ss.
            offset = min(1, starttime)
            i_arg = ['-ss', "%.06f" % (starttime - offset),
                     '-i', self.filename,
                     '-ss', "%.06f" % offset]
        else:
            i_arg = [ '-i', self.filename]
        cmd = ([get_setting("FFMPEG_BINARY")]+ i_arg +
               ['-loglevel', 'error',
                '-f', 'image2pipe',
                "-pix_fmt", self.pix_fmt,
                '-vcodec', 'rawvideo', '-'])
        popen_params = {"bufsize": self.bufsize,
                        "stdout": sp.PIPE,
                        "stderr": sp.PIPE,
                        "stdin": DEVNULL}
        if os.name == "nt":
            # 0x08000000 == CREATE_NO_WINDOW: avoid flashing a console.
            popen_params["creationflags"] = 0x08000000
        self.proc = sp.Popen(cmd, **popen_params)
    def skip_frames(self, n=1):
        """Reads and throws away n frames """
        w, h = self.size
        for i in range(n):
            self.proc.stdout.read(self.depth*w*h)
            #self.proc.stdout.flush()
        # NOTE(review): `lastread` is intentionally left untouched here.
        self.pos += n
    def read_frame(self):
        """Read the next frame off the pipe as an (h, w, depth) array.

        On a short read (end of stream or decode hiccup), warns and
        returns the last successfully read frame instead.
        """
        w, h = self.size
        nbytes= self.depth*w*h
        s = self.proc.stdout.read(nbytes)
        if len(s) != nbytes:
            warnings.warn("Warning: in file %s, "%(self.filename)+
                   "%d bytes wanted but %d bytes read,"%(nbytes, len(s))+
                   "at frame %d/%d, at time %.02f/%.02f sec. "%(
                    self.pos,self.nframes,
                    1.0*self.pos/self.fps,
                    self.duration)+
                   "Using the last valid frame instead.",
                   UserWarning)
            if not hasattr(self, 'lastread'):
                raise IOError(("MoviePy error: failed to read the first frame of "
                               "video file %s. That might mean that the file is "
                               "corrupted. That may also mean that you are using "
                               "a deprecated version of FFMPEG. On Ubuntu/Debian "
                               "for instance the version in the repos is deprecated. "
                               "Please update to a recent version from the website.")%(
                               self.filename))
            result = self.lastread
        else:
            # NOTE(review): np.fromstring is deprecated in favour of
            # np.frombuffer; kept as-is because fromstring returns a
            # writable copy, whereas frombuffer would be read-only.
            result = np.fromstring(s, dtype='uint8')
            result.shape =(h, w, len(s)//(w*h)) # reshape((h, w, len(s)//(w*h)))
            self.lastread = result
        return result
    def get_frame(self, t):
        """ Read a file video frame at time t.

        Note for coders: getting an arbitrary frame in the video with
        ffmpeg can be painfully slow if some decoding has to be done.
        This function tries to avoid fectching arbitrary frames
        whenever possible, by moving between adjacent frames.
        """
        # these definitely need to be rechecked sometime. Seems to work.
        # I use that horrible '+0.00001' hack because sometimes due to numerical
        # imprecisions a 3.0 can become a 2.99999999... which makes the int()
        # go to the previous integer. This makes the fetching more robust in the
        # case where you get the nth frame by writing get_frame(n/fps).
        pos = int(self.fps*t + 0.00001)+1
        if pos == self.pos:
            return self.lastread
        else:
            # Re-open (seek) when jumping backwards or more than 100
            # frames ahead; otherwise just skip forward along the pipe.
            if(pos < self.pos) or (pos > self.pos+100):
                self.initialize(t)
                self.pos = pos
            else:
                self.skip_frames(pos-self.pos-1)
            result = self.read_frame()
            self.pos = pos
            return result
    def close(self):
        # Terminate the ffmpeg subprocess and close its pipes, if open.
        if hasattr(self,'proc'):
            self.proc.terminate()
            self.proc.stdout.close()
            self.proc.stderr.close()
            del self.proc
    def __del__(self):
        self.close()
        if hasattr(self,'lastread'):
            del self.lastread
def ffmpeg_read_image(filename, with_mask=True):
    """ Read an image file (PNG, BMP, JPEG...).

    Wraps FFMPEG_Videoreader to grab a single frame from an image file.

    This function is not meant to be used directly in MoviePy,
    use ImageClip instead to make clips out of image files.

    Parameters
    -----------

    filename
      Name of the image file. Can be of any format supported by ffmpeg.

    with_mask
      If the image has a transparency layer, ``with_mask=true`` will save
      this layer as the mask of the returned ImageClip

    """
    # RGBA keeps the alpha channel so it can serve as the clip's mask.
    pix_fmt = 'rgba' if with_mask else "rgb24"
    reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt, check_duration=False)
    image = reader.lastread
    del reader
    return image
def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True):
    """Get file infos using ffmpeg.

    Returns a dictionary with the fields:
    "video_found", "video_fps", "duration", "video_nframes",
    "video_duration", "audio_found", "audio_fps"

    "video_duration" is slightly smaller than "duration" to avoid
    fetching the incomplete frames at the end, which raises an error.

    Parameters
    ----------
    filename : str
        Path of the media file to probe.
    print_infos : bool
        If True, print the raw text that ffmpeg wrote to stderr.
    check_duration : bool
        If False, skip duration parsing (e.g. for single images).
    """
    # Open the file in a pipe, provoke an error, read the stderr output:
    # ffmpeg prints the stream information there before failing.
    is_GIF = filename.endswith('.gif')
    cmd = [get_setting("FFMPEG_BINARY"), "-i", filename]
    if is_GIF:
        # For GIFs a null decode pass is needed to obtain the frame count.
        cmd += ["-f", "null", "/dev/null"]

    popen_params = {"bufsize": 10**5,
                    "stdout": sp.PIPE,
                    "stderr": sp.PIPE,
                    "stdin": DEVNULL}

    # 0x08000000 == CREATE_NO_WINDOW: avoid flashing a console on Windows.
    if os.name == "nt":
        popen_params["creationflags"] = 0x08000000

    proc = sp.Popen(cmd, **popen_params)

    proc.stdout.readline()
    proc.terminate()
    infos = proc.stderr.read().decode('utf8')
    del proc

    if print_infos:
        # print the whole info text returned by FFMPEG
        print(infos)

    lines = infos.splitlines()
    if "No such file or directory" in lines[-1]:
        raise IOError(("MoviePy error: the file %s could not be found !\n"
                       "Please check that you entered the correct "
                       "path.")%filename)

    result = dict()

    # get duration (in seconds)
    result['duration'] = None

    if check_duration:
        try:
            # For GIFs the duration is on the final 'frame=' progress line;
            # otherwise ffmpeg prints a 'Duration: HH:MM:SS.cc' line.
            keyword = ('frame=' if is_GIF else 'Duration: ')
            line = [l for l in lines if keyword in l][0]
            match = re.findall(r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.[0-9][0-9])", line)[0]
            result['duration'] = cvsecs(match)
        except Exception:
            raise IOError(("MoviePy error: failed to read the duration of file %s.\n"
                           "Here are the file infos returned by ffmpeg:\n\n%s")%(
                          filename, infos))

    # get the output line that speaks about video
    lines_video = [l for l in lines if ' Video: ' in l and re.search(r'\d+x\d+', l)]

    result['video_found'] = (lines_video != [])

    if result['video_found']:
        try:
            line = lines_video[0]

            # get the size, of the form 460x320 (w x h)
            match = re.search(" [0-9]*x[0-9]*(,| )", line)
            s = list(map(int, line[match.start():match.end()-1].split('x')))
            result['video_size'] = s
        except Exception:
            raise IOError(("MoviePy error: failed to read video dimensions in file %s.\n"
                           "Here are the file infos returned by ffmpeg:\n\n%s")%(
                          filename, infos))

        # Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
        # tbc, and sometimes tbc/2...
        # Current policy: Trust tbr first, then fps. If result is near from x*1000/1001
        # where x is 23,24,25,50, replace by x*1000/1001 (very common case for the fps).
        try:
            match = re.search("( [0-9]*.| )[0-9]* tbr", line)
            tbr = float(line[match.start():match.end()].split(' ')[1])
            result['video_fps'] = tbr
        except Exception:
            # No usable 'tbr' entry: fall back to the 'fps' entry.
            match = re.search("( [0-9]*.| )[0-9]* fps", line)
            result['video_fps'] = float(line[match.start():match.end()].split(' ')[1])

        # It is known that a fps of 24 is often written as 24000/1001
        # but then ffmpeg nicely rounds it to 23.98, which we hate.
        coef = 1000.0/1001.0
        fps = result['video_fps']
        for x in [23, 24, 25, 30, 50]:
            if (fps != x) and abs(fps - x*coef) < .01:
                result['video_fps'] = x*coef

        if check_duration:
            result['video_nframes'] = int(result['duration']*result['video_fps'])+1
            result['video_duration'] = result['duration']
        else:
            result['video_nframes'] = 1
            result['video_duration'] = None
        # We could have also recomputed the duration from the number
        # of frames, as follows:
        # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']

    lines_audio = [l for l in lines if ' Audio: ' in l]

    result['audio_found'] = lines_audio != []

    if result['audio_found']:
        line = lines_audio[0]
        try:
            match = re.search(" [0-9]* Hz", line)
            result['audio_fps'] = int(line[match.start()+1:match.end()])
        except Exception:
            # Sample rate not printed; report it as unknown rather than fail.
            result['audio_fps'] = 'unknown'

    return result
| |
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
from mock import patch, MagicMock
try:
from IPython import __version__ as ipy_version
except:
ipy_version = '0.0'
from qtpy import QtCore
from glue.core.data import Data
from glue.core.component_link import ComponentLink
from glue.core.data_collection import DataCollection
from glue.core.tests.test_state import Cloner, containers_equal, doubler, clone
from glue.tests.helpers import requires_ipython
from glue.utils.qt import process_dialog
from glue.viewers.image.qt import ImageWidget
from glue.viewers.scatter.qt import ScatterWidget
from glue.viewers.histogram.qt import HistogramWidget
from ..application import GlueApplication
os.environ['GLUE_TESTING'] = 'True'
def tab_count(app):
    """Return the number of tabs currently present in *app*'s tab bar."""
    bar = app.tab_bar
    return bar.count()
class TestGlueApplication(object):
    """End-to-end tests of the GlueApplication Qt shell (tabs, session
    save/load, terminal toggle, data-viewer creation)."""

    def setup_method(self, method):
        # Fresh application (with terminal) for every test.
        self.app = GlueApplication()
        self.app._create_terminal()

    def teardown_method(self, method):
        self.app.close()

    def test_new_tabs(self):
        # new_tab() should add exactly one tab.
        t0 = tab_count(self.app)
        self.app.new_tab()
        assert tab_count(self.app) == t0 + 1

    def test_save_session(self):
        # The chosen filename gets a '.glu' suffix appended before saving.
        self.app.save_session = MagicMock()
        with patch('qtpy.compat.getsavefilename') as fd:
            fd.return_value = '/tmp/junk', 'jnk'
            self.app._choose_save_session()
        self.app.save_session.assert_called_once_with('/tmp/junk.glu', include_data=False)

    def test_save_session_cancel(self):
        """shouldn't try to save file if no file name provided"""
        self.app.save_session = MagicMock()
        with patch('qtpy.compat.getsavefilename') as fd:
            # Empty filename means the user cancelled the dialog.
            fd.return_value = '', 'jnk'
            self.app._choose_save_session()
        assert self.app.save_session.call_count == 0

    def test_choose_save_session_ioerror(self):
        """should show box on ioerror"""
        with patch('qtpy.compat.getsavefilename') as fd:
            # Patch the builtin open() under its py2/py3 module name.
            if sys.version_info[0] == 2:
                mock_open = '__builtin__.open'
            else:
                mock_open = 'builtins.open'
            with patch(mock_open) as op:
                op.side_effect = IOError
                fd.return_value = '/tmp/junk', '/tmp/junk'
                with patch('glue.app.qt.application.QMessageBox') as mb:
                    self.app._choose_save_session()
                    assert mb.call_count == 1

    @requires_ipython
    def test_terminal_present(self):
        """For good setups, terminal is available"""
        if not self.app.has_terminal():
            # Surface the captured import/initialization error before failing.
            sys.stderr.write(self.app._terminal_exception)
            assert False

    def app_without_terminal(self):
        # Helper (not a test): return an application whose terminal failed
        # to initialize, forcing creation failure if needed.
        if not self.app.has_terminal():
            return self.app

        with patch('glue.app.qt.terminal.glue_terminal') as terminal:
            terminal.side_effect = Exception("disabled")
            app = GlueApplication()
            app._create_terminal()
            return app

    def test_functional_without_terminal(self):
        """Can still create app without terminal"""
        app = self.app_without_terminal()

    def test_messagebox_on_disabled_terminal(self):
        """Clicking on the terminal toggle button raises messagebox on error"""
        app = self.app_without_terminal()
        with patch('glue.app.qt.application.QMessageBox') as qmb:
            app._terminal_button.click()
            assert qmb.call_count == 1

    def is_terminal_importable(self):
        # Helper (not a test): probe whether the terminal widget can import.
        try:
            import glue.qt.widgets.glue_terminal
            return True
        except:
            return False

    @requires_ipython
    def test_toggle_terminal(self):
        # The toggle button shows a hidden terminal and hides a visible one.
        term = MagicMock()
        self.app._terminal = term
        term.isVisible.return_value = False
        self.app._terminal_button.click()
        assert term.show.call_count == 1
        term.isVisible.return_value = True
        self.app._terminal_button.click()
        assert term.hide.call_count == 1

    def test_close_tab(self):
        assert self.app.tab_widget.count() == 1
        assert self.app.tab_bar.tabText(0) == 'Tab 1'

        self.app.new_tab()
        assert self.app.tab_widget.count() == 2
        assert self.app.tab_bar.tabText(0) == 'Tab 1'
        assert self.app.tab_bar.tabText(1) == 'Tab 2'

        self.app.close_tab(0)
        assert self.app.tab_widget.count() == 1
        assert self.app.tab_bar.tabText(0) == 'Tab 2'

        # do not delete last tab
        self.app.close_tab(0)
        assert self.app.tab_widget.count() == 1

        # check that counter always goes up
        self.app.new_tab()
        assert self.app.tab_bar.tabText(0) == 'Tab 2'
        assert self.app.tab_bar.tabText(1) == 'Tab 3'

    def test_new_data_viewer_cancel(self):
        # Cancelling the viewer-class picker must not add a subwindow.
        with patch('glue.app.qt.application.pick_class') as pc:
            pc.return_value = None
            ct = len(self.app.current_tab.subWindowList())
            self.app.choose_new_data_viewer()
            assert len(self.app.current_tab.subWindowList()) == ct

    def test_new_data_viewer(self):
        with patch('glue.app.qt.application.pick_class') as pc:
            pc.return_value = ScatterWidget
            ct = len(self.app.current_tab.subWindowList())
            self.app.choose_new_data_viewer()
            assert len(self.app.current_tab.subWindowList()) == ct + 1

    def test_move(self):
        viewer = self.app.new_data_viewer(ScatterWidget)
        viewer.move(10, 20)
        assert viewer.position == (10, 20)

    def test_resize(self):
        viewer = self.app.new_data_viewer(ScatterWidget)
        viewer.viewer_size = (100, 200)
        assert viewer.viewer_size == (100, 200)

    def test_new_data_defaults(self):
        # 1D data should default to a scatter viewer, 2D data to an image viewer.
        from glue.config import qt_client
        with patch('glue.app.qt.application.pick_class') as pc:
            pc.return_value = None
            d2 = Data(x=np.array([[1, 2, 3], [4, 5, 6]]))
            d1 = Data(x=np.array([1, 2, 3]))
            self.app.choose_new_data_viewer(data=d1)
            args, kwargs = pc.call_args
            assert kwargs['default'] is ScatterWidget
            self.app.choose_new_data_viewer(data=d2)
            args, kwargs = pc.call_args
            assert kwargs['default'] is ImageWidget

    def test_drop_load_data(self):
        # Dropping a file URL on the app should route through load_data().
        m = QtCore.QMimeData()
        m.setUrls([QtCore.QUrl('test.fits')])
        e = MagicMock()
        e.mimeData.return_value = m
        load = MagicMock()
        self.app.load_data = load
        self.app.dropEvent(e)
        assert load.call_count == 1

    def test_subset_facet(self):
        # regression test for 335
        act = self.app._layer_widget._actions['facet']
        self.app.data_collection.append(Data(x=[1, 2, 3]))
        with patch('glue.dialogs.subset_facet.qt.SubsetFacet.exec_'):
            act._do_action()

    # FIXME: The following test fails and causes subsequent issues if run with
    #
    #   pytest -s -v -x glue
    #
    # Need to investigate this, but for now, no solution other than skipping
    # the test.
    #
    # def test_suggest_merge(self):
    #
    #     x = Data(x=[1, 2, 3], label='x')
    #     y = Data(y=[4, 5, 6, 7], label='y')
    #     z = Data(z=[8, 9, 10], label='z')
    #
    #     self.app.data_collection.append(x)
    #     self.app.data_collection.append(y)
    #
    #     with process_dialog(delay=500, accept=True):
    #         result = self.app.add_datasets(self.app.data_collection, z)
    #
    #     np.testing.assert_equal(self.app.data_collection[0]['x'], [1, 2, 3])
    #     np.testing.assert_equal(self.app.data_collection[0]['z'], [8, 9, 10])
    #     np.testing.assert_equal(self.app.data_collection[1]['y'], [4, 5, 6, 7])
def check_clone_app(app):
    """Serialize *app*, deserialize it, and assert that the copy matches the
    original (hub subscriptions, data collections, viewers, layers).

    Returns the cloned application so callers can make further assertions.
    """
    c = Cloner(app)
    copy = c.us.object('__main__')

    # same number of hub subscriptions
    hub1 = app.session.hub
    hub2 = copy.session.hub
    assert len(hub1._subscriptions) == len(hub2._subscriptions)

    # data collections are the same
    for d1, d2 in zip(app.session.data_collection,
                      copy.session.data_collection):
        assert d1.label == d2.label
        for cid1, cid2 in zip(d1.components, d2.components):
            assert cid1.label == cid2.label

            # order of components unspecified if label collisions
            cid2 = c.get(cid1)
            np.testing.assert_array_almost_equal(d1[cid1, 0:1],
                                                 d2[cid2, 0:1], 3)

    # same data viewers, in the same tabs
    for tab1, tab2 in zip(app.viewers, copy.viewers):
        assert len(tab1) == len(tab2)
        for v1, v2 in zip(tab1, tab2):
            assert type(v1) == type(v2)
            # same window properties
            assert v1.viewer_size == v2.viewer_size
            assert v1.position == v2.position

            # same viewer-level properties (axis label, scaling, etc)
            assert set(v1.properties.keys()) == set(v2.properties.keys())
            for k in v1.properties:
                if hasattr(v1.properties[k], 'label'):
                    assert v1.properties[k].label == v2.properties[k].label
                else:
                    assert v1.properties[k] == v2.properties[k] or \
                        containers_equal(v1.properties[k], v2.properties[k])

            assert len(v1.layers) == len(v2.layers)
            for l1, l2 in zip(v1.layers, v2.layers):
                assert l1.layer.label == l2.layer.label  # same data/subset
                assert l1.layer.style == l2.layer.style

    return copy
class TestApplicationSession(object):
    """Round-trip (save/restore) tests for sessions of increasing complexity:
    bare app, data, links, viewers, multiple tabs, subset groups."""

    def check_clone(self, app):
        # Delegate to the module-level helper; returns the cloned app.
        return check_clone_app(app)

    def test_bare_application(self):
        app = GlueApplication()
        self.check_clone(app)

    def test_data_application(self):
        dc = DataCollection([Data(label='test',
                                  x=[1, 2, 3], y=[2, 3, 4])])
        app = GlueApplication(dc)
        self.check_clone(app)

    def test_links(self):
        # Component links (here y = 2*x via doubler) must survive a clone.
        d1 = Data(label='x', x=[1, 2, 3])
        d2 = Data(label='y', y=[3, 4, 8])
        dc = DataCollection([d1, d2])

        link = ComponentLink([d1.id['x']], d2.id['y'], doubler)
        dc.add_link(link)

        np.testing.assert_array_equal(d1['y'], [2, 4, 6])

        app = GlueApplication(dc)
        self.check_clone(app)

    def test_scatter_viewer(self):
        d = Data(label='x', x=[1, 2, 3, 4, 5], y=[2, 3, 4, 5, 6])
        dc = DataCollection([d])
        app = GlueApplication(dc)
        w = app.new_data_viewer(ScatterWidget, data=d)
        self.check_clone(app)

        # zorder swaps and hidden layers must also round-trip
        s1 = dc.new_subset_group()
        s2 = dc.new_subset_group()
        assert len(w.layers) == 3
        l1, l2, l3 = w.layers
        l1.zorder, l2.zorder = l2.zorder, l1.zorder
        l3.visible = False
        assert l3.visible is False
        copy = self.check_clone(app)
        assert copy.viewers[0][0].layers[-1].visible is False

    def test_multi_tab(self):
        d = Data(label='hist', x=[[1, 2], [2, 3]])
        dc = DataCollection([d])
        app = GlueApplication(dc)
        w1 = app.new_data_viewer(HistogramWidget, data=d)
        app.new_tab()
        w2 = app.new_data_viewer(HistogramWidget, data=d)
        assert app.viewers == ((w1,), (w2,))
        self.check_clone(app)

    def test_histogram(self):
        d = Data(label='hist', x=[[1, 2], [2, 3]])
        dc = DataCollection([d])
        app = GlueApplication(dc)
        w = app.new_data_viewer(HistogramWidget, data=d)
        self.check_clone(app)

        dc.new_subset_group()
        assert len(w.layers) == 2
        self.check_clone(app)

        # viewer state (bin count) must survive the clone as well
        w.nbins = 7
        self.check_clone(app)

    def test_subset_groups_remain_synced_after_restore(self):
        # regression test for 352
        d = Data(label='hist', x=[[1, 2], [2, 3]])
        dc = DataCollection([d])
        dc.new_subset_group()
        app = GlueApplication(dc)

        app2 = clone(app)
        sg = app2.data_collection.subset_groups[0]
        assert sg.style.parent is sg

        # style changes on the restored group must propagate to its subsets
        sg.style.color = '#112233'
        assert sg.subsets[0].style.color == '#112233'
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .angles import Angle
__all__ = ['Distance']
__doctest_requires__ = {'*': ['scipy.integrate']}
class Distance(u.SpecificTypeQuantity):
    """
    A one-dimensional distance.

    This can be initialized in one of four ways:

    * A distance ``value`` (array or float) and a ``unit``
    * A `~astropy.units.Quantity` object
    * A redshift and (optionally) a cosmology.
    * Providing a distance modulus

    Parameters
    ----------
    value : scalar or `~astropy.units.Quantity`.
        The value of this distance.
    unit : `~astropy.units.UnitBase`
        The units for this distance, *if* ``value`` is not a
        `~astropy.units.Quantity`. Must have dimensions of distance.
    z : float
        A redshift for this distance.  It will be converted to a distance
        by computing the luminosity distance for this redshift given the
        cosmology specified by ``cosmology``. Must be given as a keyword
        argument.
    cosmology : ``Cosmology`` or `None`
        A cosmology that will be used to compute the distance from ``z``.
        If `None`, the current cosmology will be used (see
        `astropy.cosmology` for details).
    distmod : float or `~astropy.units.Quantity`
        The distance modulus for this distance. Note that if ``unit`` is not
        provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
    parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
        The parallax in angular units.
    dtype : `~numpy.dtype`, optional
        See `~astropy.units.Quantity`.
    copy : bool, optional
        See `~astropy.units.Quantity`.
    order : {'C', 'F', 'A'}, optional
        See `~astropy.units.Quantity`.
    subok : bool, optional
        See `~astropy.units.Quantity`.
    ndmin : int, optional
        See `~astropy.units.Quantity`.
    allow_negative : bool, optional
        Whether to allow negative distances (which are possible in some
        cosmologies).  Default: ``False``.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the ``unit`` is not a distance.
    ValueError
        If value specified is less than 0 and ``allow_negative=False``.

        If ``z`` is provided with a ``unit`` or ``cosmology`` is provided
        when ``z`` is *not* given, or ``value`` is given as well as ``z``.

    Examples
    --------
    >>> from astropy import units as u
    >>> from astropy import cosmology
    >>> from astropy.cosmology import WMAP5, WMAP7
    >>> cosmology.set_current(WMAP7)
    >>> d1 = Distance(10, u.Mpc)
    >>> d2 = Distance(40, unit=u.au)
    >>> d3 = Distance(value=5, unit=u.kpc)
    >>> d4 = Distance(z=0.23)
    >>> d5 = Distance(z=0.23, cosmology=WMAP5)
    >>> d6 = Distance(distmod=24.47)
    >>> d7 = Distance(Distance(10 * u.Mpc))
    >>> d8 = Distance(parallax=21.34*u.mas)
    """

    # Anything convertible to metres qualifies as a distance.
    _equivalent_unit = u.m
    _include_easy_conversion_members = True

    def __new__(cls, value=None, unit=None, z=None, cosmology=None,
                distmod=None, parallax=None, dtype=None, copy=True, order=None,
                subok=False, ndmin=0, allow_negative=False):

        if z is not None:
            if value is not None or distmod is not None:
                raise ValueError('Should give only one of `value`, `z` '
                                 'or `distmod` in Distance constructor.')

            if cosmology is None:
                from astropy.cosmology import default_cosmology
                cosmology = default_cosmology.get()

            value = cosmology.luminosity_distance(z)
            # Continue on to take account of unit and other arguments
            # but a copy is already made, so no longer necessary
            copy = False
        else:
            if cosmology is not None:
                raise ValueError('A `cosmology` was given but `z` was not '
                                 'provided in Distance constructor')

            # Exactly one of value/z/distmod/parallax may be supplied.
            value_msg = ('Should give only one of `value`, `z`, `distmod`, or '
                         '`parallax` in Distance constructor.')
            n_not_none = np.sum([x is not None
                                 for x in [value, z, distmod, parallax]])
            if n_not_none > 1:
                raise ValueError(value_msg)

            if distmod is not None:
                value = cls._distmod_to_pc(distmod)
                if unit is None:
                    # if the unit is not specified, guess based on the mean of
                    # the log of the distance
                    meanlogval = np.log10(value.value).mean()
                    if meanlogval > 6:
                        unit = u.Mpc
                    elif meanlogval > 3:
                        unit = u.kpc
                    elif meanlogval < -3:  # ~200 AU
                        unit = u.AU
                    else:
                        unit = u.pc

                # Continue on to take account of unit and other arguments
                # but a copy is already made, so no longer necessary
                copy = False

            elif parallax is not None:
                value = parallax.to_value(u.pc, equivalencies=u.parallax())
                unit = u.pc

                # Continue on to take account of unit and other arguments
                # but a copy is already made, so no longer necessary
                copy = False

                if np.any(parallax < 0):
                    if allow_negative:
                        warnings.warn(
                            "Negative parallaxes are converted to NaN "
                            "distances even when `allow_negative=True`, "
                            "because negative parallaxes cannot be transformed "
                            "into distances. See discussion in this paper: "
                            "https://arxiv.org/abs/1507.02105", AstropyWarning)
                    else:
                        raise ValueError("Some parallaxes are negative, which "
                                         "are not interpretable as distances. "
                                         "See the discussion in this paper: "
                                         "https://arxiv.org/abs/1507.02105 . "
                                         "If you want parallaxes to pass "
                                         "through, with negative parallaxes "
                                         "instead becoming NaN, use the "
                                         "`allow_negative=True` argument.")

            elif value is None:
                raise ValueError('None of `value`, `z`, `distmod`, or '
                                 '`parallax` were given to Distance '
                                 'constructor')

        # now we have arguments like for a Quantity, so let it do the work
        distance = super().__new__(
            cls, value, unit, dtype=dtype, copy=copy, order=order,
            subok=subok, ndmin=ndmin)

        if not allow_negative and np.any(distance.value < 0):
            raise ValueError("Distance must be >= 0. Use the argument "
                             "'allow_negative=True' to allow negative values.")

        return distance

    @property
    def z(self):
        """Short for ``self.compute_z()``"""
        return self.compute_z()

    def compute_z(self, cosmology=None):
        """
        The redshift for this distance assuming its physical distance is
        a luminosity distance.

        Parameters
        ----------
        cosmology : ``Cosmology`` or `None`
            The cosmology to assume for this calculation, or `None` to use the
            current cosmology (see `astropy.cosmology` for details).

        Returns
        -------
        z : float
            The redshift of this distance given the provided ``cosmology``.
        """
        if cosmology is None:
            from astropy.cosmology import default_cosmology
            cosmology = default_cosmology.get()

        from astropy.cosmology import z_at_value
        return z_at_value(cosmology.luminosity_distance, self, ztol=1.e-10)

    @property
    def distmod(self):
        """The distance modulus as a `~astropy.units.Quantity`"""
        val = 5. * np.log10(self.to_value(u.pc)) - 5.
        return u.Quantity(val, u.mag, copy=False)

    @classmethod
    def _distmod_to_pc(cls, dm):
        """Convert a distance modulus to a Distance in parsecs."""
        dm = u.Quantity(dm, u.mag)
        return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False)

    @property
    def parallax(self):
        """The parallax angle as an `~astropy.coordinates.Angle` object"""
        return Angle(self.to(u.milliarcsecond, u.parallax()))
| |
import os
import time
import glob
from ctypes.util import find_library
from contextlib import suppress
import pytest
import astropy.units as u
from astropy.io import fits
import requests
from panoptes.pocs.camera.simulator.dslr import Camera as SimCamera
from panoptes.pocs.camera.simulator.ccd import Camera as SimSDKCamera
from panoptes.pocs.camera.sbig import Camera as SBIGCamera
from panoptes.pocs.camera.sbigudrv import INVALID_HANDLE_VALUE, SBIGDriver
from panoptes.pocs.camera.fli import Camera as FLICamera
from panoptes.pocs.camera.zwo import Camera as ZWOCamera
from panoptes.pocs.camera import AbstractCamera
from panoptes.pocs.focuser.simulator import Focuser
from panoptes.pocs.scheduler.field import Field
from panoptes.pocs.scheduler.observation.base import Observation
from panoptes.pocs.scheduler.observation.bias import BiasObservation
from panoptes.pocs.scheduler.observation.dark import DarkObservation
from panoptes.utils.error import NotFound
from panoptes.utils.images import fits as fits_utils
from panoptes.utils import error
from panoptes.utils.config.client import get_config
from panoptes.utils.config.client import set_config
from panoptes.pocs.camera import create_cameras_from_config
from panoptes.utils.serializers import to_json
from panoptes.utils.time import CountdownTimer
@pytest.fixture(scope='module', params=[
    pytest.param([SimCamera, dict()]),
    pytest.param([SimCamera, get_config('cameras.devices[0]')]),
    pytest.param([SimCamera, get_config('cameras.devices[1]')]),
    pytest.param([SimCamera, get_config('cameras.devices[2]')]),
    pytest.param([SimSDKCamera, get_config('cameras.devices[3]')]),
    pytest.param([SBIGCamera, 'sbig'], marks=[pytest.mark.with_camera]),
    pytest.param([FLICamera, 'fli'], marks=[pytest.mark.with_camera]),
    pytest.param([ZWOCamera, 'zwo'], marks=[pytest.mark.with_camera]),
], ids=[
    'dslr',
    'dslr.00',
    'dslr.focuser.cooling.00',
    'dslr.filterwheel.cooling.00',
    'ccd.filterwheel.cooling.00',
    'sbig',
    'fli',
    'zwo'
])
def camera(request) -> AbstractCamera:
    """Parametrized camera fixture: simulator cameras (from dicts/config
    entries) plus real-hardware cameras (looked up by model name, only run
    with the `with_camera` mark). Yields a ready (cooled, if applicable)
    camera instance.
    """
    CamClass = request.param[0]
    cam_params = request.param[1]
    camera = None
    if isinstance(cam_params, dict):
        # Simulator
        camera = CamClass(**cam_params)
    else:
        # Lookup real hardware device name in real life config server.
        for cam_config in get_config('cameras.devices'):
            if cam_config['model'] == cam_params:
                camera = CamClass(**cam_config)
                break
    camera.logger.log('testing', f'Camera created: {camera!r}')
    # Wait for cooled camera
    if camera.is_cooled_camera:
        camera.logger.log('testing', f'Cooled camera needs to wait for cooling.')
        assert not camera.is_temperature_stable
        # Wait for cooling
        cooling_timeout = CountdownTimer(60)  # Should never have to wait this long.
        while not camera.is_temperature_stable and not cooling_timeout.expired():
            camera.logger.log('testing',
                              f'Still waiting for cooling: {cooling_timeout.time_left()}')
            cooling_timeout.sleep(max_sleep=2)
        # Must have stabilized before the timeout ran out.
        assert camera.is_temperature_stable and cooling_timeout.expired() is False
    assert camera.is_ready
    camera.logger.debug(f'Yielding camera {camera}')
    yield camera
    # simulator_sdk needs this explicitly removed for some reason.
    # SDK Camera class destructor *should* be doing this when the fixture goes out of scope.
    with suppress(AttributeError):
        type(camera)._assigned_cameras.discard(camera.uid)
@pytest.fixture(scope='module')
def counter(camera):
    """Module-scoped mutable counter dict shared across tests for a camera."""
    return {'value': 0}
@pytest.fixture(scope='module')
def patterns(camera, images_dir):
    """Glob patterns for the files the autofocus tests expect to produce."""
    base_dir = os.path.join(images_dir, 'focus', camera.uid, '*')
    patterns = {
        'final': os.path.join(base_dir, ('*-final.' + camera.file_extension)),
        'fine_plot': os.path.join(base_dir, 'fine-focus.png'),
        'coarse_plot': os.path.join(base_dir, 'coarse-focus.png')
    }
    return patterns
def reset_conf(config_host, config_port):
    """Ask the config server at host:port to reset itself to defaults."""
    endpoint = f'http://{config_host}:{config_port}/reset-config'
    response = requests.post(endpoint,
                             headers={'Content-Type': 'application/json'},
                             data=to_json({'reset': True}))
    assert response.ok
def test_create_cameras_from_config_no_autodetect(config_host, config_port):
    """With auto-detect off and fake ports configured, creation must fail."""
    set_config('cameras.auto_detect', False)
    set_config('cameras.devices', [
        dict(model='canon_gphoto2', port='/dev/fake01'),
        dict(model='canon_gphoto2', port='/dev/fake02'),
    ])
    with pytest.raises(error.CameraNotFound):
        create_cameras_from_config()
    # Restore server-side config for subsequent tests.
    reset_conf(config_host, config_port)
def test_create_cameras_from_config_autodetect(config_host, config_port):
    """Auto-detect with no real cameras attached must raise CameraNotFound."""
    set_config('cameras.defaults.auto_detect', True)
    with pytest.raises(error.CameraNotFound):
        create_cameras_from_config()
    # Restore server-side config for subsequent tests.
    reset_conf(config_host, config_port)
# Hardware independent tests, mostly use simulator:
def test_sim_create_focuser():
    """A focuser given as a config dict is instantiated by the camera."""
    sim_camera = SimCamera(focuser={'model': 'panoptes.pocs.focuser.simulator.Focuser',
                                    'focus_port': '/dev/ttyFAKE'})
    assert isinstance(sim_camera.focuser, Focuser)
def test_sim_passed_focuser():
    """A focuser instance passed directly is used as-is (not copied)."""
    sim_focuser = Focuser(port='/dev/ttyFAKE')
    sim_camera = SimCamera(focuser=sim_focuser)
    assert sim_camera.focuser is sim_focuser
def test_sim_bad_focuser():
    """An unknown focuser model in a config dict raises NotFound."""
    with pytest.raises(NotFound):
        SimCamera(focuser={'model': 'NOTAFOCUSER'})
def test_sim_worse_focuser():
    """A bogus focuser spec that is not even a dict must also raise NotFound."""
    with pytest.raises(NotFound):
        # No need to bind the result: construction is expected to raise.
        SimCamera(focuser='NOTAFOCUSER')
def test_sim_string():
    """str() of a camera includes its name, uid and (if set) port."""
    sim_camera = SimCamera()
    assert str(sim_camera) == f'Simulated Camera ({sim_camera.uid})'
    sim_camera = SimCamera(name='Sim', port='/dev/ttyFAKE')
    assert str(sim_camera) == f'Sim ({sim_camera.uid}) port=/dev/ttyFAKE'
def test_sim_file_extension():
    """File extension defaults to 'fits' and honours an explicit override."""
    sim_camera = SimCamera()
    assert sim_camera.file_extension == 'fits'
    sim_camera = SimCamera(file_extension='FIT')
    assert sim_camera.file_extension == 'FIT'
def test_sim_readout_time():
    """Readout time defaults to 1.0s and honours an explicit override."""
    sim_camera = SimCamera()
    assert sim_camera.readout_time == 1.0
    sim_camera = SimCamera(readout_time=2.0)
    assert sim_camera.readout_time == 2.0
def test_sdk_no_serial_number():
    """SDK cameras require a serial number at construction time."""
    with pytest.raises(ValueError):
        SimSDKCamera()
def test_sdk_already_in_use():
    """Claiming the same SDK serial number twice raises PanError."""
    serial_number = get_config('cameras.devices[-1].serial_number')
    sim_camera = SimSDKCamera(serial_number=serial_number)
    assert sim_camera
    with pytest.raises(error.PanError):
        SimSDKCamera(serial_number=serial_number)
    # Explicitly delete camera to clear `_assigned_cameras`.
    del sim_camera
def test_sdk_camera_not_found():
    """An SDK serial number that matches no device raises InvalidConfig."""
    with pytest.raises(error.InvalidConfig):
        SimSDKCamera(serial_number='SSC404')
# Hardware independent tests for SBIG camera
def test_sbig_driver_bad_path():
    """
    Manually specify an incorrect path for the SBIG shared library. The
    CDLL loader should raise OSError when it fails. Can't test a successful
    driver init as it would cause subsequent tests to fail because of the
    CDLL unload problem.
    """
    with pytest.raises(OSError):
        SBIGDriver(library_path='no_library_here')
@pytest.mark.filterwarnings('ignore:Could not connect to SBIG Camera')
def test_sbig_bad_serial():
    """
    Attempt to create an SBIG camera instance for a specific non-existent
    camera. No actual cameras are required to run this test but the SBIG
    driver does need to be installed.
    """
    # Skip rather than fail on machines without the vendor driver.
    if find_library('sbigudrv') is None:
        pytest.skip("Test requires SBIG camera driver to be installed")
    with pytest.raises(error.PanError):
        SBIGCamera(serial_number='NOTAREALSERIALNUMBER')
# *Potentially* hardware-dependent tests:
def test_init(camera):
    """
    Test that camera got initialised as expected
    """
    assert camera.is_connected
    if isinstance(camera, SBIGCamera):
        # Successfully initialised SBIG cameras should also have a valid 'handle'
        assert camera._handle != INVALID_HANDLE_VALUE
def test_uid(camera):
    """Camera uid should be a non-empty (truthy) identifier."""
    assert camera.uid
def test_get_temp(camera):
    """Temperature readout returns a value, or the test is skipped when
    the camera does not implement it."""
    try:
        temperature = camera.temperature
    except NotImplementedError:
        pytest.skip("Camera {} doesn't implement temperature info".format(camera.name))
    else:
        assert temperature is not None
def test_is_cooled(camera):
    """is_cooled_camera must always report a definite (non-None) answer."""
    cooled_camera = camera.is_cooled_camera
    assert cooled_camera is not None
def test_set_target_temperature(camera):
    """Setting a target temperature round-trips to within 0.5 deg C."""
    if camera.is_cooled_camera:
        camera.target_temperature = 10 * u.Celsius
        assert abs(camera.target_temperature - 10 * u.Celsius) < 0.5 * u.Celsius
    else:
        pytest.skip("Camera {} doesn't implement temperature control".format(camera.name))
def test_cooling_enabled(camera):
    """Cooling is reported enabled exactly for cameras that have cooling."""
    # NOTE: removed stray debug print() calls left over in this test.
    assert camera.cooling_enabled == camera.is_cooled_camera
def test_enable_cooling(camera):
    """Cooling can be switched on for cameras that support it."""
    if camera.is_cooled_camera:
        camera.cooling_enabled = True
        assert camera.cooling_enabled
    else:
        pytest.skip("Camera {} doesn't implement control of cooling status".format(camera.name))
def test_get_cooling_power(camera):
    """Cooling power readout returns a value for cooled cameras."""
    if camera.is_cooled_camera:
        power = camera.cooling_power
        assert power is not None
    else:
        pytest.skip("Camera {} doesn't implement cooling power readout".format(camera.name))
def test_disable_cooling(camera):
    """Cooling can be switched off for cameras that support it."""
    if camera.is_cooled_camera:
        camera.cooling_enabled = False
        assert not camera.cooling_enabled
    else:
        pytest.skip("Camera {} doesn't implement control of cooling status".format(camera.name))
def test_temperature_tolerance(camera):
    """Tolerance accepts a bare number (interpreted as Celsius) and a Quantity."""
    temp_tol = camera.temperature_tolerance
    camera.temperature_tolerance = temp_tol.value + 1
    assert camera.temperature_tolerance == temp_tol + 1 * u.Celsius
    # Restore the original tolerance so later tests see the default.
    camera.temperature_tolerance = temp_tol
    assert camera.temperature_tolerance == temp_tol
def test_is_temperature_stable(camera):
    """Stability is reported only while cooling is enabled and settled;
    uncooled cameras are never 'stable'."""
    if camera.is_cooled_camera:
        # Target the current temperature so stabilization is quick.
        camera.target_temperature = camera.temperature
        camera.cooling_enabled = True
        while not camera.is_temperature_stable:
            time.sleep(2)
        assert camera.is_temperature_stable
        # Disabling cooling must immediately drop the 'stable' flag.
        camera.cooling_enabled = False
        assert not camera.is_temperature_stable
        camera.cooling_enabled = True
    else:
        assert not camera.is_temperature_stable
def test_exposure(camera, tmpdir):
    """
    Tests basic take_exposure functionality
    """
    fits_path = str(tmpdir.join('test_exposure.fits'))
    assert not camera.is_exposing
    assert camera.is_ready
    # A one second normal exposure.
    camera.take_exposure(filename=fits_path)
    assert camera.is_exposing
    assert not camera.is_ready
    # By default take_exposure is non-blocking, need to give it some time to complete.
    if isinstance(camera, FLICamera):
        # FLI cameras are slower to read out.
        time.sleep(10)
    else:
        time.sleep(5)
    # Output file should exist, Event should be set and camera should say it's not exposing.
    assert os.path.exists(fits_path)
    assert not camera.is_exposing
    assert camera.is_ready
    # If can retrieve some header data there's a good chance it's a valid FITS file
    header = fits_utils.getheader(fits_path)
    assert header['EXPTIME'] == 1.0
    assert header['IMAGETYP'] == 'Light Frame'
def test_exposure_blocking(camera, tmpdir):
    """
    Tests blocking take_exposure functionality. At least for now only SBIG cameras do this.
    """
    fits_path = str(tmpdir.join('test_exposure_blocking.fits'))
    # A one second exposure, command should block until complete so FITS
    # should exist immediately afterwards
    camera.take_exposure(filename=fits_path, blocking=True)
    assert os.path.exists(fits_path)
    # If can retrieve some header data there's a good chance it's a valid FITS file
    header = fits_utils.getheader(fits_path)
    assert header['EXPTIME'] == 1.0
    assert header['IMAGETYP'] == 'Light Frame'
def test_long_exposure_blocking(camera, tmpdir):
    """
    Tests a blocking exposure that is longer than the camera timeout.
    """
    fits_path = str(tmpdir.join('test_long_exposure_blocking.fits'))
    # Shrink the timeout/readout so the 'long' exposure stays fast, then
    # restore them afterwards for the other tests.
    original_timeout = camera.timeout
    original_readout = camera._readout_time
    try:
        camera.timeout = 1
        camera._readout_time = 0.5
        assert not camera.is_exposing
        assert camera.is_ready
        # Deliberately exceed timeout + readout to exercise the long path.
        seconds = 2 * (camera.timeout + camera._readout_time)
        camera.take_exposure(filename=fits_path, seconds=seconds, blocking=True)
        # Output file should exist, Event should be set and camera should say it's not exposing.
        assert os.path.exists(fits_path)
        assert not camera.is_exposing
        assert camera.is_ready
    finally:
        camera.timeout = original_timeout
        camera._readout_time = original_readout
def test_exposure_dark(camera, tmpdir):
    """Take a dark frame and check its header and filterwheel handling."""
    dark_path = str(tmpdir.join('test_exposure_dark.fits'))
    camera.take_exposure(filename=dark_path, dark=True, blocking=True)
    assert os.path.exists(dark_path)
    # Readable header keywords suggest a valid FITS file.
    dark_header = fits_utils.getheader(dark_path)
    assert dark_header['EXPTIME'] == 1.0
    assert dark_header['IMAGETYP'] == 'Dark Frame'
    # Cameras without a filterwheel raise AttributeError here; that's fine.
    with suppress(AttributeError):
        if not camera.can_take_internal_darks and camera.filterwheel._dark_position:
            # Filterwheel should have moved to 'blank' position due to dark exposure.
            assert camera.filterwheel.current_filter == 'blank'
            light_path = str(tmpdir.join('test_exposure_dark_light.fits'))
            camera.take_exposure(filename=light_path, blocking=True)
            # Filterwheel should have moved back to most recent non opaque filter now.
            assert camera.filterwheel.current_filter == 'one'
def test_exposure_collision(camera, tmpdir):
    """Starting a second exposure while one is in progress must raise.

    With the SBIG cameras this will generate warning but still should
    work. Don't do this though!
    """
    # Allow the cooled camera to reach a stable temperature first.
    if camera.is_cooled_camera and camera.cooling_enabled:
        while camera.is_temperature_stable is False:
            time.sleep(0.5)
    first_path = str(tmpdir.join('test_exposure_collision1.fits'))
    second_path = str(tmpdir.join('test_exposure_collision2.fits'))
    camera.take_exposure(2 * u.second, filename=first_path)
    camera.logger.log('testing', 'Exposure 1 started')
    with pytest.raises(error.PanError):
        camera.take_exposure(1 * u.second, filename=second_path)
    camera.logger.log('testing', 'Exposure 2 collided')
    # Wait for the first exposure to finish and then for its readout.
    while camera.is_exposing:
        time.sleep(0.5)
    while not os.path.exists(first_path):
        time.sleep(0.5)
    time.sleep(1)  # Make sure the file is fully-written
    # Only the first exposure may have produced a file.
    assert os.path.exists(first_path)
    assert not os.path.exists(second_path)
    assert fits_utils.getval(first_path, 'EXPTIME') == 2.0
def test_exposure_scaling(camera, tmpdir):
    """Regression test for incorrect pixel value scaling.

    Checks for zero padding of LSBs instead of MSBs, as encountered
    with ZWO ASI cameras.
    """
    try:
        bit_depth = camera.bit_depth
    except NotImplementedError:
        pytest.skip("Camera does not have bit_depth attribute")
    # pytest.skip() raises, so reaching this point means bit_depth is valid.
    fits_path = str(tmpdir.join('test_exposure_scaling.fits'))
    camera.take_exposure(filename=fits_path, dark=True, blocking=True)
    image_data, image_header = fits.getdata(fits_path, header=True)
    assert bit_depth == image_header['BITDEPTH'] * u.bit
    # If the LSBs were zero padded every pixel would be divisible by 2**pad_bits.
    pad_bits = image_header['BITPIX'] - image_header['BITDEPTH']
    assert (image_data % 2 ** pad_bits).any()
def test_exposure_no_filename(camera):
    """take_exposure() requires a filename; omitting it raises AssertionError."""
    with pytest.raises(AssertionError):
        camera.take_exposure(1.0)
def test_exposure_not_connected(camera):
    """take_exposure() asserts if the camera is not connected."""
    camera._connected = False
    with pytest.raises(AssertionError):
        camera.take_exposure(1.0)
    # Restore the connection flag so subsequent tests see a usable camera.
    camera._connected = True
def test_exposure_moving(camera, tmpdir):
    """Taking an exposure while the filterwheel is moving should raise.

    A normal exposure with the filterwheel stationary works as usual;
    the exposure attempted during the move must not produce a file.
    """
    if camera.filterwheel is None:
        pytest.skip("Camera does not have a filterwheel")
    fits_path_1 = str(tmpdir.join('test_not_moving.fits'))
    fits_path_2 = str(tmpdir.join('test_moving.fits'))
    camera.filterwheel.position = 1
    # Blocking exposure with the wheel stationary should succeed.
    # (The returned event was previously bound to an unused local.)
    camera.take_exposure(filename=fits_path_1, blocking=True)
    assert os.path.exists(fits_path_1)
    move_event = camera.filterwheel.move_to(2)
    with pytest.raises(error.PanError):
        camera.take_exposure(filename=fits_path_2, blocking=True)
    # Let the move finish before asserting so later tests start clean.
    move_event.wait()
    assert not os.path.exists(fits_path_2)
def test_exposure_timeout(camera, tmpdir, caplog):
    """An absurdly short timeout must abort the exposure and log an ERROR.

    The timeout fires inside the polling thread, so the exception never
    reaches this thread; the logged ERROR record is checked instead.
    """
    fits_path = str(tmpdir.join('test_exposure_timeout.fits'))
    # Make timeout extremely short to force a timeout error.
    readout_thread = camera.take_exposure(seconds=2.0, filename=fits_path, timeout=0.01)
    # Give the exposure machinery time to wind down completely.
    time.sleep(4)
    # The most recent log record should be the timeout ERROR.
    assert caplog.records[-1].levelname == "ERROR"
    # No data file, camera idle, readout thread dead, exposure event cleared.
    assert not os.path.exists(fits_path)
    assert not camera.is_exposing
    assert not readout_thread.is_alive()
    assert not camera._is_exposing_event.is_set()
def test_observation(camera, images_dir):
    """take_observation() produces exactly one image for the sequence.

    take_observation() is non-blocking, so poll until the camera reports
    the observation has finished before checking for output files.
    """
    field = Field('Test Observation', '20h00m43.7135s +22d42m39.0645s')
    observation = Observation(field, exptime=1.5 * u.second)
    observation.seq_time = '19991231T235959'
    camera.take_observation(observation)
    while camera.is_observing:
        # Plain string: the original used an f-string with no placeholders.
        camera.logger.trace('Waiting for observation event from inside test.')
        time.sleep(1)
    observation_pattern = os.path.join(images_dir, 'TestObservation',
                                       camera.uid, observation.seq_time, '*.fits*')
    assert len(glob.glob(observation_pattern)) == 1
def test_observation_headers_and_blocking(camera, images_dir):
    """Blocking take_observation() writes the extra headers into the FITS file."""
    field = Field('Test Observation', '20h00m43.7135s +22d42m39.0645s')
    observation = Observation(field, exptime=1.5 * u.second)
    observation.seq_time = '19991231T235559'
    camera.take_observation(observation, headers={'field_name': 'TESTVALUE'}, blocking=True)
    search_pattern = os.path.join(images_dir, 'TestObservation',
                                  camera.uid, observation.seq_time, '*.fits*')
    image_files = glob.glob(search_pattern)
    assert len(image_files) == 1
    camera.logger.debug(f'{image_files=}')
    # The custom header must have ended up in the image.
    headers = fits_utils.getheader(image_files[0])
    camera.logger.debug(f'{headers=!r}')
    assert headers['FIELD'] == 'TESTVALUE'
def test_observation_nofilter(camera, images_dir):
    """take_observation() works when the observation specifies no filter."""
    field = Field('Test Observation', '20h00m43.7135s +22d42m39.0645s')
    observation = Observation(field, exptime=1.5 * u.second, filter_name=None)
    observation.seq_time = '19991231T235159'
    camera.take_observation(observation, blocking=True)
    # Exactly one image should have been written for this sequence.
    search_pattern = os.path.join(images_dir, 'TestObservation',
                                  camera.uid, observation.seq_time, '*.fits*')
    assert len(glob.glob(search_pattern)) == 1
def test_observation_dark(camera, images_dir):
    """A DarkObservation produces exactly one image under the 'dark' directory."""
    position = '20h00m43.7135s +22d42m39.0645s'
    observation = DarkObservation(position, exptimes=[1])
    assert observation.dark
    observation.seq_time = '19991231T235959'
    camera.take_observation(observation, blocking=True)
    # Belt and braces: also wait until the camera reports it is done.
    while camera.is_observing:
        camera.logger.trace(f'Waiting for observation event from inside test. {camera.is_observing}')
        time.sleep(1)
    search_pattern = os.path.join(images_dir, 'dark',
                                  camera.uid, observation.seq_time, '*.fits*')
    assert len(glob.glob(search_pattern)) == 1
def test_observation_bias(camera, images_dir):
    """A BiasObservation produces exactly one image under the 'bias' directory."""
    position = '20h00m43.7135s +22d42m39.0645s'
    observation = BiasObservation(position)
    assert observation.dark
    observation.seq_time = '19991231T235959'
    camera.take_observation(observation)
    # Non-blocking call: poll until the observation completes.
    while camera.is_observing:
        camera.logger.trace('Waiting for observation event from inside test.')
        time.sleep(1)
    search_pattern = os.path.join(images_dir, 'bias',
                                  camera.uid, observation.seq_time, '*.fits*')
    assert len(glob.glob(search_pattern)) == 1
def test_autofocus_coarse(camera, patterns, counter):
    """Coarse autofocus with an explicit filter moves the filterwheel."""
    if not camera.has_focuser:
        pytest.skip("Camera does not have a focuser")
    if camera.has_filterwheel:
        camera.filterwheel.move_to("one", blocking=True)
    focus_event = camera.autofocus(coarse=True, filter_name="deux")
    focus_event.wait()
    # Autofocus should have selected the requested filter.
    if camera.has_filterwheel:
        assert camera.filterwheel.current_filter == "deux"
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
def test_autofocus_fine(camera, patterns, counter):
    """Default (fine) autofocus writes one more final focus image."""
    if camera.focuser is None:
        pytest.skip("Camera does not have a focuser")
    focus_event = camera.autofocus()
    focus_event.wait()
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
def test_autofocus_fine_blocking(camera, patterns, counter):
    """Blocking autofocus returns an already-set event."""
    if camera.focuser is None:
        pytest.skip("Camera does not have a focuser")
    focus_event = camera.autofocus(blocking=True)
    # The call blocked, so the completion event must already be set.
    assert focus_event.is_set()
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
def test_autofocus_with_plots(camera, patterns, counter):
    """Fine autofocus with make_plots=True also writes a fine-focus plot."""
    if camera.focuser is None:
        pytest.skip("Camera does not have a focuser")
    focus_event = camera.autofocus(make_plots=True)
    focus_event.wait()
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
    assert len(glob.glob(patterns['fine_plot'])) == 1
def test_autofocus_coarse_with_plots(camera, patterns, counter):
    """Coarse autofocus with make_plots=True also writes a coarse-focus plot."""
    if camera.focuser is None:
        pytest.skip("Camera does not have a focuser")
    focus_event = camera.autofocus(coarse=True, make_plots=True)
    focus_event.wait()
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
    assert len(glob.glob(patterns['coarse_plot'])) == 1
def test_autofocus_keep_files(camera, patterns, counter):
    """Autofocus with keep_files=True still produces the final focus image."""
    if camera.focuser is None:
        pytest.skip("Camera does not have a focuser")
    focus_event = camera.autofocus(keep_files=True)
    focus_event.wait()
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
def test_autofocus_no_darks(camera, patterns, counter):
    """Autofocus works with dark-frame subtraction disabled."""
    if camera.focuser is None:
        pytest.skip("Camera does not have a focuser")
    focus_event = camera.autofocus(keep_files=True, take_dark=False)
    focus_event.wait()
    counter['value'] += 1
    assert len(glob.glob(patterns['final'])) == counter['value']
def test_autofocus_no_size(camera):
    """autofocus() raises ValueError when autofocus_size is unset.

    The focuser position must be unchanged afterwards. The duplicated
    re-read of the position after the try/except was removed, and the
    setting is now restored in a finally block so a failing expectation
    cannot leave the focuser misconfigured for later tests.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    cutout_size = camera.focuser.autofocus_size
    camera.focuser.autofocus_size = None
    try:
        with pytest.raises(ValueError):
            camera.autofocus()
    finally:
        camera.focuser.autofocus_size = cutout_size
    assert camera.focuser.position == initial_focus
def test_autofocus_no_seconds(camera):
    """autofocus() raises ValueError when autofocus_seconds is unset.

    The focuser position must be unchanged afterwards. The duplicated
    re-read of the position after the try/except was removed, and the
    setting is restored in a finally block.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    seconds = camera.focuser.autofocus_seconds
    camera.focuser.autofocus_seconds = None
    try:
        with pytest.raises(ValueError):
            camera.autofocus()
    finally:
        camera.focuser.autofocus_seconds = seconds
    assert camera.focuser.position == initial_focus
def test_autofocus_no_step(camera):
    """autofocus() raises ValueError when autofocus_step is unset.

    The focuser position must be unchanged afterwards. The duplicated
    re-read of the position after the try/except was removed, and the
    setting is restored in a finally block.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    autofocus_step = camera.focuser.autofocus_step
    camera.focuser.autofocus_step = None
    try:
        with pytest.raises(ValueError):
            camera.autofocus()
    finally:
        camera.focuser.autofocus_step = autofocus_step
    assert camera.focuser.position == initial_focus
def test_autofocus_no_range(camera):
    """autofocus() raises ValueError when autofocus_range is unset.

    The focuser position must be unchanged afterwards. The duplicated
    re-read of the position after the try/except was removed, and the
    setting is restored in a finally block.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    autofocus_range = camera.focuser.autofocus_range
    camera.focuser.autofocus_range = None
    try:
        with pytest.raises(ValueError):
            camera.autofocus()
    finally:
        camera.focuser.autofocus_range = autofocus_range
    assert camera.focuser.position == initial_focus
def test_autofocus_camera_disconnected(camera):
    """autofocus() asserts when the camera is disconnected; focus unchanged.

    The duplicated re-read of the position after the try/except was
    removed, and the connection flag is restored in a finally block so a
    failing expectation cannot leave the camera disconnected.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    camera._connected = False
    try:
        with pytest.raises(AssertionError):
            camera.autofocus()
    finally:
        camera._connected = True
    assert camera.focuser.position == initial_focus
def test_autofocus_focuser_disconnected(camera):
    """autofocus() asserts when the focuser is disconnected; focus unchanged.

    The duplicated re-read of the position after the try/except was
    removed, and the connection flag is restored in a finally block.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    camera.focuser._connected = False
    try:
        with pytest.raises(AssertionError):
            camera.autofocus()
    finally:
        camera.focuser._connected = True
    assert camera.focuser.position == initial_focus
def test_autofocus_no_focuser(camera):
    """autofocus() raises AttributeError when the focuser is missing.

    The duplicated re-read of the position after the try/except was
    removed, and the focuser is re-attached in a finally block.
    """
    try:
        initial_focus = camera.focuser.position
    except AttributeError:
        pytest.skip("Camera does not have an exposed focuser attribute")
    focuser = camera.focuser
    camera.focuser = None
    try:
        with pytest.raises(AttributeError):
            camera.autofocus()
    finally:
        camera.focuser = focuser
    assert camera.focuser.position == initial_focus
def test_move_filterwheel_focus_offset(camera):
    """Moving the filterwheel applies the configured focus offsets."""
    if not camera.has_filterwheel:
        pytest.skip("Camera does not have a filterwheel.")
    if not camera.has_focuser:
        pytest.skip("Camera does not have a focuser.")
    # Treat a missing offsets table as an empty one.
    offsets = camera.filterwheel.focus_offsets or {}
    camera.filterwheel.move_to("one", blocking=True)
    for filter_name in camera.filterwheel.filter_names:
        # Expected focuser shift relative to the currently selected filter.
        current = camera.filterwheel.current_filter
        expected_shift = offsets.get(filter_name, 0) - offsets.get(current, 0)
        position_before = camera.focuser.position
        camera.filterwheel.move_to(filter_name, blocking=True)
        position_after = camera.focuser.position
        if filter_name in offsets:
            assert position_after == position_before + expected_shift
        else:
            # Filters without a configured offset must not move the focuser.
            assert position_after == position_before
| |
#!/usr/bin/env python
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from std_msgs.msg import String
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
import actionlib_msgs.msg
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib
import tf
from os import system
from random import random
from numpy import array,concatenate,vstack,delete,floor,ceil
from numpy import linalg as LA
from numpy import all as All
from functions import Nearest,Steer,Near,ObstacleFree,Find,Cost,prepEdges,gridValue,assigner1
import parameters as param
#-----------------------------------------------------
# Subscribers' callbacks------------------------------
# Latest occupancy grid received from the map topic; updated by mapCallBack.
mapData=OccupancyGrid()
def mapCallBack(data):
    """Store the incoming OccupancyGrid so the planner loop can read it."""
    global mapData
    mapData=data
# Node----------------------------------------------
def node():
r1=1
r2=1
global mapData
rospy.Subscriber("/robot_1/map", OccupancyGrid, mapCallBack)
pub = rospy.Publisher('shapes', Marker, queue_size=10)
rospy.init_node('RRTexplorer', anonymous=False)
#Actionlib client
client1 = actionlib.SimpleActionClient('/robot_1/move_base', MoveBaseAction)
client1.wait_for_server()
goal = MoveBaseGoal()
goal.target_pose.header.stamp=rospy.Time.now()
goal.target_pose.header.frame_id="/robot_1/map"
goal.target_pose.pose.position.x=1
goal.target_pose.pose.position.y=0
goal.target_pose.pose.position.z=0
goal.target_pose.pose.orientation.w = 1.0
client1.send_goal(goal)
h=client1.get_state()
print h,'\n ------',rospy.Time.now(),'------ \n'
client1.wait_for_result()
client1.get_result()
rate = rospy.Rate(50)
listener = tf.TransformListener()
listener.waitForTransform('/robot_1/map', '/robot_1/base_link', rospy.Time(0),rospy.Duration(10.0))
try:
(trans,rot) = listener.lookupTransform('/robot_1/map', '/robot_1/base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
trans=[0,0]
xinx=trans[0]
xiny=trans[1]
x_init=array([xinx,xiny])
V=array([x_init])
i=1.0
E=concatenate((x_init,x_init))
points=Marker()
line=Marker()
#Set the frame ID and timestamp. See the TF tutorials for information on these.
points.header.frame_id=line.header.frame_id="/robot_1/map"
points.header.stamp=line.header.stamp=rospy.Time.now()
points.ns=line.ns = "markers"
points.id = 0
line.id =1
points.type = Marker.POINTS
line.type=Marker.LINE_LIST
#Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
points.action = line.action = Marker.ADD;
points.pose.orientation.w = line.pose.orientation.w = 1.0;
line.scale.x = 0.02;
points.scale.x=0.05;
line.scale.y= 0.02;
points.scale.y=0.05;
line.color.r =9.0/255.0
line.color.g= 91.0/255.0
line.color.b =236.0/255.0
points.color.r = 255.0/255.0
points.color.g = 244.0/255.0
points.color.b = 0.0/255.0
points.color.a=1;
line.color.a = 0.6;
points.lifetime =line.lifetime = rospy.Duration();
p=Point()
p.x = x_init[0] ;
p.y = x_init[0] ;
p.z = 0;
pp=[]
pl=[]
pp.append(copy(p))
xdim=mapData.info.width
ydim=mapData.info.height
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
#raw_input('Press Enter to start exploration')
#-------------------------------RRT------------------------------------------
while not rospy.is_shutdown():
# Sample free
#indxRand= floor( len(mapData.data)*random())
#yr=ceil(indxRand/xdim)
#xr=indxRand-(floor(indxRand/xdim))*xdim
#xr=xr*resolution+Xstartx
#yr=yr*resolution+Xstarty
xr=(random()*20.0)-10.0
yr=(random()*20.0)-10.0
x_rand = array([xr,yr])
# Nearest
x_nearest=V[Nearest(V,x_rand),:]
# Steer
x_new=Steer(x_nearest,x_rand,param.eta)
goal.target_pose.pose.position.x=x_new[0]
goal.target_pose.pose.position.y=x_new[1]
goal.target_pose.pose.orientation.w = 1.0
# unKnow discovery
if gridValue(mapData,x_new)==-1:
assigner1(goal,x_new,client1,listener)
# ObstacleFree
if ObstacleFree(x_nearest,x_new,mapData):
# Near function
X_near=Near(V,x_new,param.rneighb)
s_Xnear=X_near.shape[0]
if All(X_near==array([0])):
s_Xnear=-1
V=vstack((V,x_new))
xmin=x_nearest
cmin=Cost(E,x_nearest)+LA.norm(x_new-x_nearest)
ii=0
for ii in range(0,s_Xnear):
xnear=copy(X_near[ii,:])
if ObstacleFree(xnear,x_new,mapData) and ( Cost(E,xnear)+LA.norm(xnear-x_new) )<cmin:
xmin=copy(xnear)
cmin=Cost(E,xnear)+LA.norm(xnear-x_new)
temp=concatenate((xmin,x_new))
E=vstack((E,temp))
iii=0
for iii in range(0,s_Xnear):
xnear=copy(X_near[iii,:])
if ObstacleFree(xnear,x_new,mapData) and ( Cost(E,x_new)+LA.norm(xnear-x_new) )<Cost(E,xnear):
row=Find(E,xnear)
E=delete(E, (row), axis=0)
temp=concatenate((x_new,xnear))
E=vstack((E,temp))
#Plotting
pl=prepEdges(E)
p.x=x_new[0]
p.y=x_new[1]
pp.append(copy(p))
points.points=pp
line.points=pl
pub.publish(points)
pub.publish(line)
rate.sleep()
#_____________________________________________________________________________
if __name__ == '__main__':
    try:
        node()
    except rospy.ROSInterruptException:
        # Raised by rospy on shutdown (e.g. Ctrl-C); exit quietly.
        pass
| |
"""Compressed Sparse Row matrix format"""
__docformat__ = "restructuredtext en"
__all__ = ['csr_matrix', 'isspmatrix_csr']
from warnings import warn
import numpy as np
from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \
get_csr_submatrix
from sputils import upcast, isintlike
from compressed import _cs_matrix
class csr_matrix(_cs_matrix):
    """Compressed Sparse Row matrix

    This can be instantiated in several ways:
      csr_matrix(D)
        with a dense matrix or rank-2 ndarray D

      csr_matrix(S)
        with another sparse matrix S (equivalent to S.tocsr())

      csr_matrix((M, N), [dtype])
        to construct an empty matrix with shape (M, N)
        dtype is optional, defaulting to dtype='d'.

      csr_matrix((data, ij), [shape=(M, N)])
        where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``

      csr_matrix((data, indices, indptr), [shape=(M, N)])
        is the standard CSR representation where the column indices for
        row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
        corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
        If the shape parameter is not supplied, the matrix dimensions
        are inferred from the index arrays.

    Notes
    -----
    Advantages of the CSR format
      - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
      - efficient row slicing
      - fast matrix vector products

    Disadvantages of the CSR format
      - slow column slicing operations (consider CSC)
      - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------

    >>> from scipy.sparse import *
    >>> from scipy import *
    >>> csr_matrix( (3,4), dtype=int8 ).todense()
    matrix([[0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]], dtype=int8)

    >>> row = array([0,0,1,2,2,2])
    >>> col = array([0,2,2,0,1,2])
    >>> data = array([1,2,3,4,5,6])
    >>> csr_matrix( (data,(row,col)), shape=(3,3) ).todense()
    matrix([[1, 0, 2],
            [0, 0, 3],
            [4, 5, 6]])

    >>> indptr = array([0,2,3,6])
    >>> indices = array([0,2,2,0,1,2])
    >>> data = array([1,2,3,4,5,6])
    >>> csr_matrix( (data,indices,indptr), shape=(3,3) ).todense()
    matrix([[1, 0, 2],
            [0, 0, 3],
            [4, 5, 6]])

    """

    def __getattr__(self, attr):
        # Backwards-compatibility alias: 'colind' was the pre-0.7 name for
        # the column index array; everything else defers to the base class.
        if attr == 'colind':
            warn("colind attribute no longer in use. Use .indices instead",
                    DeprecationWarning)
            return self.indices
        else:
            return _cs_matrix.__getattr__(self, attr)

    def transpose(self, copy=False):
        """Return the transpose as a csc_matrix reusing (or copying) the arrays.

        A CSR matrix's (data, indices, indptr) interpreted column-wise is
        exactly the CSC representation of its transpose, so no conversion
        work is needed.
        """
        from csc import csc_matrix
        M,N = self.shape
        return csc_matrix((self.data,self.indices,self.indptr), shape=(N,M), copy=copy)

    @np.deprecate
    def rowcol(self, ind):
        """Return the (row, column) of the ind-th stored entry. Deprecated."""
        #TODO remove after 0.7
        col = self.indices[ind]
        # The row is the last indptr bin whose start is <= ind.
        row = np.searchsorted(self.indptr, ind+1)-1
        return (row, col)

    def tolil(self):
        """Convert to LIst of Lists format (row-wise Python lists)."""
        from lil import lil_matrix
        lil = lil_matrix(self.shape,dtype=self.dtype)

        self.sort_indices() #lil_matrix needs sorted column indices

        ptr,ind,dat = self.indptr,self.indices,self.data
        rows, data = lil.rows, lil.data

        for n in xrange(self.shape[0]):
            start = ptr[n]
            end = ptr[n+1]
            rows[n] = ind[start:end].tolist()
            data[n] = dat[start:end].tolist()

        return lil

    def tocsr(self, copy=False):
        """Return self (optionally a copy); already in CSR format."""
        if copy:
            return self.copy()
        else:
            return self

    def tocsc(self):
        """Convert to Compressed Sparse Column format via the C helper."""
        indptr  = np.empty(self.shape[1] + 1, dtype=np.intc)
        indices = np.empty(self.nnz, dtype=np.intc)
        data    = np.empty(self.nnz, dtype=upcast(self.dtype))

        csr_tocsc(self.shape[0], self.shape[1], \
                  self.indptr, self.indices, self.data, \
                  indptr, indices, data)

        from csc import csc_matrix
        A = csc_matrix((data, indices, indptr), shape=self.shape)
        # csr_tocsc emits row indices in sorted order within each column.
        A.has_sorted_indices = True
        return A

    def tobsr(self, blocksize=None, copy=True):
        """Convert to Block Sparse Row format with the given blocksize.

        If blocksize is None a suitable size is estimated; (1,1) is a
        cheap reshape; otherwise the C helpers count and fill the blocks.
        """
        from bsr import bsr_matrix

        if blocksize is None:
            from spfuncs import estimate_blocksize
            return self.tobsr(blocksize=estimate_blocksize(self))

        elif blocksize == (1,1):
            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
            return bsr_matrix(arg1, shape=self.shape, copy=copy )

        else:
            R,C = blocksize
            M,N = self.shape

            if R < 1 or C < 1 or M % R != 0 or N % C != 0:
                raise ValueError('invalid blocksize %s' % blocksize)

            blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)

            # NOTE: Python 2 integer division is intentional here (M/R).
            indptr  = np.empty(M/R + 1,    dtype=np.intc)
            indices = np.empty(blks,       dtype=np.intc)
            data    = np.zeros((blks,R,C), dtype=self.dtype)

            csr_tobsr(M, N, R, C, self.indptr, self.indices, self.data, \
                    indptr, indices, data.ravel() )

            return bsr_matrix((data,indices,indptr), shape=self.shape)

    # these functions are used by the parent class (_cs_matrix)
    # to remove redudancy between csc_matrix and csr_matrix
    def _swap(self,x):
        """swap the members of x if this is a column-oriented matrix
        """
        # CSR is row-oriented, so x is returned unchanged.
        return (x[0],x[1])

    def __getitem__(self, key):
        """Index with int/slice/array combinations, mirroring ndarray rules."""
        def asindices(x):
            # Coerce an index object to an intc array, or fail as IndexError.
            try:
                x = np.asarray(x, dtype=np.intc)
            # NOTE(review): bare except hides unexpected errors; narrow to
            # (TypeError, ValueError) when this code is next touched.
            except:
                raise IndexError('invalid index')
            else:
                return x

        def extractor(indices,N):
            """Return a sparse matrix P so that P*self implements
            slicing of the form self[[1,2,3],:]
            """
            indices = asindices(indices)

            max_indx = indices.max()

            if max_indx >= N:
                raise IndexError('index (%d) out of range' % max_indx)

            min_indx = indices.min()
            if min_indx < -N:
                raise IndexError('index (%d) out of range' % (N + min_indx))

            if min_indx < 0:
                indices = indices.copy()
                indices[indices < 0] += N

            # One entry of 1 per selected row: P acts as a row selector.
            indptr = np.arange(len(indices) + 1, dtype=np.intc)
            data   = np.ones(len(indices), dtype=self.dtype)
            shape  = (len(indices),N)

            return csr_matrix((data,indices,indptr), shape=shape)

        if isinstance(key, tuple):
            row = key[0]
            col = key[1]

            if isintlike(row):
                #[1,??]
                if isintlike(col):
                    return self._get_single_element(row, col) #[i,j]
                elif isinstance(col, slice):
                    return self._get_row_slice(row, col)      #[i,1:2]
                else:
                    P = extractor(col,self.shape[1]).T        #[i,[1,2]]
                    return self[row,:]*P

            elif isinstance(row, slice):
                #[1:2,??]
                if isintlike(col) or isinstance(col, slice):
                    return self._get_submatrix(row, col)      #[1:2,j]
                else:
                    P = extractor(col,self.shape[1]).T        #[1:2,[1,2]]
                    return self[row,:]*P

            else:
                #[[1,2],??] or [[[1],[2]],??]
                if isintlike(col) or isinstance(col,slice):
                    P = extractor(row, self.shape[0])         #[[1,2],j] or [[1,2],1:2]
                    return (P*self)[:,col]

                else:
                    row = asindices(row)
                    col = asindices(col)
                    if len(row.shape) == 1:
                        if len(row) != len(col):              #[[1,2],[1,2]]
                            raise IndexError('number of row and column indices differ')
                        val = []
                        for i,j in zip(row,col):
                            val.append(self._get_single_element(i,j))
                        return np.asmatrix(val)

                    elif len(row.shape) == 2:
                        row = np.ravel(row)                   #[[[1],[2]],[1,2]]
                        P = extractor(row, self.shape[0])
                        return (P*self)[:,col]

                    else:
                        raise NotImplementedError('unsupported indexing')

        elif isintlike(key) or isinstance(key,slice):
            return self[key,:]                                #[i] or [1:2]
        else:
            return self[asindices(key),:]                     #[[1,2]]

    def _get_single_element(self,row,col):
        """Returns the single element self[row, col]
        """
        M, N = self.shape
        if (row < 0):
            row += M
        if (col < 0):
            col += N
        if not (0<=row<M) or not (0<=col<N):
            raise IndexError("index out of bounds")

        #TODO make use of sorted indices (if present)

        # Linear scan of the row's column indices for a match.
        start = self.indptr[row]
        end   = self.indptr[row+1]
        indxs = np.where(col == self.indices[start:end])[0]

        num_matches = len(indxs)

        if num_matches == 0:
            # entry does not appear in the matrix
            return self.dtype.type(0)
        elif num_matches == 1:
            return self.data[start:end][indxs[0]]
        else:
            # Duplicate (row, col) entries were never summed together.
            raise ValueError('nonzero entry (%d,%d) occurs more than once' % (row,col) )

    def _get_row_slice(self, i, cslice):
        """Returns a copy of row self[i, cslice]
        """
        if i < 0:
            i += self.shape[0]

        if i < 0 or i >= self.shape[0]:
            raise IndexError('index (%d) out of range' % i )

        start, stop, stride = cslice.indices(self.shape[1])

        if stride != 1:
            raise ValueError, "slicing with step != 1 not supported"
        if stop <= start:
            raise ValueError, "slice width must be >= 1"

        #TODO make [i,:] faster
        #TODO implement [i,x:y:z]

        # Collect positions of stored entries that fall inside the slice.
        indices = []

        for ind in xrange(self.indptr[i], self.indptr[i+1]):
            if self.indices[ind] >= start and self.indices[ind] < stop:
                indices.append(ind)

        # Shift column indices so the result starts at column 0.
        index  = self.indices[indices] - start
        data   = self.data[indices]
        indptr = np.array([0, len(indices)])
        return csr_matrix( (data, index, indptr), shape=(1, stop-start) )

    def _get_submatrix( self, row_slice, col_slice ):
        """Return a submatrix of this matrix (new matrix is created)."""

        M,N = self.shape

        def process_slice( sl, num ):
            # Normalize a slice or int into an inclusive-exclusive (i0, i1)
            # pair, resolving negative indices against `num`.
            if isinstance( sl, slice ):
                i0, i1 = sl.start, sl.stop
                if i0 is None:
                    i0 = 0
                elif i0 < 0:
                    i0 = num + i0

                if i1 is None:
                    i1 = num
                elif i1 < 0:
                    i1 = num + i1

                return i0, i1

            elif isintlike( sl ):
                if sl < 0:
                    sl += num

                return sl, sl + 1

            else:
                raise TypeError('expected slice or scalar')

        def check_bounds( i0, i1, num ):
            if not (0<=i0<num) or not (0<i1<=num) or not (i0<i1):
                raise IndexError( \
                      "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %\
                      (i0, num, i1, num, i0, i1) )

        i0, i1 = process_slice( row_slice, M )
        j0, j1 = process_slice( col_slice, N )
        check_bounds( i0, i1, M )
        check_bounds( j0, j1, N )

        indptr, indices, data = get_csr_submatrix( M, N, \
                self.indptr, self.indices, self.data, \
                int(i0), int(i1), int(j0), int(j1) )

        shape = (i1 - i0, j1 - j0)

        return self.__class__( (data,indices,indptr), shape=shape )
from sputils import _isinstance
def isspmatrix_csr(x):
    """Return True if x is an instance of csr_matrix."""
    return _isinstance(x, csr_matrix)
| |
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
"""Runs the KijiExpress Music tutorial."""
import glob
import logging
import os
import shutil
import sys
import tempfile
# Add the root directory to the Python path if necessary:
__path = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
if __path not in sys.path:
sys.path.append(__path)
from base import base
from kiji import kiji_bento
from kiji import tutorial_test
FLAGS = base.FLAGS
LogLevel = base.LogLevel
class Error(Exception):
  """Base class for errors raised by this module."""
# Horizontal ruler:
LINE = '-' * 80


# ------------------------------------------------------------------------------
# Command-line flag declarations:


FLAGS.AddString(
    name='work_dir',
    help='Working directory.',
)

FLAGS.AddString(
    name='maven_local_repo',
    help='Optional Maven local repository from where to fetch artifacts.',
)

FLAGS.AddString(
    name='maven_remote_repo',
    help='Optional Maven remote repository from where to fetch artifacts.',
)

FLAGS.AddString(
    name='kiji_bento_version',
    default=None,
    help=('Version of KijiBento to download and test against. '
          'For example "1.0.0-rc4" or "1.0.0-rc5-SNAPSHOT". '
          'If not specified, uses the most recent version in the nightly repo.'),
)

# NOTE(review): this help text describes the False case, but the name and
# default=True say cleanup is enabled by default -- confirm wording against
# how the flag is consumed.
FLAGS.AddBoolean(
    name='cleanup_after_test',
    default=True,
    help=('When set, disables cleaning up after test. '
          'Bento cluster stay alive, working directory is not wiped.'),
)

FLAGS.AddBoolean(
    name='help',
    default=False,
    help='Prints a help message.',
)
# ------------------------------------------------------------------------------
class Tutorial(object):
"""Runs the KijiMusic tutorial."""
  def __init__(
      self, work_dir, version,
      maven_local_repo=None,
      maven_remote_repo=None,
      python=False):
    """Initializes the tutorial runner.

    Args:
      work_dir: Working directory where to operate.
      version: Version of KijiBento to test, eg. '1.0.0-rc5-SNAPSHOT'.
      maven_local_repo: Optional local Maven repository.
      maven_remote_repo: Optional remote Maven repository.
      python: Presumably selects the Python variant of the tutorial jobs
          (only read in Part2) -- TODO confirm against callers.
    """
    self._work_dir = work_dir
    # Millisecond timestamp used to namespace HDFS paths and Kiji instances.
    self._run_id = base.NowMS()
    self._kiji_version = version

    # TODO: inject these in KijiBento
    self._maven_local_repo = maven_local_repo
    self._maven_remote_repo = maven_remote_repo

    self._python = python

    # Initialized in Setup()
    self._kiji_bento = kiji_bento.KijiBento(
        path=os.path.join(self.work_dir, 'kiji-bento-%s' % self._kiji_version),
        version=self._kiji_version,
    )
  @property
  def work_dir(self):
    """Returns the working directory this tutorial run operates under."""
    return self._work_dir
  @property
  def kiji_bento(self):
    """Returns the KijiBento install (created in __init__)."""
    return self._kiji_bento
  @property
  def bento_cluster(self):
    """Returns the BentoCluster install, delegating to the KijiBento object."""
    return self.kiji_bento.bento_cluster
  def Setup(self):
    """Initializes the tutorial runner.

    Fetches the KijiBento Maven artifact, unzip it, starts a Bento cluster,
    and prepares a working environment.
    """
    self.kiji_bento.Install()
    self.bento_cluster.Start()

    self._express_music_dir = (
        os.path.join(self.kiji_bento.path, 'examples', 'express-music'))
    assert os.path.exists(self._express_music_dir), (
        'KijiExpress tutorial root directory not found: %r' % self._express_music_dir)

    # Namespace HDFS paths and the Kiji instance with the run ID so that
    # repeated runs do not collide.
    self._hdfs_base = 'express-music-%d' % self._run_id
    self._kiji_instance_uri = 'kiji://.env/kiji_music_%d' % self._run_id

    express_music_lib_dir = os.path.join(self._express_music_dir, 'lib')
    # Find the jar for kiji-express-music in the lib dir.
    music_jars = glob.glob("{}/{}".format(express_music_lib_dir, "kiji-express-music-*.jar"))
    # Exactly one versioned jar is expected in the lib directory.
    assert (len(music_jars) == 1)
    express_music_jar = music_jars[0]

    # Builds a working environment for KijiMusic tutorial commands:
    self._env = dict(os.environ)
    self._env.update({
        'MUSIC_EXPRESS_HOME': self._express_music_dir,
        'LIBS_DIR': express_music_lib_dir,
        'EXPRESS_MUSIC_JAR': express_music_jar,
        'KIJI': self._kiji_instance_uri,
        'KIJI_CLASSPATH':
            ':'.join(glob.glob(os.path.join(express_music_lib_dir, '*'))),
        'HDFS_BASE': self._hdfs_base,
    })
  def Command(self, command):
    """Runs a Kiji command-line.

    Args:
      command: Kiji command-line to run as a single string.
    Returns:
      The tutorial_test.KijiCommand object for the invocation; its
      exit_code/output_text/error_text are read here, so the command has
      completed by the time this returns.
    """
    cmd = tutorial_test.KijiCommand(
        command=command,
        work_dir=self.kiji_bento.path,
        env=self._env,
    )
    logging.debug('Exit code: %d', cmd.exit_code)
    if logging.getLogger().level <= LogLevel.DEBUG_VERBOSE:
      # Verbose mode: dump full output/error between horizontal rulers.
      logging.debug('Output:\n%s\n%s%s', LINE, cmd.output_text, LINE)
      logging.debug('Error:\n%s\n%s%s', LINE, cmd.error_text, LINE)
    else:
      logging.debug('Output: %r', cmd.output_text)
      logging.debug('Error: %r', cmd.error_text)
    return cmd
def StripJavaHomeLine(self, output_lines):
"""Strips any line about JAVA_HOME being set, which sometimes happens in the output of a
`kiji` command, from the output lines of a `kiji` command.
Args:
command: Output lines from a `kiji` command.
"""
if "JAVA_HOME" in output_lines[0]:
return output_lines[1:]
else:
return output_lines
# ----------------------------------------------------------------------------
# KijiMusic setup:
  def Part1(self):
    """Runs the setup part of the KijiExpress Music tutorial.

    http://docs.kiji.org/tutorials/express-recommendation/DEVEL/express-setup/
    """
    # --------------------------------------------------------------------------
    # Install a fresh Kiji instance for this run:
    install = self.Command('kiji install --kiji=${KIJI}')
    assert (install.exit_code == 0)
    assert ('Successfully created kiji instance: ' in install.output_text)
    # --------------------------------------------------------------------------
    # Create the tutorial tables from the DDL shipped with the tutorial:
    create_table = self.Command(base.StripMargin("""
        |kiji-schema-shell \\
        |    --kiji=${KIJI} \\
        |    --file=${MUSIC_EXPRESS_HOME}/music-schema.ddl \\
        """))
    # NOTE(review): debug print left in — consider logging.debug instead.
    print(create_table.error_text)
    assert (create_table.exit_code == 0)
    # --------------------------------------------------------------------------
    # Verify both tutorial tables now exist:
    list_tables = self.Command('kiji ls ${KIJI}')
    assert (list_tables.exit_code == 0)
    assert ('songs' in list_tables.output_text), (
        'Missing table "songs": %s' % list_tables.output_lines)
    assert ('users' in list_tables.output_text), (
        'Missing table "users": %s' % list_tables.output_lines)
    # --------------------------------------------------------------------------
    # Stage the example JSON data into HDFS for the import jobs:
    mkdir = self.Command('hadoop fs -mkdir ${HDFS_BASE}/express-tutorial/')
    assert (mkdir.exit_code == 0)
    copy = self.Command(base.StripMargin("""
        |hadoop fs -copyFromLocal \\
        |    ${MUSIC_EXPRESS_HOME}/example_data/*.json \\
        |    ${HDFS_BASE}/express-tutorial/
        """))
    assert (copy.exit_code == 0)
# ----------------------------------------------------------------------------
# KijiExpress Music bulk-importing:
def Part2(self):
"""Runs the importing part of the KijiExpress Music tutorial.
http://docs.kiji.org/tutorials/express-recommendation/DEVEL/express-importing-data/
"""
# --------------------------------------------------------------------------
cmd = ' '
if self._python == False:
cmd = base.StripMargin("""
|express job \\
| ${EXPRESS_MUSIC_JAR} \\
| org.kiji.express.music.SongMetadataImporter \\
| --libjars "${MUSIC_EXPRESS_HOME}/lib/*" \\
| --input ${HDFS_BASE}/express-tutorial/song-metadata.json \\
| --table-uri ${KIJI}/songs --hdfs
""")
else:
cmd = base.StripMargin("""
|express.py \\
| job \\
| --libjars="${MUSIC_EXPRESS_HOME}/lib/*" \\
| --user_jar=${EXPRESS_MUSIC_JAR} \\
| --job_name=org.kiji.express.music.SongMetadataImporter \\
| --mode=hdfs \\
| --input ${HDFS_BASE}/express-tutorial/song-metadata.json \\
| --table-uri ${KIJI}/songs
""")
songMetadataImport = self.Command(cmd)
assert (songMetadataImport.exit_code == 0)
# --------------------------------------------------------------------------
list_rows = self.Command('kiji scan ${KIJI}/songs --max-rows=5')
assert (list_rows.exit_code == 0)
# Strip the first line from the output, if it is about $JAVA_HOME not set.
stripped_output = self.StripJavaHomeLine(list_rows.output_lines)
assert (stripped_output[0].startswith('Scanning kiji table: kiji://'))
assert (len(stripped_output) >= 3 * 5 + 1), len(stripped_output)
for row in range(0, 5):
tutorial_test.ExpectRegexMatch(
expect=r"^entity-id=\['song-\d+'\] \[\d+\] info:metadata$",
actual=stripped_output[1 + row * 3])
tutorial_test.ExpectRegexMatch(
expect=r"^\s*{\s*\"song_name\".*\"album_name\".*\"artist_name\".*\"genre\".*\"tempo\".*\"duration\".*\s*}\s*$",
actual=stripped_output[2 + row * 3])
tutorial_test.ExpectRegexMatch(
expect=r"^$",
actual=stripped_output[3 + row * 3])
# --------------------------------------------------------------------------
cmd = ' '
if self._python == False:
cmd = base.StripMargin("""
|express job \\
| ${EXPRESS_MUSIC_JAR} \\
| org.kiji.express.music.SongPlaysImporter \\
| --libjars "${MUSIC_EXPRESS_HOME}/lib/*" \\
| --input ${HDFS_BASE}/express-tutorial/song-plays.json \\
| --table-uri ${KIJI}/users --hdfs
""")
else:
cmd = base.StripMargin("""
|express.py \\
| job \\
| -libjars="${MUSIC_EXPRESS_HOME}/lib/*" \\
| -user_jar=${EXPRESS_MUSIC_JAR} \\
| -job_name=org.kiji.express.music.SongPlaysImporter \\
| -mode=hdfs \\
| --input ${HDFS_BASE}/express-tutorial/song-plays.json \\
| --table-uri ${KIJI}/users
""")
userDataImport = self.Command(cmd)
assert (userDataImport.exit_code == 0)
# --------------------------------------------------------------------------
list_rows = self.Command('kiji scan ${KIJI}/users --max-rows=5')
assert (list_rows.exit_code == 0)
stripped_output = self.StripJavaHomeLine(list_rows.output_lines)
assert (stripped_output[0].startswith('Scanning kiji table: kiji://'))
assert (len(stripped_output) >= 3 * 5 + 1), len(stripped_output)
for row in range(0, 5):
tutorial_test.ExpectRegexMatch(
expect=r"^entity-id=\['user-\d+'\] \[\d+\] info:track_plays$",
actual=stripped_output[1 + row * 3])
tutorial_test.ExpectRegexMatch(
expect=r"^\s*song-\d+$",
actual=stripped_output[2 + row * 3])
tutorial_test.ExpectRegexMatch(
expect=r"^$",
actual=stripped_output[3 + row * 3])
# --------------------------------------------------------------------------
# play-count section.
def Part3(self):
cmd = ' '
if self._python == False:
cmd = base.StripMargin("""
|express job \\
| ${EXPRESS_MUSIC_JAR} \\
| org.kiji.express.music.SongPlayCounter \\
| --libjars "${MUSIC_EXPRESS_HOME}/lib/*" \\
| --table-uri ${KIJI}/users \\
| --output ${HDFS_BASE}/express-tutorial/songcount-output \\
| --hdfs
""")
else:
cmd = base.StripMargin("""
|express.py \\
| job \\
| -libjars="${MUSIC_EXPRESS_HOME}/lib/*" \\
| -user_jar=${EXPRESS_MUSIC_JAR} \\
| -job_name=org.kiji.express.music.SongPlayCounter \\
| -mode=hdfs \\
| --table-uri ${KIJI}/users \\
| --output ${HDFS_BASE}/express-tutorial/songcount-output \\
""")
play_count = self.Command(cmd)
assert (play_count.exit_code == 0)
fs_text = self.Command("""
hadoop fs -text ${HDFS_BASE}/express-tutorial/songcount-output/part-00000 | head -3
""")
tutorial_test.Expect(expect=0, actual=fs_text.exit_code)
lines = list(filter(None, self.StripJavaHomeLine(fs_text.output_lines))) # filter empty lines
tutorial_test.Expect(expect=3, actual=len(lines))
for line in lines:
tutorial_test.ExpectRegexMatch(expect=r'^song-\d+\t\d+$', actual=line)
# ----------------------------------------------------------------------------
# Top Next Songs section.
def Part4(self):
cmd = ' '
if self._python == False:
cmd = base.StripMargin("""
|express job \\
| ${EXPRESS_MUSIC_JAR} \\
| org.kiji.express.music.TopNextSongs \\
| --libjars "${MUSIC_EXPRESS_HOME}/lib/*" \\
| --users-table ${KIJI}/users \\
| --songs-table ${KIJI}/songs --hdfs
""")
else:
cmd = base.StripMargin("""
|express.py \\
| job \\
| -libjars="${MUSIC_EXPRESS_HOME}/lib/*" \\
| -user_jar=${EXPRESS_MUSIC_JAR} \\
| -job_name=org.kiji.express.music.TopNextSongs \\
| -mode=hdfs \\
| --users-table ${KIJI}/users \\
| --songs-table ${KIJI}/songs --hdfs
""")
top_songs = self.Command(cmd)
assert (top_songs.exit_code == 0)
list_rows = self.Command('kiji scan ${KIJI}/songs --max-rows=2')
assert (list_rows.exit_code == 0)
stripped_output = self.StripJavaHomeLine(list_rows.output_lines)
assert (stripped_output[0].startswith('Scanning kiji table: kiji://'))
assert (len(stripped_output) >= 5 * 2 + 1), len(stripped_output)
for row in range(0, 2):
tutorial_test.ExpectRegexMatch(
expect=r"^entity-id=\['song-\d+'\] \[\d+\] info:metadata$",
actual=stripped_output[1 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^\s*{\s*\"song_name\".*\"album_name\".*\"artist_name\".*\"genre\".*\"tempo\".*\"duration\".*\s*}\s*$",
actual=stripped_output[2 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^entity-id=\['song-\d+'\] \[\d+\] info:top_next_songs$",
actual=stripped_output[3 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^\s*{\s*\"top_songs\".*}$",
actual=stripped_output[4 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^$",
actual=stripped_output[5 + row * 5])
# ----------------------------------------------------------------------------
# Song recommender
def Part5(self):
cmd = ' '
if self._python == False:
cmd = base.StripMargin("""
|express job ${EXPRESS_MUSIC_JAR} \\
| org.kiji.express.music.SongRecommender \\
| --songs-table ${KIJI}/songs \\
| --users-table ${KIJI}/users
""")
else:
cmd = base.StripMargin("""
|express.py \\
| job \\
| -user_jar=${EXPRESS_MUSIC_JAR} \\
| -job_name=org.kiji.express.music.SongRecommender \\
| -mode=hdfs \\
| --songs-table ${KIJI}/songs \\
| --users-table ${KIJI}/users
""")
song_recommend = self.Command(cmd)
assert (song_recommend.exit_code == 0)
list_rows = self.Command("kiji scan ${KIJI}/users --max-rows=2")
assert (list_rows.exit_code == 0)
stripped_output = self.StripJavaHomeLine(list_rows.output_lines)
assert (stripped_output[0].startswith('Scanning kiji table: kiji://'))
assert (len(stripped_output) >= 5 * 2 + 1), len(stripped_output)
for row in range(0, 2):
tutorial_test.ExpectRegexMatch(
expect=r"^entity-id=\['user-\d+'\] \[\d+\] info:track_plays$",
actual=stripped_output[1 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^\s*song-\d+$",
actual=stripped_output[2 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^entity-id=\['user-\d+'\] \[\d+\] info:next_song_rec$",
actual=stripped_output[3 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^\s*song-\d+$",
actual=stripped_output[4 + row * 5])
tutorial_test.ExpectRegexMatch(
expect=r"^$",
actual=stripped_output[5 + row * 5])
# ----------------------------------------------------------------------------
# Cleanup:
  def Cleanup(self):
    """Stops the Bento cluster and deletes the temporary working directory."""
    self.bento_cluster.Stop()
    shutil.rmtree(self.work_dir)
# ------------------------------------------------------------------------------
def _RunTutorial(work_dir, python):
  """Runs all the parts of the KijiExpress Music tutorial once.

  Args:
    work_dir: Working directory where the tutorial is staged.
    python: When true, drive the tutorial through the 'express.py' launcher;
        otherwise use the Scala 'express' launcher.
  """
  tutorial = Tutorial(
      work_dir=work_dir,
      version=FLAGS.kiji_bento_version,
      maven_local_repo=FLAGS.maven_local_repo,
      maven_remote_repo=FLAGS.maven_remote_repo,
      python=python,
  )
  try:
    tutorial.Setup()
    tutorial.Part1()
    tutorial.Part2()
    tutorial.Part3()
    tutorial.Part4()
    tutorial.Part5()
  finally:
    if FLAGS.cleanup_after_test:
      tutorial.Cleanup()


def Main(args):
  """Program entry point.

  Args:
    args: Unparsed command-line arguments; must be empty.
  Returns:
    Shell exit code on early exit, None otherwise.
  """
  if FLAGS.help:
    FLAGS.PrintUsage()
    return os.EX_OK

  if args:
    # Lazy %-args: logging builds the message only if the record is emitted.
    logging.error('Unexpected command-line arguments: %r', args)
    FLAGS.PrintUsage()
    return os.EX_USAGE

  # Create a temporary working directory:
  work_dir = FLAGS.work_dir
  if work_dir is None:
    work_dir = tempfile.mkdtemp(prefix='work_dir.', dir=os.getcwd())
  work_dir = os.path.abspath(work_dir)
  if not os.path.exists(work_dir):
    os.makedirs(work_dir)
  FLAGS.work_dir = work_dir
  logging.info('Working directory: %r', work_dir)
  logging.info('Testing tutorial of KijiBento %s', FLAGS.kiji_bento_version)

  # Runs the tutorial twice: with the deprecated Scala launcher, then with
  # the Python launcher (previously two copy-pasted try/finally blocks):
  _RunTutorial(work_dir, python=False)
  _RunTutorial(work_dir, python=True)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
  # Delegate flag parsing, logging setup and exit-code handling to base.Run.
  base.Run(Main)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process
# For trace
import sys
import trace
import logging
from calvin.calvinsys import Sys as CalvinSys
from calvin.runtime.north import actormanager
from calvin.runtime.north import appmanager
from calvin.runtime.north import scheduler
from calvin.runtime.north import storage
from calvin.runtime.north import calvincontrol
from calvin.runtime.north import metering
from calvin.runtime.north.calvin_network import CalvinNetwork
from calvin.runtime.north.calvin_proto import CalvinProto
from calvin.runtime.north.portmanager import PortManager
from calvin.runtime.south.monitor import Event_Monitor
from calvin.runtime.south.plugins.async import async
from calvin.utilities.attribute_resolver import AttributeResolver
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities.security import security_modules_check
from calvin.utilities.authorization.policy_decision_point import PolicyDecisionPoint
from calvin.utilities import authorization
from calvin.utilities import calvinuuid
from calvin.utilities import certificate
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities import calvinconfig
_log = get_logger(__name__)
_conf = calvinconfig.get()
def addr_from_uri(uri):
    """Return the host address of a URI such as 'calvinip://1.2.3.4:5001'."""
    _, host_and_port = uri.split("://")
    address, _ = host_and_port.split(":")
    return address
class Node(object):
    """A node of calvin
       the uri is a list of server connection points
       the control_uri is the local console
       attributes is a supplied list of external defined attributes that will be used as the key when storing index
       such as name of node
       authz_server is True if the runtime can act as an authorization server
    """

    def __init__(self, uri, control_uri, attributes=None, authz_server=False):
        """Builds all runtime subsystems and defers network-dependent setup to start()."""
        super(Node, self).__init__()
        self.uri = uri
        self.control_uri = control_uri
        # External URIs default to the internal ones unless overridden in attributes.
        self.external_uri = attributes.pop('external_uri', self.uri) \
            if attributes else self.uri
        self.external_control_uri = attributes.pop('external_control_uri', self.control_uri) \
            if attributes else self.control_uri
        try:
            self.attributes = AttributeResolver(attributes)
        except:
            # Deliberate best-effort: malformed attributes fall back to empty.
            _log.exception("Attributes not correct, uses empty attribute!")
            self.attributes = AttributeResolver(None)
        try:
            self.sec_conf = _conf.get("security","security_conf")
            # self.pdp only exists when this runtime acts as (or locally is)
            # the authorization decision point; later code probes hasattr(self, "pdp").
            if authz_server or self.sec_conf['authorization']['procedure'] == "local":
                self.pdp = PolicyDecisionPoint(self.sec_conf['authorization'])
        except:
            # NOTE(review): bare except also swallows bad authorization config,
            # silently disabling security — confirm this is intended.
            self.sec_conf = None
        # Obtain node id, when using security also handle runtime certificate
        self.id = certificate.obtain_cert_node_info(self.attributes.get_node_name_as_str())['id']
        self.metering = metering.set_metering(metering.Metering(self))
        self.monitor = Event_Monitor()
        self.am = actormanager.ActorManager(self)
        self.control = calvincontrol.get_calvincontrol()
        # Use the (slower) debug scheduler only when debug logging is active.
        _scheduler = scheduler.DebugScheduler if _log.getEffectiveLevel() <= logging.DEBUG else scheduler.Scheduler
        self.sched = _scheduler(self, self.am, self.monitor)
        # Maps locally-issued async message ids to their (eventual) replies.
        self.async_msg_ids = {}
        self._calvinsys = CalvinSys(self)
        # Default will multicast and listen on all interfaces
        # TODO: be able to specify the interfaces
        # @TODO: Store capabilities
        self.storage = storage.Storage(self)
        self.network = CalvinNetwork(self)
        self.proto = CalvinProto(self, self.network)
        self.pm = PortManager(self, self.proto)
        self.app_manager = appmanager.AppManager(self)
        # The initialization that requires the main loop operating is deferred to start function
        async.DelayedCall(0, self.start)

    def insert_local_reply(self):
        """Reserves a new local async message id and returns it."""
        msg_id = calvinuuid.uuid("LMSG")
        self.async_msg_ids[msg_id] = None
        return msg_id

    def set_local_reply(self, msg_id, reply):
        """Records the reply for a previously reserved local async message id."""
        if msg_id in self.async_msg_ids:
            self.async_msg_ids[msg_id] = reply

    def connect(self, actor_id=None, port_name=None, port_dir=None, port_id=None,
                peer_node_id=None, peer_actor_id=None, peer_port_name=None,
                peer_port_dir=None, peer_port_id=None, cb=None):
        """Connects a local port to a peer port via the port manager; cb defaults to debug logging."""
        self.pm.connect(actor_id=actor_id,
                        port_name=port_name,
                        port_dir=port_dir,
                        port_id=port_id,
                        peer_node_id=peer_node_id,
                        peer_actor_id=peer_actor_id,
                        peer_port_name=peer_port_name,
                        peer_port_dir=peer_port_dir,
                        peer_port_id=peer_port_id,
                        callback=CalvinCB(self.logging_callback, preamble="connect cb") if cb is None else cb)

    def disconnect(self, actor_id=None, port_name=None, port_dir=None, port_id=None, cb=None):
        """Disconnects a local port via the port manager; cb defaults to debug logging."""
        _log.debug("disconnect(actor_id=%s, port_name=%s, port_dir=%s, port_id=%s)" %
                   (actor_id if actor_id else "", port_name if port_name else "",
                    port_dir if port_dir else "", port_id if port_id else ""))
        self.pm.disconnect(actor_id=actor_id, port_name=port_name,
                           port_dir=port_dir, port_id=port_id,
                           callback=CalvinCB(self.logging_callback, preamble="disconnect cb") if cb is None else cb)

    def peersetup(self, peers, cb=None):
        """ Sets up a RT to RT communication channel, only needed if the peer can't be found in storage.
            peers: a list of peer uris, e.g. ["calvinip://127.0.0.1:5001"]
        """
        _log.debug("peersetup(%s)" % (peers))
        peers_copy = peers[:]
        peer_node_ids = {}
        if not cb:
            callback = CalvinCB(self.logging_callback, preamble="peersetup cb")
        else:
            # Collect per-peer join results; org_cb fires once all peers answered.
            callback = CalvinCB(self.peersetup_collect_cb, peers=peers_copy, peer_node_ids=peer_node_ids, org_cb=cb)
        self.network.join(peers, callback=callback)

    def peersetup_collect_cb(self, status, uri, peer_node_id, peer_node_ids, peers, org_cb):
        """Accumulates join results per peer uri; invokes org_cb when the last peer reports."""
        if uri in peers:
            peers.remove(uri)
            peer_node_ids[uri] = (peer_node_id, status)
            if not peers:
                # Get highest status, i.e. any error
                comb_status = max([s for _, s in peer_node_ids.values()])
                org_cb(peer_node_ids=peer_node_ids, status=comb_status)

    def logging_callback(self, preamble=None, *args, **kwargs):
        """Default callback: dumps the invocation framed by '#' rulers at debug level."""
        _log.debug("\n%s# NODE: %s \n# %s %s %s \n%s" %
                   ('#' * 40, self.id, preamble if preamble else "*", args, kwargs, '#' * 40))

    def new(self, actor_type, args, deploy_args=None, state=None, prev_connections=None, connection_list=None):
        """Creates a new actor; registers it with its application when deploy_args carries one."""
        # TODO requirements should be input to am.new
        actor_id = self.am.new(actor_type, args, state, prev_connections, connection_list,
                               signature=deploy_args['signature'] if deploy_args and 'signature' in deploy_args else None,
                               credentials=deploy_args['credentials'] if deploy_args and 'credentials' in deploy_args else None)
        if deploy_args:
            app_id = deploy_args['app_id']
            # NOTE(review): app_name is computed but never used — confirm.
            if 'app_name' not in deploy_args:
                app_name = app_id
            else:
                app_name = deploy_args['app_name']
            self.app_manager.add(app_id, actor_id,
                                 deploy_info = deploy_args['deploy_info'] if 'deploy_info' in deploy_args else None)
        return actor_id

    def calvinsys(self):
        """Return a CalvinSys instance"""
        # FIXME: We still need to sort out actor requirements vs. node capabilities and user permissions.
        # @TODO: Write node capabilities to storage
        return self._calvinsys

    #
    # Event loop
    #
    def run(self):
        """main loop on node"""
        _log.debug("Node %s is running" % self.id)
        self.sched.run()

    def start(self):
        """ Run once when main loop is started """
        interfaces = _conf.get(None, 'transports')
        self.network.register(interfaces, ['json'])
        self.network.start_listeners(self.uri)
        # Start storage after network, proto etc since storage proxy expects them
        self.storage.start()
        self.storage.add_node(self)
        if hasattr(self, "pdp"):
            # Advertise this runtime as an authorization server.
            self.storage.add_authz_server(self)
        if self.sec_conf and "authorization" in self.sec_conf:
            authorization.register_node(self)
        # Start control API
        proxy_control_uri = _conf.get(None, 'control_proxy')
        _log.debug("Start control API on %s with uri: %s and proxy: %s" % (self.id, self.control_uri, proxy_control_uri))
        if proxy_control_uri is not None:
            self.control.start(node=self, uri=proxy_control_uri, tunnel=True)
        else:
            if self.control_uri is not None:
                self.control.start(node=self, uri=self.control_uri)

    def stop(self, callback=None):
        """Deregisters the node from storage, then tears down storage, scheduler and control."""
        def stopped(*args):
            # Final stage: storage is down, stop scheduler and control API.
            _log.analyze(self.id, "+", {'args': args})
            _log.debug(args)
            self.sched.stop()
            _log.analyze(self.id, "+ SCHED STOPPED", {'args': args})
            self.control.stop()
            _log.analyze(self.id, "+ CONTROL STOPPED", {'args': args})

        def deleted_node(*args, **kwargs):
            # Node entry removed from storage; now stop storage itself.
            _log.analyze(self.id, "+", {'args': args, 'kwargs': kwargs})
            self.storage.stop(stopped)

        _log.analyze(self.id, "+", {})
        # FIXME: this function is never called when the node quits
        if hasattr(self, "pdp"):
            self.storage.delete_authz_server(self)
        self.storage.delete_node(self, cb=deleted_node)
def create_node(uri, control_uri, attributes=None, authz_server=False):
    """Instantiate a Node and drive its scheduler loop until the node stops."""
    node = Node(uri, control_uri, attributes, authz_server)
    node.run()
    _log.info('Quitting node "%s"' % node.uri)
def create_tracing_node(uri, control_uri, attributes=None, authz_server=False):
    """
    Same as create_node, but will trace every line of execution.
    Creates trace dump in output file '<host>_<port>.trace'
    """
    n = Node(uri, control_uri, attributes, authz_server)
    _, host = uri.split('://')
    # Modules to ignore
    ignore = [
        'fifo', 'calvin', 'actor', 'pickle', 'socket',
        'uuid', 'codecs', 'copy_reg', 'string_escape', '__init__',
        'colorlog', 'posixpath', 'glob', 'genericpath', 'base',
        'sre_parse', 'sre_compile', 'fdesc', 'posixbase', 'escape_codes',
        'fnmatch', 'urlparse', 're', 'stat', 'six'
    ]
    # NOTE(review): host still contains the ':port' separator, so the file is
    # actually named '<host>:<port>.trace', not '<host>_<port>.trace' as the
    # docstring claims — confirm which is intended before changing the name.
    with open("%s.trace" % (host, ), "w") as f:
        saved_stdout = sys.stdout
        sys.stdout = f
        try:
            # trace.Trace writes its per-line report to stdout, hence the redirect.
            tracer = trace.Trace(trace=1, count=0, ignoremods=ignore)
            tracer.runfunc(n.run)
        finally:
            # Fix: always restore stdout.  The original only restored it on the
            # success path, leaving sys.stdout bound to a closed file if the
            # traced run raised.
            sys.stdout = saved_stdout
    _log.info('Quitting node "%s"' % n.uri)
def start_node(uri, control_uri, trace_exec=False, attributes=None, authz_server=False):
    """Spawn a daemon process running a Calvin node (traced when trace_exec)."""
    if not security_modules_check():
        raise Exception("Security module missing")
    target = create_tracing_node if trace_exec else create_node
    node_process = Process(target=target, args=(uri, control_uri, attributes, authz_server))
    node_process.daemon = True
    node_process.start()
    return node_process
| |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
import testtools
from neutron.tests.api import base
from neutron.tests.tempest.common import tempest_fixtures as fixtures
CONF = config.CONF
class BgpSpeakerTestJSONBase(base.BaseAdminNetworkTest):
    """Shared fixtures and CRUD helpers for BGP speaker/peer API tests."""

    # Default request bodies used by most tests; individual tests may override
    # fields via keyword arguments to the create_* helpers.
    default_bgp_speaker_args = {'local_as': '1234',
                                'ip_version': 4,
                                'name': 'my-bgp-speaker',
                                'advertise_floating_ip_host_routes': True,
                                'advertise_tenant_networks': True}
    default_bgp_peer_args = {'remote_as': '4321',
                             'name': 'my-bgp-peer',
                             'peer_ip': '192.168.1.1',
                             'auth_type': 'md5', 'password': 'my-secret'}

    @classmethod
    def resource_setup(cls):
        """Skips the suite unless the bgp_speaker extension is enabled."""
        super(BgpSpeakerTestJSONBase, cls).resource_setup()
        if not test.is_extension_enabled('bgp_speaker', 'network'):
            msg = "BGP Speaker extension is not enabled."
            raise cls.skipException(msg)
        # Admin-created resources tracked for cleanup in resource_cleanup().
        cls.admin_routerports = []
        cls.admin_floatingips = []
        cls.admin_routers = []
        cls.ext_net_id = CONF.network.public_network_id

    @classmethod
    def resource_cleanup(cls):
        """Best-effort teardown: floating IPs, then router interfaces, then routers."""
        for floatingip in cls.admin_floatingips:
            cls._try_delete_resource(cls.admin_client.delete_floatingip,
                                     floatingip['id'])
        for routerport in cls.admin_routerports:
            cls._try_delete_resource(
                cls.admin_client.remove_router_interface_with_subnet_id,
                routerport['router_id'], routerport['subnet_id'])
        for router in cls.admin_routers:
            cls._try_delete_resource(cls.admin_client.delete_router,
                                     router['id'])
        super(BgpSpeakerTestJSONBase, cls).resource_cleanup()

    def create_bgp_speaker(self, auto_delete=True, **args):
        """Creates a BGP speaker; registers deletion cleanup unless auto_delete is False."""
        data = {'bgp_speaker': args}
        bgp_speaker = self.admin_client.create_bgp_speaker(data)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        if auto_delete:
            self.addCleanup(self.delete_bgp_speaker, bgp_speaker_id)
        return bgp_speaker

    def create_bgp_peer(self, **args):
        """Creates a BGP peer and always registers deletion cleanup."""
        bgp_peer = self.admin_client.create_bgp_peer({'bgp_peer': args})
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        self.addCleanup(self.delete_bgp_peer, bgp_peer_id)
        return bgp_peer

    def update_bgp_speaker(self, id, **args):
        """Updates the given BGP speaker with the supplied fields."""
        data = {'bgp_speaker': args}
        return self.admin_client.update_bgp_speaker(id, data)

    def delete_bgp_speaker(self, id):
        """Deletes the given BGP speaker."""
        return self.admin_client.delete_bgp_speaker(id)

    def get_bgp_speaker(self, id):
        """Fetches the given BGP speaker."""
        return self.admin_client.get_bgp_speaker(id)

    def create_bgp_speaker_and_peer(self):
        """Creates one default speaker and one default peer; returns both."""
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
        return (bgp_speaker, bgp_peer)

    def delete_bgp_peer(self, id):
        """Deletes the given BGP peer."""
        return self.admin_client.delete_bgp_peer(id)

    def add_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
        """Associates an existing peer with a speaker."""
        return self.admin_client.add_bgp_peer_with_id(bgp_speaker_id,
                                                      bgp_peer_id)

    def remove_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
        """Dissociates a peer from a speaker."""
        return self.admin_client.remove_bgp_peer_with_id(bgp_speaker_id,
                                                         bgp_peer_id)

    def delete_address_scope(self, id):
        """Deletes the given address scope."""
        return self.admin_client.delete_address_scope(id)
class BgpSpeakerTestJSON(BgpSpeakerTestJSONBase):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        Create bgp-speaker
        Delete bgp-speaker
        Create bgp-peer
        Update bgp-peer
        Delete bgp-peer
    """

    # Membership/emptiness checks below use assertIn/assertFalse instead of
    # assertTrue(x in y) / assertTrue(not y): same pass/fail semantics with
    # far more informative failure messages.

    @test.idempotent_id('df259771-7104-4ffa-b77f-bd183600d7f9')
    def test_delete_bgp_speaker(self):
        """A deleted speaker is no longer retrievable."""
        bgp_speaker = self.create_bgp_speaker(auto_delete=False,
                                              **self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.delete_bgp_speaker(bgp_speaker_id)
        self.assertRaises(lib_exc.NotFound,
                          self.get_bgp_speaker,
                          bgp_speaker_id)

    @test.idempotent_id('81d9dc45-19f8-4c6e-88b8-401d965cd1b0')
    def test_create_bgp_peer(self):
        """A peer can be created with the default arguments."""
        self.create_bgp_peer(**self.default_bgp_peer_args)

    @test.idempotent_id('6ade0319-1ee2-493c-ac4b-5eb230ff3a77')
    def test_add_bgp_peer(self):
        """An added peer appears in the speaker's peer list."""
        bgp_speaker, bgp_peer = self.create_bgp_speaker_and_peer()
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertEqual(1, len(bgp_peers_list))
        self.assertIn(bgp_peer_id, bgp_peers_list)

    @test.idempotent_id('f9737708-1d79-440b-8350-779f97d882ee')
    def test_remove_bgp_peer(self):
        """A removed peer disappears from the speaker's peer list."""
        bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertIn(bgp_peer_id, bgp_peers_list)
        bgp_speaker = self.remove_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertFalse(bgp_peers_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('23c8eb37-d10d-4f43-b2e7-6542cb6a4405')
    def test_add_gateway_network(self):
        """An added gateway network appears in the speaker's network list."""
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        network_list = bgp_speaker['bgp-speaker']['networks']
        self.assertEqual(1, len(network_list))
        self.assertIn(self.ext_net_id, network_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('6cfc7137-0d99-4a3d-826c-9d1a3a1767b0')
    def test_remove_gateway_network(self):
        """A removed gateway network disappears from the speaker's network list."""
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        networks = bgp_speaker['bgp-speaker']['networks']
        self.assertIn(self.ext_net_id, networks)
        self.admin_client.remove_bgp_gateway_network(bgp_speaker_id,
                                                     self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        network_list = bgp_speaker['bgp-speaker']['networks']
        self.assertFalse(network_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('5bef22ad-5e70-4f7b-937a-dc1944642996')
    def test_get_advertised_routes_null_address_scope(self):
        """No routes are advertised when no address scope binds anything."""
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(0, len(routes['advertised_routes']))

    @testtools.skip('bug/1553374')
    @test.idempotent_id('cae9cdb1-ad65-423c-9604-d4cd0073616e')
    def test_get_advertised_routes_floating_ips(self):
        """An associated floating IP is advertised as a /32 host route."""
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        tenant_net = self.create_network()
        tenant_subnet = self.create_subnet(tenant_net)
        ext_gw_info = {'network_id': self.ext_net_id}
        router = self.admin_client.create_router(
            'my-router',
            external_gateway_info=ext_gw_info,
            admin_state_up=True,
            distributed=False)
        self.admin_routers.append(router['router'])
        self.admin_client.add_router_interface_with_subnet_id(
            router['router']['id'],
            tenant_subnet['id'])
        self.admin_routerports.append({'router_id': router['router']['id'],
                                       'subnet_id': tenant_subnet['id']})
        tenant_port = self.create_port(tenant_net)
        floatingip = self.create_floatingip(self.ext_net_id)
        self.admin_floatingips.append(floatingip)
        self.client.update_floatingip(floatingip['id'],
                                      port_id=tenant_port['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(floatingip['floating_ip_address'] + '/32',
                         routes['advertised_routes'][0]['destination'])

    @testtools.skip('bug/1553374')
    @test.idempotent_id('c9ad566e-fe8f-4559-8303-bbad9062a30c')
    def test_get_advertised_routes_tenant_networks(self):
        """A tenant subnet sharing the gateway's address scope is advertised."""
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        addr_scope = self.create_address_scope('my-scope', ip_version=4)
        ext_net = self.create_shared_network(**{'router:external': True})
        tenant_net = self.create_network()
        ext_subnetpool = self.create_subnetpool(
            'test-pool-ext',
            is_admin=True,
            default_prefixlen=24,
            address_scope_id=addr_scope['id'],
            prefixes=['8.0.0.0/8'])
        tenant_subnetpool = self.create_subnetpool(
            'tenant-test-pool',
            default_prefixlen=25,
            address_scope_id=addr_scope['id'],
            prefixes=['10.10.0.0/16'])
        self.create_subnet({'id': ext_net['id']},
                           cidr=netaddr.IPNetwork('8.0.0.0/24'),
                           ip_version=4,
                           client=self.admin_client,
                           subnetpool_id=ext_subnetpool['id'])
        tenant_subnet = self.create_subnet(
            {'id': tenant_net['id']},
            cidr=netaddr.IPNetwork('10.10.0.0/24'),
            ip_version=4,
            subnetpool_id=tenant_subnetpool['id'])
        ext_gw_info = {'network_id': ext_net['id']}
        router = self.admin_client.create_router(
            'my-router',
            external_gateway_info=ext_gw_info,
            distributed=False)['router']
        self.admin_routers.append(router)
        self.admin_client.add_router_interface_with_subnet_id(
            router['id'],
            tenant_subnet['id'])
        self.admin_routerports.append({'router_id': router['id'],
                                       'subnet_id': tenant_subnet['id']})
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  ext_net['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(tenant_subnet['cidr'],
                         routes['advertised_routes'][0]['destination'])
        fixed_ip = router['external_gateway_info']['external_fixed_ips'][0]
        self.assertEqual(fixed_ip['ip_address'],
                         routes['advertised_routes'][0]['next_hop'])
| |
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Malach of the Dawn")
def malach_of_the_dawn(card, abilities):
def malach_of_the_dawn():
return AbilityNotImplemented
def malach_of_the_dawn():
return AbilityNotImplemented
return malach_of_the_dawn, malach_of_the_dawn,
@card("Temporal Extortion")
def temporal_extortion(card, abilities):
    """Ability stubs for Temporal Extortion (two abilities, unimplemented)."""
    def temporal_extortion():
        return AbilityNotImplemented
    # Both returned abilities are the same placeholder until implemented.
    return (temporal_extortion,) * 2
@card("Mana Tithe")
def mana_tithe(card, abilities):
    """Ability stub for Mana Tithe (single ability, unimplemented)."""
    def mana_tithe():
        return AbilityNotImplemented
    return (mana_tithe,)
@card("Damnation")
def damnation(card, abilities):
    """Ability stub for Damnation (single ability, unimplemented)."""
    def damnation():
        return AbilityNotImplemented
    return (damnation,)
@card("Timebender")
def timebender(card, abilities):
def timebender():
return AbilityNotImplemented
def timebender():
return AbilityNotImplemented
return timebender, timebender,
@card("Dead")
def dead(card, abilities):
def dead():
return AbilityNotImplemented
def dead():
return AbilityNotImplemented
return dead, dead,
@card("Piracy Charm")
def piracy_charm(card, abilities):
def piracy_charm():
return AbilityNotImplemented
return piracy_charm,
@card("Serra Sphinx")
def serra_sphinx(card, abilities):
def serra_sphinx():
return AbilityNotImplemented
return serra_sphinx,
@card("Body Double")
def body_double(card, abilities):
def body_double():
return AbilityNotImplemented
return body_double,
@card("Primal Plasma")
def primal_plasma(card, abilities):
def primal_plasma():
return AbilityNotImplemented
return primal_plasma,
@card("Shivan Wumpus")
def shivan_wumpus(card, abilities):
def shivan_wumpus():
return AbilityNotImplemented
def shivan_wumpus():
return AbilityNotImplemented
return shivan_wumpus, shivan_wumpus,
@card("Frenetic Sliver")
def frenetic_sliver(card, abilities):
def frenetic_sliver():
return AbilityNotImplemented
return frenetic_sliver,
@card("Magus of the Tabernacle")
def magus_of_the_tabernacle(card, abilities):
def magus_of_the_tabernacle():
return AbilityNotImplemented
return magus_of_the_tabernacle,
@card("Firefright Mage")
def firefright_mage(card, abilities):
def firefright_mage():
return AbilityNotImplemented
return firefright_mage,
@card("Dreamscape Artist")
def dreamscape_artist(card, abilities):
def dreamscape_artist():
return AbilityNotImplemented
return dreamscape_artist,
@card("Detritivore")
def detritivore(card, abilities):
    """Ability stubs for Detritivore (three abilities, unimplemented)."""
    def detritivore():
        return AbilityNotImplemented
    # A single placeholder stands in for all three abilities.
    return (detritivore,) * 3
@card("Bog Serpent")
def bog_serpent(card, abilities):
def bog_serpent():
return AbilityNotImplemented
def bog_serpent():
return AbilityNotImplemented
return bog_serpent, bog_serpent,
@card("Dormant Sliver")
def dormant_sliver(card, abilities):
def dormant_sliver():
return AbilityNotImplemented
def dormant_sliver():
return AbilityNotImplemented
return dormant_sliver, dormant_sliver,
@card("Shivan Meteor")
def shivan_meteor(card, abilities):
def shivan_meteor():
return AbilityNotImplemented
def shivan_meteor():
return AbilityNotImplemented
return shivan_meteor, shivan_meteor,
@card("Synchronous Sliver")
def synchronous_sliver(card, abilities):
def synchronous_sliver():
return AbilityNotImplemented
return synchronous_sliver,
@card("Shade of Trokair")
def shade_of_trokair(card, abilities):
def shade_of_trokair():
return AbilityNotImplemented
def shade_of_trokair():
return AbilityNotImplemented
return shade_of_trokair, shade_of_trokair,
@card("Mesa Enchantress")
def mesa_enchantress(card, abilities):
def mesa_enchantress():
return AbilityNotImplemented
return mesa_enchantress,
@card("Big Game Hunter")
def big_game_hunter(card, abilities):
def big_game_hunter():
return AbilityNotImplemented
def big_game_hunter():
return AbilityNotImplemented
return big_game_hunter, big_game_hunter,
@card("Hunting Wilds")
def hunting_wilds(card, abilities):
def hunting_wilds():
return AbilityNotImplemented
def hunting_wilds():
return AbilityNotImplemented
def hunting_wilds():
return AbilityNotImplemented
return hunting_wilds, hunting_wilds, hunting_wilds,
@card("Hedge Troll")
def hedge_troll(card, abilities):
def hedge_troll():
return AbilityNotImplemented
def hedge_troll():
return AbilityNotImplemented
return hedge_troll, hedge_troll,
@card("Braids, Conjurer Adept")
def braids_conjurer_adept(card, abilities):
def braids_conjurer_adept():
return AbilityNotImplemented
return braids_conjurer_adept,
@card("Spellshift")
def spellshift(card, abilities):
def spellshift():
return AbilityNotImplemented
return spellshift,
@card("Brain Gorgers")
def brain_gorgers(card, abilities):
def brain_gorgers():
return AbilityNotImplemented
def brain_gorgers():
return AbilityNotImplemented
return brain_gorgers, brain_gorgers,
@card("Pallid Mycoderm")
def pallid_mycoderm(card, abilities):
def pallid_mycoderm():
return AbilityNotImplemented
def pallid_mycoderm():
return AbilityNotImplemented
def pallid_mycoderm():
return AbilityNotImplemented
return pallid_mycoderm, pallid_mycoderm, pallid_mycoderm,
@card("Veiling Oddity")
def veiling_oddity(card, abilities):
def veiling_oddity():
return AbilityNotImplemented
def veiling_oddity():
return AbilityNotImplemented
return veiling_oddity, veiling_oddity,
@card("Prodigal Pyromancer")
def prodigal_pyromancer(card, abilities):
def prodigal_pyromancer():
return AbilityNotImplemented
return prodigal_pyromancer,
@card("Deadwood Treefolk")
def deadwood_treefolk(card, abilities):
def deadwood_treefolk():
return AbilityNotImplemented
def deadwood_treefolk():
return AbilityNotImplemented
return deadwood_treefolk, deadwood_treefolk,
@card("Calciderm")
def calciderm(card, abilities):
def calciderm():
return AbilityNotImplemented
def calciderm():
return AbilityNotImplemented
return calciderm, calciderm,
@card("Enslave")
def enslave(card, abilities):
def enslave():
return AbilityNotImplemented
def enslave():
return AbilityNotImplemented
def enslave():
return AbilityNotImplemented
return enslave, enslave, enslave,
@card("Groundbreaker")
def groundbreaker(card, abilities):
def groundbreaker():
return AbilityNotImplemented
def groundbreaker():
return AbilityNotImplemented
return groundbreaker, groundbreaker,
@card("Fa'adiyah Seer")
def faadiyah_seer(card, abilities):
def faadiyah_seer():
return AbilityNotImplemented
return faadiyah_seer,
@card("Battering Sliver")
def battering_sliver(card, abilities):
def battering_sliver():
return AbilityNotImplemented
return battering_sliver,
@card("Crovax, Ascendant Hero")
def crovax_ascendant_hero(card, abilities):
def crovax_ascendant_hero():
return AbilityNotImplemented
def crovax_ascendant_hero():
return AbilityNotImplemented
def crovax_ascendant_hero():
return AbilityNotImplemented
return crovax_ascendant_hero, crovax_ascendant_hero, crovax_ascendant_hero,
@card("Tidewalker")
def tidewalker(card, abilities):
def tidewalker():
return AbilityNotImplemented
def tidewalker():
return AbilityNotImplemented
def tidewalker():
return AbilityNotImplemented
return tidewalker, tidewalker, tidewalker,
@card("Shrouded Lore")
def shrouded_lore(card, abilities):
def shrouded_lore():
return AbilityNotImplemented
return shrouded_lore,
@card("Cradle to Grave")
def cradle_to_grave(card, abilities):
def cradle_to_grave():
return AbilityNotImplemented
return cradle_to_grave,
@card("Ana Battlemage")
def ana_battlemage(card, abilities):
def ana_battlemage():
return AbilityNotImplemented
def ana_battlemage():
return AbilityNotImplemented
def ana_battlemage():
return AbilityNotImplemented
return ana_battlemage, ana_battlemage, ana_battlemage,
@card("Mirri the Cursed")
def mirri_the_cursed(card, abilities):
def mirri_the_cursed():
return AbilityNotImplemented
def mirri_the_cursed():
return AbilityNotImplemented
return mirri_the_cursed, mirri_the_cursed,
@card("Imp's Mischief")
def imps_mischief(card, abilities):
def imps_mischief():
return AbilityNotImplemented
return imps_mischief,
@card("Heroes Remembered")
def heroes_remembered(card, abilities):
def heroes_remembered():
return AbilityNotImplemented
def heroes_remembered():
return AbilityNotImplemented
return heroes_remembered, heroes_remembered,
@card("Benalish Commander")
def benalish_commander(card, abilities):
def benalish_commander():
return AbilityNotImplemented
def benalish_commander():
return AbilityNotImplemented
def benalish_commander():
return AbilityNotImplemented
return benalish_commander, benalish_commander, benalish_commander,
@card("Dichotomancy")
def dichotomancy(card, abilities):
def dichotomancy():
return AbilityNotImplemented
def dichotomancy():
return AbilityNotImplemented
return dichotomancy, dichotomancy,
@card("Sinew Sliver")
def sinew_sliver(card, abilities):
def sinew_sliver():
return AbilityNotImplemented
return sinew_sliver,
@card("Aven Riftwatcher")
def aven_riftwatcher(card, abilities):
def aven_riftwatcher():
return AbilityNotImplemented
def aven_riftwatcher():
return AbilityNotImplemented
def aven_riftwatcher():
return AbilityNotImplemented
return aven_riftwatcher, aven_riftwatcher, aven_riftwatcher,
@card("Erratic Mutation")
def erratic_mutation(card, abilities):
def erratic_mutation():
return AbilityNotImplemented
return erratic_mutation,
@card("AEther Membrane")
def aether_membrane(card, abilities):
def aether_membrane():
return AbilityNotImplemented
def aether_membrane():
return AbilityNotImplemented
return aether_membrane, aether_membrane,
@card("Frozen AEther")
def frozen_aether(card, abilities):
def frozen_aether():
return AbilityNotImplemented
return frozen_aether,
@card("Stingscourger")
def stingscourger(card, abilities):
def stingscourger():
return AbilityNotImplemented
def stingscourger():
return AbilityNotImplemented
return stingscourger, stingscourger,
@card("Deadly Grub")
def deadly_grub(card, abilities):
def deadly_grub():
return AbilityNotImplemented
def deadly_grub():
return AbilityNotImplemented
return deadly_grub, deadly_grub,
@card("Serendib Sorcerer")
def serendib_sorcerer(card, abilities):
def serendib_sorcerer():
return AbilityNotImplemented
return serendib_sorcerer,
@card("Citanul Woodreaders")
def citanul_woodreaders(card, abilities):
def citanul_woodreaders():
return AbilityNotImplemented
def citanul_woodreaders():
return AbilityNotImplemented
return citanul_woodreaders, citanul_woodreaders,
@card("Gaea's Anthem")
def gaeas_anthem(card, abilities):
def gaeas_anthem():
return AbilityNotImplemented
return gaeas_anthem,
@card("Boom")
def boom(card, abilities):
def boom():
return AbilityNotImplemented
def boom():
return AbilityNotImplemented
return boom, boom,
@card("Blood Knight")
def blood_knight(card, abilities):
def blood_knight():
return AbilityNotImplemented
return blood_knight,
@card("Mantle of Leadership")
def mantle_of_leadership(card, abilities):
def mantle_of_leadership():
return AbilityNotImplemented
def mantle_of_leadership():
return AbilityNotImplemented
def mantle_of_leadership():
return AbilityNotImplemented
return mantle_of_leadership, mantle_of_leadership, mantle_of_leadership,
@card("Timbermare")
def timbermare(card, abilities):
def timbermare():
return AbilityNotImplemented
def timbermare():
return AbilityNotImplemented
def timbermare():
return AbilityNotImplemented
return timbermare, timbermare, timbermare,
@card("Darkheart Sliver")
def darkheart_sliver(card, abilities):
def darkheart_sliver():
return AbilityNotImplemented
return darkheart_sliver,
@card("Molten Firebird")
def molten_firebird(card, abilities):
def molten_firebird():
return AbilityNotImplemented
def molten_firebird():
return AbilityNotImplemented
def molten_firebird():
return AbilityNotImplemented
return molten_firebird, molten_firebird, molten_firebird,
@card("Phantasmagorian")
def phantasmagorian(card, abilities):
def phantasmagorian():
return AbilityNotImplemented
def phantasmagorian():
return AbilityNotImplemented
return phantasmagorian, phantasmagorian,
@card("Saltfield Recluse")
def saltfield_recluse(card, abilities):
def saltfield_recluse():
return AbilityNotImplemented
return saltfield_recluse,
@card("Jedit Ojanen of Efrava")
def jedit_ojanen_of_efrava(card, abilities):
def jedit_ojanen_of_efrava():
return AbilityNotImplemented
def jedit_ojanen_of_efrava():
return AbilityNotImplemented
return jedit_ojanen_of_efrava, jedit_ojanen_of_efrava,
@card("Aeon Chronicler")
def aeon_chronicler(card, abilities):
def aeon_chronicler():
return AbilityNotImplemented
def aeon_chronicler():
return AbilityNotImplemented
def aeon_chronicler():
return AbilityNotImplemented
return aeon_chronicler, aeon_chronicler, aeon_chronicler,
@card("Riftmarked Knight")
def riftmarked_knight(card, abilities):
def riftmarked_knight():
return AbilityNotImplemented
def riftmarked_knight():
return AbilityNotImplemented
def riftmarked_knight():
return AbilityNotImplemented
return riftmarked_knight, riftmarked_knight, riftmarked_knight,
@card("Ovinize")
def ovinize(card, abilities):
def ovinize():
return AbilityNotImplemented
return ovinize,
@card("Blightspeaker")
def blightspeaker(card, abilities):
def blightspeaker():
return AbilityNotImplemented
def blightspeaker():
return AbilityNotImplemented
return blightspeaker, blightspeaker,
@card("Chronozoa")
def chronozoa(card, abilities):
def chronozoa():
return AbilityNotImplemented
def chronozoa():
return AbilityNotImplemented
def chronozoa():
return AbilityNotImplemented
return chronozoa, chronozoa, chronozoa,
@card("Fury Charm")
def fury_charm(card, abilities):
def fury_charm():
return AbilityNotImplemented
return fury_charm,
@card("Stormfront Riders")
def stormfront_riders(card, abilities):
def stormfront_riders():
return AbilityNotImplemented
def stormfront_riders():
return AbilityNotImplemented
def stormfront_riders():
return AbilityNotImplemented
return stormfront_riders, stormfront_riders, stormfront_riders,
@card("Fatal Frenzy")
def fatal_frenzy(card, abilities):
def fatal_frenzy():
return AbilityNotImplemented
return fatal_frenzy,
@card("Ridged Kusite")
def ridged_kusite(card, abilities):
def ridged_kusite():
return AbilityNotImplemented
return ridged_kusite,
@card("Circle of Affliction")
def circle_of_affliction(card, abilities):
def circle_of_affliction():
return AbilityNotImplemented
def circle_of_affliction():
return AbilityNotImplemented
return circle_of_affliction, circle_of_affliction,
@card("Akroma, Angel of Fury")
def akroma_angel_of_fury(card, abilities):
    """Ability stubs for Akroma, Angel of Fury (four abilities, unimplemented)."""
    def akroma_angel_of_fury():
        return AbilityNotImplemented
    # All four abilities share the stub until each is implemented.
    return (akroma_angel_of_fury,) * 4
@card("Pouncing Wurm")
def pouncing_wurm(card, abilities):
def pouncing_wurm():
return AbilityNotImplemented
def pouncing_wurm():
return AbilityNotImplemented
return pouncing_wurm, pouncing_wurm,
@card("Poultice Sliver")
def poultice_sliver(card, abilities):
def poultice_sliver():
return AbilityNotImplemented
return poultice_sliver,
@card("Midnight Charm")
def midnight_charm(card, abilities):
def midnight_charm():
return AbilityNotImplemented
return midnight_charm,
@card("Vorosh, the Hunter")
def vorosh_the_hunter(card, abilities):
def vorosh_the_hunter():
return AbilityNotImplemented
def vorosh_the_hunter():
return AbilityNotImplemented
return vorosh_the_hunter, vorosh_the_hunter,
@card("Radha, Heir to Keld")
def radha_heir_to_keld(card, abilities):
def radha_heir_to_keld():
return AbilityNotImplemented
def radha_heir_to_keld():
return AbilityNotImplemented
return radha_heir_to_keld, radha_heir_to_keld,
@card("Dunerider Outlaw")
def dunerider_outlaw(card, abilities):
def dunerider_outlaw():
return AbilityNotImplemented
def dunerider_outlaw():
return AbilityNotImplemented
return dunerider_outlaw, dunerider_outlaw,
@card("Dismal Failure")
def dismal_failure(card, abilities):
def dismal_failure():
return AbilityNotImplemented
return dismal_failure,
@card("Dust Corona")
def dust_corona(card, abilities):
def dust_corona():
return AbilityNotImplemented
def dust_corona():
return AbilityNotImplemented
return dust_corona, dust_corona,
@card("Treacherous Urge")
def treacherous_urge(card, abilities):
def treacherous_urge():
return AbilityNotImplemented
return treacherous_urge,
@card("Torchling")
def torchling(card, abilities):
    """Ability stubs for Torchling (five abilities, unimplemented)."""
    def torchling():
        return AbilityNotImplemented
    # All five abilities share the placeholder until implemented.
    return (torchling,) * 5
@card("Evolution Charm")
def evolution_charm(card, abilities):
def evolution_charm():
return AbilityNotImplemented
return evolution_charm,
@card("Pongify")
def pongify(card, abilities):
def pongify():
return AbilityNotImplemented
return pongify,
@card("Magus of the Arena")
def magus_of_the_arena(card, abilities):
def magus_of_the_arena():
return AbilityNotImplemented
return magus_of_the_arena,
@card("Reflex Sliver")
def reflex_sliver(card, abilities):
def reflex_sliver():
return AbilityNotImplemented
return reflex_sliver,
@card("Needlepeak Spider")
def needlepeak_spider(card, abilities):
def needlepeak_spider():
return AbilityNotImplemented
return needlepeak_spider,
@card("Whitemane Lion")
def whitemane_lion(card, abilities):
def whitemane_lion():
return AbilityNotImplemented
def whitemane_lion():
return AbilityNotImplemented
return whitemane_lion, whitemane_lion,
@card("Volcano Hellion")
def volcano_hellion(card, abilities):
def volcano_hellion():
return AbilityNotImplemented
def volcano_hellion():
return AbilityNotImplemented
return volcano_hellion, volcano_hellion,
@card("Skirk Shaman")
def skirk_shaman(card, abilities):
def skirk_shaman():
return AbilityNotImplemented
return skirk_shaman,
@card("Wild Pair")
def wild_pair(card, abilities):
def wild_pair():
return AbilityNotImplemented
return wild_pair,
@card("Cautery Sliver")
def cautery_sliver(card, abilities):
def cautery_sliver():
return AbilityNotImplemented
def cautery_sliver():
return AbilityNotImplemented
return cautery_sliver, cautery_sliver,
@card("Melancholy")
def melancholy(card, abilities):
    """Ability stubs for Melancholy (four abilities, unimplemented)."""
    def melancholy():
        return AbilityNotImplemented
    # One shared stub covers all four abilities until implemented.
    return (melancholy,) * 4
@card("Shaper Parasite")
def shaper_parasite(card, abilities):
def shaper_parasite():
return AbilityNotImplemented
def shaper_parasite():
return AbilityNotImplemented
return shaper_parasite, shaper_parasite,
@card("Null Profusion")
def null_profusion(card, abilities):
def null_profusion():
return AbilityNotImplemented
def null_profusion():
return AbilityNotImplemented
def null_profusion():
return AbilityNotImplemented
return null_profusion, null_profusion, null_profusion,
@card("Dash Hopes")
def dash_hopes(card, abilities):
def dash_hopes():
return AbilityNotImplemented
def dash_hopes():
return AbilityNotImplemented
return dash_hopes, dash_hopes,
@card("Utopia Vow")
def utopia_vow(card, abilities):
def utopia_vow():
return AbilityNotImplemented
def utopia_vow():
return AbilityNotImplemented
def utopia_vow():
return AbilityNotImplemented
return utopia_vow, utopia_vow, utopia_vow,
@card("Numot, the Devastator")
def numot_the_devastator(card, abilities):
def numot_the_devastator():
return AbilityNotImplemented
def numot_the_devastator():
return AbilityNotImplemented
return numot_the_devastator, numot_the_devastator,
@card("Reckless Wurm")
def reckless_wurm(card, abilities):
def reckless_wurm():
return AbilityNotImplemented
def reckless_wurm():
return AbilityNotImplemented
return reckless_wurm, reckless_wurm,
@card("Roiling Horror")
def roiling_horror(card, abilities):
def roiling_horror():
return AbilityNotImplemented
def roiling_horror():
return AbilityNotImplemented
def roiling_horror():
return AbilityNotImplemented
return roiling_horror, roiling_horror, roiling_horror,
@card("Auramancer's Guise")
def auramancers_guise(card, abilities):
def auramancers_guise():
return AbilityNotImplemented
def auramancers_guise():
return AbilityNotImplemented
return auramancers_guise, auramancers_guise,
@card("Vitaspore Thallid")
def vitaspore_thallid(card, abilities):
def vitaspore_thallid():
return AbilityNotImplemented
def vitaspore_thallid():
return AbilityNotImplemented
def vitaspore_thallid():
return AbilityNotImplemented
return vitaspore_thallid, vitaspore_thallid, vitaspore_thallid,
@card("Gossamer Phantasm")
def gossamer_phantasm(card, abilities):
def gossamer_phantasm():
return AbilityNotImplemented
def gossamer_phantasm():
return AbilityNotImplemented
return gossamer_phantasm, gossamer_phantasm,
@card("Sophic Centaur")
def sophic_centaur(card, abilities):
def sophic_centaur():
return AbilityNotImplemented
return sophic_centaur,
@card("Urborg, Tomb of Yawgmoth")
def urborg_tomb_of_yawgmoth(card, abilities):
    """Ability stub for Urborg, Tomb of Yawgmoth (single ability, unimplemented)."""
    def urborg_tomb_of_yawgmoth():
        return AbilityNotImplemented
    return (urborg_tomb_of_yawgmoth,)
@card("Dawn Charm")
def dawn_charm(card, abilities):
def dawn_charm():
return AbilityNotImplemented
return dawn_charm,
@card("Hammerheim Deadeye")
def hammerheim_deadeye(card, abilities):
def hammerheim_deadeye():
return AbilityNotImplemented
def hammerheim_deadeye():
return AbilityNotImplemented
return hammerheim_deadeye, hammerheim_deadeye,
@card("Voidstone Gargoyle")
def voidstone_gargoyle(card, abilities):
def voidstone_gargoyle():
return AbilityNotImplemented
def voidstone_gargoyle():
return AbilityNotImplemented
def voidstone_gargoyle():
return AbilityNotImplemented
def voidstone_gargoyle():
return AbilityNotImplemented
return voidstone_gargoyle, voidstone_gargoyle, voidstone_gargoyle, voidstone_gargoyle,
@card("Wistful Thinking")
def wistful_thinking(card, abilities):
def wistful_thinking():
return AbilityNotImplemented
return wistful_thinking,
@card("Aquamorph Entity")
def aquamorph_entity(card, abilities):
def aquamorph_entity():
return AbilityNotImplemented
def aquamorph_entity():
return AbilityNotImplemented
return aquamorph_entity, aquamorph_entity,
@card("Waning Wurm")
def waning_wurm(card, abilities):
def waning_wurm():
return AbilityNotImplemented
return waning_wurm,
@card("Ghost Tactician")
def ghost_tactician(card, abilities):
def ghost_tactician():
return AbilityNotImplemented
return ghost_tactician,
@card("Simian Spirit Guide")
def simian_spirit_guide(card, abilities):
def simian_spirit_guide():
return AbilityNotImplemented
return simian_spirit_guide,
@card("Merfolk Thaumaturgist")
def merfolk_thaumaturgist(card, abilities):
def merfolk_thaumaturgist():
return AbilityNotImplemented
return merfolk_thaumaturgist,
@card("Magus of the Bazaar")
def magus_of_the_bazaar(card, abilities):
def magus_of_the_bazaar():
return AbilityNotImplemented
return magus_of_the_bazaar,
@card("Pyrohemia")
def pyrohemia(card, abilities):
def pyrohemia():
return AbilityNotImplemented
def pyrohemia():
return AbilityNotImplemented
return pyrohemia, pyrohemia,
@card("Mire Boa")
def mire_boa(card, abilities):
def mire_boa():
return AbilityNotImplemented
def mire_boa():
return AbilityNotImplemented
return mire_boa, mire_boa,
@card("Bust")
def bust(card, abilities):
def bust():
return AbilityNotImplemented
def bust():
return AbilityNotImplemented
return bust, bust,
@card("Kor Dirge")
def kor_dirge(card, abilities):
def kor_dirge():
return AbilityNotImplemented
return kor_dirge,
@card("Psychotrope Thallid")
def psychotrope_thallid(card, abilities):
def psychotrope_thallid():
return AbilityNotImplemented
def psychotrope_thallid():
return AbilityNotImplemented
def psychotrope_thallid():
return AbilityNotImplemented
return psychotrope_thallid, psychotrope_thallid, psychotrope_thallid,
@card("Riptide Pilferer")
def riptide_pilferer(card, abilities):
def riptide_pilferer():
return AbilityNotImplemented
def riptide_pilferer():
return AbilityNotImplemented
return riptide_pilferer, riptide_pilferer,
@card("Porphyry Nodes")
def porphyry_nodes(card, abilities):
def porphyry_nodes():
return AbilityNotImplemented
def porphyry_nodes():
return AbilityNotImplemented
return porphyry_nodes, porphyry_nodes,
@card("Essence Warden")
def essence_warden(card, abilities):
def essence_warden():
return AbilityNotImplemented
return essence_warden,
@card("Sunlance")
def sunlance(card, abilities):
def sunlance():
return AbilityNotImplemented
return sunlance,
@card("Healing Leaves")
def healing_leaves(card, abilities):
def healing_leaves():
return AbilityNotImplemented
return healing_leaves,
@card("Magus of the Coffers")
def magus_of_the_coffers(card, abilities):
def magus_of_the_coffers():
return AbilityNotImplemented
return magus_of_the_coffers,
@card("Rebuff the Wicked")
def rebuff_the_wicked(card, abilities):
def rebuff_the_wicked():
return AbilityNotImplemented
return rebuff_the_wicked,
@card("Magus of the Library")
def magus_of_the_library(card, abilities):
def magus_of_the_library():
return AbilityNotImplemented
def magus_of_the_library():
return AbilityNotImplemented
return magus_of_the_library, magus_of_the_library,
@card("Seal of Primordium")
def seal_of_primordium(card, abilities):
def seal_of_primordium():
return AbilityNotImplemented
return seal_of_primordium,
@card("Intet, the Dreamer")
def intet_the_dreamer(card, abilities):
def intet_the_dreamer():
return AbilityNotImplemented
def intet_the_dreamer():
return AbilityNotImplemented
return intet_the_dreamer, intet_the_dreamer,
@card("Muck Drubb")
def muck_drubb(card, abilities):
def muck_drubb():
return AbilityNotImplemented
def muck_drubb():
return AbilityNotImplemented
def muck_drubb():
return AbilityNotImplemented
return muck_drubb, muck_drubb, muck_drubb,
@card("Spitting Sliver")
def spitting_sliver(card, abilities):
def spitting_sliver():
return AbilityNotImplemented
return spitting_sliver,
@card("Dust Elemental")
def dust_elemental(card, abilities):
def dust_elemental():
return AbilityNotImplemented
def dust_elemental():
return AbilityNotImplemented
def dust_elemental():
return AbilityNotImplemented
return dust_elemental, dust_elemental, dust_elemental,
@card("Harmonize")
def harmonize(card, abilities):
def harmonize():
return AbilityNotImplemented
return harmonize,
@card("Rough")
def rough(card, abilities):
def rough():
return AbilityNotImplemented
def rough():
return AbilityNotImplemented
return rough, rough,
@card("Keldon Marauders")
def keldon_marauders(card, abilities):
def keldon_marauders():
return AbilityNotImplemented
def keldon_marauders():
return AbilityNotImplemented
return keldon_marauders, keldon_marauders,
@card("Lavacore Elemental")
def lavacore_elemental(card, abilities):
def lavacore_elemental():
return AbilityNotImplemented
def lavacore_elemental():
return AbilityNotImplemented
return lavacore_elemental, lavacore_elemental,
@card("Oros, the Avenger")
def oros_the_avenger(card, abilities):
def oros_the_avenger():
return AbilityNotImplemented
def oros_the_avenger():
return AbilityNotImplemented
return oros_the_avenger, oros_the_avenger,
@card("Life and Limb")
def life_and_limb(card, abilities):
def life_and_limb():
return AbilityNotImplemented
return life_and_limb,
@card("Keen Sense")
def keen_sense(card, abilities):
def keen_sense():
return AbilityNotImplemented
def keen_sense():
return AbilityNotImplemented
return keen_sense, keen_sense,
@card("Revered Dead")
def revered_dead(card, abilities):
def revered_dead():
return AbilityNotImplemented
return revered_dead,
@card("Gone")
def gone(card, abilities):
def gone():
return AbilityNotImplemented
def gone():
return AbilityNotImplemented
return gone, gone,
@card("Tumble")
def tumble(card, abilities):
def tumble():
return AbilityNotImplemented
def tumble():
return AbilityNotImplemented
return tumble, tumble,
@card("Stonecloaker")
def stonecloaker(card, abilities):
    """Ability stubs for Stonecloaker (four abilities, unimplemented)."""
    def stonecloaker():
        return AbilityNotImplemented
    # One placeholder is returned for each of the four abilities.
    return (stonecloaker,) * 4
@card("Vampiric Link")
def vampiric_link(card, abilities):
def vampiric_link():
return AbilityNotImplemented
def vampiric_link():
return AbilityNotImplemented
return vampiric_link, vampiric_link,
@card("Teneb, the Harvester")
def teneb_the_harvester(card, abilities):
def teneb_the_harvester():
return AbilityNotImplemented
def teneb_the_harvester():
return AbilityNotImplemented
return teneb_the_harvester, teneb_the_harvester,
@card("Extirpate")
def extirpate(card, abilities):
def extirpate():
return AbilityNotImplemented
def extirpate():
return AbilityNotImplemented
return extirpate, extirpate,
@card("Jodah's Avenger")
def jodahs_avenger(card, abilities):
def jodahs_avenger():
return AbilityNotImplemented
return jodahs_avenger,
@card("Mycologist")
def mycologist(card, abilities):
def mycologist():
return AbilityNotImplemented
def mycologist():
return AbilityNotImplemented
def mycologist():
return AbilityNotImplemented
return mycologist, mycologist, mycologist,
@card("Venarian Glimmer")
def venarian_glimmer(card, abilities):
def venarian_glimmer():
return AbilityNotImplemented
return venarian_glimmer,
@card("Brute Force")
def brute_force(card, abilities):
def brute_force():
return AbilityNotImplemented
return brute_force,
@card("Uktabi Drake")
def uktabi_drake(card, abilities):
def uktabi_drake():
return AbilityNotImplemented
def uktabi_drake():
return AbilityNotImplemented
return uktabi_drake, uktabi_drake,
@card("Timecrafting")
def timecrafting(card, abilities):
def timecrafting():
return AbilityNotImplemented
return timecrafting,
@card("Reality Acid")
def reality_acid(card, abilities):
def reality_acid():
return AbilityNotImplemented
def reality_acid():
return AbilityNotImplemented
def reality_acid():
return AbilityNotImplemented
return reality_acid, reality_acid, reality_acid,
@card("Necrotic Sliver")
def necrotic_sliver(card, abilities):
def necrotic_sliver():
return AbilityNotImplemented
return necrotic_sliver,
@card("Serra's Boon")
def serras_boon(card, abilities):
def serras_boon():
return AbilityNotImplemented
def serras_boon():
return AbilityNotImplemented
return serras_boon, serras_boon,
@card("Saltblast")
def saltblast(card, abilities):
def saltblast():
return AbilityNotImplemented
return saltblast,
@card("Retether")
def retether(card, abilities):
def retether():
return AbilityNotImplemented
return retether,
@card("Kavu Predator")
def kavu_predator(card, abilities):
def kavu_predator():
return AbilityNotImplemented
def kavu_predator():
return AbilityNotImplemented
return kavu_predator, kavu_predator,
@card("Fungal Behemoth")
def fungal_behemoth(card, abilities):
def fungal_behemoth():
return AbilityNotImplemented
def fungal_behemoth():
return AbilityNotImplemented
def fungal_behemoth():
return AbilityNotImplemented
return fungal_behemoth, fungal_behemoth, fungal_behemoth,
@card("Giant Dustwasp")
def giant_dustwasp(card, abilities):
def giant_dustwasp():
return AbilityNotImplemented
def giant_dustwasp():
return AbilityNotImplemented
return giant_dustwasp, giant_dustwasp,
@card("Sulfur Elemental")
def sulfur_elemental(card, abilities):
def sulfur_elemental():
return AbilityNotImplemented
def sulfur_elemental():
return AbilityNotImplemented
def sulfur_elemental():
return AbilityNotImplemented
return sulfur_elemental, sulfur_elemental, sulfur_elemental,
@card("Rathi Trapper")
def rathi_trapper(card, abilities):
    """Ability stub for Rathi Trapper (single ability, unimplemented)."""
    def rathi_trapper():
        return AbilityNotImplemented
    return (rathi_trapper,)
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from urllib import unquote
import time
from swift.common.utils import public, csv_append, Timestamp
from swift.common.constraints import check_metadata
from swift.common import constraints
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, clear_info_cache
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPNotFound
class ContainerController(Controller):
    """WSGI controller for container requests"""
    server_type = 'Container'
    # Ensure these are all lowercase
    # Headers copied verbatim from the client request onto backend requests.
    pass_through_headers = ['x-container-read', 'x-container-write',
                            'x-container-sync-key', 'x-container-sync-to',
                            'x-versions-location']
    def __init__(self, app, account_name, container_name, **kwargs):
        # Account/container names arrive URL-encoded in the request path.
        Controller.__init__(self, app)
        self.account_name = unquote(account_name)
        self.container_name = unquote(container_name)
    def _x_remove_headers(self):
        """Return the 'x-remove-...' header names this controller honors."""
        st = self.server_type.lower()
        return ['x-remove-%s-read' % st,
                'x-remove-%s-write' % st,
                'x-remove-versions-location']
    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).
        :param req: incoming request
        :returns: policy index as an int, or None if no X-Storage-Policy
                  header was supplied
        :raises HTTPBadRequest: if the named policy is unknown or deprecated
        """
        policy_name = req.headers.get('X-Storage-Policy')
        if not policy_name:
            return
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid %s '%s'"
                                       % ('X-Storage-Policy', policy_name)))
        if policy.is_deprecated:
            body = 'Storage Policy %r is deprecated' % (policy.name)
            raise HTTPBadRequest(request=req, body=body)
        return int(policy)
    def clean_acls(self, req):
        """Canonicalize container ACL headers through the auth middleware's
        'swift.clean_acl' callback, if one is installed.

        :returns: an HTTPBadRequest response on invalid ACLs, else None
        """
        if 'swift.clean_acl' in req.environ:
            for header in ('x-container-read', 'x-container-write'):
                if header in req.headers:
                    try:
                        req.headers[header] = \
                            req.environ['swift.clean_acl'](header,
                                                           req.headers[header])
                    except ValueError as err:
                        return HTTPBadRequest(request=req, body=str(err))
        return None
    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests."""
        # 404 early if the account itself does not exist.
        if not self.account_info(self.account_name, req)[1]:
            return HTTPNotFound(request=req)
        part = self.app.container_ring.get_part(
            self.account_name, self.container_name)
        resp = self.GETorHEAD_base(
            req, _('Container'), self.app.container_ring, part,
            req.swift_entity_path)
        if 'swift.authorize' in req.environ:
            # The container's read ACL governs GET/HEAD authorization.
            req.acl = resp.headers.get('x-container-read')
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not req.environ.get('swift_owner', False):
            # Strip owner-only headers from responses to non-owners.
            for key in self.app.swift_owner_headers:
                if key in resp.headers:
                    del resp.headers[key]
        return resp
    @public
    @delay_denial
    @cors_validation
    def GET(self, req):
        """Handler for HTTP GET requests."""
        return self.GETorHEAD(req)
    @public
    @delay_denial
    @cors_validation
    def HEAD(self, req):
        """Handler for HTTP HEAD requests."""
        return self.GETorHEAD(req)
    @public
    @cors_validation
    def PUT(self, req):
        """HTTP PUT request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        policy_index = self._convert_policy_to_index(req)
        if not req.environ.get('swift_owner'):
            # Non-owners may not set owner-only headers.
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        if len(self.container_name) > constraints.MAX_CONTAINER_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Container name length of %d longer than %d' % \
                        (len(self.container_name),
                         constraints.MAX_CONTAINER_NAME_LENGTH)
            return resp
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts and self.app.account_autocreate:
            # Create the account on demand, then re-fetch its info.
            self.autocreate_account(req, self.account_name)
            account_partition, accounts, container_count = \
                self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        if self.app.max_containers_per_account > 0 and \
                container_count >= self.app.max_containers_per_account and \
                self.account_name not in self.app.max_containers_whitelist:
            # Over the per-account container quota: only allow the PUT when
            # the container already exists (i.e. this is an update).
            container_info = \
                self.container_info(self.account_name, self.container_name,
                                    req)
            if not is_success(container_info.get('status')):
                resp = HTTPForbidden(request=req)
                resp.body = 'Reached container limit of %s' % \
                    self.app.max_containers_per_account
                return resp
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts,
                                         policy_index)
        # Drop any cached info for this container before it changes.
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring,
            container_partition, 'PUT', req.swift_entity_path, headers)
        return resp
    @public
    @cors_validation
    def POST(self, req):
        """HTTP POST request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        if not req.environ.get('swift_owner'):
            # Non-owners may not set owner-only headers.
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self.generate_request_headers(req, transfer=True)
        # Drop any cached info for this container before it changes.
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'POST',
            req.swift_entity_path, [headers] * len(containers))
        return resp
    @public
    @cors_validation
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts)
        # Drop any cached info for this container before it changes.
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'DELETE',
            req.swift_entity_path, headers)
        # Indicates no server had the container
        if resp.status_int == HTTP_ACCEPTED:
            return HTTPNotFound(request=req)
        return resp
    def _backend_requests(self, req, n_outgoing, account_partition, accounts,
                          policy_index=None):
        """Build the per-node header dicts for PUT/DELETE backend fan-out.

        :param req: the client request
        :param n_outgoing: number of backend container requests to prepare
        :param account_partition: partition of the owning account
        :param accounts: account node dicts to spread across the requests
        :param policy_index: storage policy index to send, or None to send
                             the cluster default instead
        :returns: a list of ``n_outgoing`` header dicts
        """
        additional = {'X-Timestamp': Timestamp(time.time()).internal}
        if policy_index is None:
            additional['X-Backend-Storage-Policy-Default'] = \
                int(POLICIES.default)
        else:
            additional['X-Backend-Storage-Policy-Index'] = str(policy_index)
        headers = [self.generate_request_headers(req, transfer=True,
                                                 additional=additional)
                   for _junk in range(n_outgoing)]
        # Spread the account replicas round-robin across the outgoing
        # container requests so each container server updates some of them.
        for i, account in enumerate(accounts):
            i = i % len(headers)
            headers[i]['X-Account-Partition'] = account_partition
            headers[i]['X-Account-Host'] = csv_append(
                headers[i].get('X-Account-Host'),
                '%(ip)s:%(port)s' % account)
            headers[i]['X-Account-Device'] = csv_append(
                headers[i].get('X-Account-Device'),
                account['device'])
        return headers
| |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import re
import subprocess
import sys
class TestFailedError(Exception):
    """Raised when an incremental-parse test cannot be prepared or run."""
    pass
def escapeCmdArg(arg):
    """Quote *arg* for display when it contains spaces or double quotes."""
    needs_quoting = '"' in arg or ' ' in arg
    if not needs_quoting:
        return arg
    escaped = arg.replace('"', '\\"')
    return '"%s"' % escaped
def run_command(cmd):
    """Echo *cmd* and run it, returning its combined stdout/stderr output."""
    print(' '.join(escapeCmdArg(part) for part in cmd))
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def parseLine(line, line_no, test_case, incremental_edit_args, reparse_args,
              current_reparse_start):
    """Strip the edit/reparse markup for *test_case* from one template line.

    Returns a (pre_edit_line, post_edit_line, current_reparse_start) tuple:
    the line text for the pre-edit and post-edit files, and the (possibly
    updated) [line, column] where an open <reparse> region started, or None.
    -incremental-edit and -reparse-region arguments for swift-syntax-test
    are appended in place to *incremental_edit_args* and *reparse_args*.

    Raises TestFailedError on unbalanced <reparse>/</reparse> tags.
    """
    pre_edit_line = ""
    post_edit_line = ""
    # We parse one tag at a time in the line while eating away a prefix of the
    # line
    while line:
        # The regular expression to match the template markers
        # <<test_case<pre|||post>>> and <reparse name>/</reparse name>.
        subst_re = re.compile(r'^(.*?)<<(.*?)<(.*?)\|\|\|(.*?)>>>(.*\n?)')
        reparse_re = re.compile(r'^(.*?)<(/?)reparse ?(.*?)>(.*\n?)')
        subst_match = subst_re.match(line)
        reparse_match = reparse_re.match(line)
        if subst_match and reparse_match:
            # If both regex match use the one with the shorter prefix
            if len(subst_match.group(1)) < len(reparse_match.group(1)):
                reparse_match = None
            else:
                subst_match = None
        if subst_match:
            prefix = subst_match.group(1)
            match_test_case = subst_match.group(2)
            pre_edit = subst_match.group(3)
            post_edit = subst_match.group(4)
            suffix = subst_match.group(5)
            if match_test_case == test_case:
                # Compute the -incremental-edit argument for swift-syntax-test
                # Columns are 1-based, hence the +1.
                column = len(pre_edit_line) + len(prefix) + 1
                edit_arg = '%d:%d-%d:%d=%s' % \
                    (line_no, column, line_no, column + len(pre_edit),
                     post_edit)
                incremental_edit_args.append('-incremental-edit')
                incremental_edit_args.append(edit_arg)
                pre_edit_line += prefix + pre_edit
                post_edit_line += prefix + post_edit
            else:
                # For different test cases just take the pre-edit text
                pre_edit_line += prefix + pre_edit
                post_edit_line += prefix + pre_edit
            line = suffix
        elif reparse_match:
            prefix = reparse_match.group(1)
            is_closing = len(reparse_match.group(2)) > 0
            match_test_case = reparse_match.group(3)
            suffix = reparse_match.group(4)
            if match_test_case == test_case:
                # Reparse regions are located in the post-edit file.
                column = len(post_edit_line) + len(prefix) + 1
                if is_closing:
                    if not current_reparse_start:
                        raise TestFailedError('Closing unopened reparse tag '
                                              'in line %d' % line_no)
                    reparse_args.append('-reparse-region')
                    reparse_args.append(
                        '%d:%d-%d:%d' % (current_reparse_start[0],
                                         current_reparse_start[1],
                                         line_no, column))
                    current_reparse_start = None
                else:
                    if current_reparse_start:
                        raise TestFailedError('Opening nested reparse tags '
                                              'for the same test case in line '
                                              '%d' % line_no)
                    current_reparse_start = [line_no, column]
            # The reparse tag itself never appears in either output file.
            pre_edit_line += prefix
            post_edit_line += prefix
            line = suffix
        else:
            pre_edit_line += line
            post_edit_line += line
            # Nothing more to do
            line = ''
    return (pre_edit_line, post_edit_line, current_reparse_start)
def prepareForIncrParse(test_file, test_case, pre_edit_file, post_edit_file,
                        incremental_edit_args, reparse_args):
    """Generate the pre-edit and post-edit Swift files for *test_case*.

    Reads *test_file* line by line, strips the edit markup for *test_case*
    and writes the resulting versions to *pre_edit_file* and
    *post_edit_file*.  The -incremental-edit and -reparse-region arguments
    for swift-syntax-test are appended to *incremental_edit_args* and
    *reparse_args* in place.

    Raises TestFailedError if a <reparse> tag is left unclosed.
    """
    # parseLine produces str lines, so the output files must be opened in
    # text mode.  The original 'w+b' (binary) mode raised a TypeError on
    # Python 3 as soon as the first str line was written.
    with open(test_file, mode='r') as test_file_handle, \
            open(pre_edit_file, mode='w') as pre_edit_file_handle, \
            open(post_edit_file, mode='w') as post_edit_file_handle:
        current_reparse_start = None
        line_no = 1
        for line in test_file_handle.readlines():
            parseLineRes = parseLine(line, line_no, test_case,
                                     incremental_edit_args,
                                     reparse_args, current_reparse_start)
            (pre_edit_line, post_edit_line, current_reparse_start) = \
                parseLineRes
            pre_edit_file_handle.write(pre_edit_line)
            post_edit_file_handle.write(post_edit_line)
            line_no += 1
        if current_reparse_start:
            raise TestFailedError('Unclosed reparse tag for test case %s' %
                                  test_case)
def serializeIncrParseMarkupFile(test_file, test_case, mode,
                                 serialization_mode, serialization_format,
                                 omit_node_ids, output_file, temp_dir,
                                 swift_syntax_test, print_visual_reuse_info):
    """Run swift-syntax-test over the markup template and serialize a tree.

    Generates the pre-edit/post-edit files for *test_case* into *temp_dir*,
    then invokes *swift_syntax_test* according to *mode* ('pre-edit',
    'post-edit' or 'incremental') and writes the serialized syntax tree to
    *output_file*.

    Raises TestFailedError if swift-syntax-test exits with an error, and
    ValueError on an unknown mode/serialization option.
    """
    test_file_name = os.path.basename(test_file)
    pre_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
        '.pre.swift'
    post_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
        '.post.swift'
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    # =========================================================================
    # First generate the pre-edit and post-edit Swift file and gather the edits
    # and expected reparse regions. This is the parser for the special edit
    # markup for testing incremental parsing
    # =========================================================================
    # Gather command line arguments for swift-syntax-test specifiying the
    # performed edits in this list
    incremental_edit_args = []
    reparse_args = []
    prepareForIncrParse(test_file, test_case, pre_edit_file, post_edit_file,
                        incremental_edit_args, reparse_args)
    # =========================================================================
    # Now generate the requested serialized file
    # =========================================================================
    # Build the command to serialize the tree depending on the command line
    # arguments
    try:
        command = [
            swift_syntax_test,
            '-serialize-raw-tree',
            '-output-filename', output_file
        ]
        if omit_node_ids:
            command.extend(['-omit-node-ids'])
        if serialization_mode == 'full':
            # Nothing to do. This is the default behaviour of swift-syntax-test
            pass
        elif serialization_mode == 'incremental':
            command.extend(['-incremental-serialization'])
        else:
            raise ValueError('Unknown serialization mode "%s"' %
                             serialization_mode)
        if serialization_format == 'json':
            # Nothing to do. This is the default behaviour of swift-syntax-test
            pass
        elif serialization_format == 'byteTree':
            command.extend(['-serialize-byte-tree'])
        else:
            raise ValueError('Unknown serialization format "%s"' %
                             serialization_format)
        if mode == 'pre-edit':
            command.extend(['-input-source-filename', pre_edit_file])
        elif mode == 'post-edit':
            command.extend(['-input-source-filename', post_edit_file])
        elif mode == 'incremental':
            # We need to build the syntax tree of the pre-edit file first so
            # that we can pass it to swift-syntax-test to perform incremental
            # parsing
            pre_edit_tree_file = pre_edit_file + '.serialized.json'
            run_command([swift_syntax_test] +
                        ['-serialize-raw-tree'] +
                        ['-input-source-filename', pre_edit_file] +
                        ['-output-filename', pre_edit_tree_file])
            # Then perform incremental parsing with the old syntax tree on the
            # post-edit file
            command.extend(['-input-source-filename', post_edit_file])
            command.extend(['-old-syntax-tree-filename',
                            pre_edit_tree_file])
            # NOTE(review): this flag uses a double dash while every other
            # flag here uses a single dash — confirm swift-syntax-test
            # accepts both spellings.
            command.extend(['--old-source-filename', pre_edit_file])
            command.extend(incremental_edit_args)
            command.extend(reparse_args)
            if print_visual_reuse_info:
                command.extend([
                    '-print-visual-reuse-info',
                    '-force-colored-output'
                ])
        else:
            raise ValueError('Unknown mode "%s"' % mode)
        output = run_command(command)
        if print_visual_reuse_info:
            print(output)
    except subprocess.CalledProcessError as e:
        raise TestFailedError(e.output)
def main():
    """Parse the command line and serialize the requested syntax tree.

    Exits with status 1 (after printing the error to stderr) when the
    test preparation or swift-syntax-test invocation fails.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Utility for testing incremental syntax parsing',
        epilog='''
    This utility can parse a special markup to dedicate a pre-edit and a
    post-edit version of a file simulateously and generate a serialized version
    of the libSyntax tree by parsing either the pre-edit file, the post-edit
    file or the edits that are required to retrieve the post-edit file from the
    pre-edit file incrementally.
    To generate the pre-edit and the post-edit file from the template, it
    operates on markers of the form:
        <<test_case<pre|||post>>>
    These placeholders are replaced by:
      - 'pre' if a different test case than 'test_case' is run
      - 'pre' for the pre-edit version of 'test_case'
      - 'post' for the post-edit version of 'test_case''')
    parser.add_argument(
        'file', type=argparse.FileType(),
        help='The template file to test')
    parser.add_argument(
        '--test-case', default='',
        help='The test case to execute. If no test case is specified all \
              unnamed substitutions are applied')
    parser.add_argument(
        '--mode', choices=['pre-edit', 'incremental', 'post-edit'],
        required=True, help='''
        The type of parsing to perform:
          - pre-edit: Serialize the syntax tree when parsing the pre-edit file \
              from scratch
          - incremental: Serialize the syntax tree that results from parsing the \
              edits between the pre-edit and post-edit file incrementally
          - post-edit: Serialize the syntax tree that results from parsing the \
              post-edit file from scratch
        ''')
    parser.add_argument(
        '--serialization-mode', choices=['full', 'incremental'],
        default='full', help='''
        Only applicable if `--mode` is `incremental`. Whether to serialize the
        entire tree or use the incremental transfer mode. Default is `full`.
        ''')
    parser.add_argument(
        '--serialization-format', choices=['json', 'byteTree'],
        default='json', help='''
        The format in which the syntax tree shall be serialized.
        ''')
    parser.add_argument(
        '--omit-node-ids', default=False, action='store_true',
        help='Don\'t include the ids of the nodes in the serialized syntax \
              tree')
    parser.add_argument(
        '--output-file', required=True,
        help='The file to which the serialized tree shall be written.')
    parser.add_argument(
        '--temp-dir', required=True,
        help='A temporary directory where pre-edit and post-edit files can be \
              saved')
    parser.add_argument(
        '--swift-syntax-test', required=True,
        help='The path to swift-syntax-test')
    parser.add_argument(
        '--print-visual-reuse-info', default=False, action='store_true',
        help='Print visual reuse information about the incremental parse \
              instead of diffing the syntax trees. This option is intended \
              for debug purposes only.')
    args = parser.parse_args(sys.argv[1:])
    test_file = args.file.name
    test_case = args.test_case
    mode = args.mode
    serialization_mode = args.serialization_mode
    serialization_format = args.serialization_format
    omit_node_ids = args.omit_node_ids
    output_file = args.output_file
    temp_dir = args.temp_dir
    swift_syntax_test = args.swift_syntax_test
    visual_reuse_info = args.print_visual_reuse_info
    try:
        serializeIncrParseMarkupFile(test_file=test_file,
                                     test_case=test_case,
                                     mode=mode,
                                     serialization_mode=serialization_mode,
                                     serialization_format=serialization_format,
                                     omit_node_ids=omit_node_ids,
                                     output_file=output_file,
                                     temp_dir=temp_dir,
                                     swift_syntax_test=swift_syntax_test,
                                     print_visual_reuse_info=visual_reuse_info)
    except TestFailedError as e:
        # Exception.message was removed in Python 3 (and deprecated since
        # Python 2.6); str(e) works on both interpreters.
        print(str(e), file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations:
"""DdosCustomPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        ddos_custom_policy_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE request of the long-running operation
        (no polling; ``begin_delete`` wraps this with an LRO poller)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are the accepted responses for starting the delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        ddos_custom_policy_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified DDoS custom policy.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_custom_policy_name: The name of the DDoS custom policy.
        :type ddos_custom_policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial DELETE and keep the raw
            # pipeline response for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                ddos_custom_policy_name=ddos_custom_policy_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # The operation itself produces no body; only route the raw
            # response through the caller-supplied cls, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        ddos_custom_policy_name: str,
        **kwargs: Any
    ) -> "_models.DdosCustomPolicy":
        """Gets information about the specified DDoS custom policy.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_custom_policy_name: The name of the DDoS custom policy.
        :type ddos_custom_policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DdosCustomPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.DdosCustomPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosCustomPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        ddos_custom_policy_name: str,
        parameters: "_models.DdosCustomPolicy",
        **kwargs: Any
    ) -> "_models.DdosCustomPolicy":
        """Send the initial PUT request of the create-or-update LRO
        (no polling; ``begin_create_or_update`` wraps this with a poller)."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosCustomPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing resource, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        ddos_custom_policy_name: str,
        parameters: "_models.DdosCustomPolicy",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.DdosCustomPolicy"]:
        """Creates or updates a DDoS custom policy.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_custom_policy_name: The name of the DDoS custom policy.
        :type ddos_custom_policy_name: str
        :param parameters: Parameters supplied to the create or update operation.
        :type parameters: ~azure.mgmt.network.v2020_11_01.models.DdosCustomPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.DdosCustomPolicy]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosCustomPolicy"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT and keep the raw
            # pipeline response for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                ddos_custom_policy_name=ddos_custom_policy_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body into a DdosCustomPolicy.
            deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
    async def update_tags(
        self,
        resource_group_name: str,
        ddos_custom_policy_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.DdosCustomPolicy":
        """Update a DDoS custom policy tags.

        Issues a single PATCH to the ddosCustomPolicies ARM endpoint (no LRO
        polling, unlike begin_create_or_update) and deserializes the result.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_custom_policy_name: The name of the DDoS custom policy.
        :type ddos_custom_policy_name: str
        :param parameters: Parameters supplied to update DDoS custom policy resource tags.
        :type parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DdosCustomPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.DdosCustomPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosCustomPolicy"]
        # Default ARM error mapping; callers can extend/override it via the
        # 'error_map' kwarg, which is merged on top.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the TagsObject body and send the PATCH request through
        # the client pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation; anything else is mapped
        # to the appropriate azure-core exception.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
| |
"""
Handles a Minecraft world save using either the Anvil or McRegion format.
For more information about the world format:
https://minecraft.gamepedia.com/Level_format
"""
import os, glob, re
from . import region
from . import chunk
from .region import InconceivedChunk, Location
class UnknownWorldFormat(Exception):
    """Raised when a world folder is unknown, invalid, or empty."""

    def __init__(self, msg=""):
        # Keep the human-readable reason on the instance for callers that
        # inspect .msg rather than str(exc).
        self.msg = msg
class _BaseWorldFolder(object):
    """
    Abstract class, representing either a McRegion or Anvil world folder.
    This class will use either Anvil or McRegion, with Anvil the preferred format.
    Simply calling WorldFolder() will do this automatically.
    """
    type = "Generic"          # human-readable format name; overridden by subclasses
    extension = ''            # region-file extension ('mcr' or 'mca'); overridden by subclasses
    chunkclass = chunk.Chunk  # Chunk wrapper class; overridden by subclasses

    def __init__(self, world_folder):
        """Initialize a WorldFolder.

        :param world_folder: path to the world save directory.
        :raises OSError: if the directory does not exist or is unreadable.
        """
        self.worldfolder = world_folder
        self.regionfiles = {}   # (x, z) region coordinates -> region file path
        self.regions = {}       # cache of (x, z) -> region.RegionFile
        self.chunks = None      # lazy cache filled by get_chunks()
        # os.listdir triggers an OSError for non-existent directories or permission errors.
        # This is needed, because glob.glob silently returns no files.
        os.listdir(world_folder)
        self.set_regionfiles(self.get_filenames())

    def get_filenames(self):
        """Find all matching file names in the world folder.

        This method is private, and its use is deprecated. Use get_regionfiles() instead."""
        # Warning: glob returns an empty list if the directory is unreadable, without raising an Exception
        return list(glob.glob(os.path.join(self.worldfolder, 'region', 'r.*.*.' + self.extension)))

    def set_regionfiles(self, filenames):
        """
        This method directly sets the region files for this instance to use.
        It assumes the filenames are in the form r.<x-digit>.<z-digit>.<extension>
        """
        for filename in filenames:
            # Assume that filenames have the name r.<x-digit>.<z-digit>.<extension>
            m = re.match(r"r.(\-?\d+).(\-?\d+)." + self.extension, os.path.basename(filename))
            if m:
                x = int(m.group(1))
                z = int(m.group(2))
            else:
                # Unrecognized filename format (e.g. a stray copy such as
                # "r.0.-1 copy.mca"). Skip it and continue with the next file.
                # BUGFIX: previously this fell through and stored the filename
                # under the previous iteration's (x, z) key — or raised a
                # NameError when the very first filename did not match.
                # TODO: log to stderr using logging facility.
                continue
            self.regionfiles[(x, z)] = filename

    def get_regionfiles(self):
        """Return a list of full path of all region files."""
        return list(self.regionfiles.values())

    def nonempty(self):
        """Return True if the world is non-empty."""
        return len(self.regionfiles) > 0

    def get_region(self, x, z):
        """Get a region using x,z coordinates of a region. Cache results."""
        if (x, z) not in self.regions:
            if (x, z) in self.regionfiles:
                self.regions[(x, z)] = region.RegionFile(self.regionfiles[(x, z)])
            else:
                # Return an empty RegionFile object
                # TODO: this does not yet allow for saving of the region file
                # TODO: this currently fails with a ValueError!
                # TODO: generate the correct name, and create the file
                # and add the file to self.regionfiles
                self.regions[(x, z)] = region.RegionFile()
            self.regions[(x, z)].loc = Location(x=x, z=z)
        return self.regions[(x, z)]

    def iter_regions(self):
        """
        Return an iterable list of all region files. Use this function if you only
        want to loop through each region files once, and do not want to cache the results.
        """
        # TODO: Implement BoundingBox
        # TODO: Implement sort order
        for x, z in self.regionfiles.keys():
            close_after_use = False
            if (x, z) in self.regions:
                regionfile = self.regions[(x, z)]
            else:
                # It is not yet cached. Get the file, but do not cache it, and
                # close it again after the consumer is done with it.
                regionfile = region.RegionFile(self.regionfiles[(x, z)], chunkclass=self.chunkclass)
                regionfile.loc = Location(x=x, z=z)
                close_after_use = True
            try:
                yield regionfile
            finally:
                if close_after_use:
                    regionfile.close()

    def call_for_each_region(self, callback_function, boundingbox=None):
        """
        Return an iterable that calls callback_function for each region file
        in the world. This is equivalent to:
        ```
        for the_region in iter_regions():
            yield callback_function(the_region)
        ````

        This function is threaded. It uses pickle to pass values between threads.
        See [What can be pickled and unpickled?](https://docs.python.org/library/pickle.html#what-can-be-pickled-and-unpickled) in the Python documentation
        for limitation on the output of `callback_function()`.
        """
        raise NotImplementedError()

    def get_nbt(self, x, z):
        """
        Return a NBT specified by the chunk coordinates x,z. Raise InconceivedChunk
        if the NBT file is not yet generated. To get a Chunk object, use get_chunk.
        """
        # 32x32 chunks per region: split into region coords and in-region coords.
        rx, cx = divmod(x, 32)
        rz, cz = divmod(z, 32)
        if (rx, rz) not in self.regions and (rx, rz) not in self.regionfiles:
            raise InconceivedChunk("Chunk %s,%s is not present in world" % (x, z))
        nbt = self.get_region(rx, rz).get_nbt(cx, cz)
        assert nbt is not None
        return nbt

    def set_nbt(self, x, z, nbt):
        """
        Set a chunk. Overrides the NBT if it already existed. If the NBT did not exists,
        adds it to the Regionfile. May create a new Regionfile if that did not exist yet.
        nbt must be a nbt.NBTFile instance, not a Chunk or regular TAG_Compound object.
        """
        raise NotImplementedError()
        # TODO: implement

    def iter_nbt(self):
        """
        Return an iterable list of all NBT. Use this function if you only
        want to loop through the chunks once, and don't need the block or data arrays.
        """
        # TODO: Implement BoundingBox
        # TODO: Implement sort order
        for region in self.iter_regions():
            for c in region.iter_chunks():
                yield c

    def call_for_each_nbt(self, callback_function, boundingbox=None):
        """
        Return an iterable that calls callback_function for each NBT structure
        in the world. This is equivalent to:
        ```
        for the_nbt in iter_nbt():
            yield callback_function(the_nbt)
        ````

        This function is threaded. It uses pickle to pass values between threads.
        See [What can be pickled and unpickled?](https://docs.python.org/library/pickle.html#what-can-be-pickled-and-unpickled) in the Python documentation
        for limitation on the output of `callback_function()`.
        """
        raise NotImplementedError()

    def get_chunk(self, x, z):
        """
        Return a chunk specified by the chunk coordinates x,z. Raise InconceivedChunk
        if the chunk is not yet generated. To get the raw NBT data, use get_nbt.
        """
        return self.chunkclass(self.get_nbt(x, z))

    def get_chunks(self, boundingbox=None):
        """
        Return a list of all chunks. Use this function if you access the chunk
        list frequently and want to cache the result.
        Use iter_chunks() if you only want to loop through the chunks once or have a
        very large world.
        """
        if self.chunks is None:
            self.chunks = list(self.iter_chunks())
        return self.chunks

    def iter_chunks(self):
        """
        Return an iterable list of all chunks. Use this function if you only
        want to loop through the chunks once or have a very large world.
        Use get_chunks() if you access the chunk list frequently and want to cache
        the results. Use iter_nbt() if you are concerned about speed and don't want
        to parse the block data.
        """
        # TODO: Implement BoundingBox
        # TODO: Implement sort order
        for c in self.iter_nbt():
            yield self.chunkclass(c)

    def chunk_count(self):
        """Return a count of the chunks in this world folder."""
        c = 0
        for r in self.iter_regions():
            c += r.chunk_count()
        return c

    def get_boundingbox(self):
        """
        Return minimum and maximum x and z coordinates of the chunks that
        make up this world save
        """
        b = BoundingBox()
        for rx, rz in self.regionfiles.keys():
            # Local renamed from `region` to avoid shadowing the imported
            # `region` module.
            regionfile = self.get_region(rx, rz)
            rx, rz = 32 * rx, 32 * rz
            for cc in regionfile.get_chunk_coords():
                x, z = (rx + cc['x'], rz + cc['z'])
                b.expand(x, None, z)
        return b

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.worldfolder)
class McRegionWorldFolder(_BaseWorldFolder):
    """World save in the legacy McRegion format (.mcr region files)."""
    type = "McRegion"                  # human-readable format name
    extension = 'mcr'                  # region-file extension matched by get_filenames()
    chunkclass = chunk.McRegionChunk   # chunk wrapper used for this format
class AnvilWorldFolder(_BaseWorldFolder):
    """World save in the current Anvil format (.mca region files)."""
    type = "Anvil"                  # human-readable format name
    extension = 'mca'               # region-file extension matched by get_filenames()
    chunkclass = chunk.AnvilChunk   # chunk wrapper used for this format
class _WorldFolderFactory(object):
"""Factory class: instantiate the subclassses in order, and the first instance
whose nonempty() method returns True is returned. If no nonempty() returns True,
a UnknownWorldFormat exception is raised."""
def __init__(self, subclasses):
self.subclasses = subclasses
def __call__(self, *args, **kwargs):
for cls in self.subclasses:
wf = cls(*args, **kwargs)
if wf.nonempty(): # Check if the world is non-empty
return wf
raise UnknownWorldFormat("Empty world or unknown format")
# Public entry point: tries the preferred Anvil format first, then falls
# back to the legacy McRegion format.
WorldFolder = _WorldFolderFactory([AnvilWorldFolder, McRegionWorldFolder])
"""
Factory instance that returns a AnvilWorldFolder or McRegionWorldFolder
instance, or raise a UnknownWorldFormat.
"""
class BoundingBox(object):
    """A bounding box of x,y,z coordinates.

    Each bound starts as None, meaning "not yet set"; expand() grows the box
    to include points, and the len*() methods report inclusive extents.
    """

    def __init__(self, minx=None, maxx=None, miny=None, maxy=None, minz=None, maxz=None):
        """Create a bounding box. None for any bound means it is unset."""
        self.minx, self.maxx = minx, maxx
        self.miny, self.maxy = miny, maxy
        self.minz, self.maxz = minz, maxz

    def expand(self, x, y, z):
        """
        Expand the bounding box to include the point (x, y, z).

        Any coordinate may be None, in which case that axis is left unchanged
        (used e.g. for 2D chunk coordinates where y is irrelevant).
        """
        # Idiom fix: compare against None with `is not None` rather than `!=`.
        if x is not None:
            if self.minx is None or x < self.minx:
                self.minx = x
            if self.maxx is None or x > self.maxx:
                self.maxx = x
        if y is not None:
            if self.miny is None or y < self.miny:
                self.miny = y
            if self.maxy is None or y > self.maxy:
                self.maxy = y
        if z is not None:
            if self.minz is None or z < self.minz:
                self.minz = z
            if self.maxz is None or z > self.maxz:
                self.maxz = z

    def lenx(self):
        """Return the inclusive extent along x, or 0 if x is unbounded."""
        if self.maxx is None or self.minx is None:
            return 0
        return self.maxx - self.minx + 1

    def leny(self):
        """Return the inclusive extent along y, or 0 if y is unbounded."""
        if self.maxy is None or self.miny is None:
            return 0
        return self.maxy - self.miny + 1

    def lenz(self):
        """Return the inclusive extent along z, or 0 if z is unbounded."""
        if self.maxz is None or self.minz is None:
            return 0
        return self.maxz - self.minz + 1

    def __repr__(self):
        return "%s(%s,%s,%s,%s,%s,%s)" % (self.__class__.__name__, self.minx, self.maxx,
                                          self.miny, self.maxy, self.minz, self.maxz)
| |
from django.shortcuts import render, redirect
from datetime import datetime
from core.views import initRequest
from django.db import connection, transaction
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.core.cache import cache
from pprint import pprint
import json, re, os
from collections import defaultdict
from operator import itemgetter, attrgetter
class DateEncoder(json.JSONEncoder):
    """JSON encoder that serializes date/datetime objects as ISO-8601 strings.

    Any object exposing isoformat() is rendered with it; every other
    non-serializable object falls back to str(). As a consequence this
    encoder never raises TypeError for unknown types.
    """

    def default(self, obj):
        if hasattr(obj, 'isoformat'):
            return obj.isoformat()
        # Best-effort fallback for anything else.
        # BUGFIX: removed the unreachable
        # `return json.JSONEncoder.default(self, obj)` that followed this
        # if/else — both branches already returned, so it was dead code.
        return str(obj)
def nviewDemo(request):
    """Render the nightly-build (ARDOC/ART) monitoring table.

    Reads the requested nightly branch ('nightly') and release ('rel') from
    the session request parameters, queries the nightlies/jobs tables over
    the ATLR database link, decorates each release's per-project status with
    jQuery-UI icon HTML, and renders 'nviewDemo.html' with the rows encoded
    as JSON.
    """
    valid, response = initRequest(request)
    # Request parameters are only length-capped; defaults are used otherwise.
    if 'nightly' in request.session['requestParams'] and len(request.session['requestParams']['nightly']) < 100:
        nname = request.session['requestParams']['nightly']
    else:
        nname = 'master_Athena_x86_64-centos7-gcc8-opt'
    if 'rel' in request.session['requestParams'] and len(request.session['requestParams']['rel']) < 100:
        rname = request.session['requestParams']['rel']
    else:
        rname = '*'
    ar_sel="unknown"
    pjname='unknown'
    new_cur = connection.cursor()
    dict_from_cache = cache.get('art-monit-dict')
    # HTML snippets (jQuery-UI icons) used to decorate table cells. The
    # backslash continuations are *inside* the string literals, so the
    # continuation lines must stay at column 0 to keep the string content.
    check_icon='<div class="ui-widget ui-state-check" style="display:inline-block;"> <span s\
tyle="display:inline-block;" title="OK" class="DataTables_sort_icon css_right ui-icon ui-ico\
n-circle-check">ICON33</span></div>'
    clock_icon='<div class="ui-widget ui-state-hover" style="display:inline-block;"> <span s\
tyle="display:inline-block;" title="UPDATING" class="DataTables_sort_icon css_right ui-icon \
ui-icon-clock">ICON39</span></div>'
    minorwarn_icon='<div class="ui-widget ui-state-highlight" style="display:inline-block;"> <s\
pan style="display:inline-block;" title="MINOR WARNING" class="DataTables_sort_icon css_righ\
t ui-icon ui-icon-alert">ICON34</span></div>'
    warn_icon='<div class="ui-widget ui-state-error" style="display:inline-block;"> <span st\
yle="display:inline-block;" title="WARNING" class="DataTables_sort_icon css_right ui-icon ui\
-icon-lightbulb">ICON35</span></div>'
    error_icon='<div class="ui-widget ui-state-error" style="display:inline-block;"> <span s\
tyle="display:inline-block;" title="ERROR" class="DataTables_sort_icon css_right ui-icon ui-\
icon-circle-close">ICON36</span></div>'
    help_icon='<span style="display:inline-block;" title="HELP" class="DataTables_sort_icon \
css_right ui-icon ui-icon-help">ICONH</span>'
    clip_icon='<span style="display:inline-block; float: left; margin-right: .9em;" title="C\
LIP" class="DataTables_sort_icon ui-icon ui-icon-clipboard">ICONCL</span>'
    person_icon='<span style="display:inline-block; float: left; margin-right: .9em;" title=\
"MASTER ROLE" class="DataTables_sort_icon ui-icon ui-icon-person">ICONP</span>'
    person_icon1='<span style="display:inline-block;" title="MASTER ROLE" class="DataTables_\
sort_icon ui-icon ui-icon-person">ICONP1</span>'
    mailyes_icon='<span style="cursor:pointer; display:inline-block; " title="MAIL ENABLED" \
class="ui-icon ui-icon-mail-closed"> ICON1</span>'
    radiooff_icon='<div class="ui-widget ui-state-default" style="display:inline-block";> <s\
pan title="N/A" class="ui-icon ui-icon-radio-off">ICONRO</span></div>'
    majorwarn_icon=warn_icon
    # Map DB status codes (as strings) to the icon shown in the table.
    di_res={'-1':clock_icon,'N/A':radiooff_icon,'0':check_icon,'1':error_icon,'2':majorwarn_icon,'3':error_icon,'4':minorwarn_icon,'10':clock_icon}
    # NOTE(review): the queries below are built with % string interpolation
    # from request-derived values that are only length-checked — a SQL
    # injection risk. They should use cursor bind parameters instead; TODO
    # confirm and fix.
    query="select to_char(jid),arch||'-'||os||'-'||comp||'-'||opt as AA, to_char(tstamp, 'RR/MM/DD HH24:MI') as tstamp, nname, name, webarea, webbuild, gitmrlink, tstamp as tst1,tcrel,tcrelbase,buildarea,relnstamp,gitbr,lartwebarea from NIGHTLIES@ATLR.CERN.CH natural join releases@ATLR.CERN.CH natural join jobs@ATLR.CERN.CH where nname ='%s' and tstamp between sysdate-11+1/24 and sysdate order by tstamp desc" % nname
    if rname != '*':
        query="select to_char(jid),arch||'-'||os||'-'||comp||'-'||opt as AA, to_char(tstamp, 'RR/MM/DD HH24:MI') as tstamp, nname, name, webarea, webbuild, gitmrlink, tstamp as tst1,tcrel,tcrelbase,buildarea,relnstamp,gitbr,lartwebarea from NIGHTLIES@ATLR.CERN.CH natural join releases@ATLR.CERN.CH natural join jobs@ATLR.CERN.CH where nname ='%s' and name ='%s' and tstamp between sysdate-11+1/24 and sysdate order by tstamp desc" % (nname,rname)
    ####HEADERS <th>Release</th>
    # <th>Platform</th>
    # <th>Project</th>
    # <th>Job time stamp</th>
    # <th>git clone</th>
    # <th>Externals<BR>build</th>
    # <th>CMake<BR>config</th>
    # <th>Build time</th>
    # <th>Comp. Errors<BR>(w/warnings)</th>
    # <th>Test time</th>
    # <th>Pct. of Successful<BR>CTest tests<BR>(no warnings)</th>
    # <th>CVMFS time</th>
    # <th>Host</th>
    # <th>Image</th>
    new_cur.execute(query)
    result = new_cur.fetchall()
    # NOTE(review): di_res is re-assigned here with the exact same mapping as
    # above — redundant but harmless.
    di_res={'-1':clock_icon,'N/A':radiooff_icon,'0':check_icon,'1':error_icon,'2':majorwarn_icon,'3':error_icon,'4':minorwarn_icon,'10':clock_icon}
    # Transform the cached ART counters into HTML snippets keyed by
    # "<branch-with-underscores>_<tag>".
    dict_cache_transf={}
    if dict_from_cache:
        for k46, v46 in dict_from_cache.items():
            for kk, vv in v46.items():
                kk_transf = re.sub('/','_',k46)
                key_transf = kk_transf+'_'+kk
                string_vv = '<B><span style="color: blue">' + str(vv['active']) + '</span></B>'
                string_vv = string_vv + ',<B><span style="color: green">'+ str(vv['succeeded']) +'</span></B>,'
                string_vv = string_vv + '<B><span style="color: brown">' + str(vv['finished']) + '</span></B>'
                string_vv = string_vv +',<B><span style="color: red">' + str(vv['failed']) + '</span></B>'
                dict_cache_transf[key_transf] = [string_vv, k46]
    ar_sel="unknown"
    pjname='unknown'
    i=0
    rows_s = []
    # One outer iteration per nightly job; capped at 1000 rows.
    for row in result:
        i+=1
        if i > 1000: break
        jid_sel = row[0]
        ar_sel = row[1]
        rname = row[4]
        rname_trun = re.sub(r'\([^)]*\)', '', rname)
        webarea_cur = row[5]
        if webarea_cur == None: webarea_cur = "";
        job_start = row[8]
        t_start='N/A'
        if job_start != None: t_start=job_start.strftime('%Y/%m/%d %H:%M')
        ## mrlink = row[7]
        gitbr = row[13]
        lartwebarea=row[14]
        if lartwebarea == None or lartwebarea == '': lartwebarea="http://atlas-computing.web.cern.ch/atlas-computing/links/distDirectory/gitwww/GITWebArea/nightlies"
        # print("JIDX",jid_sel)
        #
        # Per-project job status for this jid.
        query01="select to_char(jid),projname,stat,eb,sb,ei,si,ecv,ecvkv,suff,scv,scvkv,scb,sib,sco,ela,sla,erla,sula,wala,eim,sim,eext,sext,vext,hname from jobstat@ATLR.CERN.CH natural join projects@ATLR.CERN.CH where jid = '%s' order by projname" % (jid_sel)
        new_cur.execute(query01)
        reslt1 = new_cur.fetchall()
        lenres=len(reslt1)
        # Cancelled/aborted jobs get a single summary row; everything else is
        # expanded per project in the else branch below.
        if lenres != 0 and ( reslt1[0][2] == 'cancel' or reslt1[0][2] == 'CANCEL' or reslt1[0][2] == 'ABORT' or reslt1[0][2] == 'abort' ):
            pjname=reslt1[0][1]
            s_ext = reslt1[0][23]
            if s_ext == None or s_ext == '': s_ext = 'N/A'
            vext = reslt1[0][24]
            if vext == None or vext == '': vext = '0'
            s_checkout = 'N/A'
            if reslt1[0][14] != None: s_checkout = str(reslt1[0][14])
            s_config = 'N/A';
            s_inst = 'N/A'
            # NOTE(review): `str(vext) != 1` compares a str to an int and is
            # therefore always True — presumably '1' was intended. TODO confirm.
            if str(vext) != 1: s_config = '0'; s_inst = '0'
            if reslt1[0][12] != None: s_config = str(reslt1[0][12])
            if reslt1[0][13] != None: s_inst = str(reslt1[0][13])
            hname=reslt1[0][25]
            if re.search(r'\.',hname):
                hname=(re.split(r'\.',hname))[0]
            area_suffix = reslt1[0][9]
            if area_suffix == None: area_suffix = "";
            # Translate raw status values into icon HTML.
            [i_checkout, i_inst, i_config, i_ext] = \
                map(lambda x: di_res.get(str(x), str(x)),
                    [s_checkout, s_inst, s_config, s_ext])
            if i_checkout == None or i_checkout == "None": i_checkout = radiooff_icon;
            if i_inst == None or i_inst == "None": i_inst = radiooff_icon;
            if i_config == None or i_config == "None": i_config = radiooff_icon;
            if i_ext == None or i_ext == "None": i_ext = radiooff_icon;
            ii_checkout, ii_config, ii_ext = i_checkout, i_config, i_ext
            if str(vext) != '1':
                ii_ext = i_inst
            else:
                # Wrap icons into links to the corresponding ARDOC log pages.
                if ii_checkout == check_icon or ii_checkout == error_icon or ii_checkout == majorwarn_icon or ii_checkout == minorwarn_icon:
                    ii_checkout = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_checkout.html' + "\">" + i_checkout + "</a>"
                if ii_ext == check_icon or ii_ext == error_icon or ii_ext == majorwarn_icon or ii_ext == minorwarn_icon:
                    ii_ext = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_externals_build.html' + "\">" + i_ext + "</a>"
                if ii_config == check_icon or ii_config == error_icon or ii_config == majorwarn_icon or ii_config == minorwarn_icon:
                    ii_config = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_cmake_config.html' + "\">" + i_config + "</a>"
            if reslt1[0][2] == 'ABORT' or reslt1[0][2] == 'abort':
                row_cand = [rname, t_start, ii_checkout, ii_ext, ii_config, 'ABORTED', 'N/A', 'N/A', 'N/A',
                            'N/A', 'N/A', 'N/A', 'N/A', hname, 'N/A']
                rows_s.append(row_cand)
            else:
                row_cand=[rname,t_start,'NO NEW<BR>CODE','N/A','N/A','CANCELLED','N/A','N/A','N/A','N/A','N/A','N/A','N/A',hname,'N/A']
                rows_s.append(row_cand)
        else:
            # Compilation statistics per project.
            query1="select to_char(jid),projname,ncompl,pccompl,npb,ner,pcpb,pcer from cstat@ATLR.CERN.CH natural join projects@ATLR.CERN.CH where jid = '%s' order by projname" % (jid_sel)
            new_cur.execute(query1)
            reslt_addtl = new_cur.fetchall()
            lenres_addtl=len(reslt_addtl)
            if lenres_addtl != 0:
                dict_jid01={}
                for row02 in reslt_addtl:
                    # print("------", row02[0],row02[1],row02[2])
                    pj2=row02[1]
                    if not pj2 in dict_jid01 : dict_jid01[pj2]=[]
                    dict_jid01[pj2]=[row02[2],row02[3],row02[5],row02[4],row02[7],row02[6]]
                #
                # Test (CTest) statistics per project.
                query2="select to_char(jid),projname,ncompl,pccompl,npb,ner,pcpb,pcer from tstat@ATLR.CERN.CH natural join projects@ATLR.CERN.CH where jid = '%s' order by projname" % (jid_sel)
                new_cur.execute(query2)
                reslt2 = new_cur.fetchall()
                dict_jid02={}
                for row02 in reslt2:
                    # print("=======", row02[0],row02[1],row02[2])
                    pj2=row02[1]
                    if not pj2 in dict_jid02 : dict_jid02[pj2]=[]
                    dict_jid02[pj2]=[row02[2],row02[3],row02[5],row02[4],row02[7],row02[6]]
                reslt2 = {}
                # One table row per project of this job.
                for row01 in reslt1:
                    # print("JID",row01[0])
                    pjname=row01[1]
                    erla=row01[17]
                    if erla == None or erla == '': erla='N/A'
                    sula=row01[18]
                    if sula == None or sula == '': sula='N/A'
                    wala=row01[19]
                    if wala == None or wala == '': wala = 'N/A'
                    if wala.isdigit() and sula.isdigit():
                        sula = str(int(sula) - int(wala))
                    e_im = row01[20]
                    if e_im == None or e_im == '': e_im='N/A'
                    s_im=row01[21]
                    if s_im == None or s_im == '': s_im='N/A'
                    s_ext = row01[23]
                    if s_ext == None or s_ext == '': s_ext = 'N/A'
                    vext = row01[24]
                    if vext == None or vext == '': vext = '0'
                    hname=row01[25]
                    if re.search(r'\.', hname):
                        hname = (re.split(r'\.', hname))[0]
                    area_suffix = reslt1[0][9]
                    if area_suffix == None: area_suffix = "";
                    t_cv_serv=row01[7]
                    t_cv_clie=row01[8]
                    s_cv_serv=row01[10]
                    s_cv_clie=row01[11]
                    # Compilation counters for this project (if any).
                    nccompl='0';cpccompl='0';nc_er='0';nc_pb='0';cpcer='0';cpcpb='0'
                    if pjname in dict_jid01 :
                        nccompl=dict_jid01[pjname][0]
                        cpccompl=dict_jid01[pjname][1]
                        nc_er=dict_jid01[pjname][2]
                        nc_pb=dict_jid01[pjname][3]
                        if nccompl == None or nccompl == 'N/A' or nccompl <= 0:
                            nc_er='N/A'
                            nc_pb='N/A'
                        cpcer=dict_jid01[pjname][4]
                        cpcpb=dict_jid01[pjname][5]
                    # Test counters for this project (if any).
                    ntcompl='0';tpccompl='0';nt_er='0';nt_pb='0';tpcer='0';tpcpb='0'
                    if pjname in dict_jid02 :
                        ntcompl=dict_jid02[pjname][0]
                        tpccompl=dict_jid02[pjname][1]
                        nt_er=dict_jid02[pjname][2]
                        nt_pb=dict_jid02[pjname][3]
                        if ntcompl == None or ntcompl == 'N/A' or ntcompl <= 0:
                            nt_er='N/A'
                            nt_pb='N/A'
                        tpcer=dict_jid02[pjname][4]
                        tpcpb=dict_jid02[pjname][5]
                    # [tpcer_s,tpcpb_s]=map(lambda c: 100 - c, [tpcer,tpcpb])
                    # [tpcer_sf,tpcpb_sf]=map(lambda c: format(c,'.1f'), [tpcer_s,tpcpb_s])
                    s_checkout='N/A'
                    if row01[14] != None: s_checkout=str(row01[14])
                    s_config='N/A'; s_inst='N/A'
                    # NOTE(review): `str(vext) != 1` is always True (str vs
                    # int) — presumably '1' was intended. TODO confirm.
                    if str(vext) != 1: s_config='0'; s_inst='0'
                    if row01[12] != None: s_config=str(row01[12])
                    if row01[13] != None: s_inst=str(row01[13])
                    t_build='N/A'
                    if row01[3] != None: t_build=row01[3].strftime('%Y/%m/%d %H:%M')
                    t_test='N/A'
                    if row01[5] != None: t_test=row01[5].strftime('%Y/%m/%d %H:%M')
                    tt_cv_serv='N/A'
                    if t_cv_serv != None and t_cv_serv != '': tt_cv_serv=t_cv_serv.strftime('%Y/%m/%d %H:%M')
                    tt_cv_clie='N/A'
                    if t_cv_clie != None and t_cv_clie != '': tt_cv_clie=t_cv_clie.strftime('%Y/%m/%d %H:%M')
                    ss_cv_serv='N/A'
                    if s_cv_serv != None and s_cv_serv != '': ss_cv_serv=str(s_cv_serv)
                    ss_cv_clie='N/A'
                    if s_cv_clie != None and s_cv_clie != '': ss_cv_clie=str(s_cv_clie)
                    #
                    # "errors (problems)" cell text for compilation and tests.
                    combo_c=str(nc_er)+' ('+str(nc_pb)+')'
                    combo_t=str(nt_er)+' ('+str(nt_pb)+')'
                    if nt_er == 'N/A': combo_t='N/A(N/A)'
                    # mrlink_a="<a href=\""+mrlink+"\">"+gitbr+"</a>"
                    [i_checkout,i_inst,i_config,i_cv_serv,i_cv_clie,i_ext,i_image]=\
                        map(lambda x: di_res.get(str(x),str(x)), [s_checkout,s_inst,s_config,ss_cv_serv,ss_cv_clie,s_ext,s_im])
                    if i_checkout == None or i_checkout == "None" : i_checkout=radiooff_icon;
                    if i_inst == None or i_inst == "None" : i_inst=radiooff_icon;
                    if i_config == None or i_config == "None" : i_config=radiooff_icon;
                    if i_ext == None or i_ext == "None" : i_ext=radiooff_icon;
                    if i_image == None or i_image == "None": i_image = radiooff_icon;
                    ii_checkout, ii_config, ii_ext, ii_image = i_checkout, i_config, i_ext, i_image
                    if str(vext) != '1' :
                        ii_ext = i_inst
                        if e_im != 'N/A':
                            if isinstance(e_im, datetime):
                                ii_image = ii_image + " " + e_im.strftime('%d-%b %H:%M').upper()
                    else :
                        # Wrap icons into links to the ARDOC log pages.
                        if ii_checkout == check_icon or ii_checkout == error_icon or ii_checkout == majorwarn_icon or ii_checkout == minorwarn_icon:
                            ii_checkout = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_checkout.html' + "\">" + i_checkout + "</a>"
                        if ii_ext == check_icon or ii_ext == error_icon or ii_ext == majorwarn_icon or ii_ext == minorwarn_icon:
                            ii_ext = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_externals_build.html' + "\">" + i_ext + "</a>"
                        if ii_config == check_icon or ii_config == error_icon or ii_config == majorwarn_icon or ii_config == minorwarn_icon:
                            ii_config = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_cmake_config.html' + "\">" + i_config + "</a>"
                        if ii_image == check_icon or ii_image == error_icon or ii_image == majorwarn_icon or ii_image == minorwarn_icon:
                            ii_image = "<a href=\"" + webarea_cur + os.sep + 'ardoc_web_area' + area_suffix + os.sep + 'ARDOC_Log_' + rname_trun + os.sep + 'ardoc_image_build.html' + "\">" + i_image + "</a>"
                        if e_im != 'N/A':
                            if isinstance(e_im, datetime):
                                ii_image = ii_image + " " + e_im.strftime('%d-%b %H:%M').upper()
                    link_to_testsRes=reverse('TestsRes')
                    link_to_compsRes=reverse('CompsRes')
                    i_combo_t="<a href=\""+link_to_testsRes+"?nightly="+nname+"&rel="+rname+"&ar="+ar_sel+"&proj="+pjname+"\">"+combo_t+"</a>"
                    if combo_t == 'N/A(N/A)': i_combo_t=combo_t
                    i_combo_c="<a href=\""+link_to_compsRes+"?nightly="+nname+"&rel="+rname+"&ar="+ar_sel+"&proj="+pjname+"\">"+combo_c+"</a>"
                    if tt_cv_serv != 'N/A' : i_combo_cv_serv=tt_cv_serv+i_cv_serv
                    else: i_combo_cv_serv=i_cv_serv
                    if tt_cv_clie != 'N/A' : i_combo_cv_clie=tt_cv_clie+i_cv_clie
                    else: i_combo_cv_clie=i_cv_clie
                    # ART overview counters from the cache, if present.
                    key_cache_transf=nname + '_' + rname
                    val_cache_transf,nightly_name_art=dict_cache_transf.get(key_cache_transf,['N/A','N/A'])
                    if val_cache_transf != 'N/A' and nightly_name_art != 'N/A':
                        vacasf = "<a href=\"https://bigpanda.cern.ch/art/overview/?branch="
                        val_cache_transf = vacasf + nightly_name_art + "&ntag_full=" + rname + "\">" + val_cache_transf + "</a>"
                    # Local ART results cell (succeeded, warnings, failed).
                    local_art_res=''
                    if sula == 'N/A' and erla == 'N/A':
                        local_art_res='N/A'
                    elif sula == '0' and erla == '0':
                        local_art_res='N/A'
                    else:
                        local_art_res=local_art_res+'<B><span style="color: green">'+ str(sula)+'</span></B>,'
                        local_art_res = local_art_res + '<B><span style="color: brown">' + str(wala) + '</span></B>,'
                        local_art_res=local_art_res+'<B><span style="color: red">'+ str(erla)+'</span></B>'
                        arrk=re.split('_',nname)
                        branch=arrk[0]
                        loares="<a href=\""+lartwebarea+"/"+branch+"/"+rname+"/"+pjname+"/"+ar_sel+"/"+pjname+"/art.log.html\">"
                        local_art_res=loares+local_art_res+"</a>"
                    row_cand=[rname,t_start,ii_checkout,ii_ext,ii_config,t_build,i_combo_c,t_test,i_combo_t,local_art_res,val_cache_transf,i_combo_cv_serv,tt_cv_clie,hname,ii_image]
                    rows_s.append(row_cand)
    data={"nightly": nname, "rel": rname, "platform": ar_sel, "project": pjname, 'viewParams': request.session['viewParams'],'rows_s':json.dumps(rows_s, cls=DateEncoder)}
    return render(request,'nviewDemo.html', data, content_type='text/html')
| |
from __future__ import unicode_literals
import json
import requests
import six
from datetime import datetime
from six.moves.urllib.parse import parse_qs
from xml.etree.ElementTree import tostring, SubElement, Element
from .exceptions import (
XeroBadRequest, XeroExceptionUnknown, XeroForbidden, XeroInternalError,
XeroNotAvailable, XeroNotFound, XeroNotImplemented, XeroRateLimitExceeded,
XeroUnauthorized
)
from .utils import singular, isplural, json_load_object_hook
class BaseManager(object):
    """Base class for Xero API endpoint managers.

    Holds the field-type tables used when converting between the API's
    XML/JSON payloads and Python values, plus the list of public methods that
    are wrapped with shared HTTP dispatch and error handling (see _get_data).
    """
    # Public methods that get wrapped by _get_data() so they share HTTP
    # dispatch and error handling.
    DECORATED_METHODS = (
        'get',
        'save',
        'filter',
        'all',
        'put',
        'delete',
        'get_attachments',
        'get_attachment_data',
        'put_attachment_data',
    )
    # Fields treated as full UTC datetimes.
    DATETIME_FIELDS = (
        'UpdatedDateUTC',
        'Updated',
        'FullyPaidOnDate',
        'DateTimeUTC',
        'CreatedDateUTC'
    )
    # Fields treated as dates; serialised via strftime in dict_to_xml().
    DATE_FIELDS = (
        'DueDate',
        'Date',
        'PaymentDate',
        'StartDate',
        'EndDate',
        'PeriodLockDate',
        'DateOfBirth',
        'OpeningBalanceDate',
        'PaymentDueDate',
        'ReportingDate',
        'DeliveryDate',
        'ExpectedArrivalDate',
    )
    # Fields rendered as the XML literals 'true'/'false'.
    # NOTE(review): 'SentToContact' appears twice in this tuple; harmless for
    # membership tests but could be deduplicated.
    BOOLEAN_FIELDS = (
        'IsSupplier',
        'IsCustomer',
        'IsDemoCompany',
        'PaysTax',
        'IsAuthorisedToApproveTimesheets',
        'IsAuthorisedToApproveLeave',
        'HasHELPDebt',
        'AustralianResidentForTaxPurposes',
        'TaxFreeThresholdClaimed',
        'HasSFSSDebt',
        'EligibleToReceiveLeaveLoading',
        'IsExemptFromTax',
        'IsExemptFromSuper',
        'SentToContact',
        'IsSubscriber',
        'HasAttachments',
        'ShowOnCashBasisReports',
        'IncludeInEmails',
        'SentToContact',
        'CanApplyToRevenue',
        'IsReconciled',
        'EnablePaymentsToAccount',
        'ShowInExpenseClaims'
    )
    # Fields parsed as Decimal values.
    DECIMAL_FIELDS = (
        'Hours',
        'NumberOfUnit',
    )
    # Fields parsed as integers.
    INTEGER_FIELDS = (
        'FinancialYearEndDay',
        'FinancialYearEndMonth',
    )
    # Read-only fields Xero rejects when sent back; skipped by dict_to_xml().
    NO_SEND_FIELDS = (
        'UpdatedDateUTC',
        'HasValidationErrors',
        'IsDiscounted',
        'DateString',
        'HasErrors',
        'DueDateString',
    )
    # Django-style filter suffix -> Xero query operator.
    OPERATOR_MAPPINGS = {
        'gt': '>',
        'lt': '<',
        'lte': '<=',
        'gte': '>=',
        'ne': '!='
    }

    def __init__(self):
        pass
def dict_to_xml(self, root_elm, data):
for key in data.keys():
# Xero will complain if we send back these fields.
if key in self.NO_SEND_FIELDS:
continue
sub_data = data[key]
elm = SubElement(root_elm, key)
# Key references a dict. Unroll the dict
# as it's own XML node with subnodes
if isinstance(sub_data, dict):
self.dict_to_xml(elm, sub_data)
# Key references a list/tuple
elif isinstance(sub_data, list) or isinstance(sub_data, tuple):
# key name is a plural. This means each item
# in the list needs to be wrapped in an XML
# node that is a singular version of the list name.
if isplural(key):
for d in sub_data:
self.dict_to_xml(SubElement(elm, singular(key)), d)
# key name isn't a plural. Just insert the content
# as an XML node with subnodes
else:
for d in sub_data:
self.dict_to_xml(elm, d)
# Normal element - just insert the data.
else:
if key in self.BOOLEAN_FIELDS:
val = 'true' if sub_data else 'false'
elif key in self.DATE_FIELDS:
val = sub_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
val = six.text_type(sub_data)
elm.text = val
return root_elm
def _prepare_data_for_save(self, data):
if isinstance(data, list) or isinstance(data, tuple):
root_elm = Element(self.name)
for d in data:
sub_elm = SubElement(root_elm, self.singular)
self.dict_to_xml(sub_elm, d)
else:
root_elm = self.dict_to_xml(Element(self.singular), data)
# In python3 this seems to return a bytestring
return six.u(tostring(root_elm))
def _parse_api_response(self, response, resource_name):
data = json.loads(response.text, object_hook=json_load_object_hook)
assert data['Status'] == 'OK', "Expected the API to say OK but received %s" % data['Status']
try:
return data[resource_name]
except KeyError:
return data
    def _get_data(self, func):
        """Decorator applied to each of the DECORATED_METHODS.

        Each decorated method must return the request description tuple:
        uri, params, method, body, headers, singleobject.  The wrapper
        performs the HTTP request and maps error status codes onto the
        Xero* exception hierarchy.
        """
        def wrapper(*args, **kwargs):
            # 'timeout' is consumed here and handed to requests; it is
            # never forwarded to the decorated method.
            timeout = kwargs.pop('timeout', None)
            uri, params, method, body, headers, singleobject = func(*args, **kwargs)
            if headers is None:
                headers = {}
            # Use the JSON API by default, but remember we might request a PDF (application/pdf)
            # so don't force the Accept header.
            if 'Accept' not in headers:
                headers['Accept'] = 'application/json'
            # Set a user-agent so Xero knows the traffic is coming from pyxero
            # or individual user/partner
            headers['User-Agent'] = self.user_agent
            response = getattr(requests, method)(
                uri, data=body, headers=headers, auth=self.credentials.oauth,
                params=params, timeout=timeout)
            if response.status_code == 200:
                # If we haven't got XML or JSON, assume we're being returned a binary file
                if not response.headers['content-type'].startswith('application/json'):
                    return response.content
                return self._parse_api_response(response, self.name)
            elif response.status_code == 204:
                # No Content: nothing to parse, return the (empty) body.
                return response.content
            elif response.status_code == 400:
                raise XeroBadRequest(response)
            elif response.status_code == 401:
                raise XeroUnauthorized(response)
            elif response.status_code == 403:
                raise XeroForbidden(response)
            elif response.status_code == 404:
                raise XeroNotFound(response)
            elif response.status_code == 500:
                raise XeroInternalError(response)
            elif response.status_code == 501:
                raise XeroNotImplemented(response)
            elif response.status_code == 503:
                # Two 503 responses are possible. Rate limit errors
                # return encoded content; offline errors don't.
                # If you parse the response text and there's nothing
                # encoded, it must be a not-available error.
                payload = parse_qs(response.text)
                if payload:
                    raise XeroRateLimitExceeded(response, payload)
                else:
                    raise XeroNotAvailable(response)
            else:
                raise XeroExceptionUnknown(response)
        return wrapper
def _get(self, id, headers=None, params=None):
uri = '/'.join([self.base_url, self.name, id])
uri_params = self.extra_params.copy()
uri_params.update(params if params else {})
return uri, uri_params, 'get', None, headers, True
def _get_attachments(self, id):
"""Retrieve a list of attachments associated with this Xero object."""
uri = '/'.join([self.base_url, self.name, id, 'Attachments']) + '/'
return uri, {}, 'get', None, None, False
def _get_attachment_data(self, id, filename):
"""
Retrieve the contents of a specific attachment (identified by filename).
"""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
return uri, {}, 'get', None, None, False
def get_attachment(self, id, filename, file):
"""
Retrieve the contents of a specific attachment (identified by filename).
Writes data to file object, returns length of data written.
"""
data = self.get_attachment_data(id, filename)
file.write(data)
return len(data)
def save_or_put(self, data, method='post', headers=None, summarize_errors=True):
uri = '/'.join([self.base_url, self.name])
body = {'xml': self._prepare_data_for_save(data)}
params = self.extra_params.copy()
if not summarize_errors:
params['summarizeErrors'] = 'false'
return uri, params, method, body, headers, False
    def _save(self, data):
        # Build a POST request via save_or_put (same tuple layout).
        return self.save_or_put(data, method='post')
    def _put(self, data, summarize_errors=True):
        # PUT variant of save_or_put; see save_or_put for the tuple layout.
        return self.save_or_put(data, method='put', summarize_errors=summarize_errors)
def _delete(self, id):
uri = '/'.join([self.base_url, self.name, id])
return uri, {}, 'delete', None, None, False
def _put_attachment_data(self, id, filename, data, content_type, include_online=False):
"""Upload an attachment to the Xero object."""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
params = {'IncludeOnline': 'true'} if include_online else {}
headers = {'Content-Type': content_type, 'Content-Length': str(len(data))}
return uri, params, 'put', data, headers, False
def put_attachment(self, id, filename, file, content_type, include_online=False):
"""Upload an attachment to the Xero object (from file object)."""
self.put_attachment_data(id, filename, file.read(), content_type,
include_online=include_online)
def prepare_filtering_date(self, val):
if isinstance(val, datetime):
val = val.strftime('%a, %d %b %Y %H:%M:%S GMT')
else:
val = '"%s"' % val
return {'If-Modified-Since': val}
    def _filter(self, **kwargs):
        """Build the request tuple for a filtered query.

        Keyword arguments are translated into a Xero 'where' clause, e.g.
        Name__contains='John' -> Name.Contains("John"), Amount__gt=5 ->
        Amount>5, Field__isnull=True -> Field=null.  'since' becomes an
        If-Modified-Since header, and names in KNOWN_PARAMETERS go
        straight onto the query string.
        """
        params = self.extra_params.copy()
        headers = None
        uri = '/'.join([self.base_url, self.name])
        if kwargs:
            if 'since' in kwargs:
                val = kwargs['since']
                headers = self.prepare_filtering_date(val)
                del kwargs['since']
            def get_filter_params(key, value):
                # Render the right-hand side literal: *ID fields become
                # Guid(...), booleans/dates/datetimes get their own Xero
                # query syntax, everything else is double-quoted text.
                last_key = key.split('_')[-1]
                if last_key.upper().endswith('ID'):
                    return 'Guid("%s")' % six.text_type(value)
                if key in self.BOOLEAN_FIELDS:
                    return 'true' if value else 'false'
                elif key in self.DATE_FIELDS:
                    return 'DateTime(%s,%s,%s)' % (value.year, value.month, value.day)
                elif key in self.DATETIME_FIELDS:
                    return value.isoformat()
                else:
                    return '"%s"' % six.text_type(value)
            def generate_param(key, value):
                # A '__suffix' selects the comparison; a bare key means '=='.
                parts = key.split("__")
                field = key.replace('_', '.')
                fmt = '%s==%s'
                if len(parts) == 2:
                    # support filters:
                    # Name__Contains=John becomes Name.Contains("John")
                    if parts[1] in ["contains", "startswith", "endswith"]:
                        field = parts[0]
                        fmt = ''.join(['%s.', parts[1], '(%s)'])
                    elif parts[1] in self.OPERATOR_MAPPINGS:
                        field = parts[0]
                        # Rebind key so get_filter_params sees the bare
                        # field name (affects *ID / typed-field lookups).
                        key = field
                        fmt = '%s' + self.OPERATOR_MAPPINGS[parts[1]] + '%s'
                    elif parts[1] in ["isnull"]:
                        sign = '=' if value else '!'
                        return '%s%s=null' % (parts[0], sign)
                    field = field.replace('_', '.')
                return fmt % (
                    field,
                    get_filter_params(key, value)
                )
            # Move any known parameter names to the query string
            KNOWN_PARAMETERS = ['order', 'offset', 'page', 'includeArchived']
            for param in KNOWN_PARAMETERS:
                if param in kwargs:
                    params[param] = kwargs.pop(param)
            filter_params = []
            if 'raw' in kwargs:
                raw = kwargs.pop('raw')
                filter_params.append(raw)
            # Treat any remaining arguments as filter predicates
            # Xero will break if you search without a check for null in the first position:
            # http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3
            sortedkwargs = sorted(six.iteritems(kwargs),
                key=lambda item: -1 if 'isnull' in item[0] else 0)
            for key, value in sortedkwargs:
                filter_params.append(generate_param(key, value))
            if filter_params:
                params['where'] = '&&'.join(filter_params)
        return uri, params, 'get', None, headers, False
def _all(self):
uri = '/'.join([self.base_url, self.name])
return uri, {}, 'get', None, None, False
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract size information for chrome, executed by buildbot.
When this is run, the current directory (cwd) should be the outer build
directory (e.g., chrome-release/build/).
For a list of command-line options, call this script with '--help'.
"""
import errno
import json
import platform
import optparse
import os
import re
import stat
import subprocess
import sys
import tempfile
from slave import build_directory
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..'))
class ResultsCollector(object):
  """Accumulates size results keyed by name and mirrors them to stdout."""
  def __init__(self):
    # name -> {'identifier', 'value', 'units'}
    self.results = {}
  def add_result(self, name, identifier, value, units):
    # Each result name may only be reported once per run.
    assert name not in self.results
    # NOTE: the stored value is coerced to int, while the legacy log line
    # below prints the value exactly as passed in.
    self.results[name] = {
      'identifier': identifier,
      'value': int(value),
      'units': units
    }
    # Legacy printing, previously used for parsing the text logs.
    print 'RESULT %s: %s= %s %s' % (name, identifier, value, units)
def get_size(filename):
  """Return the size of |filename| in bytes."""
  return os.stat(filename).st_size
def get_linux_stripped_size(filename):
  """Return (result, size) of |filename| after running eu-strip on it.

  result is the strip command's exit status (0 on success); on failure
  the reported size is 0.
  """
  EU_STRIP_NAME = 'eu-strip'
  # Assumes |filename| is in out/Release
  # build/linux/bin/eu-strip'
  src_dir = os.path.dirname(os.path.dirname(os.path.dirname(filename)))
  eu_strip_path = os.path.join(src_dir, 'build', 'linux', 'bin', EU_STRIP_NAME)
  if (platform.architecture()[0] == '64bit' or
      not os.path.exists(eu_strip_path)):
    # Fall back to whatever eu-strip is on $PATH.
    eu_strip_path = EU_STRIP_NAME
  with tempfile.NamedTemporaryFile() as stripped_file:
    # eu-strip overwrites the already-created temp file; the context
    # manager deletes it once the size has been read.
    strip_cmd = [eu_strip_path, '-o', stripped_file.name, filename]
    result = 0
    result, _ = run_process(result, strip_cmd)
    if result != 0:
      return (result, 0)
    return (result, get_size(stripped_file.name))
def run_process(result, command):
p = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout = p.communicate()[0]
if p.returncode != 0:
print 'ERROR from command "%s": %d' % (' '.join(command), p.returncode)
if result == 0:
result = p.returncode
return result, stdout
def print_si_fail_hint(path_to_tool):
  """Print a hint regarding how to handle a static initializer failure.

  Args:
    path_to_tool: path of the dump-static-initializers script to mention
        in the hint (callers pass a source-root-relative path).
  """
  print '# HINT: To get this list, run %s' % path_to_tool
  print '# HINT: diff against the log from the last run to see what changed'
def main_mac(options, args, results_collector):
  """Print appropriate size information about built Mac targets.

  Checks both the Chromium and Google Chrome bundle names and reports on
  the first one whose executable exists.  Returns the first non-zero
  exit status of any command it executes, zero on success, or 66 when
  neither bundle is found.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  result = 0
  # Work with either build type.
  base_names = ('Chromium', 'Google Chrome')
  for base_name in base_names:
    app_bundle = base_name + '.app'
    framework_name = base_name + ' Framework'
    framework_bundle = framework_name + '.framework'
    framework_dsym_bundle = framework_bundle + '.dSYM'
    chromium_app_dir = os.path.join(target_dir, app_bundle)
    chromium_executable = os.path.join(chromium_app_dir,
                                       'Contents', 'MacOS', base_name)
    chromium_framework_dir = os.path.join(target_dir, framework_bundle)
    chromium_framework_executable = os.path.join(chromium_framework_dir,
                                                 framework_name)
    chromium_framework_dsym_dir = os.path.join(target_dir,
                                               framework_dsym_bundle)
    chromium_framework_dsym = os.path.join(chromium_framework_dsym_dir,
                                           'Contents', 'Resources', 'DWARF',
                                           framework_name)
    if os.path.exists(chromium_executable):
      print_dict = {
        # Remove spaces in the names so any downstream processing is less
        # likely to choke.
        'app_name' : re.sub(r'\s', '', base_name),
        'app_bundle' : re.sub(r'\s', '', app_bundle),
        'framework_name' : re.sub(r'\s', '', framework_name),
        'framework_bundle' : re.sub(r'\s', '', framework_bundle),
        'app_size' : get_size(chromium_executable),
        'framework_size' : get_size(chromium_framework_executable)
      }
      # Collect the segment info out of the App
      result, stdout = run_process(result, ['size', chromium_executable])
      print_dict['app_text'], print_dict['app_data'], print_dict['app_objc'] = \
          re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
      # Collect the segment info out of the Framework
      result, stdout = run_process(result, ['size',
                                            chromium_framework_executable])
      print_dict['framework_text'], print_dict['framework_data'], \
        print_dict['framework_objc'] = \
          re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
      # Collect the whole size of the App bundle on disk (include the framework)
      result, stdout = run_process(result, ['du', '-s', '-k', chromium_app_dir])
      du_s = re.search(r'(\d+)', stdout).group(1)
      print_dict['app_bundle_size'] = (int(du_s) * 1024)
      # Count the number of files with at least one static initializer.
      # otool output is piped through the greps to isolate the size line
      # of the __mod_init_func section.
      pipes = [['otool', '-l', chromium_framework_executable],
               ['grep', '__mod_init_func', '-C', '5'],
               ['grep', 'size']]
      last_stdout = None
      for pipe in pipes:
        p = subprocess.Popen(pipe, stdin=last_stdout, stdout=subprocess.PIPE)
        last_stdout = p.stdout
      stdout = p.communicate()[0]
      # The surviving line holds the section size in hex; each pointer-sized
      # entry in that section is one initializer.
      initializers = re.search('0x([0-9a-f]+)', stdout)
      if initializers:
        initializers_s = initializers.group(1)
        if result == 0:
          result = p.returncode
      else:
        initializers_s = '0'
      word_size = 4 # Assume 32 bit
      si_count = int(initializers_s, 16) / word_size
      print_dict['initializers'] = si_count
      # For Release builds only, use dump-static-initializers.py to print the
      # list of static initializers.
      if si_count > 0 and options.target == 'Release':
        dump_static_initializers = os.path.join(
            os.path.dirname(build_dir), 'tools', 'mac',
            'dump-static-initializers.py')
        result, stdout = run_process(result, [dump_static_initializers,
                                              chromium_framework_dsym])
        print '\n# Static initializers in %s:' % chromium_framework_executable
        print_si_fail_hint('tools/mac/dump-static-initializers.py')
        print stdout
      results_collector.add_result(
          print_dict['app_name'], print_dict['app_name'],
          print_dict['app_size'], 'bytes')
      results_collector.add_result(
          '%s-__TEXT' % print_dict['app_name'], '__TEXT',
          print_dict['app_text'], 'bytes')
      results_collector.add_result(
          '%s-__DATA' % print_dict['app_name'], '__DATA',
          print_dict['app_data'], 'bytes')
      results_collector.add_result(
          '%s-__OBJC' % print_dict['app_name'], '__OBJC',
          print_dict['app_objc'], 'bytes')
      results_collector.add_result(
          print_dict['framework_name'], print_dict['framework_name'],
          print_dict['framework_size'], 'bytes')
      results_collector.add_result(
          '%s-__TEXT' % print_dict['framework_name'], '__TEXT',
          print_dict['framework_text'], 'bytes')
      results_collector.add_result(
          '%s-__DATA' % print_dict['framework_name'], '__DATA',
          print_dict['framework_data'], 'bytes')
      results_collector.add_result(
          '%s-__OBJC' % print_dict['framework_name'], '__OBJC',
          print_dict['framework_objc'], 'bytes')
      results_collector.add_result(
          print_dict['app_bundle'], print_dict['app_bundle'],
          print_dict['app_bundle_size'], 'bytes')
      results_collector.add_result(
          'chrome-si', 'initializers',
          print_dict['initializers'], 'files')
      # Found a match, don't check the other base_names.
      return result
  # If no base_names matched, fail script.
  return 66
def check_linux_binary(target_dir, binary_name, options):
  """Collect appropriate size information about the built Linux binary given.

  Returns a tuple (result, sizes). result is the first non-zero exit
  status of any command it executes, or zero on success. sizes is a list
  of tuples (name, identifier, totals_identifier, value, units).
  The printed line looks like:
    name: identifier= value units
  When this same data is used for totals across all the binaries, then
  totals_identifier is the identifier to use, or '' to just use identifier.
  """
  binary_file = os.path.join(target_dir, binary_name)
  if not os.path.exists(binary_file):
    # Don't print anything for missing files.
    return 0, []
  result = 0
  sizes = []
  def get_elf_section_size(readelf_stdout, section_name):
    # Returns a (found, size) pair for the named ELF section.
    # Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
    match = re.search(r'\.%s.*$' % re.escape(section_name),
                      readelf_stdout, re.MULTILINE)
    if not match:
      return (False, -1)
    # Field 5 of the readelf row is the section size, in hex.
    size_str = re.split(r'\W+', match.group(0))[5]
    return (True, int(size_str, 16))
  # Overall binary size, before and after stripping.
  sizes.append((binary_name, binary_name, 'size',
                get_size(binary_file), 'bytes'))
  result, stripped_size = get_linux_stripped_size(binary_file)
  sizes.append((binary_name + '-stripped', 'stripped', 'stripped',
                stripped_size, 'bytes'))
  # text/data/bss segment sizes as reported by size(1).
  result, stdout = run_process(result, ['size', binary_file])
  text, data, bss = re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
  sizes += [
    (binary_name + '-text', 'text', '', text, 'bytes'),
    (binary_name + '-data', 'data', '', data, 'bytes'),
    (binary_name + '-bss', 'bss', '', bss, 'bytes'),
  ]
  # Find the number of files with at least one static initializer.
  # First determine if we're 32 or 64 bit
  result, stdout = run_process(result, ['readelf', '-h', binary_file])
  elf_class_line = re.search('Class:.*$', stdout, re.MULTILINE).group(0)
  elf_class = re.split(r'\W+', elf_class_line)[1]
  if elf_class == 'ELF32':
    word_size = 4
  else:
    word_size = 8
  # Then find the number of files with global static initializers.
  # NOTE: this is very implementation-specific and makes assumptions
  # about how compiler and linker implement global static initializers.
  si_count = 0
  result, stdout = run_process(result, ['readelf', '-SW', binary_file])
  has_init_array, init_array_size = get_elf_section_size(stdout, 'init_array')
  if has_init_array:
    si_count = init_array_size / word_size
  si_count = max(si_count, 0)
  sizes.append((binary_name + '-si', 'initializers', '', si_count, 'files'))
  # For Release builds only, use dump-static-initializers.py to print the list
  # of static initializers.
  if si_count > 0 and options.target == 'Release':
    build_dir = os.path.dirname(target_dir)
    dump_static_initializers = os.path.join(os.path.dirname(build_dir),
                                            'tools', 'linux',
                                            'dump-static-initializers.py')
    result, stdout = run_process(result, [dump_static_initializers,
                                          '-d', binary_file])
    print '\n# Static initializers in %s:' % binary_file
    print_si_fail_hint('tools/linux/dump-static-initializers.py')
    print stdout
  # Determine if the binary has the DT_TEXTREL marker.
  result, stdout = run_process(result, ['readelf', '-Wd', binary_file])
  if re.search(r'\bTEXTREL\b', stdout) is None:
    # Nope, so the count is zero.
    count = 0
  else:
    # There are some, so count them.
    result, stdout = run_process(result, ['eu-findtextrel', binary_file])
    count = stdout.count('\n')
  sizes.append((binary_name + '-textrel', 'textrel', '', count, 'relocs'))
  return result, sizes
def main_linux(options, args, results_collector):
  """Print appropriate size information about built Linux targets.

  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  binaries = [
    'chrome',
    'nacl_helper',
    'nacl_helper_bootstrap',
    'libffmpegsumo.so',
    'libgcflashplayer.so',
    'libppGoogleNaClPluginChrome.so',
  ]
  result = 0
  # Totals across binaries, keyed by (identifier, units) tuples.
  totals = {}
  for binary in binaries:
    this_result, this_sizes = check_linux_binary(target_dir, binary, options)
    if result == 0:
      result = this_result
    for name, identifier, totals_id, value, units in this_sizes:
      results_collector.add_result(name, identifier, value, units)
      # NOTE: operator precedence makes this a tuple:
      # ((totals_id or identifier), units).
      totals_id = totals_id or identifier, units
      totals[totals_id] = totals.get(totals_id, 0) + int(value)
  files = [
    'nacl_irt_x86_64.nexe',
    'resources.pak',
  ]
  for filename in files:
    path = os.path.join(target_dir, filename)
    try:
      size = get_size(path)
    except OSError, e:
      if e.errno == errno.ENOENT:
        continue # Don't print anything for missing files.
      raise
    results_collector.add_result(filename, filename, size, 'bytes')
    # Relies on the ('size', 'bytes') total having been created by the
    # binaries loop above; raises KeyError otherwise.
    totals['size', 'bytes'] += size
  # TODO(mcgrathr): This should all be refactored so the mac and win flavors
  # also deliver data structures rather than printing, and the logic for
  # the printing and the summing totals is shared across all three flavors.
  for (identifier, units), value in sorted(totals.iteritems()):
    results_collector.add_result(
        'totals-%s' % identifier, identifier, value, units)
  return result
def check_android_binaries(binaries, target_dir, options):
  """Common method for printing size information for Android targets.

  Reuses check_linux_binary() per binary and prints each size line
  directly rather than using a results collector.  Returns the first
  non-zero exit status of any command executed, or zero on success.
  """
  result = 0
  for binary in binaries:
    this_result, this_sizes = check_linux_binary(target_dir, binary, options)
    if result == 0:
      result = this_result
    for name, identifier, _, value, units in this_sizes:
      # Slashes in the binary's path would confuse downstream log parsing.
      print 'RESULT %s: %s= %s %s' % (name.replace('/', '_'), identifier, value,
                                      units)
  return result
def main_android(options, args, results_collector):
  """Print appropriate size information about built Android targets.

  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  # Both the modern apk layout and the legacy lib/ layout are checked;
  # missing binaries are skipped by check_linux_binary().
  binaries = [
      'chrome_public_apk/libs/armeabi-v7a/libchrome.so',
      'lib/libchrome.so',
  ]
  return check_android_binaries(binaries, target_dir, options)
def main_android_webview(options, args, results_collector):
  """Print appropriate size information about Android WebViewChromium targets.

  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  return check_android_binaries(['lib/libwebviewchromium.so'],
                                target_dir, options)
def main_android_cronet(options, args, results_collector):
  """Print appropriate size information about Android Cronet targets.

  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  # One libcronet.so per packaged ABI; missing ones are skipped by
  # check_linux_binary().
  abis = ['arm64-v8a', 'armeabi-v7a', 'armeabi', 'mips', 'x86_64', 'x86']
  binaries = ['cronet_sample_apk/libs/%s/libcronet.so' % abi for abi in abis]
  return check_android_binaries(binaries, target_dir, options)
def main_win(options, args, results_collector):
  """Print appropriate size information about built Windows targets.

  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  NOTE(review): unlike the mac/linux flavors, results are printed
  directly and never registered with |results_collector|.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  chrome_dll = os.path.join(target_dir, 'chrome.dll')
  chrome_child_dll = os.path.join(target_dir, 'chrome_child.dll')
  chrome_exe = os.path.join(target_dir, 'chrome.exe')
  mini_installer_exe = os.path.join(target_dir, 'mini_installer.exe')
  setup_exe = os.path.join(target_dir, 'setup.exe')
  result = 0
  # chrome.dll and chrome.exe are unconditionally stat'ed (get_size
  # raises if they're missing); the other artifacts are optional.
  print 'RESULT chrome.dll: chrome.dll= %s bytes' % get_size(chrome_dll)
  if os.path.exists(chrome_child_dll):
    fmt = 'RESULT chrome_child.dll: chrome_child.dll= %s bytes'
    print fmt % get_size(chrome_child_dll)
  print 'RESULT chrome.exe: chrome.exe= %s bytes' % get_size(chrome_exe)
  if os.path.exists(mini_installer_exe):
    fmt = 'RESULT mini_installer.exe: mini_installer.exe= %s bytes'
    print fmt % get_size(mini_installer_exe)
  if os.path.exists(setup_exe):
    print 'RESULT setup.exe: setup.exe= %s bytes' % get_size(setup_exe)
  return result
def main():
  """Dispatch to the platform-appropriate size reporter.

  Picks a default platform from sys.platform, parses command-line
  options, runs the matching main_* function with a ResultsCollector,
  and optionally dumps the collected results to a JSON file.

  Returns the reporter's exit status, or 2 for usage errors.
  """
  if sys.platform in ('win32', 'cygwin'):
    default_platform = 'win'
  elif sys.platform.startswith('darwin'):
    default_platform = 'mac'
  elif sys.platform.startswith('linux'):
    # BUG FIX: was "sys.platform == 'linux2'", which misses the plain
    # 'linux' value reported by Python >= 3.3.
    default_platform = 'linux'
  else:
    default_platform = None
  # Map of --platform values to their implementations.
  main_map = {
    'android' : main_android,
    'android-webview' : main_android_webview,
    'android-cronet' : main_android_cronet,
    'linux' : main_linux,
    'mac' : main_mac,
    'win' : main_win,
  }
  platforms = sorted(main_map.keys())
  option_parser = optparse.OptionParser()
  option_parser.add_option('--target',
                           default='Release',
                           help='build target (Debug, Release) '
                                '[default: %default]')
  # Kept for command-line compatibility with older callers.
  option_parser.add_option('--target-dir', help='ignored')
  option_parser.add_option('--build-dir', help='ignored')
  option_parser.add_option('--platform',
                           default=default_platform,
                           help='specify platform (%s) [default: %%default]'
                                % ', '.join(platforms))
  option_parser.add_option('--json', help='Path to JSON output file')
  options, args = option_parser.parse_args()
  real_main = main_map.get(options.platform)
  if not real_main:
    if options.platform is None:
      sys.stderr.write('Unsupported sys.platform %s.\n' % repr(sys.platform))
    else:
      sys.stderr.write('Unknown platform %s.\n' % repr(options.platform))
    msg = 'Use the --platform= option to specify a supported platform:\n'
    sys.stderr.write(msg + ' ' + ' '.join(platforms) + '\n')
    return 2
  results_collector = ResultsCollector()
  rc = real_main(options, args, results_collector)
  if options.json:
    with open(options.json, 'w') as f:
      json.dump(results_collector.results, f)
  return rc
if __name__ == '__main__':
  sys.exit(main())
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import collections
import os
import tempfile
from urllib.parse import quote
from urllib.parse import unquote_to_bytes
import apache_beam as beam
from apache_beam import coders
from apache_beam.io import filesystems
from apache_beam.io import textio
from apache_beam.io import tfrecordio
from apache_beam.transforms import combiners
class CacheManager(object):
  """Abstract class for caching PCollections.
  A PCollection cache is identified by labels, which consist of a prefix (either
  'full' or 'sample') and a cache_label which is a hash of the PCollection
  derivation.
  """
  def exists(self, *labels):
    # type: (*str) -> bool
    """Returns True if the PCollection cache exists."""
    raise NotImplementedError
  def is_latest_version(self, version, *labels):
    # type: (str, *str) -> bool
    """Returns True if the given version number is the latest."""
    return version == self._latest_version(*labels)
  def _latest_version(self, *labels):
    # type: (*str) -> str
    """Returns the latest version number of the PCollection cache."""
    raise NotImplementedError
  def read(self, *labels, **args):
    # type: (*str, Dict[str, Any]) -> Tuple[str, Generator[Any]]
    """Return the PCollection as a list as well as the version number.
    Args:
      *labels: List of labels for PCollection instance.
      **args: Dict of additional arguments. Currently only 'tail' as a boolean.
        When tail is True, will wait and read new elements until the cache is
        complete.
    Returns:
      A tuple containing an iterator for the items in the PCollection and the
      version number.
    It is possible that the version numbers from read() and _latest_version()
    are different. This usually means that the cache's been evicted (thus
    unavailable => read() returns version = -1), but it had reached version n
    before eviction.
    """
    raise NotImplementedError
  def write(self, value, *labels):
    # type: (Any, *str) -> None
    """Writes the value to the given cache.
    Args:
      value: An encodable (with corresponding PCoder) value
      *labels: List of labels for PCollection instance
    """
    raise NotImplementedError
  def clear(self, *labels):
    # type: (*str) -> bool
    """Clears the cache entry of the given labels and returns True on success.
    Args:
      *labels: List of labels for PCollection instance
    """
    raise NotImplementedError
  def source(self, *labels):
    # type: (*str) -> ptransform.PTransform
    """Returns a PTransform that reads the PCollection cache."""
    raise NotImplementedError
  def sink(self, labels, is_capture=False):
    # type: (*str, bool) -> ptransform.PTransform
    """Returns a PTransform that writes the PCollection cache.
    TODO(BEAM-10514): Make sure labels will not be converted into an
    arbitrarily long file path: e.g., windows has a 260 path limit.
    """
    raise NotImplementedError
  def save_pcoder(self, pcoder, *labels):
    # type: (coders.Coder, *str) -> None
    """Saves pcoder for given PCollection.
    Correct reading of PCollection from Cache requires PCoder to be known.
    This method saves desired PCoder for PCollection that will subsequently
    be used by sink(...), source(...), and, most importantly, read(...) method.
    The latter must be able to read a PCollection written by Beam using
    non-Beam IO.
    Args:
      pcoder: A PCoder to be used for reading and writing a PCollection.
      *labels: List of labels for PCollection instance.
    """
    raise NotImplementedError
  def load_pcoder(self, *labels):
    # type: (*str) -> coders.Coder
    """Returns previously saved PCoder for reading and writing PCollection."""
    raise NotImplementedError
  def cleanup(self):
    # type: () -> None
    """Cleans up all the PCollection caches."""
    raise NotImplementedError
  def size(self, *labels):
    # type: (*str) -> int
    """Returns the size of the PCollection on disk in bytes."""
    raise NotImplementedError
class FileBasedCacheManager(CacheManager):
  """Maps PCollections to local temp files for materialization."""
  # Reader/writer transform pairs for each supported on-disk format.
  _available_formats = {
      'text': (textio.ReadFromText, textio.WriteToText),
      'tfrecord': (tfrecordio.ReadFromTFRecord, tfrecordio.WriteToTFRecord)
  }
  def __init__(self, cache_dir=None, cache_format='text'):
    """Args:
      cache_dir: directory to store cache files in; a fresh temp directory
        is created when omitted.
      cache_format: one of the keys of _available_formats.
    Raises:
      ValueError: if cache_format is not a supported format.
    """
    if cache_dir:
      self._cache_dir = cache_dir
    else:
      self._cache_dir = tempfile.mkdtemp(
          prefix='it-', dir=os.environ.get('TEST_TMPDIR', None))
    # BUG FIX: _CacheVersion is a module-level class here, not an attribute
    # of this instance, so the previous "self._CacheVersion()" raised
    # AttributeError the first time the default factory fired
    # (in _latest_version).
    self._versions = collections.defaultdict(lambda: _CacheVersion())
    self.cache_format = cache_format
    if cache_format not in self._available_formats:
      raise ValueError("Unsupported cache format: '%s'." % cache_format)
    self._reader_class, self._writer_class = self._available_formats[
        cache_format]
    # The 'text' format needs special-character escaping; other formats
    # rely on the pcoder saved per PCollection.
    self._default_pcoder = (
        SafeFastPrimitivesCoder() if cache_format == 'text' else None)
    # List of saved pcoders keyed by PCollection path. It is OK to keep this
    # list in memory because once FileBasedCacheManager object is
    # destroyed/re-created it loses the access to previously written cache
    # objects anyways even if cache_dir already exists. In other words,
    # it is not possible to resume execution of Beam pipeline from the
    # saved cache if FileBasedCacheManager has been reset.
    #
    # However, if we are to implement better cache persistence, one needs
    # to take care of keeping consistency between the cached PCollection
    # and its PCoder type.
    self._saved_pcoders = {}
  def size(self, *labels):
    """Returns the total on-disk size (bytes) of the labels' shard files."""
    if self.exists(*labels):
      return sum(os.path.getsize(path) for path in self._match(*labels))
    return 0
  def exists(self, *labels):
    """Returns True when at least one cache shard exists for the labels."""
    return bool(self._match(*labels))
  def _latest_version(self, *labels):
    """Returns a version number derived from the newest shard's mtime."""
    timestamp = 0
    for path in self._match(*labels):
      timestamp = max(timestamp, filesystems.FileSystems.last_updated(path))
    result = self._versions["-".join(labels)].get_version(timestamp)
    return result
  def save_pcoder(self, pcoder, *labels):
    self._saved_pcoders[self._path(*labels)] = pcoder
  def load_pcoder(self, *labels):
    # Prefer the format's default coder; otherwise fall back to the coder
    # saved for this specific PCollection path.
    return (
        self._default_pcoder if self._default_pcoder is not None else
        self._saved_pcoders[self._path(*labels)])
  def read(self, *labels, **args):
    """Returns (element iterator, version) for the cached PCollection."""
    # Return an iterator to an empty list if it doesn't exist.
    if not self.exists(*labels):
      return iter([]), -1
    # Otherwise, return a generator to the cached PCollection.
    source = self.source(*labels)._source
    range_tracker = source.get_range_tracker(None, None)
    reader = source.read(range_tracker)
    version = self._latest_version(*labels)
    return reader, version
  def write(self, values, *labels):
    """Writes every element of *values* through the labels' sink."""
    sink = self.sink(labels)._sink
    path = self._path(*labels)
    init_result = sink.initialize_write()
    writer = sink.open_writer(init_result, path)
    for v in values:
      writer.write(v)
    writer.close()
  def clear(self, *labels):
    """Deletes all shards for the labels; returns True if any existed."""
    if self.exists(*labels):
      filesystems.FileSystems.delete(self._match(*labels))
      return True
    return False
  def source(self, *labels):
    return self._reader_class(
        self._glob_path(*labels), coder=self.load_pcoder(*labels))
  def sink(self, labels, is_capture=False):
    return self._writer_class(
        self._path(*labels), coder=self.load_pcoder(*labels))
  def cleanup(self):
    """Deletes the whole cache directory and forgets saved pcoders."""
    if filesystems.FileSystems.exists(self._cache_dir):
      filesystems.FileSystems.delete([self._cache_dir])
    self._saved_pcoders = {}
  def _glob_path(self, *labels):
    # Matches the '<path>-#####-of-#####' shard naming used by the writers.
    return self._path(*labels) + '-*-of-*'
  def _path(self, *labels):
    return filesystems.FileSystems.join(self._cache_dir, *labels)
  def _match(self, *labels):
    """Returns the concrete file paths matching the labels' shard glob."""
    match = filesystems.FileSystems.match([self._glob_path(*labels)])
    assert len(match) == 1
    return [metadata.path for metadata in match[0].metadata_list]
class _CacheVersion(object):
"""This class keeps track of the timestamp and the corresponding version."""
def __init__(self):
self.current_version = -1
self.current_timestamp = 0
def get_version(self, timestamp):
"""Updates version if necessary and returns the version number.
Args:
timestamp: (int) unix timestamp when the cache is updated. This value is
zero if the cache has been evicted or doesn't exist.
"""
# Do not update timestamp if the cache's been evicted.
if timestamp != 0 and timestamp != self.current_timestamp:
assert timestamp > self.current_timestamp
self.current_version = self.current_version + 1
self.current_timestamp = timestamp
return self.current_version
class ReadCache(beam.PTransform):
  """A PTransform that reads a previously cached PCollection back out."""
  def __init__(self, cache_manager, label):
    self._cache_manager = cache_manager
    self._label = label
  def expand(self, pbegin):
    # The cache manager hands back a read transform over the 'full'
    # (non-sampled) cache entry for this label.
    source_transform = self._cache_manager.source('full', self._label)
    # pylint: disable=expression-not-assigned
    return pbegin | 'Read' >> source_transform
class WriteCache(beam.PTransform):
  """A PTransform that writes the PCollections to the cache."""
  def __init__(
      self,
      cache_manager,
      label,
      sample=False,
      sample_size=0,
      is_capture=False):
    # cache_manager: cache backend used to save pcoders and obtain the sink.
    # label: cache key component identifying this PCollection.
    # sample: when True, cache only a fixed-size sample of the PCollection.
    # sample_size: number of elements kept when sampling.
    # is_capture: forwarded to the cache manager's sink.
    self._cache_manager = cache_manager
    self._label = label
    self._sample = sample
    self._sample_size = sample_size
    self._is_capture = is_capture
  def expand(self, pcoll):
    """Optionally sample ``pcoll``, then write it to the cache sink."""
    prefix = 'sample' if self._sample else 'full'
    # We save pcoder that is necessary for proper reading of
    # cached PCollection. _cache_manager.sink(...) call below
    # should be using this saved pcoder.
    self._cache_manager.save_pcoder(
        coders.registry.get_coder(pcoll.element_type), prefix, self._label)
    if self._sample:
      # FixedSizeGlobally yields a single list; FlatMap unpacks it back
      # into individual elements before writing.
      pcoll |= 'Sample' >> (
          combiners.Sample.FixedSizeGlobally(self._sample_size)
          | beam.FlatMap(lambda sample: sample))
    # pylint: disable=expression-not-assigned
    return pcoll | 'Write' >> self._cache_manager.sink(
        (prefix, self._label), is_capture=self._is_capture)
class SafeFastPrimitivesCoder(coders.Coder):
  """Coder that adds a quote/unquote step around FastPrimitivesCoder output
  to escape special characters."""
  # pylint: disable=deprecated-urllib-function
  def encode(self, value):
    # quote() percent-escapes the raw coder bytes; the result is re-encoded
    # to bytes because coders must emit bytes, not str.
    return quote(
        coders.coders.FastPrimitivesCoder().encode(value)).encode('utf-8')
  def decode(self, value):
    # Inverse of encode(): unquote back to the raw bytes, then decode.
    return coders.coders.FastPrimitivesCoder().decode(unquote_to_bytes(value))
| |
# Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
import sys
import traceback
from gevent import core
from gevent.hub import greenlet, getcurrent, get_hub, GreenletExit, Waiter
from gevent.timeout import Timeout
__all__ = ['Greenlet',
'joinall',
'killall']
class SpawnedLink(object):
    """A wrapper around link that calls it in another greenlet.
    Can be called only from main loop.
    """
    __slots__ = ['callback']
    def __init__(self, callback):
        self.callback = callback
    def __call__(self, source):
        # Run the callback in a fresh greenlet parented to the hub, so a
        # slow or blocking callback cannot stall link notification.
        g = greenlet(self.callback, get_hub())
        g.switch(source)
    def __hash__(self):
        return hash(self.callback)
    def __eq__(self, other):
        # Equal to another wrapper with the same callback, or to the raw
        # callable itself (enables unlink(callable)).
        return self.callback == getattr(other, 'callback', other)
    def __str__(self):
        return str(self.callback)
    def __repr__(self):
        return repr(self.callback)
    def __getattr__(self, item):
        # Delegate everything else to the wrapped callable.
        assert item != 'callback'
        return getattr(self.callback, item)
class SuccessSpawnedLink(SpawnedLink):
    """A SpawnedLink that only fires when *source* finished successfully.
    Can be called only from main loop.
    """
    __slots__ = []
    def __call__(self, source):
        # Failed greenlets are ignored by this link type.
        if not source.successful():
            return None
        return SpawnedLink.__call__(self, source)
class FailureSpawnedLink(SpawnedLink):
    """A SpawnedLink that only fires when *source* failed.
    Can be called only from main loop.
    """
    __slots__ = []
    def __call__(self, source):
        # Successful greenlets are ignored by this link type.
        if source.successful():
            return None
        return SpawnedLink.__call__(self, source)
class GreenletLink(object):
    """A wrapper around a greenlet that, when called, raises a LinkedExited
    subclass in that greenlet describing how *source* finished.
    Can be called only from main loop.
    """
    __slots__ = ['greenlet']
    def __init__(self, greenlet):
        self.greenlet = greenlet
    def __call__(self, source):
        # Translate the source's outcome into the matching LinkedExited
        # subclass, then raise it inside the linked greenlet.
        if not source.successful():
            error = LinkedFailed(source)
        elif isinstance(source.value, GreenletExit):
            error = LinkedKilled(source)
        else:
            error = LinkedCompleted(source)
        self.greenlet.throw(error)
    def __hash__(self):
        return hash(self.greenlet)
    def __eq__(self, other):
        # Equal to another wrapper with the same greenlet, or to the raw
        # greenlet itself (enables unlink(greenlet)).
        return self.greenlet == getattr(other, 'greenlet', other)
    def __str__(self):
        return str(self.greenlet)
    def __repr__(self):
        return repr(self.greenlet)
class SuccessGreenletLink(GreenletLink):
    """A GreenletLink that only fires when *source* finished successfully.
    Can be called only from main loop.
    """
    __slots__ = []
    def __call__(self, source):
        # Failures are ignored by this link type.
        if not source.successful():
            return None
        return GreenletLink.__call__(self, source)
class FailureGreenletLink(GreenletLink):
    """A GreenletLink that only fires when *source* failed.
    Can be called only from main loop.
    """
    __slots__ = []
    def __call__(self, source):
        # Successful completion is ignored by this link type.
        if source.successful():
            return None
        return GreenletLink.__call__(self, source)
class Greenlet(greenlet):
    """A light-weight cooperatively-scheduled execution unit."""
    def __init__(self, run=None, *args, **kwargs):
        greenlet.__init__(self, parent=get_hub())
        if run is not None:
            self._run = run
        self.args = args
        self.kwargs = kwargs
        # Callables / *Link wrappers to notify when execution finishes.
        self._links = []
        # Result returned by the function; stays None until it finishes.
        self.value = None
        # _NONE sentinel = still running; None = finished OK; otherwise the
        # exception instance the function raised.
        self._exception = _NONE
        # Pending active_event that will run _notify_links, if scheduled.
        self._notifier = None
        # Event (core.active_event / core.timer) that will first switch into
        # this greenlet; cleared once it actually starts.
        self._start_event = None
    @property
    def started(self):
        # True when scheduled to start, or already running/finished.
        return self._start_event is not None or bool(self)
    def ready(self):
        """Return true if and only if the greenlet has finished execution."""
        return self.dead or self._exception is not _NONE
    def successful(self):
        """Return true if and only if the greenlet has finished execution successfully,
        that is, without raising an error."""
        return self._exception is None
    def __repr__(self):
        classname = self.__class__.__name__
        result = '<%s at %s' % (classname, hex(id(self)))
        formatted = self._formatinfo()
        if formatted:
            result += ': ' + formatted
        return result + '>'
    def _formatinfo(self):
        # Build (and cache) a human-readable "func(args)" summary for
        # __repr__; returns '' when the run function is unavailable.
        try:
            return self._formatted_info
        except AttributeError:
            pass
        try:
            result = getfuncname(self.__dict__['_run'])
        except Exception:
            pass
        else:
            args = []
            if self.args:
                args = [repr(x)[:50] for x in self.args]
            if self.kwargs:
                args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self.kwargs.items()])
            if args:
                result += '(' + ', '.join(args) + ')'
            # it is important to save the result here, because once the greenlet exits '_run' attribute will be removed
            self._formatted_info = result
            return result
        return ''
    @property
    def exception(self):
        """Holds the exception instance raised by the function if the greenlet has finished with an error.
        Otherwise ``None``.
        """
        if self._exception is not _NONE:
            return self._exception
    def throw(self, *args):
        """Immediatelly switch into the greenlet and raise an exception in it.
        Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
        To raise an exception in a safely manner from any greenlet, use :meth:`kill`.
        If a greenlet was started but never switched to yet, then also
        a) cancel the event that will start it
        b) fire the notifications as if an exception was raised in a greenlet
        """
        if self._start_event is not None:
            self._start_event.cancel()
            self._start_event = None
        try:
            greenlet.throw(self, *args)
        finally:
            if self._exception is _NONE and self.dead:
                # the greenlet was not started yet, so _report_error was not called, so
                # the result was not set and the links weren't notified. let's do it here.
                # checking that self.dead is true is essential, because the exception raised by
                # throw() could have been cancelled by the greenlet's function.
                if len(args) == 1:
                    # Normalize the single argument into a (type, value,
                    # traceback) triple for _report_error.
                    arg = args[0]
                    #if isinstance(arg, type):
                    if type(arg) is type(Exception):
                        args = (arg, arg(), None)
                    else:
                        args = (type(arg), arg, None)
                elif not args:
                    args = (GreenletExit, GreenletExit(), None)
                self._report_error(args)
    def start(self):
        """Schedule the greenlet to run in this loop iteration"""
        assert not self.started, 'Greenlet already started'
        self._start_event = core.active_event(self.switch)
    def start_later(self, seconds):
        """Schedule the greenlet to run in the future loop iteration *seconds* later"""
        assert not self.started, 'Greenlet already started'
        self._start_event = core.timer(seconds, self.switch)
    @classmethod
    def spawn(cls, *args, **kwargs):
        """Return a new :class:`Greenlet` object, scheduled to start.
        The arguments are passed to :meth:`Greenlet.__init__`.
        """
        g = cls(*args, **kwargs)
        g.start()
        return g
    @classmethod
    def spawn_later(cls, seconds, *args, **kwargs):
        """Return a Greenlet object, scheduled to start *seconds* later.
        The arguments are passed to :meth:`Greenlet.__init__`.
        """
        g = cls(*args, **kwargs)
        g.start_later(seconds)
        return g
    @classmethod
    def spawn_link(cls, *args, **kwargs):
        # Spawn and link completion back to the current greenlet.
        g = cls.spawn(*args, **kwargs)
        g.link()
        return g
    @classmethod
    def spawn_link_value(cls, *args, **kwargs):
        # Spawn and link successful completion back to the current greenlet.
        g = cls.spawn(*args, **kwargs)
        g.link_value()
        return g
    @classmethod
    def spawn_link_exception(cls, *args, **kwargs):
        # Spawn and link failure back to the current greenlet.
        g = cls.spawn(*args, **kwargs)
        g.link_exception()
        return g
    def kill(self, exception=GreenletExit, block=True, timeout=None):
        """Raise the exception in the greenlet.
        If block is ``True`` (the default), wait until the greenlet dies or the optional timeout expires.
        If block is ``False``, the current greenlet is not unscheduled.
        The function always returns ``None`` and never raises an error.
        `Changed in version 0.13.0:` *block* is now ``True`` by default.
        """
        if self._start_event is not None:
            self._start_event.cancel()
            self._start_event = None
        if not self.dead:
            waiter = Waiter()
            # Schedule _kill to run in the hub; throw() must not be called
            # directly from an arbitrary greenlet (see throw()'s docstring).
            core.active_event(_kill, self, exception, waiter)
            if block:
                waiter.get()
                self.join(timeout)
        # it should be OK to use kill() in finally or kill a greenlet from more than one place;
        # thus it should not raise when the greenlet is already killed (= not started)
    def get(self, block=True, timeout=None):
        """Return the result the greenlet has returned or re-raise the exception it has raised.
        If block is ``False``, raise :class:`gevent.Timeout` if the greenlet is still alive.
        If block is ``True``, unschedule the current greenlet until the result is available
        or the timeout expires. In the latter case, :class:`gevent.Timeout` is raised.
        """
        if self.ready():
            if self.successful():
                return self.value
            else:
                raise self._exception
        if block:
            # Park the current greenlet: it is resumed by _notify_links
            # calling switch(self) when this greenlet finishes.
            switch = getcurrent().switch
            self.rawlink(switch)
            try:
                t = Timeout.start_new(timeout)
                try:
                    result = self.parent.switch()
                    assert result is self, 'Invalid switch into Greenlet.get(): %r' % (result, )
                finally:
                    t.cancel()
            except:
                # unlinking in 'except' instead of finally is an optimization:
                # if switch occurred normally then link was already removed in _notify_links
                # and there's no need to touch the links set.
                # Note, however, that if "Invalid switch" assert was removed and invalid switch
                # did happen, the link would remain, causing another invalid switch later in this greenlet.
                self.unlink(switch)
                raise
            if self.ready():
                if self.successful():
                    return self.value
                else:
                    raise self._exception
        else:
            raise Timeout
    def join(self, timeout=None):
        """Wait until the greenlet finishes or *timeout* expires.
        Return ``None`` regardless.
        """
        if self.ready():
            return
        else:
            switch = getcurrent().switch
            self.rawlink(switch)
            try:
                t = Timeout.start_new(timeout)
                try:
                    result = self.parent.switch()
                    assert result is self, 'Invalid switch into Greenlet.join(): %r' % (result, )
                finally:
                    t.cancel()
            except Timeout, ex:
                self.unlink(switch)
                # Our own join-timeout expiring is the expected exit; any
                # other Timeout instance belongs to someone else — re-raise.
                if ex is not t:
                    raise
            except:
                self.unlink(switch)
                raise
    def _report_result(self, result):
        # Record a successful result and schedule link notification.
        self._exception = None
        self.value = result
        if self._links and self._notifier is None:
            self._notifier = core.active_event(self._notify_links)
    def _report_error(self, exc_info):
        exception = exc_info[1]
        # GreenletExit is a "clean kill": reported as a successful result.
        if isinstance(exception, GreenletExit):
            self._report_result(exception)
            return
        try:
            traceback.print_exception(*exc_info)
        except:
            pass
        self._exception = exception
        if self._links and self._notifier is None:
            self._notifier = core.active_event(self._notify_links)
        info = str(self) + ' failed with '
        try:
            info += self._exception.__class__.__name__
        except Exception:
            info += str(self._exception) or repr(self._exception)
        sys.stderr.write(info + '\n\n')
    def run(self):
        try:
            self._start_event = None
            try:
                result = self._run(*self.args, **self.kwargs)
            except:
                self._report_error(sys.exc_info())
                return
            self._report_result(result)
        finally:
            # Drop references so the function and its arguments can be
            # garbage-collected once the greenlet has finished.
            self.__dict__.pop('_run', None)
            self.__dict__.pop('args', None)
            self.__dict__.pop('kwargs', None)
    def rawlink(self, callback):
        """Register a callable to be executed when the greenlet finishes the execution.
        WARNING: the callable will be called in the HUB greenlet.
        """
        if not callable(callback):
            raise TypeError('Expected callable: %r' % (callback, ))
        self._links.append(callback)
        # Already finished: fire the notification on the next loop iteration.
        if self.ready() and self._notifier is None:
            self._notifier = core.active_event(self._notify_links)
    def link(self, receiver=None, GreenletLink=GreenletLink, SpawnedLink=SpawnedLink):
        """Link greenlet's completion to callable or another greenlet.
        If *receiver* is a callable then it will be called with this instance as an argument
        once this greenlet's dead. A callable is called in its own greenlet.
        If *receiver* is a greenlet then an :class:`LinkedExited` exception will be
        raised in it once this greenlet's dead.
        If *receiver* is ``None``, link to the current greenlet.
        Always asynchronous, unless receiver is a current greenlet and the result is ready.
        If this greenlet is already dead, then notification will performed in this loop
        iteration as soon as this greenlet switches to the hub.
        """
        current = getcurrent()
        if receiver is None or receiver is current:
            receiver = GreenletLink(current)
            if self.ready():
                # special case : linking to current greenlet when the result is ready
                # raise LinkedExited immediatelly
                receiver(self)
                return
        elif not callable(receiver):
            if isinstance(receiver, greenlet):
                receiver = GreenletLink(receiver)
            else:
                raise TypeError('Expected callable or greenlet: %r' % (receiver, ))
        else:
            receiver = SpawnedLink(receiver)
        self.rawlink(receiver)
    def unlink(self, receiver=None):
        """Remove the receiver set by :meth:`link` or :meth:`rawlink`"""
        if receiver is None:
            receiver = getcurrent()
        # discarding greenlets when we have GreenletLink instances in _links works, because
        # a GreenletLink instance pretends to be a greenlet, hash-wise and eq-wise
        try:
            self._links.remove(receiver)
        except ValueError:
            pass
    def link_value(self, receiver=None, GreenletLink=SuccessGreenletLink, SpawnedLink=SuccessSpawnedLink):
        """Like :meth:`link` but *receiver* is only notified when the greenlet has completed successfully"""
        self.link(receiver=receiver, GreenletLink=GreenletLink, SpawnedLink=SpawnedLink)
    def link_exception(self, receiver=None, GreenletLink=FailureGreenletLink, SpawnedLink=FailureSpawnedLink):
        """Like :meth:`link` but *receiver* is only notified when the greenlet dies because of unhandled exception"""
        self.link(receiver=receiver, GreenletLink=GreenletLink, SpawnedLink=SpawnedLink)
    def _notify_links(self):
        # Runs in the hub: deliver completion to every registered link;
        # a failing link never prevents the remaining links from firing.
        try:
            while self._links:
                link = self._links.pop()
                try:
                    link(self)
                except:
                    traceback.print_exc()
                    try:
                        sys.stderr.write('Failed to notify link %s of %r\n\n' % (getfuncname(link), self))
                    except:
                        traceback.print_exc()
        finally:
            self._notifier = None
def _kill(greenlet, exception, waiter):
    # Runs in the hub (scheduled by Greenlet.kill): raise *exception* in
    # *greenlet*, then wake the greenlet blocked in kill() regardless of
    # whether the throw succeeded.
    try:
        greenlet.throw(exception)
    except:
        traceback.print_exc()
    waiter.switch()
def joinall(greenlets, timeout=None, raise_error=False):
    """Wait until all *greenlets* finish or *timeout* expires.

    If *raise_error* is true, re-raise the exception of the first greenlet
    that fails; otherwise failures are ignored.
    """
    from gevent.queue import Queue
    queue = Queue()
    put = queue.put
    timeout = Timeout.start_new(timeout)
    try:
        try:
            # Each greenlet pushes itself onto the queue when it finishes.
            for greenlet in greenlets:
                greenlet.rawlink(put)
            if raise_error:
                for _ in xrange(len(greenlets)):
                    greenlet = queue.get()
                    if not greenlet.successful():
                        raise greenlet.exception
            else:
                for _ in xrange(len(greenlets)):
                    queue.get()
        except:
            # Unlink everything so late finishers do not push into a queue
            # nobody reads; swallow only our own timeout expiring.
            for greenlet in greenlets:
                greenlet.unlink(put)
            if sys.exc_info()[1] is not timeout:
                raise
    finally:
        timeout.cancel()
def _killall3(greenlets, exception, waiter):
    # Runs in the hub (scheduled by killall with block=True): throw
    # *exception* into every live greenlet and hand the caller back the
    # list of greenlets that survived the throw ("diehards").
    diehards = []
    for g in greenlets:
        if not g.dead:
            try:
                g.throw(exception)
            except:
                traceback.print_exc()
            if not g.dead:
                diehards.append(g)
    waiter.switch(diehards)
def _killall(greenlets, exception):
    # Runs in the hub (scheduled by killall with block=False):
    # fire-and-forget variant of _killall3 — no survivor reporting.
    for g in greenlets:
        if not g.dead:
            try:
                g.throw(exception)
            except:
                traceback.print_exc()
def killall(greenlets, exception=GreenletExit, block=True, timeout=None):
    """Raise *exception* in every greenlet of *greenlets*.

    With *block* true (the default), wait until they are all dead or
    *timeout* expires; otherwise just schedule the kills and return.
    """
    if block:
        waiter = Waiter()
        core.active_event(_killall3, greenlets, exception, waiter)
        # NOTE(review): this inner 'if block:' is always true here —
        # redundant but preserved as-is.
        if block:
            t = Timeout.start_new(timeout)
            try:
                # _killall3 reports back the greenlets that survived the
                # initial throw; join them until they actually die.
                alive = waiter.get()
                if alive:
                    joinall(alive, raise_error=False)
            finally:
                t.cancel()
    else:
        core.active_event(_killall, greenlets, exception)
class LinkedExited(Exception):
    """Base class for exceptions raised in a greenlet linked to another."""
    pass
class LinkedCompleted(LinkedExited):
    """Raised when a linked greenlet finishes the execution cleanly"""
    msg = "%r completed successfully"
    def __init__(self, source):
        # Only meaningful for a finished, successful source greenlet.
        assert source.ready(), source
        assert source.successful(), source
        LinkedExited.__init__(self, self.msg % source)
class LinkedKilled(LinkedCompleted):
    """Raised when a linked greenlet returns GreenletExit instance"""
    msg = "%r returned %s"
    def __init__(self, source):
        try:
            # source.value holds the GreenletExit instance the greenlet
            # "returned" after being killed; show its class name.
            result = source.value.__class__.__name__
        except:
            result = str(source) or repr(source)
        LinkedExited.__init__(self, self.msg % (source, result))
class LinkedFailed(LinkedExited):
    """Raised when a linked greenlet dies because of unhandled exception"""
    msg = "%r failed with %s"
    def __init__(self, source):
        exception = source.exception
        try:
            excname = exception.__class__.__name__
        except:
            # Fall back to a string form if the class name is unavailable.
            excname = str(exception) or repr(exception)
        LinkedExited.__init__(self, self.msg % (source, excname))
def getfuncname(func):
    """Best-effort human-readable name for *func*.

    Returns ``func.__name__`` for plain (unbound) functions with a useful
    name; falls back to ``repr(func)`` for bound methods (Python 2
    ``im_self``), lambdas, and objects without a ``__name__``.
    """
    if hasattr(func, 'im_self'):
        # Bound method: __name__ alone would be ambiguous; use repr().
        return repr(func)
    try:
        name = func.__name__
    except AttributeError:
        return repr(func)
    if name == '<lambda>':
        return repr(func)
    return name
# Sentinel distinguishing "greenlet has not finished yet" from a result or
# exception that is literally None.
_NONE = Exception("Neither exception nor value")
| |
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import re
import os
import sys
import glob
from CPAC.utils.datasource import create_grp_analysis_dataflow
from CPAC.utils import Configuration
from CPAC.utils.utils import prepare_gp_links
from CPAC.group_analysis import create_group_analysis
def prep_group_analysis_workflow(c, group_config_file, resource, subject_infos, threshold_val):
#
# this function runs once per output file during group analysis
#
import yaml
import commands
# p_id = a list of pipeline IDs, i.e. the name of the output folder for
# the strat
# s_ids = a list of all the subject IDs
# scan_ids = a list of scan IDs
# s_paths = a list of all of the filepaths of this particular output
# file that prep_group_analysis_workflow is being called for
p_id, s_ids, scan_ids, s_paths = (list(tup) for tup in zip(*subject_infos))
try:
group_conf = Configuration(yaml.load(open(os.path.realpath(group_config_file), 'r')))
except Exception as e:
err_string = "\n\n[!] CPAC says: Could not read group model " \
"configuration YML file. Ensure you have read access " \
"for the file and that it is formatted properly.\n\n" \
"Configuration file: %s\n\nError details: %s" \
% (group_config_file, e)
raise Exception(err_string)
group_sublist_file = open(group_conf.subject_list, 'r')
group_sublist_items = group_sublist_file.readlines()
group_sublist = [line.rstrip('\n') for line in group_sublist_items \
if not (line == '\n') and not line.startswith('#')]
# list of subjects for which paths which DO exist
exist_paths = []
# paths to the actual derivatives for those subjects
derivative_paths = []
z_threshold = float(group_conf.z_threshold[0])
p_threshold = float(group_conf.p_threshold[0])
custom_confile = group_conf.custom_contrasts
if ((custom_confile == None) or (custom_confile == '') or \
("None" in custom_confile)):
if (len(group_conf.f_tests) == 0) or (group_conf.f_tests == None):
fTest = False
else:
fTest = True
else:
if not os.path.exists(custom_confile):
errmsg = "\n[!] CPAC says: You've specified a custom contrasts " \
".CSV file for your group model, but this file cannot " \
"be found. Please double-check the filepath you have " \
"entered.\n\nFilepath: %s\n\n" % custom_confile
raise Exception(errmsg)
evs = open(custom_confile, 'r').readline()
evs = evs.rstrip('\r\n').split(',')
count_ftests = 0
fTest = False
for ev in evs:
if "f_test" in ev:
count_ftests += 1
if count_ftests > 0:
fTest = True
''' begin iteration through group subject list for processing '''
print "Sorting through subject list to check for missing outputs " \
"for %s..\n" % resource
for ga_sub in group_sublist:
# Strip out carriage-return character if it is there
if ga_sub.endswith('\r'):
ga_sub = ga_sub.rstrip('\r')
# ga_sub = subject ID taken off the group analysis subject list
# let's check to make sure the subject list is formatted for
# repeated measures properly if repeated measures is enabled
# and vice versa
if (group_conf.repeated_measures == True) and (',' not in ga_sub):
print '\n\n'
print '[!] CPAC says: The group analysis subject list ' \
'is not in the appropriate format for repeated ' \
'measures.\n'
print 'Please use the appropriate format as described in ' \
'the CPAC User Guide or turn off Repeated Measures ' \
'in the CPAC pipeline configuration editor, found ' \
'in the \'Group Analysis Settings\' tab of the ' \
'pipeline configuration editor.\n'
print 'NOTE: CPAC generates a properly-formatted group ' \
'analysis subject list meant for running repeated ' \
'measures when you create your original subject ' \
'list. Look for \'subject_list_group_analysis_' \
'repeated_measures.txt\' in the directory where ' \
'you created your subject list.\n\n'
raise Exception
elif (group_conf.repeated_measures == False) and (',' in ga_sub):
print '\n\n'
print '[!] CPAC says: It looks like your group analysis ' \
'subject list is formatted for running repeated ' \
'measures, but \'Run Repeated Measures\' is not ' \
'enabled in the pipeline configuration, found in ' \
'the \'Group Analysis Settings\' tab of the ' \
'pipeline configuration editor.\n'
print 'Double-check your pipeline configuration?\n\n'
raise Exception
''' process subject ids for repeated measures, if it is on '''
# if repeated measures is being run and the subject list
# is a list of subject IDs and scan IDs concatenated
if (group_conf.repeated_measures == True):
# sub.count(',') equals 1 when there is either multiple scans
# or multiple sessions but not both, for repeated measures
# sub.count(',') equals 2 when there are multiple sessions
# AND scans, for repeated measures
if ga_sub.count(',') == 1:
sub_id = ga_sub.split(',',1)[0]
other_id = ga_sub.split(',',1)[1]
elif ga_sub.count(',') == 2:
sub_id = ga_sub.split(',',2)[0]
scan_id = ga_sub.split(',',2)[1]
session_id = ga_sub.split(',',2)[2]
''' drop subjects from the group subject list '''
# check the path files in path_files_here folder in the
# subject's output folder - and drop any subjects from the
# group analysis subject list which do not exist in the paths
# to the output files
'''
REVISIT THIS LATER to establish a potentially better way to
pull output paths (instead of path_files_here)
'''
for path in s_paths:
if (group_conf.repeated_measures == True):
if ga_sub.count(',') == 1:
if (sub_id in path) and (other_id in path):
exist_paths.append(ga_sub)
derivative_paths.append(path)
elif ga_sub.count(',') == 2:
if (sub_id in path) and (scan_id in path) and \
(session_id in path):
exist_paths.append(ga_sub)
derivative_paths.append(path)
else:
if ga_sub in path:
exist_paths.append(ga_sub)
derivative_paths.append(path)
# END subject-dropping!
if len(derivative_paths) == 0:
print '\n\n\n[!] CPAC says: None of the subjects listed in the ' \
'group analysis subject list were found to have outputs ' \
'produced by individual-level analysis.\n\nEnsure that ' \
'the subjects listed in your group analysis subject list ' \
'are the same as the ones included in the individual-' \
'level analysis you are running group-level analysis for.' \
'\n\n\n'
raise Exception
''' END subject list iteration '''
# check to see if any derivatives of subjects are missing
if len(list(set(group_sublist) - set(exist_paths))) >0:
print "List of outputs missing for subjects:"
print list(set(group_sublist) - set(exist_paths))
print "..for derivatives:"
print resource
print "..at paths:"
print os.path.dirname(s_paths[0]).replace(s_ids[0], '*')
# create the path string for the group analysis output
out_dir = os.path.dirname(s_paths[0]).split(p_id[0] + '/')
out_dir = os.path.join(group_conf.output_dir, out_dir[1])
out_dir = out_dir.replace(s_ids[0], 'group_analysis_results_%s/_grp_model_%s'%(p_id[0],group_conf.model_name))
model_out_dir = os.path.join(group_conf.output_dir, 'group_analysis_results_%s/_grp_model_%s'%(p_id[0],group_conf.model_name))
mod_path = os.path.join(out_dir, 'model_files')
if not os.path.isdir(mod_path):
os.makedirs(mod_path)
''' write the new subject list '''
new_sub_file = os.path.join(mod_path, os.path.basename(group_conf.subject_list))
try:
f = open(new_sub_file, 'w')
for sub in exist_paths:
print >>f, sub
f.close()
except:
print "Error: Could not open subject list file: ", new_sub_file
raise Exception
group_conf.update('subject_list',new_sub_file)
sub_id_label = group_conf.subject_id_label
# Run 'create_fsl_model' script to extract phenotypic data from
# the phenotypic file for each of the subjects in the subject list
''' get the motion statistics parameter file, if present '''
# get the parameter file so it can be passed to create_fsl_model.py
# so MeanFD or other measures can be included in the design matrix
measure_list = ['MeanFD', 'MeanFD_Jenkinson', 'MeanDVARS']
for measure in measure_list:
if (measure in group_conf.design_formula):
parameter_file = os.path.join(c.outputDirectory, p_id[0], '%s%s_all_params.csv'%(scan_ids[0].strip('_'),threshold_val))
if 1 in c.runGenerateMotionStatistics:
if not os.path.exists(parameter_file):
print '\n\n[!] CPAC says: Could not find or open the motion ' \
'parameter file. This is necessary if you have included ' \
'any of the MeanFD measures in your group model.\n\n' \
'If Generate Motion Statistics is enabled, this file can ' \
'usually be found in the output directory of your ' \
'individual-level analysis runs. If it is not there, ' \
'double-check to see if individual-level analysis had ' \
'completed successfully.\n'
print 'Path not found: ', parameter_file, '\n\n'
raise Exception
else:
def no_measures_error(measure):
print '\n\n[!] CPAC says: The measure %s was included in ' \
'your group analysis design matrix formula, but ' \
'Generate Motion Statistics was not run during ' \
'individual-level analysis.\n' % measure
print 'Please run Generate Motion Statistics if you wish ' \
'to include this measure in your model.\n'
print 'If you HAVE completed a run with this option ' \
'enabled, then you are seeing this error because ' \
'the motion parameter file normally created by this ' \
'option is missing.\n\n'
raise Exception
for measure in measure_list:
if (measure in group_conf.design_formula):
no_measures_error(measure)
parameter_file = None
break
else:
parameter_file = None
# path to the pipeline folder to be passed to create_fsl_model.py
# so that certain files like output_means.csv can be accessed
pipeline_path = os.path.join(c.outputDirectory, p_id[0])
# the current output that cpac_group_analysis_pipeline.py and
# create_fsl_model.py is currently being run for
current_output = resource #s_paths[0].replace(pipeline_path, '').split('/')[2]
# generate working directory for this output's group analysis run
workDir = '%s/group_analysis/%s/%s_%s' % (c.workingDirectory, group_conf.model_name, resource, scan_ids[0])
# s_paths is a list of paths to each subject's derivative (of the current
# derivative gpa is being run on) - s_paths_dirList is a list of each directory
# in this path separated into list elements
# this makes strgy_path basically the directory path of the folders after
# the scan ID folder level
strgy_path = os.path.dirname(s_paths[0]).split(scan_ids[0])[1]
# get rid of periods in the path
for ch in ['.']:
if ch in strgy_path:
strgy_path = strgy_path.replace(ch, "")
# create nipype-workflow-name-friendly strgy_path
# (remove special characters)
strgy_path_name = strgy_path.replace('/', "_")
workDir = workDir + '/' + strgy_path_name
''' merge the remaining subjects for this current output '''
# then, take the group mask, and iterate over the list of subjects
# remaining to extract the mean of each subject using the group
# mask
merge_input = " "
merge_output_dir = workDir + "/merged_files"
if not os.path.exists(merge_output_dir):
os.makedirs(merge_output_dir)
merge_output = merge_output_dir + "/" + current_output + "_merged.nii.gz"
merge_mask_output = merge_output_dir + "/" + current_output + "_merged_mask.nii.gz"
# create a string per derivative filled with every subject's path to the
# derivative output file
for derivative_path in derivative_paths:
merge_input = merge_input + " " + derivative_path
merge_string = "fslmerge -t %s %s" % (merge_output, merge_input)
# MERGE the remaining outputs
try:
commands.getoutput(merge_string)
except Exception as e:
print "[!] CPAC says: FSL Merge failed for output: %s" % current_output
print "Error details: %s\n\n" % e
raise
merge_mask_string = "fslmaths %s -abs -Tmin -bin %s" % (merge_output, merge_mask_output)
# CREATE A MASK of the merged file
try:
commands.getoutput(merge_mask_string)
except Exception as e:
print "[!] CPAC says: FSL Mask failed for output: %s" % current_output
print "Error details: %s\n\n" % e
raise
derivative_means_dict = {}
roi_means_dict = {}
# CALCULATE THE MEANS of each remaining output using the group mask
for derivative_path in derivative_paths:
try:
if "Group Mask" in group_conf.mean_mask:
maskave_output = commands.getoutput("3dmaskave -mask %s %s" % (merge_mask_output, derivative_path))
elif "Individual Mask" in group_conf.mean_mask:
maskave_output = commands.getoutput("3dmaskave -mask %s %s" % (derivative_path, derivative_path))
except Exception as e:
print "[!] CPAC says: AFNI 3dmaskave failed for output: %s\n" \
"(Measure Mean calculation)" % current_output
print "Error details: %s\n\n" % e
raise
# get the subject ID of the current derivative path reliably
derivative_path_subID = derivative_path.replace(pipeline_path,"").strip("/").split("/")[0]
# this crazy-looking command simply extracts the mean from the
# verbose AFNI 3dmaskave output string
derivative_means_dict[derivative_path_subID] = maskave_output.split("\n")[-1].split(" ")[0]
# derivative_means_dict is now something like this:
# { 'sub001': 0.3124, 'sub002': 0.2981, .. }
# if custom ROI means are included in the model, do the same for those
if "Custom_ROI_Mean" in group_conf.design_formula:
try:
if "centrality" in derivative_path:
# resample custom roi mask to 3mm, then use that
resampled_roi_mask = merge_output_dir + "/" + current_output + "_resampled_roi_mask.nii.gz"
commands.getoutput("flirt -in %s -ref %s -o %s -applyxfm -init %s -interp nearestneighbour" % (group_conf.custom_roi_mask, derivative_path, resampled_roi_mask, c.identityMatrix))
ROIstats_output = commands.getoutput("3dROIstats -mask %s %s" % (resampled_roi_mask, derivative_path))
else:
ROIstats_output = commands.getoutput("3dROIstats -mask %s %s" % (group_conf.custom_roi_mask, derivative_path))
except Exception as e:
print "[!] CPAC says: AFNI 3dROIstats failed for output: %s" \
"\n(Custom ROI Mean calculation)" % current_output
print "Error details: %s\n\n" % e
raise
ROIstats_list = ROIstats_output.split("\t")
# calculate the number of ROIs - 3dROIstats output can be split
# into a list, and the actual ROI means begin at a certain point
num_rois = (len(ROIstats_list)-3)/2
roi_means = []
# create a list of the ROI means - each derivative of each subject
# will have N number of ROIs depending on how many ROIs were
# specified in the custom ROI mask
for num in range(num_rois+3,len(ROIstats_list)):
roi_means.append(ROIstats_list[num])
roi_means_dict[derivative_path_subID] = roi_means
else:
roi_means_dict = None
if len(derivative_means_dict.keys()) == 0:
err_string = "[!] CPAC says: Something went wrong with the " \
"calculation of the output means via the group mask.\n\n"
raise Exception(err_string)
''' run create_fsl_model.py to generate the group analysis models '''
from CPAC.utils import create_fsl_model
create_fsl_model.run(group_conf, fTest, parameter_file, derivative_means_dict, pipeline_path, current_output, model_out_dir, roi_means_dict, True)
''' begin GA workflow setup '''
if not os.path.exists(new_sub_file):
raise Exception("path to input subject list %s is invalid" % new_sub_file)
#if c.mixedScanAnalysis == True:
# wf = pe.Workflow(name = 'group_analysis/%s/grp_model_%s'%(resource, os.path.basename(model)))
#else:
wf = pe.Workflow(name = resource)
wf.base_dir = workDir
wf.config['execution'] = {'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(c.crashLogDirectory)}
log_dir = os.path.join(group_conf.output_dir, 'logs', 'group_analysis', resource, 'model_%s' % (group_conf.model_name))
if not os.path.exists(log_dir):
os.makedirs(log_dir)
else:
pass
# gp_flow
# Extracts the model files (.con, .grp, .mat, .fts) from the model
# directory and sends them to the create_group_analysis workflow gpa_wf
gp_flow = create_grp_analysis_dataflow("gp_dataflow_%s" % resource)
gp_flow.inputs.inputspec.grp_model = os.path.join(model_out_dir, "model_files", current_output)
gp_flow.inputs.inputspec.model_name = group_conf.model_name
gp_flow.inputs.inputspec.ftest = fTest
# gpa_wf
# Creates the actual group analysis workflow
gpa_wf = create_group_analysis(fTest, "gp_analysis_%s" % resource)
gpa_wf.inputs.inputspec.merged_file = merge_output
gpa_wf.inputs.inputspec.merge_mask = merge_mask_output
gpa_wf.inputs.inputspec.z_threshold = z_threshold
gpa_wf.inputs.inputspec.p_threshold = p_threshold
gpa_wf.inputs.inputspec.parameters = (c.FSLDIR, 'MNI152')
wf.connect(gp_flow, 'outputspec.mat',
gpa_wf, 'inputspec.mat_file')
wf.connect(gp_flow, 'outputspec.con',
gpa_wf, 'inputspec.con_file')
wf.connect(gp_flow, 'outputspec.grp',
gpa_wf, 'inputspec.grp_file')
if fTest:
wf.connect(gp_flow, 'outputspec.fts',
gpa_wf, 'inputspec.fts_file')
# ds
# Creates the datasink node for group analysis
ds = pe.Node(nio.DataSink(), name='gpa_sink')
if 'sca_roi' in resource:
out_dir = os.path.join(out_dir, \
re.search('sca_roi_(\d)+',os.path.splitext(os.path.splitext(os.path.basename(s_paths[0]))[0])[0]).group(0))
if 'dr_tempreg_maps_zstat_files_to_standard_smooth' in resource:
out_dir = os.path.join(out_dir, \
re.search('temp_reg_map_z_(\d)+',os.path.splitext(os.path.splitext(os.path.basename(s_paths[0]))[0])[0]).group(0))
if 'centrality' in resource:
names = ['degree_centrality_binarize', 'degree_centrality_weighted', \
'eigenvector_centrality_binarize', 'eigenvector_centrality_weighted', \
'lfcd_binarize', 'lfcd_weighted']
for name in names:
if name in os.path.basename(s_paths[0]):
out_dir = os.path.join(out_dir, name)
break
if 'tempreg_maps' in resource:
out_dir = os.path.join(out_dir, \
re.search('\w*[#]*\d+', os.path.splitext(os.path.splitext(os.path.basename(s_paths[0]))[0])[0]).group(0))
# if c.mixedScanAnalysis == True:
# out_dir = re.sub(r'(\w)*scan_(\w)*(\d)*(\w)*[/]', '', out_dir)
ds.inputs.base_directory = out_dir
ds.inputs.container = ''
ds.inputs.regexp_substitutions = [(r'(?<=rendered)(.)*[/]','/'),
(r'(?<=model_files)(.)*[/]','/'),
(r'(?<=merged)(.)*[/]','/'),
(r'(?<=stats/clusterMap)(.)*[/]','/'),
(r'(?<=stats/unthreshold)(.)*[/]','/'),
(r'(?<=stats/threshold)(.)*[/]','/'),
(r'_cluster(.)*[/]',''),
(r'_slicer(.)*[/]',''),
(r'_overlay(.)*[/]','')]
'''
if 1 in c.runSymbolicLinks:
link_node = pe.MapNode(interface=util.Function(
input_names=['in_file',
'resource'],
output_names=[],
function=prepare_gp_links),
name='link_gp_', iterfield=['in_file'])
link_node.inputs.resource = resource
wf.connect(ds, 'out_file', link_node, 'in_file')
'''
########datasink connections#########
if fTest:
wf.connect(gp_flow, 'outputspec.fts',
ds, 'model_files.@0')
wf.connect(gp_flow, 'outputspec.mat',
ds, 'model_files.@1' )
wf.connect(gp_flow, 'outputspec.con',
ds, 'model_files.@2')
wf.connect(gp_flow, 'outputspec.grp',
ds, 'model_files.@3')
wf.connect(gpa_wf, 'outputspec.merged',
ds, 'merged')
wf.connect(gpa_wf, 'outputspec.zstats',
ds, 'stats.unthreshold')
wf.connect(gpa_wf, 'outputspec.zfstats',
ds,'stats.unthreshold.@01')
wf.connect(gpa_wf, 'outputspec.fstats',
ds,'stats.unthreshold.@02')
wf.connect(gpa_wf, 'outputspec.cluster_threshold_zf',
ds, 'stats.threshold')
wf.connect(gpa_wf, 'outputspec.cluster_index_zf',
ds,'stats.clusterMap')
wf.connect(gpa_wf, 'outputspec.cluster_localmax_txt_zf',
ds, 'stats.clusterMap.@01')
wf.connect(gpa_wf, 'outputspec.overlay_threshold_zf',
ds, 'rendered')
wf.connect(gpa_wf, 'outputspec.rendered_image_zf',
ds, 'rendered.@01')
wf.connect(gpa_wf, 'outputspec.cluster_threshold',
ds, 'stats.threshold.@01')
wf.connect(gpa_wf, 'outputspec.cluster_index',
ds, 'stats.clusterMap.@02')
wf.connect(gpa_wf, 'outputspec.cluster_localmax_txt',
ds, 'stats.clusterMap.@03')
wf.connect(gpa_wf, 'outputspec.overlay_threshold',
ds, 'rendered.@02')
wf.connect(gpa_wf, 'outputspec.rendered_image',
ds, 'rendered.@03')
######################################
# Run the actual group analysis workflow
wf.run()
'''
except:
print "Error: Group analysis workflow run command did not complete successfully."
print "subcount: ", subcount
print "pathcount: ", pathcount
print "sublist: ", sublist_items
print "input subject list: "
print "conf: ", conf.subjectListFile
raise Exception
'''
print "**Workflow finished for model %s and resource %s"%(os.path.basename(group_conf.output_dir), resource)
#diag.close()
def run(config, subject_infos, resource):
    """Load the pipeline configuration and pickled inputs, then launch the
    group-analysis workflow.

    :param config: path to the YAML pipeline configuration file
    :param subject_infos: path to a pickle file of subject information
    :param resource: path to a pickle file describing the resource to analyze
    """
    import commands
    import os
    import pickle
    import yaml

    # NOTE(review): sourcing ~/.bashrc in a child shell cannot affect this
    # process's environment; kept for compatibility with the original code
    commands.getoutput('source ~/.bashrc')

    # safe_load avoids executing arbitrary YAML tags from the config file
    with open(os.path.realpath(config), 'r') as config_file:
        c = Configuration(yaml.safe_load(config_file))

    with open(resource, 'r') as resource_file:
        resource_info = pickle.load(resource_file)
    with open(subject_infos, 'r') as subject_file:
        subject_info = pickle.load(subject_file)

    prep_group_analysis_workflow(c, resource_info, subject_info)
| |
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import cPickle as pickle
from openmdao.api import Problem
from pyoptsparse import Optimization, SNOPT
from wakeexchange.OptimizationGroups import OptAEP
from wakeexchange.gauss import gauss_wrapper, add_gauss_params_IndepVarComps
from wakeexchange.floris import floris_wrapper, add_floris_params_IndepVarComps
from wakeexchange.utilities import sunflower_points
def tuning_obj_function(xdict={'ky': 0.022, 'kz': 0.022, 'I': 0.06, 'shear_exp': 0.15}, plot=False):
global prob
global model
# prob.setup()
set_param_vals(xdict)
turbineX = np.array([1118.1, 1881.9])
turbineY = np.array([1279.5, 1720.5])
yaw_weight = 1.
# load data
ICOWESdata = loadmat('../data/YawPosResults.mat')
with open('../data/yawPower.p', 'rb') as handle:
yawrange_4D, SOWFApower_yaw_4D, _, _ = pickle.load(handle)
with open('../data/offset4DPower.p', 'rb') as handle:
posrange_cs_4D, SOWFApower_cs_4D = pickle.load(handle)
with open('../data/offset6DPower.p', 'rb') as handle:
posrange_cs_6D, SOWFApower_cs_6D = pickle.load(handle)
with open('../data/spacePower.p', 'rb') as handle:
posrange_ds, SOWFApower_ds = pickle.load(handle)
# set tuning params
ICOWESvelocity = 8.0
PFvelocity = 8.48673684
PFvelocity = 8.38673684
rotor_diameter = rotorDiameter[0]
error_turbine2 = 0.0
prob['turbineX'] = turbineX
prob['turbineY'] = turbineY
prob['rotorDiameter'] = rotorDiameter
prob['hubHeight'] = rotorDiameter
prob['axialInduction'] = axialInduction
prob['generatorEfficiency'] = generatorEfficiency
prob['air_density'] = air_density
prob['Cp_in'] = Cp
prob['Ct_in'] = Ct
prob['windSpeeds'] = np.array([wind_speed])
prob['windDirections'] = np.array([wind_direction])
# ################## compare yaw ######################
# 4D yaw
yawrange = np.array(list(yawrange_4D))
Power = list()
# set to 4D positions and inflow velocity
prob['turbineX'] = np.array([1118.1, 1556.0])
prob['turbineY'] = np.array([1279.5, 1532.3])
prob['windSpeeds'] = np.array([PFvelocity])
for yaw1 in yawrange:
prob['yaw0'] = np.array([yaw1, 0.0])
prob.run()
Power.append(list(prob['wtPower0']))
Power = np.array(Power)
# print FlorisPower
SOWFApower = SOWFApower_yaw_4D*1E-3
error_turbine2 += yaw_weight*np.sum((SOWFApower[:, 1]-Power[:, 1])**2)
# 7D yaw
yawrange = ICOWESdata['yaw'][0]
# yawrange = ICOWESdata['yaw'][0, 2:-2]
Power = list()
# set to 7D positions
prob['turbineX'] = np.array([1118.1, 1881.9])
prob['turbineY'] = np.array([1279.5, 1720.5])
prob['windSpeeds'] = np.array([ICOWESvelocity])
# run analysis
for yaw1 in yawrange:
prob['yaw0'] = np.array([yaw1, 0.0])
prob.run()
Power.append(list(prob['wtPower0']))
Power = np.array(Power)
SOWFApower = np.array([ICOWESdata['yawPowerT1'][0], ICOWESdata['yawPowerT2'][0]]).transpose()/1000.
error_turbine2 += yaw_weight*np.sum((SOWFApower[:, 1]-Power[:, 1])**2)
# error_turbine2 += yaw_weight*np.sum((SOWFApower[2:-2, 1]-Power[:, 1])**2)
# ################## compare position ######################
PosPowFig, PosPowAx = plt.subplots(ncols=2, nrows=2, sharey=False)
prob['yaw0'] = np.array([0.0, 0.0])
prob['windSpeeds'] = np.array([PFvelocity])
# position crosswind 4D
posrange = np.array(list(posrange_cs_4D))
Power = list()
for pos2 in posrange:
# Define turbine locations and orientation (4D)
effUdXY = 0.523599
Xinit = np.array([1118.1, 1556.0])
Yinit = np.array([1279.5, 1532.3])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
Power.append(list(prob['wtPower0']))
Power = np.array(Power)
SOWFApower = SOWFApower_cs_4D*1E-3
error_turbine2 += np.sum((SOWFApower[:, 1]-Power[:, 1])**2)
# position crosswind 6D
posrange = np.array(list(posrange_cs_6D))
Power = list()
prob['windSpeeds'] = np.array([PFvelocity])
for pos2 in posrange:
# Define turbine locations and orientation (4D)
effUdXY = 0.523599
Xinit = np.array([1118.1, 1556.0])
Yinit = np.array([1279.5, 1532.3])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
Power.append(list(prob['wtPower0']))
Power = np.array(Power)
SOWFApower = SOWFApower_cs_6D*1E-3
error_turbine2 += np.sum((SOWFApower[:, 1]-Power[:, 1])**2)
# position crosswind 7D
posrange = ICOWESdata['pos'][0]
Power = list()
prob['windSpeeds'] = np.array([ICOWESvelocity])
for pos2 in posrange:
# Define turbine locations and orientation
effUdXY = 0.523599
Xinit = np.array([1118.1, 1881.9])
Yinit = np.array([1279.5, 1720.5])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
Power.append(list(prob['wtPower0']))
Power = np.array(Power)
SOWFApower = np.array([ICOWESdata['posPowerT1'][0], ICOWESdata['posPowerT2'][0]]).transpose()/1000.
error_turbine2 += np.sum((SOWFApower[:, 1]-Power[:, 1])**2)
# position downstream
posrange = np.array(list(posrange_ds))*rotor_diameter
Power = list()
prob['windSpeeds'] = np.array([PFvelocity])
prob['turbineY'] = np.array([0.0, 0.0])
prob['windDirections'] = np.array([270.0])
for pos2 in posrange:
prob['turbineX'] = np.array([0.0, pos2])
prob.run()
Power.append(list(prob['wtPower0']))
Power = np.array(Power)
SOWFApower = SOWFApower_ds*1E-3
error_turbine2 += np.sum((SOWFApower[:, 1]-Power[:, 1])**2)
if model is 'gauss':
print 'error_turbine2: ', error_turbine2, 'ky: ', prob['model_params:ky'], \
'kz: ', prob['model_params:kz'], 'I: ', prob['model_params:I'], \
'shear_exp: ', prob['model_params:shear_exp']
elif model is 'floris':
print 'error_turbine2: ', error_turbine2
print 'kd: ', xdict['kd'], 'initialWakeAngle: ', xdict['initialWakeAngle'], \
'initialWakeDisplacement: ', xdict['initialWakeDisplacement'], 'bd: ', xdict['bd'], 'ke: ', xdict['ke'], \
'me: ', np.array([xdict['me'][0], xdict['me'][1], 1.0]), \
'MU: ', np.array([xdict['MU'][0], 1.0, xdict['MU'][1]]), 'aU: ', xdict['aU'], 'bU: ', xdict['bU'], \
'cos_spread: ', xdict['cos_spread']
funcs = {'obj': error_turbine2}
fail = False
return funcs, fail
def set_param_vals(xdict):
    """Copy the optimizer's design variables into the OpenMDAO problem.

    :param xdict: dict of design-variable values from pyoptsparse
    """
    global prob
    global model

    # I = xdict['I']
    # wake expansion rates from the linear TI relation (TI is defined at
    # script level when the gauss model is selected)
    ky = 0.3837 * TI + 0.003678
    kz = 0.3837 * TI + 0.003678

    # use equality, not identity, when comparing string values
    if model == 'gauss':
        # each assignment is best-effort: some model builds do not expose
        # every parameter, so missing keys are deliberately skipped
        try:
            prob['model_params:ky'] = ky
        except Exception:
            pass
        try:
            prob['model_params:kz'] = kz
        except Exception:
            pass
        try:
            prob['model_params:I'] = xdict['I']
        except Exception:
            pass
        try:
            prob['model_params:shear_exp'] = xdict['shear_exp']
            # print prob['model_params:n_std_dev']
        except Exception:
            pass
            # quit()
            # raise UserWarning("shear_exp not found")
    elif model == 'floris':
        # set tuning variables
        # prob['gen_params:pP'] = xdict['pP']
        prob['model_params:kd'] = xdict['kd']
        prob['model_params:initialWakeAngle'] = xdict['initialWakeAngle']
        prob['model_params:initialWakeDisplacement'] = xdict['initialWakeDisplacement']
        prob['model_params:bd'] = xdict['bd']
        prob['model_params:ke'] = xdict['ke']
        # third entries of me/MU are fixed at 1.0 by convention
        prob['model_params:me'] = np.array([xdict['me'][0], xdict['me'][1], 1.0])
        prob['model_params:MU'] = np.array([xdict['MU'][0], 1.0, xdict['MU'][1]])
        prob['model_params:aU'] = xdict['aU']
        prob['model_params:bU'] = xdict['bU']
        prob['model_params:cos_spread'] = xdict['cos_spread']
if __name__ == "__main__":
global model
model = 'gauss' # floris or gauss
nTurbines = 2
nDirections = 1
rotorDiameter = 126.4
rotorArea = np.pi*rotorDiameter*rotorDiameter/4.0
axialInduction = 1.0/3.0
CP = 0.7737/0.944 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
# CP =0.768 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
CT = 4.0*axialInduction*(1.0-axialInduction)
generator_efficiency = 0.944
hub_height = 90.0
# Define turbine characteristics
axialInduction = np.array([axialInduction, axialInduction])
rotorDiameter = np.array([rotorDiameter, rotorDiameter])
generatorEfficiency = np.array([generator_efficiency, generator_efficiency])
yaw = np.array([0., 0.])
hubHeight = np.array([hub_height, hub_height])
# Define site measurements
wind_direction = 270.-0.523599*180./np.pi
wind_speed = 8. # m/s
air_density = 1.1716
Ct = np.array([CT, CT])
Cp = np.array([CP, CP])
global prob
if model is 'gauss':
sort_turbs = True
wake_combination_method = 1 # can be [0:Linear freestreem superposition,
# 1:Linear upstream velocity superposition,
# 2:Sum of squares freestream superposition,
# 3:Sum of squares upstream velocity superposition]
ti_calculation_method = 2 # can be [0:No added TI calculations,
# 1:TI by Niayifar and Porte Agel altered by Annoni and Thomas,
# 2:TI by Niayifar and Porte Agel 2016,
# 3:no yet implemented]
calc_k_star = False
z_ref = 90.0
z_0 = 0.0
TI = 0.06
# k_calc = 0.022
k_calc = 0.3837 * TI + 0.003678
nRotorPoints = 16
prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
wake_model=gauss_wrapper, wake_model_options={'nSamples': 0}, datasize=0,
params_IdepVar_func=add_gauss_params_IndepVarComps,
params_IndepVar_args={}))
prob.setup()
prob['model_params:wake_combination_method'] = wake_combination_method
prob['model_params:ti_calculation_method'] = ti_calculation_method
prob['model_params:calc_k_star'] = calc_k_star
prob['model_params:sort'] = sort_turbs
prob['model_params:z_ref'] = z_ref
prob['model_params:z_0'] = z_0
prob['model_params:ky'] = k_calc
prob['model_params:kz'] = k_calc
prob['model_params:I'] = TI
if nRotorPoints > 1:
prob['model_params:RotorPointsY'], prob['model_params:RotorPointsZ'] = sunflower_points(nRotorPoints)
print "setting rotor points"
elif model is 'floris':
prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
wake_model=floris_wrapper,
wake_model_options={'nSamples': 0, 'use_rotor_components': False,
'differentiable': True}, datasize=0,
params_IdepVar_func=add_floris_params_IndepVarComps))
prob.setup()
prob['model_params:useWakeAngle'] = True
# initialize optimization problem
optProb = Optimization('Tuning %s Model to SOWFA' % model, tuning_obj_function)
if model is 'gauss':
# optProb.addVarGroup('ky', 1, lower=0.01, upper=1.0, value=0.022, scalar=1E1)
# optProb.addVarGroup('kz', 1, lower=0.01, upper=1.0, value=0.022, scalar=1E1)
# optProb.addVarGroup('I', 1, lower=0.04, upper=0.5, value=0.06, scalar=1E1)
optProb.addVarGroup('shear_exp', 1, lower=0.01, upper=1.0, value=0.15, scalar=1)
# optProb.addVarGroup('yshift', 1, lower=-126.4, upper=126.4, value=0.0)#, scalar=1E-3)
elif model is 'floris':
# optProb.addVarGroup('pP', 1, lower=0.0, upper=5.0, value=1.5) # , scalar=1E-1)
optProb.addVarGroup('kd', 1, lower=0.0, upper=1.0, value=0.15) # , scalar=1E-1)
optProb.addVarGroup('initialWakeAngle', 1, lower=-4.0, upper=4.0, value=1.5) # , scalar=1E-1)
optProb.addVarGroup('initialWakeDisplacement', 1, lower=-30.0, upper=30.0, value=-4.5) # , scalar=1E-1)
optProb.addVarGroup('bd', 1, lower=-1.0, upper=1.0, value=-0.01) # , scalar=1E-1)
optProb.addVarGroup('ke', 1, lower=0.0, upper=1.0, value=0.065) # , scalar=1E-1)
optProb.addVarGroup('me', 2, lower=np.array([-1.0, 0.0]), upper=np.array([0.0, 0.9]),
value=np.array([-0.5, 0.3])) # , scalar=1E-1)
optProb.addVarGroup('MU', 2, lower=np.array([0.0, 1.5]), upper=np.array([1.0, 20.0]),
value=np.array([0.5, 5.5])) # , scalar=1E-1)
optProb.addVarGroup('aU', 1, lower=0.0, upper=20.0, value=5.0) # , scalar=1E-1)
optProb.addVarGroup('bU', 1, lower=0.0, upper=5.0, value=1.66) # , scalar=1E-1)
optProb.addVarGroup('cos_spread', 1, lower=0.0, upper=10.0, value=2.0) # , scalar=1E-1)
# add objective
optProb.addObj('obj', scale=1E0)
# initialize optimizer
snopt = SNOPT(options={'Print file': 'SNOPT_print_tune_all.out'})
# run optimizer
sol = snopt(optProb, sens=None)
# print solution
print sol
# plot fit
tuning_obj_function(xdict=sol.xStar, plot=True)
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop, checkpoint saving and loading, evaluation code."""
import json
import os.path
import shutil
import numpy as np
import tensorflow as tf
from absl import flags
from easydict import EasyDict
from tqdm import trange
from libml import data, utils
FLAGS = flags.FLAGS
# Command-line flags controlling training schedule and checkpointing.
# "kimg" flags are expressed in kibi-samples (multiples of 1024 images).
flags.DEFINE_string('train_dir', './experiments',
                    'Folder where to save training data.')
flags.DEFINE_float('lr', 0.0001, 'Learning rate.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('train_kimg', 1 << 14, 'Training duration in kibi-samples.')
flags.DEFINE_integer('report_kimg', 64, 'Report summary period in kibi-samples.')
flags.DEFINE_integer('save_kimg', 64, 'Save checkpoint period in kibi-samples.')
flags.DEFINE_integer('keep_ckpt', 50, 'Number of checkpoints to keep.')
flags.DEFINE_string('eval_ckpt', '', 'Checkpoint to evaluate. If provided, do not do training, just do eval.')
class Model:
    """Base class tying together a TF graph, an experiment directory,
    checkpointing and argument persistence."""

    def __init__(self, train_dir: str, dataset: data.DataSet, **kwargs):
        # the experiment directory name encodes the class name and kwargs
        self.train_dir = os.path.join(train_dir, self.experiment_name(**kwargs))
        self.params = EasyDict(kwargs)
        self.dataset = dataset
        self.session = None
        # tmp holds transient run state: queued log lines and eval caches
        self.tmp = EasyDict(print_queue=[], cache=EasyDict())
        self.step = tf.train.get_or_create_global_step()
        self.ops = self.model(**kwargs)
        # the global step counts *images*, so it advances by the batch size
        self.ops.update_step = tf.assign_add(self.step, FLAGS.batch)
        self.add_summaries(**kwargs)

        # print the configuration and a table of model variables with sizes
        print(' Config '.center(80, '-'))
        print('train_dir', self.train_dir)
        print('%-32s %s' % ('Model', self.__class__.__name__))
        print('%-32s %s' % ('Dataset', dataset.name))
        for k, v in sorted(kwargs.items()):
            print('%-32s %s' % (k, v))
        print(' Model '.center(80, '-'))
        to_print = [tuple(['%s' % x for x in (v.name, np.prod(v.shape), v.shape)]) for v in utils.model_vars(None)]
        to_print.append(('Total', str(sum(int(x[1]) for x in to_print)), ''))
        # column widths sized to the longest entry in each column
        sizes = [max([len(x[i]) for x in to_print]) for i in range(3)]
        fmt = '%%-%ds %%%ds %%%ds' % tuple(sizes)
        for x in to_print[:-1]:
            print(fmt % x)
        print()
        print(fmt % to_print[-1])
        print('-' * 80)
        self._create_initial_files()

    @property
    def arg_dir(self):
        # where the experiment's JSON arguments are stored
        return os.path.join(self.train_dir, 'args')

    @property
    def checkpoint_dir(self):
        # where TF checkpoints are stored
        return os.path.join(self.train_dir, 'tf')

    def train_print(self, text):
        """Queue a line to be printed by the training progress loop."""
        self.tmp.print_queue.append(text)

    def _create_initial_files(self):
        # create the checkpoint/args directories and persist the args
        for dir in (self.checkpoint_dir, self.arg_dir):
            if not os.path.exists(dir):
                os.makedirs(dir)
        self.save_args()

    def _reset_files(self):
        # wipe the experiment directory and recreate it from scratch
        shutil.rmtree(self.train_dir)
        self._create_initial_files()

    def save_args(self, **extra_params):
        """Write the experiment parameters (plus extras) to args/args.json."""
        with open(os.path.join(self.arg_dir, 'args.json'), 'w') as f:
            json.dump({**self.params, **extra_params}, f, sort_keys=True, indent=4)

    @classmethod
    def load(cls, train_dir):
        """Re-instantiate a model from a saved experiment directory."""
        with open(os.path.join(train_dir, 'args/args.json'), 'r') as f:
            params = json.load(f)
        instance = cls(train_dir=train_dir, **params)
        instance.train_dir = train_dir
        return instance

    def experiment_name(self, **kwargs):
        """Build the experiment directory name from class name + sorted kwargs."""
        args = [x + str(y) for x, y in sorted(kwargs.items())]
        return '_'.join([self.__class__.__name__] + args)

    def eval_mode(self, ckpt=None):
        """Open a session and restore a checkpoint (latest when ckpt is None)."""
        self.session = tf.Session(config=utils.get_config())
        saver = tf.train.Saver()
        if ckpt is None:
            ckpt = utils.find_latest_checkpoint(self.checkpoint_dir)
        else:
            ckpt = os.path.abspath(ckpt)
        saver.restore(self.session, ckpt)
        self.tmp.step = self.session.run(self.step)
        print('Eval model %s at global_step %d' % (self.__class__.__name__, self.tmp.step))
        return self

    def model(self, **kwargs):
        """Build and return the model ops; implemented by subclasses."""
        raise NotImplementedError()

    def add_summaries(self, **kwargs):
        """Attach TF summaries; implemented by subclasses."""
        raise NotImplementedError()
class ClassifySemi(Model):
    """Semi-supervised classification."""

    def __init__(self, train_dir: str, dataset: data.DataSet, nclass: int, **kwargs):
        # nclass: number of target classes for the classifier
        self.nclass = nclass
        Model.__init__(self, train_dir, dataset, nclass=nclass, **kwargs)

    def train_step(self, train_session, data_labeled, data_unlabeled):
        """Run one optimization step on a labeled + unlabeled batch pair."""
        x, y = self.session.run([data_labeled, data_unlabeled])
        # [1] keeps the result of update_step: the new image-counting step
        self.tmp.step = train_session.run([self.ops.train_op, self.ops.update_step],
                                          feed_dict={self.ops.x: x['image'],
                                                     self.ops.y: y['image'],
                                                     self.ops.label: x['label']})[1]

    def train(self, train_nimg, report_nimg):
        """Train for train_nimg images, reporting every report_nimg images.

        When FLAGS.eval_ckpt is set, skips training and only evaluates.
        """
        if FLAGS.eval_ckpt:
            self.eval_checkpoint(FLAGS.eval_ckpt)
            return
        batch = FLAGS.batch
        train_labeled = self.dataset.train_labeled.batch(batch).prefetch(16)
        train_labeled = train_labeled.make_one_shot_iterator().get_next()
        train_unlabeled = self.dataset.train_unlabeled.batch(batch).prefetch(16)
        train_unlabeled = train_unlabeled.make_one_shot_iterator().get_next()
        scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=FLAGS.keep_ckpt,
                                                          pad_step_number=10))

        # pre-populate the eval cache with a plain session before the
        # monitored session takes over checkpointing/summaries
        with tf.Session(config=utils.get_config()) as sess:
            self.session = sess
            self.cache_eval()

        with tf.train.MonitoredTrainingSession(
                scaffold=scaffold,
                checkpoint_dir=self.checkpoint_dir,
                config=utils.get_config(),
                save_checkpoint_steps=FLAGS.save_kimg << 10,
                save_summaries_steps=report_nimg - batch) as train_session:
            self.session = train_session._tf_sess()
            self.tmp.step = self.session.run(self.step)
            while self.tmp.step < train_nimg:
                # progress bar for one reporting period, measured in images
                loop = trange(self.tmp.step % report_nimg, report_nimg, batch,
                              leave=False, unit='img', unit_scale=batch,
                              desc='Epoch %d/%d' % (1 + (self.tmp.step // report_nimg), train_nimg // report_nimg))
                for _ in loop:
                    self.train_step(train_session, train_labeled, train_unlabeled)
                    # flush queued log lines through the progress bar
                    while self.tmp.print_queue:
                        loop.write(self.tmp.print_queue.pop(0))
            while self.tmp.print_queue:
                print(self.tmp.print_queue.pop(0))

    def tune(self, train_nimg):
        """Run tune_op over train_nimg images (no train_op, no step update)."""
        batch = FLAGS.batch
        train_labeled = self.dataset.train_labeled.batch(batch).prefetch(16)
        train_labeled = train_labeled.make_one_shot_iterator().get_next()
        train_unlabeled = self.dataset.train_unlabeled.batch(batch).prefetch(16)
        train_unlabeled = train_unlabeled.make_one_shot_iterator().get_next()
        for _ in trange(0, train_nimg, batch, leave=False, unit='img', unit_scale=batch, desc='Tuning'):
            x, y = self.session.run([train_labeled, train_unlabeled])
            self.session.run([self.ops.tune_op], feed_dict={self.ops.x: x['image'],
                                                            self.ops.y: y['image'],
                                                            self.ops.label: x['label']})

    def eval_checkpoint(self, ckpt=None):
        """Evaluate raw and EMA classifiers before and after tuning."""
        self.eval_mode(ckpt)
        self.cache_eval()
        raw = self.eval_stats(classify_op=self.ops.classify_raw)
        ema = self.eval_stats(classify_op=self.ops.classify_op)
        self.tune(16384)
        tuned_raw = self.eval_stats(classify_op=self.ops.classify_raw)
        tuned_ema = self.eval_stats(classify_op=self.ops.classify_op)
        print('%16s %8s %8s %8s' % ('', 'labeled', 'valid', 'test'))
        print('%16s %8s %8s %8s' % (('raw',) + tuple('%.2f' % x for x in raw)))
        print('%16s %8s %8s %8s' % (('ema',) + tuple('%.2f' % x for x in ema)))
        print('%16s %8s %8s %8s' % (('tuned_raw',) + tuple('%.2f' % x for x in tuned_raw)))
        print('%16s %8s %8s %8s' % (('tuned_ema',) + tuple('%.2f' % x for x in tuned_ema)))

    def cache_eval(self):
        """Cache datasets for computing eval stats."""

        def collect_samples(dataset):
            """Return numpy arrays of all the samples from a dataset."""
            it = dataset.batch(1).prefetch(16).make_one_shot_iterator().get_next()
            images, labels = [], []
            while 1:
                try:
                    v = self.session.run(it)
                except tf.errors.OutOfRangeError:
                    break
                images.append(v['image'])
                labels.append(v['label'])
            images = np.concatenate(images, axis=0)
            labels = np.concatenate(labels, axis=0)
            return images, labels

        # 'test' is used as the sentinel: all three subsets are cached together
        if 'test' not in self.tmp.cache:
            self.tmp.cache.test = collect_samples(self.dataset.test)
            self.tmp.cache.valid = collect_samples(self.dataset.valid)
            self.tmp.cache.train_labeled = collect_samples(self.dataset.eval_labeled)

    def eval_stats(self, batch=None, feed_extra=None, classify_op=None):
        """Evaluate model on train, valid and test."""
        batch = batch or FLAGS.batch
        classify_op = self.ops.classify_op if classify_op is None else classify_op
        accuracies = []
        for subset in ('train_labeled', 'valid', 'test'):
            images, labels = self.tmp.cache[subset]
            predicted = []

            # classify in batches to bound memory use
            for x in range(0, images.shape[0], batch):
                p = self.session.run(
                    classify_op,
                    feed_dict={
                        self.ops.x: images[x:x + batch],
                        **(feed_extra or {})
                    })
                predicted.append(p)
            predicted = np.concatenate(predicted, axis=0)
            accuracies.append((predicted.argmax(1) == labels).mean() * 100)
        self.train_print('kimg %-5d accuracy train/valid/test %.2f %.2f %.2f' %
                         tuple([self.tmp.step >> 10] + accuracies))
        return np.array(accuracies, 'f')

    def add_summaries(self, feed_extra=None, **kwargs):
        """Register accuracy summaries computed via a py_func at summary time."""
        del kwargs

        def gen_stats():
            return self.eval_stats(feed_extra=feed_extra)

        accuracies = tf.py_func(gen_stats, [], tf.float32)
        tf.summary.scalar('accuracy/train_labeled', accuracies[0])
        tf.summary.scalar('accuracy/valid', accuracies[1])
        tf.summary.scalar('accuracy', accuracies[2])
| |
# -*- coding: utf-8
# 'version': '0.3'
#
# Copyright (c) 2017, Stephen B, Hope, All rights reserved.
#
# CommAI-env Copyright (c) 2016-present, Facebook, Inc., All rights reserved.
# Round1 Copyright (c) 2017-present, GoodAI All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE_CHALLENGE file in the root directory of this source tree.
import curses
import curses.textpad
import logging
import locale
# TODO byte_channels, channels unresolved ref
from core.byte_channels import ByteInputChannel
from core.channels import InputChannel
# use the user's preferred locale so curses renders text correctly;
# `code` is the encoding used to round-trip strings for addstr calls below
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
import platform
if platform.python_version_tuple()[0] == '2':
    # TODO text.converters, channels unresolved ref
    from kitchen.text.converters import to_unicode
class BaseView(object):
    """Curses view showing total reward, total time and the current task."""

    def __init__(self, env, session):
        """Observe basic high-level information about the session and
        environment; the information is saved for display later.

        :param env: environment whose task updates are observed
        :param session: session whose reward/time updates are observed
        """
        # TODO: Move environment and session outside of the class
        self._env = env
        self._session = session
        env.task_updated.register(self.on_task_updated)
        session.total_reward_updated.register(self.on_total_reward_updated)
        session.total_time_updated.register(self.on_total_time_updated)
        self.logger = logging.getLogger(__name__)
        # state mirrored into the curses info window
        self.info = {'reward': 0, 'time': 0, 'current_task': 'None'}

    def on_total_reward_updated(self, reward):
        """Record the new cumulative reward and repaint the info window.

        :param reward: new total reward
        """
        self.info['reward'] = reward
        self.paint_info_win()

    def on_total_time_updated(self, time):
        """Record the new total time, repaint, and poll the keyboard for the
        speed-control keys: '+' runs faster, '-' slower, '0' resets.

        :param time: new total elapsed time
        """
        self.info['time'] = time
        self.paint_info_win()
        # non-blocking keyboard poll piggybacks on the time tick
        self._stdscr.nodelay(1)
        key = self._stdscr.getch()
        if key == ord('+'):
            self._session.add_sleep(-0.001)
        elif key == ord('-'):
            self._session.add_sleep(0.001)
        if key == ord('0'):
            self._session.reset_sleep()

    def on_task_updated(self, task):
        """Record the new task name and repaint (skipped by views that do
        not display the current task).

        :param task: task object providing get_name()
        """
        if 'current_task' in self.info:
            self.info['current_task'] = task.get_name()
            self.paint_info_win()

    def paint_info_win(self):
        """Redraw the time/reward (and optionally current-task) lines."""
        self._info_win.addstr(0, 0, 'Total time: {0}'.format(self.info['time']))
        self._info_win.clrtobot()
        self._info_win.addstr(1, 0, 'Total reward: {0}'.format(self.info['reward']))
        self._info_win.clrtobot()
        if 'current_task' in self.info:
            self._info_win.addstr(2, 0, 'Current Task: {0}'.format(self.info['current_task']))
            self._info_win.clrtobot()
        self._info_win.refresh()

    def initialize(self):
        """Initialize curses and create the info sub-window."""
        # TODO _stdscr, _info_win_height, .height, ._win, _info_win def outside init
        self._stdscr = curses.initscr()
        # TODO generalize this:
        begin_x = 0
        begin_y = 0
        # self._info_win_width = 20
        self._info_win_height = 4
        self.height, self.width = self._stdscr.getmaxyx()
        self._win = self._stdscr.subwin(self.height, self.width, begin_y, begin_x)
        # create info box with reward and time
        self._info_win = self._win.subwin(self._info_win_height, self.width, 0, 0)
        curses.noecho()
        curses.cbreak()

    def finalize(self):
        # TODO static method
        """Restore the terminal to its normal state."""
        curses.nocbreak()
        curses.echo()
        curses.endwin()
class ConsoleView(BaseView):
"""
"""
    def __init__(self, env, session, serializer, show_world=False, byte_channels=False):
        """For visualization purposes we keep an internal buffer of the input
        and output streams, so when they are cleared from task to task the
        history stays intact.  Registers handlers for channel updates,
        optionally for world plotting, and connects the session token
        streams to the observed channels.

        :param env: environment to observe
        :param session: session providing token/reward/time updates
        :param serializer: serializer used to (de)serialize channel tokens
        :param show_world: when True, register a world-plotting handler
        :param byte_channels: use byte-level instead of bit-level channels
        """
        super(ConsoleView, self).__init__(env, session)
        # scrolling display buffers
        self.input_buffer = ''   # record what the learner says
        self.output_buffer = ''  # record what the environment says
        self.reward_buffer = ''
        self.panic = 'SKIP'
        if byte_channels:
            self._learner_channel = ByteInputChannel(serializer)
            self._env_channel = ByteInputChannel(serializer)
        else:
            self._learner_channel = InputChannel(serializer)
            self._env_channel = InputChannel(serializer)
        # listen to the updates in these channels
        self._learner_channel.sequence_updated.register(self.on_learner_sequence_updated)
        self._learner_channel.message_updated.register(self.on_learner_message_updated)
        self._env_channel.sequence_updated.register(self.on_env_sequence_updated)
        self._env_channel.message_updated.register(self.on_env_message_updated)
        if show_world:
            # register a handler to plot the world if show_world is active
            env.world_updated.register(self.on_world_updated)
        # connect the channels with the observed input bits
        session.env_token_updated.register(self.on_env_token_updated)
        session.learner_token_updated.register(self.on_learner_token_updated)
        # this view does not display the current task
        del self.info['current_task']
    def on_total_reward_updated(self, reward):
        """Append the reward-change symbol to the scrolling reward line and
        repaint it.

        :param reward: new cumulative reward
        """
        change = reward - self.info['reward']
        BaseView.on_total_reward_updated(self, reward)
        # left-pad with '_' so the newest symbol scrolls in from the right,
        # then trim to the scroll width (the +11 offset presumably accounts
        # for display margins -- TODO confirm)
        self.reward_buffer = "_" * self._scroll_msg_length + self.reward_buffer + self.encode_reward(change)
        self.reward_buffer = self.reward_buffer[-self._scroll_msg_length+11:]
        self._win.addstr(self._reward_seq_y, 0, self.reward_buffer)
        self._win.refresh()
@staticmethod
def encode_reward(reward):
"""
:param reward:
:return:
"""
d = {0: " ", 1: "+", -1: "-", 2: "2", -2: "\u01BB"}
return d[reward]
    def on_env_token_updated(self, token):
        """Feed a token emitted by the environment into the env channel.

        :param token: raw token from the session's environment stream
        """
        self._env_channel.consume(token)
    def on_learner_token_updated(self, token):
        """Feed a token emitted by the learner into the learner channel.

        :param token: raw token from the session's learner stream
        """
        self._learner_channel.consume(token)
    def on_learner_message_updated(self, message):
        # TODO message not used
        """Append the newest deserialized learner character to the input
        buffer and repaint the learner line; messages arrive character by
        character, so only the last character of the channel text is new.

        :param message: unused; text is read from the channel instead
        """
        if self._learner_channel.get_text():
            self.input_buffer += self._learner_channel.get_text()[-1]
            self.input_buffer = self.input_buffer[-self._scroll_msg_length:]
            learner_input = self.channel_to_str(self.input_buffer + ' ', self._learner_channel.get_undeserialized())
            self._win.addstr(self._learner_seq_y, 0, learner_input.encode(code).decode(code))
            self._win.refresh()
    def on_learner_sequence_updated(self, sequence):
        # TODO: `sequence` is unused; the handler redraws from buffered state.
        """Redraw the learner input line from the current buffers.

        :param sequence: unused; kept for the observer-callback signature
        :return: None
        """
        learner_input = self.channel_to_str(
            self.input_buffer + ' ', self._learner_channel.get_undeserialized())
        # `code` is a module-level codec name -- TODO confirm.
        self._win.addstr(self._learner_seq_y, 0, learner_input.encode(code).decode(code))
        self._win.refresh()
    def on_env_message_updated(self, message):
        # TODO: `message` is unused; the handler reads the channel state instead.
        """Append the environment's newest character to the output line and redraw.

        :param message: unused; kept for the observer-callback signature
        :return: None
        """
        if self._env_channel.get_text():
            self.output_buffer += self._env_channel.get_text()[-1]
            # Keep only what fits in the scrolling display area.
            self.output_buffer = self.output_buffer[-self._scroll_msg_length:]
            env_output = self.channel_to_str(self.output_buffer, self._env_channel.get_undeserialized())
            # `code` is a module-level codec name -- TODO confirm.
            self._win.addstr(self._teacher_seq_y, 0, env_output.encode(code).decode(code))
            self._win.refresh()
    def on_env_sequence_updated(self, sequence):
        # TODO: `sequence` is unused; the handler redraws from buffered state.
        """Redraw the environment output line from the current buffers.

        :param sequence: unused; kept for the observer-callback signature
        :return: None
        """
        env_output = self.channel_to_str(self.output_buffer, self._env_channel.get_undeserialized())
        # `code` is a module-level codec name -- TODO confirm.
        self._win.addstr(self._teacher_seq_y, 0, env_output.encode(code).decode(code))
        self._win.refresh()
def on_world_updated(self, world):
"""
:param world:
:return:
"""
if world:
world.state_updated.register(self.on_world_state_updated)
self._worldwin.addstr(0, 0, str(world))
self._worldwin.refresh()
else:
self._worldwin.clear()
self._worldwin.refresh()
    def on_world_state_updated(self, world):
        """Redraw the world window after an in-place world state change.

        :param world: the world object; rendered via ``str()``
        :return: None
        """
        self._worldwin.addstr(0, 0, str(world))
        self._worldwin.refresh()
    def initialize(self):
        """Initialize curses and carve the screen into sub-windows.

        Layout: row 0 teacher sequence, row 1 learner sequence, row 2 reward
        strip, row 4 onward the world window; an info box occupies the
        top-right corner and the user input line shares row 4 on the left.

        :return: None
        """
        # TODO def outside init
        self._stdscr = curses.initscr()
        begin_x = 0
        begin_y = 0
        # Fixed row positions for the scrolling channel displays.
        self._teacher_seq_y = 0
        self._learner_seq_y = 1
        self._reward_seq_y = 2
        self._world_win_y = 4
        self._world_win_x = 0
        self._info_win_width = 20
        self._info_win_height = 4
        self._user_input_win_y = 4
        self._user_input_win_x = 10
        self.height, self.width = self._stdscr.getmaxyx()
        # Leave room for the info box on the right plus one spare column.
        self._scroll_msg_length = self.width - self._info_win_width - 1
        self._win = self._stdscr.subwin(self.height, self.width, begin_y, begin_x)
        self._worldwin = self._win.subwin(self.height - self._world_win_y, self.width - self._world_win_x,
                                          self._world_win_y, self._world_win_x)
        self._info_win = self._win.subwin(self._info_win_height, self._info_win_width, 0,
                                          self.width - self._info_win_width)
        self._user_input_win = self._win.subwin(1, self.width - self._user_input_win_x, self._user_input_win_y,
                                                self._user_input_win_x)
        self._user_input_label_win = self._win.subwin(1, self._user_input_win_x - 1, self._user_input_win_y, 0)
        # Take over the terminal: no echo, deliver keys without Enter.
        curses.noecho()
        curses.cbreak()
    def get_input(self):
        """Prompt the user on the input line and return what was typed.

        Typing the configured panic string clears the input and forces the
        current task to time out by setting the environment's task time to
        infinity.

        :return: the entered string (empty if the panic string was typed)
        """
        self._user_input_label_win.addstr(0, 0, 'input:')
        self._user_input_label_win.refresh()
        curses.echo()
        # `code` is a module-level codec name (presumably the terminal
        # encoding) -- TODO confirm.
        inputstr = self._user_input_win.getstr(0, 0, self.width - self._user_input_win_x).decode(code)
        curses.noecho()
        if platform.python_version_tuple()[0] == '2':
            inputstr = to_unicode(inputstr)
        self._user_input_win.clear()
        if inputstr == self.panic:
            inputstr = ''
            # NOTE(review): reaches into a private attribute of the
            # environment to abort the current task -- confirm intended.
            self._env._task_time = float('inf')
        return inputstr
def channel_to_str(self, text, bits):
"""
:param text:
:param bits:
:return:
"""
length = self._scroll_msg_length - 10
return "{0:_>{length}}[{1: <8}]".format(text[-length:], bits[-7:], length=length)
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import datastore_v1
from google.cloud.datastore_v1 import enums
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records every request on the owning channel stub and replays its canned
    responses; a canned response that is an Exception is raised instead.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        # Record the call so tests can assert on the exact request sent.
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        if isinstance(response, Exception):
            raise response

        # BUG FIX: compare against None explicitly. The original used
        # `if response:`, which silently swallowed canned responses that
        # happen to be falsy (e.g. 0, "", or empty containers).
        if response is not None:
            return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Collects every request issued through its callables in ``requests`` and
    hands out canned ``responses`` (popped from the end of the list).
    """

    def __init__(self, responses=None):
        # BUG FIX: avoid the mutable default argument `responses=[]` -- the
        # shared list would leak canned responses between stub instances.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        """Return a unary-unary callable stub bound to this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Sentinel exception used to exercise the error paths in the tests."""
class TestDatastoreClient(object):
    """Request/response tests for DatastoreClient over a stubbed gRPC channel.

    Each success test asserts both the returned response and the exact
    request protobuf recorded by the channel stub; each *_exception test
    asserts that a canned exception propagates to the caller.
    """
    def test_lookup(self):
        """lookup() returns the canned response and sends a LookupRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.LookupResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        keys = []
        response = client.lookup(project_id, keys)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.LookupRequest(project_id=project_id, keys=keys)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_lookup_exception(self):
        """lookup() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        keys = []
        with pytest.raises(CustomException):
            client.lookup(project_id, keys)
    def test_run_query(self):
        """run_query() returns the canned response and sends a RunQueryRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.RunQueryResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        partition_id = {}
        response = client.run_query(project_id, partition_id)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.RunQueryRequest(
            project_id=project_id, partition_id=partition_id
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_run_query_exception(self):
        """run_query() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        partition_id = {}
        with pytest.raises(CustomException):
            client.run_query(project_id, partition_id)
    def test_begin_transaction(self):
        """begin_transaction() returns the canned transaction handle."""
        # Setup Expected Response
        transaction = b"-34"
        expected_response = {"transaction": transaction}
        expected_response = datastore_pb2.BeginTransactionResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        response = client.begin_transaction(project_id)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.BeginTransactionRequest(project_id=project_id)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_begin_transaction_exception(self):
        """begin_transaction() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        with pytest.raises(CustomException):
            client.begin_transaction(project_id)
    def test_commit(self):
        """commit() returns the canned response and sends a CommitRequest."""
        # Setup Expected Response
        index_updates = 1425228195
        expected_response = {"index_updates": index_updates}
        expected_response = datastore_pb2.CommitResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
        mutations = []
        response = client.commit(project_id, mode, mutations)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.CommitRequest(
            project_id=project_id, mode=mode, mutations=mutations
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_commit_exception(self):
        """commit() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
        mutations = []
        with pytest.raises(CustomException):
            client.commit(project_id, mode, mutations)
    def test_rollback(self):
        """rollback() returns the canned response and sends a RollbackRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.RollbackResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        transaction = b"-34"
        response = client.rollback(project_id, transaction)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.RollbackRequest(
            project_id=project_id, transaction=transaction
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_rollback_exception(self):
        """rollback() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        transaction = b"-34"
        with pytest.raises(CustomException):
            client.rollback(project_id, transaction)
    def test_allocate_ids(self):
        """allocate_ids() returns the canned response and sends the request."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.AllocateIdsResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        keys = []
        response = client.allocate_ids(project_id, keys)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.AllocateIdsRequest(
            project_id=project_id, keys=keys
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_allocate_ids_exception(self):
        """allocate_ids() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        keys = []
        with pytest.raises(CustomException):
            client.allocate_ids(project_id, keys)
    def test_reserve_ids(self):
        """reserve_ids() returns the canned response and sends the request."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.ReserveIdsResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup Request
        project_id = "projectId-1969970175"
        keys = []
        response = client.reserve_ids(project_id, keys)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = datastore_pb2.ReserveIdsRequest(
            project_id=project_id, keys=keys
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_reserve_ids_exception(self):
        """reserve_ids() propagates an exception raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)
        # Setup request
        project_id = "projectId-1969970175"
        keys = []
        with pytest.raises(CustomException):
            client.reserve_ids(project_id, keys)
| |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from datetime import datetime, timezone
from typing import Union, Any, Dict
from dateparser import parse
import urllib3
import traceback
# Disable insecure warnings
urllib3.disable_warnings()
''' GLOBAL VARIABLES '''
# CrowdStrike 'malicious_confidence' label -> Cortex XSOAR DBot score value.
MALICIOUS_DICTIONARY: Dict[Any, int] = {
    'low': Common.DBotScore.GOOD,
    'medium': Common.DBotScore.SUSPICIOUS,
    'high': Common.DBotScore.BAD
}
# Score matching the instance's configured 'threshold' parameter (defaults to
# 'high'); evaluated once at import time from the integration params.
MALICIOUS_THRESHOLD = MALICIOUS_DICTIONARY.get(demisto.params().get('threshold', 'high'))
''' CLIENT '''
class Client:
    """
    The integration's client: wraps CrowdStrikeClient and translates Cortex
    XSOAR command arguments into Falcon Intel API query parameters.
    """
    def __init__(self, params: Dict[str, str]):
        self.cs_client: CrowdStrikeClient = CrowdStrikeClient(params=params)
        # XSOAR argument name -> API query parameter name (passed through as-is).
        self.query_params: Dict[str, str] = {'offset': 'offset', 'limit': 'limit', 'sort': 'sort', 'free_search': 'q'}
        # Date arguments: FQL comparison operator and the API field to filter on.
        self.date_params: Dict[str, Dict[str, str]] = {
            'created_date': {'operator': '', 'api_key': 'created_date'},
            'last_updated_date': {'operator': '', 'api_key': 'last_updated'},
            'max_last_modified_date': {'operator': '<=', 'api_key': 'last_modified_date'},
            'min_last_activity_date': {'operator': '>=', 'api_key': 'first_activity_date'},
            'max_last_activity_date': {'operator': '<=', 'api_key': 'last_activity_date'},
        }
    def build_request_params(self, args: Dict[str, Any]) -> Dict[str, Any]:
        """
        Build the params dict for the request.
        An explicit 'query' argument takes precedence over a generated filter.
        :param args: Cortex XSOAR args
        :return: The params dict (empty values stripped by assign_params)
        """
        params: Dict[str, Any] = {key: args.get(arg) for arg, key in self.query_params.items()}
        query = args.get('query')
        params['filter'] = query if query else self.build_filter_query(args)
        return assign_params(**params)
    def build_filter_query(self, args: Dict[str, str]) -> str:
        """
        Builds the filter query in Falcon Query Language (FQL)
        :param args: Cortex XSOAR args
        :return: The query ('+'-joined key:'value' terms)
        """
        filter_query: str = str()
        for key in args:
            if key not in self.query_params:
                if key not in self.date_params:
                    # Plain arguments may hold comma-separated value lists.
                    values: List[str] = argToList(args[key], ',')
                    for value in values:
                        filter_query += f"{key}:'{value}'+"
                else:
                    operator: Optional[str] = self.date_params.get(key, {}).get('operator')
                    api_key: Optional[str] = self.date_params.get(key, {}).get('api_key')
                    # Parsing date argument of ISO format or free language into datetime object,
                    # replacing TZ with UTC, taking its timestamp format and rounding it up.
                    # NOTE(review): dateparser.parse returns None on unparsable
                    # input, which would raise AttributeError here -- confirm.
                    filter_query += f"{api_key}:" \
                                    f"{operator}{int(parse(args[key]).replace(tzinfo=timezone.utc).timestamp())}+"
        if filter_query.endswith('+'):
            filter_query = filter_query[:-1]
        return filter_query
    def get_indicator(self, indicator_value: str, indicator_type: str) -> Dict[str, Any]:
        """Fetch at most one indicator matching the given value and type."""
        # crowdstrike do not allow passing single quotes - so we encode them
        # we are not encoding the entire indicator value, as the other reserved chars (such as + and &) are allowed
        indicator_value = indicator_value.replace("'", "%27")
        args: Dict[str, Any] = {
            'indicator': indicator_value,
            'limit': 1
        }
        if indicator_type == 'hash':
            args['type'] = get_indicator_hash_type(indicator_value)
        elif indicator_type == 'ip':
            args['type'] = 'ip_address'
        else:
            args['type'] = indicator_type
        params: Dict[str, Any] = self.build_request_params(args)
        return self.cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params)
    def cs_actors(self, args: Dict[str, str]) -> Dict[str, Any]:
        """Query the Falcon Intel actors endpoint."""
        params: Dict[str, Any] = self.build_request_params(args)
        return self.cs_client.http_request(method='GET', url_suffix='intel/combined/actors/v1', params=params)
    def cs_indicators(self, args: Dict[str, str]) -> Dict[str, Any]:
        """Query the Falcon Intel indicators endpoint."""
        params: Dict[str, Any] = self.build_request_params(args)
        return self.cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params)
    def cs_reports(self, args: Dict[str, str]) -> Dict[str, Any]:
        """Query the Falcon Intel reports endpoint."""
        params: Dict[str, Any] = self.build_request_params(args)
        return self.cs_client.http_request(method='GET', url_suffix='intel/combined/reports/v1', params=params)
''' HELPER FUNCTIONS '''
def get_dbot_score_type(indicator_type: str) -> Union[Exception, DBotScoreType, str]:
    """
    Returns the dbot score type for an integration indicator type
    :param indicator_type: The indicator type ('ip', 'domain', 'file'/'hash' or 'url')
    :return: The dbot score type
    :raises DemistoException: for any other indicator type
    """
    score_types = {
        'ip': DBotScoreType.IP,
        'domain': DBotScoreType.DOMAIN,
        'file': DBotScoreType.FILE,
        'hash': DBotScoreType.FILE,
        'url': DBotScoreType.URL,
    }
    if indicator_type in score_types:
        return score_types[indicator_type]
    raise DemistoException('Indicator type is not supported.')
def get_score_from_resource(r: Dict[str, Any]) -> int:
    """
    Calculates the DBotScore for the resource by combining the resource's
    'malicious_confidence' label with the module-wide MALICIOUS_THRESHOLD.
    :param r: The resource
    :return: The DBotScore (1 = good, 2 = suspicious, 3 = bad)
    """
    confidence: int = MALICIOUS_DICTIONARY.get(r.get('malicious_confidence'), 0)
    # NOTE(review): a 'low' threshold marks everything bad and a 'medium'
    # threshold at least suspicious, regardless of confidence -- confirm.
    if confidence == 3 or MALICIOUS_THRESHOLD == 1:
        return 3
    if confidence == 2 or MALICIOUS_THRESHOLD == 2:
        return 2
    return 1
def get_indicator_hash_type(indicator_value: str) -> str:
    """
    Calculates the type of the hash from its length.
    :param indicator_value: The hash value
    :return: One of 'hash_md5', 'hash_sha1' or 'hash_sha256'
    :raises DemistoException: If the length matches no supported hash type.
    """
    # Hash length -> CrowdStrike indicator type name.
    hash_types: Dict[int, str] = {32: 'hash_md5', 40: 'hash_sha1', 64: 'hash_sha256'}
    length: int = len(indicator_value)
    hash_type = hash_types.get(length)
    if hash_type is None:
        # FIX: the original annotated the return as Union[str, Exception],
        # but the function raises on bad input -- it never returns an Exception.
        raise DemistoException(f'Invalid hash. Hash length is: {length}. Please provide either MD5 (32 length)'
                               f', SHA1 (40 length) or SHA256 (64 length) hash.')
    return hash_type
def get_indicator_object(indicator_value: Any, indicator_type: str, dbot_score: Common.DBotScore) \
        -> Union[Common.IP, Common.URL, Common.File, Common.Domain, None]:
    """
    Returns the corresponding indicator common object
    :param indicator_value: The indicator value
    :param indicator_type: The indicator type ('ip', 'url', 'hash' or 'domain')
    :param dbot_score: The indicator DBotScore
    :return: The indicator common object, or None for unknown types
    """
    if indicator_type == 'hash':
        # Select the Common.File keyword matching the hash flavor.
        hash_type: Union[str, Exception] = get_indicator_hash_type(indicator_value)
        file_kwargs: Dict[str, Any] = {'dbot_score': dbot_score}
        if hash_type == 'hash_md5':
            file_kwargs['md5'] = indicator_value
        elif hash_type == 'hash_sha1':
            file_kwargs['sha1'] = indicator_value
        else:
            file_kwargs['sha256'] = indicator_value
        return Common.File(**file_kwargs)
    if indicator_type == 'ip':
        return Common.IP(ip=indicator_value, dbot_score=dbot_score)
    if indicator_type == 'url':
        return Common.URL(url=indicator_value, dbot_score=dbot_score)
    if indicator_type == 'domain':
        return Common.Domain(domain=indicator_value, dbot_score=dbot_score)
    return None
def build_indicator(indicator_value: str, indicator_type: str, title: str, client: Client) -> List[CommandResults]:
    """
    Builds an indicator entry
    :param indicator_value: The indicator value
    :param indicator_type: The indicator type
    :param title: The title to show to the user
    :param client: The integration's client
    :return: The indicator entry (one CommandResults per matching resource,
        or a single "not found" entry)
    """
    res: Dict[str, Any] = client.get_indicator(indicator_value, indicator_type)
    resources: List[Any] = res.get('resources', [])
    results: List[CommandResults] = []
    if resources:
        for r in resources:
            output = get_indicator_outputs(r)
            score = get_score_from_resource(r)
            # NOTE(review): malicious_description is always 'High confidence'
            # even when the computed score is not bad -- confirm intended.
            dbot_score = Common.DBotScore(
                indicator=indicator_value,
                indicator_type=get_dbot_score_type(indicator_type),
                integration_name='CrowdStrike Falcon Intel v2',
                malicious_description='High confidence',
                score=score
            )
            indicator = get_indicator_object(indicator_value, indicator_type, dbot_score)
            results.append(CommandResults(
                outputs=output,
                outputs_prefix='FalconIntel.Indicator',
                outputs_key_field='ID',
                indicator=indicator,
                readable_output=tableToMarkdown(name=title, t=output, headerTransform=pascalToSpace),
                raw_response=res
            ))
    else:
        results.append(CommandResults(
            readable_output=f'No indicator found for {indicator_value}.'
        ))
    return results
def get_values(items_list: List[Any], return_type: str = 'str', keys: Union[str, List[Any]] = 'value') \
        -> Union[str, List[Union[str, Dict]]]:
    """
    Returns the values of list's items
    :param items_list: The items list (dicts)
    :param return_type: 'list' to return the raw list, anything else joins
        the values into a comma-separated string
    :param keys: A single key to extract, or a list of keys to keep
        (camel-cased) per item
    :return: The values list or its string representation
    """
    extracted: List[Any] = []
    if isinstance(keys, str):
        extracted = [entry.get(keys) for entry in items_list]
    elif isinstance(keys, list):
        extracted = [
            {underscoreToCamelCase(field): entry.get(field) for field in entry if field in keys}
            for entry in items_list
        ]
    if return_type != 'list':
        return ', '.join(str(value) for value in extracted)
    return extracted
def get_indicator_outputs(resource: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build the output and extra context of an indicator
    :param resource: The indicator's object as returned by the API
    :return: The indicator's context dict (empty values stripped by
        assign_params; empty dict for a falsy resource)
    """
    output: Dict[str, Any] = dict()
    if resource:
        indicator_id = resource.get('id')
        indicator_value = resource.get('indicator')
        indicator_type = resource.get('type')
        last_update = resource.get('last_update')
        publish_date = resource.get('publish_date')
        malicious_confidence = resource.get('malicious_confidence')
        reports = resource.get('reports')
        actors = resource.get('actors')
        malware_families = resource.get('malware_families')
        kill_chains = resource.get('kill_chains')
        domain_types = resource.get('domain_types')
        ip_address_types = resource.get('ip_address_types')
        # Cap relations and labels at 10 entries to keep the context small.
        relations: List[Any] = resource.get('relations', [])[:10]
        labels: List[Any] = resource.get('labels', [])[:10]
        # API timestamps are epoch seconds; render them as ISO-8601 UTC.
        output = assign_params(**{
            'ID': indicator_id,
            'Type': indicator_type,
            'Value': indicator_value,
            'LastUpdate': datetime.fromtimestamp(last_update, timezone.utc).isoformat() if last_update
            else None,
            'PublishDate': datetime.fromtimestamp(publish_date, timezone.utc).isoformat() if publish_date
            else None,
            'MaliciousConfidence': malicious_confidence,
            'Reports': reports,
            'Actors': actors,
            'MalwareFamilies': malware_families,
            'KillChains': kill_chains,
            'DomainTypes': domain_types,
            'IPAddressTypes': ip_address_types,
            'Relations': [f'{item.get("Type")}: {item.get("Indicator")}' for item in  # type: ignore
                          get_values(relations, return_type='list', keys=['indicator', 'type'])],
            'Labels': get_values(labels, return_type='list', keys='name')
        })
    return output
''' COMMANDS '''
def run_test_module(client: Client) -> Union[str, Exception]:
    """
    If a client is successfully constructed then an access token was successfully created,
    therefore the username and password are valid and a connection was made.
    On top of the above, this function validates the http request to indicators endpoint.
    :param client: the client object with an access token
    :return: 'ok' if got a valid access token and not all the quota is used at the moment
    """
    # A minimal (limit=1) request; any auth/quota problem raises here.
    client.cs_client.http_request('GET', 'intel/combined/indicators/v1', params={'limit': 1})
    return 'ok'
def file_command(files: List, client: Client) -> List[CommandResults]:
    """Return a reputation entry for every file hash in *files*."""
    return [
        entry
        for file in files
        for entry in build_indicator(file, 'hash', 'Falcon Intel file reputation:\n', client)
    ]
def ip_command(ips: List, client: Client) -> List[CommandResults]:
    """Return a reputation entry for every IP address in *ips*."""
    return [
        entry
        for ip in ips
        for entry in build_indicator(ip, 'ip', 'Falcon Intel IP reputation:\n', client)
    ]
def url_command(urls: List, client: Client) -> List[CommandResults]:
    """Return a reputation entry for every URL in *urls*."""
    return [
        entry
        for url in urls
        for entry in build_indicator(url, 'url', 'Falcon Intel URL reputation:\n', client)
    ]
def domain_command(domains: List, client: Client) -> List[CommandResults]:
    """Return a reputation entry for every domain in *domains*."""
    return [
        entry
        for domain in domains
        for entry in build_indicator(domain, 'domain', 'Falcon Intel domain reputation:\n', client)
    ]
def cs_actors_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Search Falcon Intel actors and return them as a CommandResults entry.
    :param client: The integration's client
    :param args: Cortex XSOAR args
    :return: CommandResults with FalconIntel.Actor context and a markdown table
    """
    res: Dict[str, Any] = client.cs_actors(args)
    resources: List[Any] = res.get('resources', [])
    outputs: List[Dict[str, Any]] = list()
    md_outputs: List[Dict[str, Any]] = list()
    md: str = str()
    title: str = 'Falcon Intel Actor search:'
    if resources:
        for r in resources:
            image_url = r.get('image', {}).get('url')
            name = r.get('name')
            actor_id = r.get('id')
            url = r.get('url')
            slug = r.get('slug')
            short_description = r.get('short_description')
            first_activity_date = r.get('first_activity_date')
            last_activity_date = r.get('last_activity_date')
            active = r.get('active')
            known_as = r.get('known_as')
            target_industries = r.get('target_industries', [])
            target_countries = r.get('target_countries', [])
            origins = r.get('origins', [])
            motivations = r.get('motivations', [])
            capability = r.get('capability', {}).get('value')
            group = r.get('group')
            region = r.get('region', {}).get('value')
            kill_chain = r.get('kill_chain')
            # API timestamps are epoch seconds; render them as ISO-8601 UTC.
            output: Dict[str, Any] = assign_params(**{
                'ImageURL': image_url,
                'Name': name,
                'ID': actor_id,
                'URL': url,
                'Slug': slug,
                'ShortDescription': short_description,
                'FirstActivityDate': datetime.fromtimestamp(first_activity_date, timezone.utc).isoformat()
                if first_activity_date else None,
                'LastActivityDate': datetime.fromtimestamp(last_activity_date, timezone.utc).isoformat()
                if last_activity_date else None,
                'Active': active,
                'KnownAs': known_as,
                'TargetIndustries': get_values(target_industries, return_type='list'),
                'TargetCountries': get_values(target_countries, return_type='list'),
                'Origins': get_values(origins, return_type='list'),
                'Motivations': get_values(motivations, return_type='list'),
                'Capability': capability,
                'Group': group,
                'Region': region,
                'KillChains': kill_chain
            })
            outputs.append(output)
            # BUG FIX: decorate a copy. The original aliased `output`, so
            # turning URLs into markdown links also mutated the context entry
            # already appended to `outputs`.
            md_output: Dict[str, Any] = dict(output)
            for key in ('URL', 'ImageURL'):
                if key in md_output:
                    value = md_output[key]
                    md_output[key] = f'[{value}]({value})'
            md_outputs.append(md_output)
    else:
        md = 'No actors found.'
    results: CommandResults = CommandResults(
        outputs=outputs,
        outputs_key_field='ID',
        outputs_prefix='FalconIntel.Actor',
        readable_output=md if md else tableToMarkdown(name=title, t=md_outputs, headerTransform=pascalToSpace),
        raw_response=res
    )
    return results
def cs_indicators_command(client: Client, args: Dict[str, str]) -> List[CommandResults]:
    """
    Search Falcon Intel indicators and return one CommandResults per resource.
    :param client: The integration's client
    :param args: Cortex XSOAR args
    :return: The search results (or a single "not found" entry)
    """
    res: Dict[str, Any] = client.cs_indicators(args)
    resources: List[Any] = res.get('resources', [])
    results: List[CommandResults] = []
    title: str = 'Falcon Intel Indicator search:'
    if resources:
        for r in resources:
            output = get_indicator_outputs(r)
            indicator_value = output.get('Value')
            indicator_type = output.get('Type')
            indicator: Optional[Common.Indicator] = None
            # Only types with a Common.* counterpart produce a DBot indicator.
            if indicator_type in ('hash_md5', 'hash_sha256', 'hash_sha1', 'ip_address', 'url', 'domain'):
                # Normalize API type names to this integration's type names.
                if indicator_type in ('hash_md5', 'hash_sha1', 'hash_sha256'):
                    indicator_type = 'hash'
                elif indicator_type == 'ip_address':
                    indicator_type = 'ip'
                score = get_score_from_resource(r)
                dbot_score = Common.DBotScore(
                    indicator=indicator_value,
                    indicator_type=get_dbot_score_type(indicator_type),
                    integration_name='CrowdStrike Falcon Intel v2',
                    malicious_description='High confidence',
                    score=score
                )
                indicator = get_indicator_object(indicator_value, indicator_type, dbot_score)
            results.append(CommandResults(
                outputs=output,
                outputs_prefix='FalconIntel.Indicator',
                outputs_key_field='ID',
                readable_output=tableToMarkdown(name=title, t=output, headerTransform=pascalToSpace),
                raw_response=res,
                indicator=indicator
            ))
    else:
        results.append(CommandResults(
            readable_output='No indicators found.'
        ))
    return results
def cs_reports_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Search Falcon Intel reports and return them as a CommandResults entry.
    :param client: The integration's client
    :param args: Cortex XSOAR args
    :return: CommandResults with FalconIntel.Report context and a markdown table
    """
    res: Dict[str, Any] = client.cs_reports(args)
    resources: List[Any] = res.get('resources', [])
    outputs: List[Dict[str, Any]] = list()
    md_outputs: List[Dict[str, Any]] = list()
    md: str = str()
    title: str = 'Falcon Intel Report search:'
    if resources:
        for r in resources:
            report_id: int = r.get('id')
            url: str = r.get('url')
            name: str = r.get('name')
            report_type: str = r.get('type', {}).get('name')
            sub_type: str = r.get('sub_type', {}).get('name')
            slug: str = r.get('slug')
            created_date: int = r.get('created_date')
            last_modified_date: int = r.get('last_modified_date')
            short_description: str = r.get('short_description')
            target_industries: List[Any] = r.get('target_industries', [])
            target_countries: List[Any] = r.get('target_countries', [])
            motivations: List[Any] = r.get('motivations', [])
            tags: List[Any] = r.get('tags', [])
            actors: List[Any] = r.get('actors', [])
            # API timestamps are epoch seconds; render them as ISO-8601 UTC.
            output: Dict[str, Any] = assign_params(**{
                'ID': report_id,
                'URL': url,
                'Name': name,
                'Type': report_type,
                'SubType': sub_type,
                'Slug': slug,
                'CreatedDate': datetime.fromtimestamp(created_date, timezone.utc).isoformat()
                if created_date else None,
                # NOTE: 'LastModifiedSate' is a typo, but the key is kept so
                # existing playbooks relying on this context path keep working.
                'LastModifiedSate': datetime.fromtimestamp(last_modified_date, timezone.utc).isoformat()
                if last_modified_date else None,
                'ShortDescription': short_description,
                'TargetIndustries': get_values(target_industries, return_type='list'),
                'TargetCountries': get_values(target_countries, return_type='list'),
                'Motivations': get_values(motivations, return_type='list'),
                'Tags': get_values(tags, return_type='list'),
                'Actors': get_values(actors, return_type='list', keys='name')
            })
            outputs.append(output)
            # BUG FIX: decorate a copy. The original aliased `output`, so the
            # markdown URL link also mutated the context entry in `outputs`.
            md_output: Dict[str, Any] = dict(output)
            if 'URL' in md_output:
                value = md_output['URL']
                md_output['URL'] = f'[{value}]({value})'
            md_outputs.append(md_output)
    else:
        md = 'No reports found.'
    results: CommandResults = CommandResults(
        outputs_prefix='FalconIntel.Report',
        outputs=outputs,
        outputs_key_field='ID',
        # BUG FIX: render the decorated copies; the original passed `outputs`
        # and only showed links because of the aliasing bug fixed above.
        readable_output=md if md else tableToMarkdown(name=title, t=md_outputs, headerTransform=pascalToSpace),
        raw_response=res
    )
    return results
def main():
    """Integration entry point: dispatch the invoked XSOAR command."""
    params: Dict[str, str] = demisto.params()
    args: Dict[str, str] = demisto.args()
    try:
        command: str = demisto.command()
        LOG(f'Command being called in CrowdStrike Falcon Intel v2 is: {command}')
        client: Client = Client(params=params)
        # Generic reputation commands share the same shape: the argument name
        # equals the command name and the value may be a CSV list.
        reputation_handlers = {
            'file': file_command,
            'ip': ip_command,
            'url': url_command,
            'domain': domain_command,
        }
        if command == 'test-module':
            return_results(run_test_module(client))
        elif command in reputation_handlers:
            return_results(reputation_handlers[command](argToList(args[command]), client))
        elif command == 'cs-actors':
            return_results(cs_actors_command(client, args))
        elif command == 'cs-indicators':
            return_results(cs_indicators_command(client, args))
        elif command == 'cs-reports':
            return_results(cs_reports_command(client, args))
        else:
            raise NotImplementedError(f'{command} command is not an existing CrowdStrike Falcon Intel v2 integration')
    except Exception as err:
        return_error(f'Unexpected error:\n{str(err)}', error=traceback.format_exc())
# NOTE(review): the API module import intentionally sits after the definitions
# (hence the noqa) -- presumably required by XSOAR's module injection; confirm.
from CrowdStrikeApiModule import *  # noqa: E402
# XSOAR executes integrations under 'builtin'/'builtins' module names.
if __name__ in ('__main__', 'builtin', 'builtins'):
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations(object):
    """RouteFilterRulesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Expose the generated model classes on the operation group so callers can
    # reach them as ``client.route_filter_rules.models``.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, serializers and client configuration.

        Called by the generated service client; not meant to be called directly.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request of the long-running delete operation.

        Returns ``None`` (or ``cls(pipeline_response, None, {})`` when a custom
        ``cls`` response hook is supplied) and raises
        :class:`~azure.core.exceptions.HttpResponseError` for any status code
        other than 200, 202 or 204.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known failure status codes to specific azure-core exception
        # types; callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL from the URL template stored on this method's metadata.
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are the accepted responses for the initial DELETE call;
        # anything else is surfaced as an ARM-formatted error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore

    def begin_delete(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial request. 'cls' is forced
            # to a pass-through lambda so the raw PipelineResponse is preserved
            # for the poller instead of being deserialized here.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request; drop them
        # before the remaining kwargs are forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; just honor a custom 'cls' hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # polling=True -> default ARMPolling reading the final state from the
        # 'location' header; polling=False -> no polling; any other value is
        # treated as a caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore

    def get(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.RouteFilterRule"
        """Gets the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteFilterRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_06_01.models.RouteFilterRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
        # Map well-known failure status codes to specific azure-core exception
        # types; callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL from the URL template stored on this method's metadata.
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success status for this GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        route_filter_rule_parameters, # type: "_models.RouteFilterRule"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.RouteFilterRule"
        """Send the initial PUT request of the long-running create-or-update operation.

        Returns the deserialized ``RouteFilterRule`` from the 200/201 response
        (or the result of a custom ``cls`` hook) and raises
        :class:`~azure.core.exceptions.HttpResponseError` on any other status.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
        # Map well-known failure status codes to specific azure-core exception
        # types; callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the URL template stored on this method's metadata.
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the model into the request body.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 (updated) and 201 (created) are the accepted responses.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both success codes carry a RouteFilterRule body.
        if response.status_code == 200:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        route_filter_rule_parameters, # type: "_models.RouteFilterRule"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.RouteFilterRule"]
        """Creates or updates a route in the specified route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the route filter rule.
        :type rule_name: str
        :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
         rule operation.
        :type route_filter_rule_parameters: ~azure.mgmt.network.v2020_06_01.models.RouteFilterRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either RouteFilterRule or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.RouteFilterRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial request. 'cls' is forced
            # to a pass-through lambda so the raw PipelineResponse is preserved
            # for the poller instead of being deserialized here.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                route_filter_rule_parameters=route_filter_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request; drop them
        # before the remaining kwargs are forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response body into the model (or hand the
            # pair to a custom 'cls' hook).
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # polling=True -> default ARMPolling reading the final state from the
        # Azure-AsyncOperation header; polling=False -> no polling; any other
        # value is treated as a caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore

    def list_by_route_filter(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.RouteFilterRuleListResult"]
        """Gets all RouteFilterRules in a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.RouteFilterRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
        # Map well-known failure status codes to specific azure-core exception
        # types; callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for the first page (from the URL template)
            # or for a continuation page (next_link is a complete URL already
            # carrying its own query string).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_route_filter.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link or None, items).
            deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # A custom 'cls' hook may transform the page's element list.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and raise on any non-200 response.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged lazily drives get_next/extract_data until next_link is None.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.